1 /*-
2  * Copyright (c) 2000-2004
3  *	Poul-Henning Kamp.  All rights reserved.
4  * Copyright (c) 1989, 1992-1993, 1995
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software donated to Berkeley by
8  * Jan-Simon Pendry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)kernfs_vnops.c	8.15 (Berkeley) 5/21/95
32  * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vnops.c 1.43
33  *
34  * $FreeBSD$
35  */
36 
37 /*
38  * TODO:
39  *	remove empty directories
40  *	mkdir: want it?
41  */
42 
43 #include "opt_mac.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/conf.h>
48 #include <sys/dirent.h>
49 #include <sys/fcntl.h>
50 #include <sys/file.h>
51 #include <sys/filedesc.h>
52 #include <sys/filio.h>
53 #include <sys/kernel.h>
54 #include <sys/lock.h>
55 #include <sys/malloc.h>
56 #include <sys/mount.h>
57 #include <sys/namei.h>
58 #include <sys/priv.h>
59 #include <sys/proc.h>
60 #include <sys/stat.h>
61 #include <sys/sx.h>
62 #include <sys/time.h>
63 #include <sys/ttycom.h>
64 #include <sys/unistd.h>
65 #include <sys/vnode.h>
66 
67 static struct vop_vector devfs_vnodeops;
68 static struct vop_vector devfs_specops;
69 static struct fileops devfs_ops_f;
70 
71 #include <fs/devfs/devfs.h>
72 #include <fs/devfs/devfs_int.h>
73 
74 #include <security/mac/mac_framework.h>
75 
76 static MALLOC_DEFINE(M_CDEVPDATA, "DEVFSP", "Metainfo for cdev-fp data");
77 
78 struct mtx	devfs_de_interlock;
79 MTX_SYSINIT(devfs_de_interlock, &devfs_de_interlock, "devfs interlock", MTX_DEF);
80 struct sx	clone_drain_lock;
81 SX_SYSINIT(clone_drain_lock, &clone_drain_lock, "clone events drain lock");
82 struct mtx	cdevpriv_mtx;
83 MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF);
84 
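/*
 * Helper for the fileops methods below.  They all follow the same pattern
 * around the cdevsw call (a sketch; "d_foo" stands for d_read, d_write,
 * d_ioctl, ...):
 *
 *	error = devfs_fp_check(fp, &dev, &dsw);
 *	if (error)
 *		return (error);
 *	error = dsw->d_foo(dev, ...);
 *	curthread->td_fpop = NULL;
 *	dev_relthread(dev);
 *
 * devfs_fp_check() leaves curthread->td_fpop pointing at fp so that the
 * driver can call devfs_get_cdevpriv()/devfs_set_cdevpriv() from inside
 * d_foo(); the thread reference it acquires must be released again with
 * dev_relthread().
 */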
85 static int
86 devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp)
87 {
88 
89 	*dswp = devvn_refthread(fp->f_vnode, devp);
90 	if (*devp != fp->f_data) {
91 		if (*dswp != NULL)
92 			dev_relthread(*devp);
93 		return (ENXIO);
94 	}
95 	KASSERT((*devp)->si_refcount > 0,
96 	    ("devfs: un-referenced struct cdev *(%s)", devtoname(*devp)));
97 	if (*dswp == NULL)
98 		return (ENXIO);
99 	curthread->td_fpop = fp;
100 	return (0);
101 }
102 
103 int
104 devfs_get_cdevpriv(void **datap)
105 {
106 	struct file *fp;
107 	struct cdev_privdata *p;
108 	int error;
109 
110 	fp = curthread->td_fpop;
111 	if (fp == NULL)
112 		return (EBADF);
113 	mtx_lock(&cdevpriv_mtx);
114 	p = fp->f_cdevpriv;
115 	mtx_unlock(&cdevpriv_mtx);
116 	if (p != NULL) {
117 		error = 0;
118 		*datap = p->cdpd_data;
119 	} else
120 		error = ENOENT;
121 	return (error);
122 }
123 
124 int
125 devfs_set_cdevpriv(void *priv, cdevpriv_dtr_t priv_dtr)
126 {
127 	struct file *fp;
128 	struct cdev_priv *cdp;
129 	struct cdev_privdata *p;
130 	int error;
131 
132 	fp = curthread->td_fpop;
133 	if (fp == NULL)
134 		return (ENOENT);
135 	cdp = ((struct cdev *)fp->f_data)->si_priv;
136 	p = malloc(sizeof(struct cdev_privdata), M_CDEVPDATA, M_WAITOK);
137 	p->cdpd_data = priv;
138 	p->cdpd_dtr = priv_dtr;
139 	p->cdpd_fp = fp;
140 	mtx_lock(&cdevpriv_mtx);
141 	if (fp->f_cdevpriv == NULL) {
142 		LIST_INSERT_HEAD(&cdp->cdp_fdpriv, p, cdpd_list);
143 		fp->f_cdevpriv = p;
144 		mtx_unlock(&cdevpriv_mtx);
145 		error = 0;
146 	} else {
147 		mtx_unlock(&cdevpriv_mtx);
148 		free(p, M_CDEVPDATA);
149 		error = EBUSY;
150 	}
151 	return (error);
152 }
153 
154 void
155 devfs_destroy_cdevpriv(struct cdev_privdata *p)
156 {
157 
158 	mtx_assert(&cdevpriv_mtx, MA_OWNED);
159 	p->cdpd_fp->f_cdevpriv = NULL;
160 	LIST_REMOVE(p, cdpd_list);
161 	mtx_unlock(&cdevpriv_mtx);
162 	(p->cdpd_dtr)(p->cdpd_data);
163 	free(p, M_CDEVPDATA);
164 }
165 
166 void
167 devfs_fpdrop(struct file *fp)
168 {
169 	struct cdev_privdata *p;
170 
171 	mtx_lock(&cdevpriv_mtx);
172 	if ((p = fp->f_cdevpriv) == NULL) {
173 		mtx_unlock(&cdevpriv_mtx);
174 		return;
175 	}
176 	devfs_destroy_cdevpriv(p);
177 }
178 
179 void
180 devfs_clear_cdevpriv(void)
181 {
182 	struct file *fp;
183 
184 	fp = curthread->td_fpop;
185 	if (fp == NULL)
186 		return;
187 	devfs_fpdrop(fp);
188 }
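
/*
 * Sketch of how a driver is expected to use the cdevpriv KPI implemented
 * above; the "mydev" names and the use of M_DEVBUF are illustrative only:
 *
 *	static void
 *	mydev_dtr(void *arg)
 *	{
 *
 *		free(arg, M_DEVBUF);
 *	}
 *
 *	static int
 *	mydev_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
 *	{
 *		struct mydev_softc *sc;
 *		int error;
 *
 *		sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *		error = devfs_set_cdevpriv(sc, mydev_dtr);
 *		if (error != 0)
 *			free(sc, M_DEVBUF);
 *		return (error);
 *	}
 *
 *	static int
 *	mydev_read(struct cdev *dev, struct uio *uio, int ioflag)
 *	{
 *		struct mydev_softc *sc;
 *		int error;
 *
 *		error = devfs_get_cdevpriv((void **)&sc);
 *		if (error != 0)
 *			return (error);
 *		...
 *	}
 *
 * The destructor is invoked through devfs_destroy_cdevpriv(), either when
 * the file descriptor is finally dropped (devfs_fpdrop()) or when the
 * driver calls devfs_clear_cdevpriv() itself.
 */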
189 
190 /*
191  * Construct the fully qualified path name relative to the mountpoint
192  */
193 static char *
194 devfs_fqpn(char *buf, struct vnode *dvp, struct componentname *cnp)
195 {
196 	int i;
197 	struct devfs_dirent *de, *dd;
198 	struct devfs_mount *dmp;
199 
200 	dmp = VFSTODEVFS(dvp->v_mount);
201 	dd = dvp->v_data;
202 	i = SPECNAMELEN;
203 	buf[i] = '\0';
204 	i -= cnp->cn_namelen;
205 	if (i < 0)
206 		 return (NULL);
207 	bcopy(cnp->cn_nameptr, buf + i, cnp->cn_namelen);
208 	de = dd;
209 	while (de != dmp->dm_rootdir) {
210 		i--;
211 		if (i < 0)
212 			 return (NULL);
213 		buf[i] = '/';
214 		i -= de->de_dirent->d_namlen;
215 		if (i < 0)
216 			 return (NULL);
217 		bcopy(de->de_dirent->d_name, buf + i,
218 		    de->de_dirent->d_namlen);
219 		de = TAILQ_FIRST(&de->de_dlist);	/* "." */
220 		de = TAILQ_NEXT(de, de_list);		/* ".." */
221 		de = de->de_dir;
222 	}
223 	return (buf + i);
224 }
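
/*
 * Callers pass a SPECNAMELEN + 1 sized buffer and use the returned pointer,
 * which points somewhere into that buffer; a sketch mirroring the use in
 * devfs_lookupx() below:
 *
 *	char specname[SPECNAMELEN + 1], *pname;
 *
 *	pname = devfs_fqpn(specname, dvp, cnp);
 *	if (pname != NULL)
 *		EVENTHANDLER_INVOKE(dev_clone, td->td_ucred, pname,
 *		    strlen(pname), &cdev);
 */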
225 
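/*
 * Release the DEVFS_DE_HOLD()/DEVFS_DMP_HOLD() references taken in
 * devfs_allocv().  Returns 0 if the dirent is still valid (dm_lock is
 * dropped only if drop_dm_lock was set), 1 if the dirent was doomed while
 * we slept (dm_lock has been dropped), or 2 if the last hold on the mount
 * went away as well (dm_lock has been dropped and the mount torn down).
 */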
226 static int
227 devfs_allocv_drop_refs(int drop_dm_lock, struct devfs_mount *dmp,
228 	struct devfs_dirent *de)
229 {
230 	int not_found;
231 
232 	not_found = 0;
233 	if (de->de_flags & DE_DOOMED)
234 		not_found = 1;
235 	if (DEVFS_DE_DROP(de)) {
236 		KASSERT(not_found == 1, ("DEVFS de dropped but not doomed"));
237 		devfs_dirent_free(de);
238 	}
239 	if (DEVFS_DMP_DROP(dmp)) {
240 		KASSERT(not_found == 1,
241 			("DEVFS mount struct freed before dirent"));
242 		not_found = 2;
243 		sx_xunlock(&dmp->dm_lock);
244 		devfs_unmount_final(dmp);
245 	}
246 	if (not_found == 1 || (drop_dm_lock && not_found != 2))
247 		sx_unlock(&dmp->dm_lock);
248 	return (not_found);
249 }
250 
251 static void
252 devfs_insmntque_dtr(struct vnode *vp, void *arg)
253 {
254 	struct devfs_dirent *de;
255 
256 	de = (struct devfs_dirent *)arg;
257 	mtx_lock(&devfs_de_interlock);
258 	vp->v_data = NULL;
259 	de->de_vnode = NULL;
260 	mtx_unlock(&devfs_de_interlock);
261 	vgone(vp);
262 	vput(vp);
263 }
264 
265 /*
266  * devfs_allocv shall be entered with dmp->dm_lock held, and it drops
267  * it on return.
268  */
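/*
 * Typical caller sketch (compare devfs_lookupx() and devfs_mknod() below;
 * error handling abbreviated):
 *
 *	sx_xlock(&dmp->dm_lock);
 *	...
 *	error = devfs_allocv(de, mp, &vp, curthread);
 *
 * dm_lock is no longer held once devfs_allocv() returns, whether or not
 * it succeeded.
 */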
269 int
270 devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp, struct thread *td)
271 {
272 	int error;
273 	struct vnode *vp;
274 	struct cdev *dev;
275 	struct devfs_mount *dmp;
276 
277 	KASSERT(td == curthread, ("devfs_allocv: td != curthread"));
278 	dmp = VFSTODEVFS(mp);
279 	if (de->de_flags & DE_DOOMED) {
280 		sx_xunlock(&dmp->dm_lock);
281 		return (ENOENT);
282 	}
283  loop:
284 	DEVFS_DE_HOLD(de);
285 	DEVFS_DMP_HOLD(dmp);
286 	mtx_lock(&devfs_de_interlock);
287 	vp = de->de_vnode;
288 	if (vp != NULL) {
289 		VI_LOCK(vp);
290 		mtx_unlock(&devfs_de_interlock);
291 		sx_xunlock(&dmp->dm_lock);
292 		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
293 		sx_xlock(&dmp->dm_lock);
294 		if (devfs_allocv_drop_refs(0, dmp, de)) {
295 			if (error == 0)
296 				vput(vp);
297 			return (ENOENT);
298 		}
299 		else if (error)
300 			goto loop;
301 		sx_xunlock(&dmp->dm_lock);
302 		*vpp = vp;
303 		return (0);
304 	}
305 	mtx_unlock(&devfs_de_interlock);
306 	if (de->de_dirent->d_type == DT_CHR) {
307 		if (!(de->de_cdp->cdp_flags & CDP_ACTIVE)) {
308 			devfs_allocv_drop_refs(1, dmp, de);
309 			return (ENOENT);
310 		}
311 		dev = &de->de_cdp->cdp_c;
312 	} else {
313 		dev = NULL;
314 	}
315 	error = getnewvnode("devfs", mp, &devfs_vnodeops, &vp);
316 	if (error != 0) {
317 		devfs_allocv_drop_refs(1, dmp, de);
318 		printf("devfs_allocv: failed to allocate new vnode\n");
319 		return (error);
320 	}
321 
322 	if (de->de_dirent->d_type == DT_CHR) {
323 		vp->v_type = VCHR;
324 		VI_LOCK(vp);
325 		dev_lock();
326 		dev_refl(dev);
327 		/* XXX: v_rdev should be protected by the vnode lock */
328 		vp->v_rdev = dev;
329 		KASSERT(vp->v_usecount == 1,
330 		    ("%s %d (%d)\n", __func__, __LINE__, vp->v_usecount));
331 		dev->si_usecount += vp->v_usecount;
332 		dev_unlock();
333 		VI_UNLOCK(vp);
334 		vp->v_op = &devfs_specops;
335 	} else if (de->de_dirent->d_type == DT_DIR) {
336 		vp->v_type = VDIR;
337 	} else if (de->de_dirent->d_type == DT_LNK) {
338 		vp->v_type = VLNK;
339 	} else {
340 		vp->v_type = VBAD;
341 	}
342 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
343 	mtx_lock(&devfs_de_interlock);
344 	vp->v_data = de;
345 	de->de_vnode = vp;
346 	mtx_unlock(&devfs_de_interlock);
347 	error = insmntque1(vp, mp, devfs_insmntque_dtr, de);
348 	if (error != 0) {
349 		(void) devfs_allocv_drop_refs(1, dmp, de);
350 		return (error);
351 	}
352 	if (devfs_allocv_drop_refs(0, dmp, de)) {
353 		vput(vp);
354 		return (ENOENT);
355 	}
356 #ifdef MAC
357 	mac_devfs_vnode_associate(mp, de, vp);
358 #endif
359 	sx_xunlock(&dmp->dm_lock);
360 	*vpp = vp;
361 	return (0);
362 }
363 
364 static int
365 devfs_access(struct vop_access_args *ap)
366 {
367 	struct vnode *vp = ap->a_vp;
368 	struct devfs_dirent *de;
369 	int error;
370 
371 	de = vp->v_data;
372 	if (vp->v_type == VDIR)
373 		de = de->de_dir;
374 
375 	error = vaccess(vp->v_type, de->de_mode, de->de_uid, de->de_gid,
376 	    ap->a_mode, ap->a_cred, NULL);
377 	if (!error)
378 		return (error);
379 	if (error != EACCES)
380 		return (error);
381 	/* We do, however, allow access to the controlling terminal */
382 	if (!(ap->a_td->td_proc->p_flag & P_CONTROLT))
383 		return (error);
384 	if (ap->a_td->td_proc->p_session->s_ttyvp == de->de_vnode)
385 		return (0);
386 	return (error);
387 }
388 
389 /* ARGSUSED */
390 static int
391 devfs_advlock(struct vop_advlock_args *ap)
392 {
393 
394 	return (ap->a_flags & F_FLOCK ? EOPNOTSUPP : EINVAL);
395 }
396 
397 /* ARGSUSED */
398 static int
399 devfs_close(struct vop_close_args *ap)
400 {
401 	struct vnode *vp = ap->a_vp, *oldvp;
402 	struct thread *td = ap->a_td;
403 	struct cdev *dev = vp->v_rdev;
404 	struct cdevsw *dsw;
405 	int vp_locked, error;
406 
407 	/*
408 	 * Hack: a tty device that is a controlling terminal
409 	 * has a reference from the session structure.
410 	 * We cannot easily tell that a character device is
411 	 * a controlling terminal, unless it is the closing
412 	 * process' controlling terminal.  In that case,
413 	 * if the reference count is 2 (this last descriptor
414 	 * plus the session), release the reference from the session.
415 	 */
416 	oldvp = NULL;
417 	sx_xlock(&proctree_lock);
418 	if (td && vp == td->td_proc->p_session->s_ttyvp) {
419 		SESS_LOCK(td->td_proc->p_session);
420 		VI_LOCK(vp);
421 		if (count_dev(dev) == 2 && (vp->v_iflag & VI_DOOMED) == 0) {
422 			td->td_proc->p_session->s_ttyvp = NULL;
423 			oldvp = vp;
424 		}
425 		VI_UNLOCK(vp);
426 		SESS_UNLOCK(td->td_proc->p_session);
427 	}
428 	sx_xunlock(&proctree_lock);
429 	if (oldvp != NULL)
430 		vrele(oldvp);
431 	/*
432 	 * We do not want to really close the device if it
433 	 * is still in use unless we are trying to close it
434 	 * forcibly. Since every use (buffer, vnode, swap, cmap)
435 	 * holds a reference to the vnode, and because we mark
436 	 * any other vnodes that alias this device, when the
437 	 * sum of the reference counts on all the aliased
438 	 * vnodes descends to one, we are on the last close.
439 	 */
440 	dsw = dev_refthread(dev);
441 	if (dsw == NULL)
442 		return (ENXIO);
443 	VI_LOCK(vp);
444 	if (vp->v_iflag & VI_DOOMED) {
445 		/* Forced close. */
446 	} else if (dsw->d_flags & D_TRACKCLOSE) {
447 		/* Keep device updated on status. */
448 	} else if (count_dev(dev) > 1) {
449 		VI_UNLOCK(vp);
450 		dev_relthread(dev);
451 		return (0);
452 	}
453 	vholdl(vp);
454 	VI_UNLOCK(vp);
455 	vp_locked = VOP_ISLOCKED(vp);
456 	VOP_UNLOCK(vp, 0);
457 	KASSERT(dev->si_refcount > 0,
458 	    ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev)));
459 	error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td);
460 	dev_relthread(dev);
461 	vn_lock(vp, vp_locked | LK_RETRY);
462 	vdrop(vp);
463 	return (error);
464 }
465 
466 static int
467 devfs_close_f(struct file *fp, struct thread *td)
468 {
469 	int error;
470 
471 	curthread->td_fpop = fp;
472 	error = vnops.fo_close(fp, td);
473 	curthread->td_fpop = NULL;
474 	return (error);
475 }
476 
477 /* ARGSUSED */
478 static int
479 devfs_fsync(struct vop_fsync_args *ap)
480 {
481 	if (!vn_isdisk(ap->a_vp, NULL))
482 		return (0);
483 
484 	return (vop_stdfsync(ap));
485 }
486 
487 static int
488 devfs_getattr(struct vop_getattr_args *ap)
489 {
490 	struct vnode *vp = ap->a_vp;
491 	struct vattr *vap = ap->a_vap;
492 	int error = 0;
493 	struct devfs_dirent *de;
494 	struct cdev *dev;
495 
496 	de = vp->v_data;
497 	KASSERT(de != NULL, ("Null dirent in devfs_getattr vp=%p", vp));
498 	if (vp->v_type == VDIR) {
499 		de = de->de_dir;
500 		KASSERT(de != NULL,
501 		    ("Null dir dirent in devfs_getattr vp=%p", vp));
502 	}
503 	bzero((caddr_t) vap, sizeof(*vap));
504 	vattr_null(vap);
505 	vap->va_uid = de->de_uid;
506 	vap->va_gid = de->de_gid;
507 	vap->va_mode = de->de_mode;
508 	if (vp->v_type == VLNK)
509 		vap->va_size = strlen(de->de_symlink);
510 	else if (vp->v_type == VDIR)
511 		vap->va_size = vap->va_bytes = DEV_BSIZE;
512 	else
513 		vap->va_size = 0;
514 	if (vp->v_type != VDIR)
515 		vap->va_bytes = 0;
516 	vap->va_blocksize = DEV_BSIZE;
517 	vap->va_type = vp->v_type;
518 
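/*
 * Timestamps on devfs dirents and cdevs start out as zero; anything within
 * the first hour after the epoch is reported as the boot time instead, so
 * that nodes which were never explicitly touched show a sensible date.
 */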
519 #define fix(aa)							\
520 	do {							\
521 		if ((aa).tv_sec <= 3600) {			\
522 			(aa).tv_sec = boottime.tv_sec;		\
523 			(aa).tv_nsec = boottime.tv_usec * 1000; \
524 		}						\
525 	} while (0)
526 
527 	if (vp->v_type != VCHR)  {
528 		fix(de->de_atime);
529 		vap->va_atime = de->de_atime;
530 		fix(de->de_mtime);
531 		vap->va_mtime = de->de_mtime;
532 		fix(de->de_ctime);
533 		vap->va_ctime = de->de_ctime;
534 	} else {
535 		dev = vp->v_rdev;
536 		fix(dev->si_atime);
537 		vap->va_atime = dev->si_atime;
538 		fix(dev->si_mtime);
539 		vap->va_mtime = dev->si_mtime;
540 		fix(dev->si_ctime);
541 		vap->va_ctime = dev->si_ctime;
542 
543 		vap->va_rdev = dev->si_priv->cdp_inode;
544 	}
545 	vap->va_gen = 0;
546 	vap->va_flags = 0;
547 	vap->va_nlink = de->de_links;
548 	vap->va_fileid = de->de_inode;
549 
550 	return (error);
551 }
552 
553 /* ARGSUSED */
554 static int
555 devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struct thread *td)
556 {
557 	struct cdev *dev;
558 	struct cdevsw *dsw;
559 	struct vnode *vp;
560 	struct vnode *vpold;
561 	int error, i;
562 	const char *p;
563 	struct fiodgname_arg *fgn;
564 
565 	error = devfs_fp_check(fp, &dev, &dsw);
566 	if (error)
567 		return (error);
568 
569 	if (com == FIODTYPE) {
570 		*(int *)data = dsw->d_flags & D_TYPEMASK;
571 		td->td_fpop = NULL;
572 		dev_relthread(dev);
573 		return (0);
574 	} else if (com == FIODGNAME) {
575 		fgn = data;
576 		p = devtoname(dev);
577 		i = strlen(p) + 1;
578 		if (i > fgn->len)
579 			error = EINVAL;
580 		else
581 			error = copyout(p, fgn->buf, i);
582 		td->td_fpop = NULL;
583 		dev_relthread(dev);
584 		return (error);
585 	}
586 	error = dsw->d_ioctl(dev, com, data, fp->f_flag, td);
587 	td->td_fpop = NULL;
588 	dev_relthread(dev);
589 	if (error == ENOIOCTL)
590 		error = ENOTTY;
591 	if (error == 0 && com == TIOCSCTTY) {
592 		vp = fp->f_vnode;
593 
594 		/* Do nothing if reassigning same control tty */
595 		sx_slock(&proctree_lock);
596 		if (td->td_proc->p_session->s_ttyvp == vp) {
597 			sx_sunlock(&proctree_lock);
598 			return (0);
599 		}
600 
601 		mtx_lock(&Giant);	/* XXX TTY */
602 
603 		vpold = td->td_proc->p_session->s_ttyvp;
604 		VREF(vp);
605 		SESS_LOCK(td->td_proc->p_session);
606 		td->td_proc->p_session->s_ttyvp = vp;
607 		SESS_UNLOCK(td->td_proc->p_session);
608 
609 		sx_sunlock(&proctree_lock);
610 
611 		/* Get rid of reference to old control tty */
612 		if (vpold)
613 			vrele(vpold);
614 		mtx_unlock(&Giant);	/* XXX TTY */
615 	}
616 	return (error);
617 }
618 
619 /* ARGSUSED */
620 static int
621 devfs_kqfilter_f(struct file *fp, struct knote *kn)
622 {
623 	struct cdev *dev;
624 	struct cdevsw *dsw;
625 	int error;
626 
627 	error = devfs_fp_check(fp, &dev, &dsw);
628 	if (error)
629 		return (error);
630 	error = dsw->d_kqfilter(dev, kn);
631 	curthread->td_fpop = NULL;
632 	dev_relthread(dev);
633 	return (error);
634 }
635 
636 static int
637 devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock)
638 {
639 	struct componentname *cnp;
640 	struct vnode *dvp, **vpp;
641 	struct thread *td;
642 	struct devfs_dirent *de, *dd;
643 	struct devfs_dirent **dde;
644 	struct devfs_mount *dmp;
645 	struct cdev *cdev;
646 	int error, flags, nameiop;
647 	char specname[SPECNAMELEN + 1], *pname;
648 
649 	cnp = ap->a_cnp;
650 	vpp = ap->a_vpp;
651 	dvp = ap->a_dvp;
652 	pname = cnp->cn_nameptr;
653 	td = cnp->cn_thread;
654 	flags = cnp->cn_flags;
655 	nameiop = cnp->cn_nameiop;
656 	dmp = VFSTODEVFS(dvp->v_mount);
657 	dd = dvp->v_data;
658 	*vpp = NULLVP;
659 
660 	if ((flags & ISLASTCN) && nameiop == RENAME)
661 		return (EOPNOTSUPP);
662 
663 	if (dvp->v_type != VDIR)
664 		return (ENOTDIR);
665 
666 	if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
667 		return (EIO);
668 
669 	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, td);
670 	if (error)
671 		return (error);
672 
673 	if (cnp->cn_namelen == 1 && *pname == '.') {
674 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
675 			return (EINVAL);
676 		*vpp = dvp;
677 		VREF(dvp);
678 		return (0);
679 	}
680 
681 	if (flags & ISDOTDOT) {
682 		if ((flags & ISLASTCN) && nameiop != LOOKUP)
683 			return (EINVAL);
684 		VOP_UNLOCK(dvp, 0);
685 		de = TAILQ_FIRST(&dd->de_dlist);	/* "." */
686 		de = TAILQ_NEXT(de, de_list);		/* ".." */
687 		de = de->de_dir;
688 		error = devfs_allocv(de, dvp->v_mount, vpp, td);
689 		*dm_unlock = 0;
690 		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
691 		return (error);
692 	}
693 
694 	DEVFS_DMP_HOLD(dmp);
695 	devfs_populate(dmp);
696 	if (DEVFS_DMP_DROP(dmp)) {
697 		*dm_unlock = 0;
698 		sx_xunlock(&dmp->dm_lock);
699 		devfs_unmount_final(dmp);
700 		return (ENOENT);
701 	}
702 	dd = dvp->v_data;
703 	de = devfs_find(dd, cnp->cn_nameptr, cnp->cn_namelen);
704 	while (de == NULL) {	/* While(...) so we can use break */
705 
706 		if (nameiop == DELETE)
707 			return (ENOENT);
708 
709 		/*
710 		 * OK, we didn't have an entry for the name we were asked for
711 		 * so we try to see if anybody can create it on demand.
712 		 */
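		/*
		 * A dev_clone handler registered by a driver looks roughly
		 * like this (a sketch; the "mydev" names are made up):
		 *
		 *	static void
		 *	mydev_clone(void *arg, struct ucred *cred, char *name,
		 *	    int namelen, struct cdev **dev)
		 *	{
		 *
		 *		if (*dev != NULL)
		 *			return;
		 *		if (strcmp(name, "mydev") != 0)
		 *			return;
		 *		*dev = make_dev(&mydev_cdevsw, 0, UID_ROOT,
		 *		    GID_WHEEL, 0600, "mydev");
		 *		dev_ref(*dev);
		 *	}
		 *
		 *	EVENTHANDLER_REGISTER(dev_clone, mydev_clone, 0, 1000);
		 *
		 * The reference the handler takes on the cdev is dropped
		 * with dev_rel() at the bottom of this loop.
		 */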
713 		pname = devfs_fqpn(specname, dvp, cnp);
714 		if (pname == NULL)
715 			break;
716 
717 		cdev = NULL;
718 		DEVFS_DMP_HOLD(dmp);
719 		sx_xunlock(&dmp->dm_lock);
720 		sx_slock(&clone_drain_lock);
721 		EVENTHANDLER_INVOKE(dev_clone,
722 		    td->td_ucred, pname, strlen(pname), &cdev);
723 		sx_sunlock(&clone_drain_lock);
724 		sx_xlock(&dmp->dm_lock);
725 		if (DEVFS_DMP_DROP(dmp)) {
726 			*dm_unlock = 0;
727 			sx_xunlock(&dmp->dm_lock);
728 			devfs_unmount_final(dmp);
729 			return (ENOENT);
730 		}
731 		if (cdev == NULL)
732 			break;
733 
734 		DEVFS_DMP_HOLD(dmp);
735 		devfs_populate(dmp);
736 		if (DEVFS_DMP_DROP(dmp)) {
737 			*dm_unlock = 0;
738 			sx_xunlock(&dmp->dm_lock);
739 			devfs_unmount_final(dmp);
740 			return (ENOENT);
741 		}
742 
743 		dev_lock();
744 		dde = &cdev->si_priv->cdp_dirents[dmp->dm_idx];
745 		if (dde != NULL && *dde != NULL)
746 			de = *dde;
747 		dev_unlock();
748 		dev_rel(cdev);
749 		break;
750 	}
751 
752 	if (de == NULL || de->de_flags & DE_WHITEOUT) {
753 		if ((nameiop == CREATE || nameiop == RENAME) &&
754 		    (flags & (LOCKPARENT | WANTPARENT)) && (flags & ISLASTCN)) {
755 			cnp->cn_flags |= SAVENAME;
756 			return (EJUSTRETURN);
757 		}
758 		return (ENOENT);
759 	}
760 
761 	if ((cnp->cn_nameiop == DELETE) && (flags & ISLASTCN)) {
762 		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
763 		if (error)
764 			return (error);
765 		if (*vpp == dvp) {
766 			VREF(dvp);
767 			*vpp = dvp;
768 			return (0);
769 		}
770 	}
771 	error = devfs_allocv(de, dvp->v_mount, vpp, td);
772 	*dm_unlock = 0;
773 	return (error);
774 }
775 
776 static int
777 devfs_lookup(struct vop_lookup_args *ap)
778 {
779 	int j;
780 	struct devfs_mount *dmp;
781 	int dm_unlock;
782 
783 	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
784 	dm_unlock = 1;
785 	sx_xlock(&dmp->dm_lock);
786 	j = devfs_lookupx(ap, &dm_unlock);
787 	if (dm_unlock == 1)
788 		sx_xunlock(&dmp->dm_lock);
789 	return (j);
790 }
791 
792 static int
793 devfs_mknod(struct vop_mknod_args *ap)
794 {
795 	struct componentname *cnp;
796 	struct vnode *dvp, **vpp;
797 	struct thread *td;
798 	struct devfs_dirent *dd, *de;
799 	struct devfs_mount *dmp;
800 	int error;
801 
802 	/*
803 	 * The only type of node we should be creating here is a
804 	 * character device, for anything else return EOPNOTSUPP.
805 	 */
806 	if (ap->a_vap->va_type != VCHR)
807 		return (EOPNOTSUPP);
808 	dvp = ap->a_dvp;
809 	dmp = VFSTODEVFS(dvp->v_mount);
810 
811 	cnp = ap->a_cnp;
812 	vpp = ap->a_vpp;
813 	td = cnp->cn_thread;
814 	dd = dvp->v_data;
815 
816 	error = ENOENT;
817 	sx_xlock(&dmp->dm_lock);
818 	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
819 		if (cnp->cn_namelen != de->de_dirent->d_namlen)
820 			continue;
821 		if (bcmp(cnp->cn_nameptr, de->de_dirent->d_name,
822 		    de->de_dirent->d_namlen) != 0)
823 			continue;
824 		if (de->de_flags & DE_WHITEOUT)
825 			break;
826 		goto notfound;
827 	}
828 	if (de == NULL)
829 		goto notfound;
830 	de->de_flags &= ~DE_WHITEOUT;
831 	error = devfs_allocv(de, dvp->v_mount, vpp, td);
832 	return (error);
833 notfound:
834 	sx_xunlock(&dmp->dm_lock);
835 	return (error);
836 }
837 
838 /* ARGSUSED */
839 static int
840 devfs_open(struct vop_open_args *ap)
841 {
842 	struct thread *td = ap->a_td;
843 	struct vnode *vp = ap->a_vp;
844 	struct cdev *dev = vp->v_rdev;
845 	struct file *fp = ap->a_fp;
846 	int error;
847 	struct cdevsw *dsw;
848 
849 	if (vp->v_type == VBLK)
850 		return (ENXIO);
851 
852 	if (dev == NULL)
853 		return (ENXIO);
854 
855 	/* Make this field valid before any I/O in d_open. */
856 	if (dev->si_iosize_max == 0)
857 		dev->si_iosize_max = DFLTPHYS;
858 
859 	dsw = dev_refthread(dev);
860 	if (dsw == NULL)
861 		return (ENXIO);
862 
863 	/* XXX: Special casing of ttys for deadfs.  Probably redundant. */
864 	if (dsw->d_flags & D_TTY)
865 		vp->v_vflag |= VV_ISTTY;
866 
867 	VOP_UNLOCK(vp, 0);
868 
869 	if (fp != NULL) {
870 		td->td_fpop = fp;
871 		fp->f_data = dev;
872 	}
873 	if (dsw->d_fdopen != NULL)
874 		error = dsw->d_fdopen(dev, ap->a_mode, td, fp);
875 	else
876 		error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td);
877 	td->td_fpop = NULL;
878 
879 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
880 
881 	dev_relthread(dev);
882 
883 	if (error)
884 		return (error);
885 
886 #if 0	/* /dev/console */
887 	KASSERT(fp != NULL,
888 	     ("Could not vnode bypass device on NULL fp"));
889 #else
890 	if (fp == NULL)
891 		return (error);
892 #endif
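	/*
	 * From here on the descriptor bypasses the vnode layer for I/O:
	 * finit() switches it to devfs_ops_f (below), so subsequent read,
	 * write, ioctl, poll and kqfilter calls go straight to the cdevsw.
	 */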
893 	KASSERT(fp->f_ops == &badfileops,
894 	     ("Could not vnode bypass device on fdops %p", fp->f_ops));
895 	finit(fp, fp->f_flag, DTYPE_VNODE, dev, &devfs_ops_f);
896 	return (error);
897 }
898 
899 static int
900 devfs_pathconf(struct vop_pathconf_args *ap)
901 {
902 
903 	switch (ap->a_name) {
904 	case _PC_MAC_PRESENT:
905 #ifdef MAC
906 		/*
907 		 * If MAC is enabled, devfs automatically supports
908 		 * trivial non-persistent label storage.
909 		 */
910 		*ap->a_retval = 1;
911 #else
912 		*ap->a_retval = 0;
913 #endif
914 		return (0);
915 	default:
916 		return (vop_stdpathconf(ap));
917 	}
918 	/* NOTREACHED */
919 }
920 
921 /* ARGSUSED */
922 static int
923 devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td)
924 {
925 	struct cdev *dev;
926 	struct cdevsw *dsw;
927 	int error;
928 
929 	error = devfs_fp_check(fp, &dev, &dsw);
930 	if (error)
931 		return (error);
932 	error = dsw->d_poll(dev, events, td);
933 	curthread->td_fpop = NULL;
934 	dev_relthread(dev);
935 	return (error);
936 }
937 
938 /*
939  * Print out the contents of a special device vnode.
940  */
941 static int
942 devfs_print(struct vop_print_args *ap)
943 {
944 
945 	printf("\tdev %s\n", devtoname(ap->a_vp->v_rdev));
946 	return (0);
947 }
948 
949 /* ARGSUSED */
950 static int
951 devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
952 {
953 	struct cdev *dev;
954 	int ioflag, error, resid;
955 	struct cdevsw *dsw;
956 
957 	error = devfs_fp_check(fp, &dev, &dsw);
958 	if (error)
959 		return (error);
960 	resid = uio->uio_resid;
961 	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT);
962 	if (ioflag & O_DIRECT)
963 		ioflag |= IO_DIRECT;
964 
965 	if ((flags & FOF_OFFSET) == 0)
966 		uio->uio_offset = fp->f_offset;
967 
968 	error = dsw->d_read(dev, uio, ioflag);
969 	if (uio->uio_resid != resid || (error == 0 && resid != 0))
970 		vfs_timestamp(&dev->si_atime);
971 	curthread->td_fpop = NULL;
972 	dev_relthread(dev);
973 
974 	if ((flags & FOF_OFFSET) == 0)
975 		fp->f_offset = uio->uio_offset;
976 	fp->f_nextoff = uio->uio_offset;
977 	return (error);
978 }
979 
980 static int
981 devfs_readdir(struct vop_readdir_args *ap)
982 {
983 	int error;
984 	struct uio *uio;
985 	struct dirent *dp;
986 	struct devfs_dirent *dd;
987 	struct devfs_dirent *de;
988 	struct devfs_mount *dmp;
989 	off_t off, oldoff;
990 	int *tmp_ncookies = NULL;
991 
992 	if (ap->a_vp->v_type != VDIR)
993 		return (ENOTDIR);
994 
995 	uio = ap->a_uio;
996 	if (uio->uio_offset < 0)
997 		return (EINVAL);
998 
999 	/*
1000 	 * XXX: This is a temporary hack to get around this filesystem not
1001 	 * supporting cookies. We store the location of the ncookies pointer
1002 	 * in a temporary variable before calling vfs_subr.c:vfs_read_dirent()
1003 	 * and set the number of cookies to 0. We then set the pointer to
1004 	 * NULL so that vfs_read_dirent doesn't try to call realloc() on
1005 	 * ap->a_cookies. Later in this function, we restore the ap->a_ncookies
1006 	 * pointer to its original location before returning to the caller.
1007 	 */
1008 	if (ap->a_ncookies != NULL) {
1009 		tmp_ncookies = ap->a_ncookies;
1010 		*ap->a_ncookies = 0;
1011 		ap->a_ncookies = NULL;
1012 	}
1013 
1014 	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1015 	sx_xlock(&dmp->dm_lock);
1016 	DEVFS_DMP_HOLD(dmp);
1017 	devfs_populate(dmp);
1018 	if (DEVFS_DMP_DROP(dmp)) {
1019 		sx_xunlock(&dmp->dm_lock);
1020 		devfs_unmount_final(dmp);
1021 		if (tmp_ncookies != NULL)
1022 			ap->a_ncookies = tmp_ncookies;
1023 		return (EIO);
1024 	}
1025 	error = 0;
1026 	de = ap->a_vp->v_data;
1027 	off = 0;
1028 	oldoff = uio->uio_offset;
1029 	TAILQ_FOREACH(dd, &de->de_dlist, de_list) {
1030 		KASSERT(dd->de_cdp != (void *)0xdeadc0de, ("%s %d\n", __func__, __LINE__));
1031 		if (dd->de_flags & DE_WHITEOUT)
1032 			continue;
1033 		if (dd->de_dirent->d_type == DT_DIR)
1034 			de = dd->de_dir;
1035 		else
1036 			de = dd;
1037 		dp = dd->de_dirent;
1038 		if (dp->d_reclen > uio->uio_resid)
1039 			break;
1040 		dp->d_fileno = de->de_inode;
1041 		if (off >= uio->uio_offset) {
1042 			error = vfs_read_dirent(ap, dp, off);
1043 			if (error)
1044 				break;
1045 		}
1046 		off += dp->d_reclen;
1047 	}
1048 	sx_xunlock(&dmp->dm_lock);
1049 	uio->uio_offset = off;
1050 
1051 	/*
1052 	 * Restore ap->a_ncookies if it wasn't originally NULL in the first
1053 	 * place.
1054 	 */
1055 	if (tmp_ncookies != NULL)
1056 		ap->a_ncookies = tmp_ncookies;
1057 
1058 	return (error);
1059 }
1060 
1061 static int
1062 devfs_readlink(struct vop_readlink_args *ap)
1063 {
1064 	struct devfs_dirent *de;
1065 
1066 	de = ap->a_vp->v_data;
1067 	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
1068 }
1069 
1070 static int
1071 devfs_reclaim(struct vop_reclaim_args *ap)
1072 {
1073 	struct vnode *vp = ap->a_vp;
1074 	struct devfs_dirent *de;
1075 	struct cdev *dev;
1076 
1077 	mtx_lock(&devfs_de_interlock);
1078 	de = vp->v_data;
1079 	if (de != NULL) {
1080 		de->de_vnode = NULL;
1081 		vp->v_data = NULL;
1082 	}
1083 	mtx_unlock(&devfs_de_interlock);
1084 
1085 	vnode_destroy_vobject(vp);
1086 
1087 	VI_LOCK(vp);
1088 	dev_lock();
1089 	dev = vp->v_rdev;
1090 	vp->v_rdev = NULL;
1091 
1092 	if (dev == NULL) {
1093 		dev_unlock();
1094 		VI_UNLOCK(vp);
1095 		return (0);
1096 	}
1097 
1098 	dev->si_usecount -= vp->v_usecount;
1099 	dev_unlock();
1100 	VI_UNLOCK(vp);
1101 	dev_rel(dev);
1102 	return (0);
1103 }
1104 
1105 static int
1106 devfs_remove(struct vop_remove_args *ap)
1107 {
1108 	struct vnode *vp = ap->a_vp;
1109 	struct devfs_dirent *dd;
1110 	struct devfs_dirent *de;
1111 	struct devfs_mount *dmp = VFSTODEVFS(vp->v_mount);
1112 
1113 	sx_xlock(&dmp->dm_lock);
1114 	dd = ap->a_dvp->v_data;
1115 	de = vp->v_data;
1116 	if (de->de_cdp == NULL) {
1117 		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
1118 		devfs_delete(dmp, de, 1);
1119 	} else {
1120 		de->de_flags |= DE_WHITEOUT;
1121 	}
1122 	sx_xunlock(&dmp->dm_lock);
1123 	return (0);
1124 }
1125 
1126 /*
1127  * Revoke is called on a tty when a terminal session ends.  The vnode
1128  * is orphaned by setting v_op to deadfs, so we need to let go of it
1129  * as well so that a new vnode gets created next time around.
1130  *
1131  */
1132 static int
1133 devfs_revoke(struct vop_revoke_args *ap)
1134 {
1135 	struct vnode *vp = ap->a_vp, *vp2;
1136 	struct cdev *dev;
1137 	struct cdev_priv *cdp;
1138 	struct devfs_dirent *de;
1139 	int i;
1140 
1141 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
1142 
1143 	dev = vp->v_rdev;
1144 	cdp = dev->si_priv;
1145 
1146 	dev_lock();
1147 	cdp->cdp_inuse++;
1148 	dev_unlock();
1149 
1150 	vhold(vp);
1151 	vgone(vp);
1152 	vdrop(vp);
1153 
1154 	VOP_UNLOCK(vp, 0);
1155  loop:
1156 	for (;;) {
1157 		mtx_lock(&devfs_de_interlock);
1158 		dev_lock();
1159 		vp2 = NULL;
1160 		for (i = 0; i <= cdp->cdp_maxdirent; i++) {
1161 			de = cdp->cdp_dirents[i];
1162 			if (de == NULL)
1163 				continue;
1164 
1165 			vp2 = de->de_vnode;
1166 			if (vp2 != NULL) {
1167 				dev_unlock();
1168 				VI_LOCK(vp2);
1169 				mtx_unlock(&devfs_de_interlock);
1170 				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
1171 				    curthread))
1172 					goto loop;
1173 				vhold(vp2);
1174 				vgone(vp2);
1175 				vdrop(vp2);
1176 				vput(vp2);
1177 				break;
1178 			}
1179 		}
1180 		if (vp2 != NULL) {
1181 			continue;
1182 		}
1183 		dev_unlock();
1184 		mtx_unlock(&devfs_de_interlock);
1185 		break;
1186 	}
1187 	dev_lock();
1188 	cdp->cdp_inuse--;
1189 	if (!(cdp->cdp_flags & CDP_ACTIVE) && cdp->cdp_inuse == 0) {
1190 		TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
1191 		dev_unlock();
1192 		dev_rel(&cdp->cdp_c);
1193 	} else
1194 		dev_unlock();
1195 
1196 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1197 	return (0);
1198 }
1199 
1200 static int
1201 devfs_rioctl(struct vop_ioctl_args *ap)
1202 {
1203 	int error;
1204 	struct devfs_mount *dmp;
1205 
1206 	dmp = VFSTODEVFS(ap->a_vp->v_mount);
1207 	sx_xlock(&dmp->dm_lock);
1208 	DEVFS_DMP_HOLD(dmp);
1209 	devfs_populate(dmp);
1210 	if (DEVFS_DMP_DROP(dmp)) {
1211 		sx_xunlock(&dmp->dm_lock);
1212 		devfs_unmount_final(dmp);
1213 		return (ENOENT);
1214 	}
1215 	error = devfs_rules_ioctl(dmp, ap->a_command, ap->a_data, ap->a_td);
1216 	sx_xunlock(&dmp->dm_lock);
1217 	return (error);
1218 }
1219 
1220 static int
1221 devfs_rread(struct vop_read_args *ap)
1222 {
1223 
1224 	if (ap->a_vp->v_type != VDIR)
1225 		return (EINVAL);
1226 	return (VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, NULL, NULL, NULL));
1227 }
1228 
1229 static int
1230 devfs_setattr(struct vop_setattr_args *ap)
1231 {
1232 	struct devfs_dirent *de;
1233 	struct vattr *vap;
1234 	struct vnode *vp;
1235 	int c, error;
1236 	uid_t uid;
1237 	gid_t gid;
1238 
1239 	vap = ap->a_vap;
1240 	vp = ap->a_vp;
1241 	if ((vap->va_type != VNON) ||
1242 	    (vap->va_nlink != VNOVAL) ||
1243 	    (vap->va_fsid != VNOVAL) ||
1244 	    (vap->va_fileid != VNOVAL) ||
1245 	    (vap->va_blocksize != VNOVAL) ||
1246 	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
1247 	    (vap->va_rdev != VNOVAL) ||
1248 	    ((int)vap->va_bytes != VNOVAL) ||
1249 	    (vap->va_gen != VNOVAL)) {
1250 		return (EINVAL);
1251 	}
1252 
1253 	de = vp->v_data;
1254 	if (vp->v_type == VDIR)
1255 		de = de->de_dir;
1256 
1257 	error = c = 0;
1258 	if (vap->va_uid == (uid_t)VNOVAL)
1259 		uid = de->de_uid;
1260 	else
1261 		uid = vap->va_uid;
1262 	if (vap->va_gid == (gid_t)VNOVAL)
1263 		gid = de->de_gid;
1264 	else
1265 		gid = vap->va_gid;
1266 	if (uid != de->de_uid || gid != de->de_gid) {
1267 		if ((ap->a_cred->cr_uid != de->de_uid) || uid != de->de_uid ||
1268 		    (gid != de->de_gid && !groupmember(gid, ap->a_cred))) {
1269 			error = priv_check(ap->a_td, PRIV_VFS_CHOWN);
1270 			if (error)
1271 				return (error);
1272 		}
1273 		de->de_uid = uid;
1274 		de->de_gid = gid;
1275 		c = 1;
1276 	}
1277 
1278 	if (vap->va_mode != (mode_t)VNOVAL) {
1279 		if (ap->a_cred->cr_uid != de->de_uid) {
1280 			error = priv_check(ap->a_td, PRIV_VFS_ADMIN);
1281 			if (error)
1282 				return (error);
1283 		}
1284 		de->de_mode = vap->va_mode;
1285 		c = 1;
1286 	}
1287 
1288 	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
1289 		/* See the comment in ufs_vnops::ufs_setattr(). */
1290 		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) &&
1291 		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
1292 		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td))))
1293 			return (error);
1294 		if (vap->va_atime.tv_sec != VNOVAL) {
1295 			if (vp->v_type == VCHR)
1296 				vp->v_rdev->si_atime = vap->va_atime;
1297 			else
1298 				de->de_atime = vap->va_atime;
1299 		}
1300 		if (vap->va_mtime.tv_sec != VNOVAL) {
1301 			if (vp->v_type == VCHR)
1302 				vp->v_rdev->si_mtime = vap->va_mtime;
1303 			else
1304 				de->de_mtime = vap->va_mtime;
1305 		}
1306 		c = 1;
1307 	}
1308 
1309 	if (c) {
1310 		if (vp->v_type == VCHR)
1311 			vfs_timestamp(&vp->v_rdev->si_ctime);
1312 		else
1313 			vfs_timestamp(&de->de_mtime);
1314 	}
1315 	return (0);
1316 }
1317 
1318 #ifdef MAC
1319 static int
1320 devfs_setlabel(struct vop_setlabel_args *ap)
1321 {
1322 	struct vnode *vp;
1323 	struct devfs_dirent *de;
1324 
1325 	vp = ap->a_vp;
1326 	de = vp->v_data;
1327 
1328 	mac_vnode_relabel(ap->a_cred, vp, ap->a_label);
1329 	mac_devfs_update(vp->v_mount, de, vp);
1330 
1331 	return (0);
1332 }
1333 #endif
1334 
1335 static int
1336 devfs_stat_f(struct file *fp, struct stat *sb, struct ucred *cred, struct thread *td)
1337 {
1338 
1339 	return (vnops.fo_stat(fp, sb, cred, td));
1340 }
1341 
1342 static int
1343 devfs_symlink(struct vop_symlink_args *ap)
1344 {
1345 	int i, error;
1346 	struct devfs_dirent *dd;
1347 	struct devfs_dirent *de;
1348 	struct devfs_mount *dmp;
1349 	struct thread *td;
1350 
1351 	td = ap->a_cnp->cn_thread;
1352 	KASSERT(td == curthread, ("devfs_symlink: td != curthread"));
1353 
1354 	error = priv_check(td, PRIV_DEVFS_SYMLINK);
1355 	if (error)
1356 		return(error);
1357 	dmp = VFSTODEVFS(ap->a_dvp->v_mount);
1358 	dd = ap->a_dvp->v_data;
1359 	de = devfs_newdirent(ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen);
1360 	de->de_uid = 0;
1361 	de->de_gid = 0;
1362 	de->de_mode = 0755;
1363 	de->de_inode = alloc_unr(devfs_inos);
1364 	de->de_dirent->d_type = DT_LNK;
1365 	i = strlen(ap->a_target) + 1;
1366 	de->de_symlink = malloc(i, M_DEVFS, M_WAITOK);
1367 	bcopy(ap->a_target, de->de_symlink, i);
1368 	sx_xlock(&dmp->dm_lock);
1369 #ifdef MAC
1370 	mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de);
1371 #endif
1372 	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
1373 	return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp, td));
1374 }
1375 
1376 static int
1377 devfs_truncate_f(struct file *fp, off_t length, struct ucred *cred, struct thread *td)
1378 {
1379 
1380 	return (vnops.fo_truncate(fp, length, cred, td));
1381 }
1382 
1383 /* ARGSUSED */
1384 static int
1385 devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td)
1386 {
1387 	struct cdev *dev;
1388 	int error, ioflag, resid;
1389 	struct cdevsw *dsw;
1390 
1391 	error = devfs_fp_check(fp, &dev, &dsw);
1392 	if (error)
1393 		return (error);
1394 	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
1395 	ioflag = fp->f_flag & (O_NONBLOCK | O_DIRECT | O_FSYNC);
1396 	if (ioflag & O_DIRECT)
1397 		ioflag |= IO_DIRECT;
1398 	if ((flags & FOF_OFFSET) == 0)
1399 		uio->uio_offset = fp->f_offset;
1400 
1401 	resid = uio->uio_resid;
1402 
1403 	error = dsw->d_write(dev, uio, ioflag);
1404 	if (uio->uio_resid != resid || (error == 0 && resid != 0)) {
1405 		vfs_timestamp(&dev->si_ctime);
1406 		dev->si_mtime = dev->si_ctime;
1407 	}
1408 	curthread->td_fpop = NULL;
1409 	dev_relthread(dev);
1410 
1411 	if ((flags & FOF_OFFSET) == 0)
1412 		fp->f_offset = uio->uio_offset;
1413 	fp->f_nextoff = uio->uio_offset;
1414 	return (error);
1415 }
1416 
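/*
 * Return the userland-visible dev_t for a cdev; this is the same inode
 * number that devfs_getattr() above reports as va_rdev.
 */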
1417 dev_t
1418 dev2udev(struct cdev *x)
1419 {
1420 	if (x == NULL)
1421 		return (NODEV);
1422 	return (x->si_priv->cdp_inode);
1423 }
1424 
1425 static struct fileops devfs_ops_f = {
1426 	.fo_read =	devfs_read_f,
1427 	.fo_write =	devfs_write_f,
1428 	.fo_truncate =	devfs_truncate_f,
1429 	.fo_ioctl =	devfs_ioctl_f,
1430 	.fo_poll =	devfs_poll_f,
1431 	.fo_kqfilter =	devfs_kqfilter_f,
1432 	.fo_stat =	devfs_stat_f,
1433 	.fo_close =	devfs_close_f,
1434 	.fo_flags =	DFLAG_PASSABLE | DFLAG_SEEKABLE
1435 };
1436 
1437 static struct vop_vector devfs_vnodeops = {
1438 	.vop_default =		&default_vnodeops,
1439 
1440 	.vop_access =		devfs_access,
1441 	.vop_getattr =		devfs_getattr,
1442 	.vop_ioctl =		devfs_rioctl,
1443 	.vop_lookup =		devfs_lookup,
1444 	.vop_mknod =		devfs_mknod,
1445 	.vop_pathconf =		devfs_pathconf,
1446 	.vop_read =		devfs_rread,
1447 	.vop_readdir =		devfs_readdir,
1448 	.vop_readlink =		devfs_readlink,
1449 	.vop_reclaim =		devfs_reclaim,
1450 	.vop_remove =		devfs_remove,
1451 	.vop_revoke =		devfs_revoke,
1452 	.vop_setattr =		devfs_setattr,
1453 #ifdef MAC
1454 	.vop_setlabel =		devfs_setlabel,
1455 #endif
1456 	.vop_symlink =		devfs_symlink,
1457 };
1458 
1459 static struct vop_vector devfs_specops = {
1460 	.vop_default =		&default_vnodeops,
1461 
1462 	.vop_access =		devfs_access,
1463 	.vop_advlock =		devfs_advlock,
1464 	.vop_bmap =		VOP_PANIC,
1465 	.vop_close =		devfs_close,
1466 	.vop_create =		VOP_PANIC,
1467 	.vop_fsync =		devfs_fsync,
1468 	.vop_getattr =		devfs_getattr,
1469 	.vop_lease =		VOP_NULL,
1470 	.vop_link =		VOP_PANIC,
1471 	.vop_mkdir =		VOP_PANIC,
1472 	.vop_mknod =		VOP_PANIC,
1473 	.vop_open =		devfs_open,
1474 	.vop_pathconf =		devfs_pathconf,
1475 	.vop_print =		devfs_print,
1476 	.vop_read =		VOP_PANIC,
1477 	.vop_readdir =		VOP_PANIC,
1478 	.vop_readlink =		VOP_PANIC,
1479 	.vop_reallocblks =	VOP_PANIC,
1480 	.vop_reclaim =		devfs_reclaim,
1481 	.vop_remove =		devfs_remove,
1482 	.vop_rename =		VOP_PANIC,
1483 	.vop_revoke =		devfs_revoke,
1484 	.vop_rmdir =		VOP_PANIC,
1485 	.vop_setattr =		devfs_setattr,
1486 #ifdef MAC
1487 	.vop_setlabel =		devfs_setlabel,
1488 #endif
1489 	.vop_strategy =		VOP_PANIC,
1490 	.vop_symlink =		VOP_PANIC,
1491 	.vop_write =		VOP_PANIC,
1492 };
1493 
1494 /*
1495  * Our calling convention to the device drivers used to be that we passed
1496  * vnode.h IO_* flags to read()/write(), but we're moving to fcntl.h O_
1497  * flags instead, since that is what open(), close() and ioctl() take and
1498  * we don't really want vnode.h in device drivers.
1499  * We preserved source compatibility by redefining some vnode flags to
1500  * be the same as the fcntl ones and by sending down the bitwise OR of
1501  * the respective fcntl/vnode flags.  These CTASSERTs make sure nobody
1502  * pulls the rug out from under this.
1503  */
1504 CTASSERT(O_NONBLOCK == IO_NDELAY);
1505 CTASSERT(O_FSYNC == IO_SYNC);
1506
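/*
 * For example, the first assertion is what lets devfs_read_f() above pass
 * "fp->f_flag & O_NONBLOCK" through as the d_read() ioflag while a driver
 * keeps testing "ioflag & IO_NDELAY".
 */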