xref: /openbsd-src/sys/kern/spec_vnops.c (revision db3296cf5c1dd9058ceecc3a29fe4aaa0bd26000)
1 /*	$OpenBSD: spec_vnops.c,v 1.26 2003/06/02 23:28:11 millert Exp $	*/
2 /*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the name of the University nor the names of its contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
33  */
34 
35 #include <sys/param.h>
36 #include <sys/proc.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/conf.h>
40 #include <sys/buf.h>
41 #include <sys/mount.h>
42 #include <sys/namei.h>
43 #include <sys/vnode.h>
44 #include <sys/stat.h>
45 #include <sys/errno.h>
46 #include <sys/ioctl.h>
47 #include <sys/file.h>
48 #include <sys/disklabel.h>
49 #include <sys/lockf.h>
50 
51 #include <miscfs/specfs/specdev.h>
52 
/* Shorthand: per-device "last read block" used for one-block read-ahead. */
#define v_lastr v_specinfo->si_lastr

/* Hash table of special-file vnodes, chained through v_specnext. */
struct vnode *speclisth[SPECHSZ];

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";
/*
 * Operations vector for special-file (device) vnodes.  Filled in at
 * boot by vfs_opv_init() from the entry table below; operations that
 * make no sense on a device node point at spec_* stubs that fail.
 */
int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_select_desc, spec_select },		/* select */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },              /* revoke */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ NULL, NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };
109 
110 int
111 spec_vnoperate(void *v)
112 {
113 	struct vop_generic_args *ap = v;
114 
115 	return (VOCALL(spec_vnodeop_p, ap->a_desc->vdesc_offset, ap));
116 }
117 
118 /*
119  * Trivial lookup routine that always fails.
120  */
121 int
122 spec_lookup(v)
123 	void *v;
124 {
125 	struct vop_lookup_args /* {
126 		struct vnode *a_dvp;
127 		struct vnode **a_vpp;
128 		struct componentname *a_cnp;
129 	} */ *ap = v;
130 
131 	*ap->a_vpp = NULL;
132 	return (ENOTDIR);
133 }
134 
/*
 * Open a special file.
 *
 * Entered with the vnode locked.  Rejects opens on filesystems mounted
 * -nodev, enforces the securelevel write restrictions, then hands the
 * open to the driver's d_open entry.  For character devices the vnode
 * lock is dropped around d_open and reacquired afterwards, because the
 * driver open may sleep (e.g. a tty waiting for carrier).
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/* FSCRED is the kernel's own credential; it bypasses the
		 * securelevel checks below. */
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		/* The driver open may sleep; don't hold the vnode lock. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		/* Non-device vnodes reach here via aliasing; nothing to do. */
		break;
	}
	return (0);
}
226 
/*
 * Vnode op for read.
 *
 * Character devices: drop the vnode lock and call the driver's d_read
 * directly.  Block devices: read through the buffer cache in bsize
 * chunks, where bsize/ssize are taken from the disklabel when the
 * driver answers DIOCGPART (falling back to BLKDEV_IOSIZE/DEV_BSIZE),
 * with a simple one-block read-ahead keyed off v_lastr.
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* Driver read may sleep; don't hold the vnode lock. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		/* Prefer the partition's FFS frag size and the label's
		 * sector size, when the driver can report them. */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;		/* sectors per cache block */
		do {
			/* Round the offset down to a bsize boundary. */
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			/* Sequential access: read ahead the next block. */
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}
314 
315 int
316 spec_inactive(v)
317 	void *v;
318 {
319 	struct vop_inactive_args /* {
320 		struct vnode *a_vp;
321 		struct proc *a_p;
322 	} */ *ap = v;
323 
324 	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
325 	return (0);
326 }
327 
/*
 * Vnode op for write.
 *
 * Character devices: drop the vnode lock and call the driver's d_write
 * directly.  Block devices: write through the buffer cache in bsize
 * chunks (sized from the disklabel as in spec_read); full-block writes
 * take a fresh buffer via getblk, partial writes read-modify-write via
 * bread.  Completed blocks go out asynchronously (bawrite), partial
 * ones are delayed (bdwrite).
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* Driver write may sleep; don't hold the vnode lock. */
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		/* Prefer the partition's FFS frag size and the label's
		 * sector size, when the driver can report them. */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;		/* sectors per cache block */
		do {
			/* Round the offset down to a bsize boundary. */
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			/* Whole block: no need to read the old contents. */
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
413 
414 /*
415  * Device ioctl operation.
416  */
417 /* ARGSUSED */
418 int
419 spec_ioctl(v)
420 	void *v;
421 {
422 	struct vop_ioctl_args /* {
423 		struct vnode *a_vp;
424 		u_long a_command;
425 		caddr_t  a_data;
426 		int  a_fflag;
427 		struct ucred *a_cred;
428 		struct proc *a_p;
429 	} */ *ap = v;
430 	dev_t dev = ap->a_vp->v_rdev;
431 	int maj = major(dev);
432 
433 	switch (ap->a_vp->v_type) {
434 
435 	case VCHR:
436 		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
437 		    ap->a_fflag, ap->a_p));
438 
439 	case VBLK:
440 		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE)
441 			return ((bdevsw[maj].d_type == D_TAPE) ? 0 : 1);
442 		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
443 		    ap->a_fflag, ap->a_p));
444 
445 	default:
446 		panic("spec_ioctl");
447 		/* NOTREACHED */
448 	}
449 }
450 
451 /* ARGSUSED */
452 int
453 spec_select(v)
454 	void *v;
455 {
456 	struct vop_select_args /* {
457 		struct vnode *a_vp;
458 		int  a_which;
459 		int  a_fflags;
460 		struct ucred *a_cred;
461 		struct proc *a_p;
462 	} */ *ap = v;
463 	register dev_t dev;
464 
465 	switch (ap->a_vp->v_type) {
466 
467 	default:
468 		return (1);		/* XXX */
469 
470 	case VCHR:
471 		dev = ap->a_vp->v_rdev;
472 		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p);
473 	}
474 }
475 /* ARGSUSED */
476 int
477 spec_kqfilter(v)
478 	void *v;
479 {
480 	struct vop_kqfilter_args /* {
481 		struct vnode *a_vp;
482 		struct knote *a_kn;
483 	} */ *ap = v;
484 
485 	dev_t dev;
486 
487 	dev = ap->a_vp->v_rdev;
488 	if (cdevsw[major(dev)].d_type & D_KQFILTER)
489 		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
490 	return (1);
491 }
492 
/*
 * Synch buffers associated with a block device.
 *
 * Character devices have no cached buffers, so they succeed trivially.
 * For block devices, walk the vnode's dirty-buffer list at splbio and
 * push each buffer out asynchronously; the scan restarts from the top
 * after every write since bawrite sleeps and the list may change.
 * With MNT_WAIT, additionally wait for all outstanding I/O to drain.
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		/* Busy buffers are already being written by someone. */
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		/* bawrite may have slept; rescan the list from the top. */
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio (vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
545 
546 int
547 spec_strategy(v)
548 	void *v;
549 {
550 	struct vop_strategy_args /* {
551 		struct buf *a_bp;
552 	} */ *ap = v;
553 	struct buf *bp = ap->a_bp;
554 	int maj = major(bp->b_dev);
555 
556 	if (LIST_FIRST(&bp->b_dep) != NULL)
557 		buf_start(bp);
558 
559 	(*bdevsw[maj].d_strategy)(bp);
560 	return (0);
561 }
562 
563 /*
564  * This is a noop, simply returning what one has been given.
565  */
566 int
567 spec_bmap(v)
568 	void *v;
569 {
570 	struct vop_bmap_args /* {
571 		struct vnode *a_vp;
572 		daddr_t  a_bn;
573 		struct vnode **a_vpp;
574 		daddr_t *a_bnp;
575 		int *a_runp;
576 	} */ *ap = v;
577 
578 	if (ap->a_vpp != NULL)
579 		*ap->a_vpp = ap->a_vp;
580 	if (ap->a_bnp != NULL)
581 		*ap->a_bnp = ap->a_bn;
582 	if (ap->a_runp != NULL)
583 		*ap->a_runp = 0;
584 
585 	return (0);
586 }
587 
/*
 * Device close routine.
 *
 * Decides whether this is really the last close of the device (devices
 * may be aliased by several vnodes and referenced by the session's
 * controlling tty), and only then calls the driver's d_close.  Block
 * devices additionally have their cached buffers flushed first.
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}
667 
668 /*
669  * Print out the contents of a special device vnode.
670  */
671 int
672 spec_print(v)
673 	void *v;
674 {
675 	struct vop_print_args /* {
676 		struct vnode *a_vp;
677 	} */ *ap = v;
678 
679 	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
680 		minor(ap->a_vp->v_rdev));
681 	return 0;
682 }
683 
684 /*
685  * Return POSIX pathconf information applicable to special devices.
686  */
687 int
688 spec_pathconf(v)
689 	void *v;
690 {
691 	struct vop_pathconf_args /* {
692 		struct vnode *a_vp;
693 		int a_name;
694 		register_t *a_retval;
695 	} */ *ap = v;
696 
697 	switch (ap->a_name) {
698 	case _PC_LINK_MAX:
699 		*ap->a_retval = LINK_MAX;
700 		return (0);
701 	case _PC_MAX_CANON:
702 		*ap->a_retval = MAX_CANON;
703 		return (0);
704 	case _PC_MAX_INPUT:
705 		*ap->a_retval = MAX_INPUT;
706 		return (0);
707 	case _PC_PIPE_BUF:
708 		*ap->a_retval = PIPE_BUF;
709 		return (0);
710 	case _PC_CHOWN_RESTRICTED:
711 		*ap->a_retval = 1;
712 		return (0);
713 	case _PC_VDISABLE:
714 		*ap->a_retval = _POSIX_VDISABLE;
715 		return (0);
716 	default:
717 		return (EINVAL);
718 	}
719 	/* NOTREACHED */
720 }
721 
722 /*
723  * Special device advisory byte-level locks.
724  */
725 /* ARGSUSED */
726 int
727 spec_advlock(v)
728 	void *v;
729 {
730 	struct vop_advlock_args /* {
731 		struct vnodeop_desc *a_desc;
732 		struct vnode *a_vp;
733 		caddr_t  a_id;
734 		int  a_op;
735 		struct flock *a_fl;
736 		int  a_flags;
737 	} */ *ap = v;
738 	register struct vnode *vp = ap->a_vp;
739 
740 	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
741 		ap->a_op, ap->a_fl, ap->a_flags));
742 }
743 
/*
 * Special device failed operation: stub for operations that are
 * never valid on this descriptor.
 */
/*ARGSUSED*/
int
spec_ebadf(void *v)
{
	return (EBADF);
}
755 
/*
 * Special device bad operation: stub for operations that can never
 * legitimately be invoked on a device vnode.  Reaching it indicates
 * a kernel bug, so panic.
 */
/*ARGSUSED*/
int
spec_badop(void *v)
{
	panic("spec_badop called");
	/* NOTREACHED */
}
768