/*	$NetBSD: spec_vnops.c,v 1.64 2002/09/06 13:18:43 gehenna Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.64 2002/09/06 13:18:43 gehenna Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

/* Hash table of active special vnodes, used for alias detection. */
struct vnode	*speclisth[SPECHSZ];

/*
 * This vnode operations vector is used for two things only:
 * - special device nodes created from whole cloth by the kernel.
 * - as a temporary vnodeops replacement for vnodes which were found to
 *   be aliased by callers of checkalias().
 * For the ops vector for vnodes built from special devices found in a
 * filesystem, see (e.g) ffs_specop_entries[] in ffs_vnops.c or the
 * equivalent for other filesystems.
 */

int (**spec_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 * Device nodes are not directories, so any lookup under one
 * returns ENOTDIR with a NULL result vnode.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 *
 * Enforces the securelevel policy on writable opens of disk and
 * kernel-memory devices, then dispatches to the device's d_open
 * entry via the character or block device switch.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	dev_t blkdev, dev = (dev_t)vp->v_rdev;
	int error;
	struct partinfo pi;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdev->d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				blkdev = devsw_chr2blk(dev);
				if (blkdev != (dev_t)NODEV &&
				    vfinddev(blkdev, VBLK, &bvp) &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdev->d_type == D_TTY)
			vp->v_flag |= VISTTY;
		/*
		 * Drop the vnode lock across d_open, which may sleep
		 * (e.g. waiting for carrier on a tty), then reacquire it.
		 */
		VOP_UNLOCK(vp, 0);
		error = (*cdev->d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		bdev = bdevsw_lookup(dev);
		if (bdev == NULL)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdev->d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		error = (*bdev->d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (error) {
			return error;
		}
		/*
		 * Set v_size to the partition size in bytes, taken from
		 * the disklabel; a DIOCGPART failure is deliberately
		 * ignored (the open itself has already succeeded).
		 */
		error = (*bdev->d_ioctl)(vp->v_rdev,
		    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
		if (error == 0) {
			vp->v_size = (voff_t)pi.disklab->d_secsize *
			    pi.part->p_size;
		}
		return 0;

	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 *
 * Character devices are passed straight to the driver's d_read.
 * Block devices go through the buffer cache, using the partition's
 * preferred block size (frag * fsize for FFS partitions) when the
 * driver can report one via DIOCGPART.
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
 	struct proc *p = uio->uio_procp;
	struct buf *bp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* Unlock across the driver call; it may sleep for input. */
		VOP_UNLOCK(vp, 0);
		cdev = cdevsw_lookup(vp->v_rdev);
		if (cdev != NULL)
			error = (*cdev->d_read)(vp->v_rdev, uio, ap->a_ioflag);
		else
			error = ENXIO;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev != NULL &&
		    (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart,
				     FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
		do {
			/* Round the block number down to a bsize boundary. */
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 *
 * Character devices are passed straight to the driver's d_write.
 * Block devices go through the buffer cache; full-block writes use
 * getblk() (no read-modify-write), partial blocks are read first.
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* Unlock across the driver call; it may sleep for output. */
		VOP_UNLOCK(vp, 0);
		cdev = cdevsw_lookup(vp->v_rdev);
		if (cdev != NULL)
			error = (*cdev->d_write)(vp->v_rdev, uio, ap->a_ioflag);
		else
			error = ENXIO;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev != NULL &&
		    (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart,
				    FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				/* Whole block: no need to read it first. */
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				if (bp->b_flags & B_ERROR)
					error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t  a_data;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		return ((*cdev->d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		bdev = bdevsw_lookup(dev);
		if (bdev == NULL)
			return (ENXIO);
		/*
		 * Command 0 with B_TAPE data is a special in-kernel probe:
		 * report whether this block device is a tape.
		 */
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE) {
			if (bdev->d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdev->d_ioctl)(dev, ap->a_command, ap->a_data,
		   ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/*
 * Device poll operation: character devices go to the driver's
 * d_poll; everything else falls back to the generic handler.
 */
/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	const struct cdevsw *cdev;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		return (*cdev->d_poll)(dev, ap->a_events, ap->a_p);

	default:
		return (genfs_poll(v));
	}
}
/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type == VBLK)
		vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;
	const struct bdevsw *bdev;

	bp = ap->a_bp;
	/* Notify the soft-dependency code before starting a write. */
	if (!(bp->b_flags & B_READ) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL)
		(*bdev->d_strategy)(bp);
	return (0);
}

/*
 * Inactive vnode: nothing to clean up for a special file,
 * just release the vnode lock as the protocol requires.
 */
int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1;
	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error, count, flags, flags1;

	count = vcount(vp);
	simple_lock(&vp->v_interlock);
	flags = vp->v_flag;
	simple_unlock(&vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (count == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			count--;
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		cdev = cdevsw_lookup(dev);
		if (cdev != NULL)
			devclose = cdev->d_close;
		else
			devclose = NULL;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		bdev = bdevsw_lookup(dev);
		if (bdev != NULL)
			devclose = bdev->d_close;
		else
			devclose = NULL;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	flags1 = ap->a_fflag;

	/*
	 * if VXLOCK is set, then we're going away soon, so make this
	 * non-blocking. Also ensures that we won't wedge in vn_lock below.
	 */
	if (flags & VXLOCK)
		flags1 |= FNONBLOCK;

	/*
	 * If we're able to block, release the vnode lock & reacquire. We
	 * might end up sleeping for someone else who wants our queues. They
	 * won't get them if we hold the vnode locked. Also, if VXLOCK is set,
	 * don't release the lock as we won't be able to regain it.
	 */
	if (!(flags1 & FNONBLOCK))
		VOP_UNLOCK(vp, 0);

	if (devclose != NULL)
		error = (*devclose)(dev, flags1, mode, ap->a_p);
	else
		error = ENXIO;

	if (!(flags1 & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}