/*	$NetBSD: spec_vnops.c,v 1.60 2001/11/10 13:33:44 lukem Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.60 2001/11/10 13:33:44 lukem Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

/*
 * This vnode operations vector is used for two things only:
 * - special device nodes created from whole cloth by the kernel.
 * - as a temporary vnodeops replacement for vnodes which were found to
 *   be aliased by callers of checkalias().
 * For the ops vector for vnodes built from special devices found in a
 * filesystem, see (e.g.) ffs_specop_entries[] in ffs_vnops.c or the
 * equivalent for other filesystems.
 */
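
/*
 * Descriptive note (not part of the original comments): spec_vnodeop_p
 * itself is not filled in here; it is populated at boot time by the
 * generic vnode-op initialization code (vfs_opv_init()), which walks
 * descriptors such as spec_vnodeop_opv_desc below, installs each listed
 * operation in its vector slot, and defaults every unlisted operation to
 * the entry given for vop_default_desc (here vn_default_error).
 */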

int (**spec_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;
	struct partinfo pi;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);
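
	/*
	 * Added summary (the authoritative checks are the ones in the code
	 * below): opens for writing with a credential other than FSCRED are
	 * refused at securelevel >= 2 for any disk device, and at
	 * securelevel >= 1 for the kmem devices and for character devices
	 * whose block-device counterpart is currently mounted.  Mounted
	 * block devices themselves are never opened here, regardless of
	 * securelevel.
	 */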

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != (dev_t)NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p);
		if (error) {
			return error;
		}
		error = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev,
		    DIOCGPART, (caddr_t)&pi, FREAD, curproc);
		if (error == 0) {
			vp->v_size = (voff_t)pi.disklab->d_secsize *
			    pi.part->p_size;
		}
		return 0;

	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
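		/*
		 * Illustrative example of the arithmetic below, assuming
		 * bsize is left at BLKDEV_IOSIZE (2048 bytes, so bscale is
		 * 4 DEV_BSIZE blocks): a read at uio_offset 5000 yields
		 * bn = (5000 >> DEV_BSHIFT) & ~(4 - 1) = 8 and
		 * on = 5000 % 2048 = 904, so at most 2048 - 904 = 1144
		 * bytes are copied out of that buffer before the loop moves
		 * on to the next bsize-aligned block.
		 */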
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
		do {
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				if (bp->b_flags & B_ERROR)
					error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
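
/*
 * Note on the block-device write path in spec_write() above: a write that
 * covers an entire buffer (n == bsize) uses getblk() and skips the read,
 * while a partial write must first bread() the existing block so the bytes
 * it does not touch are preserved.  Full buffers are then pushed out
 * asynchronously with bawrite(); partial buffers are marked delayed-write
 * with bdwrite() on the expectation that the rest of the block may be
 * written shortly.
 */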

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE) {
			if (bdevsw[maj].d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);

	default:
		return (genfs_poll(v));
	}
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type == VBLK)
		vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;

	bp = ap->a_bp;
	if (!(bp->b_flags & B_READ) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1;
	return (0);
}
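
/*
 * Example for spec_bmap() above: VOP_BMAP() on a device vnode with
 * a_bn == 42 reports logical block 42 at physical block 42 on the same
 * vnode, and advertises (MAXBSIZE >> DEV_BSHIFT) - 1 further contiguous
 * blocks, since a device's block space is already linear.
 */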

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error, count, flags, flags1;

	count = vcount(vp);
	simple_lock(&vp->v_interlock);
	flags = vp->v_flag;
	simple_unlock(&vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal. In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (count == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			count--;
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in-core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	flags1 = ap->a_fflag;

	/*
	 * If VXLOCK is set, then we're going away soon, so make this
	 * non-blocking. Also ensures that we won't wedge in vn_lock below.
	 */
	if (flags & VXLOCK)
		flags1 |= FNONBLOCK;

	/*
	 * If we're able to block, release the vnode lock & reacquire. We
	 * might end up sleeping for someone else who wants our queues. They
	 * won't get them if we hold the vnode locked. Also, if VXLOCK is set,
	 * don't release the lock as we won't be able to regain it.
	 */
	if (!(flags1 & FNONBLOCK))
		VOP_UNLOCK(vp, 0);

	error = (*devclose)(dev, flags1, mode, ap->a_p);

	if (!(flags1 & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}
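
/*
 * Illustrative last-close scenario for spec_close() above: with a device
 * held open through two descriptors (vcount() == 2), the first close
 * returns early and only the final close reaches the driver's d_close
 * entry point.  If the vnode is being revoked (VXLOCK set), the close is
 * forced through regardless of the count and is made non-blocking, since
 * the vnode lock cannot be released and reacquired in that case.
 */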

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}