/*	$NetBSD: spec_vnops.c,v 1.54 2001/04/17 18:49:26 thorpej Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

/*
 * This vnode operations vector is used for two things only:
 * - special device nodes created from whole cloth by the kernel.
 * - as a temporary vnodeops replacement for vnodes which were found to
 *   be aliased by callers of checkalias().
 * For the ops vector for vnodes built from special devices found in a
 * filesystem, see (e.g) ffs_specop_entries[] in ffs_vnops.c or the
 * equivalent for other filesystems.
 */
int (**spec_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != (dev_t)NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	int bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
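			/*
			 * Heuristic read-ahead: v_lastr remembers the block
			 * fetched by the previous read.  If this request
			 * follows it sequentially, use breadn() so the next
			 * block is read in asynchronously as well.
			 */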
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn,
					&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	int bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
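			/*
			 * If an entire block is being overwritten there is
			 * no need to read the old contents first; just get
			 * an empty buffer with getblk().  Partial blocks are
			 * read in so the unmodified portion is preserved.
			 */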
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				if (bp->b_flags & B_ERROR)
					error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		/*
		 * Command 0 with B_TAPE as the argument is an in-kernel
		 * query: answer 0 if this block device is a tape, 1 if not.
		 */
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE) {
			if (bdevsw[maj].d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);

	default:
		return (genfs_poll(v));
	}
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type == VBLK)
		vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;

	bp = ap->a_bp;
	if (!(bp->b_flags & B_READ) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error, count, flags, flags1;

	count = vcount(vp);
	simple_lock(&vp->v_interlock);
	flags = vp->v_flag;
	simple_unlock(&vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (count == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			count--;
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in-core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	flags1 = ap->a_fflag;

	/*
	 * if VXLOCK is set, then we're going away soon, so make this
	 * non-blocking.  Also ensures that we won't wedge in vn_lock below.
	 */
	if (flags & VXLOCK)
		flags1 |= FNONBLOCK;

	/*
	 * If we're able to block, release the vnode lock & reacquire.  We
	 * might end up sleeping for someone else who wants our queues.  They
	 * won't get them if we hold the vnode locked.  Also, if VXLOCK is set,
	 * don't release the lock as we won't be able to regain it.
	 */
	if (!(flags1 & FNONBLOCK))
		VOP_UNLOCK(vp, 0);

	error = (*devclose)(dev, flags1, mode, ap->a_p);

	if (!(flags1 & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}