/*	$OpenBSD: spec_vnops.c,v 1.27 2003/09/23 16:51:13 millert Exp $	*/
/*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>

#include <miscfs/specfs/specdev.h>

#define v_lastr v_specinfo->si_lastr

struct vnode *speclisth[SPECHSZ];

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

int (**spec_vnodeop_p)(void *);
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ NULL, NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

int
spec_vnoperate(void *v)
{
	struct vop_generic_args *ap = v;

	return (VOCALL(spec_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE)
			return ((bdevsw[maj].d_type == D_TAPE) ? 0 : 1);
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (seltrue(ap->a_vp->v_rdev, ap->a_events, ap->a_p));

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
	}
}
/* ARGSUSED */
int
spec_kqfilter(v)
	void *v;
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;

	dev_t dev;

	dev = ap->a_vp->v_rdev;
	if (cdevsw[major(dev)].d_type & D_KQFILTER)
		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	return (1);
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio (vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Special device advisory byte-level locks.
 */
/* ARGSUSED */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
	    ap->a_op, ap->a_fl, ap->a_flags));
}

/*
 * Special device failed operation
 */
/*ARGSUSED*/
int
spec_ebadf(v)
	void *v;
{

	return (EBADF);
}

/*
 * Special device bad operation
 */
/*ARGSUSED*/
int
spec_badop(v)
	void *v;
{

	panic("spec_badop called");
	/* NOTREACHED */
}