/*	$OpenBSD: spec_vnops.c,v 1.20 2001/06/23 02:14:26 csapuntz Exp $	*/
/*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>

#include <miscfs/specfs/specdev.h>

#define v_lastr	v_specinfo->si_lastr

/* symbolic sleep message strings for devices */
char	devopn[] = "devopn";
char	devio[] = "devio";
char	devwait[] = "devwait";
char	devin[] = "devin";
char	devout[] = "devout";
char	devioc[] = "devioc";
char	devcls[] = "devcls";

int (**spec_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_select_desc, spec_select },		/* select */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
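 *
 * The open is validated against the character or block device switch
 * and against the securelevel checks below before being passed to the
 * driver's d_open entry point.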
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	register int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
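		 * Allowing that would let raw block I/O interfere with the
		 * file system's use of the buffer cache for the device.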
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
				    (int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE)
			return ((bdevsw[maj].d_type == D_TAPE) ? 0 : 1);
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_select(v)
	void *v;
{
	struct vop_select_args /* {
		struct vnode *a_vp;
		int a_which;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	register dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (1);		/* XXX */

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p);
	}
}

/* ARGSUSED */
int
spec_kqfilter(v)
	void *v;
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;
	} */ *ap = v;

	dev_t dev;

	dev = ap->a_vp->v_rdev;
	if (cdevsw[major(dev)].d_type & D_KQFILTER)
		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	return (1);
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	register struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio(vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (vp->v_dirtyblkhd.lh_first) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;

	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	register struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal. In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		VOP_UNLOCK(vp, 0, ap->a_p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Special device advisory byte-level locks.
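 * Locks are recorded on the per-device v_speclockf list and handled
 * by the generic lf_advlock() code.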
718 */ 719 /* ARGSUSED */ 720 int 721 spec_advlock(v) 722 void *v; 723 { 724 struct vop_advlock_args /* { 725 struct vnodeop_desc *a_desc; 726 struct vnode *a_vp; 727 caddr_t a_id; 728 int a_op; 729 struct flock *a_fl; 730 int a_flags; 731 } */ *ap = v; 732 register struct vnode *vp = ap->a_vp; 733 734 return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id, 735 ap->a_op, ap->a_fl, ap->a_flags)); 736 } 737 738 /* 739 * Special device failed operation 740 */ 741 /*ARGSUSED*/ 742 int 743 spec_ebadf(v) 744 void *v; 745 { 746 747 return (EBADF); 748 } 749 750 /* 751 * Special device bad operation 752 */ 753 /*ARGSUSED*/ 754 int 755 spec_badop(v) 756 void *v; 757 { 758 759 panic("spec_badop called"); 760 /* NOTREACHED */ 761 } 762