/*	$NetBSD: spec_vnops.c,v 1.48 2000/03/30 12:22:14 augustss Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

/*
 * This vnode operations vector is used for two things only:
 * - special device nodes created from whole cloth by the kernel.
 * - as a temporary vnodeops replacement for vnodes which were found to
 *   be aliased by callers of checkalias().
 * For the ops vector for vnodes built from special devices found in a
 * filesystem, see (e.g.) ffs_specop_entries[] in ffs_vnops.c or the
 * equivalent for other filesystems.
 */

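/*
 * Most of the spec_* names in this table are likely macros from specdev.h
 * that map operations which make no sense on a device node (create,
 * rename, readdir, ...) onto generic genfs_* routines; the operations
 * that really matter for devices are implemented in this file.
 */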
int (**spec_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open a special file.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	dev_t bdev, dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != (dev_t)NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
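		/*
		 * The driver open routine may sleep for a long time (a tty
		 * open, for instance, can wait for carrier), so drop the
		 * vnode lock across the call and take it again before
		 * returning.
		 */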
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
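			/*
			 * If the previous read on this vnode ended at the
			 * block just before this one, assume a sequential
			 * scan and request one block of read-ahead with
			 * breadn(); otherwise do a plain bread().
			 */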
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, (int)bsize, &nextbn,
					(int *)&bsize, 1, NOCRED, &bp);
			} else
				error = bread(vp, bn, (int)bsize, NOCRED, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn;
	long bsize, bscale, ssize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl) __P((dev_t, u_long, caddr_t, int, struct proc *));
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		ssize = DEV_BSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
			if (dpart.disklab->d_secsize != 0)
				ssize = dpart.disklab->d_secsize;
		}
		bscale = bsize / ssize;
		do {
			bn = (uio->uio_offset / ssize) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
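			/*
			 * A write covering the whole block needs no prior
			 * read, so just grab a buffer with getblk(); a
			 * partial write has to read the block first and
			 * modify it in place.  Below, full blocks are
			 * started asynchronously with bawrite() while
			 * partial blocks are left for a delayed write
			 * with bdwrite().
			 */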
			if (n == bsize)
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp);
			else {
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				if (bp->b_flags & B_ERROR)
					error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
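		/*
		 * A zero command with B_TAPE as the argument is a special
		 * in-kernel probe asking whether this block device is a
		 * tape; answer it from the device switch (0 for tapes,
		 * 1 otherwise) without calling the driver.
		 */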
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE) {
			if (bdevsw[maj].d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);

	default:
		return (genfs_poll(v));
	}
}

/*
 * Synch buffers associated with a block device
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type == VBLK)
		vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	return (0);
}

/*
 * Just call the device strategy routine
 */
int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;

	bp = ap->a_bp;
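	/*
	 * Writes that carry buffer dependencies (b_dep non-empty) are run
	 * through the registered bioops start hook, which is how the soft
	 * dependency code gets to see the buffer before it is handed to
	 * the driver.
	 */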
	if (!(bp->b_flags & B_READ) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);
	(*bdevsw[major(bp->b_dev)].d_strategy)(bp);
	return (0);
}

int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	return (0);
}

/*
 * Device close routine
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error, count, flags, flags1;

	simple_lock(&vp->v_interlock);
	count = vcount(vp);
	flags = vp->v_flag;
	simple_unlock(&vp->v_interlock);

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (count == 2 && ap->a_p &&
		    vp == ap->a_p->p_session->s_ttyvp) {
			vrele(vp);
			count--;
			ap->a_p->p_session->s_ttyvp = NULL;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	flags1 = ap->a_fflag;

	/*
	 * If VXLOCK is set, then we're going away soon, so make this
	 * non-blocking.  This also ensures that we won't wedge in vn_lock
	 * below.
	 */
	if (flags & VXLOCK)
		flags1 |= FNONBLOCK;

	/*
	 * If we're able to block, release the vnode lock and reacquire it
	 * afterwards.  We might end up sleeping for someone else who wants
	 * our queues; they won't get them if we hold the vnode locked.
	 * Also, if VXLOCK is set, don't release the lock, as we won't be
	 * able to regain it.
	 */
	if (!(flags1 & FNONBLOCK))
		VOP_UNLOCK(vp, 0);

	error = (*devclose)(dev, flags1, mode, ap->a_p);

	if (!(flags1 & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id, ap->a_op,
	    ap->a_fl, ap->a_flags));
}