1 /* $NetBSD: spec_vnops.c,v 1.76 2004/01/25 18:06:49 hannken Exp $ */ 2 3 /* 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 *	@(#)spec_vnops.c	8.15 (Berkeley) 7/14/95
 */

/*
 * Vnode operations for special files (character and block devices).
 * Each operation here validates the request and forwards it to the
 * matching entry point in the kernel's character (cdevsw) or block
 * (bdevsw) device switch table.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: spec_vnops.c,v 1.76 2004/01/25 18:06:49 hannken Exp $");

#include "fss.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/tty.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#if NFSS > 0
#include <dev/fssvar.h>
#endif

/* symbolic sleep message strings for devices */
const char	devopn[] = "devopn";
const char	devio[] = "devio";
const char	devwait[] = "devwait";
const char	devin[] = "devin";
const char	devout[] = "devout";
const char	devioc[] = "devioc";
const char	devcls[] = "devcls";

/* Hash table of special-device vnodes (see SPECHASH in specdev.h). */
struct vnode	*speclisth[SPECHSZ];

/*
 * This vnode operations vector is used for two things only:
 * - special device nodes created from whole cloth by the kernel.
 * - as a temporary vnodeops replacement for vnodes which were found to
 *   be aliased by callers of checkalias().
 * For the ops vector for vnodes built from special devices found in a
 * filesystem, see (e.g) ffs_specop_entries[] in ffs_vnops.c or the
 * equivalent for other filesystems.
 */

int (**spec_vnodeop_p) __P((void *));
const struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, spec_close },		/* close */
	{ &vop_access_desc, spec_access },		/* access */
	{ &vop_getattr_desc, spec_getattr },		/* getattr */
	{ &vop_setattr_desc, spec_setattr },		/* setattr */
	{ &vop_read_desc, spec_read },			/* read */
	{ &vop_write_desc, spec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_fcntl_desc, spec_fcntl },		/* fcntl */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, spec_inactive },		/* inactive */
	{ &vop_reclaim_desc, spec_reclaim },		/* reclaim */
	{ &vop_lock_desc, spec_lock },			/* lock */
	{ &vop_unlock_desc, spec_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, spec_print },		/* print */
	{ &vop_islocked_desc, spec_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, spec_vfree },		/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, spec_update },		/* update */
	{ &vop_bwrite_desc, spec_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc spec_vnodeop_opv_desc =
	{ &spec_vnodeop_p, spec_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 * Device nodes are not directories, so any lookup through one is an error.
 */
int
spec_lookup(v)
	void *v;
{
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Returns true if dev is /dev/mem or /dev/kmem.
 */
static int
iskmemdev(dev_t dev)
{
	/* mem_no is emitted by config(8) to generated devsw.c */
	extern const int mem_no;

	/* minor 14 is /dev/io on i386 with COMPAT_10 */
	return (major(dev) == mem_no && (minor(dev) < 2 || minor(dev) == 14));
}

/*
 * Open a special file.
 *
 * Enforces securelevel restrictions on writable opens of disk and
 * memory devices, then calls the driver's d_open.  For disks, the
 * vnode size (v_size) is afterwards set from the partition size so
 * that I/O can be bounded to the partition.
 */
/* ARGSUSED */
int
spec_open(v)
	void *v;
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *bvp, *vp = ap->a_vp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	dev_t blkdev, dev = (dev_t)vp->v_rdev;
	int error;
	struct partinfo pi;
	int (*d_ioctl)(dev_t, u_long, caddr_t, int, struct proc *);

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		/* FSCRED marks an open done by the kernel itself; it is exempt. */
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdev->d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				blkdev = devsw_chr2blk(dev);
				if (blkdev != (dev_t)NODEV &&
				    vfinddev(blkdev, VBLK, &bvp) &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdev->d_type == D_TTY)
			vp->v_flag |= VISTTY;
		/*
		 * Release the vnode lock across the driver open and
		 * reacquire it afterwards.
		 */
		VOP_UNLOCK(vp, 0);
		error = (*cdev->d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (cdev->d_type != D_DISK)
			return error;
		/* Disk: fall through below to size the vnode from the label. */
		d_ioctl = cdev->d_ioctl;
		break;

	case VBLK:
		bdev = bdevsw_lookup(dev);
		if (bdev == NULL)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdev->d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		error = (*bdev->d_open)(dev, ap->a_mode, S_IFBLK, p);
		d_ioctl = bdev->d_ioctl;
		break;

	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
	default:
		return 0;
	}

	if (error)
		return error;
	/*
	 * Disk (VBLK, or VCHR of type D_DISK) opened successfully:
	 * set v_size to the partition size in bytes, if the driver
	 * can report its disklabel partition info.
	 */
	if (!(*d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc))
		vp->v_size = (voff_t)pi.disklab->d_secsize * pi.part->p_size;
	return 0;
}

/*
 * Vnode op for read
 *
 * VCHR: hand the uio straight to the driver's d_read with the vnode
 * unlocked.  VBLK: read through the buffer cache in bsize chunks,
 * where bsize is the FFS fragment size of the underlying partition
 * when available, else BLKDEV_IOSIZE.
 */
/* ARGSUSED */
int
spec_read(v)
	void *v;
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* Drop the vnode lock while in the driver; relock shared. */
		VOP_UNLOCK(vp, 0);
		cdev = cdevsw_lookup(vp->v_rdev);
		if (cdev != NULL)
			error = (*cdev->d_read)(vp->v_rdev, uio, ap->a_ioflag);
		else
			error = ENXIO;
		vn_lock(vp, LK_SHARED | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev != NULL &&
		    (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart,
		    FREAD, p) == 0) {
			/* Prefer the FFS fragment size as the transfer unit. */
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
		do {
			/* Block number of the bsize chunk containing offset. */
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, NOCRED, &bp);
			/* Clip to what was actually read (b_resid = shortfall). */
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Vnode op for write
 *
 * VCHR: hand the uio to the driver's d_write with the vnode unlocked.
 * VBLK: write through the buffer cache; full-block writes use getblk()
 * (no need to read first), partial writes read-modify-write via bread().
 */
/* ARGSUSED */
int
spec_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	daddr_t bn;
	int bsize, bscale;
	struct partinfo dpart;
	int n, on;
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* Drop the vnode lock while in the driver; relock exclusive. */
		VOP_UNLOCK(vp, 0);
		cdev = cdevsw_lookup(vp->v_rdev);
		if (cdev != NULL)
			error = (*cdev->d_write)(vp->v_rdev, uio, ap->a_ioflag);
		else
			error = ENXIO;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		bdev = bdevsw_lookup(vp->v_rdev);
		if (bdev != NULL &&
		    (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart,
		    FREAD, p) == 0) {
			/* Prefer the FFS fragment size as the transfer unit. */
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    dpart.part->p_frag != 0 && dpart.part->p_fsize != 0)
				bsize = dpart.part->p_frag *
				    dpart.part->p_fsize;
		}
		bscale = bsize >> DEV_BSHIFT;
		do {
			/* Block number of the bsize chunk containing offset. */
			bn = (uio->uio_offset >> DEV_BSHIFT) &~ (bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((unsigned)(bsize - on), uio->uio_resid);
			if (n == bsize)
				/* Whole block: no need to read it in first. */
				bp = getblk(vp, bn, bsize, 0, 0);
			else
				error = bread(vp, bn, bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			n = min(n, bsize - bp->b_resid);
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (error)
				brelse(bp);
			else {
				/*
				 * Block now fully valid: write it out
				 * asynchronously; otherwise mark it dirty
				 * for a delayed write.
				 */
				if (n + on == bsize)
					bawrite(bp);
				else
					bdwrite(bp);
				if (bp->b_flags & B_ERROR)
					error = bp->b_error;
			}
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
/* ARGSUSED */
int
spec_ioctl(v)
	void *v;
{
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t  a_data;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	dev_t dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		return ((*cdev->d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		bdev = bdevsw_lookup(dev);
		if (bdev == NULL)
			return (ENXIO);
		/*
		 * In-kernel query: command 0 with B_TAPE in the data word
		 * asks "is this a tape?".  Answer 0 for tape devices,
		 * 1 for everything else, without entering the driver.
		 */
		if (ap->a_command == 0 && (long)ap->a_data == B_TAPE) {
			if (bdev->d_type == D_TAPE)
				return (0);
			else
				return (1);
		}
		return ((*bdev->d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/*
 * Poll a special file; character devices go to the driver's d_poll,
 * everything else falls back to genfs_poll().
 */
/* ARGSUSED */
int
spec_poll(v)
	void *v;
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;
	const struct cdevsw *cdev;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			/*
			 * NOTE(review): poll returns a revents bitmask, so
			 * returning ENXIO here looks suspect (POLLERR would
			 * be expected) — verify against sys_poll's callers.
			 */
			return (ENXIO);
		return (*cdev->d_poll)(dev, ap->a_events, ap->a_p);

	default:
		return (genfs_poll(v));
	}
}

/*
 * Attach a kevent filter; only character devices support this,
 * via the driver's d_kqfilter entry.
 */
/* ARGSUSED */
int
spec_kqfilter(v)
	void *v;
{
	struct vop_kqfilter_args /* {
		struct vnode *a_vp;
		struct knote *a_kn;	/* was "struct proc *" -- d_kqfilter
					 * takes a knote; verify vnode_if.h */
	} */ *ap = v;
	const struct cdevsw *cdev;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	case VCHR:
		dev = ap->a_vp->v_rdev;
		cdev = cdevsw_lookup(dev);
		if (cdev == NULL)
			return (ENXIO);
		return (*cdev->d_kqfilter)(dev, ap->a_kn);
	default:
		/*
		 * Block devices don't support kqfilter, and refuse it
		 * for any other files (like those vflush()ed) too.
		 */
		return (EOPNOTSUPP);
	}
}

/*
 * Synch buffers associated with a block device
 * (no-op for anything other than VBLK).
 */
/* ARGSUSED */
int
spec_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int  a_flags;
		off_t offlo;
		off_t offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	if (vp->v_type == VBLK)
		vflushbuf(vp, (ap->a_flags & FSYNC_WAIT) != 0);
	return (0);
}

/*
 * Just call the device strategy routine
 * (after running any soft-dependency start hook and, when file system
 * snapshots are configured, the fss copy-on-write hook).
 */
int
spec_strategy(v)
	void *v;
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp = ap->a_bp;

	bp->b_dev = vp->v_rdev;
	/* Writes with dependencies: let the bioops layer prepare them. */
	if (!(bp->b_flags & B_READ) &&
	    (LIST_FIRST(&bp->b_dep)) != NULL && bioops.io_start)
		(*bioops.io_start)(bp);
#if NFSS > 0
	/* Snapshot copy-on-write hook -- presumably copies blocks about to
	 * be overwritten; see dev/fssvar.h. */
	fss_cow_hook(bp);
#endif
	DEV_STRATEGY(bp);

	return (0);
}

/*
 * Last reference dropped: nothing to tear down for a device node;
 * just release the vnode lock taken by the caller.
 */
int
spec_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	VOP_UNLOCK(ap->a_vp, 0);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 * (Device blocks map 1:1 to physical blocks; advertise a maximal
 * read-ahead run of MAXBSIZE.)
 */
int
spec_bmap(v)
	void *v;
{
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap = v;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = (MAXBSIZE >> DEV_BSHIFT) - 1;
	return (0);
}

/*
 * Device close routine
 *
 * The driver's d_close is only called on the last reference (or when
 * forcibly revoking via VXLOCK); see the per-type comments below for
 * how "last" is determined.
 */
/* ARGSUSED */
int
spec_close(v)
	void *v;
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	const struct bdevsw *bdev;
	const struct cdevsw *cdev;
	struct session *sess;
	dev_t dev = vp->v_rdev;
	int (*devclose) __P((dev_t, int, int, struct proc *));
	int mode, error, count, flags, flags1;

	count = vcount(vp);
	flags = vp->v_flag;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 * Also remove the link from the tty back to the session
		 * and pgrp - due to the way consoles are handled we cannot
		 * guarantee that the vrele() will do the final close on the
		 * actual tty device.
		 */
		if (count == 2 && ap->a_p &&
		    vp == (sess = ap->a_p->p_session)->s_ttyvp) {
			sess->s_ttyvp = NULL;
			if (sess->s_ttyp->t_session != NULL) {
				sess->s_ttyp->t_pgrp = NULL;
				sess->s_ttyp->t_session = NULL;
				SESSRELE(sess);
			} else if (sess->s_ttyp->t_pgrp != NULL)
				panic("spec_close: spurious pgrp ref");
			vrele(vp);
			count--;
		}
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		cdev = cdevsw_lookup(dev);
		if (cdev != NULL)
			devclose = cdev->d_close;
		else
			devclose = NULL;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.
		 */
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (count > 1 && (flags & VXLOCK) == 0)
			return (0);
		bdev = bdevsw_lookup(dev);
		if (bdev != NULL)
			devclose = bdev->d_close;
		else
			devclose = NULL;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	flags1 = ap->a_fflag;

	/*
	 * if VXLOCK is set, then we're going away soon, so make this
	 * non-blocking. Also ensures that we won't wedge in vn_lock below.
	 */
	if (flags & VXLOCK)
		flags1 |= FNONBLOCK;

	/*
	 * If we're able to block, release the vnode lock & reacquire. We
	 * might end up sleeping for someone else who wants our queues. They
	 * won't get them if we hold the vnode locked. Also, if VXLOCK is set,
	 * don't release the lock as we won't be able to regain it.
	 */
	if (!(flags1 & FNONBLOCK))
		VOP_UNLOCK(vp, 0);

	if (devclose != NULL)
		error = (*devclose)(dev, flags1, mode, ap->a_p);
	else
		error = ENXIO;

	if (!(flags1 & FNONBLOCK))
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(v)
	void *v;
{
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		register_t *a_retval;
	} */ *ap = v;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	case _PC_SYNC_IO:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Advisory record locking support.
 * Locks are kept on the per-device lock list (v_speclockf) so that
 * all aliases of the same device share one lock namespace.
 */
int
spec_advlock(v)
	void *v;
{
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return lf_advlock(ap, &vp->v_speclockf, (off_t)0);
}