1 /* $OpenBSD: spec_vnops.c,v 1.95 2018/07/07 15:41:25 visa Exp $ */ 2 /* $NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>
#include <sys/dkio.h>
#include <sys/malloc.h>
#include <sys/specdev.h>
#include <sys/unistd.h>

#include <uvm/uvm_extern.h>

/* Per-device last-read block number; used by spec_read() for read-ahead. */
#define v_lastr	v_specinfo->si_lastr

int	spec_open_clone(struct vop_open_args *);
int	spec_close_clone(struct vop_close_args *);

/* Hash chains of special (device) vnodes. */
struct vnode *speclisth[SPECHSZ];

/* Vnode operations vector shared by all special (device) vnodes. */
struct vops spec_vops = {
	.vop_lookup	= vop_generic_lookup,
	.vop_create	= spec_badop,
	.vop_mknod	= spec_badop,
	.vop_open	= spec_open,
	.vop_close	= spec_close,
	.vop_access	= spec_access,
	.vop_getattr	= spec_getattr,
	.vop_setattr	= spec_setattr,
	.vop_read	= spec_read,
	.vop_write	= spec_write,
	.vop_ioctl	= spec_ioctl,
	.vop_poll	= spec_poll,
	.vop_kqfilter	= spec_kqfilter,
	.vop_revoke	= vop_generic_revoke,
	.vop_fsync	= spec_fsync,
	.vop_remove	= spec_badop,
	.vop_link	= spec_badop,
	.vop_rename	= spec_badop,
	.vop_mkdir	= spec_badop,
	.vop_rmdir	= spec_badop,
	.vop_symlink	= spec_badop,
	.vop_readdir	= spec_badop,
	.vop_readlink	= spec_badop,
	.vop_abortop	= spec_badop,
	.vop_inactive	= spec_inactive,
	.vop_reclaim	= nullop,
	.vop_lock	= vop_generic_lock,
	.vop_unlock	= vop_generic_unlock,
	.vop_islocked	= vop_generic_islocked,
	.vop_bmap	= vop_generic_bmap,
	.vop_strategy	= spec_strategy,
	.vop_print	= spec_print,
	.vop_pathconf	= spec_pathconf,
	.vop_advlock	= spec_advlock,
	.vop_bwrite	= vop_generic_bwrite,
};

/*
 * Open a special file.
 *
 * Enforces the securelevel restrictions on disk and /dev/(k)mem devices,
 * refuses devices on -nodev mounts and mounted block devices, and finally
 * dispatches to the driver's d_open.  Returns 0 or an errno.
 */
int
spec_open(void *v)
{
	struct vop_open_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		/* Security checks apply only to real credentials opening for write. */
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		/* Cloning devices get a fresh per-open vnode instead. */
		if (cdevsw[maj].d_flags & D_CLONE)
			return (spec_open_clone(ap));
		/* Drop the vnode lock across the driver open, then retake it. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		/* Non-device types: nothing to do, open succeeds trivially. */
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 *
 * VCHR reads go straight to the driver (unlocked).  VBLK reads go through
 * the buffer cache, in units of the partition's FFS fragment size when
 * DIOCGPART can provide one, otherwise BLKDEV_IOSIZE; sequential access
 * (tracked via v_lastr) triggers one block of read-ahead.
 */
int
spec_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn, bscale;
	int bsize;
	struct partinfo dpart;
	size_t n;
	int on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		/* Character device: hand the whole uio to the driver, unlocked. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/* Prefer the partition's FFS block size (frag * fsize) if known. */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			/* Block-aligned device block and offset within it. */
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = ulmin((bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				/* Sequential: read-ahead the next block too. */
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn, &bsize,
				    1, &bp);
			} else
				error = bread(vp, bn, bsize, &bp);
			vp->v_lastr = bn;
			/* Clamp to what the device actually returned. */
			n = ulmin(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

/*
 * Inactive vnode op: nothing to release, just drop the lock.
 */
int
spec_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	VOP_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Vnode op for write
 *
 * VCHR writes go straight to the driver (unlocked).  VBLK writes do a
 * read-modify-write through the buffer cache using the same block sizing
 * as spec_read(); full blocks are written async, partial ones delayed.
 */
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	size_t n;
	int on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		/* Character device: hand the whole uio to the driver, unlocked. */
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		/* Prefer the partition's FFS block size (frag * fsize) if known. */
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = ulmin((bsize - on), uio->uio_resid);
			/* Read the block first so a partial write preserves it. */
			error = bread(vp, bn, bsize, &bp);
			n = ulmin(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				/* Whole block rewritten: write it out async. */
				bawrite(bp);
			else
				/* Partial block: mark dirty, write later. */
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

/*
 * Poll: character devices dispatch to the driver's d_poll;
 * everything else is always ready for normal I/O.
 */
int
spec_poll(void *v)
{
	struct vop_poll_args *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (ap->a_events &
		    (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
	}
}

/*
 * Kqueue filter attach: dispatch to the driver's d_kqfilter if it has one.
 */
int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;

	dev_t dev;

	dev = ap->a_vp->v_rdev;
	if (cdevsw[major(dev)].d_kqfilter)
		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	return (EOPNOTSUPP);
}

/*
 * Synch buffers associated with a block device
 *
 * No-op for character devices.  For block devices, start async writes of
 * every dirty buffer (restarting the scan each time, since bawrite may
 * sleep) and, with MNT_WAIT, wait for the I/O to drain.
 */
int
spec_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		/* bawrite may sleep; rescan the list from the top. */
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio (vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

/*
 * Strategy: notify dependency code (softdep) that I/O is starting,
 * then hand the buffer to the block driver.
 */
int
spec_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}

/*
 * Device close routine
 *
 * Only calls the driver's d_close on last reference (or when the vnode is
 * being forcibly revoked, VXLOCK).  Block devices additionally flush and
 * invalidate their cached buffers first.
 */
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, relock, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p != NULL && p->p_p->ps_pgrp &&
		    vp == p->p_p->ps_pgrp->pg_session->s_ttyvp) {
			vrele(vp);
			p->p_p->ps_pgrp->pg_session->s_ttyvp = NULL;
		}
		if (cdevsw[major(dev)].d_flags & D_CLONE)
			return (spec_close_clone(ap));
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcably closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks. In order to do
		 * that, we must lock the vnode. If we are coming from
		 * vclean(), the vnode is already locked.
		 */
		if (!(vp->v_flag & VXLOCK))
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, p, 0, 0);
		if (!(vp->v_flag & VXLOCK))
			VOP_UNLOCK(vp);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* release lock if held and this isn't coming from vclean() */
	relock = VOP_ISLOCKED(vp) && !(vp->v_flag & VXLOCK);
	if (relock)
		VOP_UNLOCK(vp);
	error = (*devclose)(dev, ap->a_fflag, mode, p);
	if (relock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	return (error);
}

/*
 * Getattr for a cloned device vnode: forward to the parent (cloning)
 * vnode's filesystem.  Non-clone special vnodes reject this op.
 */
int
spec_getattr(void *v)
{
	struct vop_getattr_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_GETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, ap->a_p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

/*
 * Setattr for a cloned device vnode: forward to the parent (cloning)
 * vnode's filesystem.  Non-clone special vnodes reject this op.
 */
int
spec_setattr(void *v)
{
	/*
	 * NOTE(review): declared as vop_getattr_args; the fields used
	 * (a_vp/a_vap/a_cred/a_p) match vop_setattr_args, but the
	 * set-variant type would be the precise one — confirm.
	 */
	struct vop_getattr_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_SETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

/*
 * Access check for a cloned device vnode: forward to the parent (cloning)
 * vnode's filesystem.  Non-clone special vnodes reject this op.
 */
int
spec_access(void *v)
{
	struct vop_access_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_ACCESS(vp->v_specparent, ap->a_mode, ap->a_cred, ap->a_p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(void *v)
{
	struct vop_print_args *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args *ap = v;
	int error = 0;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		break;
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		break;
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		break;
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		break;
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		break;
	case _PC_TIMESTAMP_RESOLUTION:
		*ap->a_retval = 1;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Special device advisory byte-level locks.
 */
int
spec_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
	    ap->a_op, ap->a_fl, ap->a_flags));
}

/*
 * Special device bad operation
 *
 * Installed in spec_vops for ops that make no sense on a device node
 * (create, rename, readdir, ...); reaching it is a kernel bug.
 */
int
spec_badop(void *v)
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/*
 * Copyright (c) 2006 Pedro Martelletto <pedro@ambientworks.net>
 * Copyright (c) 2006 Thordur Bjornsson <thib@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef	CLONE_DEBUG
#define	DNPRINTF(m...)	do { printf(m); } while (0)
#else
#define	DNPRINTF(m...)	/* nothing */
#endif

/*
 * Open one instance of a cloning character device: allocate a free
 * instance slot from the parent's bitmap, create a new vnode whose minor
 * encodes that slot, open the driver through it, and link the clone back
 * to its parent.  Returns 0 on success or an errno.
 */
int
spec_open_clone(struct vop_open_args *ap)
{
	struct vnode *cvp, *vp = ap->a_vp;
	struct cloneinfo *cip;
	int error, i;

	DNPRINTF("cloning vnode\n");

	/* The parent's minor must fit below the instance bits. */
	if (minor(vp->v_rdev) >= (1 << CLONE_SHIFT))
		return (ENXIO);

	/* Find a free instance; slot 0 would reproduce the parent's dev_t. */
	for (i = 1; i < CLONE_MAPSZ * NBBY; i++)
		if (isclr(vp->v_specbitmap, i)) {
			setbit(vp->v_specbitmap, i);
			break;
		}

	if (i == CLONE_MAPSZ * NBBY)
		return (EBUSY); /* too many open instances */

	/* New vnode with the instance index in the minor's high bits. */
	error = cdevvp(makedev(major(vp->v_rdev),
	    (i << CLONE_SHIFT) | minor(vp->v_rdev)), &cvp);
	if (error) {
		clrbit(vp->v_specbitmap, i);
		return (error); /* out of vnodes */
	}

	VOP_UNLOCK(vp);

	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
	    S_IFCHR, ap->a_p);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (error) {
		vput(cvp);
		clrbit(vp->v_specbitmap, i);
		return (error); /* device open failed */
	}

	cvp->v_flag |= VCLONE;

	/* Remember the parent's v_data and the clone on the parent. */
	cip = malloc(sizeof(struct cloneinfo), M_TEMP, M_WAITOK);
	cip->ci_data = vp->v_data;
	cip->ci_vp = cvp;

	cvp->v_specparent = vp;
	vp->v_flag |= VCLONED;
	vp->v_data = cip;

	DNPRINTF("clone of vnode %p is vnode %p\n", vp, cvp);

	return (0); /* device cloned */
}

/*
 * Close one instance of a cloning character device: call the driver's
 * close, then free the instance slot in the parent's bitmap and drop
 * the clone's reference on the parent.
 */
int
spec_close_clone(struct vop_close_args *ap)
{
	struct vnode *pvp, *vp = ap->a_vp;
	int error;

	error = cdevsw[major(vp->v_rdev)].d_close(vp->v_rdev, ap->a_fflag,
	    S_IFCHR, ap->a_p);
	if (error)
		return (error); /* device close failed */

	pvp = vp->v_specparent; /* get parent device */
	/* Recover the instance index from the minor's high bits. */
	clrbit(pvp->v_specbitmap, minor(vp->v_rdev) >> CLONE_SHIFT);
	vrele(pvp);

	return (0); /* clone closed */
}