/*	$OpenBSD: spec_vnops.c,v 1.105 2021/10/02 08:51:41 semarie Exp $	*/
/*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>
#include <sys/dkio.h>
#include <sys/malloc.h>
#include <sys/specdev.h>
#include <sys/unistd.h>

#include <uvm/uvm_extern.h>

#define v_lastr	v_specinfo->si_lastr

int	spec_open_clone(struct vop_open_args *);

struct vnodechain speclisth[SPECHSZ];

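/*
 * Vnode operations used for character and block special files.
 */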
const struct vops spec_vops = {
	.vop_lookup	= vop_generic_lookup,
	.vop_create	= vop_generic_badop,
	.vop_mknod	= vop_generic_badop,
	.vop_open	= spec_open,
	.vop_close	= spec_close,
	.vop_access	= spec_access,
	.vop_getattr	= spec_getattr,
	.vop_setattr	= spec_setattr,
	.vop_read	= spec_read,
	.vop_write	= spec_write,
	.vop_ioctl	= spec_ioctl,
	.vop_poll	= spec_poll,
	.vop_kqfilter	= spec_kqfilter,
	.vop_revoke	= vop_generic_revoke,
	.vop_fsync	= spec_fsync,
	.vop_remove	= vop_generic_badop,
	.vop_link	= vop_generic_badop,
	.vop_rename	= vop_generic_badop,
	.vop_mkdir	= vop_generic_badop,
	.vop_rmdir	= vop_generic_badop,
	.vop_symlink	= vop_generic_badop,
	.vop_readdir	= vop_generic_badop,
	.vop_readlink	= vop_generic_badop,
	.vop_abortop	= vop_generic_badop,
	.vop_inactive	= spec_inactive,
	.vop_reclaim	= nullop,
	.vop_lock	= vop_generic_lock,
	.vop_unlock	= vop_generic_unlock,
	.vop_islocked	= vop_generic_islocked,
	.vop_bmap	= vop_generic_bmap,
	.vop_strategy	= spec_strategy,
	.vop_print	= spec_print,
	.vop_pathconf	= spec_pathconf,
	.vop_advlock	= spec_advlock,
	.vop_bwrite	= vop_generic_bwrite,
};

/*
 * Open a special file.
 */
int
spec_open(void *v)
{
	struct vop_open_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		if (cdevsw[maj].d_flags & D_CLONE)
			return (spec_open_clone(ap));
		VOP_UNLOCK(vp);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
int
spec_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn, bscale;
	int bsize;
	struct partinfo dpart;
	size_t n;
	int on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
		    (vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = ulmin((bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn, &bsize,
				    1, &bp);
			} else
				error = bread(vp, bn, bsize, &bp);
			vp->v_lastr = bn;
			n = ulmin(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

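/*
 * Vnode op for inactive: nothing to do here beyond releasing the
 * vnode lock.
 */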
int
spec_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	VOP_UNLOCK(ap->a_vp);
	return (0);
}

/*
 * Vnode op for write
 */
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	size_t n;
	int on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
		    (vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = ulmin((bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, &bp);
			n = ulmin(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}

/*
 * Device ioctl operation.
 */
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

int
spec_poll(void *v)
{
	struct vop_poll_args *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {
	default:
		return (ap->a_events &
		    (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
	}
}

int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	dev_t dev;

	dev = ap->a_vp->v_rdev;

	switch (ap->a_vp->v_type) {
	default:
		if (ap->a_kn->kn_flags & __EV_POLL)
			return seltrue_kqfilter(dev, ap->a_kn);
		break;
	case VCHR:
		if (cdevsw[major(dev)].d_kqfilter)
			return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	}
	return (EOPNOTSUPP);
}

/*
 * Synch buffers associated with a block device
 */
int
spec_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	LIST_FOREACH_SAFE(bp, &vp->v_dirtyblkhd, b_vnbufs, nbp) {
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio(vp, 0, "spec_fsync", INFSLP);

#ifdef DIAGNOSTIC
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}

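/*
 * Hand a buffer I/O request to the block device's strategy routine.
 */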
int
spec_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}

/*
 * Device close routine
 */
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, relock, xlocked, error;
	int clone = 0;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p != NULL && p->p_p->ps_pgrp &&
		    vp == p->p_p->ps_pgrp->pg_session->s_ttyvp) {
			vrele(vp);
			p->p_p->ps_pgrp->pg_session->s_ttyvp = NULL;
		}
		if (cdevsw[major(dev)].d_flags & D_CLONE) {
			clone = 1;
		} else {
			/*
			 * If the vnode is locked, then we are in the midst
			 * of forcibly closing the device, otherwise we only
			 * close on last reference.
			 */
			mtx_enter(&vnode_mtx);
			xlocked = (vp->v_lflag & VXLOCK);
			mtx_leave(&vnode_mtx);
			if (vcount(vp) > 1 && !xlocked)
				return (0);
		}
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks. In order to do
		 * that, we must lock the vnode. If we are coming from
		 * vclean(), the vnode is already locked.
		 */
		mtx_enter(&vnode_mtx);
		xlocked = (vp->v_lflag & VXLOCK);
		mtx_leave(&vnode_mtx);
		if (!xlocked)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, p, 0, INFSLP);
		if (!xlocked)
			VOP_UNLOCK(vp);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly. Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		mtx_enter(&vnode_mtx);
		xlocked = (vp->v_lflag & VXLOCK);
		mtx_leave(&vnode_mtx);
		if (vcount(vp) > 1 && !xlocked)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* release lock if held and this isn't coming from vclean() */
	mtx_enter(&vnode_mtx);
	xlocked = (vp->v_lflag & VXLOCK);
	mtx_leave(&vnode_mtx);
	relock = VOP_ISLOCKED(vp) && !xlocked;
	if (relock)
		VOP_UNLOCK(vp);
	error = (*devclose)(dev, ap->a_fflag, mode, p);
	if (relock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (error == 0 && clone) {
		struct vnode *pvp;

		pvp = vp->v_specparent; /* get parent device */
		clrbit(pvp->v_specbitmap, minor(dev) >> CLONE_SHIFT);
		vrele(pvp);
	}

	return (error);
}

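/*
 * Attribute and access operations are only supported on cloned vnodes;
 * they are passed through to the parent (cloning) device vnode.
 */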
int
spec_getattr(void *v)
{
	struct vop_getattr_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_GETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, ap->a_p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

int
spec_setattr(void *v)
{
	struct vop_setattr_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_SETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

int
spec_access(void *v)
{
	struct vop_access_args *ap = v;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY);
	error = VOP_ACCESS(vp->v_specparent, ap->a_mode, ap->a_cred, ap->a_p);
	VOP_UNLOCK(vp->v_specparent);

	return (error);
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(void *v)
{
	struct vop_print_args *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args *ap = v;
	int error = 0;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		break;
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		break;
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		break;
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		break;
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		break;
	case _PC_TIMESTAMP_RESOLUTION:
		*ap->a_retval = 1;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Special device advisory byte-level locks.
 */
int
spec_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
	    ap->a_op, ap->a_fl, ap->a_flags));
}

/*
 * Copyright (c) 2006 Pedro Martelletto <pedro@ambientworks.net>
 * Copyright (c) 2006 Thordur Bjornsson <thib@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef CLONE_DEBUG
#define DNPRINTF(m...)	do { printf(m); } while (0)
#else
#define DNPRINTF(m...)	/* nothing */
#endif

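/*
 * Open a cloning device: pick a free clone minor from the parent's
 * bitmap, create a vnode for the new minor and open the underlying
 * character device through it.
 */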
int
spec_open_clone(struct vop_open_args *ap)
{
	struct vnode *cvp, *vp = ap->a_vp;
	struct cloneinfo *cip;
	int error, i;

	DNPRINTF("cloning vnode\n");

	if (minor(vp->v_rdev) >= (1 << CLONE_SHIFT))
		return (ENXIO);

	for (i = 1; i < CLONE_MAPSZ * NBBY; i++)
		if (isclr(vp->v_specbitmap, i)) {
			setbit(vp->v_specbitmap, i);
			break;
		}

	if (i == CLONE_MAPSZ * NBBY)
		return (EBUSY); /* too many open instances */

	error = cdevvp(makedev(major(vp->v_rdev),
	    (i << CLONE_SHIFT) | minor(vp->v_rdev)), &cvp);
	if (error) {
		clrbit(vp->v_specbitmap, i);
		return (error); /* out of vnodes */
	}

	VOP_UNLOCK(vp);

	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
	    S_IFCHR, ap->a_p);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (error) {
		vput(cvp);
		clrbit(vp->v_specbitmap, i);
		return (error); /* device open failed */
	}

	cvp->v_flag |= VCLONE;

	cip = malloc(sizeof(struct cloneinfo), M_TEMP, M_WAITOK);
	cip->ci_data = vp->v_data;
	cip->ci_vp = cvp;

	cvp->v_specparent = vp;
	vp->v_flag |= VCLONED;
	vp->v_data = cip;

	DNPRINTF("clone of vnode %p is vnode %p\n", vp, cvp);

	return (0); /* device cloned */
}