1 /* $OpenBSD: spec_vnops.c,v 1.68 2011/07/30 10:26:03 jsing Exp $ */ 2 /* $NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $ */ 3 4 /* 5 * Copyright (c) 1989, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>
#include <sys/dkio.h>
#include <sys/malloc.h>
#include <sys/specdev.h>

/* Shorthand: the last-read block hint stored in the vnode's specinfo. */
#define v_lastr	v_specinfo->si_lastr

/* Helpers for cloning character devices (D_CLONE); defined below. */
int	spec_open_clone(struct vop_open_args *);
int	spec_close_clone(struct vop_close_args *);

/* Hash table of active special vnodes, keyed by device (see SPECHASH). */
struct vnode *speclisth[SPECHSZ];

/*
 * Vnode operations vector for special files (character and block
 * devices).  Directory-style operations are invalid on devices and
 * are wired to spec_badop.
 */
struct vops spec_vops = {
	.vop_lookup	= vop_generic_lookup,
	.vop_create	= spec_badop,
	.vop_mknod	= spec_badop,
	.vop_open	= spec_open,
	.vop_close	= spec_close,
	.vop_access	= spec_access,
	.vop_getattr	= spec_getattr,
	.vop_setattr	= spec_setattr,
	.vop_read	= spec_read,
	.vop_write	= spec_write,
	.vop_ioctl	= spec_ioctl,
	.vop_poll	= spec_poll,
	.vop_kqfilter	= spec_kqfilter,
	.vop_revoke	= vop_generic_revoke,
	.vop_fsync	= spec_fsync,
	.vop_remove	= spec_badop,
	.vop_link	= spec_badop,
	.vop_rename	= spec_badop,
	.vop_mkdir	= spec_badop,
	.vop_rmdir	= spec_badop,
	.vop_symlink	= spec_badop,
	.vop_readdir	= spec_badop,
	.vop_readlink	= spec_badop,
	.vop_abortop	= spec_badop,
	.vop_inactive	= spec_inactive,
	.vop_reclaim	= nullop,
	.vop_lock	= vop_generic_lock,
	.vop_unlock	= vop_generic_unlock,
	.vop_islocked	= vop_generic_islocked,
	.vop_bmap	= vop_generic_bmap,
	.vop_strategy	= spec_strategy,
	.vop_print	= spec_print,
	.vop_pathconf	= spec_pathconf,
	.vop_advlock	= spec_advlock,
	.vop_bwrite	= vop_generic_bwrite,
};

/*
 * Open a special file.
102 */ 103 int 104 spec_open(void *v) 105 { 106 struct vop_open_args *ap = v; 107 struct proc *p = ap->a_p; 108 struct vnode *vp = ap->a_vp; 109 struct vnode *bvp; 110 dev_t bdev; 111 dev_t dev = (dev_t)vp->v_rdev; 112 int maj = major(dev); 113 int error; 114 115 /* 116 * Don't allow open if fs is mounted -nodev. 117 */ 118 if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) 119 return (ENXIO); 120 121 switch (vp->v_type) { 122 123 case VCHR: 124 if ((u_int)maj >= nchrdev) 125 return (ENXIO); 126 if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) { 127 /* 128 * When running in very secure mode, do not allow 129 * opens for writing of any disk character devices. 130 */ 131 if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK) 132 return (EPERM); 133 /* 134 * When running in secure mode, do not allow opens 135 * for writing of /dev/mem, /dev/kmem, or character 136 * devices whose corresponding block devices are 137 * currently mounted. 138 */ 139 if (securelevel >= 1) { 140 if ((bdev = chrtoblk(dev)) != NODEV && 141 vfinddev(bdev, VBLK, &bvp) && 142 bvp->v_usecount > 0 && 143 (error = vfs_mountedon(bvp))) 144 return (error); 145 if (iskmemdev(dev)) 146 return (EPERM); 147 } 148 } 149 if (cdevsw[maj].d_type == D_TTY) 150 vp->v_flag |= VISTTY; 151 if (cdevsw[maj].d_flags & D_CLONE) 152 return (spec_open_clone(ap)); 153 VOP_UNLOCK(vp, 0, p); 154 error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, ap->a_p); 155 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 156 return (error); 157 158 case VBLK: 159 if ((u_int)maj >= nblkdev) 160 return (ENXIO); 161 /* 162 * When running in very secure mode, do not allow 163 * opens for writing of any disk block devices. 164 */ 165 if (securelevel >= 2 && ap->a_cred != FSCRED && 166 (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) 167 return (EPERM); 168 /* 169 * Do not allow opens of block devices that are 170 * currently mounted. 
171 */ 172 if ((error = vfs_mountedon(vp)) != 0) 173 return (error); 174 return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, ap->a_p)); 175 case VNON: 176 case VLNK: 177 case VDIR: 178 case VREG: 179 case VBAD: 180 case VFIFO: 181 case VSOCK: 182 break; 183 } 184 return (0); 185 } 186 187 /* 188 * Vnode op for read 189 */ 190 int 191 spec_read(void *v) 192 { 193 struct vop_read_args *ap = v; 194 struct vnode *vp = ap->a_vp; 195 struct uio *uio = ap->a_uio; 196 struct proc *p = uio->uio_procp; 197 struct buf *bp; 198 daddr64_t bn, nextbn, bscale; 199 int bsize; 200 struct partinfo dpart; 201 int n, on, majordev; 202 int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *); 203 int error = 0; 204 205 #ifdef DIAGNOSTIC 206 if (uio->uio_rw != UIO_READ) 207 panic("spec_read mode"); 208 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc) 209 panic("spec_read proc"); 210 #endif 211 if (uio->uio_resid == 0) 212 return (0); 213 214 switch (vp->v_type) { 215 216 case VCHR: 217 VOP_UNLOCK(vp, 0, p); 218 error = (*cdevsw[major(vp->v_rdev)].d_read) 219 (vp->v_rdev, uio, ap->a_ioflag); 220 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 221 return (error); 222 223 case VBLK: 224 if (uio->uio_offset < 0) 225 return (EINVAL); 226 bsize = BLKDEV_IOSIZE; 227 if ((majordev = major(vp->v_rdev)) < nblkdev && 228 (ioctl = bdevsw[majordev].d_ioctl) != NULL && 229 (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) { 230 u_int32_t frag = 231 DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock); 232 u_int32_t fsize = 233 DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock); 234 if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 && 235 fsize != 0) 236 bsize = frag * fsize; 237 } 238 bscale = btodb(bsize); 239 do { 240 bn = btodb(uio->uio_offset) & ~(bscale - 1); 241 on = uio->uio_offset % bsize; 242 n = min((bsize - on), uio->uio_resid); 243 if (vp->v_lastr + bscale == bn) { 244 nextbn = bn + bscale; 245 error = breadn(vp, bn, bsize, &nextbn, &bsize, 246 1, &bp); 247 } else 248 
error = bread(vp, bn, bsize, &bp); 249 vp->v_lastr = bn; 250 n = min(n, bsize - bp->b_resid); 251 if (error) { 252 brelse(bp); 253 return (error); 254 } 255 error = uiomove((char *)bp->b_data + on, n, uio); 256 brelse(bp); 257 } while (error == 0 && uio->uio_resid > 0 && n != 0); 258 return (error); 259 260 default: 261 panic("spec_read type"); 262 } 263 /* NOTREACHED */ 264 } 265 266 int 267 spec_inactive(void *v) 268 { 269 struct vop_inactive_args *ap = v; 270 271 VOP_UNLOCK(ap->a_vp, 0, ap->a_p); 272 return (0); 273 } 274 275 /* 276 * Vnode op for write 277 */ 278 int 279 spec_write(void *v) 280 { 281 struct vop_write_args *ap = v; 282 struct vnode *vp = ap->a_vp; 283 struct uio *uio = ap->a_uio; 284 struct proc *p = uio->uio_procp; 285 struct buf *bp; 286 daddr64_t bn, bscale; 287 int bsize; 288 struct partinfo dpart; 289 int n, on, majordev; 290 int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *); 291 int error = 0; 292 293 #ifdef DIAGNOSTIC 294 if (uio->uio_rw != UIO_WRITE) 295 panic("spec_write mode"); 296 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc) 297 panic("spec_write proc"); 298 #endif 299 300 switch (vp->v_type) { 301 302 case VCHR: 303 VOP_UNLOCK(vp, 0, p); 304 error = (*cdevsw[major(vp->v_rdev)].d_write) 305 (vp->v_rdev, uio, ap->a_ioflag); 306 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); 307 return (error); 308 309 case VBLK: 310 if (uio->uio_resid == 0) 311 return (0); 312 if (uio->uio_offset < 0) 313 return (EINVAL); 314 bsize = BLKDEV_IOSIZE; 315 if ((majordev = major(vp->v_rdev)) < nblkdev && 316 (ioctl = bdevsw[majordev].d_ioctl) != NULL && 317 (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) { 318 u_int32_t frag = 319 DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock); 320 u_int32_t fsize = 321 DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock); 322 if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 && 323 fsize != 0) 324 bsize = frag * fsize; 325 } 326 bscale = btodb(bsize); 327 do { 328 bn = 
btodb(uio->uio_offset) & ~(bscale - 1); 329 on = uio->uio_offset % bsize; 330 n = min((bsize - on), uio->uio_resid); 331 error = bread(vp, bn, bsize, &bp); 332 n = min(n, bsize - bp->b_resid); 333 if (error) { 334 brelse(bp); 335 return (error); 336 } 337 error = uiomove((char *)bp->b_data + on, n, uio); 338 if (n + on == bsize) 339 bawrite(bp); 340 else 341 bdwrite(bp); 342 } while (error == 0 && uio->uio_resid > 0 && n != 0); 343 return (error); 344 345 default: 346 panic("spec_write type"); 347 } 348 /* NOTREACHED */ 349 } 350 351 /* 352 * Device ioctl operation. 353 */ 354 int 355 spec_ioctl(void *v) 356 { 357 struct vop_ioctl_args *ap = v; 358 dev_t dev = ap->a_vp->v_rdev; 359 int maj = major(dev); 360 361 switch (ap->a_vp->v_type) { 362 363 case VCHR: 364 return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data, 365 ap->a_fflag, ap->a_p)); 366 367 case VBLK: 368 return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data, 369 ap->a_fflag, ap->a_p)); 370 371 default: 372 panic("spec_ioctl"); 373 /* NOTREACHED */ 374 } 375 } 376 377 int 378 spec_poll(void *v) 379 { 380 struct vop_poll_args *ap = v; 381 dev_t dev; 382 383 switch (ap->a_vp->v_type) { 384 385 default: 386 return (ap->a_events & 387 (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 388 389 case VCHR: 390 dev = ap->a_vp->v_rdev; 391 return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p); 392 } 393 } 394 int 395 spec_kqfilter(void *v) 396 { 397 struct vop_kqfilter_args *ap = v; 398 399 dev_t dev; 400 401 dev = ap->a_vp->v_rdev; 402 if (cdevsw[major(dev)].d_kqfilter) 403 return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn); 404 return (EOPNOTSUPP); 405 } 406 407 /* 408 * Synch buffers associated with a block device 409 */ 410 int 411 spec_fsync(void *v) 412 { 413 struct vop_fsync_args *ap = v; 414 struct vnode *vp = ap->a_vp; 415 struct buf *bp; 416 struct buf *nbp; 417 int s; 418 419 if (vp->v_type == VCHR) 420 return (0); 421 /* 422 * Flush all dirty buffers associated with a block 
device. 423 */ 424 loop: 425 s = splbio(); 426 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); 427 bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) { 428 nbp = LIST_NEXT(bp, b_vnbufs); 429 if ((bp->b_flags & B_BUSY)) 430 continue; 431 if ((bp->b_flags & B_DELWRI) == 0) 432 panic("spec_fsync: not dirty"); 433 bremfree(bp); 434 buf_acquire(bp); 435 splx(s); 436 bawrite(bp); 437 goto loop; 438 } 439 if (ap->a_waitfor == MNT_WAIT) { 440 vwaitforio (vp, 0, "spec_fsync", 0); 441 442 #ifdef DIAGNOSTIC 443 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) { 444 splx(s); 445 vprint("spec_fsync: dirty", vp); 446 goto loop; 447 } 448 #endif 449 } 450 splx(s); 451 return (0); 452 } 453 454 int 455 spec_strategy(void *v) 456 { 457 struct vop_strategy_args *ap = v; 458 struct buf *bp = ap->a_bp; 459 int maj = major(bp->b_dev); 460 461 if (LIST_FIRST(&bp->b_dep) != NULL) 462 buf_start(bp); 463 464 (*bdevsw[maj].d_strategy)(bp); 465 return (0); 466 } 467 468 /* 469 * Device close routine 470 */ 471 int 472 spec_close(void *v) 473 { 474 struct vop_close_args *ap = v; 475 struct vnode *vp = ap->a_vp; 476 dev_t dev = vp->v_rdev; 477 int (*devclose)(dev_t, int, int, struct proc *); 478 int mode, error; 479 480 switch (vp->v_type) { 481 482 case VCHR: 483 /* 484 * Hack: a tty device that is a controlling terminal 485 * has a reference from the session structure. 486 * We cannot easily tell that a character device is 487 * a controlling terminal, unless it is the closing 488 * process' controlling terminal. In that case, 489 * if the reference count is 2 (this last descriptor 490 * plus the session), release the reference from the session. 
491 */ 492 if (vcount(vp) == 2 && ap->a_p && ap->a_p->p_p->ps_pgrp && 493 vp == ap->a_p->p_p->ps_pgrp->pg_session->s_ttyvp) { 494 vrele(vp); 495 ap->a_p->p_p->ps_pgrp->pg_session->s_ttyvp = NULL; 496 } 497 if (cdevsw[major(dev)].d_flags & D_CLONE) 498 return (spec_close_clone(ap)); 499 /* 500 * If the vnode is locked, then we are in the midst 501 * of forcably closing the device, otherwise we only 502 * close on last reference. 503 */ 504 if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0) 505 return (0); 506 devclose = cdevsw[major(dev)].d_close; 507 mode = S_IFCHR; 508 break; 509 510 case VBLK: 511 /* 512 * On last close of a block device (that isn't mounted) 513 * we must invalidate any in core blocks, so that 514 * we can, for instance, change floppy disks. In order to do 515 * that, we must lock the vnode. If we are coming from 516 * vclean(), the vnode is already locked. 517 */ 518 if (!(vp->v_flag & VXLOCK)) 519 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); 520 error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0); 521 if (!(vp->v_flag & VXLOCK)) 522 VOP_UNLOCK(vp, 0, ap->a_p); 523 if (error) 524 return (error); 525 /* 526 * We do not want to really close the device if it 527 * is still in use unless we are trying to close it 528 * forcibly. Since every use (buffer, vnode, swap, cmap) 529 * holds a reference to the vnode, and because we mark 530 * any other vnodes that alias this device, when the 531 * sum of the reference counts on all the aliased 532 * vnodes descends to one, we are on last close. 
533 */ 534 if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0) 535 return (0); 536 devclose = bdevsw[major(dev)].d_close; 537 mode = S_IFBLK; 538 break; 539 540 default: 541 panic("spec_close: not special"); 542 } 543 544 return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p)); 545 } 546 547 int 548 spec_getattr(void *v) 549 { 550 struct vop_getattr_args *ap = v; 551 struct vnode *vp = ap->a_vp; 552 553 if (!(vp->v_flag & VCLONE)) 554 return (EBADF); 555 556 return (VOP_GETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, ap->a_p)); 557 } 558 559 int 560 spec_setattr(void *v) 561 { 562 struct vop_getattr_args *ap = v; 563 struct vnode *vp = ap->a_vp; 564 int error; 565 566 if (!(vp->v_flag & VCLONE)) 567 return (EBADF); 568 569 vn_lock(vp->v_specparent, LK_EXCLUSIVE|LK_RETRY, ap->a_p); 570 error = VOP_SETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, ap->a_p); 571 VOP_UNLOCK(vp, 0, ap->a_p); 572 573 return (error); 574 } 575 576 int 577 spec_access(void *v) 578 { 579 struct vop_access_args *ap = v; 580 struct vnode *vp = ap->a_vp; 581 582 if (!(vp->v_flag & VCLONE)) 583 return (EBADF); 584 585 return (VOP_ACCESS(vp->v_specparent, ap->a_mode, ap->a_cred, ap->a_p)); 586 } 587 588 /* 589 * Print out the contents of a special device vnode. 590 */ 591 int 592 spec_print(void *v) 593 { 594 struct vop_print_args *ap = v; 595 596 printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev), 597 minor(ap->a_vp->v_rdev)); 598 return 0; 599 } 600 601 /* 602 * Return POSIX pathconf information applicable to special devices. 
603 */ 604 int 605 spec_pathconf(void *v) 606 { 607 struct vop_pathconf_args *ap = v; 608 609 switch (ap->a_name) { 610 case _PC_LINK_MAX: 611 *ap->a_retval = LINK_MAX; 612 return (0); 613 case _PC_MAX_CANON: 614 *ap->a_retval = MAX_CANON; 615 return (0); 616 case _PC_MAX_INPUT: 617 *ap->a_retval = MAX_INPUT; 618 return (0); 619 case _PC_PIPE_BUF: 620 *ap->a_retval = PIPE_BUF; 621 return (0); 622 case _PC_CHOWN_RESTRICTED: 623 *ap->a_retval = 1; 624 return (0); 625 case _PC_VDISABLE: 626 *ap->a_retval = _POSIX_VDISABLE; 627 return (0); 628 default: 629 return (EINVAL); 630 } 631 /* NOTREACHED */ 632 } 633 634 /* 635 * Special device advisory byte-level locks. 636 */ 637 int 638 spec_advlock(void *v) 639 { 640 struct vop_advlock_args *ap = v; 641 struct vnode *vp = ap->a_vp; 642 643 return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id, 644 ap->a_op, ap->a_fl, ap->a_flags)); 645 } 646 647 /* 648 * Special device bad operation 649 */ 650 /*ARGSUSED*/ 651 int 652 spec_badop(void *v) 653 { 654 655 panic("spec_badop called"); 656 /* NOTREACHED */ 657 } 658 659 /* 660 * Copyright (c) 2006 Pedro Martelletto <pedro@ambientworks.net> 661 * Copyright (c) 2006 Thordur Bjornsson <thib@openbsd.org> 662 * 663 * Permission to use, copy, modify, and distribute this software for any 664 * purpose with or without fee is hereby granted, provided that the above 665 * copyright notice and this permission notice appear in all copies. 666 * 667 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 668 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 669 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 670 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 671 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 672 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 673 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

#ifdef CLONE_DEBUG
#define DNPRINTF(m...)	do { printf(m); } while (0)
#else
#define DNPRINTF(m...)	/* nothing */
#endif

/*
 * Open a cloning character device: allocate an unused minor number,
 * create a fresh vnode for it, and call the driver's open on that
 * new (cloned) device.  Called from spec_open() with vp locked.
 */
int
spec_open_clone(struct vop_open_args *ap)
{
	struct vnode *cvp, *vp = ap->a_vp;
	struct cloneinfo *cip;
	int error, i;

	DNPRINTF("cloning vnode\n");

	/*
	 * Find a free minor in the parent's bitmap.  Minor 0 is the
	 * parent device itself, so the search starts at 1.
	 */
	for (i = 1; i < sizeof(vp->v_specbitmap) * NBBY; i++)
		if (isclr(vp->v_specbitmap, i)) {
			setbit(vp->v_specbitmap, i);
			break;
		}

	if (i == sizeof(vp->v_specbitmap) * NBBY)
		return (EBUSY); /* too many open instances */

	/* New vnode for (same major, newly allocated minor). */
	error = cdevvp(makedev(major(vp->v_rdev), i), &cvp);
	if (error)
		return (error); /* out of vnodes */

	/* Unlock across the driver open, which may sleep. */
	VOP_UNLOCK(vp, 0, ap->a_p);

	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
	    S_IFCHR, ap->a_p);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);

	if (error) {
		/* Give the minor back on failure. */
		clrbit(vp->v_specbitmap, i);
		return (error); /* device open failed */
	}

	cvp->v_flag |= VCLONE;

	/*
	 * Swizzle the parent's v_data behind a cloneinfo so the
	 * original data pointer can be recovered later.
	 * NOTE(review): each clone open overwrites vp->v_data with a
	 * new cloneinfo; presumably the vfs clone lookup layer unwinds
	 * this chain — confirm against the callers of spec_open_clone.
	 */
	cip = malloc(sizeof(struct cloneinfo), M_TEMP, M_WAITOK);
	cip->ci_data = vp->v_data;
	cip->ci_vp = cvp;

	cvp->v_specparent = vp;
	vp->v_flag |= VCLONED;
	vp->v_data = cip;

	DNPRINTF("clone of vnode %p is vnode %p\n", vp, cvp);

	return (0); /* device cloned */
}

/*
 * Close a cloned device vnode: call the driver close, then release
 * the clone's minor number and the reference on the parent vnode.
 * NOTE(review): the vrele() assumes a reference on the parent was
 * taken when the clone was created — verify against the clone
 * lookup path, since spec_open_clone() itself does not vref(vp).
 */
int
spec_close_clone(struct vop_close_args *ap)
{
	struct vnode *pvp, *vp = ap->a_vp;
	int error;

	error = cdevsw[major(vp->v_rdev)].d_close(vp->v_rdev, ap->a_fflag,
	    S_IFCHR, ap->a_p);
	if (error)
		return (error); /* device close failed */

	pvp = vp->v_specparent; /* get parent device */
	clrbit(pvp->v_specbitmap, minor(vp->v_rdev));
	vrele(pvp);

	return (0); /* clone closed */
}