/*	$OpenBSD: spec_vnops.c,v 1.78 2013/10/30 03:16:49 guenther Exp $	*/
/*	$NetBSD: spec_vnops.c,v 1.29 1996/04/22 01:42:38 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)spec_vnops.c	8.8 (Berkeley) 11/21/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/lockf.h>
#include <sys/poll.h>
#include <sys/dkio.h>
#include <sys/malloc.h>
#include <sys/specdev.h>

#define v_lastr	v_specinfo->si_lastr

int	spec_open_clone(struct vop_open_args *);
int	spec_close_clone(struct vop_close_args *);

struct vnode *speclisth[SPECHSZ];

struct vops spec_vops = {
	.vop_lookup	= vop_generic_lookup,
	.vop_create	= spec_badop,
	.vop_mknod	= spec_badop,
	.vop_open	= spec_open,
	.vop_close	= spec_close,
	.vop_access	= spec_access,
	.vop_getattr	= spec_getattr,
	.vop_setattr	= spec_setattr,
	.vop_read	= spec_read,
	.vop_write	= spec_write,
	.vop_ioctl	= spec_ioctl,
	.vop_poll	= spec_poll,
	.vop_kqfilter	= spec_kqfilter,
	.vop_revoke	= vop_generic_revoke,
	.vop_fsync	= spec_fsync,
	.vop_remove	= spec_badop,
	.vop_link	= spec_badop,
	.vop_rename	= spec_badop,
	.vop_mkdir	= spec_badop,
	.vop_rmdir	= spec_badop,
	.vop_symlink	= spec_badop,
	.vop_readdir	= spec_badop,
	.vop_readlink	= spec_badop,
	.vop_abortop	= spec_badop,
	.vop_inactive	= spec_inactive,
	.vop_reclaim	= nullop,
	.vop_lock	= vop_generic_lock,
	.vop_unlock	= vop_generic_unlock,
	.vop_islocked	= vop_generic_islocked,
	.vop_bmap	= vop_generic_bmap,
	.vop_strategy	= spec_strategy,
	.vop_print	= spec_print,
	.vop_pathconf	= spec_pathconf,
	.vop_advlock	= spec_advlock,
	.vop_bwrite	= vop_generic_bwrite,
};
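/*
 * Every vnode operation on a special file dispatches through the table
 * above: for example, a VOP_READ() on a character or block device vnode
 * resolves to spec_read() below, while directory-style operations such
 * as VOP_CREATE() and VOP_MKDIR() land in spec_badop(), which panics,
 * since they make no sense on a device node.
 */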
/*
 * Open a special file.
 */
int
spec_open(void *v)
{
	struct vop_open_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	struct vnode *bvp;
	dev_t bdev;
	dev_t dev = (dev_t)vp->v_rdev;
	int maj = major(dev);
	int error;

	/*
	 * Don't allow open if fs is mounted -nodev.
	 */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV))
		return (ENXIO);

	switch (vp->v_type) {

	case VCHR:
		if ((u_int)maj >= nchrdev)
			return (ENXIO);
		if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) {
			/*
			 * When running in very secure mode, do not allow
			 * opens for writing of any disk character devices.
			 */
			if (securelevel >= 2 && cdevsw[maj].d_type == D_DISK)
				return (EPERM);
			/*
			 * When running in secure mode, do not allow opens
			 * for writing of /dev/mem, /dev/kmem, or character
			 * devices whose corresponding block devices are
			 * currently mounted.
			 */
			if (securelevel >= 1) {
				if ((bdev = chrtoblk(dev)) != NODEV &&
				    vfinddev(bdev, VBLK, &bvp) &&
				    bvp->v_usecount > 0 &&
				    (error = vfs_mountedon(bvp)))
					return (error);
				if (iskmemdev(dev))
					return (EPERM);
			}
		}
		if (cdevsw[maj].d_type == D_TTY)
			vp->v_flag |= VISTTY;
		if (cdevsw[maj].d_flags & D_CLONE)
			return (spec_open_clone(ap));
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if ((u_int)maj >= nblkdev)
			return (ENXIO);
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disk block devices.
		 */
		if (securelevel >= 2 && ap->a_cred != FSCRED &&
		    (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK)
			return (EPERM);
		/*
		 * Do not allow opens of block devices that are
		 * currently mounted.
		 */
		if ((error = vfs_mountedon(vp)) != 0)
			return (error);
		return ((*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p));
	case VNON:
	case VLNK:
	case VDIR:
	case VREG:
	case VBAD:
	case VFIFO:
	case VSOCK:
		break;
	}
	return (0);
}

/*
 * Vnode op for read
 */
int
spec_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, nextbn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("spec_read mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_read proc");
#endif
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_read)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
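		/*
		 * Worked example (illustrative): assuming the default
		 * bsize of BLKDEV_IOSIZE (2048 bytes) and a DEV_BSIZE of
		 * 512, bscale is 4.  A read at byte offset 5000 then
		 * yields bn = btodb(5000) & ~3 = 9 & ~3 = 8 and
		 * on = 5000 % 2048 = 904, i.e. the copy starts 904 bytes
		 * into the 2048-byte buffer that begins at device block 8.
		 */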
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			if (vp->v_lastr + bscale == bn) {
				nextbn = bn + bscale;
				error = breadn(vp, bn, bsize, &nextbn, &bsize,
				    1, &bp);
			} else
				error = bread(vp, bn, bsize, &bp);
			vp->v_lastr = bn;
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			brelse(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_read type");
	}
	/* NOTREACHED */
}

int
spec_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * Vnode op for write
 */
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart, FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS && frag != 0 &&
			    fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomove((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
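/*
 * Illustrative note on the read-modify-write cycle in spec_write()
 * above: writing 100 bytes at offset 0 of a 2048-byte block gives
 * on = 0 and n = 100, so the block is read in, partially overwritten
 * and delayed-written with bdwrite(); only a write that fills the
 * whole block (n + on == bsize) is pushed out at once with bawrite().
 */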
/*
 * Device ioctl operation.
 */
int
spec_ioctl(void *v)
{
	struct vop_ioctl_args *ap = v;
	dev_t dev = ap->a_vp->v_rdev;
	int maj = major(dev);

	switch (ap->a_vp->v_type) {

	case VCHR:
		return ((*cdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	case VBLK:
		return ((*bdevsw[maj].d_ioctl)(dev, ap->a_command, ap->a_data,
		    ap->a_fflag, ap->a_p));

	default:
		panic("spec_ioctl");
		/* NOTREACHED */
	}
}

int
spec_poll(void *v)
{
	struct vop_poll_args *ap = v;
	dev_t dev;

	switch (ap->a_vp->v_type) {

	default:
		return (ap->a_events &
		    (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));

	case VCHR:
		dev = ap->a_vp->v_rdev;
		return (*cdevsw[major(dev)].d_poll)(dev, ap->a_events, ap->a_p);
	}
}

int
spec_kqfilter(void *v)
{
	struct vop_kqfilter_args *ap = v;
	dev_t dev;

	dev = ap->a_vp->v_rdev;
	if (cdevsw[major(dev)].d_kqfilter)
		return (*cdevsw[major(dev)].d_kqfilter)(dev, ap->a_kn);
	return (EOPNOTSUPP);
}

/*
 * Synch buffers associated with a block device
 */
int
spec_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	if (vp->v_type == VCHR)
		return (0);
	/*
	 * Flush all dirty buffers associated with a block device.
	 */
loop:
	s = splbio();
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	    bp != LIST_END(&vp->v_dirtyblkhd); bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("spec_fsync: not dirty");
		bremfree(bp);
		buf_acquire(bp);
		splx(s);
		bawrite(bp);
		goto loop;
	}
	if (ap->a_waitfor == MNT_WAIT) {
		vwaitforio(vp, 0, "spec_fsync", 0);

#ifdef DIAGNOSTIC
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			vprint("spec_fsync: dirty", vp);
			goto loop;
		}
#endif
	}
	splx(s);
	return (0);
}
int
spec_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	int maj = major(bp->b_dev);

	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	(*bdevsw[maj].d_strategy)(bp);
	return (0);
}

/*
 * Device close routine
 */
int
spec_close(void *v)
{
	struct vop_close_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	dev_t dev = vp->v_rdev;
	int (*devclose)(dev_t, int, int, struct proc *);
	int mode, relock, error;

	switch (vp->v_type) {

	case VCHR:
		/*
		 * Hack: a tty device that is a controlling terminal
		 * has a reference from the session structure.
		 * We cannot easily tell that a character device is
		 * a controlling terminal, unless it is the closing
		 * process' controlling terminal.  In that case,
		 * if the reference count is 2 (this last descriptor
		 * plus the session), release the reference from the session.
		 */
		if (vcount(vp) == 2 && p != NULL && p->p_p->ps_pgrp &&
		    vp == p->p_p->ps_pgrp->pg_session->s_ttyvp) {
			vrele(vp);
			p->p_p->ps_pgrp->pg_session->s_ttyvp = NULL;
		}
		if (cdevsw[major(dev)].d_flags & D_CLONE)
			return (spec_close_clone(ap));
		/*
		 * If the vnode is locked, then we are in the midst
		 * of forcibly closing the device, otherwise we only
		 * close on last reference.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = cdevsw[major(dev)].d_close;
		mode = S_IFCHR;
		break;

	case VBLK:
		/*
		 * On last close of a block device (that isn't mounted)
		 * we must invalidate any in core blocks, so that
		 * we can, for instance, change floppy disks.  In order to do
		 * that, we must lock the vnode.  If we are coming from
		 * vclean(), the vnode is already locked.
		 */
		if (!(vp->v_flag & VXLOCK))
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		error = vinvalbuf(vp, V_SAVE, ap->a_cred, p, 0, 0);
		if (!(vp->v_flag & VXLOCK))
			VOP_UNLOCK(vp, 0, p);
		if (error)
			return (error);
		/*
		 * We do not want to really close the device if it
		 * is still in use unless we are trying to close it
		 * forcibly.  Since every use (buffer, vnode, swap, cmap)
		 * holds a reference to the vnode, and because we mark
		 * any other vnodes that alias this device, when the
		 * sum of the reference counts on all the aliased
		 * vnodes descends to one, we are on last close.
		 */
		if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0)
			return (0);
		devclose = bdevsw[major(dev)].d_close;
		mode = S_IFBLK;
		break;

	default:
		panic("spec_close: not special");
	}

	/* release lock if held and this isn't coming from vclean() */
	relock = VOP_ISLOCKED(vp) && !(vp->v_flag & VXLOCK);
	if (relock)
		VOP_UNLOCK(vp, 0, p);
	error = (*devclose)(dev, ap->a_fflag, mode, p);
	if (relock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	return (error);
}
int
spec_getattr(void *v)
{
	struct vop_getattr_args *ap = v;
	struct vnode *vp = ap->a_vp;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	return (VOP_GETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, ap->a_p));
}

int
spec_setattr(void *v)
{
	struct vop_setattr_args *ap = v;
	struct proc *p = ap->a_p;
	struct vnode *vp = ap->a_vp;
	int error;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	vn_lock(vp->v_specparent, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_SETATTR(vp->v_specparent, ap->a_vap, ap->a_cred, p);
	VOP_UNLOCK(vp->v_specparent, 0, p);

	return (error);
}

int
spec_access(void *v)
{
	struct vop_access_args *ap = v;
	struct vnode *vp = ap->a_vp;

	if (!(vp->v_flag & VCLONE))
		return (EBADF);

	return (VOP_ACCESS(vp->v_specparent, ap->a_mode, ap->a_cred, ap->a_p));
}

/*
 * Print out the contents of a special device vnode.
 */
int
spec_print(void *v)
{
	struct vop_print_args *ap = v;

	printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev),
	    minor(ap->a_vp->v_rdev));
	return 0;
}

/*
 * Return POSIX pathconf information applicable to special devices.
 */
int
spec_pathconf(void *v)
{
	struct vop_pathconf_args *ap = v;
	int error = 0;

	switch (ap->a_name) {
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		break;
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		break;
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		break;
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		break;
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		break;
	case _PC_TIMESTAMP_RESOLUTION:
		*ap->a_retval = 1;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Special device advisory byte-level locks.
 */
int
spec_advlock(void *v)
{
	struct vop_advlock_args *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lf_advlock(&vp->v_speclockf, (off_t)0, ap->a_id,
	    ap->a_op, ap->a_fl, ap->a_flags));
}

/*
 * Special device bad operation
 */
/*ARGSUSED*/
int
spec_badop(void *v)
{

	panic("spec_badop called");
	/* NOTREACHED */
}

/*
 * Copyright (c) 2006 Pedro Martelletto <pedro@ambientworks.net>
 * Copyright (c) 2006 Thordur Bjornsson <thib@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifdef CLONE_DEBUG
#define DNPRINTF(m...)	do { printf(m); } while (0)
#else
#define DNPRINTF(m...)	/* nothing */
#endif
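/*
 * Sketch of the clone numbering (illustrative; assumes CLONE_SHIFT is
 * 8): opening a clone device with minor 2 that is assigned bitmap slot
 * i = 3 produces a private vnode whose minor is (3 << 8) | 2 = 770.
 * The original minor stays in the low bits and the per-open instance
 * number in the high bits, so both can be recovered later.
 */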
int
spec_open_clone(struct vop_open_args *ap)
{
	struct vnode *cvp, *vp = ap->a_vp;
	struct cloneinfo *cip;
	int error, i;

	DNPRINTF("cloning vnode\n");

	if (minor(vp->v_rdev) >= (1 << CLONE_SHIFT))
		return (ENXIO);

	for (i = 1; i < sizeof(vp->v_specbitmap) * NBBY; i++)
		if (isclr(vp->v_specbitmap, i)) {
			setbit(vp->v_specbitmap, i);
			break;
		}

	if (i == sizeof(vp->v_specbitmap) * NBBY)
		return (EBUSY);	/* too many open instances */

	error = cdevvp(makedev(major(vp->v_rdev),
	    (i << CLONE_SHIFT) | minor(vp->v_rdev)), &cvp);
	if (error) {
		clrbit(vp->v_specbitmap, i);
		return (error);	/* out of vnodes */
	}

	VOP_UNLOCK(vp, 0, ap->a_p);

	error = cdevsw[major(vp->v_rdev)].d_open(cvp->v_rdev, ap->a_mode,
	    S_IFCHR, ap->a_p);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);

	if (error) {
		vput(cvp);
		clrbit(vp->v_specbitmap, i);
		return (error);	/* device open failed */
	}

	cvp->v_flag |= VCLONE;

	cip = malloc(sizeof(struct cloneinfo), M_TEMP, M_WAITOK);
	cip->ci_data = vp->v_data;
	cip->ci_vp = cvp;

	cvp->v_specparent = vp;
	vp->v_flag |= VCLONED;
	vp->v_data = cip;

	DNPRINTF("clone of vnode %p is vnode %p\n", vp, cvp);

	return (0);	/* device cloned */
}

int
spec_close_clone(struct vop_close_args *ap)
{
	struct vnode *pvp, *vp = ap->a_vp;
	int error;

	error = cdevsw[major(vp->v_rdev)].d_close(vp->v_rdev, ap->a_fflag,
	    S_IFCHR, ap->a_p);
	if (error)
		return (error);	/* device close failed */

	pvp = vp->v_specparent;	/* get parent device */
	clrbit(pvp->v_specbitmap, minor(vp->v_rdev) >> CLONE_SHIFT);
	vrele(pvp);

	return (0);	/* clone closed */
}
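/*
 * Continuing the illustrative example above: closing the clone with
 * minor 770 recovers its bitmap slot as 770 >> CLONE_SHIFT = 3, which
 * clrbit() marks free again for the next open of the parent device.
 */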