/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/reg.h>
#include <sys/buf2.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/sysref2.h>
#include <sys/tty.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>

#include <machine/limits.h>

MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_poll(struct vop_poll_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);


static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_poll(struct file *, int, struct ucred *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t, struct ucred *);


static __inline int sequential_heuristic(struct uio *, struct file *);
extern struct lock devfs_lock;

/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = DEVFS_BADOP,
	.vop_bmap = DEVFS_BADOP,
	.vop_close = vop_stdclose,
	.vop_getattr = devfs_getattr,
	.vop_inactive = devfs_inactive,
	.vop_ncreate = DEVFS_BADOP,
	.vop_nresolve = devfs_nresolve,
	.vop_nlookupdotdot = devfs_nlookupdotdot,
	.vop_nlink = DEVFS_BADOP,
	.vop_nmkdir = DEVFS_BADOP,
	.vop_nmknod = DEVFS_BADOP,
	.vop_nremove = devfs_nremove,
	.vop_nrename = DEVFS_BADOP,
	.vop_nrmdir = DEVFS_BADOP,
	.vop_nsymlink = devfs_nsymlink,
	.vop_open = vop_stdopen,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_read = DEVFS_BADOP,
	.vop_readdir = devfs_readdir,
	.vop_readlink = devfs_readlink,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_write = DEVFS_BADOP,
	.vop_ioctl = DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default = vop_defaultop,
	.vop_access = devfs_access,
	.vop_advlock = devfs_spec_advlock,
	.vop_bmap = devfs_spec_bmap,
	.vop_close = devfs_spec_close,
	.vop_freeblks = devfs_spec_freeblks,
	.vop_fsync = devfs_spec_fsync,
	.vop_getattr = devfs_getattr,
	.vop_getpages = devfs_spec_getpages,
	.vop_inactive = devfs_inactive,
	.vop_open = devfs_spec_open,
	.vop_pathconf = vop_stdpathconf,
	.vop_print = devfs_print,
	.vop_poll = devfs_spec_poll,
	.vop_kqfilter = devfs_spec_kqfilter,
	.vop_read = devfs_spec_read,
	.vop_readdir = DEVFS_BADOP,
	.vop_readlink = DEVFS_BADOP,
	.vop_reclaim = devfs_reclaim,
	.vop_setattr = devfs_setattr,
	.vop_strategy = devfs_spec_strategy,
	.vop_write = devfs_spec_write,
	.vop_ioctl = devfs_spec_ioctl
};

struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read = devfs_specf_read,
	.fo_write = devfs_specf_write,
	.fo_ioctl = devfs_specf_ioctl,
	.fo_poll = devfs_specf_poll,
	.fo_kqfilter = devfs_specf_kqfilter,
	.fo_stat = devfs_specf_stat,
	.fo_close = devfs_specf_close,
	.fo_shutdown = nofo_shutdown
};

/*
 * These two functions are possibly temporary hacks for
 * devices (aka the pty code) which want to control the
 * node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}

/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	return (EIO);
}


static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}


static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}


static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if the devfs lock is already held exclusively by us;
	 * if not, acquire it.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}


static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if (!devfs_node_is_accessible(dnode))
		return (ENOENT);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}


static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int hidden = 0;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
		else
			hidden = 1;
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}


static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;	/* XXX: what should this be? */
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = sizeof(struct devfs_node);

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Pdev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != NULL) {
		vap->va_size = node->symlink_namelen;
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}


static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error)
				goto out;
		}
		node->mode = vap->va_mode;
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


static int
devfs_print(struct vop_print_args *ap)
{
	return (0);
}


static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}


static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
				DEVFS_MNTDATA(vp->v_mount)->root_node,
				ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Proot) ?
					    "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vp->v_flag |= VISTTY;

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}


	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX));
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	if (orig_vp)
		vn_unlock(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}


static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	/* dev may be NULL if the vnode has already been revoked */
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s! \n",
		    (dev ? dev->si_name : "#NULL#"));

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}


static int
devfs_specf_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);
	rel_mplock();

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto done;
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}


static int
devfs_specf_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}


static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	struct vnode *vp;
	u_short mode;
	cdev_t dev;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	if (error) {
		rel_mplock();
		return (error);
	}

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error) {
		rel_mplock();
		return (error);
	}

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 *  object.  In some filesystem types, this may vary from file
	 *  to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	sb->st_fsmid = vap->va_fsmid;

	rel_mplock();
	return (0);
}


static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct devfs_node *node;
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	error = dev_dkqfilter(dev, kn);

	release_dev(dev);

done:
	rel_mplock();
	return (error);
}


static int
devfs_specf_poll(struct file *fp, int events, struct ucred *cred)
{
	struct devfs_node *node;
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);
	error = dev_dpoll(dev, events);

	release_dev(dev);

#if 0
	if (node)
		nanotime(&node->atime);
#endif
done:
	rel_mplock();
	return (error);
}


/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	get_mplock();
	vp = ((struct vnode *)fp->f_data);
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;		/* device was revoked */
		goto out;
	}

	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_specf_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}
	reference_dev(dev);
	error = dev_dioctl(dev, com, data, fp->f_flag, ucred);
	release_dev(dev);
#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	rel_mplock();
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished! \n");
	return (error);
}


static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data,
			   ap->a_fflag, ap->a_cred));
}

/*
 * spec_poll(struct vnode *a_vp, int a_events, struct ucred *a_cred)
 */
/* ARGSUSED */
static int
devfs_spec_poll(struct vop_poll_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dpoll(dev, ap->a_events));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCKINIT(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
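
/*
 * Worked example of the chunking above (the values are illustrative and
 * not taken from any particular driver): with si_bsize_phys = 512 and
 * si_iosize_max = 131072, chunksize becomes 131072 / 512 * 512 = 128KB.
 * A 200KB BUF_CMD_WRITE is therefore issued as a first 128KB transfer by
 * devfs_spec_strategy(), and devfs_spec_strategy_done() below re-targets
 * the cloned buf at the remaining 72KB (capped at chunksize per
 * iteration) until the whole original request has been pushed down, or
 * until an error or short transfer terminates the chain early.
 */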

/*
 * Chunked up transfer completion routine - chain transfers until done
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}


/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
		++runningbufcount;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_validclean()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_validclean(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "               size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "               nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}
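
/*
 * Illustration of the heuristic above (BKVASIZE and IO_SEQMAX are
 * platform constants, assumed here to be 16384 and 0x7f): a caller that
 * keeps reading 64KB starting at f_nextoff advances f_seqcount by 4 per
 * call until it saturates at IO_SEQMAX.  The file ops above fold the
 * result into the device I/O flags via
 * "ioflag |= sequential_heuristic(uio, fp);", so the driver sees
 * (f_seqcount << IO_SEQSHIFT) and can tune read-ahead accordingly.
 * Non-sequential accesses quickly draw f_seqcount back down to 1 and
 * then 0.
 */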