/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/reg.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/tty.h>
#include <sys/diskslice.h>
#include <sys/sysctl.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>
#include <vfs/fifofs/fifo.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>
#include <vm/vm_page2.h>

MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_badop

static int devfs_badop(struct vop_generic_args *);
static int devfs_access(struct vop_access_args *);
static int devfs_inactive(struct vop_inactive_args *);
static int devfs_reclaim(struct vop_reclaim_args *);
static int devfs_readdir(struct vop_readdir_args *);
static int devfs_getattr(struct vop_getattr_args *);
static int devfs_setattr(struct vop_setattr_args *);
static int devfs_readlink(struct vop_readlink_args *);
static int devfs_print(struct vop_print_args *);

static int devfs_nresolve(struct vop_nresolve_args *);
static int devfs_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_nmkdir(struct vop_nmkdir_args *);
static int devfs_nsymlink(struct vop_nsymlink_args *);
static int devfs_nrmdir(struct vop_nrmdir_args *);
static int devfs_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);


static int devfs_specf_close(struct file *);
static int devfs_specf_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_specf_stat(struct file *, struct stat *, struct ucred *);
static int devfs_specf_kqfilter(struct file *, struct knote *);
static int devfs_specf_ioctl(struct file *, u_long, caddr_t,
			     struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;

static int mpsafe_reads, mpsafe_writes, mplock_reads, mplock_writes;

/*
 * devfs vnode operations for regular files
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_getattr,
	.vop_inactive =		devfs_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_nresolve,
	.vop_nlookupdotdot =	devfs_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		devfs_nmkdir,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		devfs_nrmdir,
	.vop_nsymlink =		devfs_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_readdir,
	.vop_readlink =		devfs_readlink,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_print,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reclaim =		devfs_reclaim,
	.vop_setattr =		devfs_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};

struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;
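
/*
 * Per-open file operations for device special files.  devfs_spec_open()
 * points fp->f_ops at this table, so subsequent read/write/ioctl/kqfilter
 * calls on the descriptor go through these routines and straight to the
 * driver instead of re-entering the VOP layer.  The read and write paths
 * only take the MP lock for drivers that are not flagged D_MPSAFE_READ /
 * D_MPSAFE_WRITE; the mpsafe/mplock counters they maintain are exported
 * through the vfs.devfs sysctls at the bottom of this file.
 */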
struct fileops devfs_dev_fileops = {
	.fo_read	= devfs_specf_read,
	.fo_write	= devfs_specf_write,
	.fo_ioctl	= devfs_specf_ioctl,
	.fo_kqfilter	= devfs_specf_kqfilter,
	.fo_stat	= devfs_specf_stat,
	.fo_close	= devfs_specf_close,
	.fo_shutdown	= nofo_shutdown
};

/*
 * These two functions are possibly temporary hacks for
 * devices (aka the pty code) which want to control the
 * node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}

/*
 * generic entry point for unsupported operations
 */
static int
devfs_badop(struct vop_generic_args *ap)
{
	return (EIO);
}


static int
devfs_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}


static int
devfs_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}


static int
devfs_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if the devfs lock is already held exclusively by us.
	 * If not, acquire it.
	 */
	if (lockstatus(&devfs_lock, curthread) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}


static int
devfs_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	if ((error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY)) != 0)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Plink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}
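
/*
 * Name resolution within a devfs directory.  Alias nodes (Plink) are
 * chased to their targets before a vnode is handed out; the chase is
 * capped at a depth of 8 to stop recursive or overly long link chains,
 * and a hidden target resolves as if the entry did not exist.
 */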
static int
devfs_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Plink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;

	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}


static int
devfs_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Pdev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}


static int
devfs_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if (vap->va_uid != (uid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->uid = vap->va_uid;
	}

	if (vap->va_gid != (gid_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != node->uid) &&
		    (!groupmember(node->gid, ap->a_cred))) {
			error = priv_check(curthread, PRIV_VFS_CHOWN);
			if (error)
				goto out;
		}
		node->gid = vap->va_gid;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (ap->a_cred->cr_uid != node->uid) {
			error = priv_check(curthread, PRIV_VFS_ADMIN);
			if (error)
				goto out;
		}
		node->mode = vap->va_mode;
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


static int
devfs_print(struct vop_print_args *ap)
{
	return (0);
}

static int
devfs_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Pdir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
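
/*
 * Note that nodes created from userland (mkdir/symlink inside the devfs
 * mount) are flagged DEVFS_USER_CREATED; devfs_nrmdir() and
 * devfs_nremove() further down only permit removal of nodes carrying
 * that flag.
 */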
static int
devfs_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Plink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

static int
devfs_nrmdir(struct vop_nrmdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created dirs
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type != Pdir) {
			error = ENOTDIR;
			goto out;
		} else if (node->nchildren > 2) {
			error = ENOTEMPTY;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

static int
devfs_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Proot) && (dnode->node_type != Pdir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type == Pdir) {
			error = EISDIR;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_setunresolved(ap->a_nch);
	cache_setvp(ap->a_nch, NULL);

out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	if (node && ap->a_fp) {
		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
				DEVFS_MNTDATA(vp->v_mount)->root_node,
				ndev, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Proot) ?
					     "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s!\n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		dev->si_iosize_max = DFLTPHYS;

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks whether a disk device is being opened for writing.
	 * That is only allowed when securelevel permits it and the device
	 * is not mounted read/write.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {

		/* Very secure mode.  No open for writing allowed. */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow the open for writing.
		 * If it is mounted read-only but securelevel is >= 1, do
		 * not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	if (orig_vp)
		vn_unlock(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		ap->a_fp->f_type = DTYPE_VNODE;
		ap->a_fp->f_flag = ap->a_mode & FMASK;
		ap->a_fp->f_ops = &devfs_dev_fileops;
		ap->a_fp->f_data = vp;
	}

	return 0;
}


static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_close() called on %s!\n",
		    dev->si_name);

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Unlock around dev_dclose()
		 */
		needrelock = 0;
		if (vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR);

		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed
		 */
		if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
			node->flags |= DEVFS_INVISIBLE;

		if (needrelock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}


static int
devfs_specf_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag);
	rel_mplock();

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_READ)) {
		atomic_add_int(&mplock_reads, 1);
		get_mplock();
	} else {
		atomic_add_int(&mpsafe_reads, 1);
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	if (!(dev_dflags(dev) & D_MPSAFE_READ))
		rel_mplock();

	return (error);
}


static int
devfs_specf_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_WRITE)) {
		atomic_add_int(&mplock_writes, 1);
		get_mplock();
	} else {
		atomic_add_int(&mpsafe_writes, 1);
	}

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	if (!(dev_dflags(dev) & D_MPSAFE_WRITE))
		rel_mplock();
	return (error);
}


static int
devfs_specf_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;

	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_bytes;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last
	 * modified time independently of the filesystem.  This is
	 * particularly true because device read and write calls may
	 * bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	return (0);
}


static int
devfs_specf_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	get_mplock();

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	error = dev_dkqfilter(dev, kn);

	release_dev(dev);

done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_specf_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	reference_dev(dev);

	node = DEVFS_NODE(vp);

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_specf_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	/* only acquire mplock for devices that require it */
	if (!(dev_dflags(dev) & D_MPSAFE_IOCTL))
		get_mplock();

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	if (!(dev_dflags(dev) & D_MPSAFE_IOCTL))
		rel_mplock();

	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_specf_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_specf_ioctl() finished!\n");
	return (error);
}
static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
	node = DEVFS_NODE(vp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct devfs_node *node;
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked (EBADF) */
	node = DEVFS_NODE(vp);

#if 0
	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn));
}
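
/*
 * Illustrative note (the numbers are examples, not taken from the code
 * below): a 1 MB write submitted via VOP_STRATEGY against a device whose
 * si_iosize_max is 128 KB cannot be handed to the driver in one piece.
 * devfs_spec_strategy() clones the buffer, issues the first 128 KB chunk,
 * and devfs_spec_strategy_done() re-arms the cloned buffer for the next
 * chunk on every completion until the whole transfer is done or an error
 * or short transfer terminates the chain.
 */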
1640 * 1641 * spec_strategy(struct vnode *a_vp, struct bio *a_bio) 1642 */ 1643 static int 1644 devfs_spec_strategy(struct vop_strategy_args *ap) 1645 { 1646 struct bio *bio = ap->a_bio; 1647 struct buf *bp = bio->bio_buf; 1648 struct buf *nbp; 1649 struct vnode *vp; 1650 struct mount *mp; 1651 int chunksize; 1652 int maxiosize; 1653 1654 if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL) 1655 buf_start(bp); 1656 1657 /* 1658 * Collect statistics on synchronous and asynchronous read 1659 * and write counts for disks that have associated filesystems. 1660 */ 1661 vp = ap->a_vp; 1662 KKASSERT(vp->v_rdev != NULL); /* XXX */ 1663 if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) { 1664 if (bp->b_cmd == BUF_CMD_READ) { 1665 if (bp->b_flags & BIO_SYNC) 1666 mp->mnt_stat.f_syncreads++; 1667 else 1668 mp->mnt_stat.f_asyncreads++; 1669 } else { 1670 if (bp->b_flags & BIO_SYNC) 1671 mp->mnt_stat.f_syncwrites++; 1672 else 1673 mp->mnt_stat.f_asyncwrites++; 1674 } 1675 } 1676 1677 /* 1678 * Device iosize limitations only apply to read and write. Shortcut 1679 * the I/O if it fits. 1680 */ 1681 if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) { 1682 devfs_debug(DEVFS_DEBUG_DEBUG, 1683 "%s: si_iosize_max not set!\n", 1684 dev_dname(vp->v_rdev)); 1685 maxiosize = MAXPHYS; 1686 } 1687 #if SPEC_CHAIN_DEBUG & 2 1688 maxiosize = 4096; 1689 #endif 1690 if (bp->b_bcount <= maxiosize || 1691 (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) { 1692 dev_dstrategy_chain(vp->v_rdev, bio); 1693 return (0); 1694 } 1695 1696 /* 1697 * Clone the buffer and set up an I/O chain to chunk up the I/O. 1698 */ 1699 nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO); 1700 initbufbio(nbp); 1701 buf_dep_init(nbp); 1702 BUF_LOCKINIT(nbp); 1703 BUF_LOCK(nbp, LK_EXCLUSIVE); 1704 BUF_KERNPROC(nbp); 1705 nbp->b_vp = vp; 1706 nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP); 1707 nbp->b_data = bp->b_data; 1708 nbp->b_bio1.bio_done = devfs_spec_strategy_done; 1709 nbp->b_bio1.bio_offset = bio->bio_offset; 1710 nbp->b_bio1.bio_caller_info1.ptr = bio; 1711 1712 /* 1713 * Start the first transfer 1714 */ 1715 if (vn_isdisk(vp, NULL)) 1716 chunksize = vp->v_rdev->si_bsize_phys; 1717 else 1718 chunksize = DEV_BSIZE; 1719 chunksize = maxiosize / chunksize * chunksize; 1720 #if SPEC_CHAIN_DEBUG & 1 1721 devfs_debug(DEVFS_DEBUG_DEBUG, 1722 "spec_strategy chained I/O chunksize=%d\n", 1723 chunksize); 1724 #endif 1725 nbp->b_cmd = bp->b_cmd; 1726 nbp->b_bcount = chunksize; 1727 nbp->b_bufsize = chunksize; /* used to detect a short I/O */ 1728 nbp->b_bio1.bio_caller_info2.index = chunksize; 1729 1730 #if SPEC_CHAIN_DEBUG & 1 1731 devfs_debug(DEVFS_DEBUG_DEBUG, 1732 "spec_strategy: chain %p offset %d/%d bcount %d\n", 1733 bp, 0, bp->b_bcount, nbp->b_bcount); 1734 #endif 1735 1736 dev_dstrategy(vp->v_rdev, &nbp->b_bio1); 1737 1738 if (DEVFS_NODE(vp)) { 1739 nanotime(&DEVFS_NODE(vp)->atime); 1740 nanotime(&DEVFS_NODE(vp)->mtime); 1741 } 1742 1743 return (0); 1744 } 1745 1746 /* 1747 * Chunked up transfer completion routine - chain transfers until done 1748 */ 1749 static 1750 void 1751 devfs_spec_strategy_done(struct bio *nbio) 1752 { 1753 struct buf *nbp = nbio->bio_buf; 1754 struct bio *bio = nbio->bio_caller_info1.ptr; /* original bio */ 1755 struct buf *bp = bio->bio_buf; /* original bp */ 1756 int chunksize = nbio->bio_caller_info2.index; /* chunking */ 1757 int boffset = nbp->b_data - bp->b_data; 1758 1759 if (nbp->b_flags & B_ERROR) { 1760 /* 1761 * An error terminates the chain, propogate 
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
		kfree(nbp, M_DEVBUF);
		biodone(bio);
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
	}
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((dev_dflags(ap->a_vp->v_rdev) & D_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}


/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = (ap->a_count + blksiz - 1) & ~(blksiz - 1);

	bp = getpbuf(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bp->b_runningbufspace = size;
	if (size) {
		runningbufspace += bp->b_runningbufspace;
		++runningbufcount;
	}

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_WANTED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "spec_getpages:(%s) I/O read failure: "
			    "(error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "  size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
			    "  nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}
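
/*
 * Worked example for the heuristic below (the constants come from the
 * system headers, so the exact figures are illustrative): a process
 * reading a device sequentially in 64KB chunks adds roughly
 * 64KB / BKVASIZE to f_seqcount on each call, clamped at IO_SEQMAX; the
 * return value, f_seqcount << IO_SEQSHIFT, is merged into ioflag so the
 * driver can gauge how sequential the access pattern is.  A transfer
 * that does not start at f_nextoff quickly drains f_seqcount again.
 */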
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

extern SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "devfs");

SYSCTL_INT(_vfs_devfs, OID_AUTO, mpsafe_writes, CTLFLAG_RD, &mpsafe_writes,
	   0, "mpsafe writes");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mplock_writes, CTLFLAG_RD, &mplock_writes,
	   0, "non-mpsafe writes");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mpsafe_reads, CTLFLAG_RD, &mpsafe_reads,
	   0, "mpsafe reads");
SYSCTL_INT(_vfs_devfs, OID_AUTO, mplock_reads, CTLFLAG_RD, &mplock_reads,
	   0, "non-mpsafe reads");