1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Alex Hornung <ahornung@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/time.h> 39 #include <sys/kernel.h> 40 #include <sys/lock.h> 41 #include <sys/fcntl.h> 42 #include <sys/proc.h> 43 #include <sys/priv.h> 44 #include <sys/signalvar.h> 45 #include <sys/vnode.h> 46 #include <sys/uio.h> 47 #include <sys/mount.h> 48 #include <sys/file.h> 49 #include <sys/namei.h> 50 #include <sys/dirent.h> 51 #include <sys/malloc.h> 52 #include <sys/stat.h> 53 #include <sys/reg.h> 54 #include <vm/vm_pager.h> 55 #include <vm/vm_zone.h> 56 #include <vm/vm_object.h> 57 #include <sys/filio.h> 58 #include <sys/ttycom.h> 59 #include <sys/tty.h> 60 #include <sys/diskslice.h> 61 #include <sys/sysctl.h> 62 #include <sys/devfs.h> 63 #include <sys/pioctl.h> 64 #include <vfs/fifofs/fifo.h> 65 66 #include <machine/limits.h> 67 68 #include <sys/buf2.h> 69 #include <sys/sysref2.h> 70 #include <sys/mplock2.h> 71 #include <vm/vm_page2.h> 72 73 #ifndef SPEC_CHAIN_DEBUG 74 #define SPEC_CHAIN_DEBUG 0 75 #endif 76 77 MALLOC_DECLARE(M_DEVFS); 78 #define DEVFS_BADOP (void *)devfs_vop_badop 79 80 static int devfs_vop_badop(struct vop_generic_args *); 81 static int devfs_vop_access(struct vop_access_args *); 82 static int devfs_vop_inactive(struct vop_inactive_args *); 83 static int devfs_vop_reclaim(struct vop_reclaim_args *); 84 static int devfs_vop_readdir(struct vop_readdir_args *); 85 static int devfs_vop_getattr(struct vop_getattr_args *); 86 static int devfs_vop_setattr(struct vop_setattr_args *); 87 static int devfs_vop_readlink(struct vop_readlink_args *); 88 static int devfs_vop_print(struct vop_print_args *); 89 90 static int devfs_vop_nresolve(struct vop_nresolve_args *); 91 static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *); 92 static int devfs_vop_nmkdir(struct 
vop_nmkdir_args *); 93 static int devfs_vop_nsymlink(struct vop_nsymlink_args *); 94 static int devfs_vop_nrmdir(struct vop_nrmdir_args *); 95 static int devfs_vop_nremove(struct vop_nremove_args *); 96 97 static int devfs_spec_open(struct vop_open_args *); 98 static int devfs_spec_close(struct vop_close_args *); 99 static int devfs_spec_fsync(struct vop_fsync_args *); 100 101 static int devfs_spec_read(struct vop_read_args *); 102 static int devfs_spec_write(struct vop_write_args *); 103 static int devfs_spec_ioctl(struct vop_ioctl_args *); 104 static int devfs_spec_kqfilter(struct vop_kqfilter_args *); 105 static int devfs_spec_strategy(struct vop_strategy_args *); 106 static void devfs_spec_strategy_done(struct bio *); 107 static int devfs_spec_freeblks(struct vop_freeblks_args *); 108 static int devfs_spec_bmap(struct vop_bmap_args *); 109 static int devfs_spec_advlock(struct vop_advlock_args *); 110 static void devfs_spec_getpages_iodone(struct bio *); 111 static int devfs_spec_getpages(struct vop_getpages_args *); 112 113 static int devfs_fo_close(struct file *); 114 static int devfs_fo_read(struct file *, struct uio *, struct ucred *, int); 115 static int devfs_fo_write(struct file *, struct uio *, struct ucred *, int); 116 static int devfs_fo_stat(struct file *, struct stat *, struct ucred *); 117 static int devfs_fo_kqfilter(struct file *, struct knote *); 118 static int devfs_fo_ioctl(struct file *, u_long, caddr_t, 119 struct ucred *, struct sysmsg *); 120 static __inline int sequential_heuristic(struct uio *, struct file *); 121 122 extern struct lock devfs_lock; 123 124 /* 125 * devfs vnode operations for regular files. All vnode ops are MPSAFE. 126 */ 127 struct vop_ops devfs_vnode_norm_vops = { 128 .vop_default = vop_defaultop, 129 .vop_access = devfs_vop_access, 130 .vop_advlock = DEVFS_BADOP, 131 .vop_bmap = DEVFS_BADOP, 132 .vop_close = vop_stdclose, 133 .vop_getattr = devfs_vop_getattr, 134 .vop_inactive = devfs_vop_inactive, 135 .vop_ncreate = DEVFS_BADOP, 136 .vop_nresolve = devfs_vop_nresolve, 137 .vop_nlookupdotdot = devfs_vop_nlookupdotdot, 138 .vop_nlink = DEVFS_BADOP, 139 .vop_nmkdir = devfs_vop_nmkdir, 140 .vop_nmknod = DEVFS_BADOP, 141 .vop_nremove = devfs_vop_nremove, 142 .vop_nrename = DEVFS_BADOP, 143 .vop_nrmdir = devfs_vop_nrmdir, 144 .vop_nsymlink = devfs_vop_nsymlink, 145 .vop_open = vop_stdopen, 146 .vop_pathconf = vop_stdpathconf, 147 .vop_print = devfs_vop_print, 148 .vop_read = DEVFS_BADOP, 149 .vop_readdir = devfs_vop_readdir, 150 .vop_readlink = devfs_vop_readlink, 151 .vop_reclaim = devfs_vop_reclaim, 152 .vop_setattr = devfs_vop_setattr, 153 .vop_write = DEVFS_BADOP, 154 .vop_ioctl = DEVFS_BADOP 155 }; 156 157 /* 158 * devfs vnode operations for character devices. All vnode ops are MPSAFE. 
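 *
 * Operations that make no sense for a device node (readdir, readlink,
 * etc.) are wired to DEVFS_BADOP, i.e. devfs_vop_badop(), and simply fail
 * with EIO.  The devfs_spec_*() entry points below translate the remaining
 * VOPs into dev_d*() calls on the backing cdev.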
159 */ 160 struct vop_ops devfs_vnode_dev_vops = { 161 .vop_default = vop_defaultop, 162 .vop_access = devfs_vop_access, 163 .vop_advlock = devfs_spec_advlock, 164 .vop_bmap = devfs_spec_bmap, 165 .vop_close = devfs_spec_close, 166 .vop_freeblks = devfs_spec_freeblks, 167 .vop_fsync = devfs_spec_fsync, 168 .vop_getattr = devfs_vop_getattr, 169 .vop_getpages = devfs_spec_getpages, 170 .vop_inactive = devfs_vop_inactive, 171 .vop_open = devfs_spec_open, 172 .vop_pathconf = vop_stdpathconf, 173 .vop_print = devfs_vop_print, 174 .vop_kqfilter = devfs_spec_kqfilter, 175 .vop_read = devfs_spec_read, 176 .vop_readdir = DEVFS_BADOP, 177 .vop_readlink = DEVFS_BADOP, 178 .vop_reclaim = devfs_vop_reclaim, 179 .vop_setattr = devfs_vop_setattr, 180 .vop_strategy = devfs_spec_strategy, 181 .vop_write = devfs_spec_write, 182 .vop_ioctl = devfs_spec_ioctl 183 }; 184 185 /* 186 * devfs file pointer operations. All fileops are MPSAFE. 187 */ 188 struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops; 189 190 struct fileops devfs_dev_fileops = { 191 .fo_read = devfs_fo_read, 192 .fo_write = devfs_fo_write, 193 .fo_ioctl = devfs_fo_ioctl, 194 .fo_kqfilter = devfs_fo_kqfilter, 195 .fo_stat = devfs_fo_stat, 196 .fo_close = devfs_fo_close, 197 .fo_shutdown = nofo_shutdown 198 }; 199 200 /* 201 * These two functions are possibly temporary hacks for devices (aka 202 * the pty code) which want to control the node attributes themselves. 203 * 204 * XXX we may ultimately desire to simply remove the uid/gid/mode 205 * from the node entirely. 206 * 207 * MPSAFE - sorta. Theoretically the overwrite can compete since they 208 * are loading from the same fields. 209 */ 210 static __inline void 211 node_sync_dev_get(struct devfs_node *node) 212 { 213 cdev_t dev; 214 215 if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) { 216 node->uid = dev->si_uid; 217 node->gid = dev->si_gid; 218 node->mode = dev->si_perms; 219 } 220 } 221 222 static __inline void 223 node_sync_dev_set(struct devfs_node *node) 224 { 225 cdev_t dev; 226 227 if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) { 228 dev->si_uid = node->uid; 229 dev->si_gid = node->gid; 230 dev->si_perms = node->mode; 231 } 232 } 233 234 /* 235 * generic entry point for unsupported operations 236 */ 237 static int 238 devfs_vop_badop(struct vop_generic_args *ap) 239 { 240 return (EIO); 241 } 242 243 244 static int 245 devfs_vop_access(struct vop_access_args *ap) 246 { 247 struct devfs_node *node = DEVFS_NODE(ap->a_vp); 248 int error; 249 250 if (!devfs_node_is_accessible(node)) 251 return ENOENT; 252 node_sync_dev_get(node); 253 error = vop_helper_access(ap, node->uid, node->gid, 254 node->mode, node->flags); 255 256 return error; 257 } 258 259 260 static int 261 devfs_vop_inactive(struct vop_inactive_args *ap) 262 { 263 struct devfs_node *node = DEVFS_NODE(ap->a_vp); 264 265 if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0) 266 vrecycle(ap->a_vp); 267 return 0; 268 } 269 270 271 static int 272 devfs_vop_reclaim(struct vop_reclaim_args *ap) 273 { 274 struct devfs_node *node; 275 struct vnode *vp; 276 int locked; 277 278 /* 279 * Check if it is locked already. if not, we acquire the devfs lock 280 */ 281 if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) { 282 lockmgr(&devfs_lock, LK_EXCLUSIVE); 283 locked = 1; 284 } else { 285 locked = 0; 286 } 287 288 /* 289 * Get rid of the devfs_node if it is no longer linked into the 290 * topology. 
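	 * The node's v_node back-pointer is cleared unconditionally so the
	 * vnode and the node are fully dissociated; the node itself is only
	 * freed here when DEVFS_NODE_LINKED is no longer set.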
291 */ 292 vp = ap->a_vp; 293 if ((node = DEVFS_NODE(vp)) != NULL) { 294 node->v_node = NULL; 295 if ((node->flags & DEVFS_NODE_LINKED) == 0) 296 devfs_freep(node); 297 } 298 299 if (locked) 300 lockmgr(&devfs_lock, LK_RELEASE); 301 302 /* 303 * v_rdev needs to be properly released using v_release_rdev 304 * Make sure v_data is NULL as well. 305 */ 306 vp->v_data = NULL; 307 v_release_rdev(vp); 308 return 0; 309 } 310 311 312 static int 313 devfs_vop_readdir(struct vop_readdir_args *ap) 314 { 315 struct devfs_node *dnode = DEVFS_NODE(ap->a_vp); 316 struct devfs_node *node; 317 int cookie_index; 318 int ncookies; 319 int error2; 320 int error; 321 int r; 322 off_t *cookies; 323 off_t saveoff; 324 325 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n"); 326 327 if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX) 328 return (EINVAL); 329 error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM); 330 if (error) 331 return (error); 332 333 if (!devfs_node_is_accessible(dnode)) { 334 vn_unlock(ap->a_vp); 335 return ENOENT; 336 } 337 338 lockmgr(&devfs_lock, LK_EXCLUSIVE); 339 340 saveoff = ap->a_uio->uio_offset; 341 342 if (ap->a_ncookies) { 343 ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */ 344 if (ncookies > 256) 345 ncookies = 256; 346 cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK); 347 cookie_index = 0; 348 } else { 349 ncookies = -1; 350 cookies = NULL; 351 cookie_index = 0; 352 } 353 354 nanotime(&dnode->atime); 355 356 if (saveoff == 0) { 357 r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino, 358 DT_DIR, 1, "."); 359 if (r) 360 goto done; 361 if (cookies) 362 cookies[cookie_index] = saveoff; 363 saveoff++; 364 cookie_index++; 365 if (cookie_index == ncookies) 366 goto done; 367 } 368 369 if (saveoff == 1) { 370 if (dnode->parent) { 371 r = vop_write_dirent(&error, ap->a_uio, 372 dnode->parent->d_dir.d_ino, 373 DT_DIR, 2, ".."); 374 } else { 375 r = vop_write_dirent(&error, ap->a_uio, 376 dnode->d_dir.d_ino, 377 DT_DIR, 2, ".."); 378 } 379 if (r) 380 goto done; 381 if (cookies) 382 cookies[cookie_index] = saveoff; 383 saveoff++; 384 cookie_index++; 385 if (cookie_index == ncookies) 386 goto done; 387 } 388 389 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 390 if ((node->flags & DEVFS_HIDDEN) || 391 (node->flags & DEVFS_INVISIBLE)) { 392 continue; 393 } 394 395 /* 396 * If the node type is a valid devfs alias, then we make 397 * sure that the target isn't hidden. If it is, we don't 398 * show the link in the directory listing. 
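		 *
		 * Each entry is emitted with the node's own cookie; entries
		 * whose cookie is below the current offset are skipped,
		 * which is what lets a readdir resume mid-directory.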
399 */ 400 if ((node->node_type == Nlink) && (node->link_target != NULL) && 401 (node->link_target->flags & DEVFS_HIDDEN)) 402 continue; 403 404 if (node->cookie < saveoff) 405 continue; 406 407 saveoff = node->cookie; 408 409 error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino, 410 node->d_dir.d_type, 411 node->d_dir.d_namlen, 412 node->d_dir.d_name); 413 414 if (error2) 415 break; 416 417 saveoff++; 418 419 if (cookies) 420 cookies[cookie_index] = node->cookie; 421 ++cookie_index; 422 if (cookie_index == ncookies) 423 break; 424 } 425 426 done: 427 lockmgr(&devfs_lock, LK_RELEASE); 428 vn_unlock(ap->a_vp); 429 430 ap->a_uio->uio_offset = saveoff; 431 if (error && cookie_index == 0) { 432 if (cookies) { 433 kfree(cookies, M_TEMP); 434 *ap->a_ncookies = 0; 435 *ap->a_cookies = NULL; 436 } 437 } else { 438 if (cookies) { 439 *ap->a_ncookies = cookie_index; 440 *ap->a_cookies = cookies; 441 } 442 } 443 return (error); 444 } 445 446 447 static int 448 devfs_vop_nresolve(struct vop_nresolve_args *ap) 449 { 450 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 451 struct devfs_node *node, *found = NULL; 452 struct namecache *ncp; 453 struct vnode *vp = NULL; 454 int error = 0; 455 int len; 456 int depth; 457 458 ncp = ap->a_nch->ncp; 459 len = ncp->nc_nlen; 460 461 if (!devfs_node_is_accessible(dnode)) 462 return ENOENT; 463 464 lockmgr(&devfs_lock, LK_EXCLUSIVE); 465 466 if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) { 467 error = ENOENT; 468 cache_setvp(ap->a_nch, NULL); 469 goto out; 470 } 471 472 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 473 if (len == node->d_dir.d_namlen) { 474 if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) { 475 found = node; 476 break; 477 } 478 } 479 } 480 481 if (found) { 482 depth = 0; 483 while ((found->node_type == Nlink) && (found->link_target)) { 484 if (depth >= 8) { 485 devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8"); 486 break; 487 } 488 489 found = found->link_target; 490 ++depth; 491 } 492 493 if (!(found->flags & DEVFS_HIDDEN)) 494 devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found); 495 } 496 497 if (vp == NULL) { 498 error = ENOENT; 499 cache_setvp(ap->a_nch, NULL); 500 goto out; 501 502 } 503 KKASSERT(vp); 504 vn_unlock(vp); 505 cache_setvp(ap->a_nch, vp); 506 vrele(vp); 507 out: 508 lockmgr(&devfs_lock, LK_RELEASE); 509 510 return error; 511 } 512 513 514 static int 515 devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 516 { 517 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 518 519 *ap->a_vpp = NULL; 520 if (!devfs_node_is_accessible(dnode)) 521 return ENOENT; 522 523 lockmgr(&devfs_lock, LK_EXCLUSIVE); 524 if (dnode->parent != NULL) { 525 devfs_allocv(ap->a_vpp, dnode->parent); 526 vn_unlock(*ap->a_vpp); 527 } 528 lockmgr(&devfs_lock, LK_RELEASE); 529 530 return ((*ap->a_vpp == NULL) ? 
ENOENT : 0); 531 } 532 533 534 static int 535 devfs_vop_getattr(struct vop_getattr_args *ap) 536 { 537 struct devfs_node *node = DEVFS_NODE(ap->a_vp); 538 struct vattr *vap = ap->a_vap; 539 struct partinfo pinfo; 540 int error = 0; 541 542 #if 0 543 if (!devfs_node_is_accessible(node)) 544 return ENOENT; 545 #endif 546 node_sync_dev_get(node); 547 548 lockmgr(&devfs_lock, LK_EXCLUSIVE); 549 550 /* start by zeroing out the attributes */ 551 VATTR_NULL(vap); 552 553 /* next do all the common fields */ 554 vap->va_type = ap->a_vp->v_type; 555 vap->va_mode = node->mode; 556 vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ; 557 vap->va_flags = 0; 558 vap->va_blocksize = DEV_BSIZE; 559 vap->va_bytes = vap->va_size = 0; 560 561 vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; 562 563 vap->va_atime = node->atime; 564 vap->va_mtime = node->mtime; 565 vap->va_ctime = node->ctime; 566 567 vap->va_nlink = 1; /* number of references to file */ 568 569 vap->va_uid = node->uid; 570 vap->va_gid = node->gid; 571 572 vap->va_rmajor = 0; 573 vap->va_rminor = 0; 574 575 if ((node->node_type == Ndev) && node->d_dev) { 576 reference_dev(node->d_dev); 577 vap->va_rminor = node->d_dev->si_uminor; 578 release_dev(node->d_dev); 579 } 580 581 /* For a softlink the va_size is the length of the softlink */ 582 if (node->symlink_name != 0) { 583 vap->va_bytes = vap->va_size = node->symlink_namelen; 584 } 585 586 /* 587 * For a disk-type device, va_size is the size of the underlying 588 * device, so that lseek() works properly. 589 */ 590 if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) { 591 bzero(&pinfo, sizeof(pinfo)); 592 error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo, 593 0, proc0.p_ucred, NULL, NULL); 594 if ((error == 0) && (pinfo.media_blksize != 0)) { 595 vap->va_size = pinfo.media_size; 596 } else { 597 vap->va_size = 0; 598 error = 0; 599 } 600 } 601 602 lockmgr(&devfs_lock, LK_RELEASE); 603 604 return (error); 605 } 606 607 608 static int 609 devfs_vop_setattr(struct vop_setattr_args *ap) 610 { 611 struct devfs_node *node = DEVFS_NODE(ap->a_vp); 612 struct vattr *vap; 613 uid_t cur_uid; 614 gid_t cur_gid; 615 mode_t cur_mode; 616 int error = 0; 617 618 if (!devfs_node_is_accessible(node)) 619 return ENOENT; 620 node_sync_dev_get(node); 621 622 lockmgr(&devfs_lock, LK_EXCLUSIVE); 623 624 vap = ap->a_vap; 625 626 if ((vap->va_uid != (uid_t)VNOVAL) || (vap->va_gid != (gid_t)VNOVAL)) { 627 cur_uid = node->uid; 628 cur_gid = node->gid; 629 cur_mode = node->mode; 630 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 631 ap->a_cred, &cur_uid, &cur_gid, &cur_mode); 632 if (error) 633 goto out; 634 635 if (node->uid != cur_uid || node->gid != cur_gid) { 636 node->uid = cur_uid; 637 node->gid = cur_gid; 638 node->mode = cur_mode; 639 } 640 } 641 642 if (vap->va_mode != (mode_t)VNOVAL) { 643 cur_mode = node->mode; 644 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 645 node->uid, node->gid, &cur_mode); 646 if (error == 0 && node->mode != cur_mode) { 647 node->mode = cur_mode; 648 } 649 } 650 651 out: 652 node_sync_dev_set(node); 653 nanotime(&node->ctime); 654 lockmgr(&devfs_lock, LK_RELEASE); 655 656 return error; 657 } 658 659 660 static int 661 devfs_vop_readlink(struct vop_readlink_args *ap) 662 { 663 struct devfs_node *node = DEVFS_NODE(ap->a_vp); 664 int ret; 665 666 if (!devfs_node_is_accessible(node)) 667 return ENOENT; 668 669 lockmgr(&devfs_lock, LK_EXCLUSIVE); 670 ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio); 671 
lockmgr(&devfs_lock, LK_RELEASE); 672 673 return ret; 674 } 675 676 677 static int 678 devfs_vop_print(struct vop_print_args *ap) 679 { 680 return (0); 681 } 682 683 static int 684 devfs_vop_nmkdir(struct vop_nmkdir_args *ap) 685 { 686 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 687 struct devfs_node *node; 688 689 if (!devfs_node_is_accessible(dnode)) 690 return ENOENT; 691 692 if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) 693 goto out; 694 695 lockmgr(&devfs_lock, LK_EXCLUSIVE); 696 devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir, 697 ap->a_nch->ncp->nc_name, dnode, NULL); 698 699 if (*ap->a_vpp) { 700 node = DEVFS_NODE(*ap->a_vpp); 701 node->flags |= DEVFS_USER_CREATED; 702 cache_setunresolved(ap->a_nch); 703 cache_setvp(ap->a_nch, *ap->a_vpp); 704 } 705 lockmgr(&devfs_lock, LK_RELEASE); 706 out: 707 return ((*ap->a_vpp == NULL) ? ENOTDIR : 0); 708 } 709 710 static int 711 devfs_vop_nsymlink(struct vop_nsymlink_args *ap) 712 { 713 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 714 struct devfs_node *node; 715 size_t targetlen; 716 717 if (!devfs_node_is_accessible(dnode)) 718 return ENOENT; 719 720 ap->a_vap->va_type = VLNK; 721 722 if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) 723 goto out; 724 725 lockmgr(&devfs_lock, LK_EXCLUSIVE); 726 devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink, 727 ap->a_nch->ncp->nc_name, dnode, NULL); 728 729 targetlen = strlen(ap->a_target); 730 if (*ap->a_vpp) { 731 node = DEVFS_NODE(*ap->a_vpp); 732 node->flags |= DEVFS_USER_CREATED; 733 node->symlink_namelen = targetlen; 734 node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK); 735 memcpy(node->symlink_name, ap->a_target, targetlen); 736 node->symlink_name[targetlen] = '\0'; 737 cache_setunresolved(ap->a_nch); 738 cache_setvp(ap->a_nch, *ap->a_vpp); 739 } 740 lockmgr(&devfs_lock, LK_RELEASE); 741 out: 742 return ((*ap->a_vpp == NULL) ? 
ENOTDIR : 0); 743 } 744 745 static int 746 devfs_vop_nrmdir(struct vop_nrmdir_args *ap) 747 { 748 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 749 struct devfs_node *node; 750 struct namecache *ncp; 751 int error = ENOENT; 752 753 ncp = ap->a_nch->ncp; 754 755 if (!devfs_node_is_accessible(dnode)) 756 return ENOENT; 757 758 lockmgr(&devfs_lock, LK_EXCLUSIVE); 759 760 if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) 761 goto out; 762 763 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 764 if (ncp->nc_nlen != node->d_dir.d_namlen) 765 continue; 766 if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen)) 767 continue; 768 769 /* 770 * only allow removal of user created dirs 771 */ 772 if ((node->flags & DEVFS_USER_CREATED) == 0) { 773 error = EPERM; 774 goto out; 775 } else if (node->node_type != Ndir) { 776 error = ENOTDIR; 777 goto out; 778 } else if (node->nchildren > 2) { 779 error = ENOTEMPTY; 780 goto out; 781 } else { 782 if (node->v_node) 783 cache_inval_vp(node->v_node, CINV_DESTROY); 784 devfs_unlinkp(node); 785 error = 0; 786 break; 787 } 788 } 789 790 cache_unlink(ap->a_nch); 791 out: 792 lockmgr(&devfs_lock, LK_RELEASE); 793 return error; 794 } 795 796 static int 797 devfs_vop_nremove(struct vop_nremove_args *ap) 798 { 799 struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp); 800 struct devfs_node *node; 801 struct namecache *ncp; 802 int error = ENOENT; 803 804 ncp = ap->a_nch->ncp; 805 806 if (!devfs_node_is_accessible(dnode)) 807 return ENOENT; 808 809 lockmgr(&devfs_lock, LK_EXCLUSIVE); 810 811 if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) 812 goto out; 813 814 TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) { 815 if (ncp->nc_nlen != node->d_dir.d_namlen) 816 continue; 817 if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen)) 818 continue; 819 820 /* 821 * only allow removal of user created stuff (e.g. symlinks) 822 */ 823 if ((node->flags & DEVFS_USER_CREATED) == 0) { 824 error = EPERM; 825 goto out; 826 } else if (node->node_type == Ndir) { 827 error = EISDIR; 828 goto out; 829 } else { 830 if (node->v_node) 831 cache_inval_vp(node->v_node, CINV_DESTROY); 832 devfs_unlinkp(node); 833 error = 0; 834 break; 835 } 836 } 837 838 cache_unlink(ap->a_nch); 839 out: 840 lockmgr(&devfs_lock, LK_RELEASE); 841 return error; 842 } 843 844 845 static int 846 devfs_spec_open(struct vop_open_args *ap) 847 { 848 struct vnode *vp = ap->a_vp; 849 struct vnode *orig_vp = NULL; 850 struct devfs_node *node = DEVFS_NODE(vp); 851 struct devfs_node *newnode; 852 cdev_t dev, ndev = NULL; 853 int error = 0; 854 855 if (node) { 856 if (node->d_dev == NULL) 857 return ENXIO; 858 if (!devfs_node_is_accessible(node)) 859 return ENOENT; 860 } 861 862 if ((dev = vp->v_rdev) == NULL) 863 return ENXIO; 864 865 vn_lock(vp, LK_UPGRADE | LK_RETRY); 866 867 if (node && ap->a_fp) { 868 int exists; 869 870 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n"); 871 lockmgr(&devfs_lock, LK_EXCLUSIVE); 872 873 ndev = devfs_clone(dev, node->d_dir.d_name, 874 node->d_dir.d_namlen, 875 ap->a_mode, ap->a_cred); 876 if (ndev != NULL) { 877 newnode = devfs_create_device_node( 878 DEVFS_MNTDATA(vp->v_mount)->root_node, 879 ndev, &exists, NULL, NULL); 880 /* XXX: possibly destroy device if this happens */ 881 882 if (newnode != NULL) { 883 dev = ndev; 884 if (exists == 0) 885 devfs_link_dev(dev); 886 887 devfs_debug(DEVFS_DEBUG_DEBUG, 888 "parent here is: %s, node is: |%s|\n", 889 ((node->parent->node_type == Nroot) ? 890 "ROOT!" 
: node->parent->d_dir.d_name), 891 newnode->d_dir.d_name); 892 devfs_debug(DEVFS_DEBUG_DEBUG, 893 "test: %s\n", 894 ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name); 895 896 /* 897 * orig_vp is set to the original vp if we 898 * cloned. 899 */ 900 /* node->flags |= DEVFS_CLONED; */ 901 devfs_allocv(&vp, newnode); 902 orig_vp = ap->a_vp; 903 ap->a_vp = vp; 904 } 905 } 906 lockmgr(&devfs_lock, LK_RELEASE); 907 /* 908 * Synchronize devfs here to make sure that, if the cloned 909 * device creates other device nodes in addition to the 910 * cloned one, all of them are created by the time we return 911 * from opening the cloned one. 912 */ 913 if (ndev) 914 devfs_config(); 915 } 916 917 devfs_debug(DEVFS_DEBUG_DEBUG, 918 "devfs_spec_open() called on %s! \n", 919 dev->si_name); 920 921 /* 922 * Make this field valid before any I/O in ->d_open 923 */ 924 if (!dev->si_iosize_max) 925 /* XXX: old DFLTPHYS == 64KB dependency */ 926 dev->si_iosize_max = min(MAXPHYS,64*1024); 927 928 if (dev_dflags(dev) & D_TTY) 929 vsetflags(vp, VISTTY); 930 931 /* 932 * Open underlying device 933 */ 934 vn_unlock(vp); 935 error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp); 936 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 937 938 /* 939 * Clean up any cloned vp if we error out. 940 */ 941 if (error) { 942 if (orig_vp) { 943 vput(vp); 944 ap->a_vp = orig_vp; 945 /* orig_vp = NULL; */ 946 } 947 return error; 948 } 949 950 /* 951 * This checks if the disk device is going to be opened for writing. 952 * It will be only allowed in the cases where securelevel permits it 953 * and it's not mounted R/W. 954 */ 955 if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) && 956 (ap->a_cred != FSCRED)) { 957 958 /* Very secure mode. No open for writing allowed */ 959 if (securelevel >= 2) 960 return EPERM; 961 962 /* 963 * If it is mounted R/W, do not allow to open for writing. 964 * In the case it's mounted read-only but securelevel 965 * is >= 1, then do not allow opening for writing either. 966 */ 967 if (vfs_mountedon(vp)) { 968 if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY)) 969 return EBUSY; 970 else if (securelevel >= 1) 971 return EPERM; 972 } 973 } 974 975 if (dev_dflags(dev) & D_TTY) { 976 if (dev->si_tty) { 977 struct tty *tp; 978 tp = dev->si_tty; 979 if (!tp->t_stop) { 980 devfs_debug(DEVFS_DEBUG_DEBUG, 981 "devfs: no t_stop\n"); 982 tp->t_stop = nottystop; 983 } 984 } 985 } 986 987 988 if (vn_isdisk(vp, NULL)) { 989 if (!dev->si_bsize_phys) 990 dev->si_bsize_phys = DEV_BSIZE; 991 vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1); 992 } 993 994 vop_stdopen(ap); 995 #if 0 996 if (node) 997 nanotime(&node->atime); 998 #endif 999 1000 /* 1001 * If we replaced the vp the vop_stdopen() call will have loaded 1002 * it into fp->f_data and vref()d the vp, giving us two refs. So 1003 * instead of just unlocking it here we have to vput() it. 
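	 * vput() below both unlocks the cloned vnode and drops that extra
	 * reference; the reference held via fp->f_data keeps the vnode
	 * alive for the lifetime of the open file.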
1004 */ 1005 if (orig_vp) 1006 vput(vp); 1007 1008 /* Ugly pty magic, to make pty devices appear once they are opened */ 1009 if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY) 1010 node->flags &= ~DEVFS_INVISIBLE; 1011 1012 if (ap->a_fp) { 1013 KKASSERT(ap->a_fp->f_type == DTYPE_VNODE); 1014 KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK)); 1015 ap->a_fp->f_ops = &devfs_dev_fileops; 1016 KKASSERT(ap->a_fp->f_data == (void *)vp); 1017 } 1018 1019 return 0; 1020 } 1021 1022 static int 1023 devfs_spec_close(struct vop_close_args *ap) 1024 { 1025 struct devfs_node *node; 1026 struct proc *p = curproc; 1027 struct vnode *vp = ap->a_vp; 1028 cdev_t dev = vp->v_rdev; 1029 int error = 0; 1030 int needrelock; 1031 int opencount; 1032 1033 /* 1034 * We do special tests on the opencount so unfortunately we need 1035 * an exclusive lock. 1036 */ 1037 vn_lock(vp, LK_UPGRADE | LK_RETRY); 1038 1039 if (dev) 1040 devfs_debug(DEVFS_DEBUG_DEBUG, 1041 "devfs_spec_close() called on %s! \n", 1042 dev->si_name); 1043 else 1044 devfs_debug(DEVFS_DEBUG_DEBUG, 1045 "devfs_spec_close() called, null vode!\n"); 1046 1047 /* 1048 * A couple of hacks for devices and tty devices. The 1049 * vnode ref count cannot be used to figure out the 1050 * last close, but we can use v_opencount now that 1051 * revoke works properly. 1052 * 1053 * Detect the last close on a controlling terminal and clear 1054 * the session (half-close). 1055 * 1056 * XXX opencount is not SMP safe. The vnode is locked but there 1057 * may be multiple vnodes referencing the same device. 1058 */ 1059 if (dev) { 1060 /* 1061 * NOTE: Try to avoid global tokens when testing opencount 1062 * XXX hack, fixme. needs a struct lock and opencount in 1063 * struct cdev itself. 1064 */ 1065 reference_dev(dev); 1066 opencount = vp->v_opencount; 1067 if (opencount <= 1) 1068 opencount = count_dev(dev); /* XXX NOT SMP SAFE */ 1069 } else { 1070 opencount = 0; 1071 } 1072 1073 if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) { 1074 p->p_session->s_ttyvp = NULL; 1075 vrele(vp); 1076 } 1077 1078 /* 1079 * Vnodes can be opened and closed multiple times. Do not really 1080 * close the device unless (1) it is being closed forcibly, 1081 * (2) the device wants to track closes, or (3) this is the last 1082 * vnode doing its last close on the device. 1083 * 1084 * XXX the VXLOCK (force close) case can leave vnodes referencing 1085 * a closed device. This might not occur now that our revoke is 1086 * fixed. 1087 */ 1088 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n"); 1089 if (dev && ((vp->v_flag & VRECLAIMED) || 1090 (dev_dflags(dev) & D_TRACKCLOSE) || 1091 (opencount == 1))) { 1092 /* 1093 * Ugly pty magic, to make pty devices disappear again once 1094 * they are closed. 1095 */ 1096 node = DEVFS_NODE(ap->a_vp); 1097 if (node && (node->flags & DEVFS_PTY)) 1098 node->flags |= DEVFS_INVISIBLE; 1099 1100 /* 1101 * Unlock around dev_dclose(), unless the vnode is 1102 * undergoing a vgone/reclaim (during umount). 1103 */ 1104 needrelock = 0; 1105 if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) { 1106 needrelock = 1; 1107 vn_unlock(vp); 1108 } 1109 1110 /* 1111 * WARNING! If the device destroys itself the devfs node 1112 * can disappear here. 1113 * 1114 * WARNING! vn_lock() will fail if the vp is in a VRECLAIM, 1115 * which can occur during umount. 
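		 *
		 * WARNING! The relock below uses LK_FAILRECLAIM, so a
		 *	    failure to reacquire the vnode lock after
		 *	    dev_dclose() returns is treated as a panic
		 *	    condition.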
1116 */ 1117 error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp); 1118 /* node is now stale */ 1119 1120 if (needrelock) { 1121 if (vn_lock(vp, LK_EXCLUSIVE | 1122 LK_RETRY | 1123 LK_FAILRECLAIM) != 0) { 1124 panic("devfs_spec_close: vnode %p " 1125 "unexpectedly could not be relocked", 1126 vp); 1127 } 1128 } 1129 } else { 1130 error = 0; 1131 } 1132 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n"); 1133 1134 /* 1135 * Track the actual opens and closes on the vnode. The last close 1136 * disassociates the rdev. If the rdev is already disassociated or 1137 * the opencount is already 0, the vnode might have been revoked 1138 * and no further opencount tracking occurs. 1139 */ 1140 if (dev) 1141 release_dev(dev); 1142 if (vp->v_opencount > 0) 1143 vop_stdclose(ap); 1144 return(error); 1145 1146 } 1147 1148 1149 static int 1150 devfs_fo_close(struct file *fp) 1151 { 1152 struct vnode *vp = (struct vnode *)fp->f_data; 1153 int error; 1154 1155 fp->f_ops = &badfileops; 1156 error = vn_close(vp, fp->f_flag, fp); 1157 devfs_clear_cdevpriv(fp); 1158 1159 return (error); 1160 } 1161 1162 1163 /* 1164 * Device-optimized file table vnode read routine. 1165 * 1166 * This bypasses the VOP table and talks directly to the device. Most 1167 * filesystems just route to specfs and can make this optimization. 1168 * 1169 * MPALMOSTSAFE - acquires mplock 1170 */ 1171 static int 1172 devfs_fo_read(struct file *fp, struct uio *uio, 1173 struct ucred *cred, int flags) 1174 { 1175 struct devfs_node *node; 1176 struct vnode *vp; 1177 int ioflag; 1178 int error; 1179 cdev_t dev; 1180 1181 KASSERT(uio->uio_td == curthread, 1182 ("uio_td %p is not td %p", uio->uio_td, curthread)); 1183 1184 if (uio->uio_resid == 0) 1185 return 0; 1186 1187 vp = (struct vnode *)fp->f_data; 1188 if (vp == NULL || vp->v_type == VBAD) 1189 return EBADF; 1190 1191 node = DEVFS_NODE(vp); 1192 1193 if ((dev = vp->v_rdev) == NULL) 1194 return EBADF; 1195 1196 reference_dev(dev); 1197 1198 if ((flags & O_FOFFSET) == 0) 1199 uio->uio_offset = fp->f_offset; 1200 1201 ioflag = 0; 1202 if (flags & O_FBLOCKING) { 1203 /* ioflag &= ~IO_NDELAY; */ 1204 } else if (flags & O_FNONBLOCKING) { 1205 ioflag |= IO_NDELAY; 1206 } else if (fp->f_flag & FNONBLOCK) { 1207 ioflag |= IO_NDELAY; 1208 } 1209 if (flags & O_FBUFFERED) { 1210 /* ioflag &= ~IO_DIRECT; */ 1211 } else if (flags & O_FUNBUFFERED) { 1212 ioflag |= IO_DIRECT; 1213 } else if (fp->f_flag & O_DIRECT) { 1214 ioflag |= IO_DIRECT; 1215 } 1216 ioflag |= sequential_heuristic(uio, fp); 1217 1218 error = dev_dread(dev, uio, ioflag, fp); 1219 1220 release_dev(dev); 1221 if (node) 1222 nanotime(&node->atime); 1223 if ((flags & O_FOFFSET) == 0) 1224 fp->f_offset = uio->uio_offset; 1225 fp->f_nextoff = uio->uio_offset; 1226 1227 return (error); 1228 } 1229 1230 1231 static int 1232 devfs_fo_write(struct file *fp, struct uio *uio, 1233 struct ucred *cred, int flags) 1234 { 1235 struct devfs_node *node; 1236 struct vnode *vp; 1237 int ioflag; 1238 int error; 1239 cdev_t dev; 1240 1241 KASSERT(uio->uio_td == curthread, 1242 ("uio_td %p is not p %p", uio->uio_td, curthread)); 1243 1244 vp = (struct vnode *)fp->f_data; 1245 if (vp == NULL || vp->v_type == VBAD) 1246 return EBADF; 1247 1248 node = DEVFS_NODE(vp); 1249 1250 if (vp->v_type == VREG) 1251 bwillwrite(uio->uio_resid); 1252 1253 vp = (struct vnode *)fp->f_data; 1254 1255 if ((dev = vp->v_rdev) == NULL) 1256 return EBADF; 1257 1258 reference_dev(dev); 1259 1260 if ((flags & O_FOFFSET) == 0) 1261 uio->uio_offset = fp->f_offset; 1262 1263 
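	/*
	 * Assemble the ioflag for the driver: start from IO_UNIT, add
	 * IO_APPEND for append-mode regular files, then derive the
	 * non-blocking, direct and synchronous bits from both the per-call
	 * flags and the file's f_flag, and finally mix in the sequential
	 * heuristic.
	 */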
	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}


static int
devfs_fo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;

	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_bytes;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
						 (time_uptime -
						  dev->si_lastread);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
						 (time_uptime -
						  dev->si_lastwrite);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
1397 */ 1398 1399 sb->st_blksize = PAGE_SIZE; 1400 1401 sb->st_flags = vap->va_flags; 1402 1403 error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0); 1404 if (error) 1405 sb->st_gen = 0; 1406 else 1407 sb->st_gen = (u_int32_t)vap->va_gen; 1408 1409 sb->st_blocks = vap->va_bytes / S_BLKSIZE; 1410 1411 return (0); 1412 } 1413 1414 1415 static int 1416 devfs_fo_kqfilter(struct file *fp, struct knote *kn) 1417 { 1418 struct vnode *vp; 1419 int error; 1420 cdev_t dev; 1421 1422 vp = (struct vnode *)fp->f_data; 1423 if (vp == NULL || vp->v_type == VBAD) { 1424 error = EBADF; 1425 goto done; 1426 } 1427 if ((dev = vp->v_rdev) == NULL) { 1428 error = EBADF; 1429 goto done; 1430 } 1431 reference_dev(dev); 1432 1433 error = dev_dkqfilter(dev, kn, fp); 1434 1435 release_dev(dev); 1436 1437 done: 1438 return (error); 1439 } 1440 1441 /* 1442 * MPALMOSTSAFE - acquires mplock 1443 */ 1444 static int 1445 devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data, 1446 struct ucred *ucred, struct sysmsg *msg) 1447 { 1448 #if 0 1449 struct devfs_node *node; 1450 #endif 1451 struct vnode *vp; 1452 struct vnode *ovp; 1453 cdev_t dev; 1454 int error; 1455 struct fiodname_args *name_args; 1456 size_t namlen; 1457 const char *name; 1458 1459 vp = ((struct vnode *)fp->f_data); 1460 1461 if ((dev = vp->v_rdev) == NULL) 1462 return EBADF; /* device was revoked */ 1463 1464 reference_dev(dev); 1465 1466 #if 0 1467 node = DEVFS_NODE(vp); 1468 #endif 1469 1470 devfs_debug(DEVFS_DEBUG_DEBUG, 1471 "devfs_fo_ioctl() called! for dev %s\n", 1472 dev->si_name); 1473 1474 if (com == FIODTYPE) { 1475 *(int *)data = dev_dflags(dev) & D_TYPEMASK; 1476 error = 0; 1477 goto out; 1478 } else if (com == FIODNAME) { 1479 name_args = (struct fiodname_args *)data; 1480 name = dev->si_name; 1481 namlen = strlen(name) + 1; 1482 1483 devfs_debug(DEVFS_DEBUG_DEBUG, 1484 "ioctl, got: FIODNAME for %s\n", name); 1485 1486 if (namlen <= name_args->len) 1487 error = copyout(dev->si_name, name_args->name, namlen); 1488 else 1489 error = EINVAL; 1490 1491 devfs_debug(DEVFS_DEBUG_DEBUG, 1492 "ioctl stuff: error: %d\n", error); 1493 goto out; 1494 } 1495 1496 error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp); 1497 1498 #if 0 1499 if (node) { 1500 nanotime(&node->atime); 1501 nanotime(&node->mtime); 1502 } 1503 #endif 1504 if (com == TIOCSCTTY) { 1505 devfs_debug(DEVFS_DEBUG_DEBUG, 1506 "devfs_fo_ioctl: got TIOCSCTTY on %s\n", 1507 dev->si_name); 1508 } 1509 if (error == 0 && com == TIOCSCTTY) { 1510 struct proc *p = curthread->td_proc; 1511 struct session *sess; 1512 1513 devfs_debug(DEVFS_DEBUG_DEBUG, 1514 "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n", 1515 dev->si_name); 1516 if (p == NULL) { 1517 error = ENOTTY; 1518 goto out; 1519 } 1520 sess = p->p_session; 1521 1522 /* 1523 * Do nothing if reassigning same control tty 1524 */ 1525 if (sess->s_ttyvp == vp) { 1526 error = 0; 1527 goto out; 1528 } 1529 1530 /* 1531 * Get rid of reference to old control tty 1532 */ 1533 ovp = sess->s_ttyvp; 1534 vref(vp); 1535 sess->s_ttyvp = vp; 1536 if (ovp) 1537 vrele(ovp); 1538 } 1539 1540 out: 1541 release_dev(dev); 1542 devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n"); 1543 return (error); 1544 } 1545 1546 1547 static int 1548 devfs_spec_fsync(struct vop_fsync_args *ap) 1549 { 1550 struct vnode *vp = ap->a_vp; 1551 int error; 1552 1553 if (!vn_isdisk(vp, NULL)) 1554 return (0); 1555 1556 /* 1557 * Flush all dirty buffers associated with a block device. 
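	 * Character devices that are not disks were already handled above
	 * and return success without doing anything; for disks, vfsync()
	 * does the flushing using the caller's a_waitfor policy.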
1558 */ 1559 error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL); 1560 return (error); 1561 } 1562 1563 static int 1564 devfs_spec_read(struct vop_read_args *ap) 1565 { 1566 struct devfs_node *node; 1567 struct vnode *vp; 1568 struct uio *uio; 1569 cdev_t dev; 1570 int error; 1571 1572 vp = ap->a_vp; 1573 dev = vp->v_rdev; 1574 uio = ap->a_uio; 1575 node = DEVFS_NODE(vp); 1576 1577 if (dev == NULL) /* device was revoked */ 1578 return (EBADF); 1579 if (uio->uio_resid == 0) 1580 return (0); 1581 1582 vn_unlock(vp); 1583 error = dev_dread(dev, uio, ap->a_ioflag, NULL); 1584 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1585 1586 if (node) 1587 nanotime(&node->atime); 1588 1589 return (error); 1590 } 1591 1592 /* 1593 * Vnode op for write 1594 * 1595 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag, 1596 * struct ucred *a_cred) 1597 */ 1598 static int 1599 devfs_spec_write(struct vop_write_args *ap) 1600 { 1601 struct devfs_node *node; 1602 struct vnode *vp; 1603 struct uio *uio; 1604 cdev_t dev; 1605 int error; 1606 1607 vp = ap->a_vp; 1608 dev = vp->v_rdev; 1609 uio = ap->a_uio; 1610 node = DEVFS_NODE(vp); 1611 1612 KKASSERT(uio->uio_segflg != UIO_NOCOPY); 1613 1614 if (dev == NULL) /* device was revoked */ 1615 return (EBADF); 1616 1617 vn_unlock(vp); 1618 error = dev_dwrite(dev, uio, ap->a_ioflag, NULL); 1619 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1620 1621 if (node) { 1622 nanotime(&node->atime); 1623 nanotime(&node->mtime); 1624 } 1625 1626 return (error); 1627 } 1628 1629 /* 1630 * Device ioctl operation. 1631 * 1632 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data, 1633 * int a_fflag, struct ucred *a_cred, struct sysmsg *msg) 1634 */ 1635 static int 1636 devfs_spec_ioctl(struct vop_ioctl_args *ap) 1637 { 1638 struct vnode *vp = ap->a_vp; 1639 #if 0 1640 struct devfs_node *node; 1641 #endif 1642 cdev_t dev; 1643 1644 if ((dev = vp->v_rdev) == NULL) 1645 return (EBADF); /* device was revoked */ 1646 #if 0 1647 node = DEVFS_NODE(vp); 1648 1649 if (node) { 1650 nanotime(&node->atime); 1651 nanotime(&node->mtime); 1652 } 1653 #endif 1654 1655 return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag, 1656 ap->a_cred, ap->a_sysmsg, NULL)); 1657 } 1658 1659 /* 1660 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn) 1661 */ 1662 /* ARGSUSED */ 1663 static int 1664 devfs_spec_kqfilter(struct vop_kqfilter_args *ap) 1665 { 1666 struct vnode *vp = ap->a_vp; 1667 #if 0 1668 struct devfs_node *node; 1669 #endif 1670 cdev_t dev; 1671 1672 if ((dev = vp->v_rdev) == NULL) 1673 return (EBADF); /* device was revoked (EBADF) */ 1674 #if 0 1675 node = DEVFS_NODE(vp); 1676 1677 if (node) 1678 nanotime(&node->atime); 1679 #endif 1680 1681 return (dev_dkqfilter(dev, ap->a_kn, NULL)); 1682 } 1683 1684 /* 1685 * Convert a vnode strategy call into a device strategy call. Vnode strategy 1686 * calls are not limited to device DMA limits so we have to deal with the 1687 * case. 1688 * 1689 * spec_strategy(struct vnode *a_vp, struct bio *a_bio) 1690 */ 1691 static int 1692 devfs_spec_strategy(struct vop_strategy_args *ap) 1693 { 1694 struct bio *bio = ap->a_bio; 1695 struct buf *bp = bio->bio_buf; 1696 struct buf *nbp; 1697 struct vnode *vp; 1698 struct mount *mp; 1699 int chunksize; 1700 int maxiosize; 1701 1702 if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL) 1703 buf_start(bp); 1704 1705 /* 1706 * Collect statistics on synchronous and asynchronous read 1707 * and write counts for disks that have associated filesystems. 
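	 * The counters live in the mountpoint's mnt_stat (f_syncreads,
	 * f_asyncreads, f_syncwrites, f_asyncwrites) and are only bumped
	 * when the disk actually has a filesystem mounted on it
	 * (si_mountpoint != NULL).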
1708 */ 1709 vp = ap->a_vp; 1710 KKASSERT(vp->v_rdev != NULL); /* XXX */ 1711 if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) { 1712 if (bp->b_cmd == BUF_CMD_READ) { 1713 if (bp->b_flags & BIO_SYNC) 1714 mp->mnt_stat.f_syncreads++; 1715 else 1716 mp->mnt_stat.f_asyncreads++; 1717 } else { 1718 if (bp->b_flags & BIO_SYNC) 1719 mp->mnt_stat.f_syncwrites++; 1720 else 1721 mp->mnt_stat.f_asyncwrites++; 1722 } 1723 } 1724 1725 /* 1726 * Device iosize limitations only apply to read and write. Shortcut 1727 * the I/O if it fits. 1728 */ 1729 if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) { 1730 devfs_debug(DEVFS_DEBUG_DEBUG, 1731 "%s: si_iosize_max not set!\n", 1732 dev_dname(vp->v_rdev)); 1733 maxiosize = MAXPHYS; 1734 } 1735 #if SPEC_CHAIN_DEBUG & 2 1736 maxiosize = 4096; 1737 #endif 1738 if (bp->b_bcount <= maxiosize || 1739 (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) { 1740 dev_dstrategy_chain(vp->v_rdev, bio); 1741 return (0); 1742 } 1743 1744 /* 1745 * Clone the buffer and set up an I/O chain to chunk up the I/O. 1746 */ 1747 nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO); 1748 initbufbio(nbp); 1749 buf_dep_init(nbp); 1750 BUF_LOCK(nbp, LK_EXCLUSIVE); 1751 BUF_KERNPROC(nbp); 1752 nbp->b_vp = vp; 1753 nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP); 1754 nbp->b_data = bp->b_data; 1755 nbp->b_bio1.bio_done = devfs_spec_strategy_done; 1756 nbp->b_bio1.bio_offset = bio->bio_offset; 1757 nbp->b_bio1.bio_caller_info1.ptr = bio; 1758 1759 /* 1760 * Start the first transfer 1761 */ 1762 if (vn_isdisk(vp, NULL)) 1763 chunksize = vp->v_rdev->si_bsize_phys; 1764 else 1765 chunksize = DEV_BSIZE; 1766 chunksize = maxiosize / chunksize * chunksize; 1767 #if SPEC_CHAIN_DEBUG & 1 1768 devfs_debug(DEVFS_DEBUG_DEBUG, 1769 "spec_strategy chained I/O chunksize=%d\n", 1770 chunksize); 1771 #endif 1772 nbp->b_cmd = bp->b_cmd; 1773 nbp->b_bcount = chunksize; 1774 nbp->b_bufsize = chunksize; /* used to detect a short I/O */ 1775 nbp->b_bio1.bio_caller_info2.index = chunksize; 1776 1777 #if SPEC_CHAIN_DEBUG & 1 1778 devfs_debug(DEVFS_DEBUG_DEBUG, 1779 "spec_strategy: chain %p offset %d/%d bcount %d\n", 1780 bp, 0, bp->b_bcount, nbp->b_bcount); 1781 #endif 1782 1783 dev_dstrategy(vp->v_rdev, &nbp->b_bio1); 1784 1785 if (DEVFS_NODE(vp)) { 1786 nanotime(&DEVFS_NODE(vp)->atime); 1787 nanotime(&DEVFS_NODE(vp)->mtime); 1788 } 1789 1790 return (0); 1791 } 1792 1793 /* 1794 * Chunked up transfer completion routine - chain transfers until done 1795 * 1796 * NOTE: MPSAFE callback. 
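 *
 * Each completion advances boffset past the chunk just finished, re-aims
 * nbp at the next slice of the original buffer (clamped to chunksize) and
 * reissues dev_dstrategy().  The chain terminates on an error, a short
 * transfer, or once the whole of b_bcount has been pushed, at which point
 * the original bio is biodone()'d and nbp is freed.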
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
		return;
	}

	/*
	 * Fall through to here on termination.  biodone() the original
	 * bio, then clean up and free nbp.
	 */
	biodone(bio);
	BUF_UNLOCK(nbp);
	uninitbufbio(nbp);
	kfree(nbp, M_DEVBUF);
}

/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * XXX: This assumes that strategy does the deed right away.
	 * XXX: this may not be TRTTD.
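	 *
	 * Devices that do not advertise SI_CANFREE never see the hint; the
	 * request is simply acknowledged below without issuing any I/O.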
1903 */ 1904 KKASSERT(ap->a_vp->v_rdev != NULL); 1905 if ((ap->a_vp->v_rdev->si_flags & SI_CANFREE) == 0) 1906 return (0); 1907 bp = geteblk(ap->a_length); 1908 bp->b_cmd = BUF_CMD_FREEBLKS; 1909 bp->b_bio1.bio_offset = ap->a_offset; 1910 bp->b_bcount = ap->a_length; 1911 dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1); 1912 return (0); 1913 } 1914 1915 /* 1916 * Implement degenerate case where the block requested is the block 1917 * returned, and assume that the entire device is contiguous in regards 1918 * to the contiguous block range (runp and runb). 1919 * 1920 * spec_bmap(struct vnode *a_vp, off_t a_loffset, 1921 * off_t *a_doffsetp, int *a_runp, int *a_runb) 1922 */ 1923 static int 1924 devfs_spec_bmap(struct vop_bmap_args *ap) 1925 { 1926 if (ap->a_doffsetp != NULL) 1927 *ap->a_doffsetp = ap->a_loffset; 1928 if (ap->a_runp != NULL) 1929 *ap->a_runp = MAXBSIZE; 1930 if (ap->a_runb != NULL) { 1931 if (ap->a_loffset < MAXBSIZE) 1932 *ap->a_runb = (int)ap->a_loffset; 1933 else 1934 *ap->a_runb = MAXBSIZE; 1935 } 1936 return (0); 1937 } 1938 1939 1940 /* 1941 * Special device advisory byte-level locks. 1942 * 1943 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op, 1944 * struct flock *a_fl, int a_flags) 1945 */ 1946 /* ARGSUSED */ 1947 static int 1948 devfs_spec_advlock(struct vop_advlock_args *ap) 1949 { 1950 return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP); 1951 } 1952 1953 /* 1954 * NOTE: MPSAFE callback. 1955 */ 1956 static void 1957 devfs_spec_getpages_iodone(struct bio *bio) 1958 { 1959 bio->bio_buf->b_cmd = BUF_CMD_DONE; 1960 wakeup(bio->bio_buf); 1961 } 1962 1963 /* 1964 * spec_getpages() - get pages associated with device vnode. 1965 * 1966 * Note that spec_read and spec_write do not use the buffer cache, so we 1967 * must fully implement getpages here. 1968 */ 1969 static int 1970 devfs_spec_getpages(struct vop_getpages_args *ap) 1971 { 1972 vm_offset_t kva; 1973 int error; 1974 int i, pcount, size; 1975 struct buf *bp; 1976 vm_page_t m; 1977 vm_ooffset_t offset; 1978 int toff, nextoff, nread; 1979 struct vnode *vp = ap->a_vp; 1980 int blksiz; 1981 int gotreqpage; 1982 1983 error = 0; 1984 pcount = round_page(ap->a_count) / PAGE_SIZE; 1985 1986 /* 1987 * Calculate the offset of the transfer and do sanity check. 1988 */ 1989 offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset; 1990 1991 /* 1992 * Round up physical size for real devices. We cannot round using 1993 * v_mount's block size data because v_mount has nothing to do with 1994 * the device. i.e. it's usually '/dev'. We need the physical block 1995 * size for the device itself. 1996 * 1997 * We can't use v_rdev->si_mountpoint because it only exists when the 1998 * block device is mounted. However, we can use v_rdev. 1999 */ 2000 if (vn_isdisk(vp, NULL)) 2001 blksiz = vp->v_rdev->si_bsize_phys; 2002 else 2003 blksiz = DEV_BSIZE; 2004 2005 size = (ap->a_count + blksiz - 1) & ~(blksiz - 1); 2006 2007 bp = getpbuf_kva(NULL); 2008 kva = (vm_offset_t)bp->b_data; 2009 2010 /* 2011 * Map the pages to be read into the kva. 2012 */ 2013 pmap_qenter(kva, ap->a_m, pcount); 2014 2015 /* Build a minimal buffer header. */ 2016 bp->b_cmd = BUF_CMD_READ; 2017 bp->b_bcount = size; 2018 bp->b_resid = 0; 2019 bsetrunningbufspace(bp, size); 2020 2021 bp->b_bio1.bio_offset = offset; 2022 bp->b_bio1.bio_done = devfs_spec_getpages_iodone; 2023 2024 mycpu->gd_cnt.v_vnodein++; 2025 mycpu->gd_cnt.v_vnodepgsin += pcount; 2026 2027 /* Do the input. 
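	 * vn_strategy() issues the read; devfs_spec_getpages_iodone() sets
	 * b_cmd to BUF_CMD_DONE and wakes the sleeper, which is all the
	 * wait loop below is looking for.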
*/ 2028 vn_strategy(ap->a_vp, &bp->b_bio1); 2029 2030 crit_enter(); 2031 2032 /* We definitely need to be at splbio here. */ 2033 while (bp->b_cmd != BUF_CMD_DONE) 2034 tsleep(bp, 0, "spread", 0); 2035 2036 crit_exit(); 2037 2038 if (bp->b_flags & B_ERROR) { 2039 if (bp->b_error) 2040 error = bp->b_error; 2041 else 2042 error = EIO; 2043 } 2044 2045 /* 2046 * If EOF is encountered we must zero-extend the result in order 2047 * to ensure that the page does not contain garabge. When no 2048 * error occurs, an early EOF is indicated if b_bcount got truncated. 2049 * b_resid is relative to b_bcount and should be 0, but some devices 2050 * might indicate an EOF with b_resid instead of truncating b_bcount. 2051 */ 2052 nread = bp->b_bcount - bp->b_resid; 2053 if (nread < ap->a_count) 2054 bzero((caddr_t)kva + nread, ap->a_count - nread); 2055 pmap_qremove(kva, pcount); 2056 2057 gotreqpage = 0; 2058 for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) { 2059 nextoff = toff + PAGE_SIZE; 2060 m = ap->a_m[i]; 2061 2062 m->flags &= ~PG_ZERO; 2063 2064 /* 2065 * NOTE: vm_page_undirty/clear_dirty etc do not clear the 2066 * pmap modified bit. pmap modified bit should have 2067 * already been cleared. 2068 */ 2069 if (nextoff <= nread) { 2070 m->valid = VM_PAGE_BITS_ALL; 2071 vm_page_undirty(m); 2072 } else if (toff < nread) { 2073 /* 2074 * Since this is a VM request, we have to supply the 2075 * unaligned offset to allow vm_page_set_valid() 2076 * to zero sub-DEV_BSIZE'd portions of the page. 2077 */ 2078 vm_page_set_valid(m, 0, nread - toff); 2079 vm_page_clear_dirty_end_nonincl(m, 0, nread - toff); 2080 } else { 2081 m->valid = 0; 2082 vm_page_undirty(m); 2083 } 2084 2085 if (i != ap->a_reqpage) { 2086 /* 2087 * Just in case someone was asking for this page we 2088 * now tell them that it is ok to use. 2089 */ 2090 if (!error || (m->valid == VM_PAGE_BITS_ALL)) { 2091 if (m->valid) { 2092 if (m->flags & PG_REFERENCED) { 2093 vm_page_activate(m); 2094 } else { 2095 vm_page_deactivate(m); 2096 } 2097 vm_page_wakeup(m); 2098 } else { 2099 vm_page_free(m); 2100 } 2101 } else { 2102 vm_page_free(m); 2103 } 2104 } else if (m->valid) { 2105 gotreqpage = 1; 2106 /* 2107 * Since this is a VM request, we need to make the 2108 * entire page presentable by zeroing invalid sections. 2109 */ 2110 if (m->valid != VM_PAGE_BITS_ALL) 2111 vm_page_zero_invalid(m, FALSE); 2112 } 2113 } 2114 if (!gotreqpage) { 2115 m = ap->a_m[ap->a_reqpage]; 2116 devfs_debug(DEVFS_DEBUG_WARNING, 2117 "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n", 2118 devtoname(vp->v_rdev), error, bp, bp->b_vp); 2119 devfs_debug(DEVFS_DEBUG_WARNING, 2120 " size: %d, resid: %d, a_count: %d, valid: 0x%x\n", 2121 size, bp->b_resid, ap->a_count, m->valid); 2122 devfs_debug(DEVFS_DEBUG_WARNING, 2123 " nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n", 2124 nread, ap->a_reqpage, (u_long)m->pindex, pcount); 2125 /* 2126 * Free the buffer header back to the swap buffer pool. 2127 */ 2128 relpbuf(bp, NULL); 2129 return VM_PAGER_ERROR; 2130 } 2131 /* 2132 * Free the buffer header back to the swap buffer pool. 
2133 */ 2134 relpbuf(bp, NULL); 2135 if (DEVFS_NODE(ap->a_vp)) 2136 nanotime(&DEVFS_NODE(ap->a_vp)->mtime); 2137 return VM_PAGER_OK; 2138 } 2139 2140 static __inline 2141 int 2142 sequential_heuristic(struct uio *uio, struct file *fp) 2143 { 2144 /* 2145 * Sequential heuristic - detect sequential operation 2146 */ 2147 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) || 2148 uio->uio_offset == fp->f_nextoff) { 2149 /* 2150 * XXX we assume that the filesystem block size is 2151 * the default. Not true, but still gives us a pretty 2152 * good indicator of how sequential the read operations 2153 * are. 2154 */ 2155 int tmpseq = fp->f_seqcount; 2156 2157 tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE; 2158 if (tmpseq > IO_SEQMAX) 2159 tmpseq = IO_SEQMAX; 2160 fp->f_seqcount = tmpseq; 2161 return(fp->f_seqcount << IO_SEQSHIFT); 2162 } 2163 2164 /* 2165 * Not sequential, quick draw-down of seqcount 2166 */ 2167 if (fp->f_seqcount > 1) 2168 fp->f_seqcount = 1; 2169 else 2170 fp->f_seqcount = 0; 2171 return(0); 2172 } 2173