/*
 * (MPSAFE)
 *
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/reg.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/tty.h>
#include <sys/diskslice.h>
#include <sys/sysctl.h>
#include <sys/devfs.h>
#include <sys/pioctl.h>
#include <vfs/fifofs/fifo.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/sysref2.h>
#include <vm/vm_page2.h>

#ifndef SPEC_CHAIN_DEBUG
#define SPEC_CHAIN_DEBUG	0
#endif

MALLOC_DECLARE(M_DEVFS);
#define DEVFS_BADOP	(void *)devfs_vop_badop

static int devfs_vop_badop(struct vop_generic_args *);
static int devfs_vop_access(struct vop_access_args *);
static int devfs_vop_inactive(struct vop_inactive_args *);
static int devfs_vop_reclaim(struct vop_reclaim_args *);
static int devfs_vop_readdir(struct vop_readdir_args *);
static int devfs_vop_getattr(struct vop_getattr_args *);
static int devfs_vop_setattr(struct vop_setattr_args *);
static int devfs_vop_readlink(struct vop_readlink_args *);
static int devfs_vop_print(struct vop_print_args *);

static int devfs_vop_nresolve(struct vop_nresolve_args *);
static int devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *);
static int devfs_vop_nmkdir(struct vop_nmkdir_args *);
static int devfs_vop_nsymlink(struct vop_nsymlink_args *);
static int devfs_vop_nrmdir(struct vop_nrmdir_args *);
static int devfs_vop_nremove(struct vop_nremove_args *);

static int devfs_spec_open(struct vop_open_args *);
static int devfs_spec_close(struct vop_close_args *);
static int devfs_spec_fsync(struct vop_fsync_args *);

static int devfs_spec_read(struct vop_read_args *);
static int devfs_spec_write(struct vop_write_args *);
static int devfs_spec_ioctl(struct vop_ioctl_args *);
static int devfs_spec_kqfilter(struct vop_kqfilter_args *);
static int devfs_spec_strategy(struct vop_strategy_args *);
static void devfs_spec_strategy_done(struct bio *);
static int devfs_spec_freeblks(struct vop_freeblks_args *);
static int devfs_spec_bmap(struct vop_bmap_args *);
static int devfs_spec_advlock(struct vop_advlock_args *);
static void devfs_spec_getpages_iodone(struct bio *);
static int devfs_spec_getpages(struct vop_getpages_args *);

static int devfs_fo_close(struct file *);
static int devfs_fo_read(struct file *, struct uio *, struct ucred *, int);
static int devfs_fo_write(struct file *, struct uio *, struct ucred *, int);
static int devfs_fo_stat(struct file *, struct stat *, struct ucred *);
static int devfs_fo_kqfilter(struct file *, struct knote *);
static int devfs_fo_ioctl(struct file *, u_long, caddr_t,
			  struct ucred *, struct sysmsg *);
static __inline int sequential_heuristic(struct uio *, struct file *);

extern struct lock devfs_lock;

/*
 * devfs vnode operations for regular files.  All vnode ops are MPSAFE.
 */
struct vop_ops devfs_vnode_norm_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		DEVFS_BADOP,
	.vop_bmap =		DEVFS_BADOP,
	.vop_close =		vop_stdclose,
	.vop_getattr =		devfs_vop_getattr,
	.vop_inactive =		devfs_vop_inactive,
	.vop_ncreate =		DEVFS_BADOP,
	.vop_nresolve =		devfs_vop_nresolve,
	.vop_nlookupdotdot =	devfs_vop_nlookupdotdot,
	.vop_nlink =		DEVFS_BADOP,
	.vop_nmkdir =		devfs_vop_nmkdir,
	.vop_nmknod =		DEVFS_BADOP,
	.vop_nremove =		devfs_vop_nremove,
	.vop_nrename =		DEVFS_BADOP,
	.vop_nrmdir =		devfs_vop_nrmdir,
	.vop_nsymlink =		devfs_vop_nsymlink,
	.vop_open =		vop_stdopen,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_read =		DEVFS_BADOP,
	.vop_readdir =		devfs_vop_readdir,
	.vop_readlink =		devfs_vop_readlink,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_write =		DEVFS_BADOP,
	.vop_ioctl =		DEVFS_BADOP
};

/*
 * devfs vnode operations for character devices.  All vnode ops are MPSAFE.
 */
struct vop_ops devfs_vnode_dev_vops = {
	.vop_default =		vop_defaultop,
	.vop_access =		devfs_vop_access,
	.vop_advlock =		devfs_spec_advlock,
	.vop_bmap =		devfs_spec_bmap,
	.vop_close =		devfs_spec_close,
	.vop_freeblks =		devfs_spec_freeblks,
	.vop_fsync =		devfs_spec_fsync,
	.vop_getattr =		devfs_vop_getattr,
	.vop_getpages =		devfs_spec_getpages,
	.vop_inactive =		devfs_vop_inactive,
	.vop_open =		devfs_spec_open,
	.vop_pathconf =		vop_stdpathconf,
	.vop_print =		devfs_vop_print,
	.vop_kqfilter =		devfs_spec_kqfilter,
	.vop_read =		devfs_spec_read,
	.vop_readdir =		DEVFS_BADOP,
	.vop_readlink =		DEVFS_BADOP,
	.vop_reallocblks =	DEVFS_BADOP,
	.vop_reclaim =		devfs_vop_reclaim,
	.vop_setattr =		devfs_vop_setattr,
	.vop_strategy =		devfs_spec_strategy,
	.vop_write =		devfs_spec_write,
	.vop_ioctl =		devfs_spec_ioctl
};

/*
 * devfs file pointer operations.  All fileops are MPSAFE.
 */
struct vop_ops *devfs_vnode_dev_vops_p = &devfs_vnode_dev_vops;

struct fileops devfs_dev_fileops = {
	.fo_read =	devfs_fo_read,
	.fo_write =	devfs_fo_write,
	.fo_ioctl =	devfs_fo_ioctl,
	.fo_kqfilter =	devfs_fo_kqfilter,
	.fo_stat =	devfs_fo_stat,
	.fo_close =	devfs_fo_close,
	.fo_shutdown =	nofo_shutdown
};

/*
 * These two functions are possibly temporary hacks for devices (aka
 * the pty code) which want to control the node attributes themselves.
 *
 * XXX we may ultimately desire to simply remove the uid/gid/mode
 * from the node entirely.
 *
 * MPSAFE - sorta.  Theoretically the overwrite can compete since they
 * are loading from the same fields.
 */
static __inline void
node_sync_dev_get(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		node->uid = dev->si_uid;
		node->gid = dev->si_gid;
		node->mode = dev->si_perms;
	}
}

static __inline void
node_sync_dev_set(struct devfs_node *node)
{
	cdev_t dev;

	if ((dev = node->d_dev) && (dev->si_flags & SI_OVERRIDE)) {
		dev->si_uid = node->uid;
		dev->si_gid = node->gid;
		dev->si_perms = node->mode;
	}
}
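
/*
 * Illustrative sketch (not compiled): how a driver that manages its own
 * node attributes might use SI_OVERRIDE so that the node_sync_dev_*()
 * helpers above mirror the cdev's uid/gid/perms into the devfs node and
 * back.  The helper name and the 0600 policy are assumptions for the
 * example, not part of this file.
 */
#if 0
static void
example_take_over_attrs(cdev_t dev)
{
	dev->si_flags |= SI_OVERRIDE;	/* cdev fields become authoritative */
	dev->si_uid = 0;		/* root */
	dev->si_gid = 0;		/* wheel */
	dev->si_perms = 0600;		/* owner read/write only */
}
#endif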

/*
 * generic entry point for unsupported operations
 */
static int
devfs_vop_badop(struct vop_generic_args *ap)
{
	return (EIO);
}


static int
devfs_vop_access(struct vop_access_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int error;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);
	error = vop_helper_access(ap, node->uid, node->gid,
				  node->mode, node->flags);

	return error;
}


static int
devfs_vop_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);

	if (node == NULL || (node->flags & DEVFS_NODE_LINKED) == 0)
		vrecycle(ap->a_vp);
	return 0;
}


static int
devfs_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	int locked;

	/*
	 * Check if the devfs lock is held already.  If not, acquire it.
	 */
	if ((lockstatus(&devfs_lock, curthread)) != LK_EXCLUSIVE) {
		lockmgr(&devfs_lock, LK_EXCLUSIVE);
		locked = 1;
	} else {
		locked = 0;
	}

	/*
	 * Get rid of the devfs_node if it is no longer linked into the
	 * topology.
	 */
	vp = ap->a_vp;
	if ((node = DEVFS_NODE(vp)) != NULL) {
		node->v_node = NULL;
		if ((node->flags & DEVFS_NODE_LINKED) == 0)
			devfs_freep(node);
	}

	if (locked)
		lockmgr(&devfs_lock, LK_RELEASE);

	/*
	 * v_rdev needs to be properly released using v_release_rdev.
	 * Make sure v_data is NULL as well.
	 */
	vp->v_data = NULL;
	v_release_rdev(vp);
	return 0;
}
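
/*
 * Directory offsets in devfs_vop_readdir() below are cookie based:
 * offsets 0 and 1 are the synthesized "." and ".." entries, while every
 * child node is reported at its node->cookie.  The cookie array handed
 * back via a_cookies lets NFS and lseek(2) restart a partially read
 * directory at a stable position.
 */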
static int
devfs_vop_readdir(struct vop_readdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_vp);
	struct devfs_node *node;
	int cookie_index;
	int ncookies;
	int error2;
	int error;
	int r;
	off_t *cookies;
	off_t saveoff;

	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_readdir() called!\n");

	if (ap->a_uio->uio_offset < 0 || ap->a_uio->uio_offset > INT_MAX)
		return (EINVAL);
	error = vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY | LK_FAILRECLAIM);
	if (error)
		return (error);

	if (!devfs_node_is_accessible(dnode)) {
		vn_unlock(ap->a_vp);
		return ENOENT;
	}

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	saveoff = ap->a_uio->uio_offset;

	if (ap->a_ncookies) {
		ncookies = ap->a_uio->uio_resid / 16 + 1; /* Why / 16 ?? */
		if (ncookies > 256)
			ncookies = 256;
		cookies = kmalloc(256 * sizeof(off_t), M_TEMP, M_WAITOK);
		cookie_index = 0;
	} else {
		ncookies = -1;
		cookies = NULL;
		cookie_index = 0;
	}

	nanotime(&dnode->atime);

	if (saveoff == 0) {
		r = vop_write_dirent(&error, ap->a_uio, dnode->d_dir.d_ino,
				     DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		if (dnode->parent) {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->parent->d_dir.d_ino,
					     DT_DIR, 2, "..");
		} else {
			r = vop_write_dirent(&error, ap->a_uio,
					     dnode->d_dir.d_ino,
					     DT_DIR, 2, "..");
		}
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		saveoff++;
		cookie_index++;
		if (cookie_index == ncookies)
			goto done;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if ((node->flags & DEVFS_HIDDEN) ||
		    (node->flags & DEVFS_INVISIBLE)) {
			continue;
		}

		/*
		 * If the node type is a valid devfs alias, then we make
		 * sure that the target isn't hidden.  If it is, we don't
		 * show the link in the directory listing.
		 */
		if ((node->node_type == Nlink) && (node->link_target != NULL) &&
		    (node->link_target->flags & DEVFS_HIDDEN))
			continue;

		if (node->cookie < saveoff)
			continue;

		saveoff = node->cookie;

		error2 = vop_write_dirent(&error, ap->a_uio, node->d_dir.d_ino,
					  node->d_dir.d_type,
					  node->d_dir.d_namlen,
					  node->d_dir.d_name);

		if (error2)
			break;

		saveoff++;

		if (cookies)
			cookies[cookie_index] = node->cookie;
		++cookie_index;
		if (cookie_index == ncookies)
			break;
	}

done:
	lockmgr(&devfs_lock, LK_RELEASE);
	vn_unlock(ap->a_vp);

	ap->a_uio->uio_offset = saveoff;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}


static int
devfs_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Nlink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW,
					    "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);

	*ap->a_vpp = NULL;
	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	if (dnode->parent != NULL) {
		devfs_allocv(ap->a_vpp, dnode->parent);
		vn_unlock(*ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);

	return ((*ap->a_vpp == NULL) ? ENOENT : 0);
}
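
/*
 * Sketch (not compiled): a standalone media-size query mirroring what
 * devfs_vop_getattr() below does for D_DISK devices, so that stat(2)
 * reports the raw device size and lseek(2) on it behaves.  The helper
 * name is hypothetical; the DIOCGPART usage matches the getattr code.
 */
#if 0
static off_t
example_media_size(cdev_t dev)
{
	struct partinfo pinfo;
	int error;

	bzero(&pinfo, sizeof(pinfo));
	error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
			   proc0.p_ucred, NULL, NULL);
	if (error == 0 && pinfo.media_blksize != 0)
		return ((off_t)pinfo.media_size);
	return (0);
}
#endif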

static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Ndev) && node->d_dev) {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}


static int
devfs_vop_setattr(struct vop_setattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap;
	uid_t cur_uid;
	gid_t cur_gid;
	mode_t cur_mode;
	int error = 0;

	if (!devfs_node_is_accessible(node))
		return ENOENT;
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	vap = ap->a_vap;

	if ((vap->va_uid != (uid_t)VNOVAL) || (vap->va_gid != (gid_t)VNOVAL)) {
		cur_uid = node->uid;
		cur_gid = node->gid;
		cur_mode = node->mode;
		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred, &cur_uid, &cur_gid,
					 &cur_mode);
		if (error)
			goto out;

		if (node->uid != cur_uid || node->gid != cur_gid) {
			node->uid = cur_uid;
			node->gid = cur_gid;
			node->mode = cur_mode;
		}
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		cur_mode = node->mode;
		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 node->uid, node->gid, &cur_mode);
		if (error == 0 && node->mode != cur_mode) {
			node->mode = cur_mode;
		}
	}

out:
	node_sync_dev_set(node);
	nanotime(&node->ctime);
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}


static int
devfs_vop_readlink(struct vop_readlink_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	int ret;

	if (!devfs_node_is_accessible(node))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	ret = uiomove(node->symlink_name, node->symlink_namelen, ap->a_uio);
	lockmgr(&devfs_lock, LK_RELEASE);

	return ret;
}


static int
devfs_vop_print(struct vop_print_args *ap)
{
	return (0);
}

static int
devfs_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}

static int
devfs_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
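
/*
 * Nodes created through the two VOPs above (mkdir(2) and symlink(2)
 * inside a devfs mount) are tagged DEVFS_USER_CREATED.  The nrmdir and
 * nremove VOPs below will only unlink such user-created nodes; nodes
 * backed by real devices report EPERM instead.  For example, a
 * hypothetical "ln -s da0 /dev/mydisk; rm /dev/mydisk" works, while
 * "rm /dev/da0" fails.
 */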
static int
devfs_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created dirs
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type != Ndir) {
			error = ENOTDIR;
			goto out;
		} else if (node->nchildren > 2) {
			error = ENOTEMPTY;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}

static int
devfs_vop_nremove(struct vop_nremove_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	struct namecache *ncp;
	int error = ENOENT;

	ncp = ap->a_nch->ncp;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (ncp->nc_nlen != node->d_dir.d_namlen)
			continue;
		if (memcmp(ncp->nc_name, node->d_dir.d_name, ncp->nc_nlen))
			continue;

		/*
		 * only allow removal of user created stuff (e.g. symlinks)
		 */
		if ((node->flags & DEVFS_USER_CREATED) == 0) {
			error = EPERM;
			goto out;
		} else if (node->node_type == Ndir) {
			error = EISDIR;
			goto out;
		} else {
			if (node->v_node)
				cache_inval_vp(node->v_node, CINV_DESTROY);
			devfs_unlinkp(node);
			error = 0;
			break;
		}
	}

	cache_unlink(ap->a_nch);
out:
	lockmgr(&devfs_lock, LK_RELEASE);
	return error;
}


static int
devfs_spec_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode *orig_vp = NULL;
	struct devfs_node *node = DEVFS_NODE(vp);
	struct devfs_node *newnode;
	cdev_t dev, ndev = NULL;
	int error = 0;

	if (node) {
		if (node->d_dev == NULL)
			return ENXIO;
		if (!devfs_node_is_accessible(node))
			return ENOENT;
	}

	if ((dev = vp->v_rdev) == NULL)
		return ENXIO;

	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (node && ap->a_fp) {
		int exists;

		devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_open: -1.1-\n");
		lockmgr(&devfs_lock, LK_EXCLUSIVE);

		ndev = devfs_clone(dev, node->d_dir.d_name,
				   node->d_dir.d_namlen,
				   ap->a_mode, ap->a_cred);
		if (ndev != NULL) {
			newnode = devfs_create_device_node(
					DEVFS_MNTDATA(vp->v_mount)->root_node,
					ndev, &exists, NULL, NULL);
			/* XXX: possibly destroy device if this happens */

			if (newnode != NULL) {
				dev = ndev;
				if (exists == 0)
					devfs_link_dev(dev);

				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "parent here is: %s, node is: |%s|\n",
					    ((node->parent->node_type == Nroot) ?
					     "ROOT!" : node->parent->d_dir.d_name),
					    newnode->d_dir.d_name);
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "test: %s\n",
					    ((struct devfs_node *)(TAILQ_LAST(DEVFS_DENODE_HEAD(node->parent), devfs_node_head)))->d_dir.d_name);

				/*
				 * orig_vp is set to the original vp if we
				 * cloned.
				 */
				/* node->flags |= DEVFS_CLONED; */
				devfs_allocv(&vp, newnode);
				orig_vp = ap->a_vp;
				ap->a_vp = vp;
			}
		}
		lockmgr(&devfs_lock, LK_RELEASE);
		/*
		 * Synchronize devfs here to make sure that, if the cloned
		 * device creates other device nodes in addition to the
		 * cloned one, all of them are created by the time we return
		 * from opening the cloned one.
		 */
		if (ndev)
			devfs_config();
	}

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_spec_open() called on %s! \n",
		    dev->si_name);

	/*
	 * Make this field valid before any I/O in ->d_open
	 */
	if (!dev->si_iosize_max)
		/* XXX: old DFLTPHYS == 64KB dependency */
		dev->si_iosize_max = min(MAXPHYS, 64*1024);

	if (dev_dflags(dev) & D_TTY)
		vsetflags(vp, VISTTY);

	/*
	 * Open underlying device
	 */
	vn_unlock(vp);
	error = dev_dopen(dev, ap->a_mode, S_IFCHR, ap->a_cred, ap->a_fp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Clean up any cloned vp if we error out.
	 */
	if (error) {
		if (orig_vp) {
			vput(vp);
			ap->a_vp = orig_vp;
			/* orig_vp = NULL; */
		}
		return error;
	}

	/*
	 * This checks if the disk device is going to be opened for writing.
	 * It will be only allowed in the cases where securelevel permits it
	 * and it's not mounted R/W.
	 */
	if ((dev_dflags(dev) & D_DISK) && (ap->a_mode & FWRITE) &&
	    (ap->a_cred != FSCRED)) {

		/* Very secure mode. No open for writing allowed */
		if (securelevel >= 2)
			return EPERM;

		/*
		 * If it is mounted R/W, do not allow to open for writing.
		 * In the case it's mounted read-only but securelevel
		 * is >= 1, then do not allow opening for writing either.
		 */
		if (vfs_mountedon(vp)) {
			if (!(dev->si_mountpoint->mnt_flag & MNT_RDONLY))
				return EBUSY;
			else if (securelevel >= 1)
				return EPERM;
		}
	}

	if (dev_dflags(dev) & D_TTY) {
		if (dev->si_tty) {
			struct tty *tp;
			tp = dev->si_tty;
			if (!tp->t_stop) {
				devfs_debug(DEVFS_DEBUG_DEBUG,
					    "devfs: no t_stop\n");
				tp->t_stop = nottystop;
			}
		}
	}


	if (vn_isdisk(vp, NULL)) {
		if (!dev->si_bsize_phys)
			dev->si_bsize_phys = DEV_BSIZE;
		vinitvmio(vp, IDX_TO_OFF(INT_MAX), PAGE_SIZE, -1);
	}

	vop_stdopen(ap);
#if 0
	if (node)
		nanotime(&node->atime);
#endif

	/*
	 * If we replaced the vp the vop_stdopen() call will have loaded
	 * it into fp->f_data and vref()d the vp, giving us two refs.  So
	 * instead of just unlocking it here we have to vput() it.
	 */
	if (orig_vp)
		vput(vp);

	/* Ugly pty magic, to make pty devices appear once they are opened */
	if (node && (node->flags & DEVFS_PTY) == DEVFS_PTY)
		node->flags &= ~DEVFS_INVISIBLE;

	if (ap->a_fp) {
		KKASSERT(ap->a_fp->f_type == DTYPE_VNODE);
		KKASSERT((ap->a_fp->f_flag & FMASK) == (ap->a_mode & FMASK));
		ap->a_fp->f_ops = &devfs_dev_fileops;
		KKASSERT(ap->a_fp->f_data == (void *)vp);
	}

	return 0;
}
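
/*
 * Note on the clone dance above: for a cloning device (e.g. something
 * like /dev/ptmx) devfs_clone() hands back a brand new cdev, a node for
 * it is created under the mount's root_node, and the vnode actually
 * opened is the clone's, not the one the caller looked up.  ap->a_vp is
 * repointed at the clone and the original vnode is released via the
 * orig_vp/vput() path once vop_stdopen() has taken its own reference.
 */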

static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;
	int opencount;

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vnode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 *
	 * XXX opencount is not SMP safe.  The vnode is locked but there
	 *     may be multiple vnodes referencing the same device.
	 */
	if (dev) {
		/*
		 * NOTE: Try to avoid global tokens when testing opencount
		 * XXX hack, fixme. needs a struct lock and opencount in
		 * struct cdev itself.
		 */
		reference_dev(dev);
		opencount = vp->v_opencount;
		if (opencount <= 1)
			opencount = count_dev(dev);	/* XXX NOT SMP SAFE */
	} else {
		opencount = 0;
	}

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
					LK_RETRY |
					LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);
}

static int
devfs_fo_close(struct file *fp)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	fp->f_ops = &badfileops;
	error = vn_close(vp, fp->f_flag, fp);
	devfs_clear_cdevpriv(fp);

	return (error);
}


/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
	      struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
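
/*
 * Summary of the per-call flag mapping used by devfs_fo_read() above and
 * devfs_fo_write() below (descriptive only):
 *
 *	O_FBLOCKING		force a blocking transfer (IO_NDELAY off)
 *	O_FNONBLOCKING		IO_NDELAY
 *	fp->f_flag FNONBLOCK	IO_NDELAY
 *	fp->f_flag O_DIRECT	IO_DIRECT
 *	O_FOFFSET		the uio carries its own offset; fp->f_offset
 *				is neither consulted nor updated
 *
 * The write path additionally honors O_FSYNCWRITE/O_FASYNCWRITE and
 * O_APPEND/O_FAPPEND (the latter only for VREG vnodes).
 */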
static int
devfs_fo_write(struct file *fp, struct uio *uio,
	       struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}


static int
devfs_fo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	struct vattr vattr;
	struct vattr *vap;
	u_short mode;
	cdev_t dev;
	int error;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	error = vn_stat(vp, sb, cred);
	if (error)
		return (error);

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare1 = 0;
	sb->st_qspare2 = 0;

	/*
	 * Copy from vattr table ... or not in case it's a cloned device
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];

	sb->st_ino = vap->va_fileid;

	mode = vap->va_mode;
	mode |= S_IFCHR;
	sb->st_mode = mode;

	if (vap->va_nlink > (nlink_t)-1)
		sb->st_nlink = (nlink_t)-1;
	else
		sb->st_nlink = vap->va_nlink;

	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = dev2udev(DEVFS_NODE(vp)->d_dev);
	sb->st_size = vap->va_bytes;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 *
	 * si_lastread/si_lastwrite are uptime-based, so convert them to
	 * wall time by offsetting from the current wall clock.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		dev = vp->v_rdev;
		if (dev != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = time_second +
							  (dev->si_lastread -
							   time_uptime);
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = time_second +
							  (dev->si_lastwrite -
							   time_uptime);
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 *  object.  In some filesystem types, this may vary from file
	 *  to file"
	 * Default to PAGE_SIZE after much discussion.
	 */
	sb->st_blksize = PAGE_SIZE;

	sb->st_flags = vap->va_flags;

	error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
	if (error)
		sb->st_gen = 0;
	else
		sb->st_gen = (u_int32_t)vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;

	return (0);
}


static int
devfs_fo_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp;
	int error;
	cdev_t dev;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	error = dev_dkqfilter(dev, kn, fp);

	release_dev(dev);

done:
	return (error);
}

static int
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
	       struct ucred *ucred, struct sysmsg *msg)
{
#if 0
	struct devfs_node *node;
#endif
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	reference_dev(dev);

#if 0
	node = DEVFS_NODE(vp);
#endif

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif
	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}


static int
devfs_spec_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int error;

	if (!vn_isdisk(vp, NULL))
		return (0);

	/*
	 * Flush all dirty buffers associated with a block device.
	 */
	error = vfsync(vp, ap->a_waitfor, 10000, NULL, NULL);
	return (error);
}

static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}

/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}

/*
 * Device ioctl operation.
 *
 * spec_ioctl(struct vnode *a_vp, int a_command, caddr_t a_data,
 *	      int a_fflag, struct ucred *a_cred, struct sysmsg *msg)
 */
static int
devfs_spec_ioctl(struct vop_ioctl_args *ap)
{
	struct vnode *vp = ap->a_vp;
#if 0
	struct devfs_node *node;
#endif
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked */
#if 0
	node = DEVFS_NODE(vp);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif

	return (dev_dioctl(dev, ap->a_command, ap->a_data, ap->a_fflag,
			   ap->a_cred, ap->a_sysmsg, NULL));
}

/*
 * spec_kqfilter(struct vnode *a_vp, struct knote *a_kn)
 */
/* ARGSUSED */
static int
devfs_spec_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
#if 0
	struct devfs_node *node;
#endif
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);		/* device was revoked (EBADF) */
#if 0
	node = DEVFS_NODE(vp);

	if (node)
		nanotime(&node->atime);
#endif

	return (dev_dkqfilter(dev, ap->a_kn, NULL));
}

/*
 * Convert a vnode strategy call into a device strategy call.  Vnode
 * strategy calls are not limited to device DMA limits so we have to
 * deal with the case.
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;
	struct vnode *vp;
	struct mount *mp;
	int chunksize;
	int maxiosize;

	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif

	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}

	return (0);
}
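
/*
 * Worked example of the chunking above (numbers are illustrative):
 * with si_iosize_max = 131072 and si_bsize_phys = 512, chunksize is
 * rounded to 131072, so a 1 MiB request is issued as 8 chained 128 KiB
 * transfers.  Each completion below advances b_data and bio_offset by
 * the bytes already done and re-dispatches until the original b_bcount
 * is covered or an error/short transfer terminates the chain.
 */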

/*
 * Chunked up transfer completion routine - chain transfers until done
 *
 * NOTE: MPSAFE callback.
 */
static
void
devfs_spec_strategy_done(struct bio *nbio)
{
	struct buf *nbp = nbio->bio_buf;
	struct bio *bio = nbio->bio_caller_info1.ptr;	/* original bio */
	struct buf *bp = bio->bio_buf;			/* original bp */
	int chunksize = nbio->bio_caller_info2.index;	/* chunking */
	int boffset = nbp->b_data - bp->b_data;

	if (nbp->b_flags & B_ERROR) {
		/*
		 * An error terminates the chain, propagate the error back
		 * to the original bp
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p error %d bcount %d/%d\n",
			    bp, bp->b_error, bp->b_bcount,
			    bp->b_bcount - bp->b_resid);
#endif
	} else if (nbp->b_resid) {
		/*
		 * A short read or write terminates the chain
		 */
		bp->b_error = nbp->b_error;
		bp->b_resid = bp->b_bcount - boffset +
			      (nbp->b_bcount - nbp->b_resid);
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(1) "
			    "bcount %d/%d\n",
			    bp, bp->b_bcount - bp->b_resid, bp->b_bcount);
#endif
	} else if (nbp->b_bcount != nbp->b_bufsize) {
		/*
		 * A short read or write can also occur by truncating b_bcount
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p short read(2) "
			    "bcount %d/%d\n",
			    bp, nbp->b_bcount + boffset, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_bcount = nbp->b_bcount + boffset;
		bp->b_resid = nbp->b_resid;
	} else if (nbp->b_bcount + boffset == bp->b_bcount) {
		/*
		 * No more data terminates the chain
		 */
#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p finished bcount %d\n",
			    bp, bp->b_bcount);
#endif
		bp->b_error = 0;
		bp->b_resid = 0;
	} else {
		/*
		 * Continue the chain
		 */
		boffset += nbp->b_bcount;
		nbp->b_data = bp->b_data + boffset;
		nbp->b_bcount = bp->b_bcount - boffset;
		if (nbp->b_bcount > chunksize)
			nbp->b_bcount = chunksize;
		nbp->b_bio1.bio_done = devfs_spec_strategy_done;
		nbp->b_bio1.bio_offset = bio->bio_offset + boffset;

#if SPEC_CHAIN_DEBUG & 1
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "spec_strategy: chain %p offset %d/%d bcount %d\n",
			    bp, boffset, bp->b_bcount, nbp->b_bcount);
#endif

		dev_dstrategy(nbp->b_vp->v_rdev, &nbp->b_bio1);
		return;
	}

	/*
	 * Fall through to here on termination.  biodone(bp) and
	 * clean up and free nbp.
	 */
	biodone(bio);
	BUF_UNLOCK(nbp);
	uninitbufbio(nbp);
	kfree(nbp, M_DEVBUF);
}
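
/*
 * The function below uses the common pattern for a one-off synchronous
 * BIO: allocate a throwaway buffer with geteblk(), mark the bio BIO_SYNC
 * with biodone_sync as the completion, dispatch, and biowait() for it.
 */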
/*
 * spec_freeblks(struct vnode *a_vp, daddr_t a_addr, daddr_t a_length)
 */
static int
devfs_spec_freeblks(struct vop_freeblks_args *ap)
{
	struct buf *bp;

	/*
	 * Must be a synchronous operation
	 */
	KKASSERT(ap->a_vp->v_rdev != NULL);
	if ((ap->a_vp->v_rdev->si_flags & SI_CANFREE) == 0)
		return (0);
	bp = geteblk(ap->a_length);
	bp->b_cmd = BUF_CMD_FREEBLKS;
	bp->b_bio1.bio_flags |= BIO_SYNC;
	bp->b_bio1.bio_offset = ap->a_offset;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bcount = ap->a_length;
	dev_dstrategy(ap->a_vp->v_rdev, &bp->b_bio1);
	biowait(&bp->b_bio1, "TRIM");
	brelse(bp);

	return (0);
}

/*
 * Implement degenerate case where the block requested is the block
 * returned, and assume that the entire device is contiguous in regards
 * to the contiguous block range (runp and runb).
 *
 * spec_bmap(struct vnode *a_vp, off_t a_loffset,
 *	     off_t *a_doffsetp, int *a_runp, int *a_runb)
 */
static int
devfs_spec_bmap(struct vop_bmap_args *ap)
{
	if (ap->a_doffsetp != NULL)
		*ap->a_doffsetp = ap->a_loffset;
	if (ap->a_runp != NULL)
		*ap->a_runp = MAXBSIZE;
	if (ap->a_runb != NULL) {
		if (ap->a_loffset < MAXBSIZE)
			*ap->a_runb = (int)ap->a_loffset;
		else
			*ap->a_runb = MAXBSIZE;
	}
	return (0);
}
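
/*
 * Example of the identity mapping above: a logical offset of 8192 maps
 * to device offset 8192, with MAXBSIZE bytes of "contiguous" run
 * reported in both directions (clipped to the distance from offset 0
 * on the backward run).
 */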

/*
 * Special device advisory byte-level locks.
 *
 * spec_advlock(struct vnode *a_vp, caddr_t a_id, int a_op,
 *		struct flock *a_fl, int a_flags)
 */
/* ARGSUSED */
static int
devfs_spec_advlock(struct vop_advlock_args *ap)
{
	return ((ap->a_flags & F_POSIX) ? EINVAL : EOPNOTSUPP);
}

/*
 * NOTE: MPSAFE callback.
 */
static void
devfs_spec_getpages_iodone(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio->bio_buf);
}

/*
 * spec_getpages() - get pages associated with device vnode.
 *
 * Note that spec_read and spec_write do not use the buffer cache, so we
 * must fully implement getpages here.
 */
static int
devfs_spec_getpages(struct vop_getpages_args *ap)
{
	vm_offset_t kva;
	int error;
	int i, pcount, size;
	struct buf *bp;
	vm_page_t m;
	vm_ooffset_t offset;
	int toff, nextoff, nread;
	struct vnode *vp = ap->a_vp;
	int blksiz;
	int gotreqpage;

	error = 0;
	pcount = round_page(ap->a_count) / PAGE_SIZE;

	/*
	 * Calculate the offset of the transfer and do sanity check.
	 */
	offset = IDX_TO_OFF(ap->a_m[0]->pindex) + ap->a_offset;

	/*
	 * Round up physical size for real devices.  We cannot round using
	 * v_mount's block size data because v_mount has nothing to do with
	 * the device.  i.e. it's usually '/dev'.  We need the physical block
	 * size for the device itself.
	 *
	 * We can't use v_rdev->si_mountpoint because it only exists when the
	 * block device is mounted.  However, we can use v_rdev.
	 */
	if (vn_isdisk(vp, NULL))
		blksiz = vp->v_rdev->si_bsize_phys;
	else
		blksiz = DEV_BSIZE;

	size = roundup2(ap->a_count, blksiz);

	bp = getpbuf_kva(NULL);
	kva = (vm_offset_t)bp->b_data;

	/*
	 * Map the pages to be read into the kva.
	 */
	pmap_qenter(kva, ap->a_m, pcount);

	/* Build a minimal buffer header. */
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bcount = size;
	bp->b_resid = 0;
	bsetrunningbufspace(bp, size);

	bp->b_bio1.bio_offset = offset;
	bp->b_bio1.bio_done = devfs_spec_getpages_iodone;

	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += pcount;

	/* Do the input. */
	vn_strategy(ap->a_vp, &bp->b_bio1);

	crit_enter();

	/* We definitely need to be at splbio here. */
	while (bp->b_cmd != BUF_CMD_DONE)
		tsleep(bp, 0, "spread", 0);

	crit_exit();

	if (bp->b_flags & B_ERROR) {
		if (bp->b_error)
			error = bp->b_error;
		else
			error = EIO;
	}

	/*
	 * If EOF is encountered we must zero-extend the result in order
	 * to ensure that the page does not contain garbage.  When no
	 * error occurs, an early EOF is indicated if b_bcount got truncated.
	 * b_resid is relative to b_bcount and should be 0, but some devices
	 * might indicate an EOF with b_resid instead of truncating b_bcount.
	 */
	nread = bp->b_bcount - bp->b_resid;
	if (nread < ap->a_count)
		bzero((caddr_t)kva + nread, ap->a_count - nread);
	pmap_qremove(kva, pcount);

	gotreqpage = 0;
	for (i = 0, toff = 0; i < pcount; i++, toff = nextoff) {
		nextoff = toff + PAGE_SIZE;
		m = ap->a_m[i];

		/*
		 * NOTE: vm_page_undirty/clear_dirty etc do not clear the
		 *	 pmap modified bit.  pmap modified bit should have
		 *	 already been cleared.
		 */
		if (nextoff <= nread) {
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (toff < nread) {
			/*
			 * Since this is a VM request, we have to supply the
			 * unaligned offset to allow vm_page_set_valid()
			 * to zero sub-DEV_BSIZE'd portions of the page.
			 */
			vm_page_set_valid(m, 0, nread - toff);
			vm_page_clear_dirty_end_nonincl(m, 0, nread - toff);
		} else {
			m->valid = 0;
			vm_page_undirty(m);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error || (m->valid == VM_PAGE_BITS_ALL)) {
				if (m->valid) {
					if (m->flags & PG_REFERENCED) {
						vm_page_activate(m);
					} else {
						vm_page_deactivate(m);
					}
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
				}
			} else {
				vm_page_free(m);
			}
		} else if (m->valid) {
			gotreqpage = 1;
			/*
			 * Since this is a VM request, we need to make the
			 * entire page presentable by zeroing invalid sections.
			 */
			if (m->valid != VM_PAGE_BITS_ALL)
				vm_page_zero_invalid(m, FALSE);
		}
	}
	if (!gotreqpage) {
		m = ap->a_m[ap->a_reqpage];
		devfs_debug(DEVFS_DEBUG_WARNING,
	    "spec_getpages:(%s) I/O read failure: (error=%d) bp %p vp %p\n",
			    devtoname(vp->v_rdev), error, bp, bp->b_vp);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "  size: %d, resid: %d, a_count: %d, valid: 0x%x\n",
			    size, bp->b_resid, ap->a_count, m->valid);
		devfs_debug(DEVFS_DEBUG_WARNING,
		    "  nread: %d, reqpage: %d, pindex: %lu, pcount: %d\n",
			    nread, ap->a_reqpage, (u_long)m->pindex, pcount);
		/*
		 * Free the buffer header back to the swap buffer pool.
		 */
		relpbuf(bp, NULL);
		return VM_PAGER_ERROR;
	}
	/*
	 * Free the buffer header back to the swap buffer pool.
	 */
	relpbuf(bp, NULL);
	if (DEVFS_NODE(ap->a_vp))
		nanotime(&DEVFS_NODE(ap->a_vp)->mtime);
	return VM_PAGER_OK;
}
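
/*
 * A worked example for the heuristic below: a process read()ing a
 * device sequentially in 64 KiB chunks adds ceil(65536 / MAXBSIZE) to
 * f_seqcount on every matching call until it saturates at IO_SEQMAX;
 * the value returned (f_seqcount << IO_SEQSHIFT) is merged into ioflag
 * so the driver can see how sequential the access pattern looks.  Any
 * non-sequential access quickly draws f_seqcount back down.
 */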
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		int tmpseq = fp->f_seqcount;

		tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}