/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}
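/*
 * Usage note (illustrative summary, not itself part of the VOP
 * protocol): the VOPs below accumulate kevent flags in a local
 * "kflags" variable (e.g. NOTE_WRITE | NOTE_EXTEND for an extending
 * write, NOTE_ATTRIB for attribute changes) and post them in one shot
 * via hammer2_knote() once all internal locks have been released.
 */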
/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		LOCKSTOP;
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.  Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	LOCKSTOP;
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL) {
		LOCKSTOP;
		return(0);
	}
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * An unlinked inode may have been relinked to the ihidden directory.
	 * This occurs if the inode was unlinked while open.  Reclamation of
	 * these inodes requires processing we cannot safely do here so add
	 * the inode to the unlinkq in that situation.
	 *
	 * A reclaim can occur at any time so we cannot safely start a
	 * transaction to handle reclamation of unlinked files.  Instead,
	 * the ip is left with a reference and placed on a linked list and
	 * handled later on.
	 */
	if ((ip->flags & HAMMER2_INODE_ISUNLINKED) &&
	    (ip->flags & HAMMER2_INODE_ISDELETED) == 0) {
		hammer2_inode_unlink_t *ipul;

		ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
		ipul->ip = ip;

		hammer2_spin_ex(&pmp->list_spin);
		TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		/* retain ref from vp for ipul */
	} else {
		hammer2_inode_drop(ip);			/* vp ref */
	}

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	LOCKSTOP;
	return (0);
}
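/*
 * Illustrative sketch (not compiled): how the unlinkq built up by the
 * reclaim code above can be drained later, from a context where a
 * transaction is safe.  The real work is done by
 * hammer2_inode_run_unlinkq(), which lives elsewhere in the driver;
 * the function name and the elided deletion step here are assumptions
 * for illustration only.
 */
#if 0
static void
example_run_unlinkq(hammer2_pfs_t *pmp)
{
	hammer2_inode_unlink_t *ipul;
	hammer2_inode_t *ip;

	hammer2_spin_ex(&pmp->list_spin);
	while ((ipul = TAILQ_FIRST(&pmp->unlinkq)) != NULL) {
		TAILQ_REMOVE(&pmp->unlinkq, ipul, entry);
		hammer2_spin_unex(&pmp->list_spin);
		ip = ipul->ip;
		kfree(ipul, pmp->minode);

		hammer2_inode_lock(ip, 0);
		/* ... finish destroying the unlinked inode's chains ... */
		hammer2_inode_unlock(ip);
		hammer2_inode_drop(ip);	/* ref retained by reclaim */

		hammer2_spin_ex(&pmp->list_spin);
	}
	hammer2_spin_unex(&pmp->list_spin);
}
#endif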
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	LOCKSTART;
	vp = ap->a_vp;
	ip = VTOI(vp);

#if 0
	/* XXX can't do this yet */
	hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
	hammer2_trans_init(ip->pmp, 0);
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

	/*
	 * Calling chain_flush here creates a lot of duplicative
	 * COW operations due to non-optimal vnode ordering.
	 *
	 * Only do it for an actual fsync() syscall.  The other forms
	 * which call this function will eventually call chain_flush
	 * on the volume root as a catch-all, which is far more optimal.
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_MODIFIED)
		hammer2_inode_chain_sync(ip);
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return (0);
}

static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	int error;

	LOCKSTART;
	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int i;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes < chain->bref.data_count)
					vap->va_bytes = chain->bref.data_count;
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return (0);
}
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	LOCKSTART;
	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		u_int32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				     hammer2_to_unix_xid(&ip->meta.uid),
				     ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_truncate_file(ip, vap->va_size);
			} else {
				hammer2_extend_file(ip, vap->va_size);
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
	}

done:
	/*
	 * If a truncation occurred we must call inode_fsync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp);
	hammer2_knote(ip->vp, kflags);

	LOCKSTOP;
	return (error);
}
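/*
 * Directory offset encoding used by hammer2_vop_readdir() below
 * (summary, as implemented by that code): offsets 0 and 1 are
 * artificial entries for "." and "..".  Real entries are keyed by the
 * directory hash with HAMMER2_DIRHASH_VISIBLE (bit 63) set; a resumed
 * scan re-sets the bit (lkey = saveoff | HAMMER2_DIRHASH_VISIBLE) and
 * the bit is stripped again (saveoff & ~HAMMER2_DIRHASH_VISIBLE)
 * before the offset is handed back to userland via uio_offset, so the
 * offset always remains a positive 64 bit quantity.
 */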
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int dtype;
	int r;

	LOCKSTART;
	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Setup directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip->pip && ip != ip->pmp->iroot)
			inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, hammer2_xop_readdir);

	for (;;) {
		const hammer2_inode_data_t *ripdata;

		error = hammer2_xop_collect(&xop->head, 0);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
		hammer2_cluster_bref(&xop->head.cluster, &bref);
		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			dtype = hammer2_get_dtype(ripdata);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> 16;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}
static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly) {
		return (EROFS);
	}

	seqcount = ap->a_ioflag >> 16;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 */
	hammer2_trans_init(ip->pmp, 0);
	error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
	hammer2_trans_done(ip->pmp);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

		error = cluster_read(ip->vp, leof, lbase, lblksize,
				     uio->uio_resid, seqcount * BKVASIZE,
				     &bp);

		if (error)
			break;
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomove((char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	return (error);
}
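/*
 * Worked example for the read loop above (illustrative, assuming a
 * 65536-byte logical buffer): a read of 1000 bytes at uio_offset 70000
 * yields lbase = 65536 and loff = 4464, and n is clamped to
 * min(lblksize - loff, uio_resid, size - uio_offset) = 1000, so the
 * copy never runs past the logical buffer, the request, or EOF.
 */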
/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= 65536);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread(ip->vp, lbase, lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		error = uiomove(bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but then failed to write
	 * the data through, the entire write is considered a failure and
	 * we have to back the EOF up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		hammer2_update_time(&ip->meta.mtime);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);

	return error;
}
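/*
 * Worked example for the write loop above (illustrative, assuming a
 * 65536-byte logical buffer): appending 1000 bytes at offset 0 of an
 * empty file clamps n to 1000; the write starts at the buffer base and
 * ends exactly at the new EOF, so trivial = 1 (endofblk = 0) and the
 * read-before-write is skipped.  Overwriting all of the second buffer
 * (offset 65536, 65536 bytes) gives loff = 0 and n = 65536: a full
 * cover, trivial = 1 and endofblk = 1.  A 1000-byte write at offset
 * 70000 is a partial overwrite: trivial = 0 and bread() pulls in the
 * missing bits first.
 */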
/*
 * Truncate the size of a file.  The inode must not be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	LOCKSTART;
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
				   HAMMER2_INODE_RESIZED);
	LOCKSTOP;
}
/*
 * Extend the size of a file.  The inode must not be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;

	LOCKSTART;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);

	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
		hammer2_inode_chain_sync(ip);
	}

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);

	LOCKSTOP;
}
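/*
 * Example (illustrative): data for very small files is embedded
 * directly in the inode (DIRECTDATA mode).  Extending, say, a 100-byte
 * file past HAMMER2_EMBEDDED_BYTES is the one extension case above
 * that sets INODE_RESIZED and synchronously calls
 * hammer2_inode_chain_sync(): the embedded data must be pushed out
 * into the indirect block table before the async strategy code can
 * safely run against the file.
 */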
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

	error = hammer2_xop_collect(&xop->head, 0);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	LOCKSTOP;

	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);

	if ((ip = dip->pip) == NULL) {
		*ap->a_vpp = NULL;
		LOCKSTOP;
		return ENOENT;
	}
	hammer2_inode_lock(ip, 0);
	*ap->a_vpp = hammer2_igetv(ip, &error);
	hammer2_inode_unlock(ip);

	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
				   0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}
	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}
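/*
 * Hardlink model assumed by hammer2_vop_nlink() and
 * hammer2_vop_nrename() below (summary, as implemented by their code):
 * the real inode for a hardlinked file is consolidated into a
 * directory that is a common parent of all of its links, and the
 * visible directory entries become OBJTYPE_HARDLINK pointers which
 * reference it by inode number.  A normal (non-hardlink-target) inode
 * is distinguished by having HAMMER2_DIRHASH_VISIBLE set in its
 * name_key.
 */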
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_xop_nlink_t *xop1;
	hammer2_inode_t *fdip;	/* current parent directory of (ip) */
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *cdip;	/* common parent directory */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * If ip is a hardlinked target then ip->pip represents the location
	 * of the hardlinked target, NOT the location of the hardlink pointer.
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster and ip->pip.  The
	 * returned cluster is locked.
	 */
	ip = VTOI(ap->a_vp);
	hammer2_pfs_memory_wait(ip->pmp);
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	fdip = ip->pip;
	cdip = hammer2_inode_common_parent(fdip, tdip);
	hammer2_inode_lock(cdip, 0);
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	/*
	 * If ip is not a hardlink target we must convert it to a hardlink.
	 * If fdip != cdip we must shift the inode to cdip.
	 */
	if (fdip != cdip || (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setip2(&xop1->head, ip);
		hammer2_xop_setip3(&xop1->head, cdip);

		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
		error = hammer2_xop_collect(&xop1->head, 0);
		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
		if (error == ENOENT)
			error = 0;
	}

	/*
	 * Must synchronize the original inode whose chains are now a
	 * hardlink target.  We must match what the backend XOP did to
	 * the chains.
	 */
	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		hammer2_inode_modify(ip);
		ip->meta.name_key = ip->meta.inum;
		ip->meta.name_len = 18;	/* "0x%016jx" */
	}

	/*
	 * Create the hardlink target and bump nlinks.
	 */
	if (error == 0) {
		hammer2_inode_create(tdip, NULL, NULL,
				     name, name_len, 0,
				     ip->meta.inum,
				     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
				     0, &error);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_unlock(cdip);
	hammer2_inode_drop(cdip);
	hammer2_trans_done(ip->pmp);

	LOCKSTOP;
	return error;
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
				   0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}
	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return (EROFS);
	}

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
				   0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
	} else {
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}
	hammer2_trans_done(dip->pmp);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	LOCKSTOP;
	return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
				   name, name_len, 0,
				   hammer2_trans_newinum(dip->pmp), 0, 0,
				   0, &error);
	if (error) {
		KKASSERT(nip == NULL);
		*ap->a_vpp = NULL;
		hammer2_trans_done(dip->pmp);
		return error;
	}
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}
	hammer2_trans_done(dip->pmp);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		/* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	ncp = ap->a_nch->ncp;

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	hammer2_inode_run_unlinkq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	LOCKSTART;
	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly) {
		LOCKSTOP;
		return(EROFS);
	}

	hammer2_pfs_memory_wait(dip->pmp);
	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
	hammer2_xop_start(&xop->head, hammer2_xop_unlink);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	hammer2_inode_unlock(dip);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}
	hammer2_inode_run_unlinkq(dip->pmp);
	hammer2_trans_done(dip->pmp);
	if (error == 0)
		cache_unlink(ap->a_nch);
	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *cdip;
	hammer2_inode_t *fdip;
	hammer2_inode_t *tdip;
	hammer2_inode_t *ip;
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int tnch_error;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly)
		return(EROFS);

	LOCKSTART;
	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_pfs_memory_wait(tdip->pmp);
	hammer2_trans_init(tdip->pmp, 0);

	/*
	 * ip is the inode being renamed.  If this is a hardlink then
	 * ip represents the actual file and not the hardlink marker.
	 */
	ip = VTOI(fncp->nc_vp);

	/*
	 * The common parent directory must be locked first to avoid deadlocks.
	 * Also note that fdip and/or tdip might match cdip.
	 */
	cdip = hammer2_inode_common_parent(ip->pip, tdip);
	hammer2_inode_lock(cdip, 0);
	hammer2_inode_lock(fdip, 0);
	hammer2_inode_lock(tdip, 0);
	hammer2_inode_ref(ip);		/* extra ref */
	error = 0;

	/*
	 * If ip is a hardlink target and fdip != cdip we must shift the
	 * inode to cdip.
	 */
	if (fdip != cdip &&
	    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
		hammer2_xop_nlink_t *xop1;

		xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setip2(&xop1->head, ip);
		hammer2_xop_setip3(&xop1->head, cdip);

		hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
		error = hammer2_xop_collect(&xop1->head, 0);
		hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
	}
	/*
	 * Delete the target namespace.
	 */
	{
		hammer2_xop_unlink_t *xop2;
		hammer2_inode_t *tip;
		int isopen;

		/*
		 * The unlink XOP unlinks the path from the directory and
		 * locates and returns the cluster associated with the real
		 * inode.  We have to handle nlinks here on the frontend.
		 */
		xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_setname(&xop2->head, tname, tname_len);
		isopen = cache_isopen(ap->a_tnch);
		xop2->isdir = -1;
		xop2->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
		hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

		/*
		 * Collect the real inode and adjust nlinks, destroy the real
		 * inode if nlinks transitions to 0 and it was the real inode
		 * (else it has already been removed).
		 */
		tnch_error = hammer2_xop_collect(&xop2->head, 0);
		/* hammer2_inode_unlock(tdip); */

		if (tnch_error == 0) {
			tip = hammer2_inode_get(tdip->pmp, NULL,
						&xop2->head.cluster, -1);
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
			if (tip) {
				hammer2_inode_unlink_finisher(tip, isopen);
				hammer2_inode_unlock(tip);
			}
		} else {
			hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
		}
		/* hammer2_inode_lock(tdip, 0); */

		if (tnch_error && tnch_error != ENOENT) {
			error = tnch_error;
			goto done2;
		}
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}
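	/*
	 * Worked example for the collision scan above (illustrative): if
	 * hammer2_dirhash() maps two different names to the same key K,
	 * the scanlhc XOP iterates the existing entries starting at K and
	 * the loop advances tlhc past each occupied key until an unused
	 * one is found.  Probing may only move within the low bits of the
	 * hash (HAMMER2_DIRHASH_LOMASK); exhausting that collision space
	 * fails the rename with ENOSPC.
	 */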
	/*
	 * Everything is setup, do the rename.
	 *
	 * We have to synchronize ip->meta to the underlying operation.
	 *
	 * NOTE: To avoid deadlocks we cannot lock (ip) while we are
	 *	 unlinking elements from their directories.  Locking
	 *	 the nlinks field does not lock the whole inode.
	 */
	hammer2_inode_lock(ip, 0);
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

		error = hammer2_xop_collect(&xop4->head, 0);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
	}

	/*
	 * Fixup ip->pip if we were renaming the actual file and not a
	 * hardlink pointer.
	 */
	if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
		hammer2_inode_t *opip;

		if (ip->pip != tdip) {
			hammer2_inode_ref(tdip);
			opip = ip->pip;
			ip->pip = tdip;
			if (opip)
				hammer2_inode_drop(opip);
		}
	}
	hammer2_inode_unlock(ip);
done2:
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_unlock(cdip);
	hammer2_inode_drop(ip);
	hammer2_inode_drop(cdip);
	hammer2_inode_run_unlinkq(fdip->pmp);
	hammer2_trans_done(tdip->pmp);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer structures, otherwise we might deadlock.
	 */
	if (tnch_error == 0) {
		cache_unlink(ap->a_tnch);
		cache_setunresolved(ap->a_tnch);
	}
	if (error == 0)
		cache_rename(ap->a_fnch, ap->a_tnch);

	LOCKSTOP;
	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	LOCKSTART;
	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	LOCKSTOP;
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	LOCKSTART;
	switch (ap->a_op) {
	case (MOUNTCTL_SET_EXPORT):
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = (EINVAL);
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	LOCKSTOP;
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}
static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return(EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_getpages	= vop_stdgetpages,
	.vop_putpages	= vop_stdputpages,
	.vop_access	= hammer2_vop_access,
	.vop_advlock	= hammer2_vop_advlock,
	.vop_close	= hammer2_vop_close,
	.vop_nlink	= hammer2_vop_nlink,
	.vop_ncreate	= hammer2_vop_ncreate,
	.vop_nsymlink	= hammer2_vop_nsymlink,
	.vop_nremove	= hammer2_vop_nremove,
	.vop_nrmdir	= hammer2_vop_nrmdir,
	.vop_nrename	= hammer2_vop_nrename,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_readdir	= hammer2_vop_readdir,
	.vop_readlink	= hammer2_vop_readlink,
	.vop_read	= hammer2_vop_read,
	.vop_write	= hammer2_vop_write,
	.vop_open	= hammer2_vop_open,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_nresolve	= hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir	= hammer2_vop_nmkdir,
	.vop_nmknod	= hammer2_vop_nmknod,
	.vop_ioctl	= hammer2_vop_ioctl,
	.vop_mountctl	= hammer2_vop_mountctl,
	.vop_bmap	= hammer2_vop_bmap,
	.vop_strategy	= hammer2_vop_strategy,
	.vop_kqfilter	= hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default	= vop_defaultop,
	.vop_fsync	= hammer2_vop_fsync,
	.vop_read	= vop_stdnoread,
	.vop_write	= vop_stdnowrite,
	.vop_access	= hammer2_vop_access,
	.vop_close	= hammer2_vop_close,
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr
};
struct vop_ops hammer2_fifo_vops = {
	.vop_default	= fifo_vnoperate,
	.vop_fsync	= hammer2_vop_fsync,
#if 0
	.vop_read	= hammer2_vop_fiforead,
	.vop_write	= hammer2_vop_fifowrite,
#endif
	.vop_access	= hammer2_vop_access,
#if 0
	.vop_close	= hammer2_vop_fifoclose,
#endif
	.vop_markatime	= hammer2_vop_markatime,
	.vop_getattr	= hammer2_vop_getattr,
	.vop_inactive	= hammer2_vop_inactive,
	.vop_reclaim	= hammer2_vop_reclaim,
	.vop_setattr	= hammer2_vop_setattr,
	.vop_kqfilter	= hammer2_vop_fifokqfilter
};