/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *       to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
                                int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
                                int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
        if (flags)
                KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(vp);
                LOCKSTOP;
                return (0);
        }

        /*
         * Check for deleted inodes and recycle immediately on the last
         * release.  Be sure to destroy any left-over buffer cache buffers
         * so we do not waste time trying to flush them.
         *
         * WARNING: nvtruncbuf() can only be safely called without the inode
         *          lock held due to the way our write thread works.
         */
        if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                hammer2_key_t lbase;
                int nblksize;

                /*
                 * Detect updates to the embedded data which may be
                 * synchronized by the strategy code.  Simply mark the
                 * inode modified so it gets picked up by our normal flush.
                 */
                nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
                nvtruncbuf(vp, 0, nblksize, 0, 0);
                vrecycle(vp);
        }
        LOCKSTOP;
        return (0);
}
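/*
 * vnode teardown order (context for the entry points above and below):
 * the kernel calls VOP_INACTIVE when the last reference on a still-cached
 * vnode goes away, and VOP_RECLAIM when the vnode is finally torn down.
 * Unlinked-but-open files therefore survive inactive and are queued on
 * pmp->unlinkq at reclaim time for later destruction (see the
 * hammer2_inode_run_unlinkq() calls in the unlink paths below).
 */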
/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
        hammer2_inode_t *ip;
        hammer2_pfs_t *pmp;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);
        if (ip == NULL) {
                LOCKSTOP;
                return (0);
        }
        pmp = ip->pmp;

        /*
         * The final close of a deleted file or directory marks it for
         * destruction.  The DELETED flag allows the flusher to shortcut
         * any modified blocks still unflushed (that is, just ignore them).
         *
         * HAMMER2 usually does not try to optimize the freemap by returning
         * deleted blocks to it as it does not usually know how many snapshots
         * might be referencing portions of the file/dir.
         */
        vp->v_data = NULL;
        ip->vp = NULL;

        /*
         * NOTE! We do not attempt to flush chains here, flushing is
         *       really fragile and could also deadlock.
         */
        vclrisdirty(vp);

        /*
         * Once reclaimed the inode is disconnected from the normal flush
         * mechanism and must be tracked separately.
         *
         * A reclaim can occur at any time so we cannot safely start a
         * transaction to handle reclamation of unlinked files.  Instead,
         * the ip is left with a reference and placed on a linked list and
         * handled later on.
         */
        if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
                hammer2_inode_unlink_t *ipul;

                ipul = kmalloc(sizeof(*ipul), pmp->minode, M_WAITOK | M_ZERO);
                ipul->ip = ip;

                hammer2_spin_ex(&pmp->list_spin);
                TAILQ_INSERT_TAIL(&pmp->unlinkq, ipul, entry);
                hammer2_spin_unex(&pmp->list_spin);
                /* retain ref from vp for ipul */
        } else {
                hammer2_inode_drop(ip);                 /* vp ref */
        }

        /*
         * XXX handle background sync when ip dirty, kernel will no longer
         * notify us regarding this inode because there is no longer a
         * vnode attached to it.
         */

        LOCKSTOP;
        return (0);
}

static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        LOCKSTART;
        vp = ap->a_vp;
        ip = VTOI(vp);

#if 0
        /* XXX can't do this yet */
        hammer2_trans_init(ip->pmp, HAMMER2_TRANS_ISFLUSH);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
#endif
        hammer2_trans_init(ip->pmp, 0);
        vfsync(vp, ap->a_waitfor, 1, NULL, NULL);

        /*
         * Calling chain_flush here creates a lot of duplicative
         * COW operations due to non-optimal vnode ordering.
         *
         * Only do it for an actual fsync() syscall.  The other forms
         * which call this function will eventually call chain_flush
         * on the volume root as a catch-all, which is far more optimal.
         */
        hammer2_inode_lock(ip, 0);
        if (ip->flags & HAMMER2_INODE_MODIFIED)
                hammer2_inode_fsync(ip);
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp);

        LOCKSTOP;
        return (0);
}
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        uid_t uid;
        gid_t gid;
        int error;

        LOCKSTART;
        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
        uid = hammer2_to_unix_xid(&ip->meta.uid);
        gid = hammer2_to_unix_xid(&ip->meta.gid);
        error = vop_helper_access(ap, uid, gid, ip->meta.mode, ip->meta.uflags);
        hammer2_inode_unlock(ip);

        LOCKSTOP;
        return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
        hammer2_pfs_t *pmp;
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;
        hammer2_chain_t *chain;
        int i;

        LOCKSTART;
        vp = ap->a_vp;
        vap = ap->a_vap;

        ip = VTOI(vp);
        pmp = ip->pmp;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

        vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
        vap->va_fileid = ip->meta.inum;
        vap->va_mode = ip->meta.mode;
        vap->va_nlink = ip->meta.nlinks;
        vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
        vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
        vap->va_rmajor = 0;
        vap->va_rminor = 0;
        vap->va_size = ip->meta.size;   /* protected by shared lock */
        vap->va_blocksize = HAMMER2_PBUFSIZE;
        vap->va_flags = ip->meta.uflags;
        hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
        hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
        vap->va_gen = 1;
        vap->va_bytes = 0;
        for (i = 0; i < ip->cluster.nchains; ++i) {
                if ((chain = ip->cluster.array[i].chain) != NULL) {
                        if (vap->va_bytes < chain->bref.data_count)
                                vap->va_bytes = chain->bref.data_count;
                }
        }
        vap->va_type = hammer2_get_vtype(ip->meta.type);
        vap->va_filerev = 0;
        vap->va_uid_uuid = ip->meta.uid;
        vap->va_gid_uuid = ip->meta.gid;
        vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
                          VA_FSID_UUID_VALID;

        hammer2_inode_unlock(ip);

        LOCKSTOP;
        return (0);
}
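/*
 * NOTE: HAMMER2 does not currently maintain a separate atime;
 *       hammer2_vop_getattr() above reports mtime for va_atime and
 *       hammer2_vop_setattr() below deliberately ignores atime updates
 *       (see its #if 0 block).  va_bytes is taken as the largest
 *       data_count among the cluster's chains rather than their sum,
 *       the chains nominally being copies of the same inode.
 */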
static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;
        struct vattr *vap;
        int error;
        int kflags = 0;
        uint64_t ctime;

        LOCKSTART;
        vp = ap->a_vp;
        vap = ap->a_vap;
        hammer2_update_time(&ctime);

        ip = VTOI(vp);

        if (ip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        hammer2_pfs_memory_wait(ip->pmp);
        hammer2_trans_init(ip->pmp, 0);
        hammer2_inode_lock(ip, 0);
        error = 0;

        if (vap->va_flags != VNOVAL) {
                u_int32_t flags;

                flags = ip->meta.uflags;
                error = vop_helper_setattr_flags(&flags, vap->va_flags,
                                     hammer2_to_unix_xid(&ip->meta.uid),
                                     ap->a_cred);
                if (error == 0) {
                        if (ip->meta.uflags != flags) {
                                hammer2_inode_modify(ip);
                                ip->meta.uflags = flags;
                                ip->meta.ctime = ctime;
                                kflags |= NOTE_ATTRIB;
                        }
                        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                                error = 0;
                                goto done;
                        }
                }
                goto done;
        }
        if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
                error = EPERM;
                goto done;
        }
        if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
                uuid_t uuid_uid;
                uuid_t uuid_gid;

                error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
                                         ap->a_cred,
                                         &cur_uid, &cur_gid, &cur_mode);
                if (error == 0) {
                        hammer2_guid_to_uuid(&uuid_uid, cur_uid);
                        hammer2_guid_to_uuid(&uuid_gid, cur_gid);
                        if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
                            bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
                            ip->meta.mode != cur_mode
                        ) {
                                hammer2_inode_modify(ip);
                                ip->meta.uid = uuid_uid;
                                ip->meta.gid = uuid_gid;
                                ip->meta.mode = cur_mode;
                                ip->meta.ctime = ctime;
                        }
                        kflags |= NOTE_ATTRIB;
                }
        }

        /*
         * Resize the file
         */
        if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
                switch(vp->v_type) {
                case VREG:
                        if (vap->va_size == ip->meta.size)
                                break;
                        if (vap->va_size < ip->meta.size) {
                                hammer2_truncate_file(ip, vap->va_size);
                        } else {
                                hammer2_extend_file(ip, vap->va_size);
                        }
                        hammer2_inode_modify(ip);
                        ip->meta.mtime = ctime;
                        break;
                default:
                        error = EINVAL;
                        goto done;
                }
        }
#if 0
        /* atime not supported */
        if (vap->va_atime.tv_sec != VNOVAL) {
                hammer2_inode_modify(ip);
                ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
                kflags |= NOTE_ATTRIB;
        }
#endif
        if (vap->va_mode != (mode_t)VNOVAL) {
                mode_t cur_mode = ip->meta.mode;
                uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
                gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

                error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
                                         cur_uid, cur_gid, &cur_mode);
                if (error == 0 && ip->meta.mode != cur_mode) {
                        hammer2_inode_modify(ip);
                        ip->meta.mode = cur_mode;
                        ip->meta.ctime = ctime;
                        kflags |= NOTE_ATTRIB;
                }
        }

        if (vap->va_mtime.tv_sec != VNOVAL) {
                hammer2_inode_modify(ip);
                ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
                kflags |= NOTE_ATTRIB;
        }

done:
        /*
         * If a truncation occurred we must call inode_fsync() now in order
         * to trim the related data chains, otherwise a later expansion can
         * cause havoc.
         *
         * If an extend occurred that changed the DIRECTDATA state, we must
         * call inode_fsync now in order to prepare the inode's indirect
         * block table.
         */
        if (ip->flags & HAMMER2_INODE_RESIZED)
                hammer2_inode_fsync(ip);

        /*
         * Cleanup.
         */
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp);
        hammer2_knote(ip->vp, kflags);

        LOCKSTOP;
        return (error);
}
static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
        hammer2_xop_readdir_t *xop;
        hammer2_blockref_t bref;
        hammer2_inode_t *ip;
        hammer2_tid_t inum;
        hammer2_key_t lkey;
        struct uio *uio;
        off_t *cookies;
        off_t saveoff;
        int cookie_index;
        int ncookies;
        int error;
        int eofflag;
        int dtype;
        int r;

        LOCKSTART;
        ip = VTOI(ap->a_vp);
        uio = ap->a_uio;
        saveoff = uio->uio_offset;
        eofflag = 0;
        error = 0;

        /*
         * Set up directory entry cookies if requested
         */
        if (ap->a_ncookies) {
                ncookies = uio->uio_resid / 16 + 1;
                if (ncookies > 1024)
                        ncookies = 1024;
                cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
        } else {
                ncookies = -1;
                cookies = NULL;
        }
        cookie_index = 0;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

        /*
         * Handle artificial entries.  To ensure that only positive 64 bit
         * quantities are returned to userland we always strip off bit 63.
         * The hash code is designed such that codes 0x0000-0x7FFF are not
         * used, allowing us to use these codes for artificial entries.
         *
         * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
         * allow '..' to cross the mount point into (e.g.) the super-root.
         */
        if (saveoff == 0) {
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        if (saveoff == 1) {
                /*
                 * Be careful with lockorder when accessing ".."
                 *
                 * (ip is the current dir. xip is the parent dir).
                 */
                inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                if (ip->pip && ip != ip->pmp->iroot)
                        inum = ip->pip->meta.inum & HAMMER2_DIRHASH_USERMSK;
                r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
                if (r)
                        goto done;
                if (cookies)
                        cookies[cookie_index] = saveoff;
                ++saveoff;
                ++cookie_index;
                if (cookie_index == ncookies)
                        goto done;
        }

        lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: lkey %016jx\n", lkey);
        if (error)
                goto done;

        /*
         * Use XOP for cluster scan.
         *
         * parent is the inode cluster, already locked for us.  Don't
         * double lock shared locks as this will screw up upgrades.
         */
        xop = hammer2_xop_alloc(ip, 0);
        xop->lkey = lkey;
        hammer2_xop_start(&xop->head, hammer2_xop_readdir);

        for (;;) {
                const hammer2_inode_data_t *ripdata;

                error = hammer2_xop_collect(&xop->head, 0);
                if (error)
                        break;
                if (cookie_index == ncookies)
                        break;
                if (hammer2_debug & 0x0020)
                        kprintf("cluster chain %p %p\n",
                                xop->head.cluster.focus,
                                (xop->head.cluster.focus ?
                                 xop->head.cluster.focus->data : (void *)-1));
                ripdata = &hammer2_cluster_rdata(&xop->head.cluster)->ipdata;
                hammer2_cluster_bref(&xop->head.cluster, &bref);
                if (bref.type == HAMMER2_BREF_TYPE_INODE) {
                        dtype = hammer2_get_dtype(ripdata);
                        saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
                        r = vop_write_dirent(&error, uio,
                                             ripdata->meta.inum &
                                              HAMMER2_DIRHASH_USERMSK,
                                             dtype,
                                             ripdata->meta.name_len,
                                             ripdata->filename);
                        if (r)
                                break;
                        if (cookies)
                                cookies[cookie_index] = saveoff;
                        ++cookie_index;
                } else {
                        /* XXX chain error */
                        kprintf("bad chain type readdir %d\n", bref.type);
                }
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        if (error == ENOENT) {
                error = 0;
                eofflag = 1;
                saveoff = (hammer2_key_t)-1;
        } else {
                saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
        }
done:
        hammer2_inode_unlock(ip);
        if (ap->a_eofflag)
                *ap->a_eofflag = eofflag;
        if (hammer2_debug & 0x0020)
                kprintf("readdir: done at %016jx\n", saveoff);
        uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
        if (error && cookie_index == 0) {
                if (cookies) {
                        kfree(cookies, M_TEMP);
                        *ap->a_ncookies = 0;
                        *ap->a_cookies = NULL;
                }
        } else {
                if (cookies) {
                        *ap->a_ncookies = cookie_index;
                        *ap->a_cookies = cookies;
                }
        }
        LOCKSTOP;
        return (error);
}
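/*
 * Directory hash key layout relied upon by hammer2_vop_readdir() above
 * and the scanlhc loop in hammer2_vop_nrename() (sketch; see
 * hammer2_disk.h for the authoritative mask definitions):
 *
 *      bit  63         HAMMER2_DIRHASH_VISIBLE, stripped via
 *                      HAMMER2_DIRHASH_USERMSK so cookies returned to
 *                      userland stay positive
 *      bits 15-62      hash of the filename
 *      bits 0-14       HAMMER2_DIRHASH_LOMASK iteration space used to
 *                      resolve hash collisions
 *
 * Real keys never land in 0x0000-0x7FFF, which is what frees offsets
 * 0 and 1 for the synthesized "." and ".." entries.
 */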
/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        int error;

        vp = ap->a_vp;
        if (vp->v_type != VLNK)
                return (EINVAL);
        ip = VTOI(vp);

        error = hammer2_read_file(ip, ap->a_uio, 0);
        return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
        struct vnode *vp;
        hammer2_inode_t *ip;
        struct uio *uio;
        int error;
        int seqcount;
        int bigread;

        /*
         * Read operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;

        seqcount = ap->a_ioflag >> 16;
        bigread = (uio->uio_resid > 100 * 1024 * 1024);

        error = hammer2_read_file(ip, uio, seqcount);
        return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
        hammer2_inode_t *ip;
        thread_t td;
        struct vnode *vp;
        struct uio *uio;
        int error;
        int seqcount;

        /*
         * Write operations supported on this vnode?
         */
        vp = ap->a_vp;
        if (vp->v_type != VREG)
                return (EINVAL);

        /*
         * Misc
         */
        ip = VTOI(vp);
        uio = ap->a_uio;
        error = 0;
        if (ip->pmp->ronly) {
                return (EROFS);
        }

        seqcount = ap->a_ioflag >> 16;

        /*
         * Check resource limit
         */
        if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
            uio->uio_offset + uio->uio_resid >
             td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * The transaction interlocks against flush initiations
         * (note: but will run concurrently with the actual flush).
         */
        hammer2_trans_init(ip->pmp, 0);
        error = hammer2_write_file(ip, uio, ap->a_ioflag, seqcount);
        hammer2_trans_done(ip->pmp);

        return (error);
}
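/*
 * Both hammer2_read_file() and hammer2_write_file() below walk the uio
 * one logical buffer at a time.  hammer2_calc_logical() picks the logical
 * block size for a given file offset and returns the buffer base (and
 * optionally the logical EOF), so each loop pass conceptually computes
 * (sketch of the code below, not an additional code path):
 *
 *      lblksize = hammer2_calc_logical(ip, uio->uio_offset, &lbase, &leof);
 *      loff = (int)(uio->uio_offset - lbase);  (offset within the buffer)
 *      n = lblksize - loff;                    (bytes left in the buffer)
 *
 * with n further clamped against uio_resid and the file EOF.
 */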
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
        hammer2_off_t size;
        struct buf *bp;
        int error;

        error = 0;

        /*
         * UIO read loop.
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_sh(&ip->lock);
        size = ip->meta.size;
        hammer2_mtx_unlock(&ip->lock);

        while (uio->uio_resid > 0 && uio->uio_offset < size) {
                hammer2_key_t lbase;
                hammer2_key_t leof;
                int lblksize;
                int loff;
                int n;

                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, &leof);

                error = cluster_read(ip->vp, leof, lbase, lblksize,
                                     uio->uio_resid, seqcount * BKVASIZE,
                                     &bp);

                if (error)
                        break;
                loff = (int)(uio->uio_offset - lbase);
                n = lblksize - loff;
                if (n > uio->uio_resid)
                        n = uio->uio_resid;
                if (n > size - uio->uio_offset)
                        n = (int)(size - uio->uio_offset);
                bp->b_flags |= B_AGE;
                uiomove((char *)bp->b_data + loff, n, uio);
                bqrelse(bp);
        }
        return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
                   int ioflag, int seqcount)
{
        hammer2_key_t old_eof;
        hammer2_key_t new_eof;
        struct buf *bp;
        int kflags;
        int error;
        int modified;

        /*
         * Setup if append
         *
         * WARNING! Assumes that the kernel interlocks size changes at the
         *          vnode level.
         */
        hammer2_mtx_ex(&ip->lock);
        if (ioflag & IO_APPEND)
                uio->uio_offset = ip->meta.size;
        old_eof = ip->meta.size;

        /*
         * Extend the file if necessary.  If the write fails at some point
         * we will truncate it back down to cover as much as we were able
         * to write.
         *
         * Doing this now makes it easier to calculate buffer sizes in
         * the loop.
         */
        kflags = 0;
        error = 0;
        modified = 0;

        if (uio->uio_offset + uio->uio_resid > old_eof) {
                new_eof = uio->uio_offset + uio->uio_resid;
                modified = 1;
                hammer2_extend_file(ip, new_eof);
                kflags |= NOTE_EXTEND;
        } else {
                new_eof = old_eof;
        }
        hammer2_mtx_unlock(&ip->lock);

        /*
         * UIO write loop
         */
        while (uio->uio_resid > 0) {
                hammer2_key_t lbase;
                int trivial;
                int endofblk;
                int lblksize;
                int loff;
                int n;

                /*
                 * Don't allow the buffer build to blow out the buffer
                 * cache.
                 */
                if ((ioflag & IO_RECURSE) == 0)
                        bwillwrite(HAMMER2_PBUFSIZE);

                /*
                 * This nominally tells us how much we can cluster and
                 * what the logical buffer size needs to be.  Currently
                 * we don't try to cluster the write and just handle one
                 * block at a time.
                 */
                lblksize = hammer2_calc_logical(ip, uio->uio_offset,
                                                &lbase, NULL);
                loff = (int)(uio->uio_offset - lbase);

                KKASSERT(lblksize <= 65536);

                /*
                 * Calculate bytes to copy this transfer and whether the
                 * copy completely covers the buffer or not.
                 */
                trivial = 0;
                n = lblksize - loff;
                if (n > uio->uio_resid) {
                        n = uio->uio_resid;
                        if (loff == lbase && uio->uio_offset + n == new_eof)
                                trivial = 1;
                        endofblk = 0;
                } else {
                        if (loff == 0)
                                trivial = 1;
                        endofblk = 1;
                }

                /*
                 * Get the buffer
                 */
                if (uio->uio_segflg == UIO_NOCOPY) {
                        /*
                         * Issuing a write with the same data backing the
                         * buffer.  Instantiate the buffer to collect the
                         * backing vm pages, then read-in any missing bits.
                         *
                         * This case is used by vop_stdputpages().
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0) {
                                bqrelse(bp);
                                error = bread(ip->vp, lbase, lblksize, &bp);
                        }
                } else if (trivial) {
                        /*
                         * Even though we are entirely overwriting the buffer
                         * we may still have to zero it out to avoid a
                         * mmap/write visibility issue.
                         */
                        bp = getblk(ip->vp, lbase, lblksize, GETBLK_BHEAVY, 0);
                        if ((bp->b_flags & B_CACHE) == 0)
                                vfs_bio_clrbuf(bp);
                } else {
                        /*
                         * Partial overwrite, read in any missing bits then
                         * replace the portion being written.
                         *
                         * (The strategy code will detect zero-fill physical
                         * blocks for this case).
                         */
                        error = bread(ip->vp, lbase, lblksize, &bp);
                        if (error == 0)
                                bheavy(bp);
                }

                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * Ok, copy the data in
                 */
                error = uiomove(bp->b_data + loff, n, uio);
                kflags |= NOTE_WRITE;
                modified = 1;
                if (error) {
                        brelse(bp);
                        break;
                }

                /*
                 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
                 *          with IO_SYNC or IO_ASYNC set.  These writes
                 *          must be handled as the pageout daemon expects.
                 */
                if (ioflag & IO_SYNC) {
                        bwrite(bp);
                } else if ((ioflag & IO_DIRECT) && endofblk) {
                        bawrite(bp);
                } else if (ioflag & IO_ASYNC) {
                        bawrite(bp);
                } else {
                        bdwrite(bp);
                }
        }

        /*
         * Cleanup.  If we extended the file EOF but then failed to write
         * it through, the entire write is a failure and we have to back up.
         */
        if (error && new_eof != old_eof) {
                hammer2_mtx_ex(&ip->lock);
                hammer2_truncate_file(ip, old_eof);
                if (ip->flags & HAMMER2_INODE_MODIFIED)
                        hammer2_inode_fsync(ip);
                hammer2_mtx_unlock(&ip->lock);
        } else if (modified) {
                hammer2_mtx_ex(&ip->lock);
                hammer2_inode_modify(ip);
                hammer2_update_time(&ip->meta.mtime);
                if (ip->flags & HAMMER2_INODE_MODIFIED)
                        hammer2_inode_fsync(ip);
                hammer2_mtx_unlock(&ip->lock);
                hammer2_knote(ip->vp, kflags);
        }
        hammer2_trans_assert_strategy(ip->pmp);

        return error;
}
/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *          held due to the way our write thread works.  If the truncation
 *          occurs in the middle of a buffer, nvtruncbuf() is responsible
 *          for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *          if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        int nblksize;

        LOCKSTART;
        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
                nvtruncbuf(ip->vp, nsize,
                           nblksize, (int)nsize & (nblksize - 1),
                           0);
        }
        hammer2_mtx_ex(&ip->lock);
        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        ip->osize = ip->meta.size;
        ip->meta.size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED |
                                   HAMMER2_INODE_RESIZED);
        LOCKSTOP;
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_fsync() is required
 * to prepare the inode cluster's indirect block table.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *          vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *          of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
        hammer2_key_t lbase;
        hammer2_key_t osize;
        int oblksize;
        int nblksize;

        LOCKSTART;

        KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
        osize = ip->meta.size;
        ip->osize = osize;
        ip->meta.size = nsize;
        atomic_set_int(&ip->flags, HAMMER2_INODE_MODIFIED);

        if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES)
                atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);

        hammer2_mtx_unlock(&ip->lock);
        if (ip->vp) {
                oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
                nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
                nvextendbuf(ip->vp,
                            osize, nsize,
                            oblksize, nblksize,
                            -1, -1, 0);
        }
        hammer2_mtx_ex(&ip->lock);

        LOCKSTOP;
}
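/*
 * Note on the EMBEDDED_BYTES boundary tested in hammer2_extend_file()
 * above: sufficiently small files are stored directly in the inode's
 * embedded data area.  Growing past HAMMER2_EMBEDDED_BYTES forces the
 * transition out of DIRECTDATA mode to a real block table, which is why
 * INODE_RESIZED is set only for that crossing and a follow-up
 * hammer2_inode_fsync() is required (see hammer2_vop_setattr()).
 */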
static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
        hammer2_xop_nresolve_t *xop;
        hammer2_inode_t *ip;
        hammer2_inode_t *dip;
        struct namecache *ncp;
        struct vnode *vp;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        xop = hammer2_xop_alloc(dip, 0);

        ncp = ap->a_nch->ncp;
        hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

        /*
         * Note: In DragonFly the kernel handles '.' and '..'.
         */
        hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
        hammer2_xop_start(&xop->head, hammer2_xop_nresolve);

        error = hammer2_xop_collect(&xop->head, 0);
        if (error) {
                ip = NULL;
        } else {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
        }
        hammer2_inode_unlock(dip);

        /*
         * Acquire the related vnode
         *
         * NOTE: For error processing, only ENOENT resolves the namecache
         *       entry to NULL, otherwise we just return the error and
         *       leave the namecache unresolved.
         *
         * NOTE: multiple hammer2_inode structures can be aliased to the
         *       same chain element, for example for hardlinks.  This
         *       use case does not 'reattach' inode associations that
         *       might already exist, but always allocates a new one.
         *
         * WARNING: inode structure is locked exclusively via inode_get
         *          but chain was locked shared.  inode_unlock()
         *          will handle it properly.
         */
        if (ip) {
                vp = hammer2_igetv(ip, &error);
                if (error == 0) {
                        vn_unlock(vp);
                        cache_setvp(ap->a_nch, vp);
                } else if (error == ENOENT) {
                        cache_setvp(ap->a_nch, NULL);
                }
                hammer2_inode_unlock(ip);

                /*
                 * The vp should not be released until after we've disposed
                 * of our locks, because it might cause vop_inactive() to
                 * be called.
                 */
                if (vp)
                        vrele(vp);
        } else {
                error = ENOENT;
                cache_setvp(ap->a_nch, NULL);
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
                ("resolve error %d/%p ap %p\n",
                 error, ap->a_nch->ncp->nc_vp, ap));
        LOCKSTOP;

        return error;
}
static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *ip;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);

        if ((ip = dip->pip) == NULL) {
                *ap->a_vpp = NULL;
                LOCKSTOP;
                return ENOENT;
        }
        hammer2_inode_lock(ip, 0);
        *ap->a_vpp = hammer2_igetv(ip, &error);
        hammer2_inode_unlock(ip);

        LOCKSTOP;
        return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);
        nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
                                   name, name_len, 0,
                                   hammer2_trans_newinum(dip->pmp), 0, 0,
                                   0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, &error);
                hammer2_inode_unlock(nip);
        }
        hammer2_trans_done(dip->pmp);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
        return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
        hammer2_inode_t *ip = VTOI(ap->a_vp);
        hammer2_off_t size;

        size = ip->meta.size;
        return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
        return vop_stdclose(ap);
}
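/*
 * Hardlink model used by hammer2_vop_nlink() and hammer2_vop_nrename()
 * below (sketch): a normal directory entry has HAMMER2_DIRHASH_VISIBLE
 * set in its name_key.  When a file gains additional links the real
 * inode is converted into a hidden hardlink target keyed by its inum
 * and shifted to the directory common to all the links, while each
 * visible entry becomes an OBJTYPE_HARDLINK pointer referencing that
 * target by inum.
 */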
/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
        hammer2_xop_nlink_t *xop1;
        hammer2_inode_t *fdip;  /* source directory (current parent of ip) */
        hammer2_inode_t *tdip;  /* target directory to create link in */
        hammer2_inode_t *cdip;  /* common parent directory */
        hammer2_inode_t *ip;    /* inode we are hardlinking to */
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        tdip = VTOI(ap->a_dvp);
        if (tdip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;

        /*
         * ip represents the file being hardlinked.  The file could be a
         * normal file or a hardlink target if it has already been hardlinked.
         * If ip is a hardlinked target then ip->pip represents the location
         * of the hardlinked target, NOT the location of the hardlink pointer.
         *
         * Bump nlinks and potentially also create or move the hardlink
         * target in the parent directory common to (ip) and (tdip).  The
         * consolidation code can modify ip->cluster and ip->pip.  The
         * returned cluster is locked.
         */
        ip = VTOI(ap->a_vp);
        hammer2_pfs_memory_wait(ip->pmp);
        hammer2_trans_init(ip->pmp, 0);

        /*
         * The common parent directory must be locked first to avoid deadlocks.
         * Also note that fdip and/or tdip might match cdip.
         */
        fdip = ip->pip;
        cdip = hammer2_inode_common_parent(fdip, tdip);
        hammer2_inode_lock(cdip, 0);
        hammer2_inode_lock(fdip, 0);
        hammer2_inode_lock(tdip, 0);
        hammer2_inode_lock(ip, 0);
        error = 0;

        /*
         * If ip is not a hardlink target we must convert it to a hardlink.
         * If fdip != cdip we must shift the inode to cdip.
         */
        if (fdip != cdip || (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
                xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_setip2(&xop1->head, ip);
                hammer2_xop_setip3(&xop1->head, cdip);

                hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
                error = hammer2_xop_collect(&xop1->head, 0);
                hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
                if (error == ENOENT)
                        error = 0;
        }

        /*
         * Must synchronize original inode whose chains are now a hardlink
         * target.  We must match what the backend XOP did to the
         * chains.
         */
        if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
                hammer2_inode_modify(ip);
                ip->meta.name_key = ip->meta.inum;
                ip->meta.name_len = 18; /* "0x%016jx" */
        }

        /*
         * Create the hardlink target and bump nlinks.
         */
        if (error == 0) {
                hammer2_inode_create(tdip, NULL, NULL,
                                     name, name_len, 0,
                                     ip->meta.inum,
                                     HAMMER2_OBJTYPE_HARDLINK, ip->meta.type,
                                     0, &error);
                hammer2_inode_modify(ip);
                ++ip->meta.nlinks;
        }
        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, ap->a_vp);
        }
        hammer2_inode_unlock(ip);
        hammer2_inode_unlock(tdip);
        hammer2_inode_unlock(fdip);
        hammer2_inode_unlock(cdip);
        hammer2_inode_drop(cdip);
        hammer2_trans_done(ip->pmp);

        LOCKSTOP;
        return error;
}
/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);

        nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
                                   name, name_len, 0,
                                   hammer2_trans_newinum(dip->pmp), 0, 0,
                                   0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, &error);
                hammer2_inode_unlock(nip);
        }
        hammer2_trans_done(dip->pmp);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);

        nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
                                   name, name_len, 0,
                                   hammer2_trans_newinum(dip->pmp), 0, 0,
                                   0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
        } else {
                *ap->a_vpp = hammer2_igetv(nip, &error);
                hammer2_inode_unlock(nip);
        }
        hammer2_trans_done(dip->pmp);

        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
        }
        LOCKSTOP;
        return error;
}
/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
        hammer2_inode_t *dip;
        hammer2_inode_t *nip;
        struct namecache *ncp;
        const uint8_t *name;
        size_t name_len;
        int error;

        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly)
                return (EROFS);

        ncp = ap->a_nch->ncp;
        name = ncp->nc_name;
        name_len = ncp->nc_nlen;
        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);

        ap->a_vap->va_type = VLNK;      /* enforce type */

        nip = hammer2_inode_create(dip, ap->a_vap, ap->a_cred,
                                   name, name_len, 0,
                                   hammer2_trans_newinum(dip->pmp), 0, 0,
                                   0, &error);
        if (error) {
                KKASSERT(nip == NULL);
                *ap->a_vpp = NULL;
                hammer2_trans_done(dip->pmp);
                return error;
        }
        *ap->a_vpp = hammer2_igetv(nip, &error);

        /*
         * Build the softlink (~like file data) and finalize the namecache.
         */
        if (error == 0) {
                size_t bytes;
                struct uio auio;
                struct iovec aiov;

                bytes = strlen(ap->a_target);

                hammer2_inode_unlock(nip);
                bzero(&auio, sizeof(auio));
                bzero(&aiov, sizeof(aiov));
                auio.uio_iov = &aiov;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_WRITE;
                auio.uio_resid = bytes;
                auio.uio_iovcnt = 1;
                auio.uio_td = curthread;
                aiov.iov_base = ap->a_target;
                aiov.iov_len = bytes;
                error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
                /* XXX handle error */
                error = 0;
        } else {
                hammer2_inode_unlock(nip);
        }
        hammer2_trans_done(dip->pmp);

        /*
         * Finalize namecache
         */
        if (error == 0) {
                cache_setunresolved(ap->a_nch);
                cache_setvp(ap->a_nch, *ap->a_vpp);
                /* hammer2_knote(ap->a_dvp, NOTE_WRITE); */
        }
        return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
        hammer2_xop_unlink_t *xop;
        hammer2_inode_t *dip;
        hammer2_inode_t *ip;
        struct namecache *ncp;
        int error;
        int isopen;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        ncp = ap->a_nch->ncp;

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);
        hammer2_inode_lock(dip, 0);

        /*
         * The unlink XOP unlinks the path from the directory and
         * locates and returns the cluster associated with the real inode.
         * We have to handle nlinks here on the frontend.
         */
        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
        isopen = cache_isopen(ap->a_nch);
        xop->isdir = 0;
        xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
        hammer2_xop_start(&xop->head, hammer2_xop_unlink);

        /*
         * Collect the real inode and adjust nlinks, destroy the real
         * inode if nlinks transitions to 0 and it was the real inode
         * (else it has already been removed).
         */
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_inode_unlock(dip);

        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, isopen);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }

        hammer2_inode_run_unlinkq(dip->pmp);
        hammer2_trans_done(dip->pmp);
        if (error == 0)
                cache_unlink(ap->a_nch);
        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
        hammer2_xop_unlink_t *xop;
        hammer2_inode_t *dip;
        hammer2_inode_t *ip;
        struct namecache *ncp;
        int isopen;
        int error;

        LOCKSTART;
        dip = VTOI(ap->a_dvp);
        if (dip->pmp->ronly) {
                LOCKSTOP;
                return (EROFS);
        }

        hammer2_pfs_memory_wait(dip->pmp);
        hammer2_trans_init(dip->pmp, 0);
        hammer2_inode_lock(dip, 0);

        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

        ncp = ap->a_nch->ncp;
        hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
        isopen = cache_isopen(ap->a_nch);
        xop->isdir = 1;
        xop->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
        hammer2_xop_start(&xop->head, hammer2_xop_unlink);

        /*
         * Collect the real inode and adjust nlinks, destroy the real
         * inode if nlinks transitions to 0 and it was the real inode
         * (else it has already been removed).
         */
        error = hammer2_xop_collect(&xop->head, 0);
        hammer2_inode_unlock(dip);

        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, isopen);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
        hammer2_inode_run_unlinkq(dip->pmp);
        hammer2_trans_done(dip->pmp);
        if (error == 0)
                cache_unlink(ap->a_nch);
        LOCKSTOP;
        return (error);
}
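/*
 * Rename sequencing implemented below (roadmap of the XOPs involved):
 *
 *      1. Lock the common parent (cdip) first, then fdip and tdip.
 *      2. If the source is a hidden hardlink target and fdip != cdip,
 *         shift it to the common parent (hammer2_xop_nlink).
 *      3. Delete any existing target namespace (hammer2_xop_unlink).
 *      4. Probe (tdip, tname) for a free directory hash key within the
 *         DIRHASH_LOMASK iteration space (hammer2_xop_scanlhc).
 *      5. Execute the rename (hammer2_xop_nrename) and synchronize
 *         ip->meta / ip->pip on success.
 */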
/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
        struct namecache *fncp;
        struct namecache *tncp;
        hammer2_inode_t *cdip;
        hammer2_inode_t *fdip;
        hammer2_inode_t *tdip;
        hammer2_inode_t *ip;
        const uint8_t *fname;
        size_t fname_len;
        const uint8_t *tname;
        size_t tname_len;
        int error;
        int tnch_error;
        hammer2_key_t tlhc;

        if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
                return (EXDEV);
        if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
                return (EXDEV);

        fdip = VTOI(ap->a_fdvp);        /* source directory */
        tdip = VTOI(ap->a_tdvp);        /* target directory */

        if (fdip->pmp->ronly)
                return (EROFS);

        LOCKSTART;
        fncp = ap->a_fnch->ncp;         /* entry name in source */
        fname = fncp->nc_name;
        fname_len = fncp->nc_nlen;

        tncp = ap->a_tnch->ncp;         /* entry name in target */
        tname = tncp->nc_name;
        tname_len = tncp->nc_nlen;

        hammer2_pfs_memory_wait(tdip->pmp);
        hammer2_trans_init(tdip->pmp, 0);

        /*
         * ip is the inode being renamed.  If this is a hardlink then
         * ip represents the actual file and not the hardlink marker.
         */
        ip = VTOI(fncp->nc_vp);

        /*
         * The common parent directory must be locked first to avoid deadlocks.
         * Also note that fdip and/or tdip might match cdip.
         */
        cdip = hammer2_inode_common_parent(ip->pip, tdip);
        hammer2_inode_lock(cdip, 0);
        hammer2_inode_lock(fdip, 0);
        hammer2_inode_lock(tdip, 0);
        hammer2_inode_ref(ip);          /* extra ref */
        error = 0;

        /*
         * If ip is a hardlink target and fdip != cdip we must shift the
         * inode to cdip.
         */
        if (fdip != cdip &&
            (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0) {
                hammer2_xop_nlink_t *xop1;

                xop1 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_setip2(&xop1->head, ip);
                hammer2_xop_setip3(&xop1->head, cdip);

                hammer2_xop_start(&xop1->head, hammer2_xop_nlink);
                error = hammer2_xop_collect(&xop1->head, 0);
                hammer2_xop_retire(&xop1->head, HAMMER2_XOPMASK_VOP);
        }

        /*
         * Delete the target namespace.
         */
        {
                hammer2_xop_unlink_t *xop2;
                hammer2_inode_t *tip;
                int isopen;

                /*
                 * The unlink XOP unlinks the path from the directory and
                 * locates and returns the cluster associated with the real
                 * inode.  We have to handle nlinks here on the frontend.
                 */
                xop2 = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_setname(&xop2->head, tname, tname_len);
                isopen = cache_isopen(ap->a_tnch);
                xop2->isdir = -1;
                xop2->dopermanent = isopen ? 0 : HAMMER2_DELETE_PERMANENT;
                hammer2_xop_start(&xop2->head, hammer2_xop_unlink);

                /*
                 * Collect the real inode and adjust nlinks, destroy the real
                 * inode if nlinks transitions to 0 and it was the real inode
                 * (else it has already been removed).
                 */
                tnch_error = hammer2_xop_collect(&xop2->head, 0);
                /* hammer2_inode_unlock(tdip); */

                if (tnch_error == 0) {
                        tip = hammer2_inode_get(tdip->pmp, NULL,
                                                &xop2->head.cluster, -1);
                        hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
                        if (tip) {
                                hammer2_inode_unlink_finisher(tip, isopen);
                                hammer2_inode_unlock(tip);
                        }
                } else {
                        hammer2_xop_retire(&xop2->head, HAMMER2_XOPMASK_VOP);
                }
                /* hammer2_inode_lock(tdip, 0); */

                if (tnch_error && tnch_error != ENOENT) {
                        error = tnch_error;
                        goto done2;
                }
        }

        /*
         * Resolve the collision space for (tdip, tname, tname_len)
         *
         * tdip must be held exclusively locked to prevent races.
         */
        {
                hammer2_xop_scanlhc_t *sxop;
                hammer2_tid_t lhcbase;

                tlhc = hammer2_dirhash(tname, tname_len);
                lhcbase = tlhc;
                sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
                sxop->lhc = tlhc;
                hammer2_xop_start(&sxop->head, hammer2_xop_scanlhc);
                while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
                        if (tlhc != sxop->head.cluster.focus->bref.key)
                                break;
                        ++tlhc;
                }
                hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

                if (error) {
                        if (error != ENOENT)
                                goto done2;
                        ++tlhc;
                        error = 0;
                }
                if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
                        error = ENOSPC;
                        goto done2;
                }
        }

        /*
         * Everything is set up, do the rename.
         *
         * We have to synchronize ip->meta to the underlying operation.
         *
         * NOTE: To avoid deadlocks we cannot lock (ip) while we are
         *       unlinking elements from their directories.  Locking
         *       the nlinks field does not lock the whole inode.
         */
        hammer2_inode_lock(ip, 0);
        if (error == 0) {
                hammer2_xop_nrename_t *xop4;

                xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
                xop4->lhc = tlhc;
                xop4->ip_key = ip->meta.name_key;
                hammer2_xop_setip2(&xop4->head, ip);
                hammer2_xop_setip3(&xop4->head, tdip);
                hammer2_xop_setname(&xop4->head, fname, fname_len);
                hammer2_xop_setname2(&xop4->head, tname, tname_len);
                hammer2_xop_start(&xop4->head, hammer2_xop_nrename);

                error = hammer2_xop_collect(&xop4->head, 0);
                hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

                if (error == ENOENT)
                        error = 0;
                if (error == 0 &&
                    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
                        hammer2_inode_modify(ip);
                        ip->meta.name_len = tname_len;
                        ip->meta.name_key = tlhc;
                }
        }

        /*
         * Fixup ip->pip if we were renaming the actual file and not a
         * hardlink pointer.
         */
        if (error == 0 && (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
                hammer2_inode_t *opip;

                if (ip->pip != tdip) {
                        hammer2_inode_ref(tdip);
                        opip = ip->pip;
                        ip->pip = tdip;
                        if (opip)
                                hammer2_inode_drop(opip);
                }
        }
        hammer2_inode_unlock(ip);
done2:
        hammer2_inode_unlock(tdip);
        hammer2_inode_unlock(fdip);
        hammer2_inode_unlock(cdip);
        hammer2_inode_drop(ip);
        hammer2_inode_drop(cdip);
        hammer2_inode_run_unlinkq(fdip->pmp);
        hammer2_trans_done(tdip->pmp);

        /*
         * Issue the namecache update after unlocking all the internal
         * hammer structures, otherwise we might deadlock.
         */
        if (tnch_error == 0) {
                cache_unlink(ap->a_tnch);
                cache_setunresolved(ap->a_tnch);
        }
        if (error == 0)
                cache_rename(ap->a_fnch, ap->a_tnch);

        LOCKSTOP;
        return (error);
}
/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
        hammer2_inode_t *ip;
        int error;

        LOCKSTART;
        ip = VTOI(ap->a_vp);

        error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
                              ap->a_fflag, ap->a_cred);
        LOCKSTOP;
        return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
        struct mount *mp;
        hammer2_pfs_t *pmp;
        int rc;

        LOCKSTART;
        switch (ap->a_op) {
        case (MOUNTCTL_SET_EXPORT):
                mp = ap->a_head.a_ops->head.vv_mount;
                pmp = MPTOPMP(mp);

                if (ap->a_ctllen != sizeof(struct export_args))
                        rc = (EINVAL);
                else
                        rc = vfs_export(mp, &pmp->export,
                                        (const struct export_args *)ap->a_ctl);
                break;
        default:
                rc = vop_stdmountctl(ap);
                break;
        }
        LOCKSTOP;
        return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
        { FILTEROP_ISFD | FILTEROP_MPSAFE,
          NULL, filt_hammer2detach, filt_hammer2vnode };

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct knote *kn = ap->a_kn;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &hammer2read_filtops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &hammer2write_filtops;
                break;
        case EVFILT_VNODE:
                kn->kn_fop = &hammer2vnode_filtops;
                break;
        default:
                return (EOPNOTSUPP);
        }

        kn->kn_hook = (caddr_t)vp;

        knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

        return (0);
}

static void
filt_hammer2detach(struct knote *kn)
{
        struct vnode *vp = (void *)kn->kn_hook;

        knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}
static int
filt_hammer2read(struct knote *kn, long hint)
{
        struct vnode *vp = (void *)kn->kn_hook;
        hammer2_inode_t *ip = VTOI(vp);
        off_t off;

        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
                return (1);
        }
        off = ip->meta.size - kn->kn_fp->f_offset;
        kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
        if (kn->kn_sfflags & NOTE_OLDAPI)
                return (1);
        return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
        if (hint == NOTE_REVOKE)
                kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
        kn->kn_data = 0;
        return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        if (hint == NOTE_REVOKE) {
                kn->kn_flags |= (EV_EOF | EV_NODATA);
                return (1);
        }
        return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
        hammer2_inode_t *ip;
        struct vnode *vp;

        vp = ap->a_vp;
        ip = VTOI(vp);

        if (ip->pmp->ronly)
                return (EROFS);
        return (0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
        int error;

        error = VOCALL(&fifo_vnode_vops, &ap->a_head);
        if (error)
                error = hammer2_vop_kqfilter(ap);
        return (error);
}
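/*
 * Three VOP vectors are exported below: hammer2_vnode_vops for regular
 * files, directories, and symlinks, hammer2_spec_vops for device/special
 * nodes, and hammer2_fifo_vops for fifos (which default through
 * fifo_vnoperate).  The appropriate vector is normally selected based on
 * the vnode type when the vnode is instantiated (see hammer2_igetv()).
 */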
/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
        .vop_default    = vop_defaultop,
        .vop_fsync      = hammer2_vop_fsync,
        .vop_getpages   = vop_stdgetpages,
        .vop_putpages   = vop_stdputpages,
        .vop_access     = hammer2_vop_access,
        .vop_advlock    = hammer2_vop_advlock,
        .vop_close      = hammer2_vop_close,
        .vop_nlink      = hammer2_vop_nlink,
        .vop_ncreate    = hammer2_vop_ncreate,
        .vop_nsymlink   = hammer2_vop_nsymlink,
        .vop_nremove    = hammer2_vop_nremove,
        .vop_nrmdir     = hammer2_vop_nrmdir,
        .vop_nrename    = hammer2_vop_nrename,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_setattr    = hammer2_vop_setattr,
        .vop_readdir    = hammer2_vop_readdir,
        .vop_readlink   = hammer2_vop_readlink,
        .vop_read       = hammer2_vop_read,
        .vop_write      = hammer2_vop_write,
        .vop_open       = hammer2_vop_open,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_nresolve   = hammer2_vop_nresolve,
        .vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
        .vop_nmkdir     = hammer2_vop_nmkdir,
        .vop_nmknod     = hammer2_vop_nmknod,
        .vop_ioctl      = hammer2_vop_ioctl,
        .vop_mountctl   = hammer2_vop_mountctl,
        .vop_bmap       = hammer2_vop_bmap,
        .vop_strategy   = hammer2_vop_strategy,
        .vop_kqfilter   = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
        .vop_default    = vop_defaultop,
        .vop_fsync      = hammer2_vop_fsync,
        .vop_read       = vop_stdnoread,
        .vop_write      = vop_stdnowrite,
        .vop_access     = hammer2_vop_access,
        .vop_close      = hammer2_vop_close,
        .vop_markatime  = hammer2_vop_markatime,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_setattr    = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
        .vop_default    = fifo_vnoperate,
        .vop_fsync      = hammer2_vop_fsync,
#if 0
        .vop_read       = hammer2_vop_fiforead,
        .vop_write      = hammer2_vop_fifowrite,
#endif
        .vop_access     = hammer2_vop_access,
#if 0
        .vop_close      = hammer2_vop_fifoclose,
#endif
        .vop_markatime  = hammer2_vop_markatime,
        .vop_getattr    = hammer2_vop_getattr,
        .vop_inactive   = hammer2_vop_inactive,
        .vop_reclaim    = hammer2_vop_reclaim,
        .vop_setattr    = hammer2_vop_setattr,
        .vop_kqfilter   = hammer2_vop_fifokqfilter
};