1 /* 2 * Copyright (c) 2011-2018 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@dragonflybsd.org> 6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org> 7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 */ 36 /* 37 * Kernel Filesystem interface 38 * 39 * NOTE! local ipdata pointers must be reloaded on any modifying operation 40 * to the inode as its underlying chain may have changed. 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/fcntl.h> 47 #include <sys/buf.h> 48 #include <sys/proc.h> 49 #include <sys/mount.h> 50 #include <sys/vnode.h> 51 #include <sys/mountctl.h> 52 #include <sys/dirent.h> 53 #include <sys/uio.h> 54 #include <sys/objcache.h> 55 #include <sys/event.h> 56 #include <sys/file.h> 57 #include <vfs/fifofs/fifo.h> 58 59 #include "hammer2.h" 60 61 static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, 62 int seqcount); 63 static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 64 int ioflag, int seqcount); 65 static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize); 66 static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize); 67 68 /* 69 * Last reference to a vnode is going away but it is still cached. 70 */ 71 static 72 int 73 hammer2_vop_inactive(struct vop_inactive_args *ap) 74 { 75 hammer2_inode_t *ip; 76 struct vnode *vp; 77 78 vp = ap->a_vp; 79 ip = VTOI(vp); 80 81 /* 82 * Degenerate case 83 */ 84 if (ip == NULL) { 85 vrecycle(vp); 86 return (0); 87 } 88 89 /* 90 * Aquire the inode lock to interlock against vp updates via 91 * the inode path and file deletions and such (which can be 92 * namespace-only operations that might not hold the vnode). 93 */ 94 hammer2_inode_lock(ip, 0); 95 if (ip->flags & HAMMER2_INODE_ISUNLINKED) { 96 hammer2_key_t lbase; 97 int nblksize; 98 99 /* 100 * If the inode has been unlinked we can throw away all 101 * buffers (dirty or not) and clean the file out. 102 * 103 * Because vrecycle() calls are not guaranteed, try to 104 * dispose of the inode as much as possible right here. 105 */ 106 nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL); 107 nvtruncbuf(vp, 0, nblksize, 0, 0); 108 109 /* 110 * Delete the file on-media. 111 */ 112 if ((ip->flags & HAMMER2_INODE_DELETING) == 0) { 113 atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING); 114 hammer2_inode_delayed_sideq(ip); 115 } 116 hammer2_inode_unlock(ip); 117 118 /* 119 * Recycle immediately if possible 120 */ 121 vrecycle(vp); 122 } else { 123 hammer2_inode_unlock(ip); 124 } 125 return (0); 126 } 127 128 /* 129 * Reclaim a vnode so that it can be reused; after the inode is 130 * disassociated, the filesystem must manage it alone. 131 */ 132 static 133 int 134 hammer2_vop_reclaim(struct vop_reclaim_args *ap) 135 { 136 hammer2_inode_t *ip; 137 struct vnode *vp; 138 139 vp = ap->a_vp; 140 ip = VTOI(vp); 141 if (ip == NULL) 142 return(0); 143 144 /* 145 * NOTE! We do not attempt to flush chains here, flushing is 146 * really fragile and could also deadlock. 147 */ 148 vclrisdirty(vp); 149 150 /* 151 * The inode lock is required to disconnect it. 152 */ 153 hammer2_inode_lock(ip, 0); 154 vp->v_data = NULL; 155 ip->vp = NULL; 156 157 /* 158 * Delete the file on-media. This should have been handled by the 159 * inactivation. The operation is likely still queued on the inode 160 * though so only complain if the stars don't align. 161 */ 162 if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) == 163 HAMMER2_INODE_ISUNLINKED) 164 { 165 atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING); 166 hammer2_inode_delayed_sideq(ip); 167 kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n", 168 vp, ip); 169 } 170 hammer2_inode_unlock(ip); 171 172 /* 173 * Modified inodes will already be on SIDEQ or SYNCQ, no further 174 * action is needed. 175 * 176 * We cannot safely synchronize the inode from inside the reclaim 177 * due to potentially deep locks held as-of when the reclaim occurs. 178 * Interactions and potential deadlocks abound. We also can't do it 179 * here without desynchronizing from the related directory entrie(s). 180 */ 181 hammer2_inode_drop(ip); /* vp ref */ 182 183 /* 184 * XXX handle background sync when ip dirty, kernel will no longer 185 * notify us regarding this inode because there is no longer a 186 * vnode attached to it. 187 */ 188 189 return (0); 190 } 191 192 /* 193 * Currently this function synchronizes the front-end inode state to the 194 * backend chain topology, then flushes the inode's chain and sub-topology 195 * to backend media. This function does not flush the root topology down to 196 * the inode. 197 */ 198 static 199 int 200 hammer2_vop_fsync(struct vop_fsync_args *ap) 201 { 202 hammer2_inode_t *ip; 203 struct vnode *vp; 204 int error1; 205 int error2; 206 207 vp = ap->a_vp; 208 ip = VTOI(vp); 209 error1 = 0; 210 211 hammer2_trans_init(ip->pmp, 0); 212 213 /* 214 * Flush dirty buffers in the file's logical buffer cache. 215 * It is best to wait for the strategy code to commit the 216 * buffers to the device's backing buffer cache before 217 * then trying to flush the inode. 218 * 219 * This should be quick, but certain inode modifications cached 220 * entirely in the hammer2_inode structure may not trigger a 221 * buffer read until the flush so the fsync can wind up also 222 * doing scattered reads. 223 */ 224 vfsync(vp, ap->a_waitfor, 1, NULL, NULL); 225 bio_track_wait(&vp->v_track_write, 0, 0); 226 227 /* 228 * Flush any inode changes 229 */ 230 hammer2_inode_lock(ip, 0); 231 if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED)) 232 error1 = hammer2_inode_chain_sync(ip); 233 234 /* 235 * Flush dirty chains related to the inode. 236 * 237 * NOTE! We are not in a flush transaction. The inode remains on 238 * the sideq so the filesystem syncer can synchronize it to 239 * the volume root. 240 */ 241 error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP); 242 if (error2) 243 error1 = error2; 244 245 /* 246 * We may be able to clear the vnode dirty flag. 247 */ 248 if ((ip->flags & (HAMMER2_INODE_MODIFIED | 249 HAMMER2_INODE_RESIZED | 250 HAMMER2_INODE_DIRTYDATA)) == 0 && 251 RB_EMPTY(&vp->v_rbdirty_tree) && 252 !bio_track_active(&vp->v_track_write)) { 253 vclrisdirty(vp); 254 } 255 hammer2_inode_unlock(ip); 256 hammer2_trans_done(ip->pmp, 0); 257 258 return (error1); 259 } 260 261 /* 262 * No lock needed, just handle ip->update 263 */ 264 static 265 int 266 hammer2_vop_access(struct vop_access_args *ap) 267 { 268 hammer2_inode_t *ip = VTOI(ap->a_vp); 269 uid_t uid; 270 gid_t gid; 271 mode_t mode; 272 uint32_t uflags; 273 int error; 274 int update; 275 276 retry: 277 update = spin_access_start(&ip->cluster_spin); 278 279 /*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/ 280 uid = hammer2_to_unix_xid(&ip->meta.uid); 281 gid = hammer2_to_unix_xid(&ip->meta.gid); 282 mode = ip->meta.mode; 283 uflags = ip->meta.uflags; 284 /*hammer2_inode_unlock(ip);*/ 285 286 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 287 goto retry; 288 289 error = vop_helper_access(ap, uid, gid, mode, uflags); 290 291 return (error); 292 } 293 294 static 295 int 296 hammer2_vop_getattr(struct vop_getattr_args *ap) 297 { 298 hammer2_pfs_t *pmp; 299 hammer2_inode_t *ip; 300 struct vnode *vp; 301 struct vattr *vap; 302 int update; 303 304 vp = ap->a_vp; 305 vap = ap->a_vap; 306 307 ip = VTOI(vp); 308 pmp = ip->pmp; 309 310 retry: 311 update = spin_access_start(&ip->cluster_spin); 312 313 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 314 vap->va_fileid = ip->meta.inum; 315 vap->va_mode = ip->meta.mode; 316 vap->va_nlink = ip->meta.nlinks; 317 vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid); 318 vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 319 vap->va_rmajor = 0; 320 vap->va_rminor = 0; 321 vap->va_size = ip->meta.size; /* protected by shared lock */ 322 vap->va_blocksize = HAMMER2_PBUFSIZE; 323 vap->va_flags = ip->meta.uflags; 324 hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime); 325 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime); 326 hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime); 327 vap->va_gen = 1; 328 vap->va_bytes = 0; 329 if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) { 330 /* 331 * Can't really calculate directory use sans the files under 332 * it, just assume one block for now. 333 */ 334 vap->va_bytes += HAMMER2_INODE_BYTES; 335 } else { 336 vap->va_bytes = hammer2_inode_data_count(ip); 337 } 338 vap->va_type = hammer2_get_vtype(ip->meta.type); 339 vap->va_filerev = 0; 340 vap->va_uid_uuid = ip->meta.uid; 341 vap->va_gid_uuid = ip->meta.gid; 342 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 343 VA_FSID_UUID_VALID; 344 345 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 346 goto retry; 347 348 return (0); 349 } 350 351 static 352 int 353 hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap) 354 { 355 hammer2_pfs_t *pmp; 356 hammer2_inode_t *ip; 357 struct vnode *vp; 358 struct vattr_lite *lvap; 359 int update; 360 361 vp = ap->a_vp; 362 lvap = ap->a_lvap; 363 364 ip = VTOI(vp); 365 pmp = ip->pmp; 366 367 retry: 368 update = spin_access_start(&ip->cluster_spin); 369 370 #if 0 371 vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0]; 372 vap->va_fileid = ip->meta.inum; 373 #endif 374 lvap->va_mode = ip->meta.mode; 375 lvap->va_nlink = ip->meta.nlinks; 376 lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid); 377 lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid); 378 #if 0 379 vap->va_rmajor = 0; 380 vap->va_rminor = 0; 381 #endif 382 lvap->va_size = ip->meta.size; 383 #if 0 384 vap->va_blocksize = HAMMER2_PBUFSIZE; 385 #endif 386 lvap->va_flags = ip->meta.uflags; 387 lvap->va_type = hammer2_get_vtype(ip->meta.type); 388 #if 0 389 vap->va_filerev = 0; 390 vap->va_uid_uuid = ip->meta.uid; 391 vap->va_gid_uuid = ip->meta.gid; 392 vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID | 393 VA_FSID_UUID_VALID; 394 #endif 395 396 if (__predict_false(spin_access_end(&ip->cluster_spin, update))) 397 goto retry; 398 399 return (0); 400 } 401 402 static 403 int 404 hammer2_vop_setattr(struct vop_setattr_args *ap) 405 { 406 hammer2_inode_t *ip; 407 struct vnode *vp; 408 struct vattr *vap; 409 int error; 410 int kflags = 0; 411 uint64_t ctime; 412 413 vp = ap->a_vp; 414 vap = ap->a_vap; 415 hammer2_update_time(&ctime); 416 417 ip = VTOI(vp); 418 419 if (ip->pmp->ronly) 420 return (EROFS); 421 422 /* 423 * Normally disallow setattr if there is no space, unless we 424 * are in emergency mode (might be needed to chflags -R noschg 425 * files prior to removal). 426 */ 427 if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 && 428 hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) { 429 return (ENOSPC); 430 } 431 432 hammer2_trans_init(ip->pmp, 0); 433 hammer2_inode_lock(ip, 0); 434 error = 0; 435 436 if (vap->va_flags != VNOVAL) { 437 uint32_t flags; 438 439 flags = ip->meta.uflags; 440 error = vop_helper_setattr_flags(&flags, vap->va_flags, 441 hammer2_to_unix_xid(&ip->meta.uid), 442 ap->a_cred); 443 if (error == 0) { 444 if (ip->meta.uflags != flags) { 445 hammer2_inode_modify(ip); 446 hammer2_spin_lock_update(&ip->cluster_spin); 447 ip->meta.uflags = flags; 448 ip->meta.ctime = ctime; 449 hammer2_spin_unlock_update(&ip->cluster_spin); 450 kflags |= NOTE_ATTRIB; 451 } 452 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 453 error = 0; 454 goto done; 455 } 456 } 457 goto done; 458 } 459 if (ip->meta.uflags & (IMMUTABLE | APPEND)) { 460 error = EPERM; 461 goto done; 462 } 463 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { 464 mode_t cur_mode = ip->meta.mode; 465 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 466 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 467 uuid_t uuid_uid; 468 uuid_t uuid_gid; 469 470 error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid, 471 ap->a_cred, 472 &cur_uid, &cur_gid, &cur_mode); 473 if (error == 0) { 474 hammer2_guid_to_uuid(&uuid_uid, cur_uid); 475 hammer2_guid_to_uuid(&uuid_gid, cur_gid); 476 if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) || 477 bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) || 478 ip->meta.mode != cur_mode 479 ) { 480 hammer2_inode_modify(ip); 481 hammer2_spin_lock_update(&ip->cluster_spin); 482 ip->meta.uid = uuid_uid; 483 ip->meta.gid = uuid_gid; 484 ip->meta.mode = cur_mode; 485 ip->meta.ctime = ctime; 486 hammer2_spin_unlock_update(&ip->cluster_spin); 487 } 488 kflags |= NOTE_ATTRIB; 489 } 490 } 491 492 /* 493 * Resize the file 494 */ 495 if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) { 496 switch(vp->v_type) { 497 case VREG: 498 if (vap->va_size == ip->meta.size) 499 break; 500 if (vap->va_size < ip->meta.size) { 501 hammer2_mtx_ex(&ip->truncate_lock); 502 hammer2_truncate_file(ip, vap->va_size); 503 hammer2_mtx_unlock(&ip->truncate_lock); 504 kflags |= NOTE_WRITE; 505 } else { 506 hammer2_extend_file(ip, vap->va_size); 507 kflags |= NOTE_WRITE | NOTE_EXTEND; 508 } 509 hammer2_inode_modify(ip); 510 ip->meta.mtime = ctime; 511 vclrflags(vp, VLASTWRITETS); 512 break; 513 default: 514 error = EINVAL; 515 goto done; 516 } 517 } 518 #if 0 519 /* atime not supported */ 520 if (vap->va_atime.tv_sec != VNOVAL) { 521 hammer2_inode_modify(ip); 522 ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime); 523 kflags |= NOTE_ATTRIB; 524 } 525 #endif 526 if (vap->va_mode != (mode_t)VNOVAL) { 527 mode_t cur_mode = ip->meta.mode; 528 uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid); 529 gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid); 530 531 error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred, 532 cur_uid, cur_gid, &cur_mode); 533 if (error == 0) { 534 hammer2_inode_modify(ip); 535 hammer2_spin_lock_update(&ip->cluster_spin); 536 ip->meta.mode = cur_mode; 537 ip->meta.ctime = ctime; 538 hammer2_spin_unlock_update(&ip->cluster_spin); 539 kflags |= NOTE_ATTRIB; 540 } 541 } 542 543 if (vap->va_mtime.tv_sec != VNOVAL) { 544 hammer2_inode_modify(ip); 545 ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime); 546 kflags |= NOTE_ATTRIB; 547 vclrflags(vp, VLASTWRITETS); 548 } 549 550 done: 551 /* 552 * If a truncation occurred we must call chain_sync() now in order 553 * to trim the related data chains, otherwise a later expansion can 554 * cause havoc. 555 * 556 * If an extend occured that changed the DIRECTDATA state, we must 557 * call inode_chain_sync now in order to prepare the inode's indirect 558 * block table. 559 * 560 * WARNING! This means we are making an adjustment to the inode's 561 * chain outside of sync/fsync, and not just to inode->meta, which 562 * may result in some consistency issues if a crash were to occur 563 * at just the wrong time. 564 */ 565 if (ip->flags & HAMMER2_INODE_RESIZED) 566 hammer2_inode_chain_sync(ip); 567 568 /* 569 * Cleanup. 570 */ 571 hammer2_inode_unlock(ip); 572 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ); 573 hammer2_knote(ip->vp, kflags); 574 575 return (error); 576 } 577 578 static 579 int 580 hammer2_vop_readdir(struct vop_readdir_args *ap) 581 { 582 hammer2_xop_readdir_t *xop; 583 hammer2_blockref_t bref; 584 hammer2_inode_t *ip; 585 hammer2_tid_t inum; 586 hammer2_key_t lkey; 587 struct uio *uio; 588 off_t *cookies; 589 off_t saveoff; 590 int cookie_index; 591 int ncookies; 592 int error; 593 int eofflag; 594 int r; 595 596 ip = VTOI(ap->a_vp); 597 uio = ap->a_uio; 598 saveoff = uio->uio_offset; 599 eofflag = 0; 600 error = 0; 601 602 /* 603 * Setup cookies directory entry cookies if requested 604 */ 605 if (ap->a_ncookies) { 606 ncookies = uio->uio_resid / 16 + 1; 607 if (ncookies > 1024) 608 ncookies = 1024; 609 cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK); 610 } else { 611 ncookies = -1; 612 cookies = NULL; 613 } 614 cookie_index = 0; 615 616 hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED); 617 618 /* 619 * Handle artificial entries. To ensure that only positive 64 bit 620 * quantities are returned to userland we always strip off bit 63. 621 * The hash code is designed such that codes 0x0000-0x7FFF are not 622 * used, allowing us to use these codes for articial entries. 623 * 624 * Entry 0 is used for '.' and entry 1 is used for '..'. Do not 625 * allow '..' to cross the mount point into (e.g.) the super-root. 626 */ 627 if (saveoff == 0) { 628 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 629 r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, "."); 630 if (r) 631 goto done; 632 if (cookies) 633 cookies[cookie_index] = saveoff; 634 ++saveoff; 635 ++cookie_index; 636 if (cookie_index == ncookies) 637 goto done; 638 } 639 640 if (saveoff == 1) { 641 inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK; 642 if (ip != ip->pmp->iroot) 643 inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK; 644 r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, ".."); 645 if (r) 646 goto done; 647 if (cookies) 648 cookies[cookie_index] = saveoff; 649 ++saveoff; 650 ++cookie_index; 651 if (cookie_index == ncookies) 652 goto done; 653 } 654 655 lkey = saveoff | HAMMER2_DIRHASH_VISIBLE; 656 if (hammer2_debug & 0x0020) 657 kprintf("readdir: lkey %016jx\n", lkey); 658 if (error) 659 goto done; 660 661 xop = hammer2_xop_alloc(ip, 0); 662 xop->lkey = lkey; 663 hammer2_xop_start(&xop->head, &hammer2_readdir_desc); 664 665 for (;;) { 666 const hammer2_inode_data_t *ripdata; 667 const char *dname; 668 int dtype; 669 670 error = hammer2_xop_collect(&xop->head, 0); 671 error = hammer2_error_to_errno(error); 672 if (error) { 673 break; 674 } 675 if (cookie_index == ncookies) 676 break; 677 if (hammer2_debug & 0x0020) 678 kprintf("cluster chain %p %p\n", 679 xop->head.cluster.focus, 680 (xop->head.cluster.focus ? 681 xop->head.cluster.focus->data : (void *)-1)); 682 hammer2_cluster_bref(&xop->head.cluster, &bref); 683 684 if (bref.type == HAMMER2_BREF_TYPE_INODE) { 685 ripdata = &hammer2_xop_gdata(&xop->head)->ipdata; 686 dtype = hammer2_get_dtype(ripdata->meta.type); 687 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 688 r = vop_write_dirent(&error, uio, 689 ripdata->meta.inum & 690 HAMMER2_DIRHASH_USERMSK, 691 dtype, 692 ripdata->meta.name_len, 693 ripdata->filename); 694 hammer2_xop_pdata(&xop->head); 695 if (r) 696 break; 697 if (cookies) 698 cookies[cookie_index] = saveoff; 699 ++cookie_index; 700 } else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) { 701 uint16_t namlen; 702 703 dtype = hammer2_get_dtype(bref.embed.dirent.type); 704 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 705 namlen = bref.embed.dirent.namlen; 706 if (namlen <= sizeof(bref.check.buf)) { 707 dname = bref.check.buf; 708 } else { 709 dname = hammer2_xop_gdata(&xop->head)->buf; 710 } 711 r = vop_write_dirent(&error, uio, 712 bref.embed.dirent.inum, dtype, 713 namlen, dname); 714 if (namlen > sizeof(bref.check.buf)) 715 hammer2_xop_pdata(&xop->head); 716 if (r) 717 break; 718 if (cookies) 719 cookies[cookie_index] = saveoff; 720 ++cookie_index; 721 } else { 722 /* XXX chain error */ 723 kprintf("bad chain type readdir %d\n", bref.type); 724 } 725 } 726 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 727 if (error == ENOENT) { 728 error = 0; 729 eofflag = 1; 730 saveoff = (hammer2_key_t)-1; 731 } else { 732 saveoff = bref.key & HAMMER2_DIRHASH_USERMSK; 733 } 734 done: 735 hammer2_inode_unlock(ip); 736 if (ap->a_eofflag) 737 *ap->a_eofflag = eofflag; 738 if (hammer2_debug & 0x0020) 739 kprintf("readdir: done at %016jx\n", saveoff); 740 uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE; 741 if (error && cookie_index == 0) { 742 if (cookies) { 743 kfree(cookies, M_TEMP); 744 *ap->a_ncookies = 0; 745 *ap->a_cookies = NULL; 746 } 747 } else { 748 if (cookies) { 749 *ap->a_ncookies = cookie_index; 750 *ap->a_cookies = cookies; 751 } 752 } 753 return (error); 754 } 755 756 /* 757 * hammer2_vop_readlink { vp, uio, cred } 758 */ 759 static 760 int 761 hammer2_vop_readlink(struct vop_readlink_args *ap) 762 { 763 struct vnode *vp; 764 hammer2_inode_t *ip; 765 int error; 766 767 vp = ap->a_vp; 768 if (vp->v_type != VLNK) 769 return (EINVAL); 770 ip = VTOI(vp); 771 772 error = hammer2_read_file(ip, ap->a_uio, 0); 773 return (error); 774 } 775 776 static 777 int 778 hammer2_vop_read(struct vop_read_args *ap) 779 { 780 struct vnode *vp; 781 hammer2_inode_t *ip; 782 struct uio *uio; 783 int error; 784 int seqcount; 785 786 /* 787 * Read operations supported on this vnode? 788 */ 789 vp = ap->a_vp; 790 if (vp->v_type == VDIR) 791 return (EISDIR); 792 if (vp->v_type != VREG) 793 return (EINVAL); 794 795 /* 796 * Misc 797 */ 798 ip = VTOI(vp); 799 uio = ap->a_uio; 800 error = 0; 801 802 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 803 804 error = hammer2_read_file(ip, uio, seqcount); 805 return (error); 806 } 807 808 static 809 int 810 hammer2_vop_write(struct vop_write_args *ap) 811 { 812 hammer2_inode_t *ip; 813 thread_t td; 814 struct vnode *vp; 815 struct uio *uio; 816 int error; 817 int seqcount; 818 int ioflag; 819 820 /* 821 * Read operations supported on this vnode? 822 */ 823 vp = ap->a_vp; 824 if (vp->v_type != VREG) 825 return (EINVAL); 826 827 /* 828 * Misc 829 */ 830 ip = VTOI(vp); 831 ioflag = ap->a_ioflag; 832 uio = ap->a_uio; 833 error = 0; 834 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 835 return (EROFS); 836 switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) { 837 case 2: 838 return (ENOSPC); 839 case 1: 840 ioflag |= IO_DIRECT; /* semi-synchronous */ 841 /* fall through */ 842 default: 843 break; 844 } 845 846 seqcount = ioflag >> IO_SEQSHIFT; 847 848 /* 849 * Check resource limit 850 */ 851 if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc && 852 uio->uio_offset + uio->uio_resid > 853 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 854 lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ); 855 return (EFBIG); 856 } 857 858 /* 859 * The transaction interlocks against flush initiations 860 * (note: but will run concurrently with the actual flush). 861 * 862 * To avoid deadlocking against the VM system, we must flag any 863 * transaction related to the buffer cache or other direct 864 * VM page manipulation. 865 */ 866 if (uio->uio_segflg == UIO_NOCOPY) { 867 hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE); 868 } else { 869 hammer2_trans_init(ip->pmp, 0); 870 } 871 error = hammer2_write_file(ip, uio, ioflag, seqcount); 872 if (uio->uio_segflg == UIO_NOCOPY) 873 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE | 874 HAMMER2_TRANS_SIDEQ); 875 else 876 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ); 877 878 return (error); 879 } 880 881 /* 882 * Perform read operations on a file or symlink given an UNLOCKED 883 * inode and uio. 884 * 885 * The passed ip is not locked. 886 */ 887 static 888 int 889 hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount) 890 { 891 hammer2_off_t size; 892 struct buf *bp; 893 int error; 894 895 error = 0; 896 897 /* 898 * UIO read loop. 899 * 900 * WARNING! Assumes that the kernel interlocks size changes at the 901 * vnode level. 902 */ 903 hammer2_mtx_sh(&ip->lock); 904 hammer2_mtx_sh(&ip->truncate_lock); 905 size = ip->meta.size; 906 hammer2_mtx_unlock(&ip->lock); 907 908 while (uio->uio_resid > 0 && uio->uio_offset < size) { 909 hammer2_key_t lbase; 910 hammer2_key_t leof; 911 int lblksize; 912 int loff; 913 int n; 914 915 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 916 &lbase, &leof); 917 918 #if 1 919 bp = NULL; 920 error = cluster_readx(ip->vp, leof, lbase, lblksize, 921 B_NOTMETA | B_KVABIO, 922 uio->uio_resid, 923 seqcount * MAXBSIZE, 924 &bp); 925 #else 926 if (uio->uio_segflg == UIO_NOCOPY) { 927 bp = getblk(ip->vp, lbase, lblksize, 928 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 929 if (bp->b_flags & B_CACHE) { 930 int i; 931 int j = 0; 932 if (bp->b_xio.xio_npages != 16) 933 kprintf("NPAGES BAD\n"); 934 for (i = 0; i < bp->b_xio.xio_npages; ++i) { 935 vm_page_t m; 936 m = bp->b_xio.xio_pages[i]; 937 if (m == NULL || m->valid == 0) { 938 kprintf("bp %016jx %016jx pg %d inv", 939 lbase, leof, i); 940 if (m) 941 kprintf("m->object %p/%p", m->object, ip->vp->v_object); 942 kprintf("\n"); 943 j = 1; 944 } 945 } 946 if (j) 947 kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error); 948 } 949 bqrelse(bp); 950 } 951 error = bread_kvabio(ip->vp, lbase, lblksize, &bp); 952 #endif 953 if (error) { 954 brelse(bp); 955 break; 956 } 957 bkvasync(bp); 958 loff = (int)(uio->uio_offset - lbase); 959 n = lblksize - loff; 960 if (n > uio->uio_resid) 961 n = uio->uio_resid; 962 if (n > size - uio->uio_offset) 963 n = (int)(size - uio->uio_offset); 964 bp->b_flags |= B_AGE; 965 uiomovebp(bp, bp->b_data + loff, n, uio); 966 bqrelse(bp); 967 } 968 hammer2_mtx_unlock(&ip->truncate_lock); 969 970 return (error); 971 } 972 973 /* 974 * Write to the file represented by the inode via the logical buffer cache. 975 * The inode may represent a regular file or a symlink. 976 * 977 * The inode must not be locked. 978 */ 979 static 980 int 981 hammer2_write_file(hammer2_inode_t *ip, struct uio *uio, 982 int ioflag, int seqcount) 983 { 984 hammer2_key_t old_eof; 985 hammer2_key_t new_eof; 986 struct buf *bp; 987 int kflags; 988 int error; 989 int modified; 990 991 /* 992 * Setup if append 993 * 994 * WARNING! Assumes that the kernel interlocks size changes at the 995 * vnode level. 996 */ 997 hammer2_mtx_ex(&ip->lock); 998 hammer2_mtx_sh(&ip->truncate_lock); 999 if (ioflag & IO_APPEND) 1000 uio->uio_offset = ip->meta.size; 1001 old_eof = ip->meta.size; 1002 1003 /* 1004 * Extend the file if necessary. If the write fails at some point 1005 * we will truncate it back down to cover as much as we were able 1006 * to write. 1007 * 1008 * Doing this now makes it easier to calculate buffer sizes in 1009 * the loop. 1010 */ 1011 kflags = 0; 1012 error = 0; 1013 modified = 0; 1014 1015 if (uio->uio_offset + uio->uio_resid > old_eof) { 1016 new_eof = uio->uio_offset + uio->uio_resid; 1017 modified = 1; 1018 hammer2_extend_file(ip, new_eof); 1019 kflags |= NOTE_EXTEND; 1020 } else { 1021 new_eof = old_eof; 1022 } 1023 hammer2_mtx_unlock(&ip->lock); 1024 1025 /* 1026 * UIO write loop 1027 */ 1028 while (uio->uio_resid > 0) { 1029 hammer2_key_t lbase; 1030 int trivial; 1031 int endofblk; 1032 int lblksize; 1033 int loff; 1034 int n; 1035 1036 /* 1037 * Don't allow the buffer build to blow out the buffer 1038 * cache. 1039 */ 1040 if ((ioflag & IO_RECURSE) == 0) 1041 bwillwrite(HAMMER2_PBUFSIZE); 1042 1043 /* 1044 * This nominally tells us how much we can cluster and 1045 * what the logical buffer size needs to be. Currently 1046 * we don't try to cluster the write and just handle one 1047 * block at a time. 1048 */ 1049 lblksize = hammer2_calc_logical(ip, uio->uio_offset, 1050 &lbase, NULL); 1051 loff = (int)(uio->uio_offset - lbase); 1052 1053 KKASSERT(lblksize <= MAXBSIZE); 1054 1055 /* 1056 * Calculate bytes to copy this transfer and whether the 1057 * copy completely covers the buffer or not. 1058 */ 1059 trivial = 0; 1060 n = lblksize - loff; 1061 if (n > uio->uio_resid) { 1062 n = uio->uio_resid; 1063 if (loff == lbase && uio->uio_offset + n == new_eof) 1064 trivial = 1; 1065 endofblk = 0; 1066 } else { 1067 if (loff == 0) 1068 trivial = 1; 1069 endofblk = 1; 1070 } 1071 if (lbase >= new_eof) 1072 trivial = 1; 1073 1074 /* 1075 * Get the buffer 1076 */ 1077 if (uio->uio_segflg == UIO_NOCOPY) { 1078 /* 1079 * Issuing a write with the same data backing the 1080 * buffer. Instantiate the buffer to collect the 1081 * backing vm pages, then read-in any missing bits. 1082 * 1083 * This case is used by vop_stdputpages(). 1084 */ 1085 bp = getblk(ip->vp, lbase, lblksize, 1086 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1087 if ((bp->b_flags & B_CACHE) == 0) { 1088 bqrelse(bp); 1089 error = bread_kvabio(ip->vp, lbase, 1090 lblksize, &bp); 1091 } 1092 } else if (trivial) { 1093 /* 1094 * Even though we are entirely overwriting the buffer 1095 * we may still have to zero it out to avoid a 1096 * mmap/write visibility issue. 1097 */ 1098 bp = getblk(ip->vp, lbase, lblksize, 1099 GETBLK_BHEAVY | GETBLK_KVABIO, 0); 1100 if ((bp->b_flags & B_CACHE) == 0) 1101 vfs_bio_clrbuf(bp); 1102 } else { 1103 /* 1104 * Partial overwrite, read in any missing bits then 1105 * replace the portion being written. 1106 * 1107 * (The strategy code will detect zero-fill physical 1108 * blocks for this case). 1109 */ 1110 error = bread_kvabio(ip->vp, lbase, lblksize, &bp); 1111 if (error == 0) 1112 bheavy(bp); 1113 } 1114 1115 if (error) { 1116 brelse(bp); 1117 break; 1118 } 1119 1120 /* 1121 * Ok, copy the data in 1122 */ 1123 bkvasync(bp); 1124 error = uiomovebp(bp, bp->b_data + loff, n, uio); 1125 kflags |= NOTE_WRITE; 1126 modified = 1; 1127 if (error) { 1128 brelse(bp); 1129 break; 1130 } 1131 1132 /* 1133 * WARNING: Pageout daemon will issue UIO_NOCOPY writes 1134 * with IO_SYNC or IO_ASYNC set. These writes 1135 * must be handled as the pageout daemon expects. 1136 * 1137 * NOTE! H2 relies on cluster_write() here because it 1138 * cannot preallocate disk blocks at the logical 1139 * level due to not knowing what the compression 1140 * size will be at this time. 1141 * 1142 * We must use cluster_write() here and we depend 1143 * on the write-behind feature to flush buffers 1144 * appropriately. If we let the buffer daemons do 1145 * it the block allocations will be all over the 1146 * map. 1147 */ 1148 if (ioflag & IO_SYNC) { 1149 bwrite(bp); 1150 } else if ((ioflag & IO_DIRECT) && endofblk) { 1151 bawrite(bp); 1152 } else if (ioflag & IO_ASYNC) { 1153 bawrite(bp); 1154 } else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) { 1155 bdwrite(bp); 1156 } else { 1157 #if 1 1158 bp->b_flags |= B_CLUSTEROK; 1159 cluster_write(bp, new_eof, lblksize, seqcount); 1160 #else 1161 bp->b_flags |= B_CLUSTEROK; 1162 bdwrite(bp); 1163 #endif 1164 } 1165 } 1166 1167 /* 1168 * Cleanup. If we extended the file EOF but failed to write through 1169 * the entire write is a failure and we have to back-up. 1170 */ 1171 if (error && new_eof != old_eof) { 1172 hammer2_mtx_unlock(&ip->truncate_lock); 1173 hammer2_mtx_ex(&ip->lock); /* note lock order */ 1174 hammer2_mtx_ex(&ip->truncate_lock); /* note lock order */ 1175 hammer2_truncate_file(ip, old_eof); 1176 if (ip->flags & HAMMER2_INODE_MODIFIED) 1177 hammer2_inode_chain_sync(ip); 1178 hammer2_mtx_unlock(&ip->lock); 1179 } else if (modified) { 1180 struct vnode *vp = ip->vp; 1181 1182 hammer2_mtx_ex(&ip->lock); 1183 hammer2_inode_modify(ip); 1184 if (uio->uio_segflg == UIO_NOCOPY) { 1185 if (vp->v_flag & VLASTWRITETS) { 1186 ip->meta.mtime = 1187 (unsigned long)vp->v_lastwrite_ts.tv_sec * 1188 1000000 + 1189 vp->v_lastwrite_ts.tv_nsec / 1000; 1190 } 1191 } else { 1192 hammer2_update_time(&ip->meta.mtime); 1193 vclrflags(vp, VLASTWRITETS); 1194 } 1195 1196 #if 0 1197 /* 1198 * REMOVED - handled by hammer2_extend_file(). Do not issue 1199 * a chain_sync() outside of a sync/fsync except for DIRECTDATA 1200 * state changes. 1201 * 1202 * Under normal conditions we only issue a chain_sync if 1203 * the inode's DIRECTDATA state changed. 1204 */ 1205 if (ip->flags & HAMMER2_INODE_RESIZED) 1206 hammer2_inode_chain_sync(ip); 1207 #endif 1208 hammer2_mtx_unlock(&ip->lock); 1209 hammer2_knote(ip->vp, kflags); 1210 } 1211 hammer2_trans_assert_strategy(ip->pmp); 1212 hammer2_mtx_unlock(&ip->truncate_lock); 1213 1214 return error; 1215 } 1216 1217 /* 1218 * Truncate the size of a file. The inode must be locked. 1219 * 1220 * We must unconditionally set HAMMER2_INODE_RESIZED to properly 1221 * ensure that any on-media data beyond the new file EOF has been destroyed. 1222 * 1223 * WARNING: nvtruncbuf() can only be safely called without the inode lock 1224 * held due to the way our write thread works. If the truncation 1225 * occurs in the middle of a buffer, nvtruncbuf() is responsible 1226 * for dirtying that buffer and zeroing out trailing bytes. 1227 * 1228 * WARNING! Assumes that the kernel interlocks size changes at the 1229 * vnode level. 1230 * 1231 * WARNING! Caller assumes responsibility for removing dead blocks 1232 * if INODE_RESIZED is set. 1233 */ 1234 static 1235 void 1236 hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize) 1237 { 1238 hammer2_key_t lbase; 1239 int nblksize; 1240 1241 hammer2_mtx_unlock(&ip->lock); 1242 if (ip->vp) { 1243 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL); 1244 nvtruncbuf(ip->vp, nsize, 1245 nblksize, (int)nsize & (nblksize - 1), 1246 0); 1247 } 1248 hammer2_mtx_ex(&ip->lock); 1249 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0); 1250 ip->osize = ip->meta.size; 1251 ip->meta.size = nsize; 1252 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED); 1253 hammer2_inode_modify(ip); 1254 } 1255 1256 /* 1257 * Extend the size of a file. The inode must be locked. 1258 * 1259 * Even though the file size is changing, we do not have to set the 1260 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES 1261 * boundary. When this occurs a hammer2_inode_chain_sync() is required 1262 * to prepare the inode cluster's indirect block table, otherwise 1263 * async execution of the strategy code will implode on us. 1264 * 1265 * WARNING! Assumes that the kernel interlocks size changes at the 1266 * vnode level. 1267 * 1268 * WARNING! Caller assumes responsibility for transitioning out 1269 * of the inode DIRECTDATA mode if INODE_RESIZED is set. 1270 */ 1271 static 1272 void 1273 hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize) 1274 { 1275 hammer2_key_t lbase; 1276 hammer2_key_t osize; 1277 int oblksize; 1278 int nblksize; 1279 int error; 1280 1281 KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0); 1282 hammer2_inode_modify(ip); 1283 osize = ip->meta.size; 1284 ip->osize = osize; 1285 ip->meta.size = nsize; 1286 1287 /* 1288 * We must issue a chain_sync() when the DIRECTDATA state changes 1289 * to prevent confusion between the flush code and the in-memory 1290 * state. This is not perfect because we are doing it outside of 1291 * a sync/fsync operation, so it might not be fully synchronized 1292 * with the meta-data topology flush. 1293 * 1294 * We must retain and re-dirty the buffer cache buffer containing 1295 * the direct data so it can be written to a real block. It should 1296 * not be possible for a bread error to occur since the original data 1297 * is extracted from the inode structure directly. 1298 */ 1299 if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) { 1300 if (osize) { 1301 struct buf *bp; 1302 1303 oblksize = hammer2_calc_logical(ip, 0, NULL, NULL); 1304 error = bread_kvabio(ip->vp, 0, oblksize, &bp); 1305 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED); 1306 hammer2_inode_chain_sync(ip); 1307 if (error == 0) { 1308 bheavy(bp); 1309 bdwrite(bp); 1310 } else { 1311 brelse(bp); 1312 } 1313 } else { 1314 atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED); 1315 hammer2_inode_chain_sync(ip); 1316 } 1317 } 1318 hammer2_mtx_unlock(&ip->lock); 1319 if (ip->vp) { 1320 oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL); 1321 nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL); 1322 nvextendbuf(ip->vp, 1323 osize, nsize, 1324 oblksize, nblksize, 1325 -1, -1, 0); 1326 } 1327 hammer2_mtx_ex(&ip->lock); 1328 } 1329 1330 static 1331 int 1332 hammer2_vop_nresolve(struct vop_nresolve_args *ap) 1333 { 1334 hammer2_xop_nresolve_t *xop; 1335 hammer2_inode_t *ip; 1336 hammer2_inode_t *dip; 1337 struct namecache *ncp; 1338 struct vnode *vp; 1339 int error; 1340 1341 dip = VTOI(ap->a_dvp); 1342 xop = hammer2_xop_alloc(dip, 0); 1343 1344 ncp = ap->a_nch->ncp; 1345 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 1346 1347 /* 1348 * Note: In DragonFly the kernel handles '.' and '..'. 1349 */ 1350 hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED); 1351 hammer2_xop_start(&xop->head, &hammer2_nresolve_desc); 1352 1353 error = hammer2_xop_collect(&xop->head, 0); 1354 error = hammer2_error_to_errno(error); 1355 if (error) { 1356 ip = NULL; 1357 } else { 1358 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1); 1359 } 1360 hammer2_inode_unlock(dip); 1361 1362 /* 1363 * Acquire the related vnode 1364 * 1365 * NOTE: For error processing, only ENOENT resolves the namecache 1366 * entry to NULL, otherwise we just return the error and 1367 * leave the namecache unresolved. 1368 * 1369 * WARNING: inode structure is locked exclusively via inode_get 1370 * but chain was locked shared. inode_unlock() 1371 * will handle it properly. 1372 */ 1373 if (ip) { 1374 vp = hammer2_igetv(ip, &error); /* error set to UNIX error */ 1375 if (error == 0) { 1376 vn_unlock(vp); 1377 cache_setvp(ap->a_nch, vp); 1378 } else if (error == ENOENT) { 1379 cache_setvp(ap->a_nch, NULL); 1380 } 1381 hammer2_inode_unlock(ip); 1382 1383 /* 1384 * The vp should not be released until after we've disposed 1385 * of our locks, because it might cause vop_inactive() to 1386 * be called. 1387 */ 1388 if (vp) 1389 vrele(vp); 1390 } else { 1391 error = ENOENT; 1392 cache_setvp(ap->a_nch, NULL); 1393 } 1394 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1395 KASSERT(error || ap->a_nch->ncp->nc_vp != NULL, 1396 ("resolve error %d/%p ap %p\n", 1397 error, ap->a_nch->ncp->nc_vp, ap)); 1398 1399 return error; 1400 } 1401 1402 static 1403 int 1404 hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap) 1405 { 1406 hammer2_inode_t *dip; 1407 hammer2_tid_t inum; 1408 int error; 1409 1410 dip = VTOI(ap->a_dvp); 1411 inum = dip->meta.iparent; 1412 *ap->a_vpp = NULL; 1413 1414 if (inum) { 1415 error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL, 1416 inum, ap->a_vpp); 1417 } else { 1418 error = ENOENT; 1419 } 1420 return error; 1421 } 1422 1423 static 1424 int 1425 hammer2_vop_nmkdir(struct vop_nmkdir_args *ap) 1426 { 1427 hammer2_inode_t *dip; 1428 hammer2_inode_t *nip; 1429 struct namecache *ncp; 1430 const char *name; 1431 size_t name_len; 1432 hammer2_tid_t inum; 1433 int error; 1434 1435 dip = VTOI(ap->a_dvp); 1436 if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG)) 1437 return (EROFS); 1438 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 1439 return (ENOSPC); 1440 1441 ncp = ap->a_nch->ncp; 1442 name = ncp->nc_name; 1443 name_len = ncp->nc_nlen; 1444 1445 hammer2_trans_init(dip->pmp, 0); 1446 1447 inum = hammer2_trans_newinum(dip->pmp); 1448 1449 /* 1450 * Create the directory as an inode and then create the directory 1451 * entry. 1452 * 1453 * dip must be locked before nip to avoid deadlock. 1454 */ 1455 hammer2_inode_lock(dip, 0); 1456 nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred, 1457 inum, &error); 1458 if (error) { 1459 error = hammer2_error_to_errno(error); 1460 } else { 1461 error = hammer2_dirent_create(dip, name, name_len, 1462 nip->meta.inum, nip->meta.type); 1463 /* returns UNIX error code */ 1464 } 1465 if (error) { 1466 if (nip) { 1467 hammer2_inode_unlink_finisher(nip, NULL); 1468 hammer2_inode_unlock(nip); 1469 nip = NULL; 1470 } 1471 *ap->a_vpp = NULL; 1472 } else { 1473 /* 1474 * inode_depend() must occur before the igetv() because 1475 * the igetv() can temporarily release the inode lock. 1476 */ 1477 hammer2_inode_depend(dip, nip); /* before igetv */ 1478 *ap->a_vpp = hammer2_igetv(nip, &error); 1479 hammer2_inode_unlock(nip); 1480 } 1481 1482 /* 1483 * Update dip's mtime 1484 * 1485 * We can use a shared inode lock and allow the meta.mtime update 1486 * SMP race. hammer2_inode_modify() is MPSAFE w/a shared lock. 1487 */ 1488 if (error == 0) { 1489 uint64_t mtime; 1490 1491 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 1492 hammer2_update_time(&mtime); 1493 hammer2_inode_modify(dip); 1494 dip->meta.mtime = mtime; 1495 /*hammer2_inode_unlock(dip);*/ 1496 } 1497 hammer2_inode_unlock(dip); 1498 1499 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1500 1501 if (error == 0) { 1502 cache_setunresolved(ap->a_nch); 1503 cache_setvp(ap->a_nch, *ap->a_vpp); 1504 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK); 1505 } 1506 return error; 1507 } 1508 1509 static 1510 int 1511 hammer2_vop_open(struct vop_open_args *ap) 1512 { 1513 return vop_stdopen(ap); 1514 } 1515 1516 /* 1517 * hammer2_vop_advlock { vp, id, op, fl, flags } 1518 */ 1519 static 1520 int 1521 hammer2_vop_advlock(struct vop_advlock_args *ap) 1522 { 1523 hammer2_inode_t *ip = VTOI(ap->a_vp); 1524 hammer2_off_t size; 1525 1526 size = ip->meta.size; 1527 return (lf_advlock(ap, &ip->advlock, size)); 1528 } 1529 1530 static 1531 int 1532 hammer2_vop_close(struct vop_close_args *ap) 1533 { 1534 return vop_stdclose(ap); 1535 } 1536 1537 /* 1538 * hammer2_vop_nlink { nch, dvp, vp, cred } 1539 * 1540 * Create a hardlink from (vp) to {dvp, nch}. 1541 */ 1542 static 1543 int 1544 hammer2_vop_nlink(struct vop_nlink_args *ap) 1545 { 1546 hammer2_inode_t *tdip; /* target directory to create link in */ 1547 hammer2_inode_t *ip; /* inode we are hardlinking to */ 1548 struct namecache *ncp; 1549 const char *name; 1550 size_t name_len; 1551 int error; 1552 uint64_t cmtime; 1553 1554 if (ap->a_dvp->v_mount != ap->a_vp->v_mount) 1555 return(EXDEV); 1556 1557 tdip = VTOI(ap->a_dvp); 1558 if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG)) 1559 return (EROFS); 1560 if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1) 1561 return (ENOSPC); 1562 1563 ncp = ap->a_nch->ncp; 1564 name = ncp->nc_name; 1565 name_len = ncp->nc_nlen; 1566 1567 /* 1568 * ip represents the file being hardlinked. The file could be a 1569 * normal file or a hardlink target if it has already been hardlinked. 1570 * (with the new semantics, it will almost always be a hardlink 1571 * target). 1572 * 1573 * Bump nlinks and potentially also create or move the hardlink 1574 * target in the parent directory common to (ip) and (tdip). The 1575 * consolidation code can modify ip->cluster. The returned cluster 1576 * is locked. 1577 */ 1578 ip = VTOI(ap->a_vp); 1579 KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp)); 1580 hammer2_trans_init(ip->pmp, 0); 1581 1582 /* 1583 * Target should be an indexed inode or there's no way we will ever 1584 * be able to find it! 1585 */ 1586 KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0); 1587 1588 hammer2_inode_lock4(tdip, ip, NULL, NULL); 1589 1590 hammer2_update_time(&cmtime); 1591 1592 /* 1593 * Create the directory entry and bump nlinks. 1594 * Also update ip's ctime. 1595 */ 1596 error = hammer2_dirent_create(tdip, name, name_len, 1597 ip->meta.inum, ip->meta.type); 1598 hammer2_inode_modify(ip); 1599 ++ip->meta.nlinks; 1600 ip->meta.ctime = cmtime; 1601 if (error == 0) { 1602 /* 1603 * Update dip's [cm]time 1604 */ 1605 hammer2_inode_modify(tdip); 1606 tdip->meta.mtime = cmtime; 1607 tdip->meta.ctime = cmtime; 1608 1609 cache_setunresolved(ap->a_nch); 1610 cache_setvp(ap->a_nch, ap->a_vp); 1611 } 1612 hammer2_inode_unlock(ip); 1613 hammer2_inode_unlock(tdip); 1614 1615 hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ); 1616 hammer2_knote(ap->a_vp, NOTE_LINK); 1617 hammer2_knote(ap->a_dvp, NOTE_WRITE); 1618 1619 return error; 1620 } 1621 1622 /* 1623 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap } 1624 * 1625 * The operating system has already ensured that the directory entry 1626 * does not exist and done all appropriate namespace locking. 1627 */ 1628 static 1629 int 1630 hammer2_vop_ncreate(struct vop_ncreate_args *ap) 1631 { 1632 hammer2_inode_t *dip; 1633 hammer2_inode_t *nip; 1634 struct namecache *ncp; 1635 const char *name; 1636 size_t name_len; 1637 hammer2_tid_t inum; 1638 int error; 1639 1640 dip = VTOI(ap->a_dvp); 1641 if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG)) 1642 return (EROFS); 1643 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 1644 return (ENOSPC); 1645 1646 ncp = ap->a_nch->ncp; 1647 name = ncp->nc_name; 1648 name_len = ncp->nc_nlen; 1649 hammer2_trans_init(dip->pmp, 0); 1650 1651 inum = hammer2_trans_newinum(dip->pmp); 1652 1653 /* 1654 * Create the regular file as an inode and then create the directory 1655 * entry. 1656 * 1657 * dip must be locked before nip to avoid deadlock. 1658 */ 1659 hammer2_inode_lock(dip, 0); 1660 nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred, 1661 inum, &error); 1662 if (error) { 1663 error = hammer2_error_to_errno(error); 1664 } else { 1665 error = hammer2_dirent_create(dip, name, name_len, 1666 nip->meta.inum, nip->meta.type); 1667 } 1668 if (error) { 1669 if (nip) { 1670 hammer2_inode_unlink_finisher(nip, NULL); 1671 hammer2_inode_unlock(nip); 1672 nip = NULL; 1673 } 1674 *ap->a_vpp = NULL; 1675 } else { 1676 hammer2_inode_depend(dip, nip); /* before igetv */ 1677 *ap->a_vpp = hammer2_igetv(nip, &error); 1678 hammer2_inode_unlock(nip); 1679 } 1680 1681 /* 1682 * Update dip's mtime 1683 */ 1684 if (error == 0) { 1685 uint64_t mtime; 1686 1687 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 1688 hammer2_update_time(&mtime); 1689 hammer2_inode_modify(dip); 1690 dip->meta.mtime = mtime; 1691 /*hammer2_inode_unlock(dip);*/ 1692 } 1693 hammer2_inode_unlock(dip); 1694 1695 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1696 1697 if (error == 0) { 1698 cache_setunresolved(ap->a_nch); 1699 cache_setvp(ap->a_nch, *ap->a_vpp); 1700 hammer2_knote(ap->a_dvp, NOTE_WRITE); 1701 } 1702 return error; 1703 } 1704 1705 /* 1706 * Make a device node (typically a fifo) 1707 */ 1708 static 1709 int 1710 hammer2_vop_nmknod(struct vop_nmknod_args *ap) 1711 { 1712 hammer2_inode_t *dip; 1713 hammer2_inode_t *nip; 1714 struct namecache *ncp; 1715 const char *name; 1716 size_t name_len; 1717 hammer2_tid_t inum; 1718 int error; 1719 1720 dip = VTOI(ap->a_dvp); 1721 if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG)) 1722 return (EROFS); 1723 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 1724 return (ENOSPC); 1725 1726 ncp = ap->a_nch->ncp; 1727 name = ncp->nc_name; 1728 name_len = ncp->nc_nlen; 1729 hammer2_trans_init(dip->pmp, 0); 1730 1731 /* 1732 * Create the device inode and then create the directory entry. 1733 * 1734 * dip must be locked before nip to avoid deadlock. 1735 */ 1736 inum = hammer2_trans_newinum(dip->pmp); 1737 1738 hammer2_inode_lock(dip, 0); 1739 nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred, 1740 inum, &error); 1741 if (error) { 1742 error = hammer2_error_to_errno(error); 1743 } else { 1744 error = hammer2_dirent_create(dip, name, name_len, 1745 nip->meta.inum, nip->meta.type); 1746 } 1747 if (error) { 1748 if (nip) { 1749 hammer2_inode_unlink_finisher(nip, NULL); 1750 hammer2_inode_unlock(nip); 1751 nip = NULL; 1752 } 1753 *ap->a_vpp = NULL; 1754 } else { 1755 hammer2_inode_depend(dip, nip); /* before igetv */ 1756 *ap->a_vpp = hammer2_igetv(nip, &error); 1757 hammer2_inode_unlock(nip); 1758 } 1759 1760 /* 1761 * Update dip's mtime 1762 */ 1763 if (error == 0) { 1764 uint64_t mtime; 1765 1766 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 1767 hammer2_update_time(&mtime); 1768 hammer2_inode_modify(dip); 1769 dip->meta.mtime = mtime; 1770 /*hammer2_inode_unlock(dip);*/ 1771 } 1772 hammer2_inode_unlock(dip); 1773 1774 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1775 1776 if (error == 0) { 1777 cache_setunresolved(ap->a_nch); 1778 cache_setvp(ap->a_nch, *ap->a_vpp); 1779 hammer2_knote(ap->a_dvp, NOTE_WRITE); 1780 } 1781 return error; 1782 } 1783 1784 /* 1785 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target } 1786 */ 1787 static 1788 int 1789 hammer2_vop_nsymlink(struct vop_nsymlink_args *ap) 1790 { 1791 hammer2_inode_t *dip; 1792 hammer2_inode_t *nip; 1793 struct namecache *ncp; 1794 const char *name; 1795 size_t name_len; 1796 hammer2_tid_t inum; 1797 int error; 1798 1799 dip = VTOI(ap->a_dvp); 1800 if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG)) 1801 return (EROFS); 1802 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 1803 return (ENOSPC); 1804 1805 ncp = ap->a_nch->ncp; 1806 name = ncp->nc_name; 1807 name_len = ncp->nc_nlen; 1808 hammer2_trans_init(dip->pmp, 0); 1809 1810 ap->a_vap->va_type = VLNK; /* enforce type */ 1811 1812 /* 1813 * Create the softlink as an inode and then create the directory 1814 * entry. 1815 * 1816 * dip must be locked before nip to avoid deadlock. 1817 */ 1818 inum = hammer2_trans_newinum(dip->pmp); 1819 1820 hammer2_inode_lock(dip, 0); 1821 nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred, 1822 inum, &error); 1823 if (error) { 1824 error = hammer2_error_to_errno(error); 1825 } else { 1826 error = hammer2_dirent_create(dip, name, name_len, 1827 nip->meta.inum, nip->meta.type); 1828 } 1829 if (error) { 1830 if (nip) { 1831 hammer2_inode_unlink_finisher(nip, NULL); 1832 hammer2_inode_unlock(nip); 1833 nip = NULL; 1834 } 1835 *ap->a_vpp = NULL; 1836 hammer2_inode_unlock(dip); 1837 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1838 return error; 1839 } 1840 hammer2_inode_depend(dip, nip); /* before igetv */ 1841 *ap->a_vpp = hammer2_igetv(nip, &error); 1842 1843 /* 1844 * Build the softlink (~like file data) and finalize the namecache. 1845 */ 1846 if (error == 0) { 1847 size_t bytes; 1848 struct uio auio; 1849 struct iovec aiov; 1850 1851 bytes = strlen(ap->a_target); 1852 1853 hammer2_inode_unlock(nip); 1854 bzero(&auio, sizeof(auio)); 1855 bzero(&aiov, sizeof(aiov)); 1856 auio.uio_iov = &aiov; 1857 auio.uio_segflg = UIO_SYSSPACE; 1858 auio.uio_rw = UIO_WRITE; 1859 auio.uio_resid = bytes; 1860 auio.uio_iovcnt = 1; 1861 auio.uio_td = curthread; 1862 aiov.iov_base = ap->a_target; 1863 aiov.iov_len = bytes; 1864 error = hammer2_write_file(nip, &auio, IO_APPEND, 0); 1865 /* XXX handle error */ 1866 error = 0; 1867 } else { 1868 hammer2_inode_unlock(nip); 1869 } 1870 1871 /* 1872 * Update dip's mtime 1873 */ 1874 if (error == 0) { 1875 uint64_t mtime; 1876 1877 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 1878 hammer2_update_time(&mtime); 1879 hammer2_inode_modify(dip); 1880 dip->meta.mtime = mtime; 1881 /*hammer2_inode_unlock(dip);*/ 1882 } 1883 hammer2_inode_unlock(dip); 1884 1885 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1886 1887 /* 1888 * Finalize namecache 1889 */ 1890 if (error == 0) { 1891 cache_setunresolved(ap->a_nch); 1892 cache_setvp(ap->a_nch, *ap->a_vpp); 1893 hammer2_knote(ap->a_dvp, NOTE_WRITE); 1894 } 1895 return error; 1896 } 1897 1898 /* 1899 * hammer2_vop_nremove { nch, dvp, cred } 1900 */ 1901 static 1902 int 1903 hammer2_vop_nremove(struct vop_nremove_args *ap) 1904 { 1905 hammer2_xop_unlink_t *xop; 1906 hammer2_inode_t *dip; 1907 hammer2_inode_t *ip; 1908 struct vnode *vprecycle; 1909 struct namecache *ncp; 1910 int error; 1911 1912 dip = VTOI(ap->a_dvp); 1913 if (dip->pmp->ronly) 1914 return (EROFS); 1915 #if 0 1916 /* allow removals, except user to also bulkfree */ 1917 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 1918 return (ENOSPC); 1919 #endif 1920 1921 ncp = ap->a_nch->ncp; 1922 1923 if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) { 1924 kprintf("hammer2: attempt to delete inside debug inode: %s\n", 1925 ncp->nc_name); 1926 while (hammer2_debug_inode && 1927 dip->meta.inum == hammer2_debug_inode) { 1928 tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5); 1929 } 1930 } 1931 1932 hammer2_trans_init(dip->pmp, 0); 1933 hammer2_inode_lock(dip, 0); 1934 1935 /* 1936 * The unlink XOP unlinks the path from the directory and 1937 * locates and returns the cluster associated with the real inode. 1938 * We have to handle nlinks here on the frontend. 1939 */ 1940 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); 1941 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 1942 1943 xop->isdir = 0; 1944 xop->dopermanent = 0; 1945 hammer2_xop_start(&xop->head, &hammer2_unlink_desc); 1946 1947 /* 1948 * Collect the real inode and adjust nlinks, destroy the real 1949 * inode if nlinks transitions to 0 and it was the real inode 1950 * (else it has already been removed). 1951 */ 1952 error = hammer2_xop_collect(&xop->head, 0); 1953 error = hammer2_error_to_errno(error); 1954 vprecycle = NULL; 1955 1956 if (error == 0) { 1957 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1); 1958 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1959 if (ip) { 1960 if (hammer2_debug_inode && 1961 ip->meta.inum == hammer2_debug_inode) { 1962 kprintf("hammer2: attempt to delete debug " 1963 "inode!\n"); 1964 while (hammer2_debug_inode && 1965 ip->meta.inum == hammer2_debug_inode) { 1966 tsleep(&hammer2_debug_inode, 0, 1967 "h2debug", hz*5); 1968 } 1969 } 1970 hammer2_inode_unlink_finisher(ip, &vprecycle); 1971 hammer2_inode_depend(dip, ip); /* after modified */ 1972 hammer2_inode_unlock(ip); 1973 } 1974 } else { 1975 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 1976 } 1977 1978 /* 1979 * Update dip's mtime 1980 */ 1981 if (error == 0) { 1982 uint64_t mtime; 1983 1984 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 1985 hammer2_update_time(&mtime); 1986 hammer2_inode_modify(dip); 1987 dip->meta.mtime = mtime; 1988 /*hammer2_inode_unlock(dip);*/ 1989 } 1990 hammer2_inode_unlock(dip); 1991 1992 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 1993 if (error == 0) { 1994 cache_unlink(ap->a_nch); 1995 hammer2_knote(ap->a_dvp, NOTE_WRITE); 1996 } 1997 if (vprecycle) 1998 hammer2_inode_vprecycle(vprecycle); 1999 2000 return (error); 2001 } 2002 2003 /* 2004 * hammer2_vop_nrmdir { nch, dvp, cred } 2005 */ 2006 static 2007 int 2008 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap) 2009 { 2010 hammer2_xop_unlink_t *xop; 2011 hammer2_inode_t *dip; 2012 hammer2_inode_t *ip; 2013 struct namecache *ncp; 2014 struct vnode *vprecycle; 2015 int error; 2016 2017 dip = VTOI(ap->a_dvp); 2018 if (dip->pmp->ronly) 2019 return (EROFS); 2020 #if 0 2021 /* allow removals, except user to also bulkfree */ 2022 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 2023 return (ENOSPC); 2024 #endif 2025 2026 hammer2_trans_init(dip->pmp, 0); 2027 hammer2_inode_lock(dip, 0); 2028 2029 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); 2030 2031 ncp = ap->a_nch->ncp; 2032 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 2033 xop->isdir = 1; 2034 xop->dopermanent = 0; 2035 hammer2_xop_start(&xop->head, &hammer2_unlink_desc); 2036 2037 /* 2038 * Collect the real inode and adjust nlinks, destroy the real 2039 * inode if nlinks transitions to 0 and it was the real inode 2040 * (else it has already been removed). 2041 */ 2042 error = hammer2_xop_collect(&xop->head, 0); 2043 error = hammer2_error_to_errno(error); 2044 vprecycle = NULL; 2045 2046 if (error == 0) { 2047 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1); 2048 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2049 if (ip) { 2050 hammer2_inode_unlink_finisher(ip, &vprecycle); 2051 hammer2_inode_depend(dip, ip); /* after modified */ 2052 hammer2_inode_unlock(ip); 2053 } 2054 } else { 2055 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2056 } 2057 2058 /* 2059 * Update dip's mtime 2060 */ 2061 if (error == 0) { 2062 uint64_t mtime; 2063 2064 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 2065 hammer2_update_time(&mtime); 2066 hammer2_inode_modify(dip); 2067 dip->meta.mtime = mtime; 2068 /*hammer2_inode_unlock(dip);*/ 2069 } 2070 hammer2_inode_unlock(dip); 2071 2072 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2073 if (error == 0) { 2074 cache_unlink(ap->a_nch); 2075 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK); 2076 } 2077 if (vprecycle) 2078 hammer2_inode_vprecycle(vprecycle); 2079 return (error); 2080 } 2081 2082 /* 2083 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred } 2084 */ 2085 static 2086 int 2087 hammer2_vop_nrename(struct vop_nrename_args *ap) 2088 { 2089 struct namecache *fncp; 2090 struct namecache *tncp; 2091 hammer2_inode_t *fdip; /* source directory */ 2092 hammer2_inode_t *tdip; /* target directory */ 2093 hammer2_inode_t *ip; /* file being renamed */ 2094 hammer2_inode_t *tip; /* replaced target during rename or NULL */ 2095 struct vnode *vprecycle; 2096 const char *fname; 2097 size_t fname_len; 2098 const char *tname; 2099 size_t tname_len; 2100 int error; 2101 int update_tdip; 2102 int update_fdip; 2103 hammer2_key_t tlhc; 2104 2105 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount) 2106 return(EXDEV); 2107 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount) 2108 return(EXDEV); 2109 2110 fdip = VTOI(ap->a_fdvp); /* source directory */ 2111 tdip = VTOI(ap->a_tdvp); /* target directory */ 2112 2113 if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG)) 2114 return (EROFS); 2115 if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1) 2116 return (ENOSPC); 2117 2118 fncp = ap->a_fnch->ncp; /* entry name in source */ 2119 fname = fncp->nc_name; 2120 fname_len = fncp->nc_nlen; 2121 2122 tncp = ap->a_tnch->ncp; /* entry name in target */ 2123 tname = tncp->nc_name; 2124 tname_len = tncp->nc_nlen; 2125 2126 hammer2_trans_init(tdip->pmp, 0); 2127 2128 update_tdip = 0; 2129 update_fdip = 0; 2130 2131 ip = VTOI(fncp->nc_vp); 2132 hammer2_inode_ref(ip); /* extra ref */ 2133 2134 /* 2135 * Lookup the target name to determine if a directory entry 2136 * is being overwritten. We only hold related inode locks 2137 * temporarily, the operating system is expected to protect 2138 * against rename races. 2139 */ 2140 tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL; 2141 if (tip) 2142 hammer2_inode_ref(tip); /* extra ref */ 2143 2144 /* 2145 * For now try to avoid deadlocks with a simple pointer address 2146 * test. (tip) can be NULL. 2147 */ 2148 error = 0; 2149 { 2150 hammer2_inode_t *ip1 = fdip; 2151 hammer2_inode_t *ip2 = tdip; 2152 hammer2_inode_t *ip3 = ip; 2153 hammer2_inode_t *ip4 = tip; /* may be NULL */ 2154 2155 if (fdip > tdip) { 2156 ip1 = tdip; 2157 ip2 = fdip; 2158 } 2159 if (tip && ip > tip) { 2160 ip3 = tip; 2161 ip4 = ip; 2162 } 2163 hammer2_inode_lock4(ip1, ip2, ip3, ip4); 2164 } 2165 2166 /* 2167 * Resolve the collision space for (tdip, tname, tname_len) 2168 * 2169 * tdip must be held exclusively locked to prevent races since 2170 * multiple filenames can end up in the same collision space. 2171 */ 2172 { 2173 hammer2_xop_scanlhc_t *sxop; 2174 hammer2_tid_t lhcbase; 2175 2176 tlhc = hammer2_dirhash(tname, tname_len); 2177 lhcbase = tlhc; 2178 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 2179 sxop->lhc = tlhc; 2180 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc); 2181 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) { 2182 if (tlhc != sxop->head.cluster.focus->bref.key) 2183 break; 2184 ++tlhc; 2185 } 2186 error = hammer2_error_to_errno(error); 2187 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP); 2188 2189 if (error) { 2190 if (error != ENOENT) 2191 goto done2; 2192 ++tlhc; 2193 error = 0; 2194 } 2195 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) { 2196 error = ENOSPC; 2197 goto done2; 2198 } 2199 } 2200 2201 /* 2202 * Ready to go, issue the rename to the backend. Note that meta-data 2203 * updates to the related inodes occur separately from the rename 2204 * operation. 2205 * 2206 * NOTE: While it is not necessary to update ip->meta.name*, doing 2207 * so aids catastrophic recovery and debugging. 2208 */ 2209 if (error == 0) { 2210 hammer2_xop_nrename_t *xop4; 2211 2212 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 2213 xop4->lhc = tlhc; 2214 xop4->ip_key = ip->meta.name_key; 2215 hammer2_xop_setip2(&xop4->head, ip); 2216 hammer2_xop_setip3(&xop4->head, tdip); 2217 if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) 2218 hammer2_xop_setip4(&xop4->head, tip); 2219 hammer2_xop_setname(&xop4->head, fname, fname_len); 2220 hammer2_xop_setname2(&xop4->head, tname, tname_len); 2221 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc); 2222 2223 error = hammer2_xop_collect(&xop4->head, 0); 2224 error = hammer2_error_to_errno(error); 2225 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP); 2226 2227 if (error == ENOENT) 2228 error = 0; 2229 2230 /* 2231 * Update inode meta-data. 2232 * 2233 * WARNING! The in-memory inode (ip) structure does not 2234 * maintain a copy of the inode's filename buffer. 2235 */ 2236 if (error == 0 && 2237 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 2238 hammer2_inode_modify(ip); 2239 ip->meta.name_len = tname_len; 2240 ip->meta.name_key = tlhc; 2241 } 2242 if (error == 0) { 2243 hammer2_inode_modify(ip); 2244 ip->meta.iparent = tdip->meta.inum; 2245 } 2246 update_fdip = 1; 2247 update_tdip = 1; 2248 } 2249 2250 done2: 2251 /* 2252 * If no error, the backend has replaced the target directory entry. 2253 * We must adjust nlinks on the original replace target if it exists. 2254 */ 2255 vprecycle = NULL; 2256 if (error == 0 && tip) { 2257 hammer2_inode_unlink_finisher(tip, &vprecycle); 2258 } 2259 2260 /* 2261 * Update directory mtimes to represent the something changed. 2262 */ 2263 if (update_fdip || update_tdip) { 2264 uint64_t mtime; 2265 2266 hammer2_update_time(&mtime); 2267 if (update_fdip) { 2268 hammer2_inode_modify(fdip); 2269 fdip->meta.mtime = mtime; 2270 } 2271 if (update_tdip) { 2272 hammer2_inode_modify(tdip); 2273 tdip->meta.mtime = mtime; 2274 } 2275 } 2276 if (tip) { 2277 hammer2_inode_unlock(tip); 2278 hammer2_inode_drop(tip); 2279 } 2280 hammer2_inode_unlock(ip); 2281 hammer2_inode_unlock(tdip); 2282 hammer2_inode_unlock(fdip); 2283 hammer2_inode_drop(ip); 2284 hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ); 2285 2286 /* 2287 * Issue the namecache update after unlocking all the internal 2288 * hammer2 structures, otherwise we might deadlock. 2289 * 2290 * WARNING! The target namespace must be updated atomically, 2291 * and we depend on cache_rename() to handle that for 2292 * us. Do not do a separate cache_unlink() because 2293 * that leaves a small window of opportunity for other 2294 * threads to allocate the target namespace before we 2295 * manage to complete our rename. 2296 * 2297 * WARNING! cache_rename() (and cache_unlink()) will properly 2298 * set VREF_FINALIZE on any attached vnode. Do not 2299 * call cache_setunresolved() manually before-hand as 2300 * this will prevent the flag from being set later via 2301 * cache_rename(). If VREF_FINALIZE is not properly set 2302 * and the inode is no longer in the topology, related 2303 * chains can remain dirty indefinitely. 2304 */ 2305 if (error == 0 && tip) { 2306 /*cache_unlink(ap->a_tnch); see above */ 2307 /*cache_setunresolved(ap->a_tnch); see above */ 2308 } 2309 if (error == 0) { 2310 cache_rename(ap->a_fnch, ap->a_tnch); 2311 hammer2_knote(ap->a_fdvp, NOTE_WRITE); 2312 hammer2_knote(ap->a_tdvp, NOTE_WRITE); 2313 hammer2_knote(fncp->nc_vp, NOTE_RENAME); 2314 } 2315 if (vprecycle) 2316 hammer2_inode_vprecycle(vprecycle); 2317 2318 return (error); 2319 } 2320 2321 /* 2322 * hammer2_vop_ioctl { vp, command, data, fflag, cred } 2323 */ 2324 static 2325 int 2326 hammer2_vop_ioctl(struct vop_ioctl_args *ap) 2327 { 2328 hammer2_inode_t *ip; 2329 int error; 2330 2331 ip = VTOI(ap->a_vp); 2332 2333 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data, 2334 ap->a_fflag, ap->a_cred); 2335 return (error); 2336 } 2337 2338 static 2339 int 2340 hammer2_vop_mountctl(struct vop_mountctl_args *ap) 2341 { 2342 struct mount *mp; 2343 hammer2_pfs_t *pmp; 2344 int rc; 2345 2346 switch (ap->a_op) { 2347 case (MOUNTCTL_SET_EXPORT): 2348 mp = ap->a_head.a_ops->head.vv_mount; 2349 pmp = MPTOPMP(mp); 2350 2351 if (ap->a_ctllen != sizeof(struct export_args)) 2352 rc = (EINVAL); 2353 else 2354 rc = vfs_export(mp, &pmp->export, 2355 (const struct export_args *)ap->a_ctl); 2356 break; 2357 default: 2358 rc = vop_stdmountctl(ap); 2359 break; 2360 } 2361 return (rc); 2362 } 2363 2364 /* 2365 * KQFILTER 2366 */ 2367 static void filt_hammer2detach(struct knote *kn); 2368 static int filt_hammer2read(struct knote *kn, long hint); 2369 static int filt_hammer2write(struct knote *kn, long hint); 2370 static int filt_hammer2vnode(struct knote *kn, long hint); 2371 2372 static struct filterops hammer2read_filtops = 2373 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2374 NULL, filt_hammer2detach, filt_hammer2read }; 2375 static struct filterops hammer2write_filtops = 2376 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2377 NULL, filt_hammer2detach, filt_hammer2write }; 2378 static struct filterops hammer2vnode_filtops = 2379 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2380 NULL, filt_hammer2detach, filt_hammer2vnode }; 2381 2382 static 2383 int 2384 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap) 2385 { 2386 struct vnode *vp = ap->a_vp; 2387 struct knote *kn = ap->a_kn; 2388 2389 switch (kn->kn_filter) { 2390 case EVFILT_READ: 2391 kn->kn_fop = &hammer2read_filtops; 2392 break; 2393 case EVFILT_WRITE: 2394 kn->kn_fop = &hammer2write_filtops; 2395 break; 2396 case EVFILT_VNODE: 2397 kn->kn_fop = &hammer2vnode_filtops; 2398 break; 2399 default: 2400 return (EOPNOTSUPP); 2401 } 2402 2403 kn->kn_hook = (caddr_t)vp; 2404 2405 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2406 2407 return(0); 2408 } 2409 2410 static void 2411 filt_hammer2detach(struct knote *kn) 2412 { 2413 struct vnode *vp = (void *)kn->kn_hook; 2414 2415 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2416 } 2417 2418 static int 2419 filt_hammer2read(struct knote *kn, long hint) 2420 { 2421 struct vnode *vp = (void *)kn->kn_hook; 2422 hammer2_inode_t *ip = VTOI(vp); 2423 off_t off; 2424 2425 if (hint == NOTE_REVOKE) { 2426 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2427 return(1); 2428 } 2429 off = ip->meta.size - kn->kn_fp->f_offset; 2430 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX; 2431 if (kn->kn_sfflags & NOTE_OLDAPI) 2432 return(1); 2433 return (kn->kn_data != 0); 2434 } 2435 2436 2437 static int 2438 filt_hammer2write(struct knote *kn, long hint) 2439 { 2440 if (hint == NOTE_REVOKE) 2441 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2442 kn->kn_data = 0; 2443 return (1); 2444 } 2445 2446 static int 2447 filt_hammer2vnode(struct knote *kn, long hint) 2448 { 2449 if (kn->kn_sfflags & hint) 2450 kn->kn_fflags |= hint; 2451 if (hint == NOTE_REVOKE) { 2452 kn->kn_flags |= (EV_EOF | EV_NODATA); 2453 return (1); 2454 } 2455 return (kn->kn_fflags != 0); 2456 } 2457 2458 /* 2459 * FIFO VOPS 2460 */ 2461 static 2462 int 2463 hammer2_vop_markatime(struct vop_markatime_args *ap) 2464 { 2465 hammer2_inode_t *ip; 2466 struct vnode *vp; 2467 2468 vp = ap->a_vp; 2469 ip = VTOI(vp); 2470 2471 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 2472 return (EROFS); 2473 return(0); 2474 } 2475 2476 static 2477 int 2478 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap) 2479 { 2480 int error; 2481 2482 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 2483 if (error) 2484 error = hammer2_vop_kqfilter(ap); 2485 return(error); 2486 } 2487 2488 /* 2489 * VOPS vector 2490 */ 2491 struct vop_ops hammer2_vnode_vops = { 2492 .vop_default = vop_defaultop, 2493 .vop_fsync = hammer2_vop_fsync, 2494 .vop_getpages = vop_stdgetpages, 2495 .vop_putpages = vop_stdputpages, 2496 .vop_access = hammer2_vop_access, 2497 .vop_advlock = hammer2_vop_advlock, 2498 .vop_close = hammer2_vop_close, 2499 .vop_nlink = hammer2_vop_nlink, 2500 .vop_ncreate = hammer2_vop_ncreate, 2501 .vop_nsymlink = hammer2_vop_nsymlink, 2502 .vop_nremove = hammer2_vop_nremove, 2503 .vop_nrmdir = hammer2_vop_nrmdir, 2504 .vop_nrename = hammer2_vop_nrename, 2505 .vop_getattr = hammer2_vop_getattr, 2506 .vop_getattr_lite = hammer2_vop_getattr_lite, 2507 .vop_setattr = hammer2_vop_setattr, 2508 .vop_readdir = hammer2_vop_readdir, 2509 .vop_readlink = hammer2_vop_readlink, 2510 .vop_read = hammer2_vop_read, 2511 .vop_write = hammer2_vop_write, 2512 .vop_open = hammer2_vop_open, 2513 .vop_inactive = hammer2_vop_inactive, 2514 .vop_reclaim = hammer2_vop_reclaim, 2515 .vop_nresolve = hammer2_vop_nresolve, 2516 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot, 2517 .vop_nmkdir = hammer2_vop_nmkdir, 2518 .vop_nmknod = hammer2_vop_nmknod, 2519 .vop_ioctl = hammer2_vop_ioctl, 2520 .vop_mountctl = hammer2_vop_mountctl, 2521 .vop_bmap = hammer2_vop_bmap, 2522 .vop_strategy = hammer2_vop_strategy, 2523 .vop_kqfilter = hammer2_vop_kqfilter 2524 }; 2525 2526 struct vop_ops hammer2_spec_vops = { 2527 .vop_default = vop_defaultop, 2528 .vop_fsync = hammer2_vop_fsync, 2529 .vop_read = vop_stdnoread, 2530 .vop_write = vop_stdnowrite, 2531 .vop_access = hammer2_vop_access, 2532 .vop_close = hammer2_vop_close, 2533 .vop_markatime = hammer2_vop_markatime, 2534 .vop_getattr = hammer2_vop_getattr, 2535 .vop_inactive = hammer2_vop_inactive, 2536 .vop_reclaim = hammer2_vop_reclaim, 2537 .vop_setattr = hammer2_vop_setattr 2538 }; 2539 2540 struct vop_ops hammer2_fifo_vops = { 2541 .vop_default = fifo_vnoperate, 2542 .vop_fsync = hammer2_vop_fsync, 2543 #if 0 2544 .vop_read = hammer2_vop_fiforead, 2545 .vop_write = hammer2_vop_fifowrite, 2546 #endif 2547 .vop_access = hammer2_vop_access, 2548 #if 0 2549 .vop_close = hammer2_vop_fifoclose, 2550 #endif 2551 .vop_markatime = hammer2_vop_markatime, 2552 .vop_getattr = hammer2_vop_getattr, 2553 .vop_inactive = hammer2_vop_inactive, 2554 .vop_reclaim = hammer2_vop_reclaim, 2555 .vop_setattr = hammer2_vop_setattr, 2556 .vop_kqfilter = hammer2_vop_fifokqfilter 2557 }; 2558 2559