/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"

static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

struct objcache *cache_xops;

static __inline
void
hammer2_knote(struct vnode *vp, int flags)
{
	if (flags)
		KNOTE(&vp->v_pollinfo.vpi_kqinfo.ki_note, flags);
}

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Check for deleted inodes and recycle immediately on the last
	 * release.
	 * Be sure to destroy any left-over buffer cache buffers
	 * so we do not waste time trying to flush them.
	 *
	 * Note that deleting the file block chains under the inode chain
	 * would just be a waste of energy, so don't do it.
	 *
	 * WARNING: nvtruncbuf() can only be safely called without the inode
	 *	    lock held due to the way our write thread works.
	 */
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * Detect updates to the embedded data which may be
		 * synchronized by the strategy code.  Simply mark the
		 * inode modified so it gets picked up by our normal flush.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);
		vrecycle(vp);
	}
	return (0);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return (0);
	pmp = ip->pmp;

	/*
	 * The final close of a deleted file or directory marks it for
	 * destruction.  The DELETED flag allows the flusher to shortcut
	 * any modified blocks still unflushed (that is, just ignore them).
	 *
	 * HAMMER2 usually does not try to optimize the freemap by returning
	 * deleted blocks to it as it does not usually know how many snapshots
	 * might be referencing portions of the file/dir.
	 */
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ.  However,
	 * unlinked-but-open inodes may already have been synced and might
	 * still require deletion-on-reclaim.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
			  HAMMER2_INODE_DELETING)) ==
	    HAMMER2_INODE_ISUNLINKED) {
		hammer2_inode_lock(ip, 0);
		if ((ip->flags & (HAMMER2_INODE_ISUNLINKED |
				  HAMMER2_INODE_DELETING)) ==
		    HAMMER2_INODE_ISUNLINKED) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
			hammer2_inode_delayed_sideq(ip);
		}
		hammer2_inode_unlock(ip);
	}

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
	 * action is needed.
	 *
	 * We cannot safely synchronize the inode from inside the reclaim
	 * due to potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.  We also can't do it
	 * here without desynchronizing from the related directory entries.
	 */
	hammer2_inode_drop(ip);			/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}

/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
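 * (The root topology is synchronized separately by the filesystem syncer;
 * the inode remains on the SIDEQ for that purpose, per the NOTE in the
 * function body below.)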
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Flush dirty buffers in the file's logical buffer cache.
	 * It is best to wait for the strategy code to commit the
	 * buffers to the device's backing buffer cache before
	 * then trying to flush the inode.
	 *
	 * This should be quick, but certain inode modifications cached
	 * entirely in the hammer2_inode structure may not trigger a
	 * buffer read until the flush so the fsync can wind up also
	 * doing scattered reads.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! We are not in a flush transaction.  The inode remains on
	 *	 the sideq so the filesystem syncer can synchronize it to
	 *	 the volume root.
	 */
	error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
	if (error2)
		error1 = error2;

	/*
	 * We may be able to clear the vnode dirty flag.  The
	 * hammer2_pfs_moderate() code depends on this usually working.
	 */
	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED |
			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree) &&
	    !bio_track_active(&vp->v_track_write)) {
		vclrisdirty(vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, 0);

	return (error1);
}

/*
 * No lock needed, just handle ip->update
 */
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	mode_t mode;
	uint32_t uflags;
	int error;
	int update;

retry:
	update = spin_access_start(&ip->cluster_spin);

	/*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	mode = ip->meta.mode;
	uflags = ip->meta.uflags;
	/*hammer2_inode_unlock(ip);*/

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	error = vop_helper_access(ap, uid, gid, mode, uflags);

	return (error);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	hammer2_chain_t *chain;
	int update;
	int i;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
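	/* atime is not supported (see setattr below); report mtime instead */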
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		for (i = 0; i < ip->cluster.nchains; ++i) {
			if ((chain = ip->cluster.array[i].chain) != NULL) {
				if (vap->va_bytes <
				    chain->bref.embed.stats.data_count) {
					vap->va_bytes =
					    chain->bref.embed.stats.data_count;
				}
			}
		}
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
}

static
int
hammer2_vop_getattr_quick(struct vop_getattr_args *ap)
{
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int update;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = -1;
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);

	/*
	 * Normally disallow setattr if there is no space, unless we
	 * are in emergency mode (might be needed to chflags -R noschg
	 * files prior to removal).
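	 * (A hammer2_vfs_enospace() return value greater than 1 indicates
	 * a hard out-of-space condition; compare the check in
	 * hammer2_vop_write() below.)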
	 */
	if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
	    hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
		return (ENOSPC);
	}

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
					hammer2_to_unix_xid(&ip->meta.uid),
					ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				spin_lock_update(&ip->cluster_spin);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				spin_unlock_update(&ip->cluster_spin);
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode) {
				hammer2_inode_modify(ip);
				spin_lock_update(&ip->cluster_spin);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
				spin_unlock_update(&ip->cluster_spin);
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			vclrflags(vp, VLASTWRITETS);
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0 && ip->meta.mode != cur_mode) {
			hammer2_inode_modify(ip);
			spin_lock_update(&ip->cluster_spin);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			spin_unlock_update(&ip->cluster_spin);
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		vclrflags(vp, VLASTWRITETS);
	}

done:
	/*
	 * If a truncation occurred we must call chain_sync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_fsync now in order to prepare the inode's indirect
	 * block table.
	 *
	 * WARNING! This means we are making an adjustment to the inode's
	 * chain outside of sync/fsync, and not just to inode->meta, which
	 * may result in some consistency issues if a crash were to occur
	 * at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ip->vp, kflags);

	return (error);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Setup directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		/*
		 * Be careful with lockorder when accessing ".."
		 *
		 * (ip is the current dir. xip is the parent dir).
		 */
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	/*
	 * Use XOP for cluster scan.
	 *
	 * parent is the inode cluster, already locked for us.  Don't
	 * double lock shared locks as this will screw up upgrades.
	 */
	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error)
			break;
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			uint16_t namlen;

			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			namlen = bref.embed.dirent.namlen;
			if (namlen <= sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname = hammer2_xop_gdata(&xop->head)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum, dtype,
					     namlen, dname);
			if (namlen > sizeof(bref.check.buf))
				hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;
	int bigread;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	bigread = (uio->uio_resid > 100 * 1024 * 1024);

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> IO_SEQSHIFT;

	/*
	 * Check resource limit
	 */
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	} else {
		hammer2_trans_init(ip->pmp, 0);
	}
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	if (uio->uio_segflg == UIO_NOCOPY)
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
					    HAMMER2_TRANS_SIDEQ);
	else
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (error);
}

/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
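	 * (ip->meta.size is sampled once under the shared inode lock
	 * below and governs the remainder of the loop.)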
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
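		 * (bwillwrite() blocks when the system is too heavily
		 * loaded with dirty buffers.)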
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= MAXBSIZE);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
		} else {
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (ioflag & IO_SYNC) {
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			bawrite(bp);
		} else if (ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW) {
			bdwrite(bp);
		} else {
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
		}
	}

	/*
	 * Cleanup.
	 * If we extended the file EOF but then failed to write it all
	 * through, the entire write is a failure and we have to back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);		/* note lock order */
		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
		} else {
			hammer2_update_time(&ip->meta.mtime);
			vclrflags(vp, VLASTWRITETS);
		}

#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}

/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
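 * (The DIRECTDATA transition corresponds to the size crossing
 * HAMMER2_EMBEDDED_BYTES in the function body below.)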
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			struct buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * NOTE: multiple hammer2_inode structures can be aliased to the
	 *	 same chain element, for example for hardlinks.  This
	 *	 use case does not 'reattach' inode associations that
	 *	 might already exist, but always allocates a new one.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));

	return error;
}

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
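	 * (dip is in fact still exclusively locked at this point; the
	 * commented-out calls below illustrate the weaker shared-lock
	 * requirement.)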
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
	return vop_stdopen(ap);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
	return vop_stdclose(ap);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;

	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	/*
	 * Create the directory entry and bump nlinks.
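	 * (nlinks is protected by the exclusive inode locks acquired via
	 * hammer2_inode_lock4() above.)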
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
	}
	if (error == 0) {
		/*
		 * Update dip's mtime
		 */
		uint64_t mtime;

		hammer2_update_time(&mtime);
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = mtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
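	 * (hammer2_inode_create_normal() returns nip locked, which is why
	 * both the error and success paths below unlock it.)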
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, 0);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_inode_unlock(dip);
		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
		return error;
	}
	hammer2_inode_depend(dip, nip);	/* before igetv */
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
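	 * (The link target is written through the normal file write path
	 * below via hammer2_write_file().)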
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

/*
 * hammer2_vop_nremove { nch, dvp, cred }
 */
static
int
hammer2_vop_nremove(struct vop_nremove_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int error;
	int isopen;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	ncp = ap->a_nch->ncp;

	if (hammer2_debug_inode && dip->meta.inum == hammer2_debug_inode) {
		kprintf("hammer2: attempt to delete inside debug inode: %s\n",
			ncp->nc_name);
		while (hammer2_debug_inode &&
		       dip->meta.inum == hammer2_debug_inode) {
			tsleep(&hammer2_debug_inode, 0, "h2debug", hz*5);
		}
	}

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	/*
	 * The unlink XOP unlinks the path from the directory and
	 * locates and returns the cluster associated with the real inode.
	 * We have to handle nlinks here on the frontend.
	 */
	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * The namecache entry is locked so nobody can use this namespace.
	 * Calculate isopen to determine if this namespace has an open vp
	 * associated with it and resolve the vp only if it does.
	 *
	 * We try to avoid resolving the vnode if nobody has it open, but
	 * note that the test is via this namespace only.
	 */
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 0;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
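	 * (hammer2_inode_unlink_finisher() below performs the nlinks
	 * adjustment, honoring isopen.)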
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			if (hammer2_debug_inode &&
			    ip->meta.inum == hammer2_debug_inode) {
				kprintf("hammer2: attempt to delete debug "
					"inode!\n");
				while (hammer2_debug_inode &&
				       ip->meta.inum == hammer2_debug_inode) {
					tsleep(&hammer2_debug_inode, 0,
					       "h2debug", hz*5);
				}
			}
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_depend(dip, ip); /* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return (error);
}

/*
 * hammer2_vop_nrmdir { nch, dvp, cred }
 */
static
int
hammer2_vop_nrmdir(struct vop_nrmdir_args *ap)
{
	hammer2_xop_unlink_t *xop;
	hammer2_inode_t *dip;
	hammer2_inode_t *ip;
	struct namecache *ncp;
	int isopen;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly)
		return (EROFS);
#if 0
	/* allow removals, except user to also bulkfree */
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);
#endif

	hammer2_trans_init(dip->pmp, 0);
	hammer2_inode_lock(dip, 0);

	xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);
	isopen = cache_isopen(ap->a_nch);
	xop->isdir = 1;
	xop->dopermanent = 0;
	hammer2_xop_start(&xop->head, &hammer2_unlink_desc);

	/*
	 * Collect the real inode and adjust nlinks, destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
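	 * (Same collection sequence as hammer2_vop_nremove() above, but
	 * with xop->isdir set to 1.)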

	/*
	 * Collect the real inode and adjust nlinks.  Destroy the real
	 * inode if nlinks transitions to 0 and it was the real inode
	 * (else it has already been removed).
	 */
	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);

	if (error == 0) {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (ip) {
			hammer2_inode_unlink_finisher(ip, isopen);
			hammer2_inode_depend(dip, ip);	/* after modified */
			hammer2_inode_unlock(ip);
		}
	} else {
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
	if (error == 0) {
		cache_unlink(ap->a_nch);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return (error);
}

/*
 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred }
 */
static
int
hammer2_vop_nrename(struct vop_nrename_args *ap)
{
	struct namecache *fncp;
	struct namecache *tncp;
	hammer2_inode_t *fdip;	/* source directory */
	hammer2_inode_t *tdip;	/* target directory */
	hammer2_inode_t *ip;	/* file being renamed */
	hammer2_inode_t *tip;	/* replaced target during rename or NULL */
	const uint8_t *fname;
	size_t fname_len;
	const uint8_t *tname;
	size_t tname_len;
	int error;
	int update_tdip;
	int update_fdip;
	hammer2_key_t tlhc;

	if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount)
		return(EXDEV);
	if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount)
		return(EXDEV);

	fdip = VTOI(ap->a_fdvp);	/* source directory */
	tdip = VTOI(ap->a_tdvp);	/* target directory */

	if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	fncp = ap->a_fnch->ncp;		/* entry name in source */
	fname = fncp->nc_name;
	fname_len = fncp->nc_nlen;

	tncp = ap->a_tnch->ncp;		/* entry name in target */
	tname = tncp->nc_name;
	tname_len = tncp->nc_nlen;

	hammer2_trans_init(tdip->pmp, 0);

	update_tdip = 0;
	update_fdip = 0;

	ip = VTOI(fncp->nc_vp);
	hammer2_inode_ref(ip);		/* extra ref */

	/*
	 * Lookup the target name to determine if a directory entry
	 * is being overwritten.  We only hold related inode locks
	 * temporarily; the operating system is expected to protect
	 * against rename races.
	 */
	tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL;
	if (tip)
		hammer2_inode_ref(tip);	/* extra ref */

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 *
	 * For now try to avoid deadlocks with a simple pointer address
	 * test.  (tip) can be NULL.
	 */
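	/*
	 * The block below orders each inode pair (the two directories,
	 * and the renamed file plus the optional replaced target) by
	 * pointer address before handing all four to
	 * hammer2_inode_lock4(), so racing renames touching the same
	 * inodes acquire the locks in a consistent order.
	 */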
	error = 0;
	{
		hammer2_inode_t *ip1 = fdip;
		hammer2_inode_t *ip2 = tdip;
		hammer2_inode_t *ip3 = ip;
		hammer2_inode_t *ip4 = tip;	/* may be NULL */

		if (fdip > tdip) {
			ip1 = tdip;
			ip2 = fdip;
		}
		if (tip && ip > tip) {
			ip3 = tip;
			ip4 = ip;
		}
		hammer2_inode_lock4(ip1, ip2, ip3, ip4);
	}

	/*
	 * Resolve the collision space for (tdip, tname, tname_len)
	 *
	 * tdip must be held exclusively locked to prevent races since
	 * multiple filenames can end up in the same collision space.
	 */
	{
		hammer2_xop_scanlhc_t *sxop;
		hammer2_tid_t lhcbase;

		tlhc = hammer2_dirhash(tname, tname_len);
		lhcbase = tlhc;
		sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING);
		sxop->lhc = tlhc;
		hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc);
		while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) {
			if (tlhc != sxop->head.cluster.focus->bref.key)
				break;
			++tlhc;
		}
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP);

		if (error) {
			if (error != ENOENT)
				goto done2;
			++tlhc;
			error = 0;
		}
		if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) {
			error = ENOSPC;
			goto done2;
		}
	}

	/*
	 * Ready to go, issue the rename to the backend.  Note that
	 * meta-data updates to the related inodes occur separately
	 * from the rename operation.
	 *
	 * NOTE: While it is not necessary to update ip->meta.name*,
	 *	 doing so aids catastrophic recovery and debugging.
	 */
	if (error == 0) {
		hammer2_xop_nrename_t *xop4;

		xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING);
		xop4->lhc = tlhc;
		xop4->ip_key = ip->meta.name_key;
		hammer2_xop_setip2(&xop4->head, ip);
		hammer2_xop_setip3(&xop4->head, tdip);
		hammer2_xop_setname(&xop4->head, fname, fname_len);
		hammer2_xop_setname2(&xop4->head, tname, tname_len);
		hammer2_xop_start(&xop4->head, &hammer2_nrename_desc);

		error = hammer2_xop_collect(&xop4->head, 0);
		error = hammer2_error_to_errno(error);
		hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP);

		if (error == ENOENT)
			error = 0;

		/*
		 * Update inode meta-data.
		 *
		 * WARNING! The in-memory inode (ip) structure does not
		 *	    maintain a copy of the inode's filename buffer.
		 */
		if (error == 0 &&
		    (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) {
			hammer2_inode_modify(ip);
			ip->meta.name_len = tname_len;
			ip->meta.name_key = tlhc;
		}
		if (error == 0) {
			hammer2_inode_modify(ip);
			ip->meta.iparent = tdip->meta.inum;
		}
		update_fdip = 1;
		update_tdip = 1;
	}

done2:
	/*
	 * If no error, the backend has replaced the target directory entry.
	 * We must adjust nlinks on the original replaced target if it
	 * exists.
	 */
	if (error == 0 && tip) {
		int isopen;

		isopen = cache_isopen(ap->a_tnch);
		hammer2_inode_unlink_finisher(tip, isopen);
	}

	/*
	 * Update directory mtimes to represent the change.
	 */
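	/*
	 * update_fdip and update_tdip are set only once the backend
	 * rename succeeds; the stamping below follows the same
	 * modify-then-update-mtime pattern used by nremove and nrmdir
	 * above.
	 */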
	if (update_fdip || update_tdip) {
		uint64_t mtime;

		hammer2_update_time(&mtime);
		if (update_fdip) {
			hammer2_inode_modify(fdip);
			fdip->meta.mtime = mtime;
		}
		if (update_tdip) {
			hammer2_inode_modify(tdip);
			tdip->meta.mtime = mtime;
		}
	}
	if (tip) {
		hammer2_inode_unlock(tip);
		hammer2_inode_drop(tip);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);
	hammer2_inode_unlock(fdip);
	hammer2_inode_drop(ip);
	hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Issue the namecache update after unlocking all the internal
	 * hammer2 structures, otherwise we might deadlock.
	 *
	 * WARNING! The target namespace must be updated atomically,
	 *	    and we depend on cache_rename() to handle that for
	 *	    us.  Do not do a separate cache_unlink() because
	 *	    that leaves a small window of opportunity for other
	 *	    threads to allocate the target namespace before we
	 *	    manage to complete our rename.
	 *
	 * WARNING! cache_rename() (and cache_unlink()) will properly
	 *	    set VREF_FINALIZE on any attached vnode.  Do not
	 *	    call cache_setunresolved() manually before-hand as
	 *	    this will prevent the flag from being set later via
	 *	    cache_rename().  If VREF_FINALIZE is not properly set
	 *	    and the inode is no longer in the topology, related
	 *	    chains can remain dirty indefinitely.
	 */
	if (error == 0 && tip) {
		/*cache_unlink(ap->a_tnch); see above */
		/*cache_setunresolved(ap->a_tnch); see above */
	}
	if (error == 0) {
		cache_rename(ap->a_fnch, ap->a_tnch);
		hammer2_knote(ap->a_fdvp, NOTE_WRITE);
		hammer2_knote(ap->a_tdvp, NOTE_WRITE);
		hammer2_knote(fncp->nc_vp, NOTE_RENAME);
	}

	return (error);
}

/*
 * hammer2_vop_ioctl { vp, command, data, fflag, cred }
 */
static
int
hammer2_vop_ioctl(struct vop_ioctl_args *ap)
{
	hammer2_inode_t *ip;
	int error;

	ip = VTOI(ap->a_vp);

	error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data,
			      ap->a_fflag, ap->a_cred);
	return (error);
}

static
int
hammer2_vop_mountctl(struct vop_mountctl_args *ap)
{
	struct mount *mp;
	hammer2_pfs_t *pmp;
	int rc;

	switch (ap->a_op) {
	case MOUNTCTL_SET_EXPORT:
		mp = ap->a_head.a_ops->head.vv_mount;
		pmp = MPTOPMP(mp);

		if (ap->a_ctllen != sizeof(struct export_args))
			rc = EINVAL;
		else
			rc = vfs_export(mp, &pmp->export,
					(const struct export_args *)ap->a_ctl);
		break;
	default:
		rc = vop_stdmountctl(ap);
		break;
	}
	return (rc);
}

/*
 * KQFILTER
 */
static void filt_hammer2detach(struct knote *kn);
static int filt_hammer2read(struct knote *kn, long hint);
static int filt_hammer2write(struct knote *kn, long hint);
static int filt_hammer2vnode(struct knote *kn, long hint);

static struct filterops hammer2read_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2read };
static struct filterops hammer2write_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2write };
static struct filterops hammer2vnode_filtops =
	{ FILTEROP_ISFD | FILTEROP_MPSAFE,
	  NULL, filt_hammer2detach, filt_hammer2vnode };
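
/*
 * The three filterops share filt_hammer2detach() and differ only in
 * their event test.  FILTEROP_ISFD indicates the knote identifier is a
 * file descriptor and FILTEROP_MPSAFE allows the filters to run without
 * the MP lock.  The NULL slot is the attach hook, which is not needed
 * here because hammer2_vop_kqfilter() below inserts the knote itself
 * via knote_insert().
 */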

static
int
hammer2_vop_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &hammer2read_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &hammer2write_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &hammer2vnode_filtops;
		break;
	default:
		return (EOPNOTSUPP);
	}

	kn->kn_hook = (caddr_t)vp;

	knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);

	return(0);
}

static void
filt_hammer2detach(struct knote *kn)
{
	struct vnode *vp = (void *)kn->kn_hook;

	knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn);
}

static int
filt_hammer2read(struct knote *kn, long hint)
{
	struct vnode *vp = (void *)kn->kn_hook;
	hammer2_inode_t *ip = VTOI(vp);
	off_t off;

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
		return(1);
	}
	off = ip->meta.size - kn->kn_fp->f_offset;
	kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX;
	if (kn->kn_sfflags & NOTE_OLDAPI)
		return(1);
	return (kn->kn_data != 0);
}

static int
filt_hammer2write(struct knote *kn, long hint)
{
	if (hint == NOTE_REVOKE)
		kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT);
	kn->kn_data = 0;
	return (1);
}

static int
filt_hammer2vnode(struct knote *kn, long hint)
{
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	return (kn->kn_fflags != 0);
}

/*
 * FIFO VOPS
 */
static
int
hammer2_vop_markatime(struct vop_markatime_args *ap)
{
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	return(0);
}

static
int
hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap)
{
	int error;

	error = VOCALL(&fifo_vnode_vops, &ap->a_head);
	if (error)
		error = hammer2_vop_kqfilter(ap);
	return(error);
}
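
/*
 * hammer2_vop_fifokqfilter() gives fifofs first crack at the filter so
 * that FIFO read/write events are driven by the pipe code; only when
 * fifofs rejects the filter type does the request fall back to the
 * regular hammer2 filters above (presumably EVFILT_VNODE).
 */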

/*
 * VOPS vector
 */
struct vop_ops hammer2_vnode_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_getpages = vop_stdgetpages,
	.vop_putpages = vop_stdputpages,
	.vop_access = hammer2_vop_access,
	.vop_advlock = hammer2_vop_advlock,
	.vop_close = hammer2_vop_close,
	.vop_nlink = hammer2_vop_nlink,
	.vop_ncreate = hammer2_vop_ncreate,
	.vop_nsymlink = hammer2_vop_nsymlink,
	.vop_nremove = hammer2_vop_nremove,
	.vop_nrmdir = hammer2_vop_nrmdir,
	.vop_nrename = hammer2_vop_nrename,
	.vop_getattr = hammer2_vop_getattr,
	.vop_getattr_quick = hammer2_vop_getattr_quick,
	.vop_setattr = hammer2_vop_setattr,
	.vop_readdir = hammer2_vop_readdir,
	.vop_readlink = hammer2_vop_readlink,
	.vop_read = hammer2_vop_read,
	.vop_write = hammer2_vop_write,
	.vop_open = hammer2_vop_open,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_nresolve = hammer2_vop_nresolve,
	.vop_nlookupdotdot = hammer2_vop_nlookupdotdot,
	.vop_nmkdir = hammer2_vop_nmkdir,
	.vop_nmknod = hammer2_vop_nmknod,
	.vop_ioctl = hammer2_vop_ioctl,
	.vop_mountctl = hammer2_vop_mountctl,
	.vop_bmap = hammer2_vop_bmap,
	.vop_strategy = hammer2_vop_strategy,
	.vop_kqfilter = hammer2_vop_kqfilter
};

struct vop_ops hammer2_spec_vops = {
	.vop_default = vop_defaultop,
	.vop_fsync = hammer2_vop_fsync,
	.vop_read = vop_stdnoread,
	.vop_write = vop_stdnowrite,
	.vop_access = hammer2_vop_access,
	.vop_close = hammer2_vop_close,
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr
};

struct vop_ops hammer2_fifo_vops = {
	.vop_default = fifo_vnoperate,
	.vop_fsync = hammer2_vop_fsync,
#if 0
	.vop_read = hammer2_vop_fiforead,
	.vop_write = hammer2_vop_fifowrite,
#endif
	.vop_access = hammer2_vop_access,
#if 0
	.vop_close = hammer2_vop_fifoclose,
#endif
	.vop_markatime = hammer2_vop_markatime,
	.vop_getattr = hammer2_vop_getattr,
	.vop_inactive = hammer2_vop_inactive,
	.vop_reclaim = hammer2_vop_reclaim,
	.vop_setattr = hammer2_vop_setattr,
	.vop_kqfilter = hammer2_vop_fifokqfilter
};
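
/*
 * These tables are not wired to vnodes in this file.  A minimal sketch
 * of how a DragonFly filesystem typically registers them at mount time
 * (the exact call site for hammer2 lives in its VFS mount code, not
 * here):
 *
 *	vfs_add_vnodeops(mp, &hammer2_vnode_vops, &mp->mnt_vn_norm_ops);
 *	vfs_add_vnodeops(mp, &hammer2_spec_vops, &mp->mnt_vn_spec_ops);
 *	vfs_add_vnodeops(mp, &hammer2_fifo_vops, &mp->mnt_vn_fifo_ops);
 *
 * The filesystem's vget path then points each vnode's v_ops at the
 * table matching its type (regular/directory vs. device vs. fifo).
 */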