/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2022 Tomohiro Kusumi <tkusumi@netbsd.org>
 * Copyright (c) 2011-2022 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel Filesystem interface
 *
 * NOTE! local ipdata pointers must be reloaded on any modifying operation
 *	 to the inode as its underlying chain may have changed.
 */

/*
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>
*/

#include "hammer2.h"

/*
static int hammer2_read_file(hammer2_inode_t *ip, struct uio *uio,
				int seqcount);
*/
static int hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
				int ioflag, int seqcount);
static void hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize);
static void hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize);

/*
 * Last reference to a vnode is going away but it is still cached.
 */
static
int
hammer2_vop_inactive(struct vop_inactive_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(vp);
		return (0);
	}

	/*
	 * Acquire the inode lock to interlock against vp updates via
	 * the inode path and file deletions and such (which can be
	 * namespace-only operations that might not hold the vnode).
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & HAMMER2_INODE_ISUNLINKED) {
		hammer2_key_t lbase;
		int nblksize;

		/*
		 * If the inode has been unlinked we can throw away all
		 * buffers (dirty or not) and clean the file out.
		 *
		 * Because vrecycle() calls are not guaranteed, try to
		 * dispose of the inode as much as possible right here.
		 */
		nblksize = hammer2_calc_logical(ip, 0, &lbase, NULL);
		nvtruncbuf(vp, 0, nblksize, 0, 0);

		/*
		 * Delete the file on-media.
		 */
		if ((ip->flags & HAMMER2_INODE_DELETING) == 0) {
			atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
			hammer2_inode_delayed_sideq(ip);
		}
		hammer2_inode_unlock(ip);

		/*
		 * Recycle immediately if possible
		 */
		vrecycle(vp);
	} else {
		hammer2_inode_unlock(ip);
	}
	return (0);
#endif
	return (EOPNOTSUPP);
}

/*
 * Reclaim a vnode so that it can be reused; after the inode is
 * disassociated, the filesystem must manage it alone.
 */
static
int
hammer2_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer2_inode_t *ip;
	hammer2_pfs_t *pmp;
	struct vnode *vp;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ip == NULL)
		return(0);

	pmp = ip->pmp;

	/*
	 * NOTE! We do not attempt to flush chains here, flushing is
	 *	 really fragile and could also deadlock.
	 */
	vclrisdirty(vp);

	/*
	 * The inode lock is required to disconnect it.
	 */
	hammer2_inode_lock(ip, 0);
	vp->v_data = NULL;
	ip->vp = NULL;

	/*
	 * Delete the file on-media.  This should have been handled by the
	 * inactivation.  The operation is likely still queued on the inode
	 * though so only complain if the stars don't align.
	 */
	if ((ip->flags & (HAMMER2_INODE_ISUNLINKED | HAMMER2_INODE_DELETING)) ==
	    HAMMER2_INODE_ISUNLINKED)
	{
		assert(0);
		atomic_set_int(&ip->flags, HAMMER2_INODE_DELETING);
		hammer2_inode_delayed_sideq(ip);
		kprintf("hammer2: vp=%p ip=%p unlinked but not disposed\n",
			vp, ip);
	}
	hammer2_inode_unlock(ip);

	/*
	 * Modified inodes will already be on SIDEQ or SYNCQ, no further
	 * action is needed.
	 *
	 * We cannot safely synchronize the inode from inside the reclaim
	 * due to potentially deep locks held as-of when the reclaim occurs.
	 * Interactions and potential deadlocks abound.  We also can't do it
	 * here without desynchronizing from the related directory entry(ies).
	 */
	hammer2_inode_drop(ip);			/* vp ref */

	/*
	 * XXX handle background sync when ip dirty, kernel will no longer
	 * notify us regarding this inode because there is no longer a
	 * vnode attached to it.
	 */

	return (0);
}

int
hammer2_reclaim(struct vnode *vp)
{
	struct vop_reclaim_args ap = {
		.a_vp = vp,
	};

	return hammer2_vop_reclaim(&ap);
}

/*
 * Currently this function synchronizes the front-end inode state to the
 * backend chain topology, then flushes the inode's chain and sub-topology
 * to backend media.  This function does not flush the root topology down to
 * the inode.
 */
static
int
hammer2_vop_fsync(struct vop_fsync_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;
	int error1;
	int error2;

	vp = ap->a_vp;
	ip = VTOI(vp);
	error1 = 0;

	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Flush dirty buffers in the file's logical buffer cache.
	 * It is best to wait for the strategy code to commit the
	 * buffers to the device's backing buffer cache before
	 * then trying to flush the inode.
	 *
	 * This should be quick, but certain inode modifications cached
	 * entirely in the hammer2_inode structure may not trigger a
	 * buffer read until the flush so the fsync can wind up also
	 * doing scattered reads.
	 */
	vfsync(vp, ap->a_waitfor, 1, NULL, NULL);
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Flush any inode changes
	 */
	hammer2_inode_lock(ip, 0);
	if (ip->flags & (HAMMER2_INODE_RESIZED|HAMMER2_INODE_MODIFIED))
		error1 = hammer2_inode_chain_sync(ip);

	/*
	 * Flush dirty chains related to the inode.
	 *
	 * NOTE! We are not in a flush transaction.  The inode remains on
	 *	 the sideq so the filesystem syncer can synchronize it to
	 *	 the volume root.
	 */
	error2 = hammer2_inode_chain_flush(ip, HAMMER2_XOP_INODE_STOP);
	if (error2)
		error1 = error2;

	/*
	 * We may be able to clear the vnode dirty flag.
	 */
	if ((ip->flags & (HAMMER2_INODE_MODIFIED |
			  HAMMER2_INODE_RESIZED |
			  HAMMER2_INODE_DIRTYDATA)) == 0 &&
	    RB_EMPTY(&vp->v_rbdirty_tree) &&
	    !bio_track_active(&vp->v_track_write)) {
		vclrisdirty(vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, 0);

	return (error1);
#endif
	return (EOPNOTSUPP);
}

/*
 * No lock needed, just handle ip->update
 */
static
int
hammer2_vop_access(struct vop_access_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	uid_t uid;
	gid_t gid;
	mode_t mode;
	uint32_t uflags;
	int error;
	int update;

retry:
	update = spin_access_start(&ip->cluster_spin);

	/*hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);*/
	uid = hammer2_to_unix_xid(&ip->meta.uid);
	gid = hammer2_to_unix_xid(&ip->meta.gid);
	mode = ip->meta.mode;
	uflags = ip->meta.uflags;
	/*hammer2_inode_unlock(ip);*/

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	error = vop_helper_access(ap, uid, gid, mode, uflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_getattr(struct vop_getattr_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int update;

	vp = ap->a_vp;
	vap = ap->a_vap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
	vap->va_mode = ip->meta.mode;
	vap->va_nlink = ip->meta.nlinks;
	vap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	vap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
	vap->va_size = ip->meta.size;	/* protected by shared lock */
	vap->va_blocksize = HAMMER2_PBUFSIZE;
	vap->va_flags = ip->meta.uflags;
	hammer2_time_to_timespec(ip->meta.ctime, &vap->va_ctime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_mtime);
	hammer2_time_to_timespec(ip->meta.mtime, &vap->va_atime);
	vap->va_gen = 1;
	vap->va_bytes = 0;
	if (ip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) {
		/*
		 * Can't really calculate directory use sans the files under
		 * it, just assume one block for now.
		 */
		vap->va_bytes += HAMMER2_INODE_BYTES;
	} else {
		vap->va_bytes = hammer2_inode_data_count(ip);
	}
	vap->va_type = hammer2_get_vtype(ip->meta.type);
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_getattr_lite(struct vop_getattr_lite_args *ap)
{
#if 0
	hammer2_pfs_t *pmp;
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr_lite *lvap;
	int update;

	vp = ap->a_vp;
	lvap = ap->a_lvap;

	ip = VTOI(vp);
	pmp = ip->pmp;

retry:
	update = spin_access_start(&ip->cluster_spin);

#if 0
	vap->va_fsid = pmp->mp->mnt_stat.f_fsid.val[0];
	vap->va_fileid = ip->meta.inum;
#endif
	lvap->va_mode = ip->meta.mode;
	lvap->va_nlink = ip->meta.nlinks;
	lvap->va_uid = hammer2_to_unix_xid(&ip->meta.uid);
	lvap->va_gid = hammer2_to_unix_xid(&ip->meta.gid);
#if 0
	vap->va_rmajor = 0;
	vap->va_rminor = 0;
#endif
	lvap->va_size = ip->meta.size;
#if 0
	vap->va_blocksize = HAMMER2_PBUFSIZE;
#endif
	lvap->va_flags = ip->meta.uflags;
	lvap->va_type = hammer2_get_vtype(ip->meta.type);
#if 0
	vap->va_filerev = 0;
	vap->va_uid_uuid = ip->meta.uid;
	vap->va_gid_uuid = ip->meta.gid;
	vap->va_vaflags = VA_UID_UUID_VALID | VA_GID_UUID_VALID |
			  VA_FSID_UUID_VALID;
#endif

	if (__predict_false(spin_access_end(&ip->cluster_spin, update)))
		goto retry;

	return (0);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_setattr(struct vop_setattr_args *ap)
{
#if 0
	hammer2_inode_t *ip;
	struct vnode *vp;
	struct vattr *vap;
	int error;
	int kflags = 0;
	uint64_t ctime;

	vp = ap->a_vp;
	vap = ap->a_vap;
	hammer2_update_time(&ctime);

	ip = VTOI(vp);

	if (ip->pmp->ronly)
		return (EROFS);

	/*
	 * Normally disallow setattr if there is no space, unless we
	 * are in emergency mode (might be needed to chflags -R noschg
	 * files prior to removal).
	 */
	if ((ip->pmp->flags & HAMMER2_PMPF_EMERG) == 0 &&
	    hammer2_vfs_enospace(ip, 0, ap->a_cred) > 1) {
		return (ENOSPC);
	}

	hammer2_trans_init(ip->pmp, 0);
	hammer2_inode_lock(ip, 0);
	error = 0;

	if (vap->va_flags != VNOVAL) {
		uint32_t flags;

		flags = ip->meta.uflags;
		error = vop_helper_setattr_flags(&flags, vap->va_flags,
				hammer2_to_unix_xid(&ip->meta.uid),
				ap->a_cred);
		if (error == 0) {
			if (ip->meta.uflags != flags) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uflags = flags;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
				kflags |= NOTE_ATTRIB;
			}
			if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
				error = 0;
				goto done;
			}
		}
		goto done;
	}
	if (ip->meta.uflags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto done;
	}
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);
		uuid_t uuid_uid;
		uuid_t uuid_gid;

		error = vop_helper_chown(ap->a_vp, vap->va_uid, vap->va_gid,
					 ap->a_cred,
					 &cur_uid, &cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_guid_to_uuid(&uuid_uid, cur_uid);
			hammer2_guid_to_uuid(&uuid_gid, cur_gid);
			if (bcmp(&uuid_uid, &ip->meta.uid, sizeof(uuid_uid)) ||
			    bcmp(&uuid_gid, &ip->meta.gid, sizeof(uuid_gid)) ||
			    ip->meta.mode != cur_mode
			) {
				hammer2_inode_modify(ip);
				hammer2_spin_lock_update(&ip->cluster_spin);
				ip->meta.uid = uuid_uid;
				ip->meta.gid = uuid_gid;
				ip->meta.mode = cur_mode;
				ip->meta.ctime = ctime;
				hammer2_spin_unlock_update(&ip->cluster_spin);
			}
			kflags |= NOTE_ATTRIB;
		}
	}

	/*
	 * Resize the file
	 */
	if (vap->va_size != VNOVAL && ip->meta.size != vap->va_size) {
		switch(vp->v_type) {
		case VREG:
			if (vap->va_size == ip->meta.size)
				break;
			if (vap->va_size < ip->meta.size) {
				hammer2_mtx_ex(&ip->truncate_lock);
				hammer2_truncate_file(ip, vap->va_size);
				hammer2_mtx_unlock(&ip->truncate_lock);
				kflags |= NOTE_WRITE;
			} else {
				hammer2_extend_file(ip, vap->va_size);
				kflags |= NOTE_WRITE | NOTE_EXTEND;
			}
			hammer2_inode_modify(ip);
			ip->meta.mtime = ctime;
			vclrflags(vp, VLASTWRITETS);
			break;
		default:
			error = EINVAL;
			goto done;
		}
	}
#if 0
	/* atime not supported */
	if (vap->va_atime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.atime = hammer2_timespec_to_time(&vap->va_atime);
		kflags |= NOTE_ATTRIB;
	}
#endif
	if (vap->va_mode != (mode_t)VNOVAL) {
		mode_t cur_mode = ip->meta.mode;
		uid_t cur_uid = hammer2_to_unix_xid(&ip->meta.uid);
		gid_t cur_gid = hammer2_to_unix_xid(&ip->meta.gid);

		error = vop_helper_chmod(ap->a_vp, vap->va_mode, ap->a_cred,
					 cur_uid, cur_gid, &cur_mode);
		if (error == 0) {
			hammer2_inode_modify(ip);
			hammer2_spin_lock_update(&ip->cluster_spin);
			ip->meta.mode = cur_mode;
			ip->meta.ctime = ctime;
			hammer2_spin_unlock_update(&ip->cluster_spin);
			kflags |= NOTE_ATTRIB;
		}
	}

	if (vap->va_mtime.tv_sec != VNOVAL) {
		hammer2_inode_modify(ip);
		ip->meta.mtime = hammer2_timespec_to_time(&vap->va_mtime);
		kflags |= NOTE_ATTRIB;
		vclrflags(vp, VLASTWRITETS);
	}

done:
	/*
	 * If a truncation occurred we must call chain_sync() now in order
	 * to trim the related data chains, otherwise a later expansion can
	 * cause havoc.
	 *
	 * If an extend occurred that changed the DIRECTDATA state, we must
	 * call inode_chain_sync now in order to prepare the inode's indirect
	 * block table.
	 *
	 * WARNING! This means we are making an adjustment to the inode's
	 *	    chain outside of sync/fsync, and not just to inode->meta,
	 *	    which may result in some consistency issues if a crash
	 *	    were to occur at just the wrong time.
	 */
	if (ip->flags & HAMMER2_INODE_RESIZED)
		hammer2_inode_chain_sync(ip);

	/*
	 * Cleanup.
	 */
	hammer2_inode_unlock(ip);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ip->vp, kflags);

	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_readdir(struct vop_readdir_args *ap)
{
#if 0
	hammer2_xop_readdir_t *xop;
	hammer2_blockref_t bref;
	hammer2_inode_t *ip;
	hammer2_tid_t inum;
	hammer2_key_t lkey;
	struct uio *uio;
	off_t *cookies;
	off_t saveoff;
	int cookie_index;
	int ncookies;
	int error;
	int eofflag;
	int r;

	ip = VTOI(ap->a_vp);
	uio = ap->a_uio;
	saveoff = uio->uio_offset;
	eofflag = 0;
	error = 0;

	/*
	 * Set up directory entry cookies if requested
	 */
	if (ap->a_ncookies) {
		ncookies = uio->uio_resid / 16 + 1;
		if (ncookies > 1024)
			ncookies = 1024;
		cookies = kmalloc(ncookies * sizeof(off_t), M_TEMP, M_WAITOK);
	} else {
		ncookies = -1;
		cookies = NULL;
	}
	cookie_index = 0;

	hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);

	/*
	 * Handle artificial entries.  To ensure that only positive 64 bit
	 * quantities are returned to userland we always strip off bit 63.
	 * The hash code is designed such that codes 0x0000-0x7FFF are not
	 * used, allowing us to use these codes for artificial entries.
	 *
	 * Entry 0 is used for '.' and entry 1 is used for '..'.  Do not
	 * allow '..' to cross the mount point into (e.g.) the super-root.
	 */
	if (saveoff == 0) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 1, ".");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	if (saveoff == 1) {
		inum = ip->meta.inum & HAMMER2_DIRHASH_USERMSK;
		if (ip != ip->pmp->iroot)
			inum = ip->meta.iparent & HAMMER2_DIRHASH_USERMSK;
		r = vop_write_dirent(&error, uio, inum, DT_DIR, 2, "..");
		if (r)
			goto done;
		if (cookies)
			cookies[cookie_index] = saveoff;
		++saveoff;
		++cookie_index;
		if (cookie_index == ncookies)
			goto done;
	}

	lkey = saveoff | HAMMER2_DIRHASH_VISIBLE;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: lkey %016jx\n", lkey);
	if (error)
		goto done;

	xop = hammer2_xop_alloc(ip, 0);
	xop->lkey = lkey;
	hammer2_xop_start(&xop->head, &hammer2_readdir_desc);

	for (;;) {
		const hammer2_inode_data_t *ripdata;
		const char *dname;
		int dtype;

		error = hammer2_xop_collect(&xop->head, 0);
		error = hammer2_error_to_errno(error);
		if (error) {
			break;
		}
		if (cookie_index == ncookies)
			break;
		if (hammer2_debug & 0x0020)
			kprintf("cluster chain %p %p\n",
				xop->head.cluster.focus,
				(xop->head.cluster.focus ?
				 xop->head.cluster.focus->data : (void *)-1));
		hammer2_cluster_bref(&xop->head.cluster, &bref);

		if (bref.type == HAMMER2_BREF_TYPE_INODE) {
			ripdata = &hammer2_xop_gdata(&xop->head)->ipdata;
			dtype = hammer2_get_dtype(ripdata->meta.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			r = vop_write_dirent(&error, uio,
					     ripdata->meta.inum &
					      HAMMER2_DIRHASH_USERMSK,
					     dtype,
					     ripdata->meta.name_len,
					     ripdata->filename);
			hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else if (bref.type == HAMMER2_BREF_TYPE_DIRENT) {
			uint16_t namlen;

			dtype = hammer2_get_dtype(bref.embed.dirent.type);
			saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
			namlen = bref.embed.dirent.namlen;
			if (namlen <= sizeof(bref.check.buf)) {
				dname = bref.check.buf;
			} else {
				dname = hammer2_xop_gdata(&xop->head)->buf;
			}
			r = vop_write_dirent(&error, uio,
					     bref.embed.dirent.inum, dtype,
					     namlen, dname);
			if (namlen > sizeof(bref.check.buf))
				hammer2_xop_pdata(&xop->head);
			if (r)
				break;
			if (cookies)
				cookies[cookie_index] = saveoff;
			++cookie_index;
		} else {
			/* XXX chain error */
			kprintf("bad chain type readdir %d\n", bref.type);
		}
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == ENOENT) {
		error = 0;
		eofflag = 1;
		saveoff = (hammer2_key_t)-1;
	} else {
		saveoff = bref.key & HAMMER2_DIRHASH_USERMSK;
	}
done:
	hammer2_inode_unlock(ip);
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;
	if (hammer2_debug & 0x0020)
		kprintf("readdir: done at %016jx\n", saveoff);
	uio->uio_offset = saveoff & ~HAMMER2_DIRHASH_VISIBLE;
	if (error && cookie_index == 0) {
		if (cookies) {
			kfree(cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		}
	} else {
		if (cookies) {
			*ap->a_ncookies = cookie_index;
			*ap->a_cookies = cookies;
		}
	}
	return (error);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_readlink { vp, uio, cred }
 */
static
int
hammer2_vop_readlink(struct vop_readlink_args *ap)
{
#if 0
	struct vnode *vp;
	hammer2_inode_t *ip;
	int error;

	vp = ap->a_vp;
	if (vp->v_type != VLNK)
		return (EINVAL);
	ip = VTOI(vp);

	error = hammer2_read_file(ip, ap->a_uio, 0);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_read(struct vop_read_args *ap)
{
#if 0
	struct vnode *vp;
	hammer2_inode_t *ip;
	struct uio *uio;
	int error;
	int seqcount;

	/*
	 * Read operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type == VDIR)
		return (EISDIR);
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	uio = ap->a_uio;
	error = 0;

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;

	error = hammer2_read_file(ip, uio, seqcount);
	return (error);
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_write(struct vop_write_args *ap)
{
	hammer2_inode_t *ip;
	//thread_t td;
	struct vnode *vp;
	struct uio *uio;
	int error;
	int seqcount;
	int ioflag;

	/*
	 * Write operations supported on this vnode?
	 */
	vp = ap->a_vp;
	if (vp->v_type != VREG)
		return (EINVAL);

	/*
	 * Misc
	 */
	ip = VTOI(vp);
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	error = 0;
	if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	switch (hammer2_vfs_enospace(ip, uio->uio_resid, ap->a_cred)) {
	case 2:
		return (ENOSPC);
	case 1:
		ioflag |= IO_DIRECT;	/* semi-synchronous */
		/* fall through */
	default:
		break;
	}

	seqcount = ioflag >> IO_SEQSHIFT;

	/*
	 * Check resource limit
	 */
	/*
	if (uio->uio_resid > 0 && (td = uio->uio_td) != NULL && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	     td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}
	*/

	/*
	 * The transaction interlocks against flush initiations
	 * (note: but will run concurrently with the actual flush).
	 *
	 * To avoid deadlocking against the VM system, we must flag any
	 * transaction related to the buffer cache or other direct
	 * VM page manipulation.
	 */
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_init(ip->pmp, HAMMER2_TRANS_BUFCACHE);
	} else {
		hammer2_trans_init(ip->pmp, 0);
	}
	error = hammer2_write_file(ip, uio, ioflag, seqcount);
	if (uio->uio_segflg == UIO_NOCOPY) {
		assert(0); /* no UIO_NOCOPY in makefs */
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE |
					    HAMMER2_TRANS_SIDEQ);
	} else
		hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);

	return (error);
}

int
hammer2_write(struct vnode *vp, void *buf, size_t size, off_t offset)
{
	assert(buf);
	assert(size > 0);
	assert(size <= HAMMER2_PBUFSIZE);

	struct iovec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct uio uio = {
		.uio_iov = &iov,
		.uio_iovcnt = 1,
		.uio_offset = offset,
		.uio_resid = size,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_WRITE,
		.uio_td = NULL,
	};
	struct vop_write_args ap = {
		.a_vp = vp,
		.a_uio = &uio,
		.a_ioflag = 0,
		.a_cred = NULL,
	};

	return hammer2_vop_write(&ap);
}
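
/*
 * Example (not part of the build): a minimal sketch of how a makefs
 * caller might push a file's contents through the hammer2_write()
 * wrapper above, one logical buffer at a time.  h2_copy_file_data() and
 * its arguments are hypothetical names used only for illustration; the
 * point is that each call writes at most HAMMER2_PBUFSIZE bytes,
 * matching the assertions in hammer2_write().
 */
#if 0
static int
h2_copy_file_data(struct vnode *vp, void *src, size_t len)
{
	char *p = src;
	off_t offset = 0;
	int error = 0;

	while (len > 0 && error == 0) {
		size_t n = len > HAMMER2_PBUFSIZE ? HAMMER2_PBUFSIZE : len;

		/* each call writes at most one logical buffer */
		error = hammer2_write(vp, p + offset, n, offset);
		offset += n;
		len -= n;
	}
	return (error);
}
#endif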

#if 0
/*
 * Perform read operations on a file or symlink given an UNLOCKED
 * inode and uio.
 *
 * The passed ip is not locked.
 */
static
int
hammer2_read_file(hammer2_inode_t *ip, struct uio *uio, int seqcount)
{
	hammer2_off_t size;
	struct buf *bp;
	int error;

	error = 0;

	/*
	 * UIO read loop.
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_sh(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	size = ip->meta.size;
	hammer2_mtx_unlock(&ip->lock);

	while (uio->uio_resid > 0 && uio->uio_offset < size) {
		hammer2_key_t lbase;
		hammer2_key_t leof;
		int lblksize;
		int loff;
		int n;

		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, &leof);

#if 1
		bp = NULL;
		error = cluster_readx(ip->vp, leof, lbase, lblksize,
				      B_NOTMETA | B_KVABIO,
				      uio->uio_resid,
				      seqcount * MAXBSIZE,
				      &bp);
#else
		if (uio->uio_segflg == UIO_NOCOPY) {
			bp = getblk(ip->vp, lbase, lblksize,
				    GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			if (bp->b_flags & B_CACHE) {
				int i;
				int j = 0;
				if (bp->b_xio.xio_npages != 16)
					kprintf("NPAGES BAD\n");
				for (i = 0; i < bp->b_xio.xio_npages; ++i) {
					vm_page_t m;
					m = bp->b_xio.xio_pages[i];
					if (m == NULL || m->valid == 0) {
						kprintf("bp %016jx %016jx pg %d inv",
							lbase, leof, i);
						if (m)
							kprintf("m->object %p/%p", m->object, ip->vp->v_object);
						kprintf("\n");
						j = 1;
					}
				}
				if (j)
					kprintf("b_flags %08x, b_error %d\n", bp->b_flags, bp->b_error);
			}
			bqrelse(bp);
		}
		error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
#endif
		if (error) {
			brelse(bp);
			break;
		}
		bkvasync(bp);
		loff = (int)(uio->uio_offset - lbase);
		n = lblksize - loff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (n > size - uio->uio_offset)
			n = (int)(size - uio->uio_offset);
		bp->b_flags |= B_AGE;
		uiomovebp(bp, (char *)bp->b_data + loff, n, uio);
		bqrelse(bp);
	}
	hammer2_mtx_unlock(&ip->truncate_lock);

	return (error);
}
#endif

/*
 * Write to the file represented by the inode via the logical buffer cache.
 * The inode may represent a regular file or a symlink.
 *
 * The inode must not be locked.
 */
static
int
hammer2_write_file(hammer2_inode_t *ip, struct uio *uio,
		   int ioflag, int seqcount)
{
	hammer2_key_t old_eof;
	hammer2_key_t new_eof;
	struct buf *bp;
	int kflags;
	int error;
	int modified;

	/*
	 * Setup if append
	 *
	 * WARNING! Assumes that the kernel interlocks size changes at the
	 *	    vnode level.
	 */
	hammer2_mtx_ex(&ip->lock);
	hammer2_mtx_sh(&ip->truncate_lock);
	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->meta.size;
	old_eof = ip->meta.size;

	/*
	 * Extend the file if necessary.  If the write fails at some point
	 * we will truncate it back down to cover as much as we were able
	 * to write.
	 *
	 * Doing this now makes it easier to calculate buffer sizes in
	 * the loop.
	 */
	kflags = 0;
	error = 0;
	modified = 0;

	if (uio->uio_offset + uio->uio_resid > old_eof) {
		new_eof = uio->uio_offset + uio->uio_resid;
		modified = 1;
		hammer2_extend_file(ip, new_eof);
		kflags |= NOTE_EXTEND;
	} else {
		new_eof = old_eof;
	}
	hammer2_mtx_unlock(&ip->lock);

	/*
	 * UIO write loop
	 */
	while (uio->uio_resid > 0) {
		hammer2_key_t lbase;
		int trivial;
		int endofblk;
		int lblksize;
		int loff;
		int n;

		/*
		 * Don't allow the buffer build to blow out the buffer
		 * cache.
		 */
		if ((ioflag & IO_RECURSE) == 0)
			bwillwrite(HAMMER2_PBUFSIZE);

		/*
		 * This nominally tells us how much we can cluster and
		 * what the logical buffer size needs to be.  Currently
		 * we don't try to cluster the write and just handle one
		 * block at a time.
		 */
		lblksize = hammer2_calc_logical(ip, uio->uio_offset,
						&lbase, NULL);
		loff = (int)(uio->uio_offset - lbase);

		KKASSERT(lblksize <= MAXBSIZE);

		/*
		 * Calculate bytes to copy this transfer and whether the
		 * copy completely covers the buffer or not.
		 */
		trivial = 0;
		n = lblksize - loff;
		if (n > uio->uio_resid) {
			n = uio->uio_resid;
			if (loff == lbase && uio->uio_offset + n == new_eof)
				trivial = 1;
			endofblk = 0;
		} else {
			if (loff == 0)
				trivial = 1;
			endofblk = 1;
		}
		if (lbase >= new_eof)
			trivial = 1;
		trivial = 1; /* force trivial for makefs */

		/*
		 * Get the buffer
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			 * Issuing a write with the same data backing the
			 * buffer.  Instantiate the buffer to collect the
			 * backing vm pages, then read-in any missing bits.
			 *
			 * This case is used by vop_stdputpages().
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0) {
				bqrelse(bp);
				error = bread_kvabio(ip->vp, lbase,
						     lblksize, &bp);
			}
			*/
		} else if (trivial) {
			/*
			 * Even though we are entirely overwriting the buffer
			 * we may still have to zero it out to avoid a
			 * mmap/write visibility issue.
			 */
			bp = getblkx(ip->vp, lbase, lblksize,
				     GETBLK_BHEAVY | GETBLK_KVABIO, 0);
			/*
			if ((bp->b_flags & B_CACHE) == 0)
				vfs_bio_clrbuf(bp);
			*/
		} else {
			assert(0); /* no partial write in makefs */
			/*
			 * Partial overwrite, read in any missing bits then
			 * replace the portion being written.
			 *
			 * (The strategy code will detect zero-fill physical
			 * blocks for this case).
			 */
			error = bread_kvabio(ip->vp, lbase, lblksize, &bp);
			if (error == 0)
				bheavy(bp);
		}

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Ok, copy the data in
		 */
		bkvasync(bp);
		error = uiomovebp(bp, bp->b_data + loff, n, uio);
		kflags |= NOTE_WRITE;
		modified = 1;
		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * WARNING: Pageout daemon will issue UIO_NOCOPY writes
		 *	    with IO_SYNC or IO_ASYNC set.  These writes
		 *	    must be handled as the pageout daemon expects.
		 *
		 * NOTE!    H2 relies on cluster_write() here because it
		 *	    cannot preallocate disk blocks at the logical
		 *	    level due to not knowing what the compression
		 *	    size will be at this time.
		 *
		 *	    We must use cluster_write() here and we depend
		 *	    on the write-behind feature to flush buffers
		 *	    appropriately.  If we let the buffer daemons do
		 *	    it the block allocations will be all over the
		 *	    map.
		 */
		if (1) {
			bp->b_cmd = BUF_CMD_WRITE;

			struct bio bio;
			bio.bio_buf = bp;
			bio.bio_offset = lbase;

			struct vop_strategy_args ap;
			ap.a_vp = ip->vp;
			ap.a_bio = &bio;

			error = hammer2_vop_strategy(&ap);
			assert(!error);

			brelse(bp);
		} else if (ioflag & IO_SYNC) {
			assert(0);
			bwrite(bp);
		} else if ((ioflag & IO_DIRECT) && endofblk) {
			assert(0);
			bawrite(bp);
		} else if (ioflag & IO_ASYNC) {
			assert(0);
			bawrite(bp);
		} else if (0 /*ip->vp->v_mount->mnt_flag & MNT_NOCLUSTERW*/) {
			assert(0);
			bdwrite(bp);
		} else {
			assert(0);
#if 0
#if 1
			bp->b_flags |= B_CLUSTEROK;
			cluster_write(bp, new_eof, lblksize, seqcount);
#else
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
#endif
#endif
		}
	}

	/*
	 * Cleanup.  If we extended the file EOF but then failed to write it
	 * all the way through, the entire write is a failure and we have to
	 * back up.
	 */
	if (error && new_eof != old_eof) {
		hammer2_mtx_unlock(&ip->truncate_lock);
		hammer2_mtx_ex(&ip->lock);		/* note lock order */
		hammer2_mtx_ex(&ip->truncate_lock);	/* note lock order */
		hammer2_truncate_file(ip, old_eof);
		if (ip->flags & HAMMER2_INODE_MODIFIED)
			hammer2_inode_chain_sync(ip);
		hammer2_mtx_unlock(&ip->lock);
	} else if (modified) {
		struct vnode *vp = ip->vp;

		hammer2_mtx_ex(&ip->lock);
		hammer2_inode_modify(ip);
		if (uio->uio_segflg == UIO_NOCOPY) {
			assert(0); /* no UIO_NOCOPY in makefs */
			/*
			if (vp->v_flag & VLASTWRITETS) {
				ip->meta.mtime =
				    (unsigned long)vp->v_lastwrite_ts.tv_sec *
				    1000000 +
				    vp->v_lastwrite_ts.tv_nsec / 1000;
			}
			*/
		} else {
			hammer2_update_time(&ip->meta.mtime);
			vclrflags(vp, VLASTWRITETS);
		}

#if 0
		/*
		 * REMOVED - handled by hammer2_extend_file().  Do not issue
		 * a chain_sync() outside of a sync/fsync except for DIRECTDATA
		 * state changes.
		 *
		 * Under normal conditions we only issue a chain_sync if
		 * the inode's DIRECTDATA state changed.
		 */
		if (ip->flags & HAMMER2_INODE_RESIZED)
			hammer2_inode_chain_sync(ip);
#endif
		hammer2_mtx_unlock(&ip->lock);
		hammer2_knote(ip->vp, kflags);
	}
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_mtx_unlock(&ip->truncate_lock);

	return error;
}

/*
 * Truncate the size of a file.  The inode must be locked.
 *
 * We must unconditionally set HAMMER2_INODE_RESIZED to properly
 * ensure that any on-media data beyond the new file EOF has been destroyed.
 *
 * WARNING: nvtruncbuf() can only be safely called without the inode lock
 *	    held due to the way our write thread works.  If the truncation
 *	    occurs in the middle of a buffer, nvtruncbuf() is responsible
 *	    for dirtying that buffer and zeroing out trailing bytes.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for removing dead blocks
 *	    if INODE_RESIZED is set.
 */
static
void
hammer2_truncate_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	int nblksize;

	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvtruncbuf(ip->vp, nsize,
			   nblksize, (int)nsize & (nblksize - 1),
			   0);
	}
	hammer2_mtx_ex(&ip->lock);
	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	ip->osize = ip->meta.size;
	ip->meta.size = nsize;
	atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
	hammer2_inode_modify(ip);
}

/*
 * Extend the size of a file.  The inode must be locked.
 *
 * Even though the file size is changing, we do not have to set the
 * INODE_RESIZED bit unless the file size crosses the EMBEDDED_BYTES
 * boundary.  When this occurs a hammer2_inode_chain_sync() is required
 * to prepare the inode cluster's indirect block table, otherwise
 * async execution of the strategy code will implode on us.
 *
 * WARNING! Assumes that the kernel interlocks size changes at the
 *	    vnode level.
 *
 * WARNING! Caller assumes responsibility for transitioning out
 *	    of the inode DIRECTDATA mode if INODE_RESIZED is set.
 */
static
void
hammer2_extend_file(hammer2_inode_t *ip, hammer2_key_t nsize)
{
	hammer2_key_t lbase;
	hammer2_key_t osize;
	int oblksize;
	int nblksize;
	int error;

	KKASSERT((ip->flags & HAMMER2_INODE_RESIZED) == 0);
	hammer2_inode_modify(ip);
	osize = ip->meta.size;
	ip->osize = osize;
	ip->meta.size = nsize;

	/*
	 * We must issue a chain_sync() when the DIRECTDATA state changes
	 * to prevent confusion between the flush code and the in-memory
	 * state.  This is not perfect because we are doing it outside of
	 * a sync/fsync operation, so it might not be fully synchronized
	 * with the meta-data topology flush.
	 *
	 * We must retain and re-dirty the buffer cache buffer containing
	 * the direct data so it can be written to a real block.  It should
	 * not be possible for a bread error to occur since the original data
	 * is extracted from the inode structure directly.
	 */
	if (osize <= HAMMER2_EMBEDDED_BYTES && nsize > HAMMER2_EMBEDDED_BYTES) {
		if (osize) {
			assert(0); /* no such transition in makefs */
			struct buf *bp;

			oblksize = hammer2_calc_logical(ip, 0, NULL, NULL);
			error = bread_kvabio(ip->vp, 0, oblksize, &bp);
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
			if (error == 0) {
				bheavy(bp);
				bdwrite(bp);
			} else {
				brelse(bp);
			}
		} else {
			atomic_set_int(&ip->flags, HAMMER2_INODE_RESIZED);
			hammer2_inode_chain_sync(ip);
		}
	}
	hammer2_mtx_unlock(&ip->lock);
	if (ip->vp) {
		oblksize = hammer2_calc_logical(ip, osize, &lbase, NULL);
		nblksize = hammer2_calc_logical(ip, nsize, &lbase, NULL);
		nvextendbuf(ip->vp,
			    osize, nsize,
			    oblksize, nblksize,
			    -1, -1, 0);
	}
	hammer2_mtx_ex(&ip->lock);
}

static
int
hammer2_vop_nresolve(struct vop_nresolve_args *ap)
{
	hammer2_xop_nresolve_t *xop;
	hammer2_inode_t *ip;
	hammer2_inode_t *dip;
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	dip = VTOI(ap->a_dvp);
	xop = hammer2_xop_alloc(dip, 0);

	ncp = ap->a_nch->ncp;
	hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen);

	/*
	 * Note: In DragonFly the kernel handles '.' and '..'.
	 */
	hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);

	error = hammer2_xop_collect(&xop->head, 0);
	error = hammer2_error_to_errno(error);
	if (error) {
		ip = NULL;
	} else {
		ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1);
	}
	hammer2_inode_unlock(dip);

	/*
	 * Acquire the related vnode
	 *
	 * NOTE: For error processing, only ENOENT resolves the namecache
	 *	 entry to NULL, otherwise we just return the error and
	 *	 leave the namecache unresolved.
	 *
	 * WARNING: inode structure is locked exclusively via inode_get
	 *	    but chain was locked shared.  inode_unlock()
	 *	    will handle it properly.
	 */
	if (ip) {
		vp = hammer2_igetv(ip, &error);	/* error set to UNIX error */
		if (error == 0) {
			vn_unlock(vp);
			cache_setvp(ap->a_nch, vp);
			*ap->a_vpp = vp;
		} else if (error == ENOENT) {
			cache_setvp(ap->a_nch, NULL);
		}
		hammer2_inode_unlock(ip);

		/*
		 * The vp should not be released until after we've disposed
		 * of our locks, because it might cause vop_inactive() to
		 * be called.
		 */
		if (vp)
			vrele(vp);
	} else {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	/*
	KASSERT(error || ap->a_nch->ncp->nc_vp != NULL,
		("resolve error %d/%p ap %p\n",
		 error, ap->a_nch->ncp->nc_vp, ap));
	*/

	return error;
}

int
hammer2_nresolve(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	*vpp = NULL;
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nresolve_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
	};

	return hammer2_vop_nresolve(&ap);
}
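
/*
 * Example (not part of the build): a hedged sketch of how a makefs caller
 * might use the hammer2_nresolve() wrapper above to check whether a name
 * already exists in a directory.  h2_entry_exists() and its arguments are
 * hypothetical; the wrapper returns 0 and sets *vpp when the name
 * resolves, or ENOENT when it does not.
 */
#if 0
static int
h2_entry_exists(struct vnode *dvp, char *name)
{
	struct vnode *vp = NULL;
	int error;

	error = hammer2_nresolve(dvp, &vp, name, strlen(name));
	if (error == 0 && vp != NULL)
		return (1);	/* entry present */
	return (0);
}
#endif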

static
int
hammer2_vop_nlookupdotdot(struct vop_nlookupdotdot_args *ap)
{
#if 0
	hammer2_inode_t *dip;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	inum = dip->meta.iparent;
	*ap->a_vpp = NULL;

	if (inum) {
		error = hammer2_vfs_vget(ap->a_dvp->v_mount, NULL,
					 inum, ap->a_vpp);
	} else {
		error = ENOENT;
	}
	return error;
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
		/* returns UNIX error code */
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		/*
		 * inode_depend() must occur before the igetv() because
		 * the igetv() can temporarily release the inode lock.
		 */
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 *
	 * We can use a shared inode lock and allow the meta.mtime update
	 * SMP race.  hammer2_inode_modify() is MPSAFE w/a shared lock.
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK);
	}
	return error;
}

int
hammer2_nmkdir(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = 0755,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmkdir_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmkdir(&ap);
}
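
/*
 * Example (not part of the build): a hedged sketch of building a nested
 * directory with the hammer2_nmkdir() wrapper above.  h2_mkdir_p() and
 * its parameters are hypothetical; the point is only that each call takes
 * the parent directory's vnode and returns the new directory's vnode in
 * *vpp, which can then serve as the parent for the next level.
 */
#if 0
static int
h2_mkdir_p(struct vnode *rootvp, char *a, char *b, struct vnode **vpp)
{
	struct vnode *dvp = NULL;
	int error;

	error = hammer2_nmkdir(rootvp, &dvp, a, strlen(a));
	if (error == 0)
		error = hammer2_nmkdir(dvp, vpp, b, strlen(b));
	return (error);
}
#endif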

static
int
hammer2_vop_open(struct vop_open_args *ap)
{
#if 0
	return vop_stdopen(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_advlock { vp, id, op, fl, flags }
 */
static
int
hammer2_vop_advlock(struct vop_advlock_args *ap)
{
#if 0
	hammer2_inode_t *ip = VTOI(ap->a_vp);
	hammer2_off_t size;

	size = ip->meta.size;
	return (lf_advlock(ap, &ip->advlock, size));
#endif
	return (EOPNOTSUPP);
}

static
int
hammer2_vop_close(struct vop_close_args *ap)
{
#if 0
	return vop_stdclose(ap);
#endif
	return (EOPNOTSUPP);
}

/*
 * hammer2_vop_nlink { nch, dvp, vp, cred }
 *
 * Create a hardlink from (vp) to {dvp, nch}.
 */
static
int
hammer2_vop_nlink(struct vop_nlink_args *ap)
{
	hammer2_inode_t *tdip;	/* target directory to create link in */
	hammer2_inode_t *ip;	/* inode we are hardlinking to */
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	int error;
	uint64_t cmtime;

	/* We know it's the same in makefs */
	/*
	if (ap->a_dvp->v_mount != ap->a_vp->v_mount)
		return(EXDEV);
	*/

	tdip = VTOI(ap->a_dvp);
	if (tdip->pmp->ronly || (tdip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(tdip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;

	/*
	 * ip represents the file being hardlinked.  The file could be a
	 * normal file or a hardlink target if it has already been hardlinked.
	 * (with the new semantics, it will almost always be a hardlink
	 * target).
	 *
	 * Bump nlinks and potentially also create or move the hardlink
	 * target in the parent directory common to (ip) and (tdip).  The
	 * consolidation code can modify ip->cluster.  The returned cluster
	 * is locked.
	 */
	ip = VTOI(ap->a_vp);
	KASSERT(ip->pmp, ("ip->pmp is NULL %p %p", ip, ip->pmp));
	hammer2_trans_init(ip->pmp, 0);

	/*
	 * Target should be an indexed inode or there's no way we will ever
	 * be able to find it!
	 */
	KKASSERT((ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE) == 0);

	error = 0;

	/*
	 * Can return NULL and error == EXDEV if the common parent
	 * crosses a directory with the xlink flag set.
	 */
	hammer2_inode_lock4(tdip, ip, NULL, NULL);

	hammer2_update_time(&cmtime);

	/*
	 * Create the directory entry and bump nlinks.
	 * Also update ip's ctime.
	 */
	if (error == 0) {
		error = hammer2_dirent_create(tdip, name, name_len,
					      ip->meta.inum, ip->meta.type);
		hammer2_inode_modify(ip);
		++ip->meta.nlinks;
		ip->meta.ctime = cmtime;
	}
	if (error == 0) {
		/*
		 * Update dip's [cm]time
		 */
		hammer2_inode_modify(tdip);
		tdip->meta.mtime = cmtime;
		tdip->meta.ctime = cmtime;

		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, ap->a_vp);
	}
	hammer2_inode_unlock(ip);
	hammer2_inode_unlock(tdip);

	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_SIDEQ);
	hammer2_knote(ap->a_vp, NOTE_LINK);
	hammer2_knote(ap->a_dvp, NOTE_WRITE);

	return error;
}

int
hammer2_nlink(struct vnode *dvp, struct vnode *vp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	struct vop_nlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vp = vp,
	};

	return hammer2_vop_nlink(&ap);
}
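
/*
 * Example (not part of the build): a hedged sketch of adding a hardlink
 * with the hammer2_nlink() wrapper above.  h2_add_link() is a
 * hypothetical helper; dvp is the directory receiving the new name and
 * vp is the vnode of the existing file whose nlinks count gets bumped.
 */
#if 0
static int
h2_add_link(struct vnode *dvp, struct vnode *vp, char *newname)
{
	return (hammer2_nlink(dvp, vp, newname, strlen(newname)));
}
#endif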

/*
 * hammer2_vop_ncreate { nch, dvp, vpp, cred, vap }
 *
 * The operating system has already ensured that the directory entry
 * does not exist and done all appropriate namespace locking.
 */
static
int
hammer2_vop_ncreate(struct vop_ncreate_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	inum = hammer2_trans_newinum(dip->pmp);

	/*
	 * Create the actual inode as a hidden file in the iroot, then
	 * create the directory entry.  The creation of the actual inode
	 * sets its nlinks to 1 which is the value we desire.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);

	if (error) {
		error = hammer2_error_to_errno(error);
	} else {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_ncreate(struct vnode *dvp, struct vnode **vpp, char *name, int nlen)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VREG,
		.va_mode = 0644,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_ncreate_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_ncreate(&ap);
}
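
/*
 * Example (not part of the build): a hedged sketch combining the
 * hammer2_ncreate() wrapper above with hammer2_write() to populate a new
 * regular file.  h2_create_file() and its arguments are hypothetical and
 * assume the data fits in a single logical buffer.
 */
#if 0
static int
h2_create_file(struct vnode *dvp, char *name, void *data, size_t len)
{
	struct vnode *vp = NULL;
	int error;

	assert(len <= HAMMER2_PBUFSIZE);
	error = hammer2_ncreate(dvp, &vp, name, strlen(name));
	if (error == 0)
		error = hammer2_write(vp, data, len, 0);
	return (error);
}
#endif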

/*
 * Make a device node (typically a fifo)
 */
static
int
hammer2_vop_nmknod(struct vop_nmknod_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	/*
	 * Create the device inode and then create the directory entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
	} else {
		hammer2_inode_depend(dip, nip);	/* before igetv */
		*ap->a_vpp = hammer2_igetv(nip, &error);
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_nmknod(struct vnode *dvp, struct vnode **vpp, char *name, int nlen,
	       int type)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = type,
		.va_mode = 0644,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nmknod_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
	};

	return hammer2_vop_nmknod(&ap);
}
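
/*
 * Example (not part of the build): a hedged sketch of creating a fifo
 * node with the hammer2_nmknod() wrapper above.  h2_make_fifo() is a
 * hypothetical helper; the type argument is assumed to be a vtype value
 * such as VFIFO, which the wrapper forwards via the vattr it constructs.
 */
#if 0
static int
h2_make_fifo(struct vnode *dvp, char *name, struct vnode **vpp)
{
	return (hammer2_nmknod(dvp, vpp, name, strlen(name), VFIFO));
}
#endif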

/*
 * hammer2_vop_nsymlink { nch, dvp, vpp, cred, vap, target }
 */
static
int
hammer2_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	hammer2_inode_t *dip;
	hammer2_inode_t *nip;
	struct namecache *ncp;
	const uint8_t *name;
	size_t name_len;
	hammer2_tid_t inum;
	int error;

	dip = VTOI(ap->a_dvp);
	if (dip->pmp->ronly || (dip->pmp->flags & HAMMER2_PMPF_EMERG))
		return (EROFS);
	if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1)
		return (ENOSPC);

	ncp = ap->a_nch->ncp;
	name = ncp->nc_name;
	name_len = ncp->nc_nlen;
	hammer2_trans_init(dip->pmp, 0);

	ap->a_vap->va_type = VLNK;	/* enforce type */

	/*
	 * Create the softlink as an inode and then create the directory
	 * entry.
	 *
	 * dip must be locked before nip to avoid deadlock.
	 */
	inum = hammer2_trans_newinum(dip->pmp);

	hammer2_inode_lock(dip, 0);
	nip = hammer2_inode_create_normal(dip, ap->a_vap, ap->a_cred,
					  inum, &error);
	if (error == 0) {
		error = hammer2_dirent_create(dip, name, name_len,
					      nip->meta.inum, nip->meta.type);
	}
	if (error) {
		if (nip) {
			hammer2_inode_unlink_finisher(nip, NULL);
			hammer2_inode_unlock(nip);
			nip = NULL;
		}
		*ap->a_vpp = NULL;
		hammer2_inode_unlock(dip);
		hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);
		return error;
	}
	hammer2_inode_depend(dip, nip);	/* before igetv */
	*ap->a_vpp = hammer2_igetv(nip, &error);

	/*
	 * Build the softlink (~like file data) and finalize the namecache.
	 */
	if (error == 0) {
		size_t bytes;
		struct uio auio;
		struct iovec aiov;

		bytes = strlen(ap->a_target);

		hammer2_inode_unlock(nip);
		bzero(&auio, sizeof(auio));
		bzero(&aiov, sizeof(aiov));
		auio.uio_iov = &aiov;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_resid = bytes;
		auio.uio_iovcnt = 1;
		auio.uio_td = curthread;
		aiov.iov_base = ap->a_target;
		aiov.iov_len = bytes;
		error = hammer2_write_file(nip, &auio, IO_APPEND, 0);
		/* XXX handle error */
		error = 0;
	} else {
		hammer2_inode_unlock(nip);
	}

	/*
	 * Update dip's mtime
	 */
	if (error == 0) {
		uint64_t mtime;

		/*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/
		hammer2_update_time(&mtime);
		hammer2_inode_modify(dip);
		dip->meta.mtime = mtime;
		/*hammer2_inode_unlock(dip);*/
	}
	hammer2_inode_unlock(dip);

	hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ);

	/*
	 * Finalize namecache
	 */
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
		hammer2_knote(ap->a_dvp, NOTE_WRITE);
	}
	return error;
}

int
hammer2_nsymlink(struct vnode *dvp, struct vnode **vpp, char *name, int nlen,
		 char *target)
{
	struct namecache nc = {
		.nc_name = name,
		.nc_nlen = nlen,
	};
	struct nchandle nch = {
		.ncp = &nc,
	};
	uid_t va_uid = VNOVAL; //getuid();
	uid_t va_gid = VNOVAL; //getgid();
	struct vattr va = {
		.va_type = VDIR,
		.va_mode = 0755,	/* should be tunable */
		.va_uid = va_uid,
		.va_gid = va_gid,
	};
	struct vop_nsymlink_args ap = {
		.a_nch = &nch,
		.a_dvp = dvp,
		.a_vpp = vpp,
		.a_vap = &va,
		.a_target = target,
	};

	return hammer2_vop_nsymlink(&ap);
}
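
/*
 * Example (not part of the build): a hedged sketch of creating a symlink
 * with the hammer2_nsymlink() wrapper above.  h2_make_symlink() is a
 * hypothetical helper; the target string becomes the link's file data via
 * the hammer2_write_file() call inside hammer2_vop_nsymlink().
 */
#if 0
static int
h2_make_symlink(struct vnode *dvp, char *name, char *target,
		struct vnode **vpp)
{
	return (hammer2_nsymlink(dvp, vpp, name, strlen(name), target));
}
#endif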
2246 */ 2247 error = hammer2_xop_collect(&xop->head, 0); 2248 error = hammer2_error_to_errno(error); 2249 vprecycle = NULL; 2250 2251 if (error == 0) { 2252 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1); 2253 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2254 if (ip) { 2255 if (hammer2_debug_inode && 2256 ip->meta.inum == hammer2_debug_inode) { 2257 kprintf("hammer2: attempt to delete debug " 2258 "inode!\n"); 2259 while (hammer2_debug_inode && 2260 ip->meta.inum == hammer2_debug_inode) { 2261 tsleep(&hammer2_debug_inode, 0, 2262 "h2debug", hz*5); 2263 } 2264 } 2265 hammer2_inode_unlink_finisher(ip, &vprecycle); 2266 hammer2_inode_depend(dip, ip); /* after modified */ 2267 hammer2_inode_unlock(ip); 2268 } 2269 } else { 2270 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2271 } 2272 2273 /* 2274 * Update dip's mtime 2275 */ 2276 if (error == 0) { 2277 uint64_t mtime; 2278 2279 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 2280 hammer2_update_time(&mtime); 2281 hammer2_inode_modify(dip); 2282 dip->meta.mtime = mtime; 2283 /*hammer2_inode_unlock(dip);*/ 2284 } 2285 hammer2_inode_unlock(dip); 2286 2287 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2288 if (error == 0) { 2289 cache_unlink(ap->a_nch); 2290 hammer2_knote(ap->a_dvp, NOTE_WRITE); 2291 } 2292 if (vprecycle) 2293 hammer2_inode_vprecycle(vprecycle); 2294 2295 return (error); 2296 #endif 2297 return (EOPNOTSUPP); 2298 } 2299 2300 /* 2301 * hammer2_vop_nrmdir { nch, dvp, cred } 2302 */ 2303 static 2304 int 2305 hammer2_vop_nrmdir(struct vop_nrmdir_args *ap) 2306 { 2307 #if 0 2308 hammer2_xop_unlink_t *xop; 2309 hammer2_inode_t *dip; 2310 hammer2_inode_t *ip; 2311 struct namecache *ncp; 2312 struct vnode *vprecycle; 2313 int error; 2314 2315 dip = VTOI(ap->a_dvp); 2316 if (dip->pmp->ronly) 2317 return (EROFS); 2318 #if 0 2319 /* allow removals, except user to also bulkfree */ 2320 if (hammer2_vfs_enospace(dip, 0, ap->a_cred) > 1) 2321 return (ENOSPC); 2322 #endif 2323 2324 hammer2_trans_init(dip->pmp, 0); 2325 hammer2_inode_lock(dip, 0); 2326 2327 xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING); 2328 2329 ncp = ap->a_nch->ncp; 2330 hammer2_xop_setname(&xop->head, ncp->nc_name, ncp->nc_nlen); 2331 xop->isdir = 1; 2332 xop->dopermanent = 0; 2333 hammer2_xop_start(&xop->head, &hammer2_unlink_desc); 2334 2335 /* 2336 * Collect the real inode and adjust nlinks, destroy the real 2337 * inode if nlinks transitions to 0 and it was the real inode 2338 * (else it has already been removed). 
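 * (Identical frontend logic to nremove above; the only difference is that
 * the XOP was started with isdir set, letting the backend apply its
 * directory-specific checks before the entry is removed.)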
2339 */ 2340 error = hammer2_xop_collect(&xop->head, 0); 2341 error = hammer2_error_to_errno(error); 2342 vprecycle = NULL; 2343 2344 if (error == 0) { 2345 ip = hammer2_inode_get(dip->pmp, &xop->head, -1, -1); 2346 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2347 if (ip) { 2348 hammer2_inode_unlink_finisher(ip, &vprecycle); 2349 hammer2_inode_depend(dip, ip); /* after modified */ 2350 hammer2_inode_unlock(ip); 2351 } 2352 } else { 2353 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP); 2354 } 2355 2356 /* 2357 * Update dip's mtime 2358 */ 2359 if (error == 0) { 2360 uint64_t mtime; 2361 2362 /*hammer2_inode_lock(dip, HAMMER2_RESOLVE_SHARED);*/ 2363 hammer2_update_time(&mtime); 2364 hammer2_inode_modify(dip); 2365 dip->meta.mtime = mtime; 2366 /*hammer2_inode_unlock(dip);*/ 2367 } 2368 hammer2_inode_unlock(dip); 2369 2370 hammer2_trans_done(dip->pmp, HAMMER2_TRANS_SIDEQ); 2371 if (error == 0) { 2372 cache_unlink(ap->a_nch); 2373 hammer2_knote(ap->a_dvp, NOTE_WRITE | NOTE_LINK); 2374 } 2375 if (vprecycle) 2376 hammer2_inode_vprecycle(vprecycle); 2377 return (error); 2378 #endif 2379 return (EOPNOTSUPP); 2380 } 2381 2382 /* 2383 * hammer2_vop_nrename { fnch, tnch, fdvp, tdvp, cred } 2384 */ 2385 static 2386 int 2387 hammer2_vop_nrename(struct vop_nrename_args *ap) 2388 { 2389 #if 0 2390 struct namecache *fncp; 2391 struct namecache *tncp; 2392 hammer2_inode_t *fdip; /* source directory */ 2393 hammer2_inode_t *tdip; /* target directory */ 2394 hammer2_inode_t *ip; /* file being renamed */ 2395 hammer2_inode_t *tip; /* replaced target during rename or NULL */ 2396 struct vnode *vprecycle; 2397 const uint8_t *fname; 2398 size_t fname_len; 2399 const uint8_t *tname; 2400 size_t tname_len; 2401 int error; 2402 int update_tdip; 2403 int update_fdip; 2404 hammer2_key_t tlhc; 2405 2406 if (ap->a_fdvp->v_mount != ap->a_tdvp->v_mount) 2407 return(EXDEV); 2408 if (ap->a_fdvp->v_mount != ap->a_fnch->ncp->nc_vp->v_mount) 2409 return(EXDEV); 2410 2411 fdip = VTOI(ap->a_fdvp); /* source directory */ 2412 tdip = VTOI(ap->a_tdvp); /* target directory */ 2413 2414 if (fdip->pmp->ronly || (fdip->pmp->flags & HAMMER2_PMPF_EMERG)) 2415 return (EROFS); 2416 if (hammer2_vfs_enospace(fdip, 0, ap->a_cred) > 1) 2417 return (ENOSPC); 2418 2419 fncp = ap->a_fnch->ncp; /* entry name in source */ 2420 fname = fncp->nc_name; 2421 fname_len = fncp->nc_nlen; 2422 2423 tncp = ap->a_tnch->ncp; /* entry name in target */ 2424 tname = tncp->nc_name; 2425 tname_len = tncp->nc_nlen; 2426 2427 hammer2_trans_init(tdip->pmp, 0); 2428 2429 update_tdip = 0; 2430 update_fdip = 0; 2431 2432 ip = VTOI(fncp->nc_vp); 2433 hammer2_inode_ref(ip); /* extra ref */ 2434 2435 /* 2436 * Lookup the target name to determine if a directory entry 2437 * is being overwritten. We only hold related inode locks 2438 * temporarily, the operating system is expected to protect 2439 * against rename races. 2440 */ 2441 tip = tncp->nc_vp ? VTOI(tncp->nc_vp) : NULL; 2442 if (tip) 2443 hammer2_inode_ref(tip); /* extra ref */ 2444 2445 /* 2446 * Can return NULL and error == EXDEV if the common parent 2447 * crosses a directory with the xlink flag set. 2448 * 2449 * For now try to avoid deadlocks with a simple pointer address 2450 * test. (tip) can be NULL. 
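 * The block below orders the two directory inodes (fdip/tdip) and the two
 * file inodes (ip/tip) by ascending address before calling
 * hammer2_inode_lock4(), so every concurrent rename acquires the four
 * locks in the same order.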
2451 */ 2452 error = 0; 2453 { 2454 hammer2_inode_t *ip1 = fdip; 2455 hammer2_inode_t *ip2 = tdip; 2456 hammer2_inode_t *ip3 = ip; 2457 hammer2_inode_t *ip4 = tip; /* may be NULL */ 2458 2459 if (fdip > tdip) { 2460 ip1 = tdip; 2461 ip2 = fdip; 2462 } 2463 if (tip && ip > tip) { 2464 ip3 = tip; 2465 ip4 = ip; 2466 } 2467 hammer2_inode_lock4(ip1, ip2, ip3, ip4); 2468 } 2469 2470 /* 2471 * Resolve the collision space for (tdip, tname, tname_len) 2472 * 2473 * tdip must be held exclusively locked to prevent races since 2474 * multiple filenames can end up in the same collision space. 2475 */ 2476 { 2477 hammer2_xop_scanlhc_t *sxop; 2478 hammer2_tid_t lhcbase; 2479 2480 tlhc = hammer2_dirhash(tname, tname_len); 2481 lhcbase = tlhc; 2482 sxop = hammer2_xop_alloc(tdip, HAMMER2_XOP_MODIFYING); 2483 sxop->lhc = tlhc; 2484 hammer2_xop_start(&sxop->head, &hammer2_scanlhc_desc); 2485 while ((error = hammer2_xop_collect(&sxop->head, 0)) == 0) { 2486 if (tlhc != sxop->head.cluster.focus->bref.key) 2487 break; 2488 ++tlhc; 2489 } 2490 error = hammer2_error_to_errno(error); 2491 hammer2_xop_retire(&sxop->head, HAMMER2_XOPMASK_VOP); 2492 2493 if (error) { 2494 if (error != ENOENT) 2495 goto done2; 2496 ++tlhc; 2497 error = 0; 2498 } 2499 if ((lhcbase ^ tlhc) & ~HAMMER2_DIRHASH_LOMASK) { 2500 error = ENOSPC; 2501 goto done2; 2502 } 2503 } 2504 2505 /* 2506 * Ready to go, issue the rename to the backend. Note that meta-data 2507 * updates to the related inodes occur separately from the rename 2508 * operation. 2509 * 2510 * NOTE: While it is not necessary to update ip->meta.name*, doing 2511 * so aids catastrophic recovery and debugging. 2512 */ 2513 if (error == 0) { 2514 hammer2_xop_nrename_t *xop4; 2515 2516 xop4 = hammer2_xop_alloc(fdip, HAMMER2_XOP_MODIFYING); 2517 xop4->lhc = tlhc; 2518 xop4->ip_key = ip->meta.name_key; 2519 hammer2_xop_setip2(&xop4->head, ip); 2520 hammer2_xop_setip3(&xop4->head, tdip); 2521 if (tip && tip->meta.type == HAMMER2_OBJTYPE_DIRECTORY) 2522 hammer2_xop_setip4(&xop4->head, tip); 2523 hammer2_xop_setname(&xop4->head, fname, fname_len); 2524 hammer2_xop_setname2(&xop4->head, tname, tname_len); 2525 hammer2_xop_start(&xop4->head, &hammer2_nrename_desc); 2526 2527 error = hammer2_xop_collect(&xop4->head, 0); 2528 error = hammer2_error_to_errno(error); 2529 hammer2_xop_retire(&xop4->head, HAMMER2_XOPMASK_VOP); 2530 2531 if (error == ENOENT) 2532 error = 0; 2533 2534 /* 2535 * Update inode meta-data. 2536 * 2537 * WARNING! The in-memory inode (ip) structure does not 2538 * maintain a copy of the inode's filename buffer. 2539 */ 2540 if (error == 0 && 2541 (ip->meta.name_key & HAMMER2_DIRHASH_VISIBLE)) { 2542 hammer2_inode_modify(ip); 2543 ip->meta.name_len = tname_len; 2544 ip->meta.name_key = tlhc; 2545 } 2546 if (error == 0) { 2547 hammer2_inode_modify(ip); 2548 ip->meta.iparent = tdip->meta.inum; 2549 } 2550 update_fdip = 1; 2551 update_tdip = 1; 2552 } 2553 2554 done2: 2555 /* 2556 * If no error, the backend has replaced the target directory entry. 2557 * We must adjust nlinks on the original replace target if it exists. 2558 */ 2559 vprecycle = NULL; 2560 if (error == 0 && tip) { 2561 hammer2_inode_unlink_finisher(tip, &vprecycle); 2562 } 2563 2564 /* 2565 * Update directory mtimes to represent the something changed. 
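 * update_fdip and update_tdip are both set once the backend rename has
 * succeeded, so both parent directories (which may be the same inode for
 * an intra-directory rename) are stamped here.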
2566 */ 2567 if (update_fdip || update_tdip) { 2568 uint64_t mtime; 2569 2570 hammer2_update_time(&mtime); 2571 if (update_fdip) { 2572 hammer2_inode_modify(fdip); 2573 fdip->meta.mtime = mtime; 2574 } 2575 if (update_tdip) { 2576 hammer2_inode_modify(tdip); 2577 tdip->meta.mtime = mtime; 2578 } 2579 } 2580 if (tip) { 2581 hammer2_inode_unlock(tip); 2582 hammer2_inode_drop(tip); 2583 } 2584 hammer2_inode_unlock(ip); 2585 hammer2_inode_unlock(tdip); 2586 hammer2_inode_unlock(fdip); 2587 hammer2_inode_drop(ip); 2588 hammer2_trans_done(tdip->pmp, HAMMER2_TRANS_SIDEQ); 2589 2590 /* 2591 * Issue the namecache update after unlocking all the internal 2592 * hammer2 structures, otherwise we might deadlock. 2593 * 2594 * WARNING! The target namespace must be updated atomically, 2595 * and we depend on cache_rename() to handle that for 2596 * us. Do not do a separate cache_unlink() because 2597 * that leaves a small window of opportunity for other 2598 * threads to allocate the target namespace before we 2599 * manage to complete our rename. 2600 * 2601 * WARNING! cache_rename() (and cache_unlink()) will properly 2602 * set VREF_FINALIZE on any attached vnode. Do not 2603 * call cache_setunresolved() manually before-hand as 2604 * this will prevent the flag from being set later via 2605 * cache_rename(). If VREF_FINALIZE is not properly set 2606 * and the inode is no longer in the topology, related 2607 * chains can remain dirty indefinitely. 2608 */ 2609 if (error == 0 && tip) { 2610 /*cache_unlink(ap->a_tnch); see above */ 2611 /*cache_setunresolved(ap->a_tnch); see above */ 2612 } 2613 if (error == 0) { 2614 cache_rename(ap->a_fnch, ap->a_tnch); 2615 hammer2_knote(ap->a_fdvp, NOTE_WRITE); 2616 hammer2_knote(ap->a_tdvp, NOTE_WRITE); 2617 hammer2_knote(fncp->nc_vp, NOTE_RENAME); 2618 } 2619 if (vprecycle) 2620 hammer2_inode_vprecycle(vprecycle); 2621 2622 return (error); 2623 #endif 2624 return (EOPNOTSUPP); 2625 } 2626 2627 /* 2628 * hammer2_vop_ioctl { vp, command, data, fflag, cred } 2629 */ 2630 static 2631 int 2632 hammer2_vop_ioctl(struct vop_ioctl_args *ap) 2633 { 2634 #if 0 2635 hammer2_inode_t *ip; 2636 int error; 2637 2638 ip = VTOI(ap->a_vp); 2639 2640 error = hammer2_ioctl(ip, ap->a_command, (void *)ap->a_data, 2641 ap->a_fflag, ap->a_cred); 2642 return (error); 2643 #endif 2644 return (EOPNOTSUPP); 2645 } 2646 2647 static 2648 int 2649 hammer2_vop_mountctl(struct vop_mountctl_args *ap) 2650 { 2651 #if 0 2652 struct mount *mp; 2653 hammer2_pfs_t *pmp; 2654 int rc; 2655 2656 switch (ap->a_op) { 2657 case (MOUNTCTL_SET_EXPORT): 2658 mp = ap->a_head.a_ops->head.vv_mount; 2659 pmp = MPTOPMP(mp); 2660 2661 if (ap->a_ctllen != sizeof(struct export_args)) 2662 rc = (EINVAL); 2663 else 2664 rc = vfs_export(mp, &pmp->export, 2665 (const struct export_args *)ap->a_ctl); 2666 break; 2667 default: 2668 rc = vop_stdmountctl(ap); 2669 break; 2670 } 2671 return (rc); 2672 #endif 2673 return (EOPNOTSUPP); 2674 } 2675 2676 /* 2677 * KQFILTER 2678 */ 2679 /* 2680 static void filt_hammer2detach(struct knote *kn); 2681 static int filt_hammer2read(struct knote *kn, long hint); 2682 static int filt_hammer2write(struct knote *kn, long hint); 2683 static int filt_hammer2vnode(struct knote *kn, long hint); 2684 2685 static struct filterops hammer2read_filtops = 2686 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2687 NULL, filt_hammer2detach, filt_hammer2read }; 2688 static struct filterops hammer2write_filtops = 2689 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2690 NULL, filt_hammer2detach, filt_hammer2write }; 2691 static struct 
filterops hammer2vnode_filtops = 2692 { FILTEROP_ISFD | FILTEROP_MPSAFE, 2693 NULL, filt_hammer2detach, filt_hammer2vnode }; 2694 */ 2695 2696 static 2697 int 2698 hammer2_vop_kqfilter(struct vop_kqfilter_args *ap) 2699 { 2700 #if 0 2701 struct vnode *vp = ap->a_vp; 2702 struct knote *kn = ap->a_kn; 2703 2704 switch (kn->kn_filter) { 2705 case EVFILT_READ: 2706 kn->kn_fop = &hammer2read_filtops; 2707 break; 2708 case EVFILT_WRITE: 2709 kn->kn_fop = &hammer2write_filtops; 2710 break; 2711 case EVFILT_VNODE: 2712 kn->kn_fop = &hammer2vnode_filtops; 2713 break; 2714 default: 2715 return (EOPNOTSUPP); 2716 } 2717 2718 kn->kn_hook = (caddr_t)vp; 2719 2720 knote_insert(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2721 2722 return(0); 2723 #endif 2724 return (EOPNOTSUPP); 2725 } 2726 2727 #if 0 2728 static void 2729 filt_hammer2detach(struct knote *kn) 2730 { 2731 struct vnode *vp = (void *)kn->kn_hook; 2732 2733 knote_remove(&vp->v_pollinfo.vpi_kqinfo.ki_note, kn); 2734 } 2735 2736 static int 2737 filt_hammer2read(struct knote *kn, long hint) 2738 { 2739 struct vnode *vp = (void *)kn->kn_hook; 2740 hammer2_inode_t *ip = VTOI(vp); 2741 off_t off; 2742 2743 if (hint == NOTE_REVOKE) { 2744 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2745 return(1); 2746 } 2747 off = ip->meta.size - kn->kn_fp->f_offset; 2748 kn->kn_data = (off < INTPTR_MAX) ? off : INTPTR_MAX; 2749 if (kn->kn_sfflags & NOTE_OLDAPI) 2750 return(1); 2751 return (kn->kn_data != 0); 2752 } 2753 2754 2755 static int 2756 filt_hammer2write(struct knote *kn, long hint) 2757 { 2758 if (hint == NOTE_REVOKE) 2759 kn->kn_flags |= (EV_EOF | EV_NODATA | EV_ONESHOT); 2760 kn->kn_data = 0; 2761 return (1); 2762 } 2763 2764 static int 2765 filt_hammer2vnode(struct knote *kn, long hint) 2766 { 2767 if (kn->kn_sfflags & hint) 2768 kn->kn_fflags |= hint; 2769 if (hint == NOTE_REVOKE) { 2770 kn->kn_flags |= (EV_EOF | EV_NODATA); 2771 return (1); 2772 } 2773 return (kn->kn_fflags != 0); 2774 } 2775 #endif 2776 2777 /* 2778 * FIFO VOPS 2779 */ 2780 static 2781 int 2782 hammer2_vop_markatime(struct vop_markatime_args *ap) 2783 { 2784 #if 0 2785 hammer2_inode_t *ip; 2786 struct vnode *vp; 2787 2788 vp = ap->a_vp; 2789 ip = VTOI(vp); 2790 2791 if (ip->pmp->ronly || (ip->pmp->flags & HAMMER2_PMPF_EMERG)) 2792 return (EROFS); 2793 return(0); 2794 #endif 2795 return (EOPNOTSUPP); 2796 } 2797 2798 static 2799 int 2800 hammer2_vop_fifokqfilter(struct vop_kqfilter_args *ap) 2801 { 2802 #if 0 2803 int error; 2804 2805 error = VOCALL(&fifo_vnode_vops, &ap->a_head); 2806 if (error) 2807 error = hammer2_vop_kqfilter(ap); 2808 return(error); 2809 #endif 2810 return (EOPNOTSUPP); 2811 } 2812 2813 /* 2814 * VOPS vector 2815 */ 2816 struct vop_ops hammer2_vnode_vops = { 2817 .vop_default = vop_defaultop, 2818 .vop_fsync = hammer2_vop_fsync, 2819 .vop_getpages = vop_stdgetpages, 2820 .vop_putpages = vop_stdputpages, 2821 .vop_access = hammer2_vop_access, 2822 .vop_advlock = hammer2_vop_advlock, 2823 .vop_close = hammer2_vop_close, 2824 .vop_nlink = hammer2_vop_nlink, 2825 .vop_ncreate = hammer2_vop_ncreate, 2826 .vop_nsymlink = hammer2_vop_nsymlink, 2827 .vop_nremove = hammer2_vop_nremove, 2828 .vop_nrmdir = hammer2_vop_nrmdir, 2829 .vop_nrename = hammer2_vop_nrename, 2830 .vop_getattr = hammer2_vop_getattr, 2831 .vop_getattr_lite = hammer2_vop_getattr_lite, 2832 .vop_setattr = hammer2_vop_setattr, 2833 .vop_readdir = hammer2_vop_readdir, 2834 .vop_readlink = hammer2_vop_readlink, 2835 .vop_read = hammer2_vop_read, 2836 .vop_write = hammer2_vop_write, 2837 .vop_open = 
hammer2_vop_open, 2838 .vop_inactive = hammer2_vop_inactive, 2839 .vop_reclaim = hammer2_vop_reclaim, 2840 .vop_nresolve = hammer2_vop_nresolve, 2841 .vop_nlookupdotdot = hammer2_vop_nlookupdotdot, 2842 .vop_nmkdir = hammer2_vop_nmkdir, 2843 .vop_nmknod = hammer2_vop_nmknod, 2844 .vop_ioctl = hammer2_vop_ioctl, 2845 .vop_mountctl = hammer2_vop_mountctl, 2846 .vop_bmap = hammer2_vop_bmap, 2847 .vop_strategy = hammer2_vop_strategy, 2848 .vop_kqfilter = hammer2_vop_kqfilter 2849 }; 2850 2851 struct vop_ops hammer2_spec_vops = { 2852 .vop_default = vop_defaultop, 2853 .vop_fsync = hammer2_vop_fsync, 2854 .vop_read = vop_stdnoread, 2855 .vop_write = vop_stdnowrite, 2856 .vop_access = hammer2_vop_access, 2857 .vop_close = hammer2_vop_close, 2858 .vop_markatime = hammer2_vop_markatime, 2859 .vop_getattr = hammer2_vop_getattr, 2860 .vop_inactive = hammer2_vop_inactive, 2861 .vop_reclaim = hammer2_vop_reclaim, 2862 .vop_setattr = hammer2_vop_setattr 2863 }; 2864 2865 struct vop_ops hammer2_fifo_vops = { 2866 .vop_default = fifo_vnoperate, 2867 .vop_fsync = hammer2_vop_fsync, 2868 #if 0 2869 .vop_read = hammer2_vop_fiforead, 2870 .vop_write = hammer2_vop_fifowrite, 2871 #endif 2872 .vop_access = hammer2_vop_access, 2873 #if 0 2874 .vop_close = hammer2_vop_fifoclose, 2875 #endif 2876 .vop_markatime = hammer2_vop_markatime, 2877 .vop_getattr = hammer2_vop_getattr, 2878 .vop_inactive = hammer2_vop_inactive, 2879 .vop_reclaim = hammer2_vop_reclaim, 2880 .vop_setattr = hammer2_vop_setattr, 2881 .vop_kqfilter = hammer2_vop_fifokqfilter 2882 }; 2883 2884
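/*
 * Usage sketch (hypothetical, not part of this file): a frontend holding a
 * locked directory vnode "dvp" could drive the wrappers defined above to
 * populate an image.  The vnode name, entry names, lengths and the error
 * handling policy shown here are illustrative assumptions only.
 *
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = hammer2_nsymlink(dvp, &vp, "passwd", 6, "../etc/passwd");
 *	if (error == 0)
 *		error = hammer2_nmknod(dvp, &vp, "fifo0", 5, VFIFO);
 *	if (error)
 *		kprintf("hammer2: population failed (%d)\n", error);
 */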