1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 
24 * Copyright (c) 2014 Integros [integros.com] 25 */ 26 27 /* Portions Copyright 2007 Jeremy Teo */ 28 /* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */ 29 30 #ifdef _KERNEL 31 #include <sys/types.h> 32 #include <sys/param.h> 33 #include <sys/time.h> 34 #include <sys/systm.h> 35 #include <sys/sysmacros.h> 36 #include <sys/resource.h> 37 #include <sys/mntent.h> 38 #include <sys/u8_textprep.h> 39 #include <sys/dsl_dataset.h> 40 #include <sys/vfs.h> 41 #include <sys/vnode.h> 42 #include <sys/file.h> 43 #include <sys/kmem.h> 44 #include <sys/errno.h> 45 #include <sys/unistd.h> 46 #include <sys/atomic.h> 47 #include <sys/zfs_dir.h> 48 #include <sys/zfs_acl.h> 49 #include <sys/zfs_ioctl.h> 50 #include <sys/zfs_rlock.h> 51 #include <sys/zfs_fuid.h> 52 #include <sys/dnode.h> 53 #include <sys/fs/zfs.h> 54 #include <sys/kidmap.h> 55 56 #ifdef __NetBSD__ 57 #include <sys/zfs_ctldir.h> 58 #include <miscfs/specfs/specdev.h> 59 60 extern int (**zfs_vnodeop_p)(void *); 61 extern int (**zfs_fifoop_p)(void *); 62 extern int (**zfs_specop_p)(void *); 63 64 #endif 65 #endif /* _KERNEL */ 66 67 #include <sys/dmu.h> 68 #include <sys/dmu_objset.h> 69 #include <sys/refcount.h> 70 #include <sys/stat.h> 71 #include <sys/zap.h> 72 #include <sys/zfs_znode.h> 73 #include <sys/sa.h> 74 #include <sys/zfs_sa.h> 75 #include <sys/zfs_stat.h> 76 #include <sys/refcount.h> 77 78 #include "zfs_prop.h" 79 #include "zfs_comutil.h" 80 81 /* Used by fstat(1). */ 82 SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD, 83 SYSCTL_NULL_INT_PTR, sizeof(znode_t), "sizeof(znode_t)"); 84 85 /* 86 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only 87 * turned on when DEBUG is also defined. 
 */
#ifdef DEBUG
#define	ZNODE_STATS
#endif /* DEBUG */

#ifdef ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)	((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)	/* nothing */
#endif /* ZNODE_STATS */

/*
 * Functions needed for userland (ie: libzpool) are not put under
 * #ifdef_KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL
/*
 * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
 * be freed before it can be safely accessed.
 */
krwlock_t zfsvfs_lock;

static kmem_cache_t *znode_cache = NULL;

/*
 * DMU eviction callback installed on a znode's SA dbuf.  A znode must clear
 * this callback before releasing its last dbuf hold, so being called at all
 * is a fatal programming error.
 */
/*ARGSUSED*/
static void
znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
{
	/*
	 * We should never drop all dbuf refs without first clearing
	 * the eviction callback.
	 */
	panic("evicting znode %p\n", user_ptr);
}

extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;

/*
 * kmem cache constructor: set up the locks and list linkage that persist for
 * the lifetime of the cached buffer.  The zfsvfs back-pointer is deliberately
 * poisoned (POINTER_INVALIDATE) so zfs_znode_move() can tell an uninitialized
 * znode from a live one.
 */
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	POINTER_INVALIDATE(&zp->z_zfsvfs);

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_acl_cached = NULL;
	zp->z_vnode = NULL;
	zp->z_moved = 0;
	return (0);
}

/*
 * kmem cache destructor: tear down what the constructor built.  The asserts
 * document the required quiescent state: no vnode, not on the per-fs znode
 * list, no cached ACL.
 */
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT(ZTOV(zp) == NULL);
#ifndef __NetBSD__
	/*
	 * NOTE(review): ZTOV(zp) is asserted NULL just above, so on non-DEBUG
	 * non-NetBSD builds this would be vn_free(NULL).  Presumably dead code
	 * inherited from the illumos constructor-allocated-vnode scheme —
	 * confirm against the FreeBSD/illumos variants of this file.
	 */
	vn_free(ZTOV(zp));
#endif
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_acl_cached == NULL);
}

#ifdef ZNODE_STATS
/* Counters for the reasons zfs_znode_move() declines or defers a move. */
static struct {
	uint64_t zms_zfsvfs_invalid;
	uint64_t zms_zfsvfs_recheck1;
	uint64_t zms_zfsvfs_unmounted;
	uint64_t zms_zfsvfs_recheck2;
	uint64_t zms_obj_held;
	uint64_t zms_vnode_locked;
	uint64_t zms_not_only_dnlc;
} znode_move_stats;
#endif	/* ZNODE_STATS */

#ifdef illumos
/*
 * Copy an idle znode from ozp to nzp as part of a kmem slab defragmentation
 * move.  Caller (zfs_znode_move) holds every lock needed to make the copy
 * atomic with respect to lookups.
 */
static void
zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
{
	vnode_t *vp;

	/* Copy fields. */
	nzp->z_zfsvfs = ozp->z_zfsvfs;

	/* Swap vnodes. */
	vp = nzp->z_vnode;
	nzp->z_vnode = ozp->z_vnode;
	ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
	ZTOV(ozp)->v_data = ozp;
	ZTOV(nzp)->v_data = nzp;

	nzp->z_id = ozp->z_id;
	ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
	ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
	nzp->z_unlinked = ozp->z_unlinked;
	nzp->z_atime_dirty = ozp->z_atime_dirty;
	nzp->z_zn_prefetch = ozp->z_zn_prefetch;
	nzp->z_blksz = ozp->z_blksz;
	nzp->z_seq = ozp->z_seq;
	nzp->z_mapcnt = ozp->z_mapcnt;
	nzp->z_gen = ozp->z_gen;
	nzp->z_sync_cnt = ozp->z_sync_cnt;
	nzp->z_is_sa = ozp->z_is_sa;
	nzp->z_sa_hdl = ozp->z_sa_hdl;
	bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
	nzp->z_links = ozp->z_links;
	nzp->z_size = ozp->z_size;
	nzp->z_pflags = ozp->z_pflags;
	nzp->z_uid = ozp->z_uid;
	nzp->z_gid = ozp->z_gid;
	nzp->z_mode = ozp->z_mode;

	/*
	 * Since this is just an idle znode and kmem is already dealing with
	 * memory pressure, release any cached ACL.
	 */
	if (ozp->z_acl_cached) {
		zfs_acl_free(ozp->z_acl_cached);
		ozp->z_acl_cached = NULL;
	}

	sa_set_userp(nzp->z_sa_hdl, nzp);

	/*
	 * Invalidate the original znode by clearing fields that provide a
	 * pointer back to the znode.  Set the low bit of the vfs pointer to
	 * ensure that zfs_znode_move() recognizes the znode as invalid in any
	 * subsequent callback.
	 */
	ozp->z_sa_hdl = NULL;
	POINTER_INVALIDATE(&ozp->z_zfsvfs);

	/*
	 * Mark the znode.
	 */
	nzp->z_moved = 1;
	ozp->z_moved = (uint8_t)-1;
}

/*
 * kmem move callback (kmem_cbrc_t): decide whether the znode in 'buf' may be
 * relocated to 'newbuf'.  Returns DONT_KNOW when the znode cannot be
 * identified as live, LATER when it is busy, and YES after a successful move.
 * The lock acquisition order below (zfsvfs_lock -> teardown lock ->
 * z_znodes_lock -> obj hold -> v_lock) is load-bearing; do not reorder.
 */
/*ARGSUSED*/
static kmem_cbrc_t
zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
{
	znode_t *ozp = buf, *nzp = newbuf;
	zfsvfs_t *zfsvfs;
	vnode_t *vp;

	/*
	 * The znode is on the file system's list of known znodes if the vfs
	 * pointer is valid. We set the low bit of the vfs pointer when freeing
	 * the znode to invalidate it, and the memory patterns written by kmem
	 * (baddcafe and deadbeef) set at least one of the two low bits. A newly
	 * created znode sets the vfs pointer last of all to indicate that the
	 * znode is known and in a valid state to be moved by this function.
	 */
	zfsvfs = ozp->z_zfsvfs;
	if (!POINTER_IS_VALID(zfsvfs)) {
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * Close a small window in which it's possible that the filesystem could
	 * be unmounted and freed, and zfsvfs, though valid in the previous
	 * statement, could point to unrelated memory by the time we try to
	 * prevent the filesystem from being unmounted.
	 */
	rw_enter(&zfsvfs_lock, RW_WRITER);
	if (zfsvfs != ozp->z_zfsvfs) {
		rw_exit(&zfsvfs_lock);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * If the znode is still valid, then so is the file system. We know that
	 * no valid file system can be freed while we hold zfsvfs_lock, so we
	 * can safely ensure that the filesystem is not and will not be
	 * unmounted. The next statement is equivalent to ZFS_ENTER().
	 */
	rrm_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
	if (zfsvfs->z_unmounted) {
		ZFS_EXIT(zfsvfs);
		rw_exit(&zfsvfs_lock);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
		return (KMEM_CBRC_DONT_KNOW);
	}
	rw_exit(&zfsvfs_lock);

	mutex_enter(&zfsvfs->z_znodes_lock);
	/*
	 * Recheck the vfs pointer in case the znode was removed just before
	 * acquiring the lock.
	 */
	if (zfsvfs != ozp->z_zfsvfs) {
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * At this point we know that as long as we hold z_znodes_lock, the
	 * znode cannot be freed and fields within the znode can be safely
	 * accessed. Now, prevent a race with zfs_zget().
	 */
	if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
		return (KMEM_CBRC_LATER);
	}

	vp = ZTOV(ozp);
	if (mutex_tryenter(&vp->v_lock) == 0) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
		return (KMEM_CBRC_LATER);
	}

	/* Only move znodes that are referenced _only_ by the DNLC. */
	if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
		mutex_exit(&vp->v_lock);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * The znode is known and in a valid state to move. We're holding the
	 * locks needed to execute the critical section.
	 */
	zfs_znode_move_impl(ozp, nzp);
	mutex_exit(&vp->v_lock);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);

	list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
	mutex_exit(&zfsvfs->z_znodes_lock);
	ZFS_EXIT(zfsvfs);

	return (KMEM_CBRC_YES);
}
#endif /* illumos */

/*
 * Module init: create the znode kmem cache and the zfsvfs teardown lock.
 */
void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache
	 */
	rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
	ASSERT(znode_cache == NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
	/*
	 * NOTE(review): zfs_znode_move() is only compiled #ifdef illumos
	 * above, yet it is referenced here unconditionally.  This presumably
	 * relies on the FreeBSD/NetBSD compat headers defining
	 * kmem_cache_set_move() as a no-op macro that discards its argument —
	 * confirm against the platform's opensolaris compat kmem.h.
	 */
	kmem_cache_set_move(znode_cache, zfs_znode_move);
}

/*
 * Module fini: tear down what zfs_znode_init() created (and, on illumos,
 * the vnode op tables).
 */
void
zfs_znode_fini(void)
{
#ifdef illumos
	/*
	 * Cleanup vfs & vnode ops
	 */
	zfs_remove_op_tables();
#endif

	/*
	 * Cleanup zcache
	 */
	if (znode_cache)
		kmem_cache_destroy(znode_cache);
	znode_cache = NULL;
	rw_destroy(&zfsvfs_lock);
}

#ifdef illumos
struct vnodeops *zfs_dvnodeops;
struct vnodeops *zfs_fvnodeops;
struct vnodeops *zfs_symvnodeops;
struct vnodeops *zfs_xdvnodeops;
struct vnodeops *zfs_evnodeops;
struct vnodeops *zfs_sharevnodeops;

/*
 * Release the vfs ops registration and every vnode ops vector built by
 * zfs_create_op_tables().  NULL checks allow partial setup to be undone.
 */
void
zfs_remove_op_tables()
{
	/*
	 * Remove vfs ops
	 */
	ASSERT(zfsfstype);
	(void) vfs_freevfsops_by_type(zfsfstype);
	zfsfstype = 0;

	/*
	 * Remove vnode ops
	 */
	if (zfs_dvnodeops)
		vn_freevnodeops(zfs_dvnodeops);
	if (zfs_fvnodeops)
		vn_freevnodeops(zfs_fvnodeops);
	if (zfs_symvnodeops)
		vn_freevnodeops(zfs_symvnodeops);
	if (zfs_xdvnodeops)
		vn_freevnodeops(zfs_xdvnodeops);
	if (zfs_evnodeops)
		vn_freevnodeops(zfs_evnodeops);
	if (zfs_sharevnodeops)
		vn_freevnodeops(zfs_sharevnodeops);

	zfs_dvnodeops = NULL;
	zfs_fvnodeops = NULL;
	zfs_symvnodeops = NULL;
	zfs_xdvnodeops = NULL;
	zfs_evnodeops = NULL;
	zfs_sharevnodeops = NULL;
}

extern const fs_operation_def_t zfs_dvnodeops_template[]; 429 extern const fs_operation_def_t zfs_fvnodeops_template[]; 430 extern const fs_operation_def_t zfs_xdvnodeops_template[]; 431 extern const fs_operation_def_t zfs_symvnodeops_template[]; 432 extern const fs_operation_def_t zfs_evnodeops_template[]; 433 extern const fs_operation_def_t zfs_sharevnodeops_template[]; 434 435 int 436 zfs_create_op_tables() 437 { 438 int error; 439 440 /* 441 * zfs_dvnodeops can be set if mod_remove() calls mod_installfs() 442 * due to a failure to remove the the 2nd modlinkage (zfs_modldrv). 443 * In this case we just return as the ops vectors are already set up. 444 */ 445 if (zfs_dvnodeops) 446 return (0); 447 448 error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template, 449 &zfs_dvnodeops); 450 if (error) 451 return (error); 452 453 error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template, 454 &zfs_fvnodeops); 455 if (error) 456 return (error); 457 458 error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template, 459 &zfs_symvnodeops); 460 if (error) 461 return (error); 462 463 error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template, 464 &zfs_xdvnodeops); 465 if (error) 466 return (error); 467 468 error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template, 469 &zfs_evnodeops); 470 if (error) 471 return (error); 472 473 error = vn_make_ops(MNTTYPE_ZFS, zfs_sharevnodeops_template, 474 &zfs_sharevnodeops); 475 476 return (error); 477 } 478 #endif /* illumos */ 479 480 int 481 zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx) 482 { 483 zfs_acl_ids_t acl_ids; 484 vattr_t vattr; 485 znode_t *sharezp; 486 znode_t *zp; 487 int error; 488 489 vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE; 490 vattr.va_type = VDIR; 491 vattr.va_mode = S_IFDIR|0555; 492 vattr.va_uid = crgetuid(kcred); 493 vattr.va_gid = crgetgid(kcred); 494 495 sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP); 496 ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs)); 497 sharezp->z_moved = 0; 498 sharezp->z_unlinked = 0; 499 
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids));
	/* IS_ROOT_NODE: reuse sharezp itself as the "parent" znode. */
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	sa_handle_destroy(sharezp->z_sa_hdl);
	kmem_cache_free(znode_cache, sharezp);

	return (error);
}

/*
 * define a couple of values we need available
 * for both 64 and 32 bit environments.
 */
#ifndef NBITSMINOR64
#define	NBITSMINOR64	32
#endif
#ifndef MAXMAJ64
#define	MAXMAJ64	0xffffffffUL
#endif
#ifndef MAXMIN64
#define	MAXMIN64	0xffffffffUL
#endif

/*
 * Create special expldev for ZFS private use.
 * Can't use standard expldev since it doesn't do
 * what we want.  The standard expldev() takes a
 * dev32_t in LP64 and expands it to a long dev_t.
 * We need an interface that takes a dev32_t in ILP32
 * and expands it to a long dev_t.
 */
static uint64_t
zfs_expldev(dev_t dev)
{
	/* major in the upper 32 bits, minor in the lower 32 (on-disk form) */
	return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
/*
 * Special cmpldev for ZFS private use.
 * Can't use standard cmpldev since it takes
 * a long dev_t and compresses it to dev32_t in
 * LP64.  We need to do a compaction of a long dev_t
 * to a dev32_t in ILP32.
 */
dev_t
zfs_cmpldev(uint64_t dev)
{
	/* inverse of zfs_expldev(): unpack major/minor from the 64-bit form */
	return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}

/*
 * Attach a system-attribute (SA) handle to the znode, either by adopting the
 * caller-supplied handle or by creating a shared one from the SA dbuf.
 * Caller must hold the per-object mutex (ZFS_OBJ_MUTEX) for zp->z_id.
 */
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

	ASSERT(zp->z_sa_hdl == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	if (sa_hdl == NULL) {
		VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	/*
	 * Slap on VROOT if we are the root znode unless we are the root
	 * node of a snapshot mounted under .zfs.
	 */
	if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
		ZTOV(zp)->v_flag |= VROOT;

	vn_exists(ZTOV(zp));
}

/*
 * Release the znode's SA handle (and with it the underlying dbuf hold).
 * The assert documents the contexts in which this is safe: object mutex
 * held, znode already unlinked, or filesystem teardown in progress.
 */
void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
	    zp->z_unlinked ||
	    RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

#ifdef __FreeBSD__
/*
 * Dispose of a freshly allocated vnode that will never be used: detach it
 * from the znode and send it straight to the dead ops / recycle path.
 */
static void
zfs_vnode_forget(vnode_t *vp)
{

	/* copied from insmntque_stddtr */
	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}
#endif /* __FreeBSD__ */

/*
 * Construct a new znode/vnode and initialize.
 *
 * This does not do a call to dmu_set_user() that is
 * up to the caller to do, in case you don't want to
 * return the znode
 *
 * On NetBSD the caller supplies the (vcache-allocated) vnode; elsewhere one
 * is allocated here via getnewvnode().  Returns NULL if the vnode could not
 * be obtained or the object's SA attributes could not be read.
 */
static znode_t *
#ifdef __NetBSD__
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl, vnode_t *vp)
#else
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
#endif
{
	znode_t *zp;
#ifndef __NetBSD__
	vnode_t *vp;
#endif
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;
	int error;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);

#ifndef __NetBSD__
	KASSERT(curthread->td_vp_reserv > 0,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
	error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
	if (error != 0) {
		kmem_cache_free(znode_cache, zp);
		return (NULL);
	}
#endif
	zp->z_vnode = vp;
	vp->v_data = zp;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	zp->z_moved = 0;

	/*
	 * Defer setting z_zfsvfs until the znode is ready to be a candidate for
	 * the zfs_znode_move() callback.
	 */
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;
	zp->z_sync_cnt = 0;

#ifdef __NetBSD__
	vp->v_op = zfs_vnodeop_p;
	vp->v_tag = VT_ZFS;
	zp->z_lockf = NULL;
#endif

	vp = ZTOV(zp);

	zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

	/* Bulk-fetch the common attributes into the in-core znode. */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, 8);

	/* z_gen == 0 indicates an uninitialized/invalid object. */
	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
#ifndef __NetBSD__
		zfs_vnode_forget(vp);
#endif
		zp->z_vnode = NULL;
		kmem_cache_free(znode_cache, zp);
		return (NULL);
	}

	zp->z_mode = mode;

	vp->v_type = IFTOVT((mode_t)mode);

	/* Hook up type-specific vnode ops / device state. */
	switch (vp->v_type) {
	case VDIR:
		zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
		break;
#if defined(illumos) || defined(__NetBSD__)
	case VBLK:
	case VCHR:
	{
		uint64_t rdev;
		VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
		    &rdev, sizeof (rdev)) == 0);

#ifdef illumos
		vp->v_rdev = zfs_cmpldev(rdev);
#else
		vp->v_op = zfs_specop_p;
		spec_node_init(vp, zfs_cmpldev(rdev));
#endif
	}
	break;
#endif
	case VFIFO:
#ifdef __NetBSD__
		vp->v_op = zfs_fifoop_p;
		break;
#else /* __NetBSD__ */
#ifdef illumos
	case VSOCK:
	case VDOOR:
#endif
		vp->v_op = &zfs_fifoops;
		break;
	case VREG:
		if (parent == zfsvfs->z_shares_dir) {
			ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
			vp->v_op = &zfs_shareops;
		}
		break;
#ifdef illumos
	case VLNK:
		vn_setops(vp, zfs_symvnodeops);
		break;
	default:
		vn_setops(vp, zfs_evnodeops);
		break;
#endif
#endif /* __NetBSD__ */
	}

#ifdef __NetBSD__
	extern const struct genfs_ops zfs_genfsops;
	genfs_node_init(vp, &zfs_genfsops);
	uvm_vnp_setsize(vp, zp->z_size);
#endif

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	membar_producer();
	/*
	 * Everything else must be valid before assigning z_zfsvfs makes the
	 * znode eligible for zfs_znode_move().
	 */
	zp->z_zfsvfs = zfsvfs;
	mutex_exit(&zfsvfs->z_znodes_lock);

#ifndef __NetBSD__
	/*
	 * Acquire vnode lock before making it available to the world.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(vp);
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);
#endif

#if defined(illumos) || defined(__NetBSD__)
	VFS_HOLD(zfsvfs->z_vfs);
#endif
	return (zp);
}

/* Zero scratch templates used when creating DMU_OT_ZNODE-format objects. */
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 *	IN:	dzp	- parent directory for new znode
 *		vap	- file attributes for new znode
 *		tx	- dmu transaction id for zap operations
 *		cr	- credentials of caller
 *		flag	- flags:
 *			  IS_ROOT_NODE	- new object will be root
 *			  IS_XATTR	- new object is an attribute
 *		bonuslen - length of bonus buffer
 *		setaclp  - File/Dir initial ACL
 *		fuidp	 - Tracks fuid allocation.
806 * 807 * OUT: zpp - allocated znode 808 * 809 */ 810 #ifdef __NetBSD__ 811 struct zfs_newvnode_args { 812 dmu_tx_t *tx; 813 uint_t flag; 814 zfs_acl_ids_t *acl_ids; 815 }; 816 817 static void 818 zfs_mknode1(znode_t *, vattr_t *, dmu_tx_t *, cred_t *, 819 uint_t, znode_t **, zfs_acl_ids_t *, vnode_t *); 820 821 int 822 zfs_loadvnode(struct mount *mp, struct vnode *vp, 823 const void *key, size_t key_len, const void **new_key) 824 { 825 int err, blksz; 826 uint64_t obj_num; 827 zfsvfs_t *zfsvfs; 828 dmu_buf_t *db; 829 dmu_object_info_t doi; 830 dmu_object_type_t obj_type; 831 sa_handle_t *hdl; 832 znode_t *zp; 833 834 if (key_len != sizeof(obj_num)) 835 return zfsctl_loadvnode(mp, vp, key, key_len, new_key); 836 837 memcpy(&obj_num, key, key_len); 838 839 zfsvfs = mp->mnt_data; 840 841 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num); 842 843 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); 844 if (err) { 845 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 846 return (SET_ERROR(err)); 847 } 848 849 dmu_object_info_from_db(db, &doi); 850 if (doi.doi_bonus_type != DMU_OT_SA && 851 (doi.doi_bonus_type != DMU_OT_ZNODE || 852 (doi.doi_bonus_type == DMU_OT_ZNODE && 853 doi.doi_bonus_size < sizeof (znode_phys_t)))) { 854 sa_buf_rele(db, NULL); 855 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 856 return (SET_ERROR(EINVAL)); 857 } 858 blksz = doi.doi_data_block_size; 859 obj_type = doi.doi_bonus_type; 860 hdl = dmu_buf_get_user(db); 861 862 if (hdl != NULL) { 863 sa_buf_rele(db, NULL); 864 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 865 return (SET_ERROR(ENOENT)); 866 } 867 868 zp = zfs_znode_alloc(zfsvfs, db, blksz, obj_type, hdl, vp); 869 if (zp == NULL) { 870 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 871 return (SET_ERROR(ENOENT)); 872 } 873 ASSERT(zp == VTOZ(vp)); 874 875 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 876 877 *new_key = &zp->z_id; 878 879 return 0; 880 } 881 882 int 883 zfs_newvnode(struct mount *mp, vnode_t *dvp, vnode_t *vp, vattr_t *vap, 884 cred_t *cr, void *extra, size_t *key_len, const void 
**new_key) 885 { 886 struct zfs_newvnode_args *args = extra; 887 znode_t *zp, *dzp = VTOZ(dvp); 888 dmu_tx_t *tx = args->tx; 889 uint_t flag = args->flag; 890 zfs_acl_ids_t *acl_ids = args->acl_ids; 891 892 zfs_mknode1(dzp, vap, tx, cr, flag, &zp, acl_ids, vp); 893 ASSERT(zp == VTOZ(vp)); 894 *key_len = sizeof(zp->z_id); 895 *new_key = &zp->z_id; 896 897 return 0; 898 } 899 900 void 901 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 902 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids) 903 { 904 vnode_t *vp, *dvp = ZTOV(dzp); 905 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 906 struct zfs_newvnode_args args = { tx, flag, acl_ids }; 907 908 if (flag & IS_ROOT_NODE) 909 return zfs_mknode1(dzp, vap, tx, cr, flag, zpp, acl_ids, NULL); 910 911 VERIFY(vcache_new(zfsvfs->z_vfs, dvp, vap, cr, &args, &vp) == 0); 912 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 913 *zpp = VTOZ(vp); 914 } 915 916 static void 917 zfs_mknode1(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 918 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids, vnode_t *vp) 919 #else 920 void 921 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 922 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids) 923 #endif 924 { 925 uint64_t crtime[2], atime[2], mtime[2], ctime[2]; 926 uint64_t mode, size, links, parent, pflags; 927 uint64_t dzp_pflags = 0; 928 uint64_t rdev = 0; 929 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 930 dmu_buf_t *db; 931 timestruc_t now; 932 uint64_t gen, obj; 933 int err; 934 int bonuslen; 935 sa_handle_t *sa_hdl; 936 dmu_object_type_t obj_type; 937 sa_bulk_attr_t sa_attrs[ZPL_END]; 938 int cnt = 0; 939 zfs_acl_locator_cb_t locate = { 0 }; 940 941 ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE)); 942 943 if (zfsvfs->z_replay) { 944 obj = vap->va_nodeid; 945 now = vap->va_ctime; /* see zfs_replay_create() */ 946 gen = vap->va_nblocks; /* ditto */ 947 } else { 948 obj = 0; 949 vfs_timestamp(&now); 950 gen = dmu_tx_get_txg(tx); 951 } 952 953 obj_type = 
zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE; 954 bonuslen = (obj_type == DMU_OT_SA) ? 955 DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE; 956 957 /* 958 * Create a new DMU object. 959 */ 960 /* 961 * There's currently no mechanism for pre-reading the blocks that will 962 * be needed to allocate a new object, so we accept the small chance 963 * that there will be an i/o error and we will fail one of the 964 * assertions below. 965 */ 966 if (vap->va_type == VDIR) { 967 if (zfsvfs->z_replay) { 968 VERIFY0(zap_create_claim_norm(zfsvfs->z_os, obj, 969 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, 970 obj_type, bonuslen, tx)); 971 } else { 972 obj = zap_create_norm(zfsvfs->z_os, 973 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, 974 obj_type, bonuslen, tx); 975 } 976 } else { 977 if (zfsvfs->z_replay) { 978 VERIFY0(dmu_object_claim(zfsvfs->z_os, obj, 979 DMU_OT_PLAIN_FILE_CONTENTS, 0, 980 obj_type, bonuslen, tx)); 981 } else { 982 obj = dmu_object_alloc(zfsvfs->z_os, 983 DMU_OT_PLAIN_FILE_CONTENTS, 0, 984 obj_type, bonuslen, tx); 985 } 986 } 987 988 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj); 989 VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db)); 990 991 /* 992 * If this is the root, fix up the half-initialized parent pointer 993 * to reference the just-allocated physical data area. 994 */ 995 if (flag & IS_ROOT_NODE) { 996 dzp->z_id = obj; 997 } else { 998 dzp_pflags = dzp->z_pflags; 999 } 1000 1001 /* 1002 * If parent is an xattr, so am I. 1003 */ 1004 if (dzp_pflags & ZFS_XATTR) { 1005 flag |= IS_XATTR; 1006 } 1007 1008 if (zfsvfs->z_use_fuids) 1009 pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED; 1010 else 1011 pflags = 0; 1012 1013 if (vap->va_type == VDIR) { 1014 size = 2; /* contents ("." and "..") */ 1015 links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 
2 : 1; 1016 } else { 1017 size = links = 0; 1018 } 1019 1020 if (vap->va_type == VBLK || vap->va_type == VCHR) { 1021 rdev = zfs_expldev(vap->va_rdev); 1022 } 1023 1024 parent = dzp->z_id; 1025 mode = acl_ids->z_mode; 1026 if (flag & IS_XATTR) 1027 pflags |= ZFS_XATTR; 1028 1029 /* 1030 * No execs denied will be deterimed when zfs_mode_compute() is called. 1031 */ 1032 pflags |= acl_ids->z_aclp->z_hints & 1033 (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT| 1034 ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED); 1035 1036 ZFS_TIME_ENCODE(&now, crtime); 1037 ZFS_TIME_ENCODE(&now, ctime); 1038 1039 if (vap->va_mask & AT_ATIME) { 1040 ZFS_TIME_ENCODE(&vap->va_atime, atime); 1041 } else { 1042 ZFS_TIME_ENCODE(&now, atime); 1043 } 1044 1045 if (vap->va_mask & AT_MTIME) { 1046 ZFS_TIME_ENCODE(&vap->va_mtime, mtime); 1047 } else { 1048 ZFS_TIME_ENCODE(&now, mtime); 1049 } 1050 1051 /* Now add in all of the "SA" attributes */ 1052 VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED, 1053 &sa_hdl)); 1054 1055 /* 1056 * Setup the array of attributes to be replaced/set on the new file 1057 * 1058 * order for DMU_OT_ZNODE is critical since it needs to be constructed 1059 * in the old znode_phys_t format. 
Don't change this ordering 1060 */ 1061 1062 if (obj_type == DMU_OT_ZNODE) { 1063 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), 1064 NULL, &atime, 16); 1065 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), 1066 NULL, &mtime, 16); 1067 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), 1068 NULL, &ctime, 16); 1069 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), 1070 NULL, &crtime, 16); 1071 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), 1072 NULL, &gen, 8); 1073 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), 1074 NULL, &mode, 8); 1075 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), 1076 NULL, &size, 8); 1077 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), 1078 NULL, &parent, 8); 1079 } else { 1080 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), 1081 NULL, &mode, 8); 1082 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), 1083 NULL, &size, 8); 1084 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), 1085 NULL, &gen, 8); 1086 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, 1087 &acl_ids->z_fuid, 8); 1088 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, 1089 &acl_ids->z_fgid, 8); 1090 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), 1091 NULL, &parent, 8); 1092 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), 1093 NULL, &pflags, 8); 1094 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), 1095 NULL, &atime, 16); 1096 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), 1097 NULL, &mtime, 16); 1098 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), 1099 NULL, &ctime, 16); 1100 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), 1101 NULL, &crtime, 16); 1102 } 1103 1104 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8); 1105 1106 if (obj_type == DMU_OT_ZNODE) { 1107 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL, 1108 &empty_xattr, 8); 1109 } 1110 if (obj_type == DMU_OT_ZNODE || 1111 (vap->va_type == VBLK || vap->va_type == VCHR)) { 1112 
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs), 1113 NULL, &rdev, 8); 1114 1115 } 1116 if (obj_type == DMU_OT_ZNODE) { 1117 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), 1118 NULL, &pflags, 8); 1119 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, 1120 &acl_ids->z_fuid, 8); 1121 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, 1122 &acl_ids->z_fgid, 8); 1123 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad, 1124 sizeof (uint64_t) * 4); 1125 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, 1126 &acl_phys, sizeof (zfs_acl_phys_t)); 1127 } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) { 1128 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL, 1129 &acl_ids->z_aclp->z_acl_count, 8); 1130 locate.cb_aclp = acl_ids->z_aclp; 1131 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs), 1132 zfs_acl_data_locator, &locate, 1133 acl_ids->z_aclp->z_acl_bytes); 1134 mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags, 1135 acl_ids->z_fuid, acl_ids->z_fgid); 1136 } 1137 1138 VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0); 1139 1140 if (!(flag & IS_ROOT_NODE)) { 1141 #ifdef __NetBSD__ 1142 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl, vp); 1143 #else 1144 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl); 1145 #endif 1146 ASSERT(*zpp != NULL); 1147 } else { 1148 /* 1149 * If we are creating the root node, the "parent" we 1150 * passed in is the znode for the root. 
1151 */ 1152 *zpp = dzp; 1153 1154 (*zpp)->z_sa_hdl = sa_hdl; 1155 } 1156 1157 (*zpp)->z_pflags = pflags; 1158 (*zpp)->z_mode = mode; 1159 1160 if (vap->va_mask & AT_XVATTR) 1161 zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx); 1162 1163 if (obj_type == DMU_OT_ZNODE || 1164 acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) { 1165 VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx)); 1166 } 1167 #ifndef __NetBSD__ 1168 if (!(flag & IS_ROOT_NODE)) { 1169 vnode_t *vp; 1170 1171 vp = ZTOV(*zpp); 1172 vp->v_vflag |= VV_FORCEINSMQ; 1173 err = insmntque(vp, zfsvfs->z_vfs); 1174 vp->v_vflag &= ~VV_FORCEINSMQ; 1175 KASSERT(err == 0, ("insmntque() failed: error %d", err)); 1176 } 1177 #endif 1178 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj); 1179 } 1180 1181 /* 1182 * Update in-core attributes. It is assumed the caller will be doing an 1183 * sa_bulk_update to push the changes out. 1184 */ 1185 void 1186 zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) 1187 { 1188 xoptattr_t *xoap; 1189 1190 xoap = xva_getxoptattr(xvap); 1191 ASSERT(xoap); 1192 1193 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 1194 uint64_t times[2]; 1195 ZFS_TIME_ENCODE(&xoap->xoa_createtime, times); 1196 (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs), 1197 ×, sizeof (times), tx); 1198 XVA_SET_RTN(xvap, XAT_CREATETIME); 1199 } 1200 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 1201 ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly, 1202 zp->z_pflags, tx); 1203 XVA_SET_RTN(xvap, XAT_READONLY); 1204 } 1205 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 1206 ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden, 1207 zp->z_pflags, tx); 1208 XVA_SET_RTN(xvap, XAT_HIDDEN); 1209 } 1210 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 1211 ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system, 1212 zp->z_pflags, tx); 1213 XVA_SET_RTN(xvap, XAT_SYSTEM); 1214 } 1215 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 1216 ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive, 1217 zp->z_pflags, tx); 1218 XVA_SET_RTN(xvap, XAT_ARCHIVE); 1219 } 1220 if 
(XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 1221 ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable, 1222 zp->z_pflags, tx); 1223 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 1224 } 1225 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 1226 ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink, 1227 zp->z_pflags, tx); 1228 XVA_SET_RTN(xvap, XAT_NOUNLINK); 1229 } 1230 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 1231 ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly, 1232 zp->z_pflags, tx); 1233 XVA_SET_RTN(xvap, XAT_APPENDONLY); 1234 } 1235 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 1236 ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump, 1237 zp->z_pflags, tx); 1238 XVA_SET_RTN(xvap, XAT_NODUMP); 1239 } 1240 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 1241 ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque, 1242 zp->z_pflags, tx); 1243 XVA_SET_RTN(xvap, XAT_OPAQUE); 1244 } 1245 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 1246 ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED, 1247 xoap->xoa_av_quarantined, zp->z_pflags, tx); 1248 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 1249 } 1250 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 1251 ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified, 1252 zp->z_pflags, tx); 1253 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 1254 } 1255 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { 1256 zfs_sa_set_scanstamp(zp, xvap, tx); 1257 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); 1258 } 1259 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 1260 ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse, 1261 zp->z_pflags, tx); 1262 XVA_SET_RTN(xvap, XAT_REPARSE); 1263 } 1264 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) { 1265 ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline, 1266 zp->z_pflags, tx); 1267 XVA_SET_RTN(xvap, XAT_OFFLINE); 1268 } 1269 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) { 1270 ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse, 1271 zp->z_pflags, tx); 1272 XVA_SET_RTN(xvap, XAT_SPARSE); 1273 } 1274 } 1275 1276 #ifdef __NetBSD__ 1277 1278 int 1279 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) 1280 { 1281 int error; 1282 
vnode_t *vp; 1283 1284 error = vcache_get(zfsvfs->z_vfs, &obj_num, sizeof(obj_num), &vp); 1285 if (error == 0) 1286 *zpp = VTOZ(vp); 1287 1288 return error; 1289 } 1290 1291 /* 1292 * Get a known cached znode, to be used from zil_commit()->zfs_get_data() 1293 * to resolve log entries. Doesn't take a reference, will never fail and 1294 * depends on zfs_vnops.c::zfs_netbsd_reclaim() running a zil_commit() 1295 * before the znode gets freed. 1296 */ 1297 int 1298 zfs_zget_cleaner(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) 1299 { 1300 dmu_buf_t *db; 1301 sa_handle_t *hdl; 1302 dmu_object_info_t doi; 1303 znode_t *zp; 1304 1305 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num); 1306 1307 VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db)); 1308 1309 dmu_object_info_from_db(db, &doi); 1310 ASSERT(doi.doi_bonus_type == DMU_OT_SA || 1311 (doi.doi_bonus_type == DMU_OT_ZNODE && 1312 doi.doi_bonus_size >= sizeof (znode_phys_t))); 1313 1314 hdl = dmu_buf_get_user(db); 1315 ASSERT3P(hdl, !=, NULL); 1316 1317 zp = sa_get_userdata(hdl); 1318 ASSERT3U(zp->z_id, ==, obj_num); 1319 1320 sa_buf_rele(db, NULL); 1321 1322 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1323 1324 *zpp = zp; 1325 return (0); 1326 } 1327 1328 #else /* __NetBSD__ */ 1329 1330 int 1331 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) 1332 { 1333 dmu_object_info_t doi; 1334 dmu_buf_t *db; 1335 znode_t *zp; 1336 vnode_t *vp; 1337 sa_handle_t *hdl; 1338 struct thread *td; 1339 int locked; 1340 int err; 1341 1342 td = curthread; 1343 getnewvnode_reserve(1); 1344 again: 1345 *zpp = NULL; 1346 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num); 1347 1348 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); 1349 if (err) { 1350 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1351 getnewvnode_drop_reserve(); 1352 return (err); 1353 } 1354 1355 dmu_object_info_from_db(db, &doi); 1356 if (doi.doi_bonus_type != DMU_OT_SA && 1357 (doi.doi_bonus_type != DMU_OT_ZNODE || 1358 (doi.doi_bonus_type == DMU_OT_ZNODE && 1359 doi.doi_bonus_size < sizeof 
(znode_phys_t)))) { 1360 sa_buf_rele(db, NULL); 1361 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1362 #ifdef __FreeBSD__ 1363 getnewvnode_drop_reserve(); 1364 #endif 1365 return (SET_ERROR(EINVAL)); 1366 } 1367 1368 hdl = dmu_buf_get_user(db); 1369 if (hdl != NULL) { 1370 zp = sa_get_userdata(hdl); 1371 1372 /* 1373 * Since "SA" does immediate eviction we 1374 * should never find a sa handle that doesn't 1375 * know about the znode. 1376 */ 1377 ASSERT3P(zp, !=, NULL); 1378 ASSERT3U(zp->z_id, ==, obj_num); 1379 *zpp = zp; 1380 vp = ZTOV(zp); 1381 1382 /* Don't let the vnode disappear after ZFS_OBJ_HOLD_EXIT. */ 1383 VN_HOLD(vp); 1384 1385 sa_buf_rele(db, NULL); 1386 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1387 1388 locked = VOP_ISLOCKED(vp); 1389 VI_LOCK(vp); 1390 if ((vp->v_iflag & VI_DOOMED) != 0 && 1391 locked != LK_EXCLUSIVE) { 1392 /* 1393 * The vnode is doomed and this thread doesn't 1394 * hold the exclusive lock on it, so the vnode 1395 * must be being reclaimed by another thread. 1396 * Otherwise the doomed vnode is being reclaimed 1397 * by this thread and zfs_zget is called from 1398 * ZIL internals. 1399 */ 1400 VI_UNLOCK(vp); 1401 1402 /* 1403 * XXX vrele() locks the vnode when the last reference 1404 * is dropped. Although in this case the vnode is 1405 * doomed / dead and so no inactivation is required, 1406 * the vnode lock is still acquired. That could result 1407 * in a LOR with z_teardown_lock if another thread holds 1408 * the vnode's lock and tries to take z_teardown_lock. 1409 * But that is only possible if the other thread peforms 1410 * a ZFS vnode operation on the vnode. That either 1411 * should not happen if the vnode is dead or the thread 1412 * should also have a refrence to the vnode and thus 1413 * our reference is not last. 1414 */ 1415 VN_RELE(vp); 1416 goto again; 1417 } 1418 VI_UNLOCK(vp); 1419 getnewvnode_drop_reserve(); 1420 return (0); 1421 } 1422 1423 /* 1424 * Not found create new znode/vnode 1425 * but only if file exists. 
1426 * 1427 * There is a small window where zfs_vget() could 1428 * find this object while a file create is still in 1429 * progress. This is checked for in zfs_znode_alloc() 1430 * 1431 * if zfs_znode_alloc() fails it will drop the hold on the 1432 * bonus buffer. 1433 */ 1434 zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size, 1435 doi.doi_bonus_type, NULL); 1436 if (zp == NULL) { 1437 err = SET_ERROR(ENOENT); 1438 } else { 1439 *zpp = zp; 1440 } 1441 if (err == 0) { 1442 vnode_t *vp = ZTOV(zp); 1443 1444 err = insmntque(vp, zfsvfs->z_vfs); 1445 if (err == 0) { 1446 vp->v_hash = obj_num; 1447 VOP_UNLOCK(vp, 0); 1448 } else { 1449 zp->z_vnode = NULL; 1450 zfs_znode_dmu_fini(zp); 1451 zfs_znode_free(zp); 1452 *zpp = NULL; 1453 } 1454 } 1455 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1456 getnewvnode_drop_reserve(); 1457 return (err); 1458 } 1459 1460 #endif /* __NetBSD__ */ 1461 1462 int 1463 zfs_rezget(znode_t *zp) 1464 { 1465 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1466 dmu_object_info_t doi; 1467 dmu_buf_t *db; 1468 vnode_t *vp; 1469 uint64_t obj_num = zp->z_id; 1470 uint64_t mode, size; 1471 sa_bulk_attr_t bulk[8]; 1472 int err; 1473 int count = 0; 1474 uint64_t gen; 1475 1476 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num); 1477 1478 mutex_enter(&zp->z_acl_lock); 1479 if (zp->z_acl_cached) { 1480 zfs_acl_free(zp->z_acl_cached); 1481 zp->z_acl_cached = NULL; 1482 } 1483 1484 mutex_exit(&zp->z_acl_lock); 1485 ASSERT(zp->z_sa_hdl == NULL); 1486 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); 1487 if (err) { 1488 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1489 return (err); 1490 } 1491 1492 dmu_object_info_from_db(db, &doi); 1493 if (doi.doi_bonus_type != DMU_OT_SA && 1494 (doi.doi_bonus_type != DMU_OT_ZNODE || 1495 (doi.doi_bonus_type == DMU_OT_ZNODE && 1496 doi.doi_bonus_size < sizeof (znode_phys_t)))) { 1497 sa_buf_rele(db, NULL); 1498 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1499 return (SET_ERROR(EINVAL)); 1500 } 1501 1502 zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL); 
1503 size = zp->z_size; 1504 1505 /* reload cached values */ 1506 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, 1507 &gen, sizeof (gen)); 1508 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, 1509 &zp->z_size, sizeof (zp->z_size)); 1510 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, 1511 &zp->z_links, sizeof (zp->z_links)); 1512 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 1513 &zp->z_pflags, sizeof (zp->z_pflags)); 1514 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, 1515 &zp->z_atime, sizeof (zp->z_atime)); 1516 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, 1517 &zp->z_uid, sizeof (zp->z_uid)); 1518 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, 1519 &zp->z_gid, sizeof (zp->z_gid)); 1520 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, 1521 &mode, sizeof (mode)); 1522 1523 if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) { 1524 zfs_znode_dmu_fini(zp); 1525 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1526 return (SET_ERROR(EIO)); 1527 } 1528 1529 zp->z_mode = mode; 1530 1531 if (gen != zp->z_gen) { 1532 zfs_znode_dmu_fini(zp); 1533 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1534 return (SET_ERROR(EIO)); 1535 } 1536 1537 /* 1538 * It is highly improbable but still quite possible that two 1539 * objects in different datasets are created with the same 1540 * object numbers and in transaction groups with the same 1541 * numbers. znodes corresponding to those objects would 1542 * have the same z_id and z_gen, but their other attributes 1543 * may be different. 1544 * zfs recv -F may replace one of such objects with the other. 1545 * As a result file properties recorded in the replaced 1546 * object's vnode may no longer match the received object's 1547 * properties. At present the only cached property is the 1548 * files type recorded in v_type. 1549 * So, handle this case by leaving the old vnode and znode 1550 * disassociated from the actual object. 
A new vnode and a 1551 * znode will be created if the object is accessed 1552 * (e.g. via a look-up). The old vnode and znode will be 1553 * recycled when the last vnode reference is dropped. 1554 */ 1555 vp = ZTOV(zp); 1556 if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) { 1557 zfs_znode_dmu_fini(zp); 1558 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1559 return (EIO); 1560 } 1561 1562 zp->z_unlinked = (zp->z_links == 0); 1563 zp->z_blksz = doi.doi_data_block_size; 1564 #ifdef __NetBSD__ 1565 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); 1566 (void)VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO); 1567 #else 1568 vn_pages_remove(vp, 0, 0); 1569 #endif 1570 if (zp->z_size != size) 1571 vnode_pager_setsize(vp, zp->z_size); 1572 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1573 1574 return (0); 1575 } 1576 1577 void 1578 zfs_znode_delete(znode_t *zp, dmu_tx_t *tx) 1579 { 1580 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1581 objset_t *os = zfsvfs->z_os; 1582 uint64_t obj = zp->z_id; 1583 uint64_t acl_obj = zfs_external_acl(zp); 1584 1585 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj); 1586 if (acl_obj) { 1587 VERIFY(!zp->z_is_sa); 1588 VERIFY(0 == dmu_object_free(os, acl_obj, tx)); 1589 } 1590 VERIFY(0 == dmu_object_free(os, obj, tx)); 1591 zfs_znode_dmu_fini(zp); 1592 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj); 1593 zfs_znode_free(zp); 1594 } 1595 1596 void 1597 zfs_zinactive(znode_t *zp) 1598 { 1599 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1600 uint64_t z_id = zp->z_id; 1601 1602 ASSERT(zp->z_sa_hdl); 1603 1604 /* 1605 * Don't allow a zfs_zget() while were trying to release this znode 1606 */ 1607 ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id); 1608 1609 /* 1610 * If this was the last reference to a file with no links, 1611 * remove the file from the file system. 
1612 */ 1613 if (zp->z_unlinked) { 1614 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id); 1615 zfs_rmnode(zp); 1616 return; 1617 } 1618 1619 zfs_znode_dmu_fini(zp); 1620 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id); 1621 zfs_znode_free(zp); 1622 } 1623 1624 void 1625 zfs_znode_free(znode_t *zp) 1626 { 1627 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1628 1629 #ifdef __NetBSD__ 1630 struct vnode *vp = ZTOV(zp); 1631 1632 genfs_node_destroy(vp); 1633 1634 /* 1635 * Interlock with zfs_sync(). 1636 */ 1637 mutex_enter(vp->v_interlock); 1638 vp->v_data = NULL; 1639 mutex_exit(vp->v_interlock); 1640 #endif 1641 1642 ASSERT(zp->z_sa_hdl == NULL); 1643 zp->z_vnode = NULL; 1644 mutex_enter(&zfsvfs->z_znodes_lock); 1645 POINTER_INVALIDATE(&zp->z_zfsvfs); 1646 list_remove(&zfsvfs->z_all_znodes, zp); 1647 mutex_exit(&zfsvfs->z_znodes_lock); 1648 1649 if (zp->z_acl_cached) { 1650 zfs_acl_free(zp->z_acl_cached); 1651 zp->z_acl_cached = NULL; 1652 } 1653 1654 kmem_cache_free(znode_cache, zp); 1655 1656 #ifdef illumos 1657 VFS_RELE(zfsvfs->z_vfs); 1658 #endif 1659 } 1660 1661 void 1662 zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2], 1663 uint64_t ctime[2], boolean_t have_tx) 1664 { 1665 timestruc_t now; 1666 1667 vfs_timestamp(&now); 1668 1669 if (have_tx) { /* will sa_bulk_update happen really soon? */ 1670 zp->z_atime_dirty = 0; 1671 zp->z_seq++; 1672 } else { 1673 zp->z_atime_dirty = 1; 1674 } 1675 1676 if (flag & AT_ATIME) { 1677 ZFS_TIME_ENCODE(&now, zp->z_atime); 1678 } 1679 1680 if (flag & AT_MTIME) { 1681 ZFS_TIME_ENCODE(&now, mtime); 1682 if (zp->z_zfsvfs->z_use_fuids) { 1683 zp->z_pflags |= (ZFS_ARCHIVE | 1684 ZFS_AV_MODIFIED); 1685 } 1686 } 1687 1688 if (flag & AT_CTIME) { 1689 ZFS_TIME_ENCODE(&now, ctime); 1690 if (zp->z_zfsvfs->z_use_fuids) 1691 zp->z_pflags |= ZFS_ARCHIVE; 1692 } 1693 } 1694 1695 /* 1696 * Grow the block size for a file. 1697 * 1698 * IN: zp - znode of file to free data in. 1699 * size - requested block size 1700 * tx - open transaction. 
1701 * 1702 * NOTE: this function assumes that the znode is write locked. 1703 */ 1704 void 1705 zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx) 1706 { 1707 int error; 1708 u_longlong_t dummy; 1709 1710 if (size <= zp->z_blksz) 1711 return; 1712 /* 1713 * If the file size is already greater than the current blocksize, 1714 * we will not grow. If there is more than one block in a file, 1715 * the blocksize cannot change. 1716 */ 1717 if (zp->z_blksz && zp->z_size > zp->z_blksz) 1718 return; 1719 1720 error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id, 1721 size, 0, tx); 1722 1723 if (error == ENOTSUP) 1724 return; 1725 ASSERT0(error); 1726 1727 /* What blocksize did we actually get? */ 1728 dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy); 1729 } 1730 1731 #ifdef illumos 1732 /* 1733 * This is a dummy interface used when pvn_vplist_dirty() should *not* 1734 * be calling back into the fs for a putpage(). E.g.: when truncating 1735 * a file, the pages being "thrown away* don't need to be written out. 1736 */ 1737 /* ARGSUSED */ 1738 static int 1739 zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp, 1740 int flags, cred_t *cr) 1741 { 1742 ASSERT(0); 1743 return (0); 1744 } 1745 #endif 1746 1747 /* 1748 * Increase the file length 1749 * 1750 * IN: zp - znode of file to free data in. 1751 * end - new end-of-file 1752 * 1753 * RETURN: 0 on success, error code on failure 1754 */ 1755 static int 1756 zfs_extend(znode_t *zp, uint64_t end) 1757 { 1758 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1759 dmu_tx_t *tx; 1760 rl_t *rl; 1761 uint64_t newblksz; 1762 int error; 1763 1764 /* 1765 * We will change zp_size, lock the whole file. 1766 */ 1767 rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER); 1768 1769 /* 1770 * Nothing to do if file already at desired length. 
1771 */ 1772 if (end <= zp->z_size) { 1773 zfs_range_unlock(rl); 1774 return (0); 1775 } 1776 tx = dmu_tx_create(zfsvfs->z_os); 1777 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 1778 zfs_sa_upgrade_txholds(tx, zp); 1779 if (end > zp->z_blksz && 1780 (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) { 1781 /* 1782 * We are growing the file past the current block size. 1783 */ 1784 if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) { 1785 /* 1786 * File's blocksize is already larger than the 1787 * "recordsize" property. Only let it grow to 1788 * the next power of 2. 1789 */ 1790 ASSERT(!ISP2(zp->z_blksz)); 1791 newblksz = MIN(end, 1 << highbit64(zp->z_blksz)); 1792 } else { 1793 newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz); 1794 } 1795 dmu_tx_hold_write(tx, zp->z_id, 0, newblksz); 1796 } else { 1797 newblksz = 0; 1798 } 1799 1800 error = dmu_tx_assign(tx, TXG_WAIT); 1801 if (error) { 1802 dmu_tx_abort(tx); 1803 zfs_range_unlock(rl); 1804 return (error); 1805 } 1806 1807 if (newblksz) 1808 zfs_grow_blocksize(zp, newblksz, tx); 1809 1810 zp->z_size = end; 1811 1812 VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs), 1813 &zp->z_size, sizeof (zp->z_size), tx)); 1814 1815 vnode_pager_setsize(ZTOV(zp), end); 1816 1817 zfs_range_unlock(rl); 1818 1819 dmu_tx_commit(tx); 1820 1821 return (0); 1822 } 1823 1824 /* 1825 * Free space in a file. 1826 * 1827 * IN: zp - znode of file to free data in. 1828 * off - start of section to free. 1829 * len - length of section to free. 1830 * 1831 * RETURN: 0 on success, error code on failure 1832 */ 1833 static int 1834 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len) 1835 { 1836 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1837 rl_t *rl; 1838 int error; 1839 1840 /* 1841 * Lock the range being freed. 1842 */ 1843 rl = zfs_range_lock(zp, off, len, RL_WRITER); 1844 1845 /* 1846 * Nothing to do if file already at desired length. 
1847 */ 1848 if (off >= zp->z_size) { 1849 zfs_range_unlock(rl); 1850 return (0); 1851 } 1852 1853 if (off + len > zp->z_size) 1854 len = zp->z_size - off; 1855 1856 error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len); 1857 1858 if (error == 0) { 1859 /* 1860 * In FreeBSD we cannot free block in the middle of a file, 1861 * but only at the end of a file, so this code path should 1862 * never happen. 1863 */ 1864 vnode_pager_setsize(ZTOV(zp), off); 1865 } 1866 1867 zfs_range_unlock(rl); 1868 1869 return (error); 1870 } 1871 1872 /* 1873 * Truncate a file 1874 * 1875 * IN: zp - znode of file to free data in. 1876 * end - new end-of-file. 1877 * 1878 * RETURN: 0 on success, error code on failure 1879 */ 1880 static int 1881 zfs_trunc(znode_t *zp, uint64_t end) 1882 { 1883 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1884 vnode_t *vp = ZTOV(zp); 1885 dmu_tx_t *tx; 1886 rl_t *rl; 1887 int error; 1888 sa_bulk_attr_t bulk[2]; 1889 int count = 0; 1890 1891 /* 1892 * We will change zp_size, lock the whole file. 1893 */ 1894 rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER); 1895 1896 /* 1897 * Nothing to do if file already at desired length. 
1898 */ 1899 if (end >= zp->z_size) { 1900 zfs_range_unlock(rl); 1901 return (0); 1902 } 1903 1904 error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1); 1905 if (error) { 1906 zfs_range_unlock(rl); 1907 return (error); 1908 } 1909 tx = dmu_tx_create(zfsvfs->z_os); 1910 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 1911 zfs_sa_upgrade_txholds(tx, zp); 1912 dmu_tx_mark_netfree(tx); 1913 error = dmu_tx_assign(tx, TXG_WAIT); 1914 if (error) { 1915 dmu_tx_abort(tx); 1916 zfs_range_unlock(rl); 1917 return (error); 1918 } 1919 1920 zp->z_size = end; 1921 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), 1922 NULL, &zp->z_size, sizeof (zp->z_size)); 1923 1924 if (end == 0) { 1925 zp->z_pflags &= ~ZFS_SPARSE; 1926 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), 1927 NULL, &zp->z_pflags, 8); 1928 } 1929 VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0); 1930 1931 dmu_tx_commit(tx); 1932 1933 /* 1934 * Clear any mapped pages in the truncated region. This has to 1935 * happen outside of the transaction to avoid the possibility of 1936 * a deadlock with someone trying to push a page that we are 1937 * about to invalidate. 1938 */ 1939 vnode_pager_setsize(vp, end); 1940 1941 zfs_range_unlock(rl); 1942 1943 return (0); 1944 } 1945 1946 /* 1947 * Free space in a file 1948 * 1949 * IN: zp - znode of file to free data in. 1950 * off - start of range 1951 * len - end of range (0 => EOF) 1952 * flag - current file open mode flags. 
1953 * log - TRUE if this action should be logged 1954 * 1955 * RETURN: 0 on success, error code on failure 1956 */ 1957 int 1958 zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log) 1959 { 1960 vnode_t *vp = ZTOV(zp); 1961 dmu_tx_t *tx; 1962 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1963 zilog_t *zilog = zfsvfs->z_log; 1964 uint64_t mode; 1965 uint64_t mtime[2], ctime[2]; 1966 sa_bulk_attr_t bulk[3]; 1967 int count = 0; 1968 int error; 1969 1970 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode, 1971 sizeof (mode))) != 0) 1972 return (error); 1973 1974 if (off > zp->z_size) { 1975 error = zfs_extend(zp, off+len); 1976 if (error == 0 && log) 1977 goto log; 1978 else 1979 return (error); 1980 } 1981 1982 /* 1983 * Check for any locks in the region to be freed. 1984 */ 1985 1986 if (MANDLOCK(vp, (mode_t)mode)) { 1987 uint64_t length = (len ? len : zp->z_size - off); 1988 if (error = chklock(vp, FWRITE, off, length, flag, NULL)) 1989 return (error); 1990 } 1991 1992 if (len == 0) { 1993 error = zfs_trunc(zp, off); 1994 } else { 1995 if ((error = zfs_free_range(zp, off, len)) == 0 && 1996 off + len > zp->z_size) 1997 error = zfs_extend(zp, off+len); 1998 } 1999 if (error || !log) 2000 return (error); 2001 log: 2002 tx = dmu_tx_create(zfsvfs->z_os); 2003 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 2004 zfs_sa_upgrade_txholds(tx, zp); 2005 error = dmu_tx_assign(tx, TXG_WAIT); 2006 if (error) { 2007 dmu_tx_abort(tx); 2008 return (error); 2009 } 2010 2011 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16); 2012 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16); 2013 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), 2014 NULL, &zp->z_pflags, 8); 2015 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE); 2016 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); 2017 ASSERT(error == 0); 2018 2019 zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len); 2020 2021 dmu_tx_commit(tx); 2022 return 
(0); 2023 } 2024 2025 void 2026 zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx) 2027 { 2028 uint64_t moid, obj, sa_obj, version; 2029 uint64_t sense = ZFS_CASE_SENSITIVE; 2030 uint64_t norm = 0; 2031 nvpair_t *elem; 2032 int error; 2033 int i; 2034 znode_t *rootzp = NULL; 2035 zfsvfs_t *zfsvfs; 2036 vattr_t vattr; 2037 znode_t *zp; 2038 zfs_acl_ids_t acl_ids; 2039 2040 /* 2041 * First attempt to create master node. 2042 */ 2043 /* 2044 * In an empty objset, there are no blocks to read and thus 2045 * there can be no i/o errors (which we assert below). 2046 */ 2047 moid = MASTER_NODE_OBJ; 2048 error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE, 2049 DMU_OT_NONE, 0, tx); 2050 ASSERT(error == 0); 2051 2052 /* 2053 * Set starting attributes. 2054 */ 2055 version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os))); 2056 elem = NULL; 2057 while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) { 2058 /* For the moment we expect all zpl props to be uint64_ts */ 2059 uint64_t val; 2060 char *name; 2061 2062 ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64); 2063 VERIFY(nvpair_value_uint64(elem, &val) == 0); 2064 name = nvpair_name(elem); 2065 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) { 2066 if (val < version) 2067 version = val; 2068 } else { 2069 error = zap_update(os, moid, name, 8, 1, &val, tx); 2070 } 2071 ASSERT(error == 0); 2072 if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0) 2073 norm = val; 2074 else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0) 2075 sense = val; 2076 } 2077 ASSERT(version != 0); 2078 error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx); 2079 2080 /* 2081 * Create zap object used for SA attribute registration 2082 */ 2083 2084 if (version >= ZPL_VERSION_SA) { 2085 sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE, 2086 DMU_OT_NONE, 0, tx); 2087 error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx); 2088 ASSERT(error == 0); 2089 } else { 2090 sa_obj = 0; 2091 } 
2092 /* 2093 * Create a delete queue. 2094 */ 2095 obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx); 2096 2097 error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx); 2098 ASSERT(error == 0); 2099 2100 /* 2101 * Create root znode. Create minimal znode/vnode/zfsvfs 2102 * to allow zfs_mknode to work. 2103 */ 2104 VATTR_NULL(&vattr); 2105 vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE; 2106 vattr.va_type = VDIR; 2107 vattr.va_mode = S_IFDIR|0755; 2108 vattr.va_uid = crgetuid(cr); 2109 vattr.va_gid = crgetgid(cr); 2110 2111 zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP); 2112 2113 rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP); 2114 ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs)); 2115 rootzp->z_moved = 0; 2116 rootzp->z_unlinked = 0; 2117 rootzp->z_atime_dirty = 0; 2118 rootzp->z_is_sa = USE_SA(version, os); 2119 2120 zfsvfs->z_os = os; 2121 zfsvfs->z_parent = zfsvfs; 2122 zfsvfs->z_version = version; 2123 zfsvfs->z_use_fuids = USE_FUIDS(version, os); 2124 zfsvfs->z_use_sa = USE_SA(version, os); 2125 zfsvfs->z_norm = norm; 2126 2127 error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END, 2128 &zfsvfs->z_attr_table); 2129 2130 ASSERT(error == 0); 2131 2132 /* 2133 * Fold case on file systems that are always or sometimes case 2134 * insensitive. 
2135 */ 2136 if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED) 2137 zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER; 2138 2139 mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL); 2140 list_create(&zfsvfs->z_all_znodes, sizeof (znode_t), 2141 offsetof(znode_t, z_link_node)); 2142 2143 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++) 2144 mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL); 2145 2146 rootzp->z_zfsvfs = zfsvfs; 2147 VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr, 2148 cr, NULL, &acl_ids)); 2149 zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids); 2150 ASSERT3P(zp, ==, rootzp); 2151 error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx); 2152 ASSERT(error == 0); 2153 zfs_acl_ids_free(&acl_ids); 2154 POINTER_INVALIDATE(&rootzp->z_zfsvfs); 2155 2156 sa_handle_destroy(rootzp->z_sa_hdl); 2157 kmem_cache_free(znode_cache, rootzp); 2158 2159 /* 2160 * Create shares directory 2161 */ 2162 2163 error = zfs_create_share_dir(zfsvfs, tx); 2164 2165 ASSERT(error == 0); 2166 2167 for (i = 0; i != ZFS_OBJ_MTX_SZ; i++) 2168 mutex_destroy(&zfsvfs->z_hold_mtx[i]); 2169 mutex_destroy(&zfsvfs->z_znodes_lock); 2170 kmem_free(zfsvfs, sizeof (zfsvfs_t)); 2171 } 2172 #endif /* _KERNEL */ 2173 2174 static int 2175 zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table) 2176 { 2177 uint64_t sa_obj = 0; 2178 int error; 2179 2180 error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj); 2181 if (error != 0 && error != ENOENT) 2182 return (error); 2183 2184 error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table); 2185 return (error); 2186 } 2187 2188 static int 2189 zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp, 2190 dmu_buf_t **db, void *tag) 2191 { 2192 dmu_object_info_t doi; 2193 int error; 2194 2195 if ((error = sa_buf_hold(osp, obj, tag, db)) != 0) 2196 return (error); 2197 2198 dmu_object_info_from_db(*db, &doi); 2199 if ((doi.doi_bonus_type != DMU_OT_SA && 2200 doi.doi_bonus_type != 
DMU_OT_ZNODE) || 2201 doi.doi_bonus_type == DMU_OT_ZNODE && 2202 doi.doi_bonus_size < sizeof (znode_phys_t)) { 2203 sa_buf_rele(*db, tag); 2204 return (SET_ERROR(ENOTSUP)); 2205 } 2206 2207 error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp); 2208 if (error != 0) { 2209 sa_buf_rele(*db, tag); 2210 return (error); 2211 } 2212 2213 return (0); 2214 } 2215 2216 void 2217 zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag) 2218 { 2219 sa_handle_destroy(hdl); 2220 sa_buf_rele(db, tag); 2221 } 2222 2223 /* 2224 * Given an object number, return its parent object number and whether 2225 * or not the object is an extended attribute directory. 2226 */ 2227 static int 2228 zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table, 2229 uint64_t *pobjp, int *is_xattrdir) 2230 { 2231 uint64_t parent; 2232 uint64_t pflags; 2233 uint64_t mode; 2234 uint64_t parent_mode; 2235 sa_bulk_attr_t bulk[3]; 2236 sa_handle_t *sa_hdl; 2237 dmu_buf_t *sa_db; 2238 int count = 0; 2239 int error; 2240 2241 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL, 2242 &parent, sizeof (parent)); 2243 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL, 2244 &pflags, sizeof (pflags)); 2245 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL, 2246 &mode, sizeof (mode)); 2247 2248 if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0) 2249 return (error); 2250 2251 /* 2252 * When a link is removed its parent pointer is not changed and will 2253 * be invalid. There are two cases where a link is removed but the 2254 * file stays around, when it goes to the delete queue and when there 2255 * are additional links. 
 */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	/* Fetch the parent's mode so it can be sanity-checked below. */
	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	/*
	 * Gather mode, generation, link count and ctime with a single
	 * bulk SA lookup rather than four separate sa_lookup() calls.
	 */
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

/*
 * Walk up the directory hierarchy from 'obj' toward the root,
 * building the object's pathname right-to-left at the tail of 'buf':
 * a "/component" is prepended for each level.  On success the
 * finished string is moved to the front of 'buf'.
 *
 * 'hdl' is the caller's SA handle for 'obj' and is never released
 * here; handles for intermediate ancestors are grabbed and released
 * internally.
 */
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;
	int error;

	*path = '\0';
	sa_hdl = hdl;

	for (;;) {
		uint64_t pobj;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir;

		/*
		 * Release the previous iteration's handle.  It is kept
		 * alive until now so that, should zfs_grab_sa_handle()
		 * fail at the bottom of the loop, we can fall back to a
		 * still-valid handle for the common cleanup below.
		 */
		if (prevdb)
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		/*
		 * An object that is its own parent is the root
		 * directory: terminate the walk, ensuring the path
		 * begins with '/'.
		 */
		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			/* xattr dirs have no name entry in a parent ZAP. */
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			/* Look up obj's name in its parent directory. */
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		/* Prepend "/component" to the path built so far. */
		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		bcopy(component, path, complen);
		obj = pobj;

		/*
		 * Remember the current handle before moving up to the
		 * parent; it is released at the top of the next
		 * iteration.  The caller's handle ('hdl') is never
		 * saved here -- its release belongs to the caller.
		 */
		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			/* Restore the last valid handle for cleanup below. */
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	/* Release the last handle grabbed, unless it is the caller's. */
	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

/*
 * Given an object number, place the object's pathname (relative to
 * the root of the objset) in 'buf', which is 'len' bytes long.
 */
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

/*
 * Given an object number, fill in both its zpl-level statistics
 * ('sb') and its pathname ('buf'/'len', as in zfs_obj_to_path()).
 */
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	/*
	 * NOTE(review): this tail terminator appears redundant --
	 * zfs_obj_to_path_impl() writes '\0' at buf + len - 1 itself
	 * before any early return; confirm before removing.
	 */
	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

#ifdef _KERNEL
/*
 * Look up the parent znode and name of 'zp'.  On success, '*dzpp'
 * holds the parent znode (obtained via zfs_zget(); the caller is
 * responsible for releasing it) and 'buf' holds zp's name within
 * that parent.  Returns EINVAL for extended attributes and for the
 * root object, which has no name in any parent.
 */
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t parent;
	int is_xattrdir;
	int err;

	/* Extended attributes should not be visible as regular files. */
	if ((zp->z_pflags & ZFS_XATTR) != 0)
		return (SET_ERROR(EINVAL));

	err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
	    &parent, &is_xattrdir);
	if (err != 0)
		return (err);
	ASSERT0(is_xattrdir);

	/* No name as this is a root object. */
	if (parent == zp->z_id)
		return (SET_ERROR(EINVAL));

	err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
	    ZFS_DIRENT_OBJ(-1ULL), buf);
	if (err != 0)
		return (err);
	err = zfs_zget(zfsvfs, parent, dzpp);
	return (err);
}
#endif /* _KERNEL */