1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2012, 2014 by Delphix. All rights reserved. 
24 * Copyright (c) 2014 Integros [integros.com] 25 */ 26 27 /* Portions Copyright 2007 Jeremy Teo */ 28 /* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */ 29 30 #ifdef _KERNEL 31 #include <sys/types.h> 32 #include <sys/param.h> 33 #include <sys/time.h> 34 #include <sys/systm.h> 35 #include <sys/sysmacros.h> 36 #include <sys/resource.h> 37 #include <sys/mntent.h> 38 #include <sys/u8_textprep.h> 39 #include <sys/dsl_dataset.h> 40 #include <sys/vfs.h> 41 #include <sys/vnode.h> 42 #include <sys/file.h> 43 #include <sys/kmem.h> 44 #include <sys/errno.h> 45 #include <sys/unistd.h> 46 #include <sys/atomic.h> 47 #include <sys/zfs_dir.h> 48 #include <sys/zfs_acl.h> 49 #include <sys/zfs_ioctl.h> 50 #include <sys/zfs_rlock.h> 51 #include <sys/zfs_fuid.h> 52 #include <sys/dnode.h> 53 #include <sys/fs/zfs.h> 54 #include <sys/kidmap.h> 55 56 #ifdef __NetBSD__ 57 #include <sys/zfs_ctldir.h> 58 #include <miscfs/specfs/specdev.h> 59 60 extern int (**zfs_vnodeop_p)(void *); 61 extern int (**zfs_fifoop_p)(void *); 62 extern int (**zfs_specop_p)(void *); 63 64 #endif 65 #endif /* _KERNEL */ 66 67 #include <sys/dmu.h> 68 #include <sys/dmu_objset.h> 69 #include <sys/refcount.h> 70 #include <sys/stat.h> 71 #include <sys/zap.h> 72 #include <sys/zfs_znode.h> 73 #include <sys/sa.h> 74 #include <sys/zfs_sa.h> 75 #include <sys/zfs_stat.h> 76 #include <sys/refcount.h> 77 78 #include "zfs_prop.h" 79 #include "zfs_comutil.h" 80 81 /* Used by fstat(1). */ 82 SYSCTL_INT(_debug_sizeof, OID_AUTO, znode, CTLFLAG_RD, 83 SYSCTL_NULL_INT_PTR, sizeof(znode_t), "sizeof(znode_t)"); 84 85 /* 86 * Define ZNODE_STATS to turn on statistic gathering. By default, it is only 87 * turned on when DEBUG is also defined. 
 */
#ifdef	DEBUG
#define	ZNODE_STATS
#endif	/* DEBUG */

#ifdef	ZNODE_STATS
#define	ZNODE_STAT_ADD(stat)			((stat)++)
#else
#define	ZNODE_STAT_ADD(stat)			/* nothing */
#endif	/* ZNODE_STATS */

/*
 * Functions needed for userland (ie: libzpool) are not put under
 * #ifdef _KERNEL; the rest of the functions have dependencies
 * (such as VFS logic) that will not compile easily in userland.
 */
#ifdef _KERNEL
/*
 * Needed to close a small window in zfs_znode_move() that allows the zfsvfs to
 * be freed before it can be safely accessed.
 */
krwlock_t zfsvfs_lock;

/* Slab cache for znode_t; set up in zfs_znode_init(), torn down in _fini(). */
static kmem_cache_t *znode_cache = NULL;

/*ARGSUSED*/
static void
znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
{
	/*
	 * We should never drop all dbuf refs without first clearing
	 * the eviction callback.
	 */
	panic("evicting znode %p\n", user_ptr);
}

extern struct vop_vector zfs_vnodeops;
extern struct vop_vector zfs_fifoops;
extern struct vop_vector zfs_shareops;

/*
 * kmem cache constructor: set up the locks, AVL tree and list linkage that
 * persist for the lifetime of a cached znode.  The zfsvfs pointer is
 * deliberately poisoned (POINTER_INVALIDATE) so zfs_znode_move() can tell
 * an unused cache object from a live znode.
 */
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
	znode_t *zp = buf;

	POINTER_INVALIDATE(&zp->z_zfsvfs);

	list_link_init(&zp->z_link_node);

	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zp->z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	zp->z_acl_cached = NULL;
	zp->z_vnode = NULL;
	zp->z_moved = 0;
	return (0);
}

/*
 * kmem cache destructor: mirror of the constructor.  The asserts document
 * the contract that a znode is fully detached (no vnode, not on the
 * per-fs list, no cached ACL) before it is returned to the cache.
 */
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
	znode_t *zp = buf;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	ASSERT(ZTOV(zp) == NULL);
#ifndef __NetBSD__
	vn_free(ZTOV(zp));
#endif
	ASSERT(!list_link_active(&zp->z_link_node));
	mutex_destroy(&zp->z_acl_lock);
	avl_destroy(&zp->z_range_avl);
	mutex_destroy(&zp->z_range_lock);

	ASSERT(zp->z_acl_cached == NULL);
}

#ifdef	ZNODE_STATS
/* Counters for the reasons zfs_znode_move() declines or defers a move. */
static struct {
	uint64_t zms_zfsvfs_invalid;
	uint64_t zms_zfsvfs_recheck1;
	uint64_t zms_zfsvfs_unmounted;
	uint64_t zms_zfsvfs_recheck2;
	uint64_t zms_obj_held;
	uint64_t zms_vnode_locked;
	uint64_t zms_not_only_dnlc;
} znode_move_stats;
#endif	/* ZNODE_STATS */

#ifdef illumos
/*
 * Relocate the contents of znode ozp into nzp as part of a kmem cache
 * defragmentation move.  Caller (zfs_znode_move) holds all locks needed
 * to make this copy atomic with respect to other znode users.
 */
static void
zfs_znode_move_impl(znode_t *ozp, znode_t *nzp)
{
	vnode_t *vp;

	/* Copy fields. */
	nzp->z_zfsvfs = ozp->z_zfsvfs;

	/* Swap vnodes. */
	vp = nzp->z_vnode;
	nzp->z_vnode = ozp->z_vnode;
	ozp->z_vnode = vp; /* let destructor free the overwritten vnode */
	ZTOV(ozp)->v_data = ozp;
	ZTOV(nzp)->v_data = nzp;

	nzp->z_id = ozp->z_id;
	ASSERT(ozp->z_dirlocks == NULL); /* znode not in use */
	ASSERT(avl_numnodes(&ozp->z_range_avl) == 0);
	nzp->z_unlinked = ozp->z_unlinked;
	nzp->z_atime_dirty = ozp->z_atime_dirty;
	nzp->z_zn_prefetch = ozp->z_zn_prefetch;
	nzp->z_blksz = ozp->z_blksz;
	nzp->z_seq = ozp->z_seq;
	nzp->z_mapcnt = ozp->z_mapcnt;
	nzp->z_gen = ozp->z_gen;
	nzp->z_sync_cnt = ozp->z_sync_cnt;
	nzp->z_is_sa = ozp->z_is_sa;
	nzp->z_sa_hdl = ozp->z_sa_hdl;
	bcopy(ozp->z_atime, nzp->z_atime, sizeof (uint64_t) * 2);
	nzp->z_links = ozp->z_links;
	nzp->z_size = ozp->z_size;
	nzp->z_pflags = ozp->z_pflags;
	nzp->z_uid = ozp->z_uid;
	nzp->z_gid = ozp->z_gid;
	nzp->z_mode = ozp->z_mode;

	/*
	 * Since this is just an idle znode and kmem is already dealing with
	 * memory pressure, release any cached ACL.
	 */
	if (ozp->z_acl_cached) {
		zfs_acl_free(ozp->z_acl_cached);
		ozp->z_acl_cached = NULL;
	}

	sa_set_userp(nzp->z_sa_hdl, nzp);

	/*
	 * Invalidate the original znode by clearing fields that provide a
	 * pointer back to the znode.  Set the low bit of the vfs pointer to
	 * ensure that zfs_znode_move() recognizes the znode as invalid in any
	 * subsequent callback.
	 */
	ozp->z_sa_hdl = NULL;
	POINTER_INVALIDATE(&ozp->z_zfsvfs);

	/*
	 * Mark the znode.
	 */
	nzp->z_moved = 1;
	ozp->z_moved = (uint8_t)-1;
}

/*
 * kmem move callback (kmem_cbrc_t protocol): decide whether the cached
 * object at 'buf' can be relocated to 'newbuf'.  Returns DONT_KNOW when
 * the buffer cannot be identified as a live znode, LATER when it is busy,
 * and YES after a successful move.  Lock order: zfsvfs_lock ->
 * z_teardown_lock -> z_znodes_lock -> obj hold -> v_lock.
 */
/*ARGSUSED*/
static kmem_cbrc_t
zfs_znode_move(void *buf, void *newbuf, size_t size, void *arg)
{
	znode_t *ozp = buf, *nzp = newbuf;
	zfsvfs_t *zfsvfs;
	vnode_t *vp;

	/*
	 * The znode is on the file system's list of known znodes if the vfs
	 * pointer is valid.  We set the low bit of the vfs pointer when freeing
	 * the znode to invalidate it, and the memory patterns written by kmem
	 * (baddcafe and deadbeef) set at least one of the two low bits.  A newly
	 * created znode sets the vfs pointer last of all to indicate that the
	 * znode is known and in a valid state to be moved by this function.
	 */
	zfsvfs = ozp->z_zfsvfs;
	if (!POINTER_IS_VALID(zfsvfs)) {
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_invalid);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * Close a small window in which it's possible that the filesystem could
	 * be unmounted and freed, and zfsvfs, though valid in the previous
	 * statement, could point to unrelated memory by the time we try to
	 * prevent the filesystem from being unmounted.
	 */
	rw_enter(&zfsvfs_lock, RW_WRITER);
	if (zfsvfs != ozp->z_zfsvfs) {
		rw_exit(&zfsvfs_lock);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck1);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * If the znode is still valid, then so is the file system. We know that
	 * no valid file system can be freed while we hold zfsvfs_lock, so we
	 * can safely ensure that the filesystem is not and will not be
	 * unmounted. The next statement is equivalent to ZFS_ENTER().
	 */
	rrm_enter(&zfsvfs->z_teardown_lock, RW_READER, FTAG);
	if (zfsvfs->z_unmounted) {
		ZFS_EXIT(zfsvfs);
		rw_exit(&zfsvfs_lock);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_unmounted);
		return (KMEM_CBRC_DONT_KNOW);
	}
	rw_exit(&zfsvfs_lock);

	mutex_enter(&zfsvfs->z_znodes_lock);
	/*
	 * Recheck the vfs pointer in case the znode was removed just before
	 * acquiring the lock.
	 */
	if (zfsvfs != ozp->z_zfsvfs) {
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_zfsvfs_recheck2);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * At this point we know that as long as we hold z_znodes_lock, the
	 * znode cannot be freed and fields within the znode can be safely
	 * accessed. Now, prevent a race with zfs_zget().
	 */
	if (ZFS_OBJ_HOLD_TRYENTER(zfsvfs, ozp->z_id) == 0) {
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_obj_held);
		return (KMEM_CBRC_LATER);
	}

	vp = ZTOV(ozp);
	if (mutex_tryenter(&vp->v_lock) == 0) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_vnode_locked);
		return (KMEM_CBRC_LATER);
	}

	/* Only move znodes that are referenced _only_ by the DNLC. */
	if (vp->v_count != 1 || !vn_in_dnlc(vp)) {
		mutex_exit(&vp->v_lock);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);
		mutex_exit(&zfsvfs->z_znodes_lock);
		ZFS_EXIT(zfsvfs);
		ZNODE_STAT_ADD(znode_move_stats.zms_not_only_dnlc);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * The znode is known and in a valid state to move. We're holding the
	 * locks needed to execute the critical section.
	 */
	zfs_znode_move_impl(ozp, nzp);
	mutex_exit(&vp->v_lock);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, ozp->z_id);

	list_link_replace(&ozp->z_link_node, &nzp->z_link_node);
	mutex_exit(&zfsvfs->z_znodes_lock);
	ZFS_EXIT(zfsvfs);

	return (KMEM_CBRC_YES);
}
#endif /* illumos */

/*
 * Module init: create the znode kmem cache and its protecting rwlock.
 * NOTE(review): zfs_znode_move() is only compiled under "#ifdef illumos";
 * confirm that kmem_cache_set_move() is a no-op stub on the other
 * platforms this file is built for.
 */
void
zfs_znode_init(void)
{
	/*
	 * Initialize zcache
	 */
	rw_init(&zfsvfs_lock, NULL, RW_DEFAULT, NULL);
	ASSERT(znode_cache == NULL);
	znode_cache = kmem_cache_create("zfs_znode_cache",
	    sizeof (znode_t), 0, zfs_znode_cache_constructor,
	    zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
	kmem_cache_set_move(znode_cache, zfs_znode_move);
}

/*
 * Module fini: destroy the znode cache and release the vnode op tables
 * (illumos only).
 */
void
zfs_znode_fini(void)
{
#ifdef illumos
	/*
	 * Cleanup vfs & vnode ops
	 */
	zfs_remove_op_tables();
#endif

	/*
	 * Cleanup zcache
	 */
	if (znode_cache)
		kmem_cache_destroy(znode_cache);
	znode_cache = NULL;
	rw_destroy(&zfsvfs_lock);
}

#ifdef illumos
struct vnodeops *zfs_dvnodeops;
struct vnodeops *zfs_fvnodeops;
struct vnodeops *zfs_symvnodeops;
struct vnodeops *zfs_xdvnodeops;
struct vnodeops *zfs_evnodeops;
struct vnodeops *zfs_sharevnodeops;

/*
 * Tear down the vfs ops and free every vnode ops table created by
 * zfs_create_op_tables(); NULL the globals so a later create can re-run.
 */
void
zfs_remove_op_tables()
{
	/*
	 * Remove vfs ops
	 */
	ASSERT(zfsfstype);
	(void) vfs_freevfsops_by_type(zfsfstype);
	zfsfstype = 0;

	/*
	 * Remove vnode ops
	 */
	if (zfs_dvnodeops)
		vn_freevnodeops(zfs_dvnodeops);
	if (zfs_fvnodeops)
		vn_freevnodeops(zfs_fvnodeops);
	if (zfs_symvnodeops)
		vn_freevnodeops(zfs_symvnodeops);
	if (zfs_xdvnodeops)
		vn_freevnodeops(zfs_xdvnodeops);
	if (zfs_evnodeops)
		vn_freevnodeops(zfs_evnodeops);
	if (zfs_sharevnodeops)
		vn_freevnodeops(zfs_sharevnodeops);

	zfs_dvnodeops = NULL;
	zfs_fvnodeops = NULL;
	zfs_symvnodeops = NULL;
	zfs_xdvnodeops = NULL;
	zfs_evnodeops = NULL;
	zfs_sharevnodeops = NULL;
}

extern const fs_operation_def_t zfs_dvnodeops_template[];
extern const fs_operation_def_t zfs_fvnodeops_template[];
extern const fs_operation_def_t zfs_xdvnodeops_template[];
extern const fs_operation_def_t zfs_symvnodeops_template[];
extern const fs_operation_def_t zfs_evnodeops_template[];
extern const fs_operation_def_t zfs_sharevnodeops_template[];

/*
 * Build the six vnode ops tables from their templates.  Returns 0 on
 * success or the first vn_make_ops() error; on error the tables created
 * so far are left in place for zfs_remove_op_tables() to free.
 */
int
zfs_create_op_tables()
{
	int error;

	/*
	 * zfs_dvnodeops can be set if mod_remove() calls mod_installfs()
	 * due to a failure to remove the 2nd modlinkage (zfs_modldrv).
	 * In this case we just return as the ops vectors are already set up.
	 */
	if (zfs_dvnodeops)
		return (0);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
	    &zfs_dvnodeops);
	if (error)
		return (error);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
	    &zfs_fvnodeops);
	if (error)
		return (error);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_symvnodeops_template,
	    &zfs_symvnodeops);
	if (error)
		return (error);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_xdvnodeops_template,
	    &zfs_xdvnodeops);
	if (error)
		return (error);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_evnodeops_template,
	    &zfs_evnodeops);
	if (error)
		return (error);

	error = vn_make_ops(MNTTYPE_ZFS, zfs_sharevnodeops_template,
	    &zfs_sharevnodeops);

	return (error);
}
#endif	/* illumos */

/*
 * Create the hidden ".zfs/shares" directory object and record its object
 * number in the MASTER_NODE_OBJ zap and in zfsvfs->z_shares_dir.  Uses a
 * throw-away znode from the cache (never linked into the namespace).
 * Returns the zap_add() error code.
 */
int
zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
{
	zfs_acl_ids_t acl_ids;
	vattr_t vattr;
	znode_t *sharezp;
	znode_t *zp;
	int error;

	vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0555;
	vattr.va_uid = crgetuid(kcred);
	vattr.va_gid = crgetgid(kcred);

	sharezp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(sharezp->z_zfsvfs));
	sharezp->z_moved = 0;
	sharezp->z_unlinked = 0;
	sharezp->z_atime_dirty = 0;
	sharezp->z_zfsvfs = zfsvfs;
	sharezp->z_is_sa = zfsvfs->z_use_sa;

	VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
	    kcred, NULL, &acl_ids));
	zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, sharezp);
	POINTER_INVALIDATE(&sharezp->z_zfsvfs);
	error = zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
	    ZFS_SHARES_DIR, 8, 1, &sharezp->z_id, tx);
	zfsvfs->z_shares_dir = sharezp->z_id;

	zfs_acl_ids_free(&acl_ids);
	sa_handle_destroy(sharezp->z_sa_hdl);
	kmem_cache_free(znode_cache, sharezp);

	return (error);
}

/*
 * define a couple of values we need available
 * for both 64 and 32 bit environments.
 */
#ifndef NBITSMINOR64
#define	NBITSMINOR64	32
#endif
#ifndef MAXMAJ64
#define	MAXMAJ64	0xffffffffUL
#endif
#ifndef	MAXMIN64
#define	MAXMIN64	0xffffffffUL
#endif

/*
 * Create special expldev for ZFS private use.
 * Can't use standard expldev since it doesn't do
 * what we want.  The standard expldev() takes a
 * dev32_t in LP64 and expands it to a long dev_t.
 * We need an interface that takes a dev32_t in ILP32
 * and expands it to a long dev_t.
 */
static uint64_t
zfs_expldev(dev_t dev)
{
	return (((uint64_t)major(dev) << NBITSMINOR64) | minor(dev));
}
/*
 * Special cmpldev for ZFS private use.
 * Can't use standard cmpldev since it takes
 * a long dev_t and compresses it to dev32_t in
 * LP64.  We need to do a compaction of a long dev_t
 * to a dev32_t in ILP32.
 */
dev_t
zfs_cmpldev(uint64_t dev)
{
	return (makedev((dev >> NBITSMINOR64), (dev & MAXMIN64)));
}

/*
 * Attach an SA handle to zp (either a fresh shared handle from the dbuf,
 * or the caller-supplied sa_hdl) and flag the root vnode.  Caller must
 * hold the per-object mutex for zp->z_id.
 */
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
    dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));

	ASSERT(zp->z_sa_hdl == NULL);
	ASSERT(zp->z_acl_cached == NULL);
	if (sa_hdl == NULL) {
		VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
		    SA_HDL_SHARED, &zp->z_sa_hdl));
	} else {
		zp->z_sa_hdl = sa_hdl;
		sa_set_userp(sa_hdl, zp);
	}

	zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;

	/*
	 * Slap on VROOT if we are the root znode unless we are the root
	 * node of a snapshot mounted under .zfs.
	 */
	if (zp->z_id == zfsvfs->z_root && zfsvfs->z_parent == zfsvfs)
		ZTOV(zp)->v_flag |= VROOT;

	vn_exists(ZTOV(zp));
}

/*
 * Release the znode's SA handle (and with it the underlying dbuf hold).
 * Caller must hold the per-object mutex, or the znode must be unlinked,
 * or the teardown-inactive lock must be write-held.
 */
void
zfs_znode_dmu_fini(znode_t *zp)
{
	ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zp->z_zfsvfs, zp->z_id)) ||
	    zp->z_unlinked ||
	    RW_WRITE_HELD(&zp->z_zfsvfs->z_teardown_inactive_lock));

	sa_handle_destroy(zp->z_sa_hdl);
	zp->z_sa_hdl = NULL;
}

#ifdef __FreeBSD__
/*
 * Dispose of a partially-constructed vnode on the error path.
 */
static void
zfs_vnode_forget(vnode_t *vp)
{

	/* copied from insmntque_stddtr */
	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}
#endif /* __FreeBSD__ */

/*
 * Construct a new znode/vnode and initialize.
 *
 * This does not do a call to dmu_set_user() that is
 * up to the caller to do, in case you don't want to
 * return the znode
 *
 * On NetBSD the caller supplies the (vcache-allocated) vnode; elsewhere
 * one is allocated here.  Returns NULL on failure (vnode allocation or
 * SA bulk lookup error), with the znode returned to the cache.
 */
static znode_t *
#ifdef __NetBSD__
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl, vnode_t *vp)
#else
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
    dmu_object_type_t obj_type, sa_handle_t *hdl)
#endif
{
	znode_t *zp;
#ifndef __NetBSD__
	vnode_t *vp;
#endif
	uint64_t mode;
	uint64_t parent;
	sa_bulk_attr_t bulk[9];
	int count = 0;
	int error;

	zp = kmem_cache_alloc(znode_cache, KM_SLEEP);

#ifndef __NetBSD__
	KASSERT(curthread->td_vp_reserv > 0,
	    ("zfs_znode_alloc: getnewvnode without any vnodes reserved"));
	error = getnewvnode("zfs", zfsvfs->z_parent->z_vfs, &zfs_vnodeops, &vp);
	if (error != 0) {
		kmem_cache_free(znode_cache, zp);
		return (NULL);
	}
#endif
	zp->z_vnode = vp;
	vp->v_data = zp;

	ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs));
	zp->z_moved = 0;

	/*
	 * Defer setting z_zfsvfs until the znode is ready to be a candidate for
	 * the zfs_znode_move() callback.
	 */
	zp->z_sa_hdl = NULL;
	zp->z_unlinked = 0;
	zp->z_atime_dirty = 0;
	zp->z_mapcnt = 0;
	zp->z_id = db->db_object;
	zp->z_blksz = blksz;
	zp->z_seq = 0x7A4653;	/* arbitrary non-zero starting sequence */
	zp->z_sync_cnt = 0;

#ifdef __NetBSD__
	vp->v_op = zfs_vnodeop_p;
	vp->v_tag = VT_ZFS;
	zp->z_lockf = NULL;
#endif

	vp = ZTOV(zp);

	zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);

	/* Pull the cached attributes out of the SA layer in one bulk call. */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &zp->z_gen, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL, &parent, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, 8);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, 8);

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || zp->z_gen == 0) {
		if (hdl == NULL)
			sa_handle_destroy(zp->z_sa_hdl);
#ifndef __NetBSD__
		zfs_vnode_forget(vp);
#endif
		zp->z_vnode = NULL;
		kmem_cache_free(znode_cache, zp);
		return (NULL);
	}

	zp->z_mode = mode;

	vp->v_type = IFTOVT((mode_t)mode);

	switch (vp->v_type) {
	case VDIR:
		zp->z_zn_prefetch = B_TRUE; /* z_prefetch default is enabled */
		break;
#if defined(illumos) || defined(__NetBSD__)
	case VBLK:
	case VCHR:
		{
			uint64_t rdev;
			VERIFY(sa_lookup(zp->z_sa_hdl, SA_ZPL_RDEV(zfsvfs),
			    &rdev, sizeof (rdev)) == 0);

#ifdef illumos
			vp->v_rdev = zfs_cmpldev(rdev);
#else
			vp->v_op = zfs_specop_p;
			spec_node_init(vp, zfs_cmpldev(rdev));
#endif
		}
		break;
#endif
	case VFIFO:
#ifdef __NetBSD__
		vp->v_op = zfs_fifoop_p;
		break;
#else /* __NetBSD__ */
#ifdef illumos
	case VSOCK:
	case VDOOR:
#endif
		vp->v_op = &zfs_fifoops;
		break;
	case VREG:
		if (parent == zfsvfs->z_shares_dir) {
			ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
			vp->v_op = &zfs_shareops;
		}
		break;
#ifdef illumos
	case VLNK:
		vn_setops(vp, zfs_symvnodeops);
		break;
	default:
		vn_setops(vp, zfs_evnodeops);
		break;
#endif
#endif /* __NetBSD__ */
	}

#ifdef __NetBSD__
	extern const struct genfs_ops zfs_genfsops;
	genfs_node_init(vp, &zfs_genfsops);
	uvm_vnp_setsize(vp, zp->z_size);
#endif

	mutex_enter(&zfsvfs->z_znodes_lock);
	list_insert_tail(&zfsvfs->z_all_znodes, zp);
	membar_producer();
	/*
	 * Everything else must be valid before assigning z_zfsvfs makes the
	 * znode eligible for zfs_znode_move().
	 */
	zp->z_zfsvfs = zfsvfs;
	mutex_exit(&zfsvfs->z_znodes_lock);

#ifndef __NetBSD__
	/*
	 * Acquire vnode lock before making it available to the world.
	 */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(vp);
	if (vp->v_type != VFIFO)
		VN_LOCK_ASHARE(vp);
#endif

#if defined(illumos) || defined(__NetBSD__)
	VFS_HOLD(zfsvfs->z_vfs);
#endif
	return (zp);
}

/* Zero-filled templates used when writing old-format (DMU_OT_ZNODE) nodes. */
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
 * Create a new DMU object to hold a zfs znode.
 *
 * IN:	dzp	- parent directory for new znode
 *	vap	- file attributes for new znode
 *	tx	- dmu transaction id for zap operations
 *	cr	- credentials of caller
 *	flag	- flags:
 *		  IS_ROOT_NODE	- new object will be root
 *		  IS_XATTR	- new object is an attribute
 *	bonuslen - length of bonus buffer
 *	setaclp  - File/Dir initial ACL
 *	fuidp	 - Tracks fuid allocation.
806 * 807 * OUT: zpp - allocated znode 808 * 809 */ 810 #ifdef __NetBSD__ 811 struct zfs_newvnode_args { 812 dmu_tx_t *tx; 813 uint_t flag; 814 zfs_acl_ids_t *acl_ids; 815 }; 816 817 static void 818 zfs_mknode1(znode_t *, vattr_t *, dmu_tx_t *, cred_t *, 819 uint_t, znode_t **, zfs_acl_ids_t *, vnode_t *); 820 821 int 822 zfs_loadvnode(struct mount *mp, struct vnode *vp, 823 const void *key, size_t key_len, const void **new_key) 824 { 825 int err, blksz; 826 uint64_t obj_num; 827 zfsvfs_t *zfsvfs; 828 dmu_buf_t *db; 829 dmu_object_info_t doi; 830 dmu_object_type_t obj_type; 831 sa_handle_t *hdl; 832 znode_t *zp; 833 834 if (key_len != sizeof(obj_num)) 835 return zfsctl_loadvnode(mp, vp, key, key_len, new_key); 836 837 memcpy(&obj_num, key, key_len); 838 839 zfsvfs = mp->mnt_data; 840 841 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num); 842 843 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db); 844 if (err) { 845 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 846 return (SET_ERROR(err)); 847 } 848 849 dmu_object_info_from_db(db, &doi); 850 if (doi.doi_bonus_type != DMU_OT_SA && 851 (doi.doi_bonus_type != DMU_OT_ZNODE || 852 (doi.doi_bonus_type == DMU_OT_ZNODE && 853 doi.doi_bonus_size < sizeof (znode_phys_t)))) { 854 sa_buf_rele(db, NULL); 855 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 856 return (SET_ERROR(EINVAL)); 857 } 858 blksz = doi.doi_data_block_size; 859 obj_type = doi.doi_bonus_type; 860 hdl = dmu_buf_get_user(db); 861 862 if (hdl != NULL) { 863 sa_buf_rele(db, NULL); 864 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 865 return (SET_ERROR(ENOENT)); 866 } 867 868 zp = zfs_znode_alloc(zfsvfs, db, blksz, obj_type, hdl, vp); 869 if (zp == NULL) { 870 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 871 return (SET_ERROR(ENOENT)); 872 } 873 ASSERT(zp == VTOZ(vp)); 874 875 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 876 877 *new_key = &zp->z_id; 878 879 return 0; 880 } 881 882 int 883 zfs_newvnode(struct mount *mp, vnode_t *dvp, vnode_t *vp, vattr_t *vap, 884 cred_t *cr, void *extra, size_t *key_len, const void 
**new_key) 885 { 886 struct zfs_newvnode_args *args = extra; 887 znode_t *zp, *dzp = VTOZ(dvp); 888 dmu_tx_t *tx = args->tx; 889 uint_t flag = args->flag; 890 zfs_acl_ids_t *acl_ids = args->acl_ids; 891 892 zfs_mknode1(dzp, vap, tx, cr, flag, &zp, acl_ids, vp); 893 ASSERT(zp == VTOZ(vp)); 894 *key_len = sizeof(zp->z_id); 895 *new_key = &zp->z_id; 896 897 return 0; 898 } 899 900 void 901 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 902 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids) 903 { 904 vnode_t *vp, *dvp = ZTOV(dzp); 905 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 906 struct zfs_newvnode_args args = { tx, flag, acl_ids }; 907 908 if (flag & IS_ROOT_NODE) 909 return zfs_mknode1(dzp, vap, tx, cr, flag, zpp, acl_ids, NULL); 910 911 VERIFY(vcache_new(zfsvfs->z_vfs, dvp, vap, cr, &args, &vp) == 0); 912 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 913 *zpp = VTOZ(vp); 914 } 915 916 static void 917 zfs_mknode1(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 918 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids, vnode_t *vp) 919 #else 920 void 921 zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, 922 uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids) 923 #endif 924 { 925 uint64_t crtime[2], atime[2], mtime[2], ctime[2]; 926 uint64_t mode, size, links, parent, pflags; 927 uint64_t dzp_pflags = 0; 928 uint64_t rdev = 0; 929 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 930 dmu_buf_t *db; 931 timestruc_t now; 932 uint64_t gen, obj; 933 int err; 934 int bonuslen; 935 sa_handle_t *sa_hdl; 936 dmu_object_type_t obj_type; 937 sa_bulk_attr_t sa_attrs[ZPL_END]; 938 int cnt = 0; 939 zfs_acl_locator_cb_t locate = { 0 }; 940 941 ASSERT(vap && (vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE)); 942 943 if (zfsvfs->z_replay) { 944 obj = vap->va_nodeid; 945 now = vap->va_ctime; /* see zfs_replay_create() */ 946 gen = vap->va_nblocks; /* ditto */ 947 } else { 948 obj = 0; 949 vfs_timestamp(&now); 950 gen = dmu_tx_get_txg(tx); 951 } 952 953 obj_type = 
zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE; 954 bonuslen = (obj_type == DMU_OT_SA) ? 955 DN_MAX_BONUSLEN : ZFS_OLD_ZNODE_PHYS_SIZE; 956 957 /* 958 * Create a new DMU object. 959 */ 960 /* 961 * There's currently no mechanism for pre-reading the blocks that will 962 * be needed to allocate a new object, so we accept the small chance 963 * that there will be an i/o error and we will fail one of the 964 * assertions below. 965 */ 966 if (vap->va_type == VDIR) { 967 if (zfsvfs->z_replay) { 968 VERIFY0(zap_create_claim_norm(zfsvfs->z_os, obj, 969 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, 970 obj_type, bonuslen, tx)); 971 } else { 972 obj = zap_create_norm(zfsvfs->z_os, 973 zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS, 974 obj_type, bonuslen, tx); 975 } 976 } else { 977 if (zfsvfs->z_replay) { 978 VERIFY0(dmu_object_claim(zfsvfs->z_os, obj, 979 DMU_OT_PLAIN_FILE_CONTENTS, 0, 980 obj_type, bonuslen, tx)); 981 } else { 982 obj = dmu_object_alloc(zfsvfs->z_os, 983 DMU_OT_PLAIN_FILE_CONTENTS, 0, 984 obj_type, bonuslen, tx); 985 } 986 } 987 988 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj); 989 VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db)); 990 991 /* 992 * If this is the root, fix up the half-initialized parent pointer 993 * to reference the just-allocated physical data area. 994 */ 995 if (flag & IS_ROOT_NODE) { 996 dzp->z_id = obj; 997 } else { 998 dzp_pflags = dzp->z_pflags; 999 } 1000 1001 /* 1002 * If parent is an xattr, so am I. 1003 */ 1004 if (dzp_pflags & ZFS_XATTR) { 1005 flag |= IS_XATTR; 1006 } 1007 1008 if (zfsvfs->z_use_fuids) 1009 pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED; 1010 else 1011 pflags = 0; 1012 1013 if (vap->va_type == VDIR) { 1014 size = 2; /* contents ("." and "..") */ 1015 links = (flag & (IS_ROOT_NODE | IS_XATTR)) ? 
2 : 1; 1016 } else { 1017 size = links = 0; 1018 } 1019 1020 if (vap->va_type == VBLK || vap->va_type == VCHR) { 1021 rdev = zfs_expldev(vap->va_rdev); 1022 } 1023 1024 parent = dzp->z_id; 1025 mode = acl_ids->z_mode; 1026 if (flag & IS_XATTR) 1027 pflags |= ZFS_XATTR; 1028 1029 /* 1030 * No execs denied will be deterimed when zfs_mode_compute() is called. 1031 */ 1032 pflags |= acl_ids->z_aclp->z_hints & 1033 (ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT| 1034 ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED); 1035 1036 ZFS_TIME_ENCODE(&now, crtime); 1037 ZFS_TIME_ENCODE(&now, ctime); 1038 1039 if (vap->va_mask & AT_ATIME) { 1040 ZFS_TIME_ENCODE(&vap->va_atime, atime); 1041 } else { 1042 ZFS_TIME_ENCODE(&now, atime); 1043 } 1044 1045 if (vap->va_mask & AT_MTIME) { 1046 ZFS_TIME_ENCODE(&vap->va_mtime, mtime); 1047 } else { 1048 ZFS_TIME_ENCODE(&now, mtime); 1049 } 1050 1051 /* Now add in all of the "SA" attributes */ 1052 VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED, 1053 &sa_hdl)); 1054 1055 /* 1056 * Setup the array of attributes to be replaced/set on the new file 1057 * 1058 * order for DMU_OT_ZNODE is critical since it needs to be constructed 1059 * in the old znode_phys_t format. 
Don't change this ordering 1060 */ 1061 1062 if (obj_type == DMU_OT_ZNODE) { 1063 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), 1064 NULL, &atime, 16); 1065 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), 1066 NULL, &mtime, 16); 1067 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), 1068 NULL, &ctime, 16); 1069 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), 1070 NULL, &crtime, 16); 1071 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), 1072 NULL, &gen, 8); 1073 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), 1074 NULL, &mode, 8); 1075 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), 1076 NULL, &size, 8); 1077 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), 1078 NULL, &parent, 8); 1079 } else { 1080 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs), 1081 NULL, &mode, 8); 1082 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs), 1083 NULL, &size, 8); 1084 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs), 1085 NULL, &gen, 8); 1086 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, 1087 &acl_ids->z_fuid, 8); 1088 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, 1089 &acl_ids->z_fgid, 8); 1090 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs), 1091 NULL, &parent, 8); 1092 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), 1093 NULL, &pflags, 8); 1094 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs), 1095 NULL, &atime, 16); 1096 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs), 1097 NULL, &mtime, 16); 1098 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs), 1099 NULL, &ctime, 16); 1100 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs), 1101 NULL, &crtime, 16); 1102 } 1103 1104 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8); 1105 1106 if (obj_type == DMU_OT_ZNODE) { 1107 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL, 1108 &empty_xattr, 8); 1109 } 1110 if (obj_type == DMU_OT_ZNODE || 1111 (vap->va_type == VBLK || vap->va_type == VCHR)) { 1112 
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs), 1113 NULL, &rdev, 8); 1114 1115 } 1116 if (obj_type == DMU_OT_ZNODE) { 1117 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs), 1118 NULL, &pflags, 8); 1119 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL, 1120 &acl_ids->z_fuid, 8); 1121 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL, 1122 &acl_ids->z_fgid, 8); 1123 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad, 1124 sizeof (uint64_t) * 4); 1125 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL, 1126 &acl_phys, sizeof (zfs_acl_phys_t)); 1127 } else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) { 1128 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL, 1129 &acl_ids->z_aclp->z_acl_count, 8); 1130 locate.cb_aclp = acl_ids->z_aclp; 1131 SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs), 1132 zfs_acl_data_locator, &locate, 1133 acl_ids->z_aclp->z_acl_bytes); 1134 mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags, 1135 acl_ids->z_fuid, acl_ids->z_fgid); 1136 } 1137 1138 VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0); 1139 1140 if (!(flag & IS_ROOT_NODE)) { 1141 #ifdef __NetBSD__ 1142 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl, vp); 1143 #else 1144 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl); 1145 #endif 1146 ASSERT(*zpp != NULL); 1147 } else { 1148 /* 1149 * If we are creating the root node, the "parent" we 1150 * passed in is the znode for the root. 
1151 */ 1152 *zpp = dzp; 1153 1154 (*zpp)->z_sa_hdl = sa_hdl; 1155 } 1156 1157 (*zpp)->z_pflags = pflags; 1158 (*zpp)->z_mode = mode; 1159 1160 if (vap->va_mask & AT_XVATTR) 1161 zfs_xvattr_set(*zpp, (xvattr_t *)vap, tx); 1162 1163 if (obj_type == DMU_OT_ZNODE || 1164 acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) { 1165 VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx)); 1166 } 1167 #ifndef __NetBSD__ 1168 if (!(flag & IS_ROOT_NODE)) { 1169 vnode_t *vp; 1170 1171 vp = ZTOV(*zpp); 1172 vp->v_vflag |= VV_FORCEINSMQ; 1173 err = insmntque(vp, zfsvfs->z_vfs); 1174 vp->v_vflag &= ~VV_FORCEINSMQ; 1175 KASSERT(err == 0, ("insmntque() failed: error %d", err)); 1176 } 1177 #endif 1178 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj); 1179 } 1180 1181 /* 1182 * Update in-core attributes. It is assumed the caller will be doing an 1183 * sa_bulk_update to push the changes out. 1184 */ 1185 void 1186 zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx) 1187 { 1188 xoptattr_t *xoap; 1189 1190 xoap = xva_getxoptattr(xvap); 1191 ASSERT(xoap); 1192 1193 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 1194 uint64_t times[2]; 1195 ZFS_TIME_ENCODE(&xoap->xoa_createtime, times); 1196 (void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(zp->z_zfsvfs), 1197 ×, sizeof (times), tx); 1198 XVA_SET_RTN(xvap, XAT_CREATETIME); 1199 } 1200 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 1201 ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly, 1202 zp->z_pflags, tx); 1203 XVA_SET_RTN(xvap, XAT_READONLY); 1204 } 1205 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 1206 ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden, 1207 zp->z_pflags, tx); 1208 XVA_SET_RTN(xvap, XAT_HIDDEN); 1209 } 1210 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 1211 ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system, 1212 zp->z_pflags, tx); 1213 XVA_SET_RTN(xvap, XAT_SYSTEM); 1214 } 1215 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 1216 ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive, 1217 zp->z_pflags, tx); 1218 XVA_SET_RTN(xvap, XAT_ARCHIVE); 1219 } 1220 if 
(XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 1221 ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable, 1222 zp->z_pflags, tx); 1223 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 1224 } 1225 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 1226 ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink, 1227 zp->z_pflags, tx); 1228 XVA_SET_RTN(xvap, XAT_NOUNLINK); 1229 } 1230 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 1231 ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly, 1232 zp->z_pflags, tx); 1233 XVA_SET_RTN(xvap, XAT_APPENDONLY); 1234 } 1235 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 1236 ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump, 1237 zp->z_pflags, tx); 1238 XVA_SET_RTN(xvap, XAT_NODUMP); 1239 } 1240 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 1241 ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque, 1242 zp->z_pflags, tx); 1243 XVA_SET_RTN(xvap, XAT_OPAQUE); 1244 } 1245 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 1246 ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED, 1247 xoap->xoa_av_quarantined, zp->z_pflags, tx); 1248 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 1249 } 1250 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 1251 ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified, 1252 zp->z_pflags, tx); 1253 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 1254 } 1255 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { 1256 zfs_sa_set_scanstamp(zp, xvap, tx); 1257 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); 1258 } 1259 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 1260 ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse, 1261 zp->z_pflags, tx); 1262 XVA_SET_RTN(xvap, XAT_REPARSE); 1263 } 1264 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) { 1265 ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline, 1266 zp->z_pflags, tx); 1267 XVA_SET_RTN(xvap, XAT_OFFLINE); 1268 } 1269 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) { 1270 ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse, 1271 zp->z_pflags, tx); 1272 XVA_SET_RTN(xvap, XAT_SPARSE); 1273 } 1274 } 1275 1276 #ifdef __NetBSD__ 1277 1278 int 1279 zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) 1280 { 1281 int error; 1282 
	vnode_t *vp;

	error = vcache_get(zfsvfs->z_vfs, &obj_num, sizeof(obj_num), &vp);
	if (error == 0)
		*zpp = VTOZ(vp);

	return error;
}

/*
 * Look up an already-resident znode for the cleaner without going
 * through the vnode cache.  The SA handle must already exist
 * (asserted below) -- NOTE(review): presumably the caller guarantees
 * the znode stays alive across this call; confirm against the NetBSD
 * cleaner path.
 */
int
zfs_zget_cleaner(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
	dmu_buf_t *db;
	sa_handle_t *hdl;
	dmu_object_info_t doi;
	znode_t *zp;
	int err;

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(err));
	}

	/* Reject objects whose bonus buffer cannot carry ZPL attributes. */
	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EINVAL));
	}
	hdl = dmu_buf_get_user(db);
	ASSERT3P(hdl, !=, NULL);
	zp = sa_get_userdata(hdl);
	ASSERT3U(zp->z_id, ==, obj_num);
	sa_buf_rele(db, NULL);
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
	*zpp = zp;
	return (0);
}

#else /* __NetBSD__ */

/*
 * Get a znode by object number, creating the znode/vnode pair if it
 * is not already cached in the object's SA user data.
 */
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t *db;
	znode_t *zp;
	vnode_t *vp;
	sa_handle_t *hdl;
	struct thread *td;
	int locked;
	int err;

	td = curthread;
	/* Reserve a vnode up front so allocation cannot fail later. */
	getnewvnode_reserve(1);
again:
	*zpp = NULL;
	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		getnewvnode_drop_reserve();
		return (err);
	}

	/* Reject objects whose bonus buffer cannot carry ZPL attributes. */
	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
#ifdef __FreeBSD__
		getnewvnode_drop_reserve();
#endif
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find a sa handle that doesn't
		 * know about the znode.
		 */
		ASSERT3P(zp, !=, NULL);
		ASSERT3U(zp->z_id, ==, obj_num);
		*zpp = zp;
		vp = ZTOV(zp);

		/* Don't let the vnode disappear after ZFS_OBJ_HOLD_EXIT. */
		VN_HOLD(vp);

		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);

		locked = VOP_ISLOCKED(vp);
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_DOOMED) != 0 &&
		    locked != LK_EXCLUSIVE) {
			/*
			 * The vnode is doomed and this thread doesn't
			 * hold the exclusive lock on it, so the vnode
			 * must be being reclaimed by another thread.
			 * Otherwise the doomed vnode is being reclaimed
			 * by this thread and zfs_zget is called from
			 * ZIL internals.
			 */
			VI_UNLOCK(vp);

			/*
			 * XXX vrele() locks the vnode when the last reference
			 * is dropped.  Although in this case the vnode is
			 * doomed / dead and so no inactivation is required,
			 * the vnode lock is still acquired.  That could result
			 * in a LOR with z_teardown_lock if another thread holds
			 * the vnode's lock and tries to take z_teardown_lock.
			 * But that is only possible if the other thread performs
			 * a ZFS vnode operation on the vnode.  That either
			 * should not happen if the vnode is dead or the thread
			 * should also have a reference to the vnode and thus
			 * our reference is not last.
			 */
			VN_RELE(vp);
			goto again;
		}
		VI_UNLOCK(vp);
		getnewvnode_drop_reserve();
		return (0);
	}

	/*
	 * Not found create new znode/vnode
	 * but only if file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress.  This is checked for in zfs_znode_alloc()
	 *
	 * if zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	if (err == 0) {
		vnode_t *vp = ZTOV(zp);

		/* Attach the new vnode to the mount; undo on failure. */
		err = insmntque(vp, zfsvfs->z_vfs);
		if (err == 0) {
			vp->v_hash = obj_num;
			VOP_UNLOCK(vp, 0);
		} else {
			zp->z_vnode = NULL;
			zfs_znode_dmu_fini(zp);
			zfs_znode_free(zp);
			*zpp = NULL;
		}
	}
	ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
	getnewvnode_drop_reserve();
	return (err);
}

#endif /* __NetBSD__ */

/*
 * Re-attach an existing in-core znode to its on-disk object and
 * reload the cached attributes.  The caller must already have torn
 * down the SA handle (asserted below).
 */
int
zfs_rezget(znode_t *zp)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_object_info_t doi;
	dmu_buf_t *db;
	vnode_t *vp;
	uint64_t obj_num = zp->z_id;
	uint64_t mode, size;
	sa_bulk_attr_t bulk[8];
	int err;
	int count = 0;
	uint64_t gen;

	ZFS_OBJ_HOLD_ENTER(zfsvfs, obj_num);

	/* Drop any stale cached ACL before reloading attributes. */
	mutex_enter(&zp->z_acl_lock);
	if (zp->z_acl_cached) {
		zfs_acl_free(zp->z_acl_cached);
		zp->z_acl_cached = NULL;
	}

	mutex_exit(&zp->z_acl_lock);
	ASSERT(zp->z_sa_hdl == NULL);
	err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (err);
	}

	/* Reject objects whose bonus buffer cannot carry ZPL attributes. */
	dmu_object_info_from_db(db, &doi);
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EINVAL));
	}

	zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
	size = zp->z_size;

	/* reload cached values */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
	    &gen, sizeof (gen));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &zp->z_size, sizeof (zp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
	    &zp->z_atime, sizeof (zp->z_atime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
	    &zp->z_uid, sizeof (zp->z_uid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
	    &zp->z_gid, sizeof (zp->z_gid));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
	    &mode, sizeof (mode));

	if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	zp->z_mode = mode;

	/* Generation mismatch means this is a different object now. */
	if (gen != zp->z_gen) {
		zfs_znode_dmu_fini(zp);
		ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
		return (SET_ERROR(EIO));
	}

	/*
	 * It is highly improbable but still quite possible that two
	 * objects in different datasets are created with the same
	 * object numbers and in transaction groups with the same
	 * numbers.  znodes corresponding to those objects would
	 * have the same z_id and z_gen, but their other attributes
	 * may be different.
	 * zfs recv -F may replace one of such objects with the other.
	 * As a result file properties recorded in the replaced
	 * object's vnode may no longer match the received object's
	 * properties.  At present the only cached property is the
	 * files type recorded in v_type.
	 * So, handle this case by leaving the old vnode and znode
	 * disassociated from the actual object.
A new vnode and a 1550 * znode will be created if the object is accessed 1551 * (e.g. via a look-up). The old vnode and znode will be 1552 * recycled when the last vnode reference is dropped. 1553 */ 1554 vp = ZTOV(zp); 1555 if (vp->v_type != IFTOVT((mode_t)zp->z_mode)) { 1556 zfs_znode_dmu_fini(zp); 1557 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1558 return (EIO); 1559 } 1560 1561 zp->z_unlinked = (zp->z_links == 0); 1562 zp->z_blksz = doi.doi_data_block_size; 1563 #ifdef __NetBSD__ 1564 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); 1565 (void)VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES|PGO_FREE|PGO_SYNCIO); 1566 #else 1567 vn_pages_remove(vp, 0, 0); 1568 #endif 1569 if (zp->z_size != size) 1570 vnode_pager_setsize(vp, zp->z_size); 1571 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num); 1572 1573 return (0); 1574 } 1575 1576 void 1577 zfs_znode_delete(znode_t *zp, dmu_tx_t *tx) 1578 { 1579 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1580 objset_t *os = zfsvfs->z_os; 1581 uint64_t obj = zp->z_id; 1582 uint64_t acl_obj = zfs_external_acl(zp); 1583 1584 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj); 1585 if (acl_obj) { 1586 VERIFY(!zp->z_is_sa); 1587 VERIFY(0 == dmu_object_free(os, acl_obj, tx)); 1588 } 1589 VERIFY(0 == dmu_object_free(os, obj, tx)); 1590 zfs_znode_dmu_fini(zp); 1591 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj); 1592 zfs_znode_free(zp); 1593 } 1594 1595 void 1596 zfs_zinactive(znode_t *zp) 1597 { 1598 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1599 uint64_t z_id = zp->z_id; 1600 1601 ASSERT(zp->z_sa_hdl); 1602 1603 /* 1604 * Don't allow a zfs_zget() while were trying to release this znode 1605 */ 1606 ZFS_OBJ_HOLD_ENTER(zfsvfs, z_id); 1607 1608 /* 1609 * If this was the last reference to a file with no links, 1610 * remove the file from the file system. 
1611 */ 1612 if (zp->z_unlinked) { 1613 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id); 1614 zfs_rmnode(zp); 1615 return; 1616 } 1617 1618 zfs_znode_dmu_fini(zp); 1619 ZFS_OBJ_HOLD_EXIT(zfsvfs, z_id); 1620 zfs_znode_free(zp); 1621 } 1622 1623 void 1624 zfs_znode_free(znode_t *zp) 1625 { 1626 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1627 1628 #ifdef __NetBSD__ 1629 struct vnode *vp = ZTOV(zp); 1630 1631 genfs_node_destroy(vp); 1632 1633 /* 1634 * Interlock with zfs_sync(). 1635 */ 1636 mutex_enter(vp->v_interlock); 1637 vp->v_data = NULL; 1638 mutex_exit(vp->v_interlock); 1639 #endif 1640 1641 ASSERT(zp->z_sa_hdl == NULL); 1642 zp->z_vnode = NULL; 1643 mutex_enter(&zfsvfs->z_znodes_lock); 1644 POINTER_INVALIDATE(&zp->z_zfsvfs); 1645 list_remove(&zfsvfs->z_all_znodes, zp); 1646 mutex_exit(&zfsvfs->z_znodes_lock); 1647 1648 if (zp->z_acl_cached) { 1649 zfs_acl_free(zp->z_acl_cached); 1650 zp->z_acl_cached = NULL; 1651 } 1652 1653 kmem_cache_free(znode_cache, zp); 1654 1655 #ifdef illumos 1656 VFS_RELE(zfsvfs->z_vfs); 1657 #endif 1658 } 1659 1660 void 1661 zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2], 1662 uint64_t ctime[2], boolean_t have_tx) 1663 { 1664 timestruc_t now; 1665 1666 vfs_timestamp(&now); 1667 1668 if (have_tx) { /* will sa_bulk_update happen really soon? */ 1669 zp->z_atime_dirty = 0; 1670 zp->z_seq++; 1671 } else { 1672 zp->z_atime_dirty = 1; 1673 } 1674 1675 if (flag & AT_ATIME) { 1676 ZFS_TIME_ENCODE(&now, zp->z_atime); 1677 } 1678 1679 if (flag & AT_MTIME) { 1680 ZFS_TIME_ENCODE(&now, mtime); 1681 if (zp->z_zfsvfs->z_use_fuids) { 1682 zp->z_pflags |= (ZFS_ARCHIVE | 1683 ZFS_AV_MODIFIED); 1684 } 1685 } 1686 1687 if (flag & AT_CTIME) { 1688 ZFS_TIME_ENCODE(&now, ctime); 1689 if (zp->z_zfsvfs->z_use_fuids) 1690 zp->z_pflags |= ZFS_ARCHIVE; 1691 } 1692 } 1693 1694 /* 1695 * Grow the block size for a file. 1696 * 1697 * IN: zp - znode of file to free data in. 1698 * size - requested block size 1699 * tx - open transaction. 
 *
 * NOTE: this function assumes that the znode is write locked.
 */
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
	int error;
	u_longlong_t dummy;

	if (size <= zp->z_blksz)
		return;
	/*
	 * If the file size is already greater than the current blocksize,
	 * we will not grow.  If there is more than one block in a file,
	 * the blocksize cannot change.
	 */
	if (zp->z_blksz && zp->z_size > zp->z_blksz)
		return;

	error = dmu_object_set_blocksize(zp->z_zfsvfs->z_os, zp->z_id,
	    size, 0, tx);

	/* ENOTSUP means the DMU refused the change; keep the old size. */
	if (error == ENOTSUP)
		return;
	ASSERT0(error);

	/* What blocksize did we actually get? */
	dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}

#ifdef illumos
/*
 * This is a dummy interface used when pvn_vplist_dirty() should *not*
 * be calling back into the fs for a putpage().  E.g.: when truncating
 * a file, the pages being "thrown away" don't need to be written out.
 */
/* ARGSUSED */
static int
zfs_no_putpage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
    int flags, cred_t *cr)
{
	ASSERT(0);
	return (0);
}
#endif

/*
 * Increase the file length
 *
 * IN:	zp	- znode of file to free data in.
 *	end	- new end-of-file
 *
 * RETURN:	0 on success, error code on failure
 */
static int
zfs_extend(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	dmu_tx_t *tx;
	rl_t *rl;
	uint64_t newblksz;
	int error;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end <= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	if (end > zp->z_blksz &&
	    (!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
		/*
		 * We are growing the file past the current block size.
		 */
		if (zp->z_blksz > zp->z_zfsvfs->z_max_blksz) {
			/*
			 * File's blocksize is already larger than the
			 * "recordsize" property.  Only let it grow to
			 * the next power of 2.
			 */
			ASSERT(!ISP2(zp->z_blksz));
			newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
		} else {
			newblksz = MIN(end, zp->z_zfsvfs->z_max_blksz);
		}
		dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
	} else {
		newblksz = 0;
	}

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	if (newblksz)
		zfs_grow_blocksize(zp, newblksz, tx);

	zp->z_size = end;

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
	    &zp->z_size, sizeof (zp->z_size), tx));

	/* Tell the VM system about the new EOF before unlocking. */
	vnode_pager_setsize(ZTOV(zp), end);

	zfs_range_unlock(rl);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Free space in a file.
 *
 * IN:	zp	- znode of file to free data in.
 *	off	- start of section to free.
 *	len	- length of section to free.
 *
 * RETURN:	0 on success, error code on failure
 */
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	rl_t *rl;
	int error;

	/*
	 * Lock the range being freed.
	 */
	rl = zfs_range_lock(zp, off, len, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (off >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	/* Clamp the range so it does not extend past EOF. */
	if (off + len > zp->z_size)
		len = zp->z_size - off;

	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);

	if (error == 0) {
		/*
		 * In FreeBSD we cannot free block in the middle of a file,
		 * but only at the end of a file, so this code path should
		 * never happen.
		 */
		vnode_pager_setsize(ZTOV(zp), off);
	}

	zfs_range_unlock(rl);

	return (error);
}

/*
 * Truncate a file
 *
 * IN:	zp	- znode of file to free data in.
 *	end	- new end-of-file.
 *
 * RETURN:	0 on success, error code on failure
 */
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	dmu_tx_t *tx;
	rl_t *rl;
	int error;
	sa_bulk_attr_t bulk[2];
	int count = 0;

	/*
	 * We will change zp_size, lock the whole file.
	 */
	rl = zfs_range_lock(zp, 0, UINT64_MAX, RL_WRITER);

	/*
	 * Nothing to do if file already at desired length.
	 */
	if (end >= zp->z_size) {
		zfs_range_unlock(rl);
		return (0);
	}

	/* Free everything from the new EOF to the end of the object. */
	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end, -1);
	if (error) {
		zfs_range_unlock(rl);
		return (error);
	}
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (error);
	}

	zp->z_size = end;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &zp->z_size, sizeof (zp->z_size));

	if (end == 0) {
		/* An empty file can no longer be sparse. */
		zp->z_pflags &= ~ZFS_SPARSE;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
		    NULL, &zp->z_pflags, 8);
	}
	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);

	dmu_tx_commit(tx);

	/*
	 * Clear any mapped pages in the truncated region.  This has to
	 * happen outside of the transaction to avoid the possibility of
	 * a deadlock with someone trying to push a page that we are
	 * about to invalidate.
	 */
	vnode_pager_setsize(vp, end);

	zfs_range_unlock(rl);

	return (0);
}

/*
 * Free space in a file
 *
 * IN:	zp	- znode of file to free data in.
 *	off	- start of range
 *	len	- end of range (0 => EOF)
 *	flag	- current file open mode flags.
 *	log	- TRUE if this action should be logged
 *
 * RETURN:	0 on success, error code on failure
 */
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
	vnode_t *vp = ZTOV(zp);
	dmu_tx_t *tx;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	zilog_t *zilog = zfsvfs->z_log;
	uint64_t mode;
	uint64_t mtime[2], ctime[2];
	sa_bulk_attr_t bulk[3];
	int count = 0;
	int error;

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
	    sizeof (mode))) != 0)
		return (error);

	/* Starting past EOF means this is really an extend, not a free. */
	if (off > zp->z_size) {
		error = zfs_extend(zp, off+len);
		if (error == 0 && log)
			goto log;
		else
			return (error);
	}

	/*
	 * Check for any locks in the region to be freed.
	 */

	if (MANDLOCK(vp, (mode_t)mode)) {
		uint64_t length = (len ? len : zp->z_size - off);
		if (error = chklock(vp, FWRITE, off, length, flag, NULL))
			return (error);
	}

	if (len == 0) {
		error = zfs_trunc(zp, off);
	} else {
		if ((error = zfs_free_range(zp, off, len)) == 0 &&
		    off + len > zp->z_size)
			error = zfs_extend(zp, off+len);
	}
	if (error || !log)
		return (error);
log:
	/* Record the timestamp update and the truncate in the ZIL. */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &zp->z_pflags, 8);
	zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);

	dmu_tx_commit(tx);
	return (0);
}

/*
 * Create the on-disk layout for a brand new file system: master node,
 * SA registration, delete queue, and the root directory znode.
 */
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
	uint64_t moid, obj, sa_obj, version;
	uint64_t sense = ZFS_CASE_SENSITIVE;
	uint64_t norm = 0;
	nvpair_t *elem;
	int error;
	int i;
	znode_t *rootzp = NULL;
	zfsvfs_t *zfsvfs;
	vattr_t vattr;
	znode_t *zp;
	zfs_acl_ids_t acl_ids;

	/*
	 * First attempt to create master node.
	 */
	/*
	 * In an empty objset, there are no blocks to read and thus
	 * there can be no i/o errors (which we assert below).
	 */
	moid = MASTER_NODE_OBJ;
	error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	/*
	 * Set starting attributes.
	 */
	version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
	elem = NULL;
	while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
		/* For the moment we expect all zpl props to be uint64_ts */
		uint64_t val;
		char *name;

		ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
		VERIFY(nvpair_value_uint64(elem, &val) == 0);
		name = nvpair_name(elem);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
			if (val < version)
				version = val;
		} else {
			error = zap_update(os, moid, name, 8, 1, &val, tx);
		}
		ASSERT(error == 0);
		if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
			norm = val;
		else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
			sense = val;
	}
	ASSERT(version != 0);
	error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);

	/*
	 * Create zap object used for SA attribute registration
	 */

	if (version >= ZPL_VERSION_SA) {
		sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
		    DMU_OT_NONE, 0, tx);
		error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
		ASSERT(error == 0);
	} else {
		sa_obj = 0;
	}
	/*
	 * Create a delete queue.
	 */
	obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);

	error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
	ASSERT(error == 0);

	/*
	 * Create root znode.  Create minimal znode/vnode/zfsvfs
	 * to allow zfs_mknode to work.
	 */
	VATTR_NULL(&vattr);
	vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
	vattr.va_type = VDIR;
	vattr.va_mode = S_IFDIR|0755;
	vattr.va_uid = crgetuid(cr);
	vattr.va_gid = crgetgid(cr);

	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);

	rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
	ASSERT(!POINTER_IS_VALID(rootzp->z_zfsvfs));
	rootzp->z_moved = 0;
	rootzp->z_unlinked = 0;
	rootzp->z_atime_dirty = 0;
	rootzp->z_is_sa = USE_SA(version, os);

	zfsvfs->z_os = os;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_version = version;
	zfsvfs->z_use_fuids = USE_FUIDS(version, os);
	zfsvfs->z_use_sa = USE_SA(version, os);
	zfsvfs->z_norm = norm;

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zfsvfs->z_attr_table);

	ASSERT(error == 0);

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
		zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));

	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	rootzp->z_zfsvfs = zfsvfs;
	VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
	    cr, NULL, &acl_ids));
	zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
	ASSERT3P(zp, ==, rootzp);
	error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
	ASSERT(error == 0);
	zfs_acl_ids_free(&acl_ids);
	POINTER_INVALIDATE(&rootzp->z_zfsvfs);

	/* The temporary root znode is no longer needed. */
	sa_handle_destroy(rootzp->z_sa_hdl);
	kmem_cache_free(znode_cache, rootzp);

	/*
	 * Create shares directory
	 */

	error = zfs_create_share_dir(zfsvfs, tx);

	ASSERT(error == 0);

	/* Tear down the scaffolding zfsvfs used only for creation. */
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
	mutex_destroy(&zfsvfs->z_znodes_lock);
	kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */

/*
 * Look up the objset's SA attribute registration object (absent on
 * pre-SA pools, in which case sa_obj stays 0) and build the attribute
 * table used by the zfs_obj_to_* helpers below.
 */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
	uint64_t sa_obj = 0;
	int error;

	error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
	if (error != 0 && error != ENOENT)
		return (error);

	error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
	return (error);
}

/*
 * Hold the bonus buffer for 'obj' and create a private SA handle for
 * it.  Fails with ENOTSUP if the object's bonus type cannot carry ZPL
 * attributes.  Release with zfs_release_sa_handle().
 */
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
    dmu_buf_t **db, void *tag)
{
	dmu_object_info_t doi;
	int error;

	if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
		return (error);

	dmu_object_info_from_db(*db, &doi);
	if ((doi.doi_bonus_type != DMU_OT_SA &&
	    doi.doi_bonus_type !=
DMU_OT_ZNODE) || 2200 doi.doi_bonus_type == DMU_OT_ZNODE && 2201 doi.doi_bonus_size < sizeof (znode_phys_t)) { 2202 sa_buf_rele(*db, tag); 2203 return (SET_ERROR(ENOTSUP)); 2204 } 2205 2206 error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp); 2207 if (error != 0) { 2208 sa_buf_rele(*db, tag); 2209 return (error); 2210 } 2211 2212 return (0); 2213 } 2214 2215 void 2216 zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag) 2217 { 2218 sa_handle_destroy(hdl); 2219 sa_buf_rele(db, tag); 2220 } 2221 2222 /* 2223 * Given an object number, return its parent object number and whether 2224 * or not the object is an extended attribute directory. 2225 */ 2226 static int 2227 zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table, 2228 uint64_t *pobjp, int *is_xattrdir) 2229 { 2230 uint64_t parent; 2231 uint64_t pflags; 2232 uint64_t mode; 2233 uint64_t parent_mode; 2234 sa_bulk_attr_t bulk[3]; 2235 sa_handle_t *sa_hdl; 2236 dmu_buf_t *sa_db; 2237 int count = 0; 2238 int error; 2239 2240 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL, 2241 &parent, sizeof (parent)); 2242 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL, 2243 &pflags, sizeof (pflags)); 2244 SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL, 2245 &mode, sizeof (mode)); 2246 2247 if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0) 2248 return (error); 2249 2250 /* 2251 * When a link is removed its parent pointer is not changed and will 2252 * be invalid. There are two cases where a link is removed but the 2253 * file stays around, when it goes to the delete queue and when there 2254 * are additional links. 
 */
	/* Hold the parent's SA handle long enough to read its mode. */
	error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
	if (error != 0)
		return (error);

	error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
	zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	if (error != 0)
		return (error);

	/*
	 * The object is an extended-attribute directory iff its ZFS_XATTR
	 * flag is set and its own mode says it is a directory.
	 */
	*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);

	/*
	 * Extended attributes can be applied to files, directories, etc.
	 * Otherwise the parent must be a directory.
	 */
	if (!*is_xattrdir && !S_ISDIR(parent_mode))
		return (SET_ERROR(EINVAL));

	*pobjp = parent;

	return (0);
}

/*
 * Given an object number, return some zpl level statistics: the mode,
 * generation number, link count and ctime of the object behind 'hdl',
 * fetched with a single bulk SA lookup into 'sb'.
 *
 * Returns 0 on success or the error from sa_bulk_lookup().
 */
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
    zfs_stat_t *sb)
{
	sa_bulk_attr_t bulk[4];
	int count = 0;

	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
	    &sb->zs_mode, sizeof (sb->zs_mode));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
	    &sb->zs_gen, sizeof (sb->zs_gen));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
	    &sb->zs_links, sizeof (sb->zs_links));
	SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
	    &sb->zs_ctime, sizeof (sb->zs_ctime));

	return (sa_bulk_lookup(hdl, bulk, count));
}

/*
 * Build the path of object 'obj' by walking its parent chain upward.
 * Components are written from the right-hand end of 'buf' backwards;
 * once the walk terminates at 'obj''s own root (pobj == obj), the
 * finished string is slid to the front of 'buf' with memmove.
 *
 * 'hdl' is the caller-owned SA handle for 'obj' itself; handles for
 * ancestors are grabbed and released here.  Returns 0 on success, or an
 * error from zfs_obj_to_pobj()/zap_value_search()/zfs_grab_sa_handle().
 */
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
    sa_attr_type_t *sa_table, char *buf, int len)
{
	sa_handle_t *sa_hdl;
	sa_handle_t *prevhdl = NULL;	/* handle grabbed on prior pass */
	dmu_buf_t *prevdb = NULL;
	dmu_buf_t *sa_db = NULL;
	char *path = buf + len - 1;	/* build path right-to-left */
	int error;

	*path = '\0';
	sa_hdl = hdl;

	for (;;) {
		uint64_t pobj;
		char component[MAXNAMELEN + 2];
		size_t complen;
		int is_xattrdir;

		/*
		 * Release the handle grabbed on the previous iteration
		 * now that its name component has been copied out.
		 */
		if (prevdb)
			zfs_release_sa_handle(prevhdl, prevdb, FTAG);

		if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
		    &is_xattrdir)) != 0)
			break;

		/* An object that is its own parent terminates the walk. */
		if (pobj == obj) {
			if (path[0] != '/')
				*--path = '/';
			break;
		}

		component[0] = '/';
		if (is_xattrdir) {
			/* xattr dirs have no directory entry to look up. */
			(void) sprintf(component + 1, "<xattrdir>");
		} else {
			/* Find obj's name in its parent directory's ZAP. */
			error = zap_value_search(osp, pobj, obj,
			    ZFS_DIRENT_OBJ(-1ULL), component + 1);
			if (error != 0)
				break;
		}

		/* Prepend the new component to the path built so far. */
		complen = strlen(component);
		path -= complen;
		ASSERT(path >= buf);
		bcopy(component, path, complen);
		obj = pobj;

		/*
		 * Remember the handle we grabbed ourselves (never the
		 * caller's 'hdl') so the next pass can release it.
		 */
		if (sa_hdl != hdl) {
			prevhdl = sa_hdl;
			prevdb = sa_db;
		}
		error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
		if (error != 0) {
			/*
			 * Grab failed: fall back to the previous handle so
			 * the cleanup below releases the one still held.
			 */
			sa_hdl = prevhdl;
			sa_db = prevdb;
			break;
		}
	}

	/* Release any handle we still hold, but never the caller's. */
	if (sa_hdl != NULL && sa_hdl != hdl) {
		ASSERT(sa_db != NULL);
		zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
	}

	if (error == 0)
		(void) memmove(buf, path, buf + len - path);

	return (error);
}

/*
 * Resolve object number 'obj' in objset 'osp' to its full path name,
 * stored NUL-terminated in 'buf' (of size 'len').  Returns 0 on success
 * or an error from the SA setup/lookup machinery.
 */
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

/*
 * Like zfs_obj_to_path(), but additionally fill 'sb' with the object's
 * zpl-level statistics (mode, gen, links, ctime).  The stats are fetched
 * first; on failure the path lookup is skipped and the error returned.
 */
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
    char *buf, int len)
{
	char *path = buf + len - 1;
	sa_attr_type_t *sa_table;
	sa_handle_t *hdl;
	dmu_buf_t *db;
	int error;

	*path = '\0';

	error = zfs_sa_setup(osp, &sa_table);
	if (error != 0)
		return (error);

	error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
	if (error != 0)
		return (error);

	error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
	if (error != 0) {
		zfs_release_sa_handle(hdl, db, FTAG);
		return (error);
	}

	error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);

	zfs_release_sa_handle(hdl, db, FTAG);
	return (error);
}

#ifdef _KERNEL
/*
 * Look up the parent znode and name of 'zp'.  On success *dzpp holds the
 * parent (caller must release it) and 'buf' holds zp's name; 'buf' is
 * assumed large enough for a directory entry name — TODO confirm callers
 * pass at least MAXNAMELEN.  Returns EINVAL for xattr objects and for
 * the root object (which has no name), or an error from the lookups.
 */
int
zfs_znode_parent_and_name(znode_t *zp, znode_t **dzpp, char *buf)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint64_t parent;
	int is_xattrdir;
	int err;

	/* Extended attributes should not be visible as regular files. */
	if ((zp->z_pflags & ZFS_XATTR) != 0)
		return (SET_ERROR(EINVAL));

	err = zfs_obj_to_pobj(zfsvfs->z_os, zp->z_sa_hdl, zfsvfs->z_attr_table,
	    &parent, &is_xattrdir);
	if (err != 0)
		return (err);
	ASSERT0(is_xattrdir);

	/* No name as this is a root object. */
	if (parent == zp->z_id)
		return (SET_ERROR(EINVAL));

	/* Find zp's name in the parent directory, then hold the parent. */
	err = zap_value_search(zfsvfs->z_os, parent, zp->z_id,
	    ZFS_DIRENT_OBJ(-1ULL), buf);
	if (err != 0)
		return (err);
	err = zfs_zget(zfsvfs, parent, dzpp);
	return (err);
}
#endif /* _KERNEL */