/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */


#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done avoiding races using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	zrele() should always be the last thing except for zil_commit() (if
 *	necessary) and ZFS_EXIT().  This is for 3 reasons: First, if it's the
 *	last reference, the vnode/znode can be freed, so the zp may point to
 *	freed memory.  Second, the last reference will call zfs_zinactive(),
 *	which may induce a lot of work -- pushing cached pages (which acquires
 *	range locks) and syncing out cached atime changes.  Third,
 *	zfs_zinactive() may require a new tx, which could deadlock the system
 *	if you were already holding one.  This deadlock occurs because the tx
 *	currently being operated on prevents a txg from syncing, which
 *	prevents the new tx from progressing, resulting in a deadlock.  If you
 *	must call zrele() within a tx, use zfs_zrele_async().  Note that iput()
 *	is a synonym for zrele().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *	dmu_tx_assign().  This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *	reduces lock contention and CPU usage when we must wait (note that if
 *	throughput is constrained by the storage, nearly every transaction
 *	must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *	calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
 *	to indicate that this operation has already called dmu_tx_wait().
 *	This will ensure that we don't retry forever, waiting a short bit
 *	each time.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		zrele(...);		// release held znodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	zrele(...);			// release held znodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */

/*
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially as an update to invoke a
 * scanning utility.
 */
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
	return (0);
}

/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Honor ZFS_APPENDONLY file attribute */
	if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	/* Virus scan eligible files on open */
	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (zfs_vscan(ip, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EACCES));
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(zfs_vscan(ip, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}

#if defined(_KERNEL)
/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:	If we find a memory mapped page, we write to *both*
 *		the page and the dmu buffer.
 */
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
	struct inode *ip = ZTOI(zp);
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	uint64_t nbytes;
	int64_t	off;
	void *pb;

	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		nbytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			pb = kmap(pp);
			(void) dmu_read(os, zp->z_id, start + off, nbytes,
			    pb + off, DMU_READ_PREFETCH);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			SetPageUptodate(pp);
			ClearPageError(pp);
			unlock_page(pp);
			put_page(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:	We "read" preferentially from memory mapped pages,
 *		else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *	 the file is memory mapped.
 */
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
	struct inode *ip = ZTOI(zp);
	struct address_space *mp = ip->i_mapping;
	struct page *pp;
	int64_t	start, off;
	uint64_t bytes;
	int len = nbytes;
	int error = 0;
	void *pb;

	start = uio->uio_loffset;
	off = start & (PAGE_SIZE-1);
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		bytes = MIN(PAGE_SIZE - off, len);

		pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			ASSERT(PageUptodate(pp));
			unlock_page(pp);

			pb = kmap(pp);
			error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			put_page(pp);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes);
		}

		len -= bytes;
		off = 0;
		if (error)
			break;
	}
	return (error);
}
#endif /* _KERNEL */
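
#if 0
/*
 * Illustrative sketch (not part of this file, compiled out): how a read
 * path might use the helpers above to keep mmap'ed pages coherent with
 * the DMU.  Upstream zfs_read() makes a similar decision; this fragment
 * and its name are assumptions for illustration only.
 */
static int
zfs_read_chunk_sketch(znode_t *zp, zfs_uio_t *uio, int nbytes)
{
	/*
	 * If the file has been mmap'ed, some pages may be cached (and
	 * possibly newer than the DMU copy), so read via mappedread()
	 * to prefer those pages; otherwise read directly from the DMU.
	 */
	if (zp->z_is_mapped)
		return (mappedread(zp, nbytes, uio));
	return (dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio, nbytes));
}
#endif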

unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

/*
 * Write the bytes to a file.
 *
 * IN:	zp	- znode of file to be written to
 *	data	- bytes to write
 *	len	- number of bytes to write
 *	pos	- offset to start writing at
 *
 * OUT:	resid	- remaining bytes to write
 *
 * RETURN:	0 if success
 *		positive error code if failure.  EIO is returned
 *		for a short write when residp isn't provided.
 *
 * Timestamps:
 *	zp - ctime|mtime updated if byte count > 0
 */
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
    loff_t pos, size_t *residp)
{
	fstrans_cookie_t cookie;
	int error;

	struct iovec iov;
	iov.iov_base = (void *)data;
	iov.iov_len = len;

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);

	cookie = spl_fstrans_mark();
	error = zfs_write(zp, &uio, 0, kcred);
	spl_fstrans_unmark(cookie);

	if (error == 0) {
		if (residp != NULL)
			*residp = zfs_uio_resid(&uio);
		else if (zfs_uio_resid(&uio) != 0)
			error = SET_ERROR(EIO);
	}

	return (error);
}
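
#if 0
/*
 * Illustrative sketch (not part of this file, compiled out): a kernel-
 * internal caller persisting a small buffer with zfs_write_simple().
 * The held znode and buffer are hypothetical; shown only to make the
 * residp/EIO contract documented above concrete.
 */
static int
zfs_write_buf_sketch(znode_t *zp, const char *buf, size_t buflen)
{
	size_t resid;
	int error;

	/* Write the whole buffer at offset 0. */
	error = zfs_write_simple(zp, buf, buflen, 0, &resid);
	if (error == 0 && resid != 0) {
		/*
		 * Short write: because residp was supplied, no EIO was
		 * generated for us, so the caller must decide.
		 */
		error = SET_ERROR(EIO);
	}
	return (error);
}
#endif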

void
zfs_zrele_async(znode_t *zp)
{
	struct inode *ip = ZTOI(zp);
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	/*
	 * If decrementing the count would put us at 0, we can't do it inline
	 * here, because that would be synchronous.  Instead, dispatch an iput
	 * to run later.
	 *
	 * For more information on the dangers of a synchronous iput, see the
	 * header comment of this file.
	 */
	if (!atomic_add_unless(&ip->i_count, -1, 1)) {
		VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
		    (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
	}
}
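
#if 0
/*
 * Illustrative sketch (not part of this file, compiled out): per Big
 * Rule (2) above, the last reference to a znode must not be dropped
 * synchronously while a tx is open, since zfs_zinactive() may need a
 * new tx and deadlock the txg machinery.  The names are hypothetical.
 */
static void
zfs_release_in_tx_sketch(znode_t *zp, dmu_tx_t *tx)
{
	/* WRONG: a bare zrele(zp) here could be the final reference. */

	/* RIGHT: defer the potentially-final release to the taskq. */
	zfs_zrele_async(zp);
	dmu_tx_commit(tx);
}
#endif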

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 * IN:	zdp	- znode of directory to search.
 *	nm	- name of entry to lookup.
 *	flags	- LOOKUP_XATTR set if looking for an attribute.
 *	cr	- credentials of caller.
 *	direntflags - directory lookup flags
 *	realpnp - returned pathname.
 *
 * OUT:	zpp	- znode of located entry, NULL if not found.
 *
 * RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	NA
 */
/* ARGSUSED */
int
zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zdp);
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed in name.  This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*zpp = zdp;
				zhold(*zpp);
				return (0);
			}
			return (error);
		}
	}

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zdp);

	*zpp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes..
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */

		if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
		    B_FALSE, cr))) {
			zrele(*zpp);
			*zpp = NULL;
		}

		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */

	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
	if ((error == 0) && (*zpp))
		zfs_znode_update_vfs(*zpp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
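
#if 0
/*
 * Illustrative sketch (not part of this file, compiled out): resolving
 * one name in a held directory znode and releasing the result, to show
 * the reference contract of zfs_lookup().  Names are hypothetical.
 */
static int
zfs_lookup_once_sketch(znode_t *zdp, char *name, cred_t *cr)
{
	znode_t *zp = NULL;
	int error;

	/* No LOOKUP_XATTR/FIGNORECASE: eligible for the fast path above. */
	error = zfs_lookup(zdp, name, &zp, 0, cr, NULL, NULL);
	if (error == 0) {
		/* ... use zp, which is returned held ... */
		zrele(zp);	/* drop the hold zfs_lookup() took */
	}
	return (error);
}
#endif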

/*
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 * IN:	dzp	- znode of directory to put new file entry in.
 *	name	- name of new file entry.
 *	vap	- attributes of new file.
 *	excl	- flag indicating exclusive or non-exclusive mode.
 *	mode	- mode to open file with.
 *	cr	- credentials of caller.
 *	flag	- file flag.
 *	vsecp	- ACL to be set
 *
 * OUT:	zpp	- znode of created or trunc'd entry.
 *
 * RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dzp - ctime|mtime updated if new entry created
 *	 zp - ctime|mtime always, atime if new
 */

/* ARGSUSED */
int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
    int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*zpp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		zhold(dzp);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;
		uint64_t projid = ZFS_DEFAULT_PROJID;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */

		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
			projid = zfs_inherit_projid(dzp);
		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}

		error = dmu_tx_assign(tx,
		    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			ZFS_EXIT(zfsvfs);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		error = zfs_link_create(dl, zp, tx, ZNEW);
		if (error != 0) {
			/*
			 * Since we failed to add the directory entry for it,
			 * delete the newly created dnode.
			 */
			zfs_znode_delete(zp, tx);
			remove_inode_hash(ZTOI(zp));
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_commit(tx);
			goto out;
		}

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & O_APPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		have_acl = B_FALSE;

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			if (dl) {
				zfs_dirent_unlock(dl);
				dl = NULL;
			}
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);
		*zpp = zp;
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
	znode_t		*zp = NULL, *dzp = ITOZ(dip);
	zfsvfs_t	*zfsvfs = ITOZSB(dip);
	objset_t	*os;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	uint64_t	projid = ZFS_DEFAULT_PROJID;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	os = zfsvfs->z_os;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

top:
	*ipp = NULL;

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		goto out;
	}

	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids)) != 0)
		goto out;
	have_acl = B_TRUE;

	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
		projid = zfs_inherit_projid(dzp);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/* Add to unlinked set */
	zp->z_unlinked = B_TRUE;
	zfs_unlinked_add(zp, tx);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
out:

	if (error) {
		if (zp)
			zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);
		*ipp = ZTOI(zp);
	}

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Remove an entry from a directory.
 *
 * IN:	dzp	- znode of directory to remove entry from.
 *	name	- name of entry to remove.
 *	cr	- credentials of caller.
 *	flags	- case flags.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dzp - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

uint64_t null_xattr = 0;

/*ARGSUSED*/
int
zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
{
	znode_t		*zp;
	znode_t		*xzp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	uint64_t	acl_obj, xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;
	uint64_t	obj = 0;
	uint64_t	links;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	boolean_t	may_delete_now, delete_now = FALSE;
	boolean_t	unlinked, toobig = FALSE;
	uint64_t	txtype;
	pathname_t	*realnmp = NULL;
	pathname_t	realnm;
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
		if (realnmp)
			pn_free(realnmp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ZTOI(zp)->i_mode)) {
		error = SET_ERROR(EPERM);
		goto out;
	}

	mutex_enter(&zp->z_lock);
	may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
	    !(zp->z_is_mapped);
	mutex_exit(&zp->z_lock);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the inode.  So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT0(error);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	/*
	 * Mark this transaction as typically resulting in a net free of space
	 */
	dmu_tx_mark_netfree(tx);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			zrele(zp);
			if (xzp)
				zrele(xzp);
			goto top;
		}
		if (realnmp)
			pn_free(realnmp);
		dmu_tx_abort(tx);
		zrele(zp);
		if (xzp)
			zrele(xzp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed.  Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    atomic_read(&ZTOI(zp)->i_count) == 1 &&
		    !(zp->z_is_mapped) && xattr_obj == xattr_obj_unlinked &&
		    zfs_external_acl(zp) == acl_obj;
	}

	if (delete_now) {
		if (xattr_obj_unlinked) {
			ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = B_TRUE;
			clear_nlink(ZTOI(xzp));
			links = 0;
			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &links, sizeof (links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);
			ASSERT0(error);
		}
		/*
		 * Add to the unlinked set because a new reference could be
		 * taken concurrently resulting in a deferred destruction.
		 */
		zfs_unlinked_add(zp, tx);
		mutex_exit(&zp->z_lock);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);

	dmu_tx_commit(tx);
out:
	if (realnmp)
		pn_free(realnmp);

	zfs_dirent_unlock(dl);
	zfs_znode_update_vfs(dzp);
	zfs_znode_update_vfs(zp);

	if (delete_now)
		zrele(zp);
	else
		zfs_zrele_async(zp);

	if (xzp) {
		zfs_znode_update_vfs(xzp);
		zfs_zrele_async(xzp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Create a new directory and insert it into dzp using the name
 * provided.  Return a pointer to the inserted directory.
 *
 * IN:	dzp	- znode of directory to add subdir to.
 *	dirname	- name of new directory.
 *	vap	- attributes of new directory.
 *	cr	- credentials of caller.
 *	flags	- case flags.
 *	vsecp	- ACL to be set
 *
 * OUT:	zpp	- znode of created directory.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dzp - ctime|mtime updated
 *	zpp - ctime|mtime|atime updated
 */
/*ARGSUSED*/
int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	waited = B_FALSE;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure file system is at proper version
	 */

	uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (dirname == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			ZFS_EXIT(zfsvfs);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids)) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST which can cause some applications
	 * to fail.
	 */
top:
	*zpp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	/*
	 * Now put new name in parent dir.
	 */
	error = zfs_link_create(dl, zp, tx, ZNEW);
	if (error != 0) {
		zfs_znode_delete(zp, tx);
		remove_inode_hash(ZTOI(zp));
		goto out;
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	*zpp = zp;

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

out:
	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	if (error != 0) {
		zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);
	}
	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 * IN:	dzp	- znode of directory to remove from.
 *	name	- name of directory to be removed.
 *	cwd	- inode of current working directory.
 *	cr	- credentials of caller.
 *	flags	- case flags
 *
 * RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dzp - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
    int flags)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(dzp);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
		goto out;
	}

	if (!S_ISDIR(ZTOI(zp)->i_mode)) {
		error = SET_ERROR(ENOTDIR);
		goto out;
	}

	if (zp == cwd) {
		error = SET_ERROR(EINVAL);
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			zrele(zp);
			goto top;
		}
		dmu_tx_abort(tx);
		zrele(zp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
		    B_FALSE);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	zfs_znode_update_vfs(dzp);
	zfs_znode_update_vfs(zp);
	zrele(zp);

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Read directory entries from the given directory cursor position and emit
 * name and position for each entry.
 *
 * IN:	ip	- inode of directory to read.
 *	ctx	- directory entry context.
 *	cr	- credentials of caller.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
/* ARGSUSED */
int
zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	objset_t	*os;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	int		error;
	uint8_t		prefetch;
	uint8_t		type;
	int		done = 0;
	uint64_t	parent;
	uint64_t	offset; /* must be unsigned; checks for < 1 */

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	if (zp->z_unlinked)
		goto out;

	error = 0;
	os = zfsvfs->z_os;
	offset = ctx->pos;
	prefetch = zp->z_zn_prefetch;

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap.za_name, ".");
			zap.za_normalization_conflict = 0;
			objnum = zp->z_id;
			type = DT_DIR;
		} else if (offset == 1) {
			(void) strcpy(zap.za_name, "..");
			zap.za_normalization_conflict = 0;
			objnum = parent;
			type = DT_DIR;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
			zap.za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
			type = DT_DIR;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, &zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			/*
			 * Allow multiple entries provided the first entry is
			 * the object id.  Non-zpl consumers may safely make
			 * use of the additional space.
			 *
			 * XXX: This should be a feature flag for compatibility
			 */
			if (zap.za_integer_length != 8 ||
			    zap.za_num_integers == 0) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld, "
				    "length = %d, num = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset,
				    zap.za_integer_length,
				    (u_longlong_t)zap.za_num_integers);
				error = SET_ERROR(ENXIO);
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
			type = ZFS_DIRENT_TYPE(zap.za_first_integer);
		}

		done = !zpl_dir_emit(ctx, zap.za_name, strlen(zap.za_name),
		    objnum, type);
		if (done)
			break;

		/* Prefetch znode */
		if (prefetch) {
			dmu_prefetch(os, objnum, 0, 0, 0,
			    ZIO_PRIORITY_SYNC_READ);
		}

		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}
		ctx->pos = offset;
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	if (error == ENOENT)
		error = 0;
out:
	ZFS_EXIT(zfsvfs);

	return (error);
}
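
#if 0
/*
 * Illustrative sketch (not part of this file, compiled out): the cookie
 * layout zfs_readdir() relies on.  Offsets 0, 1, and (for the root when
 * '.zfs' is visible) 2 are synthesized; anything past them is a
 * serialized ZAP cursor, whose low 4 bits are always zero.  The helper
 * name is hypothetical.
 */
static boolean_t
zfs_readdir_offset_is_special_sketch(znode_t *zp, uint64_t offset)
{
	if (offset == 0 || offset == 1)		/* '.' and '..' */
		return (B_TRUE);
	if (offset == 2 && zfs_show_ctldir(zp))	/* '.zfs' at the root */
		return (B_TRUE);
	return (B_FALSE);			/* serialized ZAP cursor */
}
#endif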

/*
 * Get the basic file attributes and place them in the provided kstat
 * structure.  The inode is assumed to be the authoritative source
 * for most of the attributes.  However, the znode currently has the
 * authoritative atime, blksize, and block count.
 *
 * IN:	ip	- inode of file.
 *
 * OUT:	sp	- kstat values.
 *
 * RETURN:	0 (always succeeds)
 */
/* ARGSUSED */
int
zfs_getattr_fast(struct inode *ip, struct kstat *sp)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint32_t blksize;
	u_longlong_t nblocks;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	mutex_enter(&zp->z_lock);

	generic_fillattr(ip, sp);
	/*
	 * +1 link count for root inode with visible '.zfs' directory.
	 */
	if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
		if (sp->nlink < ZFS_LINK_MAX)
			sp->nlink++;

	sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
	sp->blksize = blksize;
	sp->blocks = nblocks;

	if (unlikely(zp->z_blksz == 0)) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		sp->blksize = zfsvfs->z_max_blksz;
	}

	mutex_exit(&zp->z_lock);

	/*
	 * Required to prevent NFS client from detecting different inode
	 * numbers of snapshot root dentry before and after snapshot mount.
	 */
	if (zfsvfs->z_issnap) {
		if (ip->i_sb->s_root->d_inode == ip)
			sp->ino = ZFSCTL_INO_SNAPDIRS -
			    dmu_objset_id(zfsvfs->z_os);
	}

	ZFS_EXIT(zfsvfs);

	return (0);
}

/*
 * For the operation of changing file's user/group/project, we need to
 * handle not only the main object that is assigned to the file directly,
 * but also the ones that are used by the file via the hidden xattr directory.
 *
 * Because the xattr directory may contain many EA entries, it may be
 * impossible to change all of them in the single transaction that changes
 * the main object's user/group/project attributes.  Instead, we change
 * them one by one via separate, independent transactions.  This is not an
 * ideal solution, but we have no better idea yet.
 */
static int
zfs_setattr_dir(znode_t *dzp)
{
	struct inode	*dxip = ZTOI(dzp);
	struct inode	*xip = NULL;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	objset_t	*os = zfsvfs->z_os;
	zap_cursor_t	zc;
	zap_attribute_t	zap;
	zfs_dirlock_t	*dl;
	znode_t		*zp = NULL;
	dmu_tx_t	*tx = NULL;
	uint64_t	uid, gid;
	sa_bulk_attr_t	bulk[4];
	int		count;
	int		err;

	zap_cursor_init(&zc, os, dzp->z_id);
	while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
		count = 0;
		if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
			err = ENXIO;
			break;
		}

		err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
		    ZEXISTS, NULL, NULL);
		if (err == ENOENT)
			goto next;
		if (err)
			break;

		xip = ZTOI(zp);
		if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
		    KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
		    zp->z_projid == dzp->z_projid)
			goto next;

		tx = dmu_tx_create(os);
		if (!(zp->z_pflags & ZFS_PROJID))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err)
			break;

		mutex_enter(&dzp->z_lock);

		if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
			xip->i_uid = dxip->i_uid;
			uid = zfs_uid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &uid, sizeof (uid));
		}

		if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
			xip->i_gid = dxip->i_gid;
			gid = zfs_gid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
			    &gid, sizeof (gid));
		}

		if (zp->z_projid != dzp->z_projid) {
			if (!(zp->z_pflags & ZFS_PROJID)) {
				zp->z_pflags |= ZFS_PROJID;
				SA_ADD_BULK_ATTR(bulk, count,
				    SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
				    sizeof (zp->z_pflags));
			}

			zp->z_projid = dzp->z_projid;
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
			    NULL, &zp->z_projid, sizeof (zp->z_projid));
		}

		mutex_exit(&dzp->z_lock);

		if (likely(count > 0)) {
			err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
		tx = NULL;
		if (err != 0 && err != ENOENT)
			break;

next:
		if (zp) {
			zrele(zp);
			zp = NULL;
			zfs_dirent_unlock(dl);
		}
		zap_cursor_advance(&zc);
	}

	if (tx)
		dmu_tx_abort(tx);
	if (zp) {
		zrele(zp);
		zfs_dirent_unlock(dl);
	}
	zap_cursor_fini(&zc);

	return (err == ENOENT ? 0 : err);
}

/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 * IN:	zp	- znode of file to be modified.
 *	vap	- new attribute values.
 *		  If ATTR_XVATTR set, then optional attrs are being set
 *	flags	- ATTR_UTIME set if non-default time values provided.
 *		- ATTR_NOACLCHECK (CIFS context only).
 *	cr	- credentials of caller.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime updated, mtime updated if size changed.
 */
/* ARGSUSED */
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
	struct inode	*ip;
	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
	objset_t	*os = zfsvfs->z_os;
	zilog_t		*zilog;
	dmu_tx_t	*tx;
	vattr_t		oldva;
	xvattr_t	*tmpxvattr;
	uint_t		mask = vap->va_mask;
	uint_t		saved_mask = 0;
	int		trim_mask = 0;
	uint64_t	new_mode;
	uint64_t	new_kuid = 0, new_kgid = 0, new_uid, new_gid;
	uint64_t	xattr_obj;
	uint64_t	mtime[2], ctime[2], atime[2];
	uint64_t	projid = ZFS_INVALID_PROJID;
	znode_t		*attrzp;
	int		need_policy = FALSE;
	int		err, err2 = 0;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t	*xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t	*xoap;
	zfs_acl_t	*aclp;
	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t	fuid_dirtied = B_FALSE;
	boolean_t	handle_eadir = B_FALSE;
	sa_bulk_attr_t	*bulk, *xattr_bulk;
	int		count = 0, xattr_count = 0, bulks = 8;

	if (mask == 0)
		return (0);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	ip = ZTOI(zp);

	/*
	 * If this is a xvattr_t, then get a pointer to the structure of
	 * optional attributes.  If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);
	if (xoap != NULL && (mask & ATTR_XVATTR)) {
		if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
			if (!dmu_objset_projectquota_enabled(os) ||
			    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
				ZFS_EXIT(zfsvfs);
				return (SET_ERROR(ENOTSUP));
			}

			projid = xoap->xoa_projid;
			if (unlikely(projid == ZFS_INVALID_PROJID)) {
				ZFS_EXIT(zfsvfs);
				return (SET_ERROR(EINVAL));
			}

			if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
				projid = ZFS_INVALID_PROJID;
			else
				need_policy = TRUE;
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
		    (xoap->xoa_projinherit !=
		    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
		    (!dmu_objset_projectquota_enabled(os) ||
		    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
			ZFS_EXIT(zfsvfs);
			return (SET_ERROR(ENOTSUP));
		}
	}

	zilog = zfsvfs->z_log;

	/*
	 * Make sure that if we have ephemeral uid/gid or xvattr specified
	 * that file system is at proper version level
	 */

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EISDIR));
	}

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
	xva_init(tmpxvattr);

	bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
	xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);

	/*
	 * Immutable files can only alter immutable bit and atime
	 */
	if ((zp->z_pflags & ZFS_IMMUTABLE) &&
	    ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
	    ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
		err = SET_ERROR(EPERM);
		goto out3;
	}

	if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
		err = SET_ERROR(EPERM);
		goto out3;
	}

	/*
	 * Verify timestamps don't overflow 32 bits.
	 * ZFS can handle large timestamps, but 32bit syscalls can't
	 * handle times greater than 2039.  This check should be removed
	 * once large timestamps are fully supported.
	 */
	if (mask & (ATTR_ATIME | ATTR_MTIME)) {
		if (((mask & ATTR_ATIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_atime)) ||
		    ((mask & ATTR_MTIME) &&
		    TIMESPEC_OVERFLOW(&vap->va_mtime))) {
			err = SET_ERROR(EOVERFLOW);
			goto out3;
		}
	}

top:
	attrzp = NULL;
	aclp = NULL;

	/* Can this be moved to before the top label? */
	if (zfs_is_readonly(zfsvfs)) {
		err = SET_ERROR(EROFS);
		goto out3;
	}

	/*
	 * First validate permissions
	 */

	if (mask & ATTR_SIZE) {
		err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
		if (err)
			goto out3;

		/*
		 * XXX - Note, we are not providing any open
		 * mode flags here (like FNDELAY), so we may
		 * block if there are locks present... this
		 * should be addressed in openat().
		 */
		/* XXX - would it be OK to generate a log record here? */
		err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
		if (err)
			goto out3;
	}

	if (mask & (ATTR_ATIME|ATTR_MTIME) ||
	    ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
	    XVA_ISSET_REQ(xvap, XAT_READONLY) ||
	    XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
	    XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
	    XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
	    XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
	    XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
		need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
		    skipaclchk, cr);
	}

	if (mask & (ATTR_UID|ATTR_GID)) {
		int	idmask = (mask & (ATTR_UID|ATTR_GID));
		int	take_owner;
		int	take_group;

		/*
		 * NOTE: even if a new mode is being set,
		 * we may clear S_ISUID/S_ISGID bits.
		 */

		if (!(mask & ATTR_MODE))
			vap->va_mode = zp->z_mode;

		/*
		 * Take ownership or chgrp to group we are a member of
		 */

		take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
		take_group = (mask & ATTR_GID) &&
		    zfs_groupmember(zfsvfs, vap->va_gid, cr);

		/*
		 * If both ATTR_UID and ATTR_GID are set then take_owner and
		 * take_group must both be set in order to allow taking
		 * ownership.
		 *
		 * Otherwise, send the check through secpolicy_vnode_setattr()
		 *
		 */

		if (((idmask == (ATTR_UID|ATTR_GID)) &&
		    take_owner && take_group) ||
		    ((idmask == ATTR_UID) && take_owner) ||
		    ((idmask == ATTR_GID) && take_group)) {
			if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
			    skipaclchk, cr) == 0) {
				/*
				 * Remove setuid/setgid for non-privileged users
				 */
				(void) secpolicy_setid_clear(vap, cr);
				trim_mask = (mask & (ATTR_UID|ATTR_GID));
			} else {
				need_policy = TRUE;
			}
		} else {
			need_policy = TRUE;
		}
	}

	mutex_enter(&zp->z_lock);
	oldva.va_mode = zp->z_mode;
	zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
	if (mask & ATTR_XVATTR) {
		/*
		 * Update xvattr mask to include only those attributes
		 * that are actually changing.
		 *
		 * The bits will be restored prior to actually setting
		 * the attributes so the caller thinks they were set.
		 */
		if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
			if (xoap->xoa_appendonly !=
			    ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_APPENDONLY);
				XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
			if (xoap->xoa_projinherit !=
			    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
				XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
			if (xoap->xoa_nounlink !=
			    ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NOUNLINK);
				XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
			if (xoap->xoa_immutable !=
			    ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
				XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
			if (xoap->xoa_nodump !=
			    ((zp->z_pflags & ZFS_NODUMP) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_NODUMP);
				XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
			if (xoap->xoa_av_modified !=
			    ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
			if ((!S_ISREG(ip->i_mode) &&
			    xoap->xoa_av_quarantined) ||
			    xoap->xoa_av_quarantined !=
			    ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
				need_policy = TRUE;
			} else {
				XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
				XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
			}
		}

		if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
			mutex_exit(&zp->z_lock);
			err = SET_ERROR(EPERM);
			goto out3;
		}

		if (need_policy == FALSE &&
		    (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
		    XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
			need_policy = TRUE;
		}
	}

	mutex_exit(&zp->z_lock);

	if (mask & ATTR_MODE) {
		if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
			err = secpolicy_setid_setsticky_clear(ip, vap,
			    &oldva, cr);
			if (err)
				goto out3;

			trim_mask |= ATTR_MODE;
		} else {
			need_policy = TRUE;
		}
	}

	if (need_policy) {
		/*
		 * If trim_mask is set then take ownership
		 * has been granted or write_acl is present and user
		 * has the ability to modify mode.  In that case remove
		 * UID|GID and or MODE from mask so that
		 * secpolicy_vnode_setattr() doesn't revoke it.
		 */

		if (trim_mask) {
			saved_mask = vap->va_mask;
			vap->va_mask &= ~trim_mask;
		}
		err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
		    (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
		if (err)
			goto out3;

		if (trim_mask)
			vap->va_mask |= saved_mask;
	}

	/*
	 * secpolicy_vnode_setattr, or take ownership may have
	 * changed va_mask
	 */
	mask = vap->va_mask;

	if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
		handle_eadir = B_TRUE;
		err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj, sizeof (xattr_obj));

		if (err == 0 && xattr_obj) {
			err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
			if (err)
				goto out2;
		}
		if (mask & ATTR_UID) {
			new_kuid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
			if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
			    zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
			    new_kuid)) {
				if (attrzp)
					zrele(attrzp);
				err = SET_ERROR(EDQUOT);
				goto out2;
			}
		}

		if (mask & ATTR_GID) {
			new_kgid = zfs_fuid_create(zfsvfs,
			    (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
			if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
			    zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
			    new_kgid)) {
				if (attrzp)
					zrele(attrzp);
				err = SET_ERROR(EDQUOT);
				goto out2;
			}
		}

		if (projid != ZFS_INVALID_PROJID &&
		    zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
			if (attrzp)
				zrele(attrzp);
			err = EDQUOT;
			goto out2;
		}
	}
	tx = dmu_tx_create(os);

	if (mask & ATTR_MODE) {
		uint64_t pmode = zp->z_mode;
		uint64_t acl_obj;
		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);

		if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
		    !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
			err = EPERM;
			goto out;
		}

		if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
			goto out;

		mutex_enter(&zp->z_lock);
		if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
			/*
			 * Are we upgrading ACL from old V0 format
			 * to V1 format?
			 */
			if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
			    zfs_znode_acl_version(zp) ==
			    ZFS_ACL_VERSION_INITIAL) {
				dmu_tx_hold_free(tx, acl_obj, 0,
				    DMU_OBJECT_END);
				dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
				    0, aclp->z_acl_bytes);
			} else {
				dmu_tx_hold_write(tx, acl_obj, 0,
				    aclp->z_acl_bytes);
			}
		} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, aclp->z_acl_bytes);
		}
		mutex_exit(&zp->z_lock);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	} else {
		if (((mask & ATTR_XVATTR) &&
		    XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
		    (projid != ZFS_INVALID_PROJID &&
		    !(zp->z_pflags & ZFS_PROJID)))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	}

	if (attrzp) {
		dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
	}

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);

	zfs_sa_upgrade_txholds(tx, zp);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err)
		goto out;

	count = 0;
	/*
	 * Set each attribute requested.
	 * We group settings according to the locks they need to acquire.
2322 * 2323 * Note: you cannot set ctime directly, although it will be 2324 * updated as a side-effect of calling this function. 2325 */ 2326 2327 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) { 2328 /* 2329 * For the existed object that is upgraded from old system, 2330 * its on-disk layout has no slot for the project ID attribute. 2331 * But quota accounting logic needs to access related slots by 2332 * offset directly. So we need to adjust old objects' layout 2333 * to make the project ID to some unified and fixed offset. 2334 */ 2335 if (attrzp) 2336 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid); 2337 if (err == 0) 2338 err = sa_add_projid(zp->z_sa_hdl, tx, projid); 2339 2340 if (unlikely(err == EEXIST)) 2341 err = 0; 2342 else if (err != 0) 2343 goto out; 2344 else 2345 projid = ZFS_INVALID_PROJID; 2346 } 2347 2348 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) 2349 mutex_enter(&zp->z_acl_lock); 2350 mutex_enter(&zp->z_lock); 2351 2352 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 2353 &zp->z_pflags, sizeof (zp->z_pflags)); 2354 2355 if (attrzp) { 2356 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE)) 2357 mutex_enter(&attrzp->z_acl_lock); 2358 mutex_enter(&attrzp->z_lock); 2359 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2360 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags, 2361 sizeof (attrzp->z_pflags)); 2362 if (projid != ZFS_INVALID_PROJID) { 2363 attrzp->z_projid = projid; 2364 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2365 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid, 2366 sizeof (attrzp->z_projid)); 2367 } 2368 } 2369 2370 if (mask & (ATTR_UID|ATTR_GID)) { 2371 2372 if (mask & ATTR_UID) { 2373 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid); 2374 new_uid = zfs_uid_read(ZTOI(zp)); 2375 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, 2376 &new_uid, sizeof (new_uid)); 2377 if (attrzp) { 2378 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2379 SA_ZPL_UID(zfsvfs), NULL, &new_uid, 2380 sizeof (new_uid)); 2381 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid); 2382 } 2383 } 2384 2385 if (mask & ATTR_GID) { 2386 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid); 2387 new_gid = zfs_gid_read(ZTOI(zp)); 2388 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), 2389 NULL, &new_gid, sizeof (new_gid)); 2390 if (attrzp) { 2391 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2392 SA_ZPL_GID(zfsvfs), NULL, &new_gid, 2393 sizeof (new_gid)); 2394 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid); 2395 } 2396 } 2397 if (!(mask & ATTR_MODE)) { 2398 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), 2399 NULL, &new_mode, sizeof (new_mode)); 2400 new_mode = zp->z_mode; 2401 } 2402 err = zfs_acl_chown_setattr(zp); 2403 ASSERT(err == 0); 2404 if (attrzp) { 2405 err = zfs_acl_chown_setattr(attrzp); 2406 ASSERT(err == 0); 2407 } 2408 } 2409 2410 if (mask & ATTR_MODE) { 2411 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, 2412 &new_mode, sizeof (new_mode)); 2413 zp->z_mode = ZTOI(zp)->i_mode = new_mode; 2414 ASSERT3P(aclp, !=, NULL); 2415 err = zfs_aclset_common(zp, aclp, cr, tx); 2416 ASSERT0(err); 2417 if (zp->z_acl_cached) 2418 zfs_acl_free(zp->z_acl_cached); 2419 zp->z_acl_cached = aclp; 2420 aclp = NULL; 2421 } 2422 2423 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) { 2424 zp->z_atime_dirty = B_FALSE; 2425 ZFS_TIME_ENCODE(&ip->i_atime, atime); 2426 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, 2427 &atime, sizeof (atime)); 2428 } 2429 2430 if (mask & (ATTR_MTIME | ATTR_SIZE)) { 2431 ZFS_TIME_ENCODE(&vap->va_mtime, mtime); 2432 ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate( 2433 
	if (mask & (ATTR_MTIME | ATTR_SIZE)) {
		ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
		ZTOI(zp)->i_mtime = zpl_inode_timestamp_truncate(
		    vap->va_mtime, ZTOI(zp));

		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
		    mtime, sizeof (mtime));
	}

	if (mask & (ATTR_CTIME | ATTR_SIZE)) {
		ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
		ZTOI(zp)->i_ctime = zpl_inode_timestamp_truncate(vap->va_ctime,
		    ZTOI(zp));
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
	}

	if (projid != ZFS_INVALID_PROJID) {
		zp->z_projid = projid;
		SA_ADD_BULK_ATTR(bulk, count,
		    SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
		    sizeof (zp->z_projid));
	}

	if (attrzp && mask) {
		SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
		    SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
		    sizeof (ctime));
	}

	/*
	 * Do this after setting timestamps to prevent the timestamp
	 * update from toggling the flag bits.
	 */
	if (xoap && (mask & ATTR_XVATTR)) {

		/*
		 * Restore the masks trimmed off above so that the
		 * return masks can be set for the caller.
		 */
		if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
			XVA_SET_REQ(xvap, XAT_APPENDONLY);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
			XVA_SET_REQ(xvap, XAT_NOUNLINK);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
			XVA_SET_REQ(xvap, XAT_IMMUTABLE);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
			XVA_SET_REQ(xvap, XAT_NODUMP);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
			XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
			XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
		}
		if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
			XVA_SET_REQ(xvap, XAT_PROJINHERIT);
		}

		if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
			ASSERT(S_ISREG(ip->i_mode));

		zfs_xvattr_set(zp, xvap, tx);
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	if (mask != 0)
		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);

	mutex_exit(&zp->z_lock);
	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
		mutex_exit(&zp->z_acl_lock);

	if (attrzp) {
		if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
			mutex_exit(&attrzp->z_acl_lock);
		mutex_exit(&attrzp->z_lock);
	}
out:
	if (err == 0 && xattr_count > 0) {
		err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
		    xattr_count, tx);
		ASSERT(err2 == 0);
	}

	if (aclp)
		zfs_acl_free(aclp);

	if (fuidp) {
		zfs_fuid_info_free(fuidp);
		fuidp = NULL;
	}

	if (err) {
		dmu_tx_abort(tx);
		if (attrzp)
			zrele(attrzp);
		if (err == ERESTART)
			goto top;
	} else {
		if (count > 0)
			err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		dmu_tx_commit(tx);
		if (attrzp) {
			if (err2 == 0 && handle_eadir)
				err2 = zfs_setattr_dir(attrzp);
			zrele(attrzp);
		}
		zfs_znode_update_vfs(zp);
	}

out2:
	if (os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

out3:
	kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
	kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
	kmem_free(tmpxvattr, sizeof (xvattr_t));
	ZFS_EXIT(zfsvfs);
	return (err);
}

typedef struct zfs_zlock {
	krwlock_t	*zl_rwlock;	/* lock we acquired */
	znode_t		*zl_znode;	/* znode we held */
	struct zfs_zlock *zl_next;	/* next in list */
} zfs_zlock_t;

/*
 * Drop locks and release vnodes that were held by zfs_rename_lock().
2568 */ 2569 static void 2570 zfs_rename_unlock(zfs_zlock_t **zlpp) 2571 { 2572 zfs_zlock_t *zl; 2573 2574 while ((zl = *zlpp) != NULL) { 2575 if (zl->zl_znode != NULL) 2576 zfs_zrele_async(zl->zl_znode); 2577 rw_exit(zl->zl_rwlock); 2578 *zlpp = zl->zl_next; 2579 kmem_free(zl, sizeof (*zl)); 2580 } 2581 } 2582 2583 /* 2584 * Search back through the directory tree, using the ".." entries. 2585 * Lock each directory in the chain to prevent concurrent renames. 2586 * Fail any attempt to move a directory into one of its own descendants. 2587 * XXX - z_parent_lock can overlap with map or grow locks 2588 */ 2589 static int 2590 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) 2591 { 2592 zfs_zlock_t *zl; 2593 znode_t *zp = tdzp; 2594 uint64_t rootid = ZTOZSB(zp)->z_root; 2595 uint64_t oidp = zp->z_id; 2596 krwlock_t *rwlp = &szp->z_parent_lock; 2597 krw_t rw = RW_WRITER; 2598 2599 /* 2600 * First pass write-locks szp and compares to zp->z_id. 2601 * Later passes read-lock zp and compare to zp->z_parent. 2602 */ 2603 do { 2604 if (!rw_tryenter(rwlp, rw)) { 2605 /* 2606 * Another thread is renaming in this path. 2607 * Note that if we are a WRITER, we don't have any 2608 * parent_locks held yet. 2609 */ 2610 if (rw == RW_READER && zp->z_id > szp->z_id) { 2611 /* 2612 * Drop our locks and restart 2613 */ 2614 zfs_rename_unlock(&zl); 2615 *zlpp = NULL; 2616 zp = tdzp; 2617 oidp = zp->z_id; 2618 rwlp = &szp->z_parent_lock; 2619 rw = RW_WRITER; 2620 continue; 2621 } else { 2622 /* 2623 * Wait for other thread to drop its locks 2624 */ 2625 rw_enter(rwlp, rw); 2626 } 2627 } 2628 2629 zl = kmem_alloc(sizeof (*zl), KM_SLEEP); 2630 zl->zl_rwlock = rwlp; 2631 zl->zl_znode = NULL; 2632 zl->zl_next = *zlpp; 2633 *zlpp = zl; 2634 2635 if (oidp == szp->z_id) /* We're a descendant of szp */ 2636 return (SET_ERROR(EINVAL)); 2637 2638 if (oidp == rootid) /* We've hit the top */ 2639 return (0); 2640 2641 if (rw == RW_READER) { /* i.e. not the first pass */ 2642 int error = zfs_zget(ZTOZSB(zp), oidp, &zp); 2643 if (error) 2644 return (error); 2645 zl->zl_znode = zp; 2646 } 2647 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)), 2648 &oidp, sizeof (oidp)); 2649 rwlp = &zp->z_parent_lock; 2650 rw = RW_READER; 2651 2652 } while (zp->z_id != sdzp->z_id); 2653 2654 return (0); 2655 } 2656 2657 /* 2658 * Move an entry from the provided source directory to the target 2659 * directory. Change the entry name as indicated. 2660 * 2661 * IN: sdzp - Source directory containing the "old entry". 2662 * snm - Old entry name. 2663 * tdzp - Target directory to contain the "new entry". 2664 * tnm - New entry name. 2665 * cr - credentials of caller. 2666 * flags - case flags 2667 * 2668 * RETURN: 0 on success, error code on failure. 2669 * 2670 * Timestamps: 2671 * sdzp,tdzp - ctime|mtime updated 2672 */ 2673 /*ARGSUSED*/ 2674 int 2675 zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm, 2676 cred_t *cr, int flags) 2677 { 2678 znode_t *szp, *tzp; 2679 zfsvfs_t *zfsvfs = ZTOZSB(sdzp); 2680 zilog_t *zilog; 2681 zfs_dirlock_t *sdl, *tdl; 2682 dmu_tx_t *tx; 2683 zfs_zlock_t *zl; 2684 int cmp, serr, terr; 2685 int error = 0; 2686 int zflg = 0; 2687 boolean_t waited = B_FALSE; 2688 2689 if (snm == NULL || tnm == NULL) 2690 return (SET_ERROR(EINVAL)); 2691 2692 ZFS_ENTER(zfsvfs); 2693 ZFS_VERIFY_ZP(sdzp); 2694 zilog = zfsvfs->z_log; 2695 2696 ZFS_VERIFY_ZP(tdzp); 2697 2698 /* 2699 * We check i_sb because snapshots and the ctldir must have different 2700 * super blocks. 
 */
	if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
	    zfsctl_is_node(ZTOI(tdzp))) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	if (zfsvfs->z_utf8 && u8_validate(tnm,
	    strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;

top:
	szp = NULL;
	tzp = NULL;
	zl = NULL;

	/*
	 * This is to prevent the creation of links into attribute space
	 * by renaming a linked file into/out of an attribute directory.
	 * See the comment in zfs_link() for why this is considered bad.
	 */
	if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Lock source and target directory entries.  To prevent deadlock,
	 * a lock ordering must be defined.  We lock the directory with
	 * the smallest object id first, or if it's a tie, the one with
	 * the lexically first name.
	 */
	if (sdzp->z_id < tdzp->z_id) {
		cmp = -1;
	} else if (sdzp->z_id > tdzp->z_id) {
		cmp = 1;
	} else {
		/*
		 * First compare the two name arguments without
		 * considering any case folding.
		 */
		int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);

		cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
		ASSERT(error == 0 || !zfsvfs->z_utf8);
		if (cmp == 0) {
			/*
			 * POSIX: "If the old argument and the new argument
			 * both refer to links to the same existing file,
			 * the rename() function shall return successfully
			 * and perform no other action."
			 */
			ZFS_EXIT(zfsvfs);
			return (0);
		}
		/*
		 * If the file system is case-folding, then we may
		 * have some more checking to do.  A case-folding file
		 * system is either supporting mixed case sensitivity
		 * access or is completely case-insensitive.  Note
		 * that the file system is always case preserving.
		 *
		 * In mixed sensitivity mode case sensitive behavior
		 * is the default.  FIGNORECASE must be used to
		 * explicitly request case insensitive behavior.
		 *
		 * If the source and target names provided differ only
		 * by case (e.g., a request to rename 'tim' to 'Tim'),
		 * we will treat this as a special case in the
		 * case-insensitive mode: as long as the source name
		 * is an exact match, we will allow this to proceed as
		 * a name-change request.
		 */
		if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
		    (zfsvfs->z_case == ZFS_CASE_MIXED &&
		    flags & FIGNORECASE)) &&
		    u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
		    &error) == 0) {
			/*
			 * Case-preserving rename request; require exact
			 * name matches.
			 */
			zflg |= ZCIEXACT;
			zflg &= ~ZCILOOK;
		}
	}
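	/*
	 * A sketch of why the ordering above prevents deadlock: with
	 * two hypothetical directories a and b, every thread computes
	 * the same lock order, so no thread can hold one entry lock
	 * while waiting forever on the other (illustrative only):
	 */
#if 0
	znode_t *first = (a->z_id <= b->z_id) ? a : b;	/* hypothetical */
	znode_t *second = (a->z_id <= b->z_id) ? b : a;	/* hypothetical */
	/* Lock "first" before "second" in every rename between a and b. */
#endif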
	/*
	 * If the source and destination directories are the same, we should
	 * grab the z_name_lock of that directory only once.
	 */
	if (sdzp == tdzp) {
		zflg |= ZHAVELOCK;
		rw_enter(&sdzp->z_name_lock, RW_READER);
	}

	if (cmp < 0) {
		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
		    ZEXISTS | zflg, NULL, NULL);
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
	} else {
		terr = zfs_dirent_lock(&tdl,
		    tdzp, tnm, &tzp, zflg, NULL, NULL);
		serr = zfs_dirent_lock(&sdl,
		    sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
		    NULL, NULL);
	}

	if (serr) {
		/*
		 * Source entry invalid or not there.
		 */
		if (!terr) {
			zfs_dirent_unlock(tdl);
			if (tzp)
				zrele(tzp);
		}

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(snm, "..") == 0)
			serr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (serr);
	}
	if (terr) {
		zfs_dirent_unlock(sdl);
		zrele(szp);

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (strcmp(tnm, "..") == 0)
			terr = EINVAL;
		ZFS_EXIT(zfsvfs);
		return (terr);
	}

	/*
	 * If project inheritance is in use, i.e. the directory has
	 * ZFS_PROJINHERIT set, then its descendant directories will
	 * inherit not only the project ID but also the ZFS_PROJINHERIT
	 * flag.  In that case we only allow renames into our tree when
	 * the project IDs are the same.
	 */
	if (tdzp->z_pflags & ZFS_PROJINHERIT &&
	    tdzp->z_projid != szp->z_projid) {
		error = SET_ERROR(EXDEV);
		goto out;
	}

	/*
	 * Must have write access at the source to remove the old entry
	 * and write access at the target to create the new entry.
	 * Note that if target and source are the same, this can be
	 * done in a single check.
	 */
	if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
		goto out;

	if (S_ISDIR(ZTOI(szp)->i_mode)) {
		/*
		 * Check to make sure rename is valid.
		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
		 */
		if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
			goto out;
	}

	/*
	 * Does target exist?
	 */
	if (tzp) {
		/*
		 * Source and target must be the same type.
		 */
		if (S_ISDIR(ZTOI(szp)->i_mode)) {
			if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
				error = SET_ERROR(ENOTDIR);
				goto out;
			}
		} else {
			if (S_ISDIR(ZTOI(tzp)->i_mode)) {
				error = SET_ERROR(EISDIR);
				goto out;
			}
		}
		/*
		 * POSIX dictates that when the source and target
		 * entries refer to the same file object, rename
		 * must do nothing and exit without error.
		 */
		if (szp->z_id == tzp->z_id) {
			error = 0;
			goto out;
		}
	}
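	/*
	 * The assignment below follows the non-blocking pattern used
	 * while directory locks are held: try TXG_NOWAIT, and on
	 * ERESTART drop everything, wait with dmu_tx_wait(), and retry
	 * from "top" with TXG_NOTHROTTLE added.  A minimal sketch of
	 * that pattern in isolation (locals and label illustrative):
	 */
#if 0
	boolean_t retried = B_FALSE;
retry:
	tx = dmu_tx_create(zfsvfs->z_os);
	/* ... dmu_tx_hold_*() the objects this operation may modify ... */
	error = dmu_tx_assign(tx, (retried ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error == ERESTART) {
		retried = B_TRUE;
		dmu_tx_wait(tx);	/* wait with no locks held */
		dmu_tx_abort(tx);
		goto retry;
	}
#endif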
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
	if (sdzp != tdzp) {
		dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tdzp);
	}
	if (tzp) {
		dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, tzp);
	}

	zfs_sa_upgrade_txholds(tx, szp);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		if (zl != NULL)
			zfs_rename_unlock(&zl);
		zfs_dirent_unlock(sdl);
		zfs_dirent_unlock(tdl);

		if (sdzp == tdzp)
			rw_exit(&sdzp->z_name_lock);

		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			zrele(szp);
			if (tzp)
				zrele(tzp);
			goto top;
		}
		dmu_tx_abort(tx);
		zrele(szp);
		if (tzp)
			zrele(tzp);
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (tzp)	/* Attempt to remove the existing target */
		error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);

	if (error == 0) {
		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
		if (error == 0) {
			szp->z_pflags |= ZFS_AV_MODIFIED;
			if (tdzp->z_pflags & ZFS_PROJINHERIT)
				szp->z_pflags |= ZFS_PROJINHERIT;

			error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
			    (void *)&szp->z_pflags, sizeof (uint64_t), tx);
			ASSERT0(error);

			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
			if (error == 0) {
				zfs_log_rename(zilog, tx, TX_RENAME |
				    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
				    sdl->dl_name, tdzp, tdl->dl_name, szp);
			} else {
				/*
				 * At this point, we have successfully created
				 * the target name, but have failed to remove
				 * the source name.  Since the create was done
				 * with the ZRENAMING flag, there are
				 * complications; for one, the link count is
				 * wrong.  The easiest way to deal with this
				 * is to remove the newly created target, and
				 * return the original error.  This must
				 * succeed; fortunately, it is very unlikely to
				 * fail, since we just created it.
				 */
				VERIFY3U(zfs_link_destroy(tdl, szp, tx,
				    ZRENAMING, NULL), ==, 0);
			}
		} else {
			/*
			 * If we had removed the existing target, the
			 * subsequent call to zfs_link_create() to add back
			 * the same entry, but for the new dnode (szp),
			 * should not fail.
			 */
			ASSERT(tzp == NULL);
		}
	}

	dmu_tx_commit(tx);
out:
	if (zl != NULL)
		zfs_rename_unlock(&zl);

	zfs_dirent_unlock(sdl);
	zfs_dirent_unlock(tdl);

	zfs_znode_update_vfs(sdzp);
	if (sdzp == tdzp)
		rw_exit(&sdzp->z_name_lock);

	if (sdzp != tdzp)
		zfs_znode_update_vfs(tdzp);

	zfs_znode_update_vfs(szp);
	zrele(szp);
	if (tzp) {
		zfs_znode_update_vfs(tzp);
		zrele(tzp);
	}

	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Insert the indicated symbolic reference entry into the directory.
 *
 * IN:	dzp	- Directory to contain new symbolic link.
 *	name	- Name of directory entry in dip.
 *	vap	- Attributes of new entry.
 *	link	- Name for new symlink entry.
 *	cr	- credentials of caller.
 *	flags	- case flags
 *
 * OUT:	zpp	- Znode for new symbolic link.
 *
 * RETURN:	0 on success, error code on failure.
3037 * 3038 * Timestamps: 3039 * dip - ctime|mtime updated 3040 */ 3041 /*ARGSUSED*/ 3042 int 3043 zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link, 3044 znode_t **zpp, cred_t *cr, int flags) 3045 { 3046 znode_t *zp; 3047 zfs_dirlock_t *dl; 3048 dmu_tx_t *tx; 3049 zfsvfs_t *zfsvfs = ZTOZSB(dzp); 3050 zilog_t *zilog; 3051 uint64_t len = strlen(link); 3052 int error; 3053 int zflg = ZNEW; 3054 zfs_acl_ids_t acl_ids; 3055 boolean_t fuid_dirtied; 3056 uint64_t txtype = TX_SYMLINK; 3057 boolean_t waited = B_FALSE; 3058 3059 ASSERT(S_ISLNK(vap->va_mode)); 3060 3061 if (name == NULL) 3062 return (SET_ERROR(EINVAL)); 3063 3064 ZFS_ENTER(zfsvfs); 3065 ZFS_VERIFY_ZP(dzp); 3066 zilog = zfsvfs->z_log; 3067 3068 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 3069 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3070 ZFS_EXIT(zfsvfs); 3071 return (SET_ERROR(EILSEQ)); 3072 } 3073 if (flags & FIGNORECASE) 3074 zflg |= ZCILOOK; 3075 3076 if (len > MAXPATHLEN) { 3077 ZFS_EXIT(zfsvfs); 3078 return (SET_ERROR(ENAMETOOLONG)); 3079 } 3080 3081 if ((error = zfs_acl_ids_create(dzp, 0, 3082 vap, cr, NULL, &acl_ids)) != 0) { 3083 ZFS_EXIT(zfsvfs); 3084 return (error); 3085 } 3086 top: 3087 *zpp = NULL; 3088 3089 /* 3090 * Attempt to lock directory; fail if entry already exists. 3091 */ 3092 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); 3093 if (error) { 3094 zfs_acl_ids_free(&acl_ids); 3095 ZFS_EXIT(zfsvfs); 3096 return (error); 3097 } 3098 3099 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) { 3100 zfs_acl_ids_free(&acl_ids); 3101 zfs_dirent_unlock(dl); 3102 ZFS_EXIT(zfsvfs); 3103 return (error); 3104 } 3105 3106 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) { 3107 zfs_acl_ids_free(&acl_ids); 3108 zfs_dirent_unlock(dl); 3109 ZFS_EXIT(zfsvfs); 3110 return (SET_ERROR(EDQUOT)); 3111 } 3112 tx = dmu_tx_create(zfsvfs->z_os); 3113 fuid_dirtied = zfsvfs->z_fuid_dirty; 3114 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); 3115 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3116 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 3117 ZFS_SA_BASE_ATTR_SIZE + len); 3118 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); 3119 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 3120 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 3121 acl_ids.z_aclp->z_acl_bytes); 3122 } 3123 if (fuid_dirtied) 3124 zfs_fuid_txhold(zfsvfs, tx); 3125 error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); 3126 if (error) { 3127 zfs_dirent_unlock(dl); 3128 if (error == ERESTART) { 3129 waited = B_TRUE; 3130 dmu_tx_wait(tx); 3131 dmu_tx_abort(tx); 3132 goto top; 3133 } 3134 zfs_acl_ids_free(&acl_ids); 3135 dmu_tx_abort(tx); 3136 ZFS_EXIT(zfsvfs); 3137 return (error); 3138 } 3139 3140 /* 3141 * Create a new object for the symlink. 3142 * for version 4 ZPL datsets the symlink will be an SA attribute 3143 */ 3144 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 3145 3146 if (fuid_dirtied) 3147 zfs_fuid_sync(zfsvfs, tx); 3148 3149 mutex_enter(&zp->z_lock); 3150 if (zp->z_is_sa) 3151 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs), 3152 link, len, tx); 3153 else 3154 zfs_sa_symlink(zp, link, len, tx); 3155 mutex_exit(&zp->z_lock); 3156 3157 zp->z_size = len; 3158 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), 3159 &zp->z_size, sizeof (zp->z_size), tx); 3160 /* 3161 * Insert the new object into the directory. 
/*
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by ip.
 *
 * IN:	ip	- inode of symbolic link
 *	uio	- structure to contain the link path.
 *	cr	- credentials of caller.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 */
/* ARGSUSED */
int
zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	mutex_enter(&zp->z_lock);
	if (zp->z_is_sa)
		error = sa_lookup_uio(zp->z_sa_hdl,
		    SA_ZPL_SYMLINK(zfsvfs), uio);
	else
		error = zfs_sa_readlink(zp, uio);
	mutex_exit(&zp->z_lock);

	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Insert a new entry into directory tdzp referencing szp.
 *
 * IN:	tdzp	- Directory to contain new entry.
 *	szp	- znode of new entry.
 *	name	- name of new entry.
 *	cr	- credentials of caller.
 *	flags	- case flags.
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	tdzp - ctime|mtime updated
 *	 szp - ctime updated
 */
/* ARGSUSED */
int
zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
    int flags)
{
	struct inode *sip = ZTOI(szp);
	znode_t		*tzp;
	zfsvfs_t	*zfsvfs = ZTOZSB(tdzp);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uint64_t	parent;
	uid_t		owner;
	boolean_t	waited = B_FALSE;
	boolean_t	is_tmpfile = 0;
	uint64_t	txg;
#ifdef HAVE_TMPFILE
	is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
	ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(tdzp);
	zilog = zfsvfs->z_log;

	/*
	 * POSIX dictates that we return EPERM here.
	 * Better choices include ENOTSUP or EISDIR.
	 */
	if (S_ISDIR(sip->i_mode)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	ZFS_VERIFY_ZP(szp);

	/*
	 * If project inheritance is in use, i.e. the directory has
	 * ZFS_PROJINHERIT set, then its descendant directories will
	 * inherit not only the project ID but also the ZFS_PROJINHERIT
	 * flag.  In that case we only allow hard link creation in our
	 * tree when the project IDs are the same.
	 */
	if (tdzp->z_pflags & ZFS_PROJINHERIT &&
	    tdzp->z_projid != szp->z_projid) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	/*
	 * We check i_sb because snapshots and the ctldir must have different
	 * super blocks.
 */
	if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EXDEV));
	}

	/* Prevent links to .zfs/shares files */

	if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (uint64_t))) != 0) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	if (parent == zfsvfs->z_shares_dir) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if (zfsvfs->z_utf8 && u8_validate(name,
	    strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	/*
	 * We do not support links between attributes and non-attributes
	 * because of the potential security risk of creating links
	 * into "normal" file space in order to circumvent restrictions
	 * imposed in attribute space.
	 */
	if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EINVAL));
	}

	owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
	    cr, ZFS_OWNER);
	if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
		ZFS_EXIT(zfsvfs);
		return (SET_ERROR(EPERM));
	}

	if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

top:
	/*
	 * Attempt to lock directory; fail if entry already exists.
	 */
	error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
	if (error) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
	if (is_tmpfile)
		dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	zfs_sa_upgrade_txholds(tx, szp);
	zfs_sa_upgrade_txholds(tx, tdzp);
	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		ZFS_EXIT(zfsvfs);
		return (error);
	}
	/* unmark z_unlinked so zfs_link_create() will not reject */
	if (is_tmpfile)
		szp->z_unlinked = B_FALSE;
	error = zfs_link_create(dl, szp, tx, 0);

	if (error == 0) {
		uint64_t txtype = TX_LINK;
		/*
		 * A tmpfile is created to live in z_unlinkedobj, so we
		 * remove it from there.  We also skip ZIL logging,
		 * because all previous file operations on the tmpfile
		 * are ignored by the ZIL.  Instead we always wait for
		 * the txg to sync to make sure all previous operations
		 * are sync safe.
 */
		if (is_tmpfile) {
			VERIFY(zap_remove_int(zfsvfs->z_os,
			    zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
		} else {
			if (flags & FIGNORECASE)
				txtype |= TX_CI;
			zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
		}
	} else if (is_tmpfile) {
		/* restore z_unlinked since linking failed */
		szp->z_unlinked = B_TRUE;
	}
	txg = dmu_tx_get_txg(tx);
	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zilog, 0);

	if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED)
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);

	zfs_znode_update_vfs(tdzp);
	zfs_znode_update_vfs(szp);
	ZFS_EXIT(zfsvfs);
	return (error);
}

static void
zfs_putpage_commit_cb(void *arg)
{
	struct page *pp = arg;

	ClearPageError(pp);
	end_page_writeback(pp);
}

/*
 * Push a page out to disk; once the page is on stable storage the
 * registered commit callback will be run as notification of completion.
 *
 * IN:	ip	- page mapped for inode.
 *	pp	- page to push (page is locked)
 *	wbc	- writeback control data
 *
 * RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	loff_t		offset;
	loff_t		pgoff;
	unsigned int	pglen;
	dmu_tx_t	*tx;
	caddr_t		va;
	int		err = 0;
	uint64_t	mtime[2], ctime[2];
	sa_bulk_attr_t	bulk[3];
	int		cnt = 0;
	struct address_space *mapping;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	ASSERT(PageLocked(pp));

	pgoff = page_offset(pp);	/* Page byte-offset in file */
	offset = i_size_read(ip);	/* File length in bytes */
	pglen = MIN(PAGE_SIZE,		/* Page length in bytes */
	    P2ROUNDUP(offset, PAGE_SIZE)-pgoff);

	/* Page is beyond end of file */
	if (pgoff >= offset) {
		unlock_page(pp);
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	/* Truncate page length to end of file */
	if (pgoff + pglen > offset)
		pglen = offset - pgoff;

#if 0
	/*
	 * FIXME: Allow mmap writes past its quota.  The correct fix
	 * is to register a page_mkwrite() handler to count the page
	 * against its quota when it is about to be dirtied.
	 */
	if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
	    KUID_TO_SUID(ip->i_uid)) ||
	    zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
	    KGID_TO_SGID(ip->i_gid)) ||
	    (zp->z_projid != ZFS_DEFAULT_PROJID &&
	    zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
	    zp->z_projid))) {
		err = EDQUOT;
	}
#endif

	/*
	 * The ordering here is critical and must adhere to the following
	 * rules in order to avoid deadlocking in either zfs_read() or
	 * zfs_free_range() due to a lock inversion.
	 *
	 * 1) The page must be unlocked prior to acquiring the range lock.
	 *    This is critical because zfs_read() calls find_lock_page()
	 *    which may block on the page lock while holding the range lock.
	 *
	 * 2) Before setting or clearing write back on a page the range lock
	 *    must be held in order to prevent a lock inversion with the
	 *    zfs_free_range() function.
	 *
	 * This presents a problem because upon entering this function the
	 * page lock is already held.  To safely acquire the range lock the
	 * page lock must be dropped.  This creates a window where another
	 * process could truncate, invalidate, dirty, or write out the page.
	 *
	 * Therefore, after successfully reacquiring the range and page locks
	 * the current page state is checked.  In the common case everything
	 * will be as is expected and it can be written out.  However, if
	 * the page state has changed it must be handled accordingly.
	 */
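	/*
	 * Condensed shape of the lock juggling performed below, with
	 * the recheck reduced to a single hypothetical predicate
	 * (illustrative only; the real code rechecks the mapping,
	 * dirty state, and writeback state individually):
	 */
#if 0
	unlock_page(pp);				/* rule 1 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, pgoff, pglen, RL_WRITER);
	lock_page(pp);					/* reacquire */
	if (page_state_changed(pp)) {			/* hypothetical */
		unlock_page(pp);
		zfs_rangelock_exit(lr);
		return (0);				/* nothing to do */
	}
#endif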
3518 * 3519 * This presents a problem because upon entering this function the 3520 * page lock is already held. To safely acquire the range lock the 3521 * page lock must be dropped. This creates a window where another 3522 * process could truncate, invalidate, dirty, or write out the page. 3523 * 3524 * Therefore, after successfully reacquiring the range and page locks 3525 * the current page state is checked. In the common case everything 3526 * will be as is expected and it can be written out. However, if 3527 * the page state has changed it must be handled accordingly. 3528 */ 3529 mapping = pp->mapping; 3530 redirty_page_for_writepage(wbc, pp); 3531 unlock_page(pp); 3532 3533 zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock, 3534 pgoff, pglen, RL_WRITER); 3535 lock_page(pp); 3536 3537 /* Page mapping changed or it was no longer dirty, we're done */ 3538 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) { 3539 unlock_page(pp); 3540 zfs_rangelock_exit(lr); 3541 ZFS_EXIT(zfsvfs); 3542 return (0); 3543 } 3544 3545 /* Another process started write block if required */ 3546 if (PageWriteback(pp)) { 3547 unlock_page(pp); 3548 zfs_rangelock_exit(lr); 3549 3550 if (wbc->sync_mode != WB_SYNC_NONE) { 3551 if (PageWriteback(pp)) 3552 wait_on_page_bit(pp, PG_writeback); 3553 } 3554 3555 ZFS_EXIT(zfsvfs); 3556 return (0); 3557 } 3558 3559 /* Clear the dirty flag the required locks are held */ 3560 if (!clear_page_dirty_for_io(pp)) { 3561 unlock_page(pp); 3562 zfs_rangelock_exit(lr); 3563 ZFS_EXIT(zfsvfs); 3564 return (0); 3565 } 3566 3567 /* 3568 * Counterpart for redirty_page_for_writepage() above. This page 3569 * was in fact not skipped and should not be counted as if it were. 3570 */ 3571 wbc->pages_skipped--; 3572 set_page_writeback(pp); 3573 unlock_page(pp); 3574 3575 tx = dmu_tx_create(zfsvfs->z_os); 3576 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen); 3577 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 3578 zfs_sa_upgrade_txholds(tx, zp); 3579 3580 err = dmu_tx_assign(tx, TXG_NOWAIT); 3581 if (err != 0) { 3582 if (err == ERESTART) 3583 dmu_tx_wait(tx); 3584 3585 dmu_tx_abort(tx); 3586 __set_page_dirty_nobuffers(pp); 3587 ClearPageError(pp); 3588 end_page_writeback(pp); 3589 zfs_rangelock_exit(lr); 3590 ZFS_EXIT(zfsvfs); 3591 return (err); 3592 } 3593 3594 va = kmap(pp); 3595 ASSERT3U(pglen, <=, PAGE_SIZE); 3596 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx); 3597 kunmap(pp); 3598 3599 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); 3600 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); 3601 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL, 3602 &zp->z_pflags, 8); 3603 3604 /* Preserve the mtime and ctime provided by the inode */ 3605 ZFS_TIME_ENCODE(&ip->i_mtime, mtime); 3606 ZFS_TIME_ENCODE(&ip->i_ctime, ctime); 3607 zp->z_atime_dirty = B_FALSE; 3608 zp->z_seq++; 3609 3610 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx); 3611 3612 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0, 3613 zfs_putpage_commit_cb, pp); 3614 dmu_tx_commit(tx); 3615 3616 zfs_rangelock_exit(lr); 3617 3618 if (wbc->sync_mode != WB_SYNC_NONE) { 3619 /* 3620 * Note that this is rarely called under writepages(), because 3621 * writepages() normally handles the entire commit for 3622 * performance reasons. 3623 */ 3624 zil_commit(zfsvfs->z_log, zp->z_id); 3625 } 3626 3627 ZFS_EXIT(zfsvfs); 3628 return (err); 3629 } 3630 3631 /* 3632 * Update the system attributes when the inode has been dirtied. 
/*
 * Update the system attributes when the inode has been dirtied.  For the
 * moment we only update the mode, atime, mtime, and ctime.
 */
int
zfs_dirty_inode(struct inode *ip, int flags)
{
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	dmu_tx_t	*tx;
	uint64_t	mode, atime[2], mtime[2], ctime[2];
	sa_bulk_attr_t	bulk[4];
	int		error = 0;
	int		cnt = 0;

	if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
		return (0);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

#ifdef I_DIRTY_TIME
	/*
	 * This is the lazytime semantic introduced in Linux 4.0.
	 * This function will only be called with this flag from
	 * update_time() when lazytime is set.  (Note, I_DIRTY_SYNC
	 * will also be set if not lazytime.)  Fortunately mtime and
	 * ctime are managed within ZFS itself, so we only need to
	 * dirty atime.
	 */
	if (flags == I_DIRTY_TIME) {
		zp->z_atime_dirty = B_TRUE;
		goto out;
	}
#endif

	tx = dmu_tx_create(zfsvfs->z_os);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);

	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

	mutex_enter(&zp->z_lock);
	zp->z_atime_dirty = B_FALSE;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	/* Preserve the mode, mtime and ctime provided by the inode */
	ZFS_TIME_ENCODE(&ip->i_atime, atime);
	ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
	ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
	mode = ip->i_mode;

	zp->z_mode = mode;

	error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
	mutex_exit(&zp->z_lock);

	dmu_tx_commit(tx);
out:
	ZFS_EXIT(zfsvfs);
	return (error);
}

/*ARGSUSED*/
void
zfs_inactive(struct inode *ip)
{
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint64_t atime[2];
	int error;
	int need_unlock = 0;

	/* Only read lock if we haven't already write locked, e.g. rollback */
	if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
		need_unlock = 1;
		rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	}
	if (zp->z_sa_hdl == NULL) {
		if (need_unlock)
			rw_exit(&zfsvfs->z_teardown_inactive_lock);
		return;
	}

	if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
		} else {
			ZFS_TIME_ENCODE(&ip->i_atime, atime);
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&atime, sizeof (atime), tx);
			zp->z_atime_dirty = B_FALSE;
			mutex_exit(&zp->z_lock);
			dmu_tx_commit(tx);
		}
	}

	zfs_zinactive(zp);
	if (need_unlock)
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
}

/*
 * Fill pages with data from the disk.
3749 */ 3750 static int 3751 zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages) 3752 { 3753 znode_t *zp = ITOZ(ip); 3754 zfsvfs_t *zfsvfs = ITOZSB(ip); 3755 objset_t *os; 3756 struct page *cur_pp; 3757 u_offset_t io_off, total; 3758 size_t io_len; 3759 loff_t i_size; 3760 unsigned page_idx; 3761 int err; 3762 3763 os = zfsvfs->z_os; 3764 io_len = nr_pages << PAGE_SHIFT; 3765 i_size = i_size_read(ip); 3766 io_off = page_offset(pl[0]); 3767 3768 if (io_off + io_len > i_size) 3769 io_len = i_size - io_off; 3770 3771 /* 3772 * Iterate over list of pages and read each page individually. 3773 */ 3774 page_idx = 0; 3775 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { 3776 caddr_t va; 3777 3778 cur_pp = pl[page_idx++]; 3779 va = kmap(cur_pp); 3780 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va, 3781 DMU_READ_PREFETCH); 3782 kunmap(cur_pp); 3783 if (err) { 3784 /* convert checksum errors into IO errors */ 3785 if (err == ECKSUM) 3786 err = SET_ERROR(EIO); 3787 return (err); 3788 } 3789 } 3790 3791 return (0); 3792 } 3793 3794 /* 3795 * Uses zfs_fillpage to read data from the file and fill the pages. 3796 * 3797 * IN: ip - inode of file to get data from. 3798 * pl - list of pages to read 3799 * nr_pages - number of pages to read 3800 * 3801 * RETURN: 0 on success, error code on failure. 3802 * 3803 * Timestamps: 3804 * vp - atime updated 3805 */ 3806 /* ARGSUSED */ 3807 int 3808 zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages) 3809 { 3810 znode_t *zp = ITOZ(ip); 3811 zfsvfs_t *zfsvfs = ITOZSB(ip); 3812 int err; 3813 3814 if (pl == NULL) 3815 return (0); 3816 3817 ZFS_ENTER(zfsvfs); 3818 ZFS_VERIFY_ZP(zp); 3819 3820 err = zfs_fillpage(ip, pl, nr_pages); 3821 3822 ZFS_EXIT(zfsvfs); 3823 return (err); 3824 } 3825 3826 /* 3827 * Check ZFS specific permissions to memory map a section of a file. 3828 * 3829 * IN: ip - inode of the file to mmap 3830 * off - file offset 3831 * addrp - start address in memory region 3832 * len - length of memory region 3833 * vm_flags- address flags 3834 * 3835 * RETURN: 0 if success 3836 * error code if failure 3837 */ 3838 /*ARGSUSED*/ 3839 int 3840 zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len, 3841 unsigned long vm_flags) 3842 { 3843 znode_t *zp = ITOZ(ip); 3844 zfsvfs_t *zfsvfs = ITOZSB(ip); 3845 3846 ZFS_ENTER(zfsvfs); 3847 ZFS_VERIFY_ZP(zp); 3848 3849 if ((vm_flags & VM_WRITE) && (zp->z_pflags & 3850 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) { 3851 ZFS_EXIT(zfsvfs); 3852 return (SET_ERROR(EPERM)); 3853 } 3854 3855 if ((vm_flags & (VM_READ | VM_EXEC)) && 3856 (zp->z_pflags & ZFS_AV_QUARANTINED)) { 3857 ZFS_EXIT(zfsvfs); 3858 return (SET_ERROR(EACCES)); 3859 } 3860 3861 if (off < 0 || len > MAXOFFSET_T - off) { 3862 ZFS_EXIT(zfsvfs); 3863 return (SET_ERROR(ENXIO)); 3864 } 3865 3866 ZFS_EXIT(zfsvfs); 3867 return (0); 3868 } 3869 3870 /* 3871 * Free or allocate space in a file. Currently, this function only 3872 * supports the `F_FREESP' command. However, this command is somewhat 3873 * misnamed, as its functionality includes the ability to allocate as 3874 * well as free space. 3875 * 3876 * IN: zp - znode of file to free data in. 3877 * cmd - action to take (only F_FREESP supported). 3878 * bfp - section of file to free/alloc. 3879 * flag - current file open mode flags. 3880 * offset - current file offset. 3881 * cr - credentials of caller. 3882 * 3883 * RETURN: 0 on success, error code on failure. 
3884 * 3885 * Timestamps: 3886 * zp - ctime|mtime updated 3887 */ 3888 /* ARGSUSED */ 3889 int 3890 zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag, 3891 offset_t offset, cred_t *cr) 3892 { 3893 zfsvfs_t *zfsvfs = ZTOZSB(zp); 3894 uint64_t off, len; 3895 int error; 3896 3897 ZFS_ENTER(zfsvfs); 3898 ZFS_VERIFY_ZP(zp); 3899 3900 if (cmd != F_FREESP) { 3901 ZFS_EXIT(zfsvfs); 3902 return (SET_ERROR(EINVAL)); 3903 } 3904 3905 /* 3906 * Callers might not be able to detect properly that we are read-only, 3907 * so check it explicitly here. 3908 */ 3909 if (zfs_is_readonly(zfsvfs)) { 3910 ZFS_EXIT(zfsvfs); 3911 return (SET_ERROR(EROFS)); 3912 } 3913 3914 if (bfp->l_len < 0) { 3915 ZFS_EXIT(zfsvfs); 3916 return (SET_ERROR(EINVAL)); 3917 } 3918 3919 /* 3920 * Permissions aren't checked on Solaris because on this OS 3921 * zfs_space() can only be called with an opened file handle. 3922 * On Linux we can get here through truncate_range() which 3923 * operates directly on inodes, so we need to check access rights. 3924 */ 3925 if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) { 3926 ZFS_EXIT(zfsvfs); 3927 return (error); 3928 } 3929 3930 off = bfp->l_start; 3931 len = bfp->l_len; /* 0 means from off to end of file */ 3932 3933 error = zfs_freesp(zp, off, len, flag, TRUE); 3934 3935 ZFS_EXIT(zfsvfs); 3936 return (error); 3937 } 3938 3939 /*ARGSUSED*/ 3940 int 3941 zfs_fid(struct inode *ip, fid_t *fidp) 3942 { 3943 znode_t *zp = ITOZ(ip); 3944 zfsvfs_t *zfsvfs = ITOZSB(ip); 3945 uint32_t gen; 3946 uint64_t gen64; 3947 uint64_t object = zp->z_id; 3948 zfid_short_t *zfid; 3949 int size, i, error; 3950 3951 ZFS_ENTER(zfsvfs); 3952 ZFS_VERIFY_ZP(zp); 3953 3954 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), 3955 &gen64, sizeof (uint64_t))) != 0) { 3956 ZFS_EXIT(zfsvfs); 3957 return (error); 3958 } 3959 3960 gen = (uint32_t)gen64; 3961 3962 size = SHORT_FID_LEN; 3963 3964 zfid = (zfid_short_t *)fidp; 3965 3966 zfid->zf_len = size; 3967 3968 for (i = 0; i < sizeof (zfid->zf_object); i++) 3969 zfid->zf_object[i] = (uint8_t)(object >> (8 * i)); 3970 3971 /* Must have a non-zero generation number to distinguish from .zfs */ 3972 if (gen == 0) 3973 gen = 1; 3974 for (i = 0; i < sizeof (zfid->zf_gen); i++) 3975 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i)); 3976 3977 ZFS_EXIT(zfsvfs); 3978 return (0); 3979 } 3980 3981 #if defined(_KERNEL) 3982 EXPORT_SYMBOL(zfs_open); 3983 EXPORT_SYMBOL(zfs_close); 3984 EXPORT_SYMBOL(zfs_lookup); 3985 EXPORT_SYMBOL(zfs_create); 3986 EXPORT_SYMBOL(zfs_tmpfile); 3987 EXPORT_SYMBOL(zfs_remove); 3988 EXPORT_SYMBOL(zfs_mkdir); 3989 EXPORT_SYMBOL(zfs_rmdir); 3990 EXPORT_SYMBOL(zfs_readdir); 3991 EXPORT_SYMBOL(zfs_getattr_fast); 3992 EXPORT_SYMBOL(zfs_setattr); 3993 EXPORT_SYMBOL(zfs_rename); 3994 EXPORT_SYMBOL(zfs_symlink); 3995 EXPORT_SYMBOL(zfs_readlink); 3996 EXPORT_SYMBOL(zfs_link); 3997 EXPORT_SYMBOL(zfs_inactive); 3998 EXPORT_SYMBOL(zfs_space); 3999 EXPORT_SYMBOL(zfs_fid); 4000 EXPORT_SYMBOL(zfs_getpage); 4001 EXPORT_SYMBOL(zfs_putpage); 4002 EXPORT_SYMBOL(zfs_dirty_inode); 4003 EXPORT_SYMBOL(zfs_map); 4004 4005 /* BEGIN CSTYLED */ 4006 module_param(zfs_delete_blocks, ulong, 0644); 4007 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async"); 4008 /* END CSTYLED */ 4009 4010 #endif 4011