Lines matching defs: zp (OpenZFS module/zfs/zfs_vnops.c)
97 zfs_fsync(znode_t *zp, int syncflag, cred_t *cr)
100 zfsvfs_t *zfsvfs = ZTOZSB(zp);
103 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
105 atomic_inc_32(&zp->z_sync_writes_cnt);
106 zil_commit(zfsvfs->z_log, zp->z_id);
107 atomic_dec_32(&zp->z_sync_writes_cnt);
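zfs_fsync (lines 97-107 above) brackets the ZIL commit with the atomic counter z_sync_writes_cnt, so concurrent code paths (page writeback, for one) can cheaply detect an in-flight fsync. A minimal user-space sketch of the same bracketing pattern in C11 atomics; the names sync_writes_cnt, commit_log, and file_sync are illustrative stand-ins, not the OpenZFS API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint32_t sync_writes_cnt;

    static void
    commit_log(void)
    {
            /* stand-in for zil_commit(): flush log records to stable storage */
    }

    /* The zfs_fsync pattern: raise the counter, commit, lower it. */
    static void
    file_sync(void)
    {
            atomic_fetch_add(&sync_writes_cnt, 1);
            commit_log();
            atomic_fetch_sub(&sync_writes_cnt, 1);
    }

    /* Other paths poll the counter to see whether an fsync is in flight. */
    static bool
    sync_in_progress(void)
    {
            return (atomic_load(&sync_writes_cnt) != 0);
    }

    int
    main(void)
    {
            file_sync();
            printf("sync in progress: %d\n", sync_in_progress());
            return (0);
    }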
120 zfs_holey_common(znode_t *zp, ulong_t cmd, loff_t *off)
128 file_sz = zp->z_size;
139 if (zn_has_cached_data(zp, 0, file_sz - 1))
140 zn_flush_cached_data(zp, B_TRUE);
142 lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_READER);
143 error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
176 zfs_holey(znode_t *zp, ulong_t cmd, loff_t *off)
178 zfsvfs_t *zfsvfs = ZTOZSB(zp);
181 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
184 error = zfs_holey_common(zp, cmd, off);
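zfs_holey and zfs_holey_common (lines 120-184) implement SEEK_HOLE/SEEK_DATA: cached data is flushed first (lines 139-140) so dmu_offset_next sees current on-disk state, and the scan runs under a reader range lock over the whole file (line 142). From user space the same facility is reached through lseek(2); a small probe, assuming a system that supports SEEK_DATA/SEEK_HOLE:

    #define _GNU_SOURCE     /* SEEK_DATA/SEEK_HOLE on Linux */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
            if (argc != 2)
                    return (1);
            int fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return (1);

            /* First data byte at or after offset 0, then the hole after it. */
            off_t data = lseek(fd, 0, SEEK_DATA);
            if (data >= 0) {
                    off_t hole = lseek(fd, data, SEEK_HOLE);
                    printf("data at %lld, next hole at %lld\n",
                        (long long)data, (long long)hole);
            }
            close(fd);
            return (0);
    }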
192 zfs_access(znode_t *zp, int mode, int flag, cred_t *cr)
194 zfsvfs_t *zfsvfs = ZTOZSB(zp);
197 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
202 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
205 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr,
210 error = zfs_zaccess_rwx(zp, mode, flag, cr, zfs_init_idmap);
212 error = zfs_zaccess_rwx(zp, mode, flag, cr, NULL);
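zfs_access (lines 192-212) dispatches between an ACE-mask check (zfs_zaccess) and a conventional rwx check (zfs_zaccess_rwx); the differing last arguments (zfs_init_idmap vs. NULL) reflect per-platform idmapped-mount plumbing. The sketch below models only the classic owner/group/other mode-bit fallback, not the NFSv4 ACL walk that zfs_zaccess actually performs:

    #include <stdbool.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /*
     * Simplified POSIX-style check: pick the owner, group, or other
     * triplet and test the requested bits (4/2/1 = r/w/x).
     */
    static bool
    mode_allows(mode_t mode, uid_t fuid, gid_t fgid, uid_t uid, gid_t gid,
        int want)
    {
            int shift;

            if (uid == fuid)
                    shift = 6;              /* owner triplet */
            else if (gid == fgid)
                    shift = 3;              /* group triplet */
            else
                    shift = 0;              /* other triplet */

            return (((mode >> shift) & want) == want);
    }

    int
    main(void)
    {
            /* owner of a 0640 file asking for write access: allowed */
            return (mode_allows(0640, 1000, 100, 1000, 100, 2) ? 0 : 1);
    }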
236 zfs_setup_direct(struct znode *zp, zfs_uio_t *uio, zfs_uio_rw_t rw,
239 zfsvfs_t *zfsvfs = ZTOZSB(zp);
245 zn_has_cached_data(zp, zfs_uio_offset(uio),
256 if ((rw == UIO_WRITE && zfs_uio_resid(uio) >= zp->z_blksz) ||
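zfs_setup_direct (lines 236-256) gates Direct I/O: it consults the page cache for the range (line 245) and, for writes, weighs the transfer size against the file block size (line 256). A sketch of an eligibility test in that shape; dio_eligible and its inputs are illustrative, and real ZFS eligibility also depends on alignment rules and dataset properties not modeled here:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool
    dio_eligible(bool is_write, uint64_t off, uint64_t len, uint64_t blksz,
        bool range_is_cached)
    {
            if (range_is_cached)
                    return (false);         /* stay coherent: buffered I/O */
            if (is_write && len < blksz)
                    return (false);         /* sub-block writes stay buffered */
            return ((off % blksz) == 0 && (len % blksz) == 0);
    }

    int
    main(void)
    {
            printf("%d %d\n",
                dio_eligible(true, 0, 262144, 131072, false),   /* 1 */
                dio_eligible(true, 0, 4096, 131072, false));    /* 0 */
            return (0);
    }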
293 * IN: zp - inode of file to be read from.
308 zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
315 zfsvfs_t *zfsvfs = ZTOZSB(zp);
316 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
319 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
325 if (Z_ISDIR(ZTOTYPE(zp))) {
359 zil_commit(zfsvfs->z_log, zp->z_id);
364 zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
371 if (zfs_uio_offset(uio) >= zp->z_size) {
375 ASSERT(zfs_uio_offset(uio) < zp->z_size);
380 error = zfs_setup_direct(zp, uio, UIO_READ, &ioflag);
389 ssize_t n = MIN(zfs_uio_resid(uio), zp->z_size - zfs_uio_offset(uio));
422 error = mappedread_sf(zp, nbytes, uio);
425 if (zn_has_cached_data(zp, zfs_uio_offset(uio),
427 error = mappedread(zp, nbytes, uio);
429 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
481 if (zn_has_cached_data(zp, zfs_uio_offset(uio),
483 error = mappedread(zp, dio_remaining_resid, uio);
485 error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl), uio,
510 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
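The zfs_read block (lines 293-510) shows the read pipeline: commit the ZIL first when FRSYNC semantics or sync=always demand it (line 359), take a reader range lock (364), clamp the request to EOF (371-389), then copy each chunk either through the page cache (mappedread, when zn_has_cached_data says pages overlap the range) or straight from the DMU (dmu_read_uio_dbuf), and finally stamp atime (510). A simplified model of that loop; every name below is a stand-in:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define CHUNK   (1024 * 1024)   /* per-iteration cap, like zfs_read's nbytes */

    /* Toy backends standing in for mappedread() and dmu_read_uio_dbuf(). */
    static bool
    range_cached(uint64_t off, uint64_t len)
    {
            (void) off; (void) len;
            return (false);
    }

    static int
    read_cached(uint64_t off, uint64_t len, char *dst)
    {
            (void) off;
            memset(dst, 0, len);
            return (0);
    }

    static int
    read_backend(uint64_t off, uint64_t len, char *dst)
    {
            (void) off;
            memset(dst, 0, len);
            return (0);
    }

    static int
    file_read(uint64_t fsize, uint64_t off, uint64_t resid, char *dst)
    {
            if (off >= fsize)
                    return (0);             /* read starts past EOF */
            if (resid > fsize - off)
                    resid = fsize - off;    /* clamp to file size */

            while (resid > 0) {
                    uint64_t n = resid < CHUNK ? resid : CHUNK;
                    int err = range_cached(off, n) ?
                        read_cached(off, n, dst) : read_backend(off, n, dst);
                    if (err != 0)
                            return (err);
                    off += n; dst += n; resid -= n;
            }
            return (0);
    }

    int
    main(void)
    {
            static char buf[8192];
            return (file_read(5000, 4096, sizeof (buf), buf));
    }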
516 zfs_clear_setid_bits_if_necessary(zfsvfs_t *zfsvfs, znode_t *zp, cred_t *cr,
520 const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
536 mutex_enter(&zp->z_acl_lock);
537 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | (S_IXUSR >> 6))) != 0 &&
538 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
539 secpolicy_vnode_setid_retain(zp, cr,
540 ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
543 zp->z_mode &= ~(S_ISUID | S_ISGID);
544 newmode = zp->z_mode;
545 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
548 mutex_exit(&zp->z_acl_lock);
559 va.va_nodeid = zp->z_id;
561 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, &va,
566 mutex_exit(&zp->z_acl_lock);
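zfs_clear_setid_bits_if_necessary (lines 516-566) strips S_ISUID/S_ISGID after an unprivileged write to an executable file, under z_acl_lock, then pushes the new mode through the SA layer and logs a TX_SETATTR record so replay stays consistent. A sketch of just the mode computation (the locking, SA update, and logging are elided):

    #include <stdbool.h>
    #include <sys/stat.h>

    static mode_t
    clear_setid_bits(mode_t mode, bool may_retain)
    {
            bool exec = (mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0;
            bool setid = (mode & (S_ISUID | S_ISGID)) != 0;

            /* Executable, carries setid bits, and policy denies retention. */
            if (exec && setid && !may_retain)
                    mode &= ~(S_ISUID | S_ISGID);
            return (mode);
    }

    int
    main(void)
    {
            /* a 04755 binary loses setuid when retention is denied */
            return (clear_setid_bits(04755, false) == 0755 ? 0 : 1);
    }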
573 * IN: zp - znode of file to be written to.
589 zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
603 zfsvfs_t *zfsvfs = ZTOZSB(zp);
604 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
613 &zp->z_size, 8);
615 &zp->z_pflags, 8);
631 if ((zp->z_pflags & ZFS_IMMUTABLE) ||
632 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & O_APPEND) &&
633 (zfs_uio_offset(uio) < zp->z_size))) {
641 offset_t woff = ioflag & O_APPEND ? zp->z_size : zfs_uio_offset(uio);
650 error = zfs_setup_direct(zp, uio, UIO_WRITE, &ioflag);
675 lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
683 woff = zp->z_size;
698 lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
701 if (zn_rlimit_fsize_uio(zp, uio)) {
718 uint64_t end_size = MAX(zp->z_size, woff + n);
723 const uint64_t uid = KUID_TO_SUID(ZTOUID(zp));
724 const uint64_t gid = KGID_TO_SGID(ZTOGID(zp));
725 const uint64_t projid = zp->z_projid;
772 if (lr->lr_length == UINT64_MAX && zp->z_size <= zp->z_blksz) {
773 if (zp->z_blksz > zfsvfs->z_max_blksz &&
774 !ISP2(zp->z_blksz)) {
780 blksz = 1 << highbit64(zp->z_blksz);
786 blksz = MAX(blksz, zp->z_blksz);
788 blksz = zp->z_blksz;
793 if (n >= blksz && woff >= zp->z_size &&
804 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
830 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
831 dmu_buf_impl_t *db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
835 zfs_sa_upgrade_txholds(tx, zp);
856 zfs_grow_blocksize(zp, blksz, tx);
864 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
869 zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
889 zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
904 sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
911 zfs_clear_setid_bits_if_necessary(zfsvfs, zp,
944 zn_has_cached_data(zp, woff, woff + tx_bytes - 1)) {
945 update_pages(zp, woff, tx_bytes, zfsvfs->z_os);
953 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
954 (void *)&zp->z_size, sizeof (uint64_t), tx);
960 zfs_clear_setid_bits_if_necessary(zfsvfs, zp, cr,
963 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
969 while ((end_size = zp->z_size) < zfs_uio_offset(uio)) {
970 (void) atomic_cas_64(&zp->z_size, end_size,
980 zp->z_size = zfsvfs->z_replay_eof;
982 error1 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
992 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, commit,
1021 zfs_znode_update_vfs(zp);
1042 zil_commit(zilog, zp->z_id);
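After copying data, zfs_write ratchets z_size forward with a compare-and-swap loop (lines 969-970, atomic_cas_64) so racing writers can only grow the size, never move it backward. The same lock-free ratchet expressed with C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t file_size;

    /*
     * Grow file_size to at least new_end: only install new_end while it
     * is still larger than the current value.  On CAS failure, cur is
     * reloaded automatically and the test repeats.
     */
    static void
    grow_size(uint64_t new_end)
    {
            uint64_t cur = atomic_load(&file_size);

            while (cur < new_end &&
                !atomic_compare_exchange_weak(&file_size, &cur, new_end))
                    ;
    }

    int
    main(void)
    {
            grow_size(4096);
            grow_size(1024);        /* no-op: the ratchet never shrinks */
            printf("%llu\n", (unsigned long long)atomic_load(&file_size));
            return (0);
    }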
1052 zfs_getsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
1054 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1058 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
1060 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
1067 zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, cred_t *cr)
1069 zfsvfs_t *zfsvfs = ZTOZSB(zp);
1074 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
1077 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
1101 znode_t *zp;
1115 if (zfs_zget(zfsvfs, object, &zp) != 0)
1117 if (zp->z_unlinked) {
1122 zfs_zrele_async(zp);
1126 if (sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), &zp_gen,
1128 zfs_zrele_async(zp);
1132 zfs_zrele_async(zp);
1138 zgd->zgd_private = zp;
1148 zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock, offset,
1151 if (offset >= zp->z_size) {
1168 size = zp->z_blksz;
1171 zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
1173 if (zp->z_blksz == size)
1179 if (lr->lr_offset >= zp->z_size)
1214 ASSERT3U(dbp->db_size, ==, zp->z_blksz);
1263 znode_t *zp = zgd->zgd_private;
1274 zfs_zrele_async(zp);
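The lines from 1101 onward are the ZIL get-data callback, zfs_get_data, plus its completion hook zfs_get_done (1263 onward): the callback re-fetches the znode by object id (zfs_zget), rejects unlinked files, and compares the generation stored in SA_ZPL_GEN against the one in the log record, because the object id may have been freed and recycled by the time the callback runs (lines 1115-1132). A sketch of that id-plus-generation revalidation pattern with illustrative types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct obj {
            uint64_t id;
            uint64_t gen;
            bool unlinked;
    };

    /* Toy one-entry table standing in for zfs_zget + sa_lookup(SA_ZPL_GEN). */
    static struct obj table[] = { { .id = 7, .gen = 42, .unlinked = false } };

    static struct obj *
    lookup(uint64_t id)
    {
            return (id == table[0].id ? &table[0] : NULL);
    }

    /* A stale (id, gen) pair must be rejected, not resolved. */
    static struct obj *
    revalidate(uint64_t id, uint64_t expected_gen)
    {
            struct obj *o = lookup(id);

            if (o == NULL)
                    return (NULL);
            if (o->unlinked || o->gen != expected_gen)
                    return (NULL);          /* id recycled or object dying */
            return (o);
    }

    int
    main(void)
    {
            printf("fresh: %s, stale: %s\n",
                revalidate(7, 42) ? "ok" : "rejected",
                revalidate(7, 41) ? "ok" : "rejected");
            return (0);
    }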
1743 zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
1758 zfsvfs = ZTOZSB(zp);
1763 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
1777 &zp->z_size, 8);
1784 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1785 db = (dmu_buf_impl_t *)sa_get_db(zp->z_sa_hdl);
1789 zfs_sa_upgrade_txholds(tx, zp);
1797 if (zp->z_blksz < blksz)
1798 zfs_grow_blocksize(zp, blksz, tx);
1800 dmu_brt_clone(zfsvfs->z_os, zp->z_id, off, len, tx, bps, nbps);
1802 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
1804 if (zp->z_size < off + len)
1805 zp->z_size = off + len;
1807 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
1817 zfs_znode_update_vfs(zp);
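zfs_clone_range_replay (lines 1743-1817) re-applies a TX_CLONE_RANGE record during log replay: it rebuilds the tx holds, grows the block size if the record's is larger (1797-1798), applies dmu_brt_clone, and extends z_size only when the cloned range ends past the current EOF (1804-1805), which keeps a replayed record idempotent. A minimal sketch of that EOF rule:

    #include <stdint.h>
    #include <stdio.h>

    /* The size only ever moves forward, so replaying twice is harmless. */
    static uint64_t
    replay_extend(uint64_t cur_size, uint64_t off, uint64_t len)
    {
            return (cur_size < off + len ? off + len : cur_size);
    }

    int
    main(void)
    {
            uint64_t size = 1000;

            size = replay_extend(size, 900, 200);   /* grows to 1100 */
            size = replay_extend(size, 900, 200);   /* replayed: still 1100 */
            printf("%llu\n", (unsigned long long)size);
            return (0);
    }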