/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */


#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <linux/fs.h>
#include <linux/migrate.h>
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
    defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
#include <linux/fadvise.h>
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif

/*
 * When using fallocate(2) to preallocate space, inflate the requested
 * capacity check by 10% to account for the required metadata blocks.
 */
static unsigned int zfs_fallocate_reserve_percent = 110;
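/*
 * Illustrative example (assuming the default value above): with
 * zfs_fallocate_reserve_percent = 110, the mode=0 preallocation path in
 * zpl_fallocate_common() below only succeeds when
 *
 *         len <= f_bavail * (f_bsize * 100 / 110)
 *
 * i.e. roughly 91% of the free space reported by zfs_statvfs(), which leaves
 * about 10% of headroom for metadata. Setting the tunable to 0 disables the
 * emulation entirely and fallocate(mode=0) returns EOPNOTSUPP.
 */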

static int
zpl_open(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;
        fstrans_cookie_t cookie;

        error = generic_file_open(ip, filp);
        if (error)
                return (error);

        crhold(cr);
        cookie = spl_fstrans_mark();
        error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_release(struct inode *ip, struct file *filp)
{
        cred_t *cr = CRED();
        int error;
        fstrans_cookie_t cookie;

        cookie = spl_fstrans_mark();
        if (ITOZ(ip)->z_atime_dirty)
                zfs_mark_inode_dirty(ip);

        crhold(cr);
        error = -zfs_close(ip, filp->f_flags, cr);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_iterate(struct file *filp, struct dir_context *ctx)
{
        cred_t *cr = CRED();
        int error;
        fstrans_cookie_t cookie;

        crhold(cr);
        cookie = spl_fstrans_mark();
        error = -zfs_readdir(file_inode(filp), ctx, cr);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}

static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        znode_t *zp = ITOZ(inode);
        zfsvfs_t *zfsvfs = ITOZSB(inode);
        cred_t *cr = CRED();
        int error;
        fstrans_cookie_t cookie;

        /*
         * The variables z_sync_writes_cnt and z_async_writes_cnt work in
         * tandem so that sync writes can detect if there are any non-sync
         * writes going on and vice-versa. The "vice-versa" part of this logic
         * is located in zfs_putpage() where non-sync writes check if there are
         * any ongoing sync writes. If any sync and non-sync writes overlap,
         * we do a commit to complete the non-sync writes since the latter can
         * potentially take several seconds to complete and thus block sync
         * writes in the upcoming call to filemap_write_and_wait_range().
         */
        atomic_inc_32(&zp->z_sync_writes_cnt);
        /*
         * If the following check does not detect an overlapping non-sync write
         * (say because it's just about to start), then it is guaranteed that
         * the non-sync write will detect this sync write. This is because we
         * always increment z_sync_writes_cnt / z_async_writes_cnt before doing
         * the check on z_async_writes_cnt / z_sync_writes_cnt here and in
         * zfs_putpage() respectively.
         */
        if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
                if ((error = zpl_enter(zfsvfs, FTAG)) != 0) {
                        atomic_dec_32(&zp->z_sync_writes_cnt);
                        return (error);
                }
                zil_commit(zfsvfs->z_log, zp->z_id);
                zpl_exit(zfsvfs, FTAG);
        }

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);

        /*
         * The sync write is not complete yet but we decrement
         * z_sync_writes_cnt since zfs_fsync() increments and decrements
         * it internally. If a non-sync write starts just after the decrement
         * operation but before we call zfs_fsync(), it may not detect this
         * overlapping sync write but it does not matter since we have already
         * gone past filemap_write_and_wait_range() and we won't block due to
         * the non-sync write.
         */
        atomic_dec_32(&zp->z_sync_writes_cnt);

        if (error)
                return (error);

        crhold(cr);
        cookie = spl_fstrans_mark();
        error = -zfs_fsync(zp, datasync, cr);
        spl_fstrans_unmark(cookie);
        crfree(cr);
        ASSERT3S(error, <=, 0);

        return (error);
}
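/*
 * Illustrative sketch of the counter protocol used by zpl_fsync() and
 * zfs_putpage(), restating the comments above: each side increments its own
 * counter before reading the peer's, so for any interleaving of a sync
 * writer S and a non-sync writer A at least one of them observes the other:
 *
 *         S: z_sync_writes_cnt++          A: z_async_writes_cnt++
 *         S: load z_async_writes_cnt      A: load z_sync_writes_cnt
 *
 * If S's load misses A's increment, then A incremented after S's load, hence
 * after S's increment, and A's subsequent load must see S.
 */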

static inline int
zfs_io_flags(struct kiocb *kiocb)
{
        int flags = 0;

#if defined(IOCB_DSYNC)
        if (kiocb->ki_flags & IOCB_DSYNC)
                flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
        if (kiocb->ki_flags & IOCB_SYNC)
                flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
        if (kiocb->ki_flags & IOCB_APPEND)
                flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
        if (kiocb->ki_flags & IOCB_DIRECT)
                flags |= O_DIRECT;
#endif
        return (flags);
}

/*
 * If relatime is enabled, call file_accessed() only if
 * zfs_relatime_need_update() is true. This is needed since datasets with an
 * inherited "relatime" property aren't necessarily mounted with the
 * MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is what the
 * VFS relatime test in relatime_need_update() is based on.
 */
static inline void
zpl_file_accessed(struct file *filp)
{
        struct inode *ip = filp->f_mapping->host;

        if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
                if (zfs_relatime_need_update(ip))
                        file_accessed(filp);
        } else {
                file_accessed(filp);
        }
}

static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        struct file *filp = kiocb->ki_filp;
        ssize_t count = iov_iter_count(to);
        zfs_uio_t uio;

        zfs_uio_iov_iter_init(&uio, to, kiocb->ki_pos, count, 0);

        crhold(cr);
        cookie = spl_fstrans_mark();

        ssize_t ret = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
            filp->f_flags | zfs_io_flags(kiocb), cr);

        spl_fstrans_unmark(cookie);
        crfree(cr);

        if (ret < 0)
                return (ret);

        ssize_t read = count - uio.uio_resid;
        kiocb->ki_pos += read;

        zpl_file_accessed(filp);

        return (read);
}

static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
    size_t *countp)
{
        ssize_t ret = generic_write_checks(kiocb, from);
        if (ret <= 0)
                return (ret);

        *countp = ret;

        return (0);
}

static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
        cred_t *cr = CRED();
        fstrans_cookie_t cookie;
        struct file *filp = kiocb->ki_filp;
        struct inode *ip = filp->f_mapping->host;
        zfs_uio_t uio;
        size_t count = 0;
        ssize_t ret;

        ret = zpl_generic_write_checks(kiocb, from, &count);
        if (ret)
                return (ret);

        zfs_uio_iov_iter_init(&uio, from, kiocb->ki_pos, count,
            from->iov_offset);

        crhold(cr);
        cookie = spl_fstrans_mark();

        ret = -zfs_write(ITOZ(ip), &uio,
            filp->f_flags | zfs_io_flags(kiocb), cr);

        spl_fstrans_unmark(cookie);
        crfree(cr);

        if (ret < 0)
                return (ret);

        ssize_t wrote = count - uio.uio_resid;
        kiocb->ki_pos += wrote;

        return (wrote);
}

static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
        /*
         * All O_DIRECT requests should be handled by
         * zpl_iter_read()/zpl_iter_write(). There is no way generic kernel
         * code should call the direct_IO address_space_operations function.
         * We set this code path to be fatal if it is executed.
         */
        PANIC(0);
        return (0);
}

static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
        fstrans_cookie_t cookie;

        if (whence == SEEK_DATA || whence == SEEK_HOLE) {
                struct inode *ip = filp->f_mapping->host;
                loff_t maxbytes = ip->i_sb->s_maxbytes;
                loff_t error;

                spl_inode_lock_shared(ip);
                cookie = spl_fstrans_mark();
                error = -zfs_holey(ITOZ(ip), whence, &offset);
                spl_fstrans_unmark(cookie);
                if (error == 0)
                        error = lseek_execute(filp, ip, offset, maxbytes);
                spl_inode_unlock_shared(ip);

                return (error);
        }
#endif /* SEEK_HOLE && SEEK_DATA */

        return (generic_file_llseek(filp, offset, whence));
}
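/*
 * Usage sketch (illustrative, from userspace): the SEEK_HOLE/SEEK_DATA
 * support above is what allows applications to walk the allocated regions
 * of a sparse file, e.g.:
 *
 *         off_t data = lseek(fd, 0, SEEK_DATA);     (first allocated region)
 *         off_t hole = lseek(fd, data, SEEK_HOLE);  (end of that region)
 *
 * Both requests are resolved by zfs_holey() while the inode lock is held
 * shared.
 */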

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC. This has been shown to work
 * well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache. To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache. The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region. These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of an mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious is that it increases the required
 * memory footprint when accessing mmap'ed files. It also adds
 * additional complexity to the code keeping both caches synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly on to the ARC buffers. The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index. The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct inode *ip = filp->f_mapping->host;
        int error;
        fstrans_cookie_t cookie;

        cookie = spl_fstrans_mark();
        error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
            (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
        spl_fstrans_unmark(cookie);

        if (error)
                return (error);

        error = generic_file_mmap(filp, vma);
        if (error)
                return (error);

        return (error);
}

/*
 * Populate a page with data for the Linux page cache. This function is
 * only used to support mmap(2). There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 */
static inline int
zpl_readpage_common(struct page *pp)
{
        fstrans_cookie_t cookie;

        ASSERT(PageLocked(pp));

        cookie = spl_fstrans_mark();
        int error = -zfs_getpage(pp->mapping->host, pp);
        spl_fstrans_unmark(cookie);

        unlock_page(pp);

        return (error);
}

#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
        return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
        return (zpl_readpage_common(pp));
}
#endif

static int
zpl_readpage_filler(void *data, struct page *pp)
{
        return (zpl_readpage_common(pp));
}

/*
 * Populate a set of pages with data for the Linux page cache. This
 * function will only be called for read ahead and never for demand
 * paging. For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
        return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
        struct page *page;

        while ((page = readahead_page(ractl)) != NULL) {
                int ret;

                ret = zpl_readpage_filler(NULL, page);
                put_page(page);
                if (ret)
                        break;
        }
}
#endif

static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
        boolean_t *for_sync = data;
        fstrans_cookie_t cookie;
        int ret;

        ASSERT(PageLocked(pp));
        ASSERT(!PageWriteback(pp));

        cookie = spl_fstrans_mark();
        ret = zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
        spl_fstrans_unmark(cookie);

        return (ret);
}

#ifdef HAVE_WRITEPAGE_T_FOLIO
static int
zpl_putfolio(struct folio *pp, struct writeback_control *wbc, void *data)
{
        return (zpl_putpage(&pp->page, wbc, data));
}
#endif

static inline int
zpl_write_cache_pages(struct address_space *mapping,
    struct writeback_control *wbc, void *data)
{
        int result;

#ifdef HAVE_WRITEPAGE_T_FOLIO
        result = write_cache_pages(mapping, wbc, zpl_putfolio, data);
#else
        result = write_cache_pages(mapping, wbc, zpl_putpage, data);
#endif
        return (result);
}

static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        znode_t *zp = ITOZ(mapping->host);
        zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
        enum writeback_sync_modes sync_mode;
        int result;

        if ((result = zpl_enter(zfsvfs, FTAG)) != 0)
                return (result);
        if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
                wbc->sync_mode = WB_SYNC_ALL;
        zpl_exit(zfsvfs, FTAG);
        sync_mode = wbc->sync_mode;

        /*
         * We don't want to run write_cache_pages() in SYNC mode here, because
         * that would make putpage() wait for a single page to be committed to
         * disk every single time, resulting in atrocious performance. Instead
         * we run it once in non-SYNC mode so that the ZIL gets all the data,
         * and then we commit it all in one go.
         */
        boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
        wbc->sync_mode = WB_SYNC_NONE;
        result = zpl_write_cache_pages(mapping, wbc, &for_sync);
        if (sync_mode != wbc->sync_mode) {
                if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                        return (result);
                if (zfsvfs->z_log != NULL)
                        zil_commit(zfsvfs->z_log, zp->z_id);
                zpl_exit(zfsvfs, FTAG);

                /*
                 * We need to call write_cache_pages() again (we can't just
                 * return after the commit) because the previous call in
                 * non-SYNC mode does not guarantee that we got all the dirty
                 * pages (see the implementation of write_cache_pages() for
                 * details). That being said, this is a no-op in most cases.
                 */
                wbc->sync_mode = sync_mode;
                result = zpl_write_cache_pages(mapping, wbc, &for_sync);
        }
        return (result);
}

/*
 * Write out dirty pages to the ARC; this function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
 * which never call .write(). These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
        if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
                wbc->sync_mode = WB_SYNC_ALL;

        boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);

        return (zpl_putpage(pp, wbc, &for_sync));
}

/*
 * The flag combination which matches the behavior of zfs_space() is
 * FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 *
 * The original mode=0 (allocate space) behavior can be reasonably emulated
 * by checking if enough space exists and creating a sparse file, as real
 * persistent space reservation is not possible due to COW, snapshots, etc.
 */
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
        cred_t *cr = CRED();
        loff_t olen;
        fstrans_cookie_t cookie;
        int error = 0;

        int test_mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE;

        if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
                return (-EOPNOTSUPP);

        if (offset < 0 || len <= 0)
                return (-EINVAL);

        spl_inode_lock(ip);
        olen = i_size_read(ip);

        crhold(cr);
        cookie = spl_fstrans_mark();
        if (mode & (test_mode)) {
                flock64_t bf;

                if (mode & FALLOC_FL_KEEP_SIZE) {
                        if (offset > olen)
                                goto out_unmark;

                        if (offset + len > olen)
                                len = olen - offset;
                }
                bf.l_type = F_WRLCK;
                bf.l_whence = SEEK_SET;
                bf.l_start = offset;
                bf.l_len = len;
                bf.l_pid = 0;

                error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
        } else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
                unsigned int percent = zfs_fallocate_reserve_percent;
                struct kstatfs statfs;

                /* Legacy mode, disable fallocate compatibility. */
                if (percent == 0) {
                        error = -EOPNOTSUPP;
                        goto out_unmark;
                }

                /*
                 * Use zfs_statvfs() instead of dmu_objset_space() since it
                 * also checks project quota limits, which are relevant here.
                 */
                error = zfs_statvfs(ip, &statfs);
                if (error)
                        goto out_unmark;

                /*
                 * Shrink available space a bit to account for overhead/races.
                 * We know the product previously fit into availbytes from
                 * dmu_objset_space(), so the smaller product will also fit.
                 */
                if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
                        error = -ENOSPC;
                        goto out_unmark;
                }
                if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
                        error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
        }
out_unmark:
        spl_fstrans_unmark(cookie);
        spl_inode_unlock(ip);

        crfree(cr);

        return (error);
}

static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
        return zpl_fallocate_common(file_inode(filp),
            mode, offset, len);
}

static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
        uint32_t generation = file_inode(filp)->i_generation;

        return (copy_to_user(arg, &generation, sizeof (generation)));
}

static int
zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
{
        struct inode *ip = file_inode(filp);
        znode_t *zp = ITOZ(ip);
        zfsvfs_t *zfsvfs = ITOZSB(ip);
        objset_t *os = zfsvfs->z_os;
        int error = 0;

        if (S_ISFIFO(ip->i_mode))
                return (-ESPIPE);

        if (offset < 0 || len < 0)
                return (-EINVAL);

        if ((error = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
                return (error);

        switch (advice) {
        case POSIX_FADV_SEQUENTIAL:
        case POSIX_FADV_WILLNEED:
#ifdef HAVE_GENERIC_FADVISE
                if (zn_has_cached_data(zp, offset, offset + len - 1))
                        error = generic_fadvise(filp, offset, len, advice);
#endif
                /*
                 * Pass on the caller's size directly, but note that
                 * dmu_prefetch_max will effectively cap it. If there
                 * really is a larger sequential access pattern, perhaps
                 * dmu_zfetch will detect it.
                 */
                if (len == 0)
                        len = i_size_read(ip) - offset;

                dmu_prefetch(os, zp->z_id, 0, offset, len,
                    ZIO_PRIORITY_ASYNC_READ);
                break;
        case POSIX_FADV_NORMAL:
        case POSIX_FADV_RANDOM:
        case POSIX_FADV_DONTNEED:
        case POSIX_FADV_NOREUSE:
                /* ignored for now */
                break;
        default:
                error = -EINVAL;
                break;
        }

        zfs_exit(zfsvfs, FTAG);

        return (error);
}

#define ZFS_FL_USER_VISIBLE     (FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define ZFS_FL_USER_MODIFIABLE  (FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
        uint64_t zfs_flags = ITOZ(ip)->z_pflags;
        uint32_t ioctl_flags = 0;

        if (zfs_flags & ZFS_IMMUTABLE)
                ioctl_flags |= FS_IMMUTABLE_FL;

        if (zfs_flags & ZFS_APPENDONLY)
                ioctl_flags |= FS_APPEND_FL;

        if (zfs_flags & ZFS_NODUMP)
                ioctl_flags |= FS_NODUMP_FL;

        if (zfs_flags & ZFS_PROJINHERIT)
                ioctl_flags |= ZFS_PROJINHERIT_FL;

        return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}

/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
        uint32_t flags;
        int err;

        flags = __zpl_ioctl_getflags(file_inode(filp));
        err = copy_to_user(arg, &flags, sizeof (flags));

        return (err);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag. This is ugly, but the requirement that we do this is a consequence of
 * how the Linux file attribute interface was designed. Another consequence is
 * that concurrent modification of files suffers from a TOCTOU race. Neither
 * are things we can fix without modifying the kernel-userland interface, which
 * is outside of our jurisdiction.
 */

#define fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
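/*
 * Illustrative example: fchange() is true exactly when bit b0 in f0 and bit
 * b1 in f1 disagree, i.e. when the caller is asking us to flip that
 * attribute. For instance, if the inode currently has ZFS_IMMUTABLE set in
 * z_pflags and the new ioctl flags clear FS_IMMUTABLE_FL, then
 *
 *         fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) == 1
 *
 * which is what gates the CAP_LINUX_IMMUTABLE check below.
 */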

static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
        uint64_t zfs_flags = ITOZ(ip)->z_pflags;
        xoptattr_t *xoap;

        if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
            ZFS_PROJINHERIT_FL))
                return (-EOPNOTSUPP);

        if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
                return (-EACCES);

        if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
            fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
            !capable(CAP_LINUX_IMMUTABLE))
                return (-EPERM);

        if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
                return (-EACCES);

        xva_init(xva);
        xoap = xva_getxoptattr(xva);

#define FLAG_CHANGE(iflag, zflag, xflag, xfield)        do {    \
        if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) ||      \
            ((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) {      \
                XVA_SET_REQ(xva, (xflag));      \
                (xfield) = ((ioctl_flags & (iflag)) != 0);      \
        }       \
} while (0)

        FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
            xoap->xoa_immutable);
        FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
            xoap->xoa_appendonly);
        FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
            xoap->xoa_nodump);
        FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
            xoap->xoa_projinherit);

#undef FLAG_CHANGE

        return (0);
}

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
        struct inode *ip = file_inode(filp);
        uint32_t flags;
        cred_t *cr = CRED();
        xvattr_t xva;
        int err;
        fstrans_cookie_t cookie;

        if (copy_from_user(&flags, arg, sizeof (flags)))
                return (-EFAULT);

        err = __zpl_ioctl_setflags(ip, flags, &xva);
        if (err)
                return (err);

        crhold(cr);
        cookie = spl_fstrans_mark();
        err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
        spl_fstrans_unmark(cookie);
        crfree(cr);

        return (err);
}

static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
        zfsxattr_t fsx = { 0 };
        struct inode *ip = file_inode(filp);
        int err;

        fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
        fsx.fsx_projid = ITOZ(ip)->z_projid;
        err = copy_to_user(arg, &fsx, sizeof (fsx));

        return (err);
}

static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
        struct inode *ip = file_inode(filp);
        zfsxattr_t fsx;
        cred_t *cr = CRED();
        xvattr_t xva;
        xoptattr_t *xoap;
        int err;
        fstrans_cookie_t cookie;

        if (copy_from_user(&fsx, arg, sizeof (fsx)))
                return (-EFAULT);

        if (!zpl_is_valid_projid(fsx.fsx_projid))
                return (-EINVAL);

        err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
        if (err)
                return (err);

        xoap = xva_getxoptattr(&xva);
        XVA_SET_REQ(&xva, XAT_PROJID);
        xoap->xoa_projid = fsx.fsx_projid;

        crhold(cr);
        cookie = spl_fstrans_mark();
        err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
        spl_fstrans_unmark(cookie);
        crfree(cr);

        return (err);
}

/*
 * Expose Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
        struct inode *ip = file_inode(filp);
        uint64_t dosflags = ITOZ(ip)->z_pflags;
        dosflags &= ZFS_DOS_FL_USER_VISIBLE;
        int err = copy_to_user(arg, &dosflags, sizeof (dosflags));

        return (err);
}

static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
        uint64_t zfs_flags = ITOZ(ip)->z_pflags;
        xoptattr_t *xoap;

        if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
                return (-EOPNOTSUPP);

        if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
            fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
            !capable(CAP_LINUX_IMMUTABLE))
                return (-EPERM);

        if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
                return (-EACCES);

        xva_init(xva);
        xoap = xva_getxoptattr(xva);

#define FLAG_CHANGE(iflag, xflag, xfield)       do {    \
        if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) ||      \
            ((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) {      \
                XVA_SET_REQ(xva, (xflag));      \
                (xfield) = ((ioctl_flags & (iflag)) != 0);      \
        }       \
} while (0)

        FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
        FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
        FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
        FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
        FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
        FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
        FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
        FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
        FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
        FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
        FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);

#undef FLAG_CHANGE

        return (0);
}

/*
 * Set Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
        struct inode *ip = file_inode(filp);
        uint64_t dosflags;
        cred_t *cr = CRED();
        xvattr_t xva;
        int err;
        fstrans_cookie_t cookie;

        if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
                return (-EFAULT);

        err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
        if (err)
                return (err);

        crhold(cr);
        cookie = spl_fstrans_mark();
        err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
        spl_fstrans_unmark(cookie);
        crfree(cr);

        return (err);
}

static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case FS_IOC_GETVERSION:
                return (zpl_ioctl_getversion(filp, (void *)arg));
        case FS_IOC_GETFLAGS:
                return (zpl_ioctl_getflags(filp, (void *)arg));
        case FS_IOC_SETFLAGS:
                return (zpl_ioctl_setflags(filp, (void *)arg));
        case ZFS_IOC_FSGETXATTR:
                return (zpl_ioctl_getxattr(filp, (void *)arg));
        case ZFS_IOC_FSSETXATTR:
                return (zpl_ioctl_setxattr(filp, (void *)arg));
        case ZFS_IOC_GETDOSFLAGS:
                return (zpl_ioctl_getdosflags(filp, (void *)arg));
        case ZFS_IOC_SETDOSFLAGS:
                return (zpl_ioctl_setdosflags(filp, (void *)arg));
        case ZFS_IOC_COMPAT_FICLONE:
                return (zpl_ioctl_ficlone(filp, (void *)arg));
        case ZFS_IOC_COMPAT_FICLONERANGE:
                return (zpl_ioctl_ficlonerange(filp, (void *)arg));
        case ZFS_IOC_COMPAT_FIDEDUPERANGE:
                return (zpl_ioctl_fideduperange(filp, (void *)arg));
        default:
                return (-ENOTTY);
        }
}

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
        case FS_IOC32_GETFLAGS:
                cmd = FS_IOC_GETFLAGS;
                break;
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
        default:
                return (-ENOTTY);
        }
        return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */

const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
        .readpages      = zpl_readpages,
#else
        .readahead      = zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
        .read_folio     = zpl_read_folio,
#else
        .readpage       = zpl_readpage,
#endif
        .writepage      = zpl_writepage,
        .writepages     = zpl_writepages,
        .direct_IO      = zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
        .set_page_dirty = __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
        .dirty_folio    = filemap_dirty_folio,
#endif
#ifdef HAVE_VFS_MIGRATE_FOLIO
        .migrate_folio  = migrate_folio,
#else
        .migratepage    = migrate_page,
#endif
};

const struct file_operations zpl_file_operations = {
        .open           = zpl_open,
        .release        = zpl_release,
        .llseek         = zpl_llseek,
        .read_iter      = zpl_iter_read,
        .write_iter     = zpl_iter_write,
#ifdef HAVE_COPY_SPLICE_READ
        .splice_read    = copy_splice_read,
#else
        .splice_read    = generic_file_splice_read,
#endif
        .splice_write   = iter_file_splice_write,
        .mmap           = zpl_mmap,
        .fsync          = zpl_fsync,
        .fallocate      = zpl_fallocate,
        .copy_file_range        = zpl_copy_file_range,
#ifdef HAVE_VFS_CLONE_FILE_RANGE
        .clone_file_range       = zpl_clone_file_range,
#endif
#ifdef HAVE_VFS_REMAP_FILE_RANGE
        .remap_file_range       = zpl_remap_file_range,
#endif
#ifdef HAVE_VFS_DEDUPE_FILE_RANGE
        .dedupe_file_range      = zpl_dedupe_file_range,
#endif
        .fadvise        = zpl_fadvise,
        .unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate_shared = zpl_iterate,
        .fsync          = zpl_fsync,
        .unlocked_ioctl = zpl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = zpl_compat_ioctl,
#endif
};

module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
        "Percentage of length to use for the available capacity check");
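/*
 * Usage sketch (illustrative): module_param(..., 0644) exposes the tunable
 * read-write under /sys/module/zfs/parameters, so it can be adjusted at
 * runtime, e.g.:
 *
 *         echo 150 > /sys/module/zfs/parameters/zfs_fallocate_reserve_percent
 *
 * or set persistently with an "options zfs zfs_fallocate_reserve_percent=150"
 * line in /etc/modprobe.d/zfs.conf.
 */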