xref: /onnv-gate/usr/src/uts/common/fs/zfs/zfs_vnops.c (revision 5326:6752aa2bd5bc)
1789Sahrens /*
2789Sahrens  * CDDL HEADER START
3789Sahrens  *
4789Sahrens  * The contents of this file are subject to the terms of the
51460Smarks  * Common Development and Distribution License (the "License").
61460Smarks  * You may not use this file except in compliance with the License.
7789Sahrens  *
8789Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9789Sahrens  * or http://www.opensolaris.org/os/licensing.
10789Sahrens  * See the License for the specific language governing permissions
11789Sahrens  * and limitations under the License.
12789Sahrens  *
13789Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14789Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15789Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16789Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17789Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18789Sahrens  *
19789Sahrens  * CDDL HEADER END
20789Sahrens  */
21789Sahrens /*
223461Sahrens  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23789Sahrens  * Use is subject to license terms.
24789Sahrens  */
25789Sahrens 
264144Speteh /* Portions Copyright 2007 Jeremy Teo */
274144Speteh 
28789Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
29789Sahrens 
30789Sahrens #include <sys/types.h>
31789Sahrens #include <sys/param.h>
32789Sahrens #include <sys/time.h>
33789Sahrens #include <sys/systm.h>
34789Sahrens #include <sys/sysmacros.h>
35789Sahrens #include <sys/resource.h>
36789Sahrens #include <sys/vfs.h>
373898Srsb #include <sys/vfs_opreg.h>
38789Sahrens #include <sys/vnode.h>
39789Sahrens #include <sys/file.h>
40789Sahrens #include <sys/stat.h>
41789Sahrens #include <sys/kmem.h>
42789Sahrens #include <sys/taskq.h>
43789Sahrens #include <sys/uio.h>
44789Sahrens #include <sys/vmsystm.h>
45789Sahrens #include <sys/atomic.h>
462688Smaybee #include <sys/vm.h>
47789Sahrens #include <vm/seg_vn.h>
48789Sahrens #include <vm/pvn.h>
49789Sahrens #include <vm/as.h>
50789Sahrens #include <sys/mman.h>
51789Sahrens #include <sys/pathname.h>
52789Sahrens #include <sys/cmn_err.h>
53789Sahrens #include <sys/errno.h>
54789Sahrens #include <sys/unistd.h>
55789Sahrens #include <sys/zfs_vfsops.h>
56789Sahrens #include <sys/zfs_dir.h>
57789Sahrens #include <sys/zfs_acl.h>
58789Sahrens #include <sys/zfs_ioctl.h>
59789Sahrens #include <sys/fs/zfs.h>
60789Sahrens #include <sys/dmu.h>
61789Sahrens #include <sys/spa.h>
62789Sahrens #include <sys/txg.h>
63789Sahrens #include <sys/dbuf.h>
64789Sahrens #include <sys/zap.h>
65789Sahrens #include <sys/dirent.h>
66789Sahrens #include <sys/policy.h>
67789Sahrens #include <sys/sunddi.h>
68789Sahrens #include <sys/filio.h>
69789Sahrens #include "fs/fs_subr.h"
70789Sahrens #include <sys/zfs_ctldir.h>
711484Sek110237 #include <sys/dnlc.h>
721669Sperrin #include <sys/zfs_rlock.h>
73789Sahrens 
74789Sahrens /*
75789Sahrens  * Programming rules.
76789Sahrens  *
77789Sahrens  * Each vnode op performs some logical unit of work.  To do this, the ZPL must
78789Sahrens  * properly lock its in-core state, create a DMU transaction, do the work,
79789Sahrens  * record this work in the intent log (ZIL), commit the DMU transaction,
80789Sahrens  * and wait the the intent log to commit if it's is a synchronous operation.
81789Sahrens  * Morover, the vnode ops must work in both normal and log replay context.
82789Sahrens  * The ordering of events is important to avoid deadlocks and references
83789Sahrens  * to freed memory.  The example below illustrates the following Big Rules:
84789Sahrens  *
85789Sahrens  *  (1) A check must be made in each zfs thread for a mounted file system.
86*5326Sek110237  *	This is done avoiding races using ZFS_ENTER(zfsvfs) or
87*5326Sek110237  *      ZFS_ENTER_VERIFY(zfsvfs, zp).  A ZFS_EXIT(zfsvfs) is needed before
88*5326Sek110237  *      all returns.
89789Sahrens  *
90789Sahrens  *  (2)	VN_RELE() should always be the last thing except for zil_commit()
912638Sperrin  *	(if necessary) and ZFS_EXIT(). This is for 3 reasons:
92789Sahrens  *	First, if it's the last reference, the vnode/znode
93789Sahrens  *	can be freed, so the zp may point to freed memory.  Second, the last
94789Sahrens  *	reference will call zfs_zinactive(), which may induce a lot of work --
951669Sperrin  *	pushing cached pages (which acquires range locks) and syncing out
96789Sahrens  *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
97789Sahrens  *	which could deadlock the system if you were already holding one.
98789Sahrens  *
991757Sperrin  *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
1001757Sperrin  *	as they can span dmu_tx_assign() calls.
1011757Sperrin  *
1021757Sperrin  *  (4)	Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign().
103789Sahrens  *	In normal operation, this will be TXG_NOWAIT.  During ZIL replay,
104789Sahrens  *	it will be a specific txg.  Either way, dmu_tx_assign() never blocks.
105789Sahrens  *	This is critical because we don't want to block while holding locks.
106789Sahrens  *	Note, in particular, that if a lock is sometimes acquired before
107789Sahrens  *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
108789Sahrens  *	use a non-blocking assign can deadlock the system.  The scenario:
109789Sahrens  *
110789Sahrens  *	Thread A has grabbed a lock before calling dmu_tx_assign().
111789Sahrens  *	Thread B is in an already-assigned tx, and blocks for this lock.
112789Sahrens  *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
113789Sahrens  *	forever, because the previous txg can't quiesce until B's tx commits.
114789Sahrens  *
115789Sahrens  *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
1162113Sahrens  *	then drop all locks, call dmu_tx_wait(), and try again.
117789Sahrens  *
1181757Sperrin  *  (5)	If the operation succeeded, generate the intent log entry for it
119789Sahrens  *	before dropping locks.  This ensures that the ordering of events
120789Sahrens  *	in the intent log matches the order in which they actually occurred.
121789Sahrens  *
1221757Sperrin  *  (6)	At the end of each vnode op, the DMU tx must always commit,
123789Sahrens  *	regardless of whether there were any errors.
124789Sahrens  *
1252638Sperrin  *  (7)	After dropping all locks, invoke zil_commit(zilog, seq, foid)
126789Sahrens  *	to ensure that synchronous semantics are provided when necessary.
127789Sahrens  *
128789Sahrens  * In general, this is how things should be ordered in each vnode op:
129789Sahrens  *
130789Sahrens  *	ZFS_ENTER(zfsvfs);		// exit if unmounted
131789Sahrens  * top:
132789Sahrens  *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
133789Sahrens  *	rw_enter(...);			// grab any other locks you need
134789Sahrens  *	tx = dmu_tx_create(...);	// get DMU tx
135789Sahrens  *	dmu_tx_hold_*();		// hold each object you might modify
136789Sahrens  *	error = dmu_tx_assign(tx, zfsvfs->z_assign);	// try to assign
137789Sahrens  *	if (error) {
138789Sahrens  *		rw_exit(...);		// drop locks
139789Sahrens  *		zfs_dirent_unlock(dl);	// unlock directory entry
140789Sahrens  *		VN_RELE(...);		// release held vnodes
141789Sahrens  *		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
1422113Sahrens  *			dmu_tx_wait(tx);
1432113Sahrens  *			dmu_tx_abort(tx);
144789Sahrens  *			goto top;
145789Sahrens  *		}
1462113Sahrens  *		dmu_tx_abort(tx);	// abort DMU tx
147789Sahrens  *		ZFS_EXIT(zfsvfs);	// finished in zfs
148789Sahrens  *		return (error);		// really out of space
149789Sahrens  *	}
150789Sahrens  *	error = do_real_work();		// do whatever this VOP does
151789Sahrens  *	if (error == 0)
1522638Sperrin  *		zfs_log_*(...);		// on success, make ZIL entry
153789Sahrens  *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
154789Sahrens  *	rw_exit(...);			// drop locks
155789Sahrens  *	zfs_dirent_unlock(dl);		// unlock directory entry
156789Sahrens  *	VN_RELE(...);			// release held vnodes
1572638Sperrin  *	zil_commit(zilog, seq, foid);	// synchronous when necessary
158789Sahrens  *	ZFS_EXIT(zfsvfs);		// finished in zfs
159789Sahrens  *	return (error);			// done, report error
160789Sahrens  */
161789Sahrens /* ARGSUSED */
162789Sahrens static int
163789Sahrens zfs_open(vnode_t **vpp, int flag, cred_t *cr)
164789Sahrens {
1653063Sperrin 	znode_t	*zp = VTOZ(*vpp);
1663063Sperrin 
1673063Sperrin 	/* Keep a count of the synchronous opens in the znode */
1683063Sperrin 	if (flag & (FSYNC | FDSYNC))
1693063Sperrin 		atomic_inc_32(&zp->z_sync_cnt);
170789Sahrens 	return (0);
171789Sahrens }
172789Sahrens 
173789Sahrens /* ARGSUSED */
174789Sahrens static int
175789Sahrens zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
176789Sahrens {
1773063Sperrin 	znode_t	*zp = VTOZ(vp);
1783063Sperrin 
1793063Sperrin 	/* Decrement the synchronous opens in the znode */
1804339Sperrin 	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
1813063Sperrin 		atomic_dec_32(&zp->z_sync_cnt);
1823063Sperrin 
183789Sahrens 	/*
184789Sahrens 	 * Clean up any locks held by this process on the vp.
185789Sahrens 	 */
186789Sahrens 	cleanlocks(vp, ddi_get_pid(), 0);
187789Sahrens 	cleanshares(vp, ddi_get_pid());
188789Sahrens 
189789Sahrens 	return (0);
190789Sahrens }
191789Sahrens 
192789Sahrens /*
193789Sahrens  * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
194789Sahrens  * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter.
195789Sahrens  */
196789Sahrens static int
197789Sahrens zfs_holey(vnode_t *vp, int cmd, offset_t *off)
198789Sahrens {
199789Sahrens 	znode_t	*zp = VTOZ(vp);
200789Sahrens 	uint64_t noff = (uint64_t)*off; /* new offset */
201789Sahrens 	uint64_t file_sz;
202789Sahrens 	int error;
203789Sahrens 	boolean_t hole;
204789Sahrens 
205789Sahrens 	file_sz = zp->z_phys->zp_size;
206789Sahrens 	if (noff >= file_sz)  {
207789Sahrens 		return (ENXIO);
208789Sahrens 	}
209789Sahrens 
210789Sahrens 	if (cmd == _FIO_SEEK_HOLE)
211789Sahrens 		hole = B_TRUE;
212789Sahrens 	else
213789Sahrens 		hole = B_FALSE;
214789Sahrens 
215789Sahrens 	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);
216789Sahrens 
217789Sahrens 	/* end of file? */
218789Sahrens 	if ((error == ESRCH) || (noff > file_sz)) {
219789Sahrens 		/*
220789Sahrens 		 * Handle the virtual hole at the end of file.
221789Sahrens 		 */
222789Sahrens 		if (hole) {
223789Sahrens 			*off = file_sz;
224789Sahrens 			return (0);
225789Sahrens 		}
226789Sahrens 		return (ENXIO);
227789Sahrens 	}
228789Sahrens 
229789Sahrens 	if (noff < *off)
230789Sahrens 		return (error);
231789Sahrens 	*off = noff;
232789Sahrens 	return (error);
233789Sahrens }
234789Sahrens 
235789Sahrens /* ARGSUSED */
236789Sahrens static int
237789Sahrens zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred,
238789Sahrens     int *rvalp)
239789Sahrens {
240789Sahrens 	offset_t off;
241789Sahrens 	int error;
242789Sahrens 	zfsvfs_t *zfsvfs;
243*5326Sek110237 	znode_t *zp;
244789Sahrens 
245789Sahrens 	switch (com) {
2464339Sperrin 	case _FIOFFS:
247789Sahrens 		return (zfs_sync(vp->v_vfsp, 0, cred));
248789Sahrens 
2491544Seschrock 		/*
2501544Seschrock 		 * The following two ioctls are used by bfu.  Faking out,
2511544Seschrock 		 * necessary to avoid bfu errors.
2521544Seschrock 		 */
2534339Sperrin 	case _FIOGDIO:
2544339Sperrin 	case _FIOSDIO:
2551544Seschrock 		return (0);
2561544Seschrock 
2574339Sperrin 	case _FIO_SEEK_DATA:
2584339Sperrin 	case _FIO_SEEK_HOLE:
259789Sahrens 		if (ddi_copyin((void *)data, &off, sizeof (off), flag))
260789Sahrens 			return (EFAULT);
261789Sahrens 
262*5326Sek110237 		zp = VTOZ(vp);
263*5326Sek110237 		zfsvfs = zp->z_zfsvfs;
264*5326Sek110237 		ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
265789Sahrens 
266789Sahrens 		/* offset parameter is in/out */
267789Sahrens 		error = zfs_holey(vp, com, &off);
268789Sahrens 		ZFS_EXIT(zfsvfs);
269789Sahrens 		if (error)
270789Sahrens 			return (error);
271789Sahrens 		if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
272789Sahrens 			return (EFAULT);
273789Sahrens 		return (0);
274789Sahrens 	}
275789Sahrens 	return (ENOTTY);
276789Sahrens }
277789Sahrens 
278789Sahrens /*
279789Sahrens  * When a file is memory mapped, we must keep the IO data synchronized
280789Sahrens  * between the DMU cache and the memory mapped pages.  What this means:
281789Sahrens  *
282789Sahrens  * On Write:	If we find a memory mapped page, we write to *both*
283789Sahrens  *		the page and the dmu buffer.
284789Sahrens  *
285789Sahrens  * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
286789Sahrens  *	the file is memory mapped.
287789Sahrens  */
288789Sahrens static int
2893638Sbillm mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx)
290789Sahrens {
291789Sahrens 	znode_t	*zp = VTOZ(vp);
292789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
293789Sahrens 	int64_t	start, off;
294789Sahrens 	int len = nbytes;
295789Sahrens 	int error = 0;
296789Sahrens 
297789Sahrens 	start = uio->uio_loffset;
298789Sahrens 	off = start & PAGEOFFSET;
299789Sahrens 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
300789Sahrens 		page_t *pp;
301789Sahrens 		uint64_t bytes = MIN(PAGESIZE - off, len);
3023638Sbillm 		uint64_t woff = uio->uio_loffset;
303789Sahrens 
304789Sahrens 		/*
305789Sahrens 		 * We don't want a new page to "appear" in the middle of
306789Sahrens 		 * the file update (because it may not get the write
307789Sahrens 		 * update data), so we grab a lock to block
308789Sahrens 		 * zfs_getpage().
309789Sahrens 		 */
310789Sahrens 		rw_enter(&zp->z_map_lock, RW_WRITER);
311789Sahrens 		if (pp = page_lookup(vp, start, SE_SHARED)) {
312789Sahrens 			caddr_t va;
313789Sahrens 
314789Sahrens 			rw_exit(&zp->z_map_lock);
315789Sahrens 			va = ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1L);
316789Sahrens 			error = uiomove(va+off, bytes, UIO_WRITE, uio);
317789Sahrens 			if (error == 0) {
318789Sahrens 				dmu_write(zfsvfs->z_os, zp->z_id,
319789Sahrens 				    woff, bytes, va+off, tx);
320789Sahrens 			}
321789Sahrens 			ppmapout(va);
322789Sahrens 			page_unlock(pp);
323789Sahrens 		} else {
324789Sahrens 			error = dmu_write_uio(zfsvfs->z_os, zp->z_id,
3253638Sbillm 			    uio, bytes, tx);
326789Sahrens 			rw_exit(&zp->z_map_lock);
327789Sahrens 		}
328789Sahrens 		len -= bytes;
329789Sahrens 		off = 0;
330789Sahrens 		if (error)
331789Sahrens 			break;
332789Sahrens 	}
333789Sahrens 	return (error);
334789Sahrens }
335789Sahrens 
336789Sahrens /*
337789Sahrens  * When a file is memory mapped, we must keep the IO data synchronized
338789Sahrens  * between the DMU cache and the memory mapped pages.  What this means:
339789Sahrens  *
340789Sahrens  * On Read:	We "read" preferentially from memory mapped pages,
341789Sahrens  *		else we default from the dmu buffer.
342789Sahrens  *
343789Sahrens  * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
344789Sahrens  *	the file is memory mapped.
345789Sahrens  */
346789Sahrens static int
3473638Sbillm mappedread(vnode_t *vp, int nbytes, uio_t *uio)
348789Sahrens {
3493638Sbillm 	znode_t *zp = VTOZ(vp);
3503638Sbillm 	objset_t *os = zp->z_zfsvfs->z_os;
3513638Sbillm 	int64_t	start, off;
352789Sahrens 	int len = nbytes;
353789Sahrens 	int error = 0;
354789Sahrens 
355789Sahrens 	start = uio->uio_loffset;
356789Sahrens 	off = start & PAGEOFFSET;
357789Sahrens 	for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
358789Sahrens 		page_t *pp;
3593638Sbillm 		uint64_t bytes = MIN(PAGESIZE - off, len);
3603638Sbillm 
361789Sahrens 		if (pp = page_lookup(vp, start, SE_SHARED)) {
362789Sahrens 			caddr_t va;
363789Sahrens 
3642688Smaybee 			va = ppmapin(pp, PROT_READ, (caddr_t)-1L);
365789Sahrens 			error = uiomove(va + off, bytes, UIO_READ, uio);
366789Sahrens 			ppmapout(va);
367789Sahrens 			page_unlock(pp);
368789Sahrens 		} else {
3693638Sbillm 			error = dmu_read_uio(os, zp->z_id, uio, bytes);
370789Sahrens 		}
371789Sahrens 		len -= bytes;
372789Sahrens 		off = 0;
373789Sahrens 		if (error)
374789Sahrens 			break;
375789Sahrens 	}
376789Sahrens 	return (error);
377789Sahrens }
378789Sahrens 
3793638Sbillm offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
380789Sahrens 
381789Sahrens /*
382789Sahrens  * Read bytes from specified file into supplied buffer.
383789Sahrens  *
384789Sahrens  *	IN:	vp	- vnode of file to be read from.
385789Sahrens  *		uio	- structure supplying read location, range info,
386789Sahrens  *			  and return buffer.
387789Sahrens  *		ioflag	- SYNC flags; used to provide FRSYNC semantics.
388789Sahrens  *		cr	- credentials of caller.
389789Sahrens  *
390789Sahrens  *	OUT:	uio	- updated offset and range, buffer filled.
391789Sahrens  *
392789Sahrens  *	RETURN:	0 if success
393789Sahrens  *		error code if failure
394789Sahrens  *
395789Sahrens  * Side Effects:
396789Sahrens  *	vp - atime updated if byte count > 0
397789Sahrens  */
398789Sahrens /* ARGSUSED */
399789Sahrens static int
400789Sahrens zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
401789Sahrens {
402789Sahrens 	znode_t		*zp = VTOZ(vp);
403789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
404*5326Sek110237 	objset_t	*os;
4053638Sbillm 	ssize_t		n, nbytes;
4063638Sbillm 	int		error;
4071669Sperrin 	rl_t		*rl;
408789Sahrens 
409*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
410*5326Sek110237 	os = zfsvfs->z_os;
411789Sahrens 
412789Sahrens 	/*
413789Sahrens 	 * Validate file offset
414789Sahrens 	 */
415789Sahrens 	if (uio->uio_loffset < (offset_t)0) {
416789Sahrens 		ZFS_EXIT(zfsvfs);
417789Sahrens 		return (EINVAL);
418789Sahrens 	}
419789Sahrens 
420789Sahrens 	/*
421789Sahrens 	 * Fasttrack empty reads
422789Sahrens 	 */
423789Sahrens 	if (uio->uio_resid == 0) {
424789Sahrens 		ZFS_EXIT(zfsvfs);
425789Sahrens 		return (0);
426789Sahrens 	}
427789Sahrens 
428789Sahrens 	/*
4291669Sperrin 	 * Check for mandatory locks
430789Sahrens 	 */
431789Sahrens 	if (MANDMODE((mode_t)zp->z_phys->zp_mode)) {
432789Sahrens 		if (error = chklock(vp, FREAD,
433789Sahrens 		    uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
434789Sahrens 			ZFS_EXIT(zfsvfs);
435789Sahrens 			return (error);
436789Sahrens 		}
437789Sahrens 	}
438789Sahrens 
439789Sahrens 	/*
440789Sahrens 	 * If we're in FRSYNC mode, sync out this znode before reading it.
441789Sahrens 	 */
4422638Sperrin 	if (ioflag & FRSYNC)
4432638Sperrin 		zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id);
444789Sahrens 
445789Sahrens 	/*
4461669Sperrin 	 * Lock the range against changes.
447789Sahrens 	 */
4481669Sperrin 	rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
4491669Sperrin 
450789Sahrens 	/*
451789Sahrens 	 * If we are reading past end-of-file we can skip
452789Sahrens 	 * to the end; but we might still need to set atime.
453789Sahrens 	 */
454789Sahrens 	if (uio->uio_loffset >= zp->z_phys->zp_size) {
455789Sahrens 		error = 0;
456789Sahrens 		goto out;
457789Sahrens 	}
458789Sahrens 
4593638Sbillm 	ASSERT(uio->uio_loffset < zp->z_phys->zp_size);
4603638Sbillm 	n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset);
4613638Sbillm 
4623638Sbillm 	while (n > 0) {
4633638Sbillm 		nbytes = MIN(n, zfs_read_chunk_size -
4643638Sbillm 		    P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
4653638Sbillm 
4663638Sbillm 		if (vn_has_cached_data(vp))
4673638Sbillm 			error = mappedread(vp, nbytes, uio);
4683638Sbillm 		else
4693638Sbillm 			error = dmu_read_uio(os, zp->z_id, uio, nbytes);
4701544Seschrock 		if (error)
4713638Sbillm 			break;
4723638Sbillm 
4733638Sbillm 		n -= nbytes;
474789Sahrens 	}
4753638Sbillm 
476789Sahrens out:
4772237Smaybee 	zfs_range_unlock(rl);
478789Sahrens 
479789Sahrens 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
480789Sahrens 	ZFS_EXIT(zfsvfs);
481789Sahrens 	return (error);
482789Sahrens }
483789Sahrens 
484789Sahrens /*
485789Sahrens  * Fault in the pages of the first n bytes specified by the uio structure.
486789Sahrens  * 1 byte in each page is touched and the uio struct is unmodified.
487789Sahrens  * Any error will exit this routine as this is only a best
488789Sahrens  * attempt to get the pages resident. This is a copy of ufs_trans_touch().
489789Sahrens  */
490789Sahrens static void
491789Sahrens zfs_prefault_write(ssize_t n, struct uio *uio)
492789Sahrens {
493789Sahrens 	struct iovec *iov;
494789Sahrens 	ulong_t cnt, incr;
495789Sahrens 	caddr_t p;
496789Sahrens 	uint8_t tmp;
497789Sahrens 
498789Sahrens 	iov = uio->uio_iov;
499789Sahrens 
500789Sahrens 	while (n) {
501789Sahrens 		cnt = MIN(iov->iov_len, n);
502789Sahrens 		if (cnt == 0) {
503789Sahrens 			/* empty iov entry */
504789Sahrens 			iov++;
505789Sahrens 			continue;
506789Sahrens 		}
507789Sahrens 		n -= cnt;
508789Sahrens 		/*
509789Sahrens 		 * touch each page in this segment.
510789Sahrens 		 */
511789Sahrens 		p = iov->iov_base;
512789Sahrens 		while (cnt) {
513789Sahrens 			switch (uio->uio_segflg) {
514789Sahrens 			case UIO_USERSPACE:
515789Sahrens 			case UIO_USERISPACE:
516789Sahrens 				if (fuword8(p, &tmp))
517789Sahrens 					return;
518789Sahrens 				break;
519789Sahrens 			case UIO_SYSSPACE:
520789Sahrens 				if (kcopy(p, &tmp, 1))
521789Sahrens 					return;
522789Sahrens 				break;
523789Sahrens 			}
524789Sahrens 			incr = MIN(cnt, PAGESIZE);
525789Sahrens 			p += incr;
526789Sahrens 			cnt -= incr;
527789Sahrens 		}
528789Sahrens 		/*
529789Sahrens 		 * touch the last byte in case it straddles a page.
530789Sahrens 		 */
531789Sahrens 		p--;
532789Sahrens 		switch (uio->uio_segflg) {
533789Sahrens 		case UIO_USERSPACE:
534789Sahrens 		case UIO_USERISPACE:
535789Sahrens 			if (fuword8(p, &tmp))
536789Sahrens 				return;
537789Sahrens 			break;
538789Sahrens 		case UIO_SYSSPACE:
539789Sahrens 			if (kcopy(p, &tmp, 1))
540789Sahrens 				return;
541789Sahrens 			break;
542789Sahrens 		}
543789Sahrens 		iov++;
544789Sahrens 	}
545789Sahrens }
546789Sahrens 
547789Sahrens /*
548789Sahrens  * Write the bytes to a file.
549789Sahrens  *
550789Sahrens  *	IN:	vp	- vnode of file to be written to.
551789Sahrens  *		uio	- structure supplying write location, range info,
552789Sahrens  *			  and data buffer.
553789Sahrens  *		ioflag	- FAPPEND flag set if in append mode.
554789Sahrens  *		cr	- credentials of caller.
555789Sahrens  *
556789Sahrens  *	OUT:	uio	- updated offset and range.
557789Sahrens  *
558789Sahrens  *	RETURN:	0 if success
559789Sahrens  *		error code if failure
560789Sahrens  *
561789Sahrens  * Timestamps:
562789Sahrens  *	vp - ctime|mtime updated if byte count > 0
563789Sahrens  */
564789Sahrens /* ARGSUSED */
565789Sahrens static int
566789Sahrens zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
567789Sahrens {
568789Sahrens 	znode_t		*zp = VTOZ(vp);
569789Sahrens 	rlim64_t	limit = uio->uio_llimit;
570789Sahrens 	ssize_t		start_resid = uio->uio_resid;
571789Sahrens 	ssize_t		tx_bytes;
572789Sahrens 	uint64_t	end_size;
573789Sahrens 	dmu_tx_t	*tx;
574789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
575*5326Sek110237 	zilog_t		*zilog;
576789Sahrens 	offset_t	woff;
577789Sahrens 	ssize_t		n, nbytes;
5781669Sperrin 	rl_t		*rl;
579789Sahrens 	int		max_blksz = zfsvfs->z_max_blksz;
5801669Sperrin 	int		error;
581789Sahrens 
582789Sahrens 	/*
583789Sahrens 	 * Fasttrack empty write
584789Sahrens 	 */
5851669Sperrin 	n = start_resid;
586789Sahrens 	if (n == 0)
587789Sahrens 		return (0);
588789Sahrens 
5891669Sperrin 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
5901669Sperrin 		limit = MAXOFFSET_T;
5911669Sperrin 
592*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
593*5326Sek110237 	zilog = zfsvfs->z_log;
594789Sahrens 
595789Sahrens 	/*
5962237Smaybee 	 * Pre-fault the pages to ensure slow (eg NFS) pages
5971669Sperrin 	 * don't hold up txg.
598789Sahrens 	 */
5992237Smaybee 	zfs_prefault_write(n, uio);
600789Sahrens 
601789Sahrens 	/*
602789Sahrens 	 * If in append mode, set the io offset pointer to eof.
603789Sahrens 	 */
6041669Sperrin 	if (ioflag & FAPPEND) {
6051669Sperrin 		/*
6061669Sperrin 		 * Range lock for a file append:
6071669Sperrin 		 * The value for the start of range will be determined by
6081669Sperrin 		 * zfs_range_lock() (to guarantee append semantics).
6091669Sperrin 		 * If this write will cause the block size to increase,
6101669Sperrin 		 * zfs_range_lock() will lock the entire file, so we must
6111669Sperrin 		 * later reduce the range after we grow the block size.
6121669Sperrin 		 */
6131669Sperrin 		rl = zfs_range_lock(zp, 0, n, RL_APPEND);
6141669Sperrin 		if (rl->r_len == UINT64_MAX) {
6151669Sperrin 			/* overlocked, zp_size can't change */
6161669Sperrin 			woff = uio->uio_loffset = zp->z_phys->zp_size;
6171669Sperrin 		} else {
6181669Sperrin 			woff = uio->uio_loffset = rl->r_off;
6191669Sperrin 		}
620789Sahrens 	} else {
621789Sahrens 		woff = uio->uio_loffset;
622789Sahrens 		/*
623789Sahrens 		 * Validate file offset
624789Sahrens 		 */
625789Sahrens 		if (woff < 0) {
626789Sahrens 			ZFS_EXIT(zfsvfs);
627789Sahrens 			return (EINVAL);
628789Sahrens 		}
629789Sahrens 
630789Sahrens 		/*
6311669Sperrin 		 * If we need to grow the block size then zfs_range_lock()
6321669Sperrin 		 * will lock a wider range than we request here.
6331669Sperrin 		 * Later after growing the block size we reduce the range.
634789Sahrens 		 */
6351669Sperrin 		rl = zfs_range_lock(zp, woff, n, RL_WRITER);
636789Sahrens 	}
637789Sahrens 
638789Sahrens 	if (woff >= limit) {
6393638Sbillm 		zfs_range_unlock(rl);
6403638Sbillm 		ZFS_EXIT(zfsvfs);
6413638Sbillm 		return (EFBIG);
642789Sahrens 	}
643789Sahrens 
644789Sahrens 	if ((woff + n) > limit || woff > (limit - n))
645789Sahrens 		n = limit - woff;
646789Sahrens 
647789Sahrens 	/*
6481669Sperrin 	 * Check for mandatory locks
649789Sahrens 	 */
650789Sahrens 	if (MANDMODE((mode_t)zp->z_phys->zp_mode) &&
6513638Sbillm 	    (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
6523638Sbillm 		zfs_range_unlock(rl);
6533638Sbillm 		ZFS_EXIT(zfsvfs);
6543638Sbillm 		return (error);
6553638Sbillm 	}
6561669Sperrin 	end_size = MAX(zp->z_phys->zp_size, woff + n);
657789Sahrens 
6581669Sperrin 	/*
6593638Sbillm 	 * Write the file in reasonable size chunks.  Each chunk is written
6603638Sbillm 	 * in a separate transaction; this keeps the intent log records small
6613638Sbillm 	 * and allows us to do more fine-grained space accounting.
662789Sahrens 	 */
663789Sahrens 	while (n > 0) {
664789Sahrens 		/*
6653638Sbillm 		 * Start a transaction.
666789Sahrens 		 */
667789Sahrens 		woff = uio->uio_loffset;
668789Sahrens 		tx = dmu_tx_create(zfsvfs->z_os);
669789Sahrens 		dmu_tx_hold_bonus(tx, zp->z_id);
670789Sahrens 		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
671789Sahrens 		error = dmu_tx_assign(tx, zfsvfs->z_assign);
672789Sahrens 		if (error) {
673789Sahrens 			if (error == ERESTART &&
674789Sahrens 			    zfsvfs->z_assign == TXG_NOWAIT) {
6752113Sahrens 				dmu_tx_wait(tx);
6762113Sahrens 				dmu_tx_abort(tx);
6773638Sbillm 				continue;
678789Sahrens 			}
6792113Sahrens 			dmu_tx_abort(tx);
6803638Sbillm 			break;
6813638Sbillm 		}
6823638Sbillm 
6833638Sbillm 		/*
6843638Sbillm 		 * If zfs_range_lock() over-locked we grow the blocksize
6853638Sbillm 		 * and then reduce the lock range.  This will only happen
6863638Sbillm 		 * on the first iteration since zfs_range_reduce() will
6873638Sbillm 		 * shrink down r_len to the appropriate size.
6883638Sbillm 		 */
6893638Sbillm 		if (rl->r_len == UINT64_MAX) {
6903638Sbillm 			uint64_t new_blksz;
6913638Sbillm 
6923638Sbillm 			if (zp->z_blksz > max_blksz) {
6933638Sbillm 				ASSERT(!ISP2(zp->z_blksz));
6943638Sbillm 				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
6953638Sbillm 			} else {
6963638Sbillm 				new_blksz = MIN(end_size, max_blksz);
6973638Sbillm 			}
6983638Sbillm 			zfs_grow_blocksize(zp, new_blksz, tx);
6993638Sbillm 			zfs_range_reduce(rl, woff, n);
7003638Sbillm 		}
7013638Sbillm 
7023638Sbillm 		/*
7033638Sbillm 		 * XXX - should we really limit each write to z_max_blksz?
7043638Sbillm 		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
7053638Sbillm 		 */
7063638Sbillm 		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
7073638Sbillm 		rw_enter(&zp->z_map_lock, RW_READER);
7083638Sbillm 
7093638Sbillm 		tx_bytes = uio->uio_resid;
7103638Sbillm 		if (vn_has_cached_data(vp)) {
7113638Sbillm 			rw_exit(&zp->z_map_lock);
7123638Sbillm 			error = mappedwrite(vp, nbytes, uio, tx);
7133638Sbillm 		} else {
7143638Sbillm 			error = dmu_write_uio(zfsvfs->z_os, zp->z_id,
7153638Sbillm 			    uio, nbytes, tx);
7163638Sbillm 			rw_exit(&zp->z_map_lock);
717789Sahrens 		}
7183638Sbillm 		tx_bytes -= uio->uio_resid;
7193638Sbillm 
7203638Sbillm 		/*
7213638Sbillm 		 * If we made no progress, we're done.  If we made even
7223638Sbillm 		 * partial progress, update the znode and ZIL accordingly.
7233638Sbillm 		 */
7243638Sbillm 		if (tx_bytes == 0) {
7253897Smaybee 			dmu_tx_commit(tx);
7263638Sbillm 			ASSERT(error != 0);
7273638Sbillm 			break;
7283638Sbillm 		}
7293638Sbillm 
730789Sahrens 		/*
7313638Sbillm 		 * Clear Set-UID/Set-GID bits on successful write if not
7323638Sbillm 		 * privileged and at least one of the excute bits is set.
7333638Sbillm 		 *
7343638Sbillm 		 * It would be nice to to this after all writes have
7353638Sbillm 		 * been done, but that would still expose the ISUID/ISGID
7363638Sbillm 		 * to another app after the partial write is committed.
737789Sahrens 		 */
7383638Sbillm 		mutex_enter(&zp->z_acl_lock);
7393638Sbillm 		if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) |
7403638Sbillm 		    (S_IXUSR >> 6))) != 0 &&
7413638Sbillm 		    (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 &&
7423638Sbillm 		    secpolicy_vnode_setid_retain(cr,
7433638Sbillm 		    (zp->z_phys->zp_mode & S_ISUID) != 0 &&
7443638Sbillm 		    zp->z_phys->zp_uid == 0) != 0) {
7454339Sperrin 			zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID);
7463638Sbillm 		}
7473638Sbillm 		mutex_exit(&zp->z_acl_lock);
7483638Sbillm 
7493638Sbillm 		/*
7503638Sbillm 		 * Update time stamp.  NOTE: This marks the bonus buffer as
7513638Sbillm 		 * dirty, so we don't have to do it again for zp_size.
7523638Sbillm 		 */
7533638Sbillm 		zfs_time_stamper(zp, CONTENT_MODIFIED, tx);
7543638Sbillm 
7553638Sbillm 		/*
7563638Sbillm 		 * Update the file size (zp_size) if it has changed;
7573638Sbillm 		 * account for possible concurrent updates.
7583638Sbillm 		 */
7593638Sbillm 		while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset)
760789Sahrens 			(void) atomic_cas_64(&zp->z_phys->zp_size, end_size,
761789Sahrens 			    uio->uio_loffset);
7623638Sbillm 		zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
7633638Sbillm 		dmu_tx_commit(tx);
7643638Sbillm 
7653638Sbillm 		if (error != 0)
7663638Sbillm 			break;
7673638Sbillm 		ASSERT(tx_bytes == nbytes);
7683638Sbillm 		n -= nbytes;
769789Sahrens 	}
770789Sahrens 
7712237Smaybee 	zfs_range_unlock(rl);
772789Sahrens 
773789Sahrens 	/*
774789Sahrens 	 * If we're in replay mode, or we made no progress, return error.
775789Sahrens 	 * Otherwise, it's at least a partial write, so it's successful.
776789Sahrens 	 */
777789Sahrens 	if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) {
778789Sahrens 		ZFS_EXIT(zfsvfs);
779789Sahrens 		return (error);
780789Sahrens 	}
781789Sahrens 
7822638Sperrin 	if (ioflag & (FSYNC | FDSYNC))
7832638Sperrin 		zil_commit(zilog, zp->z_last_itx, zp->z_id);
784789Sahrens 
785789Sahrens 	ZFS_EXIT(zfsvfs);
786789Sahrens 	return (0);
787789Sahrens }
788789Sahrens 
7892237Smaybee void
7903063Sperrin zfs_get_done(dmu_buf_t *db, void *vzgd)
7912237Smaybee {
7923063Sperrin 	zgd_t *zgd = (zgd_t *)vzgd;
7933063Sperrin 	rl_t *rl = zgd->zgd_rl;
7942237Smaybee 	vnode_t *vp = ZTOV(rl->r_zp);
7952237Smaybee 
7963063Sperrin 	dmu_buf_rele(db, vzgd);
7972237Smaybee 	zfs_range_unlock(rl);
7982237Smaybee 	VN_RELE(vp);
7993063Sperrin 	zil_add_vdev(zgd->zgd_zilog, DVA_GET_VDEV(BP_IDENTITY(zgd->zgd_bp)));
8003063Sperrin 	kmem_free(zgd, sizeof (zgd_t));
8012237Smaybee }
8022237Smaybee 
803789Sahrens /*
804789Sahrens  * Get data to generate a TX_WRITE intent log record.
805789Sahrens  */
806789Sahrens int
8072237Smaybee zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
808789Sahrens {
809789Sahrens 	zfsvfs_t *zfsvfs = arg;
810789Sahrens 	objset_t *os = zfsvfs->z_os;
811789Sahrens 	znode_t *zp;
812789Sahrens 	uint64_t off = lr->lr_offset;
8132237Smaybee 	dmu_buf_t *db;
8141669Sperrin 	rl_t *rl;
8153063Sperrin 	zgd_t *zgd;
8163638Sbillm 	int dlen = lr->lr_length;		/* length of user data */
817789Sahrens 	int error = 0;
818789Sahrens 
8193063Sperrin 	ASSERT(zio);
820789Sahrens 	ASSERT(dlen != 0);
821789Sahrens 
822789Sahrens 	/*
8231669Sperrin 	 * Nothing to do if the file has been removed
824789Sahrens 	 */
825789Sahrens 	if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0)
826789Sahrens 		return (ENOENT);
8273461Sahrens 	if (zp->z_unlinked) {
828789Sahrens 		VN_RELE(ZTOV(zp));
829789Sahrens 		return (ENOENT);
830789Sahrens 	}
831789Sahrens 
832789Sahrens 	/*
833789Sahrens 	 * Write records come in two flavors: immediate and indirect.
834789Sahrens 	 * For small writes it's cheaper to store the data with the
835789Sahrens 	 * log record (immediate); for large writes it's cheaper to
836789Sahrens 	 * sync the data and get a pointer to it (indirect) so that
837789Sahrens 	 * we don't have to write the data twice.
838789Sahrens 	 */
8391669Sperrin 	if (buf != NULL) { /* immediate write */
8401669Sperrin 		rl = zfs_range_lock(zp, off, dlen, RL_READER);
8411669Sperrin 		/* test for truncation needs to be done while range locked */
8421669Sperrin 		if (off >= zp->z_phys->zp_size) {
8431669Sperrin 			error = ENOENT;
8441669Sperrin 			goto out;
8451669Sperrin 		}
8462449Smaybee 		VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf));
8471669Sperrin 	} else { /* indirect write */
8481669Sperrin 		uint64_t boff; /* block starting offset */
8491669Sperrin 
850789Sahrens 		/*
8511669Sperrin 		 * Have to lock the whole block to ensure when it's
8521669Sperrin 		 * written out and it's checksum is being calculated
8531669Sperrin 		 * that no one can change the data. We need to re-check
8541669Sperrin 		 * blocksize after we get the lock in case it's changed!
855789Sahrens 		 */
8561669Sperrin 		for (;;) {
8571941Sperrin 			if (ISP2(zp->z_blksz)) {
8581941Sperrin 				boff = P2ALIGN_TYPED(off, zp->z_blksz,
8591941Sperrin 				    uint64_t);
8601941Sperrin 			} else {
8611941Sperrin 				boff = 0;
8621941Sperrin 			}
8631669Sperrin 			dlen = zp->z_blksz;
8641669Sperrin 			rl = zfs_range_lock(zp, boff, dlen, RL_READER);
8651669Sperrin 			if (zp->z_blksz == dlen)
8661669Sperrin 				break;
8672237Smaybee 			zfs_range_unlock(rl);
8681669Sperrin 		}
8691669Sperrin 		/* test for truncation needs to be done while range locked */
8701669Sperrin 		if (off >= zp->z_phys->zp_size) {
8711669Sperrin 			error = ENOENT;
8721669Sperrin 			goto out;
8731669Sperrin 		}
8743063Sperrin 		zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
8753063Sperrin 		zgd->zgd_rl = rl;
8763063Sperrin 		zgd->zgd_zilog = zfsvfs->z_log;
8773063Sperrin 		zgd->zgd_bp = &lr->lr_blkptr;
8783063Sperrin 		VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db));
8792237Smaybee 		ASSERT(boff == db->db_offset);
8802237Smaybee 		lr->lr_blkoff = off - boff;
8812237Smaybee 		error = dmu_sync(zio, db, &lr->lr_blkptr,
8823063Sperrin 		    lr->lr_common.lrc_txg, zfs_get_done, zgd);
8834709Smaybee 		ASSERT((error && error != EINPROGRESS) ||
8844709Smaybee 		    lr->lr_length <= zp->z_blksz);
8853063Sperrin 		if (error == 0) {
8863063Sperrin 			zil_add_vdev(zfsvfs->z_log,
8873063Sperrin 			    DVA_GET_VDEV(BP_IDENTITY(&lr->lr_blkptr)));
8883063Sperrin 		}
8892237Smaybee 		/*
8902237Smaybee 		 * If we get EINPROGRESS, then we need to wait for a
8912237Smaybee 		 * write IO initiated by dmu_sync() to complete before
8922638Sperrin 		 * we can release this dbuf.  We will finish everything
8932237Smaybee 		 * up in the zfs_get_done() callback.
8942237Smaybee 		 */
8952237Smaybee 		if (error == EINPROGRESS)
8962237Smaybee 			return (0);
8973063Sperrin 		dmu_buf_rele(db, zgd);
8983063Sperrin 		kmem_free(zgd, sizeof (zgd_t));
899789Sahrens 	}
9001669Sperrin out:
9012237Smaybee 	zfs_range_unlock(rl);
902789Sahrens 	VN_RELE(ZTOV(zp));
903789Sahrens 	return (error);
904789Sahrens }
905789Sahrens 
906789Sahrens /*ARGSUSED*/
907789Sahrens static int
908789Sahrens zfs_access(vnode_t *vp, int mode, int flags, cred_t *cr)
909789Sahrens {
910789Sahrens 	znode_t *zp = VTOZ(vp);
911789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
912789Sahrens 	int error;
913789Sahrens 
914*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
915789Sahrens 	error = zfs_zaccess_rwx(zp, mode, cr);
916789Sahrens 	ZFS_EXIT(zfsvfs);
917789Sahrens 	return (error);
918789Sahrens }
919789Sahrens 
920789Sahrens /*
921789Sahrens  * Lookup an entry in a directory, or an extended attribute directory.
922789Sahrens  * If it exists, return a held vnode reference for it.
923789Sahrens  *
924789Sahrens  *	IN:	dvp	- vnode of directory to search.
925789Sahrens  *		nm	- name of entry to lookup.
926789Sahrens  *		pnp	- full pathname to lookup [UNUSED].
927789Sahrens  *		flags	- LOOKUP_XATTR set if looking for an attribute.
928789Sahrens  *		rdir	- root directory vnode [UNUSED].
929789Sahrens  *		cr	- credentials of caller.
930789Sahrens  *
931789Sahrens  *	OUT:	vpp	- vnode of located entry, NULL if not found.
932789Sahrens  *
933789Sahrens  *	RETURN:	0 if success
934789Sahrens  *		error code if failure
935789Sahrens  *
936789Sahrens  * Timestamps:
937789Sahrens  *	NA
938789Sahrens  */
939789Sahrens /* ARGSUSED */
940789Sahrens static int
941789Sahrens zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
942789Sahrens     int flags, vnode_t *rdir, cred_t *cr)
943789Sahrens {
944789Sahrens 
945789Sahrens 	znode_t *zdp = VTOZ(dvp);
946789Sahrens 	zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
947789Sahrens 	int	error;
948789Sahrens 
949*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zdp);
950789Sahrens 
951789Sahrens 	*vpp = NULL;
952789Sahrens 
953789Sahrens 	if (flags & LOOKUP_XATTR) {
954789Sahrens 		/*
9553234Sck153898 		 * If the xattr property is off, refuse the lookup request.
9563234Sck153898 		 */
9573234Sck153898 		if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
9583234Sck153898 			ZFS_EXIT(zfsvfs);
9593234Sck153898 			return (EINVAL);
9603234Sck153898 		}
9613234Sck153898 
9623234Sck153898 		/*
963789Sahrens 		 * We don't allow recursive attributes..
964789Sahrens 		 * Maybe someday we will.
965789Sahrens 		 */
966789Sahrens 		if (zdp->z_phys->zp_flags & ZFS_XATTR) {
967789Sahrens 			ZFS_EXIT(zfsvfs);
968789Sahrens 			return (EINVAL);
969789Sahrens 		}
970789Sahrens 
9713280Sck153898 		if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
972789Sahrens 			ZFS_EXIT(zfsvfs);
973789Sahrens 			return (error);
974789Sahrens 		}
975789Sahrens 
976789Sahrens 		/*
977789Sahrens 		 * Do we have permission to get into attribute directory?
978789Sahrens 		 */
979789Sahrens 
980789Sahrens 		if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, cr)) {
981789Sahrens 			VN_RELE(*vpp);
982789Sahrens 		}
983789Sahrens 
984789Sahrens 		ZFS_EXIT(zfsvfs);
985789Sahrens 		return (error);
986789Sahrens 	}
987789Sahrens 
9881512Sek110237 	if (dvp->v_type != VDIR) {
9891512Sek110237 		ZFS_EXIT(zfsvfs);
9901460Smarks 		return (ENOTDIR);
9911512Sek110237 	}
9921460Smarks 
993789Sahrens 	/*
994789Sahrens 	 * Check accessibility of directory.
995789Sahrens 	 */
996789Sahrens 
997789Sahrens 	if (error = zfs_zaccess(zdp, ACE_EXECUTE, cr)) {
998789Sahrens 		ZFS_EXIT(zfsvfs);
999789Sahrens 		return (error);
1000789Sahrens 	}
1001789Sahrens 
1002789Sahrens 	if ((error = zfs_dirlook(zdp, nm, vpp)) == 0) {
1003789Sahrens 
1004789Sahrens 		/*
1005789Sahrens 		 * Convert device special files
1006789Sahrens 		 */
1007789Sahrens 		if (IS_DEVVP(*vpp)) {
1008789Sahrens 			vnode_t	*svp;
1009789Sahrens 
1010789Sahrens 			svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1011789Sahrens 			VN_RELE(*vpp);
1012789Sahrens 			if (svp == NULL)
1013789Sahrens 				error = ENOSYS;
1014789Sahrens 			else
1015789Sahrens 				*vpp = svp;
1016789Sahrens 		}
1017789Sahrens 	}
1018789Sahrens 
1019789Sahrens 	ZFS_EXIT(zfsvfs);
1020789Sahrens 	return (error);
1021789Sahrens }
1022789Sahrens 
1023789Sahrens /*
1024789Sahrens  * Attempt to create a new entry in a directory.  If the entry
1025789Sahrens  * already exists, truncate the file if permissible, else return
1026789Sahrens  * an error.  Return the vp of the created or trunc'd file.
1027789Sahrens  *
1028789Sahrens  *	IN:	dvp	- vnode of directory to put new file entry in.
1029789Sahrens  *		name	- name of new file entry.
1030789Sahrens  *		vap	- attributes of new file.
1031789Sahrens  *		excl	- flag indicating exclusive or non-exclusive mode.
1032789Sahrens  *		mode	- mode to open file with.
1033789Sahrens  *		cr	- credentials of caller.
1034789Sahrens  *		flag	- large file flag [UNUSED].
1035789Sahrens  *
1036789Sahrens  *	OUT:	vpp	- vnode of created or trunc'd entry.
1037789Sahrens  *
1038789Sahrens  *	RETURN:	0 if success
1039789Sahrens  *		error code if failure
1040789Sahrens  *
1041789Sahrens  * Timestamps:
1042789Sahrens  *	dvp - ctime|mtime updated if new entry created
1043789Sahrens  *	 vp - ctime|mtime always, atime if new
1044789Sahrens  */
1045789Sahrens /* ARGSUSED */
1046789Sahrens static int
1047789Sahrens zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl,
1048789Sahrens     int mode, vnode_t **vpp, cred_t *cr, int flag)
1049789Sahrens {
1050789Sahrens 	znode_t		*zp, *dzp = VTOZ(dvp);
1051789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1052*5326Sek110237 	zilog_t		*zilog;
1053*5326Sek110237 	objset_t	*os;
1054789Sahrens 	zfs_dirlock_t	*dl;
1055789Sahrens 	dmu_tx_t	*tx;
1056789Sahrens 	int		error;
1057789Sahrens 	uint64_t	zoid;
1058789Sahrens 
1059*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
1060*5326Sek110237 	os = zfsvfs->z_os;
1061*5326Sek110237 	zilog = zfsvfs->z_log;
1062789Sahrens 
1063789Sahrens top:
1064789Sahrens 	*vpp = NULL;
1065789Sahrens 
1066789Sahrens 	if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr))
1067789Sahrens 		vap->va_mode &= ~VSVTX;
1068789Sahrens 
1069789Sahrens 	if (*name == '\0') {
1070789Sahrens 		/*
1071789Sahrens 		 * Null component name refers to the directory itself.
1072789Sahrens 		 */
1073789Sahrens 		VN_HOLD(dvp);
1074789Sahrens 		zp = dzp;
1075789Sahrens 		dl = NULL;
1076789Sahrens 		error = 0;
1077789Sahrens 	} else {
1078789Sahrens 		/* possible VN_HOLD(zp) */
1079789Sahrens 		if (error = zfs_dirent_lock(&dl, dzp, name, &zp, 0)) {
1080789Sahrens 			if (strcmp(name, "..") == 0)
1081789Sahrens 				error = EISDIR;
1082789Sahrens 			ZFS_EXIT(zfsvfs);
1083789Sahrens 			return (error);
1084789Sahrens 		}
1085789Sahrens 	}
1086789Sahrens 
1087789Sahrens 	zoid = zp ? zp->z_id : -1ULL;
1088789Sahrens 
1089789Sahrens 	if (zp == NULL) {
1090789Sahrens 		/*
1091789Sahrens 		 * Create a new file object and update the directory
1092789Sahrens 		 * to reference it.
1093789Sahrens 		 */
1094789Sahrens 		if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
1095789Sahrens 			goto out;
1096789Sahrens 		}
1097789Sahrens 
1098789Sahrens 		/*
1099789Sahrens 		 * We only support the creation of regular files in
1100789Sahrens 		 * extended attribute directories.
1101789Sahrens 		 */
1102789Sahrens 		if ((dzp->z_phys->zp_flags & ZFS_XATTR) &&
1103789Sahrens 		    (vap->va_type != VREG)) {
1104789Sahrens 			error = EINVAL;
1105789Sahrens 			goto out;
1106789Sahrens 		}
1107789Sahrens 
1108789Sahrens 		tx = dmu_tx_create(os);
1109789Sahrens 		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
1110789Sahrens 		dmu_tx_hold_bonus(tx, dzp->z_id);
11111544Seschrock 		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1112789Sahrens 		if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
1113789Sahrens 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1114789Sahrens 			    0, SPA_MAXBLOCKSIZE);
1115789Sahrens 		error = dmu_tx_assign(tx, zfsvfs->z_assign);
1116789Sahrens 		if (error) {
1117789Sahrens 			zfs_dirent_unlock(dl);
1118789Sahrens 			if (error == ERESTART &&
1119789Sahrens 			    zfsvfs->z_assign == TXG_NOWAIT) {
11202113Sahrens 				dmu_tx_wait(tx);
11212113Sahrens 				dmu_tx_abort(tx);
1122789Sahrens 				goto top;
1123789Sahrens 			}
11242113Sahrens 			dmu_tx_abort(tx);
1125789Sahrens 			ZFS_EXIT(zfsvfs);
1126789Sahrens 			return (error);
1127789Sahrens 		}
1128789Sahrens 		zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
1129789Sahrens 		ASSERT(zp->z_id == zoid);
1130789Sahrens 		(void) zfs_link_create(dl, zp, tx, ZNEW);
11312638Sperrin 		zfs_log_create(zilog, tx, TX_CREATE, dzp, zp, name);
1132789Sahrens 		dmu_tx_commit(tx);
1133789Sahrens 	} else {
1134789Sahrens 		/*
1135789Sahrens 		 * A directory entry already exists for this name.
1136789Sahrens 		 */
1137789Sahrens 		/*
1138789Sahrens 		 * Can't truncate an existing file if in exclusive mode.
1139789Sahrens 		 */
1140789Sahrens 		if (excl == EXCL) {
1141789Sahrens 			error = EEXIST;
1142789Sahrens 			goto out;
1143789Sahrens 		}
1144789Sahrens 		/*
1145789Sahrens 		 * Can't open a directory for writing.
1146789Sahrens 		 */
1147789Sahrens 		if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
1148789Sahrens 			error = EISDIR;
1149789Sahrens 			goto out;
1150789Sahrens 		}
1151789Sahrens 		/*
1152789Sahrens 		 * Verify requested access to file.
1153789Sahrens 		 */
1154789Sahrens 		if (mode && (error = zfs_zaccess_rwx(zp, mode, cr))) {
1155789Sahrens 			goto out;
1156789Sahrens 		}
1157789Sahrens 
1158789Sahrens 		mutex_enter(&dzp->z_lock);
1159789Sahrens 		dzp->z_seq++;
1160789Sahrens 		mutex_exit(&dzp->z_lock);
1161789Sahrens 
11621878Smaybee 		/*
11631878Smaybee 		 * Truncate regular files if requested.
11641878Smaybee 		 */
11651878Smaybee 		if ((ZTOV(zp)->v_type == VREG) &&
1166789Sahrens 		    (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) {
11671878Smaybee 			error = zfs_freesp(zp, 0, 0, mode, TRUE);
11681878Smaybee 			if (error == ERESTART &&
11691878Smaybee 			    zfsvfs->z_assign == TXG_NOWAIT) {
11702113Sahrens 				/* NB: we already did dmu_tx_wait() */
11711878Smaybee 				zfs_dirent_unlock(dl);
11722365Sperrin 				VN_RELE(ZTOV(zp));
11731878Smaybee 				goto top;
1174789Sahrens 			}
11754863Spraks 
11764863Spraks 			if (error == 0) {
11774863Spraks 				vnevent_create(ZTOV(zp));
11784863Spraks 			}
1179789Sahrens 		}
1180789Sahrens 	}
1181789Sahrens out:
1182789Sahrens 
1183789Sahrens 	if (dl)
1184789Sahrens 		zfs_dirent_unlock(dl);
1185789Sahrens 
1186789Sahrens 	if (error) {
1187789Sahrens 		if (zp)
1188789Sahrens 			VN_RELE(ZTOV(zp));
1189789Sahrens 	} else {
1190789Sahrens 		*vpp = ZTOV(zp);
1191789Sahrens 		/*
1192789Sahrens 		 * If vnode is for a device return a specfs vnode instead.
1193789Sahrens 		 */
1194789Sahrens 		if (IS_DEVVP(*vpp)) {
1195789Sahrens 			struct vnode *svp;
1196789Sahrens 
1197789Sahrens 			svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
1198789Sahrens 			VN_RELE(*vpp);
1199789Sahrens 			if (svp == NULL) {
1200789Sahrens 				error = ENOSYS;
1201789Sahrens 			}
1202789Sahrens 			*vpp = svp;
1203789Sahrens 		}
1204789Sahrens 	}
1205789Sahrens 
1206789Sahrens 	ZFS_EXIT(zfsvfs);
1207789Sahrens 	return (error);
1208789Sahrens }
1209789Sahrens 
1210789Sahrens /*
1211789Sahrens  * Remove an entry from a directory.
1212789Sahrens  *
1213789Sahrens  *	IN:	dvp	- vnode of directory to remove entry from.
1214789Sahrens  *		name	- name of entry to remove.
1215789Sahrens  *		cr	- credentials of caller.
1216789Sahrens  *
1217789Sahrens  *	RETURN:	0 if success
1218789Sahrens  *		error code if failure
1219789Sahrens  *
1220789Sahrens  * Timestamps:
1221789Sahrens  *	dvp - ctime|mtime
1222789Sahrens  *	 vp - ctime (if nlink > 0)
1223789Sahrens  */
1224789Sahrens static int
1225789Sahrens zfs_remove(vnode_t *dvp, char *name, cred_t *cr)
1226789Sahrens {
1227789Sahrens 	znode_t		*zp, *dzp = VTOZ(dvp);
1228789Sahrens 	znode_t		*xzp = NULL;
1229789Sahrens 	vnode_t		*vp;
1230789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1231*5326Sek110237 	zilog_t		*zilog;
1232789Sahrens 	uint64_t	acl_obj, xattr_obj;
1233789Sahrens 	zfs_dirlock_t	*dl;
1234789Sahrens 	dmu_tx_t	*tx;
12353461Sahrens 	boolean_t	may_delete_now, delete_now = FALSE;
12363461Sahrens 	boolean_t	unlinked;
1237789Sahrens 	int		error;
1238789Sahrens 
1239*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
1240*5326Sek110237 	zilog = zfsvfs->z_log;
1241789Sahrens 
1242789Sahrens top:
1243789Sahrens 	/*
1244789Sahrens 	 * Attempt to lock directory; fail if entry doesn't exist.
1245789Sahrens 	 */
1246789Sahrens 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS)) {
1247789Sahrens 		ZFS_EXIT(zfsvfs);
1248789Sahrens 		return (error);
1249789Sahrens 	}
1250789Sahrens 
1251789Sahrens 	vp = ZTOV(zp);
1252789Sahrens 
1253789Sahrens 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1254789Sahrens 		goto out;
1255789Sahrens 	}
1256789Sahrens 
1257789Sahrens 	/*
1258789Sahrens 	 * Need to use rmdir for removing directories.
1259789Sahrens 	 */
1260789Sahrens 	if (vp->v_type == VDIR) {
1261789Sahrens 		error = EPERM;
1262789Sahrens 		goto out;
1263789Sahrens 	}
1264789Sahrens 
12654863Spraks 	vnevent_remove(vp, dvp, name);
1266789Sahrens 
12671484Sek110237 	dnlc_remove(dvp, name);
12681484Sek110237 
1269789Sahrens 	mutex_enter(&vp->v_lock);
1270789Sahrens 	may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp);
1271789Sahrens 	mutex_exit(&vp->v_lock);
1272789Sahrens 
1273789Sahrens 	/*
12743461Sahrens 	 * We may delete the znode now, or we may put it in the unlinked set;
1275789Sahrens 	 * it depends on whether we're the last link, and on whether there are
1276789Sahrens 	 * other holds on the vnode.  So we dmu_tx_hold() the right things to
1277789Sahrens 	 * allow for either case.
1278789Sahrens 	 */
1279789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
12801544Seschrock 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1281789Sahrens 	dmu_tx_hold_bonus(tx, zp->z_id);
1282789Sahrens 	if (may_delete_now)
1283789Sahrens 		dmu_tx_hold_free(tx, zp->z_id, 0, DMU_OBJECT_END);
1284789Sahrens 
1285789Sahrens 	/* are there any extended attributes? */
1286789Sahrens 	if ((xattr_obj = zp->z_phys->zp_xattr) != 0) {
1287789Sahrens 		/* XXX - do we need this if we are deleting? */
1288789Sahrens 		dmu_tx_hold_bonus(tx, xattr_obj);
1289789Sahrens 	}
1290789Sahrens 
1291789Sahrens 	/* are there any additional acls */
1292789Sahrens 	if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 &&
1293789Sahrens 	    may_delete_now)
1294789Sahrens 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
1295789Sahrens 
1296789Sahrens 	/* charge as an update -- would be nice not to charge at all */
12973461Sahrens 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1298789Sahrens 
1299789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
1300789Sahrens 	if (error) {
1301789Sahrens 		zfs_dirent_unlock(dl);
1302789Sahrens 		VN_RELE(vp);
1303789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
13042113Sahrens 			dmu_tx_wait(tx);
13052113Sahrens 			dmu_tx_abort(tx);
1306789Sahrens 			goto top;
1307789Sahrens 		}
13082113Sahrens 		dmu_tx_abort(tx);
1309789Sahrens 		ZFS_EXIT(zfsvfs);
1310789Sahrens 		return (error);
1311789Sahrens 	}
1312789Sahrens 
1313789Sahrens 	/*
1314789Sahrens 	 * Remove the directory entry.
1315789Sahrens 	 */
13163461Sahrens 	error = zfs_link_destroy(dl, zp, tx, 0, &unlinked);
1317789Sahrens 
1318789Sahrens 	if (error) {
1319789Sahrens 		dmu_tx_commit(tx);
1320789Sahrens 		goto out;
1321789Sahrens 	}
1322789Sahrens 
13233461Sahrens 	if (unlinked) {
1324789Sahrens 		mutex_enter(&vp->v_lock);
1325789Sahrens 		delete_now = may_delete_now &&
1326789Sahrens 		    vp->v_count == 1 && !vn_has_cached_data(vp) &&
1327789Sahrens 		    zp->z_phys->zp_xattr == xattr_obj &&
1328789Sahrens 		    zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj;
1329789Sahrens 		mutex_exit(&vp->v_lock);
1330789Sahrens 	}
1331789Sahrens 
1332789Sahrens 	if (delete_now) {
1333789Sahrens 		if (zp->z_phys->zp_xattr) {
1334789Sahrens 			error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp);
1335789Sahrens 			ASSERT3U(error, ==, 0);
1336789Sahrens 			ASSERT3U(xzp->z_phys->zp_links, ==, 2);
1337789Sahrens 			dmu_buf_will_dirty(xzp->z_dbuf, tx);
1338789Sahrens 			mutex_enter(&xzp->z_lock);
13393461Sahrens 			xzp->z_unlinked = 1;
1340789Sahrens 			xzp->z_phys->zp_links = 0;
1341789Sahrens 			mutex_exit(&xzp->z_lock);
13423461Sahrens 			zfs_unlinked_add(xzp, tx);
1343789Sahrens 			zp->z_phys->zp_xattr = 0; /* probably unnecessary */
1344789Sahrens 		}
1345789Sahrens 		mutex_enter(&zp->z_lock);
1346789Sahrens 		mutex_enter(&vp->v_lock);
1347789Sahrens 		vp->v_count--;
1348789Sahrens 		ASSERT3U(vp->v_count, ==, 0);
1349789Sahrens 		mutex_exit(&vp->v_lock);
1350789Sahrens 		mutex_exit(&zp->z_lock);
1351789Sahrens 		zfs_znode_delete(zp, tx);
1352789Sahrens 		VFS_RELE(zfsvfs->z_vfs);
13533461Sahrens 	} else if (unlinked) {
13543461Sahrens 		zfs_unlinked_add(zp, tx);
1355789Sahrens 	}
1356789Sahrens 
13572638Sperrin 	zfs_log_remove(zilog, tx, TX_REMOVE, dzp, name);
1358789Sahrens 
1359789Sahrens 	dmu_tx_commit(tx);
1360789Sahrens out:
1361789Sahrens 	zfs_dirent_unlock(dl);
1362789Sahrens 
1363789Sahrens 	if (!delete_now) {
1364789Sahrens 		VN_RELE(vp);
1365789Sahrens 	} else if (xzp) {
1366789Sahrens 		/* this rele delayed to prevent nesting transactions */
1367789Sahrens 		VN_RELE(ZTOV(xzp));
1368789Sahrens 	}
1369789Sahrens 
1370789Sahrens 	ZFS_EXIT(zfsvfs);
1371789Sahrens 	return (error);
1372789Sahrens }
1373789Sahrens 
1374789Sahrens /*
1375789Sahrens  * Create a new directory and insert it into dvp using the name
1376789Sahrens  * provided.  Return a pointer to the inserted directory.
1377789Sahrens  *
1378789Sahrens  *	IN:	dvp	- vnode of directory to add subdir to.
1379789Sahrens  *		dirname	- name of new directory.
1380789Sahrens  *		vap	- attributes of new directory.
1381789Sahrens  *		cr	- credentials of caller.
1382789Sahrens  *
1383789Sahrens  *	OUT:	vpp	- vnode of created directory.
1384789Sahrens  *
1385789Sahrens  *	RETURN:	0 if success
1386789Sahrens  *		error code if failure
1387789Sahrens  *
1388789Sahrens  * Timestamps:
1389789Sahrens  *	dvp - ctime|mtime updated
1390789Sahrens  *	 vp - ctime|mtime|atime updated
1391789Sahrens  */
1392789Sahrens static int
1393789Sahrens zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr)
1394789Sahrens {
1395789Sahrens 	znode_t		*zp, *dzp = VTOZ(dvp);
1396789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1397*5326Sek110237 	zilog_t		*zilog;
1398789Sahrens 	zfs_dirlock_t	*dl;
1399789Sahrens 	uint64_t	zoid = 0;
1400789Sahrens 	dmu_tx_t	*tx;
1401789Sahrens 	int		error;
1402789Sahrens 
1403789Sahrens 	ASSERT(vap->va_type == VDIR);
1404789Sahrens 
1405*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
1406*5326Sek110237 	zilog = zfsvfs->z_log;
1407789Sahrens 
1408789Sahrens 	if (dzp->z_phys->zp_flags & ZFS_XATTR) {
1409789Sahrens 		ZFS_EXIT(zfsvfs);
1410789Sahrens 		return (EINVAL);
1411789Sahrens 	}
1412789Sahrens top:
1413789Sahrens 	*vpp = NULL;
1414789Sahrens 
1415789Sahrens 	/*
1416789Sahrens 	 * First make sure the new directory doesn't exist.
1417789Sahrens 	 */
1418789Sahrens 	if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, ZNEW)) {
1419789Sahrens 		ZFS_EXIT(zfsvfs);
1420789Sahrens 		return (error);
1421789Sahrens 	}
1422789Sahrens 
14231231Smarks 	if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, cr)) {
14241231Smarks 		zfs_dirent_unlock(dl);
14251231Smarks 		ZFS_EXIT(zfsvfs);
14261231Smarks 		return (error);
14271231Smarks 	}
14281231Smarks 
1429789Sahrens 	/*
1430789Sahrens 	 * Add a new entry to the directory.
1431789Sahrens 	 */
1432789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
14331544Seschrock 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
14341544Seschrock 	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1435789Sahrens 	if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
1436789Sahrens 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1437789Sahrens 		    0, SPA_MAXBLOCKSIZE);
1438789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
1439789Sahrens 	if (error) {
1440789Sahrens 		zfs_dirent_unlock(dl);
1441789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
14422113Sahrens 			dmu_tx_wait(tx);
14432113Sahrens 			dmu_tx_abort(tx);
1444789Sahrens 			goto top;
1445789Sahrens 		}
14462113Sahrens 		dmu_tx_abort(tx);
1447789Sahrens 		ZFS_EXIT(zfsvfs);
1448789Sahrens 		return (error);
1449789Sahrens 	}
1450789Sahrens 
1451789Sahrens 	/*
1452789Sahrens 	 * Create new node.
1453789Sahrens 	 */
1454789Sahrens 	zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
1455789Sahrens 
1456789Sahrens 	/*
1457789Sahrens 	 * Now put new name in parent dir.
1458789Sahrens 	 */
1459789Sahrens 	(void) zfs_link_create(dl, zp, tx, ZNEW);
1460789Sahrens 
1461789Sahrens 	*vpp = ZTOV(zp);
1462789Sahrens 
14632638Sperrin 	zfs_log_create(zilog, tx, TX_MKDIR, dzp, zp, dirname);
1464789Sahrens 	dmu_tx_commit(tx);
1465789Sahrens 
1466789Sahrens 	zfs_dirent_unlock(dl);
1467789Sahrens 
1468789Sahrens 	ZFS_EXIT(zfsvfs);
1469789Sahrens 	return (0);
1470789Sahrens }
1471789Sahrens 
1472789Sahrens /*
1473789Sahrens  * Remove a directory subdir entry.  If the current working
1474789Sahrens  * directory is the same as the subdir to be removed, the
1475789Sahrens  * remove will fail.
1476789Sahrens  *
1477789Sahrens  *	IN:	dvp	- vnode of directory to remove from.
1478789Sahrens  *		name	- name of directory to be removed.
1479789Sahrens  *		cwd	- vnode of current working directory.
1480789Sahrens  *		cr	- credentials of caller.
1481789Sahrens  *
1482789Sahrens  *	RETURN:	0 if success
1483789Sahrens  *		error code if failure
1484789Sahrens  *
1485789Sahrens  * Timestamps:
1486789Sahrens  *	dvp - ctime|mtime updated
1487789Sahrens  */
1488789Sahrens static int
1489789Sahrens zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr)
1490789Sahrens {
1491789Sahrens 	znode_t		*dzp = VTOZ(dvp);
1492789Sahrens 	znode_t		*zp;
1493789Sahrens 	vnode_t		*vp;
1494789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
1495*5326Sek110237 	zilog_t		*zilog;
1496789Sahrens 	zfs_dirlock_t	*dl;
1497789Sahrens 	dmu_tx_t	*tx;
1498789Sahrens 	int		error;
1499789Sahrens 
1500*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
1501*5326Sek110237 	zilog = zfsvfs->z_log;
1502789Sahrens 
1503789Sahrens top:
1504789Sahrens 	zp = NULL;
1505789Sahrens 
1506789Sahrens 	/*
1507789Sahrens 	 * Attempt to lock directory; fail if entry doesn't exist.
1508789Sahrens 	 */
1509789Sahrens 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZEXISTS)) {
1510789Sahrens 		ZFS_EXIT(zfsvfs);
1511789Sahrens 		return (error);
1512789Sahrens 	}
1513789Sahrens 
1514789Sahrens 	vp = ZTOV(zp);
1515789Sahrens 
1516789Sahrens 	if (error = zfs_zaccess_delete(dzp, zp, cr)) {
1517789Sahrens 		goto out;
1518789Sahrens 	}
1519789Sahrens 
1520789Sahrens 	if (vp->v_type != VDIR) {
1521789Sahrens 		error = ENOTDIR;
1522789Sahrens 		goto out;
1523789Sahrens 	}
1524789Sahrens 
1525789Sahrens 	if (vp == cwd) {
1526789Sahrens 		error = EINVAL;
1527789Sahrens 		goto out;
1528789Sahrens 	}
1529789Sahrens 
15304863Spraks 	vnevent_rmdir(vp, dvp, name);
1531789Sahrens 
1532789Sahrens 	/*
15333897Smaybee 	 * Grab a lock on the directory to make sure that noone is
15343897Smaybee 	 * trying to add (or lookup) entries while we are removing it.
15353897Smaybee 	 */
15363897Smaybee 	rw_enter(&zp->z_name_lock, RW_WRITER);
15373897Smaybee 
15383897Smaybee 	/*
15393897Smaybee 	 * Grab a lock on the parent pointer to make sure we play well
1540789Sahrens 	 * with the treewalk and directory rename code.
1541789Sahrens 	 */
1542789Sahrens 	rw_enter(&zp->z_parent_lock, RW_WRITER);
1543789Sahrens 
1544789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
15451544Seschrock 	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1546789Sahrens 	dmu_tx_hold_bonus(tx, zp->z_id);
15473461Sahrens 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
1548789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
1549789Sahrens 	if (error) {
1550789Sahrens 		rw_exit(&zp->z_parent_lock);
15513897Smaybee 		rw_exit(&zp->z_name_lock);
1552789Sahrens 		zfs_dirent_unlock(dl);
1553789Sahrens 		VN_RELE(vp);
1554789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
15552113Sahrens 			dmu_tx_wait(tx);
15562113Sahrens 			dmu_tx_abort(tx);
1557789Sahrens 			goto top;
1558789Sahrens 		}
15592113Sahrens 		dmu_tx_abort(tx);
1560789Sahrens 		ZFS_EXIT(zfsvfs);
1561789Sahrens 		return (error);
1562789Sahrens 	}
1563789Sahrens 
1564789Sahrens 	error = zfs_link_destroy(dl, zp, tx, 0, NULL);
1565789Sahrens 
1566789Sahrens 	if (error == 0)
15672638Sperrin 		zfs_log_remove(zilog, tx, TX_RMDIR, dzp, name);
1568789Sahrens 
1569789Sahrens 	dmu_tx_commit(tx);
1570789Sahrens 
1571789Sahrens 	rw_exit(&zp->z_parent_lock);
15723897Smaybee 	rw_exit(&zp->z_name_lock);
1573789Sahrens out:
1574789Sahrens 	zfs_dirent_unlock(dl);
1575789Sahrens 
1576789Sahrens 	VN_RELE(vp);
1577789Sahrens 
1578789Sahrens 	ZFS_EXIT(zfsvfs);
1579789Sahrens 	return (error);
1580789Sahrens }
1581789Sahrens 
1582789Sahrens /*
1583789Sahrens  * Read as many directory entries as will fit into the provided
1584789Sahrens  * buffer from the given directory cursor position (specified in
1585789Sahrens  * the uio structure.
1586789Sahrens  *
1587789Sahrens  *	IN:	vp	- vnode of directory to read.
1588789Sahrens  *		uio	- structure supplying read location, range info,
1589789Sahrens  *			  and return buffer.
1590789Sahrens  *		cr	- credentials of caller.
1591789Sahrens  *
1592789Sahrens  *	OUT:	uio	- updated offset and range, buffer filled.
1593789Sahrens  *		eofp	- set to true if end-of-file detected.
1594789Sahrens  *
1595789Sahrens  *	RETURN:	0 if success
1596789Sahrens  *		error code if failure
1597789Sahrens  *
1598789Sahrens  * Timestamps:
1599789Sahrens  *	vp - atime updated
1600789Sahrens  *
1601789Sahrens  * Note that the low 4 bits of the cookie returned by zap is always zero.
1602789Sahrens  * This allows us to use the low range for "special" directory entries:
1603789Sahrens  * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
1604789Sahrens  * we use the offset 2 for the '.zfs' directory.
1605789Sahrens  */
1606789Sahrens /* ARGSUSED */
1607789Sahrens static int
1608789Sahrens zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp)
1609789Sahrens {
1610789Sahrens 	znode_t		*zp = VTOZ(vp);
1611789Sahrens 	iovec_t		*iovp;
1612789Sahrens 	dirent64_t	*odp;
1613789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
1614869Sperrin 	objset_t	*os;
1615789Sahrens 	caddr_t		outbuf;
1616789Sahrens 	size_t		bufsize;
1617789Sahrens 	zap_cursor_t	zc;
1618789Sahrens 	zap_attribute_t	zap;
1619789Sahrens 	uint_t		bytes_wanted;
1620789Sahrens 	uint64_t	offset; /* must be unsigned; checks for < 1 */
1621789Sahrens 	int		local_eof;
1622869Sperrin 	int		outcount;
1623869Sperrin 	int		error;
1624869Sperrin 	uint8_t		prefetch;
1625789Sahrens 
1626*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
1627789Sahrens 
1628789Sahrens 	/*
1629789Sahrens 	 * If we are not given an eof variable,
1630789Sahrens 	 * use a local one.
1631789Sahrens 	 */
1632789Sahrens 	if (eofp == NULL)
1633789Sahrens 		eofp = &local_eof;
1634789Sahrens 
1635789Sahrens 	/*
1636789Sahrens 	 * Check for valid iov_len.
1637789Sahrens 	 */
1638789Sahrens 	if (uio->uio_iov->iov_len <= 0) {
1639789Sahrens 		ZFS_EXIT(zfsvfs);
1640789Sahrens 		return (EINVAL);
1641789Sahrens 	}
1642789Sahrens 
1643789Sahrens 	/*
1644789Sahrens 	 * Quit if directory has been removed (posix)
1645789Sahrens 	 */
16463461Sahrens 	if ((*eofp = zp->z_unlinked) != 0) {
1647789Sahrens 		ZFS_EXIT(zfsvfs);
1648789Sahrens 		return (0);
1649789Sahrens 	}
1650789Sahrens 
1651869Sperrin 	error = 0;
1652869Sperrin 	os = zfsvfs->z_os;
1653869Sperrin 	offset = uio->uio_loffset;
1654869Sperrin 	prefetch = zp->z_zn_prefetch;
1655869Sperrin 
1656789Sahrens 	/*
1657789Sahrens 	 * Initialize the iterator cursor.
1658789Sahrens 	 */
1659789Sahrens 	if (offset <= 3) {
1660789Sahrens 		/*
1661789Sahrens 		 * Start iteration from the beginning of the directory.
1662789Sahrens 		 */
1663869Sperrin 		zap_cursor_init(&zc, os, zp->z_id);
1664789Sahrens 	} else {
1665789Sahrens 		/*
1666789Sahrens 		 * The offset is a serialized cursor.
1667789Sahrens 		 */
1668869Sperrin 		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
1669789Sahrens 	}
1670789Sahrens 
1671789Sahrens 	/*
1672789Sahrens 	 * Get space to change directory entries into fs independent format.
1673789Sahrens 	 */
1674789Sahrens 	iovp = uio->uio_iov;
1675789Sahrens 	bytes_wanted = iovp->iov_len;
1676789Sahrens 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) {
1677789Sahrens 		bufsize = bytes_wanted;
1678789Sahrens 		outbuf = kmem_alloc(bufsize, KM_SLEEP);
1679789Sahrens 		odp = (struct dirent64 *)outbuf;
1680789Sahrens 	} else {
1681789Sahrens 		bufsize = bytes_wanted;
1682789Sahrens 		odp = (struct dirent64 *)iovp->iov_base;
1683789Sahrens 	}
1684789Sahrens 
1685789Sahrens 	/*
1686789Sahrens 	 * Transform to file-system independent format
1687789Sahrens 	 */
1688789Sahrens 	outcount = 0;
1689789Sahrens 	while (outcount < bytes_wanted) {
16903912Slling 		ino64_t objnum;
16913912Slling 		ushort_t reclen;
16923912Slling 		off64_t *next;
16933912Slling 
1694789Sahrens 		/*
1695789Sahrens 		 * Special case `.', `..', and `.zfs'.
1696789Sahrens 		 */
1697789Sahrens 		if (offset == 0) {
1698789Sahrens 			(void) strcpy(zap.za_name, ".");
16993912Slling 			objnum = zp->z_id;
1700789Sahrens 		} else if (offset == 1) {
1701789Sahrens 			(void) strcpy(zap.za_name, "..");
17023912Slling 			objnum = zp->z_phys->zp_parent;
1703789Sahrens 		} else if (offset == 2 && zfs_show_ctldir(zp)) {
1704789Sahrens 			(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
17053912Slling 			objnum = ZFSCTL_INO_ROOT;
1706789Sahrens 		} else {
1707789Sahrens 			/*
1708789Sahrens 			 * Grab next entry.
1709789Sahrens 			 */
1710789Sahrens 			if (error = zap_cursor_retrieve(&zc, &zap)) {
1711789Sahrens 				if ((*eofp = (error == ENOENT)) != 0)
1712789Sahrens 					break;
1713789Sahrens 				else
1714789Sahrens 					goto update;
1715789Sahrens 			}
1716789Sahrens 
1717789Sahrens 			if (zap.za_integer_length != 8 ||
1718789Sahrens 			    zap.za_num_integers != 1) {
1719789Sahrens 				cmn_err(CE_WARN, "zap_readdir: bad directory "
1720789Sahrens 				    "entry, obj = %lld, offset = %lld\n",
1721789Sahrens 				    (u_longlong_t)zp->z_id,
1722789Sahrens 				    (u_longlong_t)offset);
1723789Sahrens 				error = ENXIO;
1724789Sahrens 				goto update;
1725789Sahrens 			}
17263912Slling 
17273912Slling 			objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
17283912Slling 			/*
17293912Slling 			 * MacOS X can extract the object type here such as:
17303912Slling 			 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
17313912Slling 			 */
1732789Sahrens 		}
17333912Slling 		reclen = DIRENT64_RECLEN(strlen(zap.za_name));
1734789Sahrens 
1735789Sahrens 		/*
1736789Sahrens 		 * Will this entry fit in the buffer?
1737789Sahrens 		 */
17383912Slling 		if (outcount + reclen > bufsize) {
1739789Sahrens 			/*
1740789Sahrens 			 * Did we manage to fit anything in the buffer?
1741789Sahrens 			 */
1742789Sahrens 			if (!outcount) {
1743789Sahrens 				error = EINVAL;
1744789Sahrens 				goto update;
1745789Sahrens 			}
1746789Sahrens 			break;
1747789Sahrens 		}
1748789Sahrens 		/*
1749789Sahrens 		 * Add this entry:
1750789Sahrens 		 */
17513912Slling 		odp->d_ino = objnum;
17523912Slling 		odp->d_reclen = reclen;
1753789Sahrens 		/* NOTE: d_off is the offset for the *next* entry */
1754789Sahrens 		next = &(odp->d_off);
1755789Sahrens 		(void) strncpy(odp->d_name, zap.za_name,
17563912Slling 		    DIRENT64_NAMELEN(reclen));
17573912Slling 		outcount += reclen;
17583912Slling 		odp = (dirent64_t *)((intptr_t)odp + reclen);
1759789Sahrens 
1760789Sahrens 		ASSERT(outcount <= bufsize);
1761789Sahrens 
1762789Sahrens 		/* Prefetch znode */
1763869Sperrin 		if (prefetch)
17643912Slling 			dmu_prefetch(os, objnum, 0, 0);
1765789Sahrens 
1766789Sahrens 		/*
1767789Sahrens 		 * Move to the next entry, fill in the previous offset.
1768789Sahrens 		 */
1769789Sahrens 		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
1770789Sahrens 			zap_cursor_advance(&zc);
1771789Sahrens 			offset = zap_cursor_serialize(&zc);
1772789Sahrens 		} else {
1773789Sahrens 			offset += 1;
1774789Sahrens 		}
1775789Sahrens 		*next = offset;
1776789Sahrens 	}
1777869Sperrin 	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
1778789Sahrens 
1779789Sahrens 	if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) {
1780789Sahrens 		iovp->iov_base += outcount;
1781789Sahrens 		iovp->iov_len -= outcount;
1782789Sahrens 		uio->uio_resid -= outcount;
1783789Sahrens 	} else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) {
1784789Sahrens 		/*
1785789Sahrens 		 * Reset the pointer.
1786789Sahrens 		 */
1787789Sahrens 		offset = uio->uio_loffset;
1788789Sahrens 	}
1789789Sahrens 
1790789Sahrens update:
1791885Sahrens 	zap_cursor_fini(&zc);
1792789Sahrens 	if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1793789Sahrens 		kmem_free(outbuf, bufsize);
1794789Sahrens 
1795789Sahrens 	if (error == ENOENT)
1796789Sahrens 		error = 0;
1797789Sahrens 
1798789Sahrens 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
1799789Sahrens 
1800789Sahrens 	uio->uio_loffset = offset;
1801789Sahrens 	ZFS_EXIT(zfsvfs);
1802789Sahrens 	return (error);
1803789Sahrens }
1804789Sahrens 
18054720Sfr157268 ulong_t zfs_fsync_sync_cnt = 4;
18064720Sfr157268 
1807789Sahrens static int
1808789Sahrens zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr)
1809789Sahrens {
1810789Sahrens 	znode_t	*zp = VTOZ(vp);
1811789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1812789Sahrens 
18131773Seschrock 	/*
18141773Seschrock 	 * Regardless of whether this is required for standards conformance,
18151773Seschrock 	 * this is the logical behavior when fsync() is called on a file with
18161773Seschrock 	 * dirty pages.  We use B_ASYNC since the ZIL transactions are already
18171773Seschrock 	 * going to be pushed out as part of the zil_commit().
18181773Seschrock 	 */
18191773Seschrock 	if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
18201773Seschrock 	    (vp->v_type == VREG) && !(IS_SWAPVP(vp)))
18211773Seschrock 		(void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr);
18221773Seschrock 
18234720Sfr157268 	(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
18244720Sfr157268 
1825*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
18262638Sperrin 	zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id);
1827789Sahrens 	ZFS_EXIT(zfsvfs);
1828789Sahrens 	return (0);
1829789Sahrens }
1830789Sahrens 
1831789Sahrens /*
1832789Sahrens  * Get the requested file attributes and place them in the provided
1833789Sahrens  * vattr structure.
1834789Sahrens  *
1835789Sahrens  *	IN:	vp	- vnode of file.
1836789Sahrens  *		vap	- va_mask identifies requested attributes.
1837789Sahrens  *		flags	- [UNUSED]
1838789Sahrens  *		cr	- credentials of caller.
1839789Sahrens  *
1840789Sahrens  *	OUT:	vap	- attribute values.
1841789Sahrens  *
1842789Sahrens  *	RETURN:	0 (always succeeds)
1843789Sahrens  */
1844789Sahrens /* ARGSUSED */
1845789Sahrens static int
1846789Sahrens zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
1847789Sahrens {
1848789Sahrens 	znode_t *zp = VTOZ(vp);
1849789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
1850*5326Sek110237 	znode_phys_t *pzp;
1851789Sahrens 	int	error;
18524543Smarks 	uint64_t links;
1853789Sahrens 
1854*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
1855*5326Sek110237 	pzp = zp->z_phys;
1856789Sahrens 
1857789Sahrens 	/*
1858789Sahrens 	 * Return all attributes.  It's cheaper to provide the answer
1859789Sahrens 	 * than to determine whether we were asked the question.
1860789Sahrens 	 */
1861789Sahrens 	mutex_enter(&zp->z_lock);
1862789Sahrens 
1863789Sahrens 	vap->va_type = vp->v_type;
1864789Sahrens 	vap->va_mode = pzp->zp_mode & MODEMASK;
1865789Sahrens 	vap->va_uid = zp->z_phys->zp_uid;
1866789Sahrens 	vap->va_gid = zp->z_phys->zp_gid;
1867789Sahrens 	vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev;
1868789Sahrens 	vap->va_nodeid = zp->z_id;
18694543Smarks 	if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp))
18704543Smarks 		links = pzp->zp_links + 1;
18714543Smarks 	else
18724543Smarks 		links = pzp->zp_links;
18734543Smarks 	vap->va_nlink = MIN(links, UINT32_MAX);	/* nlink_t limit! */
1874789Sahrens 	vap->va_size = pzp->zp_size;
18751816Smarks 	vap->va_rdev = vp->v_rdev;
1876789Sahrens 	vap->va_seq = zp->z_seq;
1877789Sahrens 
1878789Sahrens 	ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime);
1879789Sahrens 	ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime);
1880789Sahrens 	ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime);
1881789Sahrens 
1882789Sahrens 	/*
1883905Smarks 	 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
1884905Smarks 	 * Also, if we are the owner don't bother, since owner should
1885905Smarks 	 * always be allowed to read basic attributes of file.
1886789Sahrens 	 */
1887905Smarks 	if (!(zp->z_phys->zp_flags & ZFS_ACL_TRIVIAL) &&
1888905Smarks 	    (zp->z_phys->zp_uid != crgetuid(cr))) {
1889905Smarks 		if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, cr)) {
1890789Sahrens 			mutex_exit(&zp->z_lock);
1891789Sahrens 			ZFS_EXIT(zfsvfs);
1892789Sahrens 			return (error);
1893789Sahrens 		}
1894789Sahrens 	}
1895789Sahrens 
1896789Sahrens 	mutex_exit(&zp->z_lock);
1897789Sahrens 
1898789Sahrens 	dmu_object_size_from_db(zp->z_dbuf, &vap->va_blksize, &vap->va_nblocks);
1899789Sahrens 
1900789Sahrens 	if (zp->z_blksz == 0) {
1901789Sahrens 		/*
1902789Sahrens 		 * Block size hasn't been set; suggest maximal I/O transfers.
1903789Sahrens 		 */
1904789Sahrens 		vap->va_blksize = zfsvfs->z_max_blksz;
1905789Sahrens 	}
1906789Sahrens 
1907789Sahrens 	ZFS_EXIT(zfsvfs);
1908789Sahrens 	return (0);
1909789Sahrens }
1910789Sahrens 
1911789Sahrens /*
1912789Sahrens  * Set the file attributes to the values contained in the
1913789Sahrens  * vattr structure.
1914789Sahrens  *
1915789Sahrens  *	IN:	vp	- vnode of file to be modified.
1916789Sahrens  *		vap	- new attribute values.
1917789Sahrens  *		flags	- ATTR_UTIME set if non-default time values provided.
1918789Sahrens  *		cr	- credentials of caller.
1919789Sahrens  *
1920789Sahrens  *	RETURN:	0 if success
1921789Sahrens  *		error code if failure
1922789Sahrens  *
1923789Sahrens  * Timestamps:
1924789Sahrens  *	vp - ctime updated, mtime updated if size changed.
1925789Sahrens  */
1926789Sahrens /* ARGSUSED */
1927789Sahrens static int
1928789Sahrens zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
1929789Sahrens 	caller_context_t *ct)
1930789Sahrens {
1931*5326Sek110237 	znode_t		*zp = VTOZ(vp);
1932*5326Sek110237 	znode_phys_t	*pzp;
1933789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
1934*5326Sek110237 	zilog_t		*zilog;
1935789Sahrens 	dmu_tx_t	*tx;
19361878Smaybee 	vattr_t		oldva;
1937789Sahrens 	uint_t		mask = vap->va_mask;
19381878Smaybee 	uint_t		saved_mask;
19392796Smarks 	int		trim_mask = 0;
1940789Sahrens 	uint64_t	new_mode;
19411231Smarks 	znode_t		*attrzp;
1942789Sahrens 	int		need_policy = FALSE;
1943789Sahrens 	int		err;
1944789Sahrens 
1945789Sahrens 	if (mask == 0)
1946789Sahrens 		return (0);
1947789Sahrens 
1948789Sahrens 	if (mask & AT_NOSET)
1949789Sahrens 		return (EINVAL);
1950789Sahrens 
1951789Sahrens 	if (mask & AT_SIZE && vp->v_type == VDIR)
1952789Sahrens 		return (EISDIR);
1953789Sahrens 
19541394Smarks 	if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO)
19551308Smarks 		return (EINVAL);
19561308Smarks 
1957*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
1958*5326Sek110237 	pzp = zp->z_phys;
1959*5326Sek110237 	zilog = zfsvfs->z_log;
1960789Sahrens 
1961789Sahrens top:
19621231Smarks 	attrzp = NULL;
1963789Sahrens 
1964789Sahrens 	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
1965789Sahrens 		ZFS_EXIT(zfsvfs);
1966789Sahrens 		return (EROFS);
1967789Sahrens 	}
1968789Sahrens 
1969789Sahrens 	/*
1970789Sahrens 	 * First validate permissions
1971789Sahrens 	 */
1972789Sahrens 
1973789Sahrens 	if (mask & AT_SIZE) {
1974789Sahrens 		err = zfs_zaccess(zp, ACE_WRITE_DATA, cr);
1975789Sahrens 		if (err) {
1976789Sahrens 			ZFS_EXIT(zfsvfs);
1977789Sahrens 			return (err);
1978789Sahrens 		}
19791878Smaybee 		/*
19801878Smaybee 		 * XXX - Note, we are not providing any open
19811878Smaybee 		 * mode flags here (like FNDELAY), so we may
19821878Smaybee 		 * block if there are locks present... this
19831878Smaybee 		 * should be addressed in openat().
19841878Smaybee 		 */
19851878Smaybee 		do {
19861878Smaybee 			err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
19872113Sahrens 			/* NB: we already did dmu_tx_wait() if necessary */
19881878Smaybee 		} while (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT);
19891878Smaybee 		if (err) {
19901878Smaybee 			ZFS_EXIT(zfsvfs);
19911878Smaybee 			return (err);
19921878Smaybee 		}
1993789Sahrens 	}
1994789Sahrens 
1995789Sahrens 	if (mask & (AT_ATIME|AT_MTIME))
1996789Sahrens 		need_policy = zfs_zaccess_v4_perm(zp, ACE_WRITE_ATTRIBUTES, cr);
1997789Sahrens 
1998789Sahrens 	if (mask & (AT_UID|AT_GID)) {
1999789Sahrens 		int	idmask = (mask & (AT_UID|AT_GID));
2000789Sahrens 		int	take_owner;
2001789Sahrens 		int	take_group;
2002789Sahrens 
2003789Sahrens 		/*
2004913Smarks 		 * NOTE: even if a new mode is being set,
2005913Smarks 		 * we may clear S_ISUID/S_ISGID bits.
2006913Smarks 		 */
2007913Smarks 
2008913Smarks 		if (!(mask & AT_MODE))
2009913Smarks 			vap->va_mode = pzp->zp_mode;
2010913Smarks 
2011913Smarks 		/*
2012789Sahrens 		 * Take ownership or chgrp to group we are a member of
2013789Sahrens 		 */
2014789Sahrens 
2015789Sahrens 		take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
2016789Sahrens 		take_group = (mask & AT_GID) && groupmember(vap->va_gid, cr);
2017789Sahrens 
2018789Sahrens 		/*
2019789Sahrens 		 * If both AT_UID and AT_GID are set then take_owner and
2020789Sahrens 		 * take_group must both be set in order to allow taking
2021789Sahrens 		 * ownership.
2022789Sahrens 		 *
2023789Sahrens 		 * Otherwise, send the check through secpolicy_vnode_setattr()
2024789Sahrens 		 *
2025789Sahrens 		 */
2026789Sahrens 
2027789Sahrens 		if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
2028789Sahrens 		    ((idmask == AT_UID) && take_owner) ||
2029789Sahrens 		    ((idmask == AT_GID) && take_group)) {
2030789Sahrens 			if (zfs_zaccess_v4_perm(zp, ACE_WRITE_OWNER, cr) == 0) {
2031789Sahrens 				/*
2032789Sahrens 				 * Remove setuid/setgid for non-privileged users
2033789Sahrens 				 */
20341115Smarks 				secpolicy_setid_clear(vap, cr);
20352796Smarks 				trim_mask = (mask & (AT_UID|AT_GID));
2036789Sahrens 			} else {
2037789Sahrens 				need_policy =  TRUE;
2038789Sahrens 			}
2039789Sahrens 		} else {
2040789Sahrens 			need_policy =  TRUE;
2041789Sahrens 		}
2042789Sahrens 	}
2043789Sahrens 
20442796Smarks 	mutex_enter(&zp->z_lock);
20452796Smarks 	oldva.va_mode = pzp->zp_mode;
20462796Smarks 	oldva.va_uid = zp->z_phys->zp_uid;
20472796Smarks 	oldva.va_gid = zp->z_phys->zp_gid;
20482796Smarks 	mutex_exit(&zp->z_lock);
20492796Smarks 
20502796Smarks 	if (mask & AT_MODE) {
20512796Smarks 		if (zfs_zaccess_v4_perm(zp, ACE_WRITE_ACL, cr) == 0) {
20522796Smarks 			err = secpolicy_setid_setsticky_clear(vp, vap,
20532796Smarks 			    &oldva, cr);
20542796Smarks 			if (err) {
20552796Smarks 				ZFS_EXIT(zfsvfs);
20562796Smarks 				return (err);
20572796Smarks 			}
20582796Smarks 			trim_mask |= AT_MODE;
20592796Smarks 		} else {
20602796Smarks 			need_policy = TRUE;
20612796Smarks 		}
20622796Smarks 	}
2063789Sahrens 
2064789Sahrens 	if (need_policy) {
20651115Smarks 		/*
20661115Smarks 		 * If trim_mask is set then take ownership
20672796Smarks 		 * has been granted or write_acl is present and user
20682796Smarks 		 * has the ability to modify mode.  In that case remove
20692796Smarks 		 * UID|GID and or MODE from mask so that
20701115Smarks 		 * secpolicy_vnode_setattr() doesn't revoke it.
20711115Smarks 		 */
20722796Smarks 
20732796Smarks 		if (trim_mask) {
20742796Smarks 			saved_mask = vap->va_mask;
20752796Smarks 			vap->va_mask &= ~trim_mask;
20762796Smarks 
20772796Smarks 		}
2078789Sahrens 		err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2079789Sahrens 		    (int (*)(void *, int, cred_t *))zfs_zaccess_rwx, zp);
2080789Sahrens 		if (err) {
2081789Sahrens 			ZFS_EXIT(zfsvfs);
2082789Sahrens 			return (err);
2083789Sahrens 		}
20841115Smarks 
20851115Smarks 		if (trim_mask)
20862796Smarks 			vap->va_mask |= saved_mask;
2087789Sahrens 	}
2088789Sahrens 
2089789Sahrens 	/*
2090789Sahrens 	 * secpolicy_vnode_setattr, or take ownership may have
2091789Sahrens 	 * changed va_mask
2092789Sahrens 	 */
2093789Sahrens 	mask = vap->va_mask;
2094789Sahrens 
2095789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
2096789Sahrens 	dmu_tx_hold_bonus(tx, zp->z_id);
2097789Sahrens 
2098789Sahrens 	if (mask & AT_MODE) {
20991576Smarks 		uint64_t pmode = pzp->zp_mode;
21001576Smarks 
21011576Smarks 		new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2102789Sahrens 
2103789Sahrens 		if (zp->z_phys->zp_acl.z_acl_extern_obj)
2104789Sahrens 			dmu_tx_hold_write(tx,
2105789Sahrens 			    pzp->zp_acl.z_acl_extern_obj, 0, SPA_MAXBLOCKSIZE);
2106789Sahrens 		else
2107789Sahrens 			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2108789Sahrens 			    0, ZFS_ACL_SIZE(MAX_ACL_SIZE));
2109789Sahrens 	}
2110789Sahrens 
21111231Smarks 	if ((mask & (AT_UID | AT_GID)) && zp->z_phys->zp_xattr != 0) {
21121231Smarks 		err = zfs_zget(zp->z_zfsvfs, zp->z_phys->zp_xattr, &attrzp);
21131231Smarks 		if (err) {
21141231Smarks 			dmu_tx_abort(tx);
21151231Smarks 			ZFS_EXIT(zfsvfs);
21161231Smarks 			return (err);
21171231Smarks 		}
21181231Smarks 		dmu_tx_hold_bonus(tx, attrzp->z_id);
21191231Smarks 	}
21201231Smarks 
2121789Sahrens 	err = dmu_tx_assign(tx, zfsvfs->z_assign);
2122789Sahrens 	if (err) {
21231231Smarks 		if (attrzp)
21241231Smarks 			VN_RELE(ZTOV(attrzp));
2125789Sahrens 		if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
21262113Sahrens 			dmu_tx_wait(tx);
21272113Sahrens 			dmu_tx_abort(tx);
2128789Sahrens 			goto top;
2129789Sahrens 		}
21302113Sahrens 		dmu_tx_abort(tx);
2131789Sahrens 		ZFS_EXIT(zfsvfs);
2132789Sahrens 		return (err);
2133789Sahrens 	}
2134789Sahrens 
2135789Sahrens 	dmu_buf_will_dirty(zp->z_dbuf, tx);
2136789Sahrens 
2137789Sahrens 	/*
2138789Sahrens 	 * Set each attribute requested.
2139789Sahrens 	 * We group settings according to the locks they need to acquire.
2140789Sahrens 	 *
2141789Sahrens 	 * Note: you cannot set ctime directly, although it will be
2142789Sahrens 	 * updated as a side-effect of calling this function.
2143789Sahrens 	 */
2144789Sahrens 
2145789Sahrens 	mutex_enter(&zp->z_lock);
2146789Sahrens 
2147789Sahrens 	if (mask & AT_MODE) {
2148789Sahrens 		err = zfs_acl_chmod_setattr(zp, new_mode, tx);
2149789Sahrens 		ASSERT3U(err, ==, 0);
2150789Sahrens 	}
2151789Sahrens 
21521231Smarks 	if (attrzp)
21531231Smarks 		mutex_enter(&attrzp->z_lock);
21541231Smarks 
21551231Smarks 	if (mask & AT_UID) {
2156789Sahrens 		zp->z_phys->zp_uid = (uint64_t)vap->va_uid;
21571231Smarks 		if (attrzp) {
21581231Smarks 			attrzp->z_phys->zp_uid = (uint64_t)vap->va_uid;
21591231Smarks 		}
21601231Smarks 	}
21611231Smarks 
21621231Smarks 	if (mask & AT_GID) {
2163789Sahrens 		zp->z_phys->zp_gid = (uint64_t)vap->va_gid;
21641231Smarks 		if (attrzp)
21651231Smarks 			attrzp->z_phys->zp_gid = (uint64_t)vap->va_gid;
21661231Smarks 	}
21671231Smarks 
21681231Smarks 	if (attrzp)
21691231Smarks 		mutex_exit(&attrzp->z_lock);
2170789Sahrens 
2171789Sahrens 	if (mask & AT_ATIME)
2172789Sahrens 		ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime);
2173789Sahrens 
2174789Sahrens 	if (mask & AT_MTIME)
2175789Sahrens 		ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime);
2176789Sahrens 
21771878Smaybee 	if (mask & AT_SIZE)
2178789Sahrens 		zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx);
21791878Smaybee 	else if (mask != 0)
2180789Sahrens 		zfs_time_stamper_locked(zp, STATE_CHANGED, tx);
2181789Sahrens 
21821878Smaybee 	if (mask != 0)
21832638Sperrin 		zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask);
2184789Sahrens 
2185789Sahrens 	mutex_exit(&zp->z_lock);
2186789Sahrens 
21871231Smarks 	if (attrzp)
21881231Smarks 		VN_RELE(ZTOV(attrzp));
21891231Smarks 
2190789Sahrens 	dmu_tx_commit(tx);
2191789Sahrens 
2192789Sahrens 	ZFS_EXIT(zfsvfs);
2193789Sahrens 	return (err);
2194789Sahrens }
2195789Sahrens 
21963271Smaybee typedef struct zfs_zlock {
21973271Smaybee 	krwlock_t	*zl_rwlock;	/* lock we acquired */
21983271Smaybee 	znode_t		*zl_znode;	/* znode we held */
21993271Smaybee 	struct zfs_zlock *zl_next;	/* next in list */
22003271Smaybee } zfs_zlock_t;
22013271Smaybee 
22023271Smaybee /*
22033271Smaybee  * Drop locks and release vnodes that were held by zfs_rename_lock().
22043271Smaybee  */
22053271Smaybee static void
22063271Smaybee zfs_rename_unlock(zfs_zlock_t **zlpp)
22073271Smaybee {
22083271Smaybee 	zfs_zlock_t *zl;
22093271Smaybee 
22103271Smaybee 	while ((zl = *zlpp) != NULL) {
22113271Smaybee 		if (zl->zl_znode != NULL)
22123271Smaybee 			VN_RELE(ZTOV(zl->zl_znode));
22133271Smaybee 		rw_exit(zl->zl_rwlock);
22143271Smaybee 		*zlpp = zl->zl_next;
22153271Smaybee 		kmem_free(zl, sizeof (*zl));
22163271Smaybee 	}
22173271Smaybee }
22183271Smaybee 
2219789Sahrens /*
2220789Sahrens  * Search back through the directory tree, using the ".." entries.
2221789Sahrens  * Lock each directory in the chain to prevent concurrent renames.
2222789Sahrens  * Fail any attempt to move a directory into one of its own descendants.
2223789Sahrens  * XXX - z_parent_lock can overlap with map or grow locks
2224789Sahrens  */
2225789Sahrens static int
2226789Sahrens zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2227789Sahrens {
2228789Sahrens 	zfs_zlock_t	*zl;
22293638Sbillm 	znode_t		*zp = tdzp;
2230789Sahrens 	uint64_t	rootid = zp->z_zfsvfs->z_root;
2231789Sahrens 	uint64_t	*oidp = &zp->z_id;
2232789Sahrens 	krwlock_t	*rwlp = &szp->z_parent_lock;
2233789Sahrens 	krw_t		rw = RW_WRITER;
2234789Sahrens 
2235789Sahrens 	/*
2236789Sahrens 	 * First pass write-locks szp and compares to zp->z_id.
2237789Sahrens 	 * Later passes read-lock zp and compare to zp->z_parent.
2238789Sahrens 	 */
2239789Sahrens 	do {
22403271Smaybee 		if (!rw_tryenter(rwlp, rw)) {
22413271Smaybee 			/*
22423271Smaybee 			 * Another thread is renaming in this path.
22433271Smaybee 			 * Note that if we are a WRITER, we don't have any
22443271Smaybee 			 * parent_locks held yet.
22453271Smaybee 			 */
22463271Smaybee 			if (rw == RW_READER && zp->z_id > szp->z_id) {
22473271Smaybee 				/*
22483271Smaybee 				 * Drop our locks and restart
22493271Smaybee 				 */
22503271Smaybee 				zfs_rename_unlock(&zl);
22513271Smaybee 				*zlpp = NULL;
22523271Smaybee 				zp = tdzp;
22533271Smaybee 				oidp = &zp->z_id;
22543271Smaybee 				rwlp = &szp->z_parent_lock;
22553271Smaybee 				rw = RW_WRITER;
22563271Smaybee 				continue;
22573271Smaybee 			} else {
22583271Smaybee 				/*
22593271Smaybee 				 * Wait for other thread to drop its locks
22603271Smaybee 				 */
22613271Smaybee 				rw_enter(rwlp, rw);
22623271Smaybee 			}
22633271Smaybee 		}
22643271Smaybee 
2265789Sahrens 		zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
2266789Sahrens 		zl->zl_rwlock = rwlp;
2267789Sahrens 		zl->zl_znode = NULL;
2268789Sahrens 		zl->zl_next = *zlpp;
2269789Sahrens 		*zlpp = zl;
2270789Sahrens 
2271789Sahrens 		if (*oidp == szp->z_id)		/* We're a descendant of szp */
2272789Sahrens 			return (EINVAL);
2273789Sahrens 
2274789Sahrens 		if (*oidp == rootid)		/* We've hit the top */
2275789Sahrens 			return (0);
2276789Sahrens 
2277789Sahrens 		if (rw == RW_READER) {		/* i.e. not the first pass */
2278789Sahrens 			int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp);
2279789Sahrens 			if (error)
2280789Sahrens 				return (error);
2281789Sahrens 			zl->zl_znode = zp;
2282789Sahrens 		}
2283789Sahrens 		oidp = &zp->z_phys->zp_parent;
2284789Sahrens 		rwlp = &zp->z_parent_lock;
2285789Sahrens 		rw = RW_READER;
2286789Sahrens 
2287789Sahrens 	} while (zp->z_id != sdzp->z_id);
2288789Sahrens 
2289789Sahrens 	return (0);
2290789Sahrens }
2291789Sahrens 
2292789Sahrens /*
2293789Sahrens  * Move an entry from the provided source directory to the target
2294789Sahrens  * directory.  Change the entry name as indicated.
2295789Sahrens  *
2296789Sahrens  *	IN:	sdvp	- Source directory containing the "old entry".
2297789Sahrens  *		snm	- Old entry name.
2298789Sahrens  *		tdvp	- Target directory to contain the "new entry".
2299789Sahrens  *		tnm	- New entry name.
2300789Sahrens  *		cr	- credentials of caller.
2301789Sahrens  *
2302789Sahrens  *	RETURN:	0 if success
2303789Sahrens  *		error code if failure
2304789Sahrens  *
2305789Sahrens  * Timestamps:
2306789Sahrens  *	sdvp,tdvp - ctime|mtime updated
2307789Sahrens  */
2308789Sahrens static int
2309789Sahrens zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr)
2310789Sahrens {
2311789Sahrens 	znode_t		*tdzp, *szp, *tzp;
2312789Sahrens 	znode_t		*sdzp = VTOZ(sdvp);
2313789Sahrens 	zfsvfs_t	*zfsvfs = sdzp->z_zfsvfs;
2314*5326Sek110237 	zilog_t		*zilog;
2315789Sahrens 	vnode_t		*realvp;
2316789Sahrens 	zfs_dirlock_t	*sdl, *tdl;
2317789Sahrens 	dmu_tx_t	*tx;
2318789Sahrens 	zfs_zlock_t	*zl;
2319789Sahrens 	int		cmp, serr, terr, error;
2320789Sahrens 
2321*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, sdzp);
2322*5326Sek110237 	zilog = zfsvfs->z_log;
2323789Sahrens 
2324789Sahrens 	/*
2325789Sahrens 	 * Make sure we have the real vp for the target directory.
2326789Sahrens 	 */
2327789Sahrens 	if (VOP_REALVP(tdvp, &realvp) == 0)
2328789Sahrens 		tdvp = realvp;
2329789Sahrens 
2330789Sahrens 	if (tdvp->v_vfsp != sdvp->v_vfsp) {
2331789Sahrens 		ZFS_EXIT(zfsvfs);
2332789Sahrens 		return (EXDEV);
2333789Sahrens 	}
2334789Sahrens 
2335789Sahrens 	tdzp = VTOZ(tdvp);
2336*5326Sek110237 	if (!tdzp->z_dbuf_held) {
2337*5326Sek110237 		ZFS_EXIT(zfsvfs);
2338*5326Sek110237 		return (EIO);
2339*5326Sek110237 	}
2340789Sahrens top:
2341789Sahrens 	szp = NULL;
2342789Sahrens 	tzp = NULL;
2343789Sahrens 	zl = NULL;
2344789Sahrens 
2345789Sahrens 	/*
2346789Sahrens 	 * This is to prevent the creation of links into attribute space
2347789Sahrens 	 * by renaming a linked file into/outof an attribute directory.
2348789Sahrens 	 * See the comment in zfs_link() for why this is considered bad.
2349789Sahrens 	 */
2350789Sahrens 	if ((tdzp->z_phys->zp_flags & ZFS_XATTR) !=
2351789Sahrens 	    (sdzp->z_phys->zp_flags & ZFS_XATTR)) {
2352789Sahrens 		ZFS_EXIT(zfsvfs);
2353789Sahrens 		return (EINVAL);
2354789Sahrens 	}
2355789Sahrens 
2356789Sahrens 	/*
2357789Sahrens 	 * Lock source and target directory entries.  To prevent deadlock,
2358789Sahrens 	 * a lock ordering must be defined.  We lock the directory with
2359789Sahrens 	 * the smallest object id first, or if it's a tie, the one with
2360789Sahrens 	 * the lexically first name.
2361789Sahrens 	 */
2362789Sahrens 	if (sdzp->z_id < tdzp->z_id) {
2363789Sahrens 		cmp = -1;
2364789Sahrens 	} else if (sdzp->z_id > tdzp->z_id) {
2365789Sahrens 		cmp = 1;
2366789Sahrens 	} else {
2367789Sahrens 		cmp = strcmp(snm, tnm);
2368789Sahrens 		if (cmp == 0) {
2369789Sahrens 			/*
2370789Sahrens 			 * POSIX: "If the old argument and the new argument
2371789Sahrens 			 * both refer to links to the same existing file,
2372789Sahrens 			 * the rename() function shall return successfully
2373789Sahrens 			 * and perform no other action."
2374789Sahrens 			 */
2375789Sahrens 			ZFS_EXIT(zfsvfs);
2376789Sahrens 			return (0);
2377789Sahrens 		}
2378789Sahrens 	}
2379789Sahrens 	if (cmp < 0) {
2380789Sahrens 		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, ZEXISTS);
2381789Sahrens 		terr = zfs_dirent_lock(&tdl, tdzp, tnm, &tzp, 0);
2382789Sahrens 	} else {
2383789Sahrens 		terr = zfs_dirent_lock(&tdl, tdzp, tnm, &tzp, 0);
2384789Sahrens 		serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, ZEXISTS);
2385789Sahrens 	}
2386789Sahrens 
2387789Sahrens 	if (serr) {
2388789Sahrens 		/*
2389789Sahrens 		 * Source entry invalid or not there.
2390789Sahrens 		 */
2391789Sahrens 		if (!terr) {
2392789Sahrens 			zfs_dirent_unlock(tdl);
2393789Sahrens 			if (tzp)
2394789Sahrens 				VN_RELE(ZTOV(tzp));
2395789Sahrens 		}
2396789Sahrens 		if (strcmp(snm, "..") == 0)
2397789Sahrens 			serr = EINVAL;
2398789Sahrens 		ZFS_EXIT(zfsvfs);
2399789Sahrens 		return (serr);
2400789Sahrens 	}
2401789Sahrens 	if (terr) {
2402789Sahrens 		zfs_dirent_unlock(sdl);
2403789Sahrens 		VN_RELE(ZTOV(szp));
2404789Sahrens 		if (strcmp(tnm, "..") == 0)
2405789Sahrens 			terr = EINVAL;
2406789Sahrens 		ZFS_EXIT(zfsvfs);
2407789Sahrens 		return (terr);
2408789Sahrens 	}
2409789Sahrens 
2410789Sahrens 	/*
2411789Sahrens 	 * Must have write access at the source to remove the old entry
2412789Sahrens 	 * and write access at the target to create the new entry.
2413789Sahrens 	 * Note that if target and source are the same, this can be
2414789Sahrens 	 * done in a single check.
2415789Sahrens 	 */
2416789Sahrens 
2417789Sahrens 	if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr))
2418789Sahrens 		goto out;
2419789Sahrens 
2420789Sahrens 	if (ZTOV(szp)->v_type == VDIR) {
2421789Sahrens 		/*
2422789Sahrens 		 * Check to make sure rename is valid.
2423789Sahrens 		 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
2424789Sahrens 		 */
2425789Sahrens 		if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
2426789Sahrens 			goto out;
2427789Sahrens 	}
2428789Sahrens 
2429789Sahrens 	/*
2430789Sahrens 	 * Does target exist?
2431789Sahrens 	 */
2432789Sahrens 	if (tzp) {
2433789Sahrens 		/*
2434789Sahrens 		 * Source and target must be the same type.
2435789Sahrens 		 */
2436789Sahrens 		if (ZTOV(szp)->v_type == VDIR) {
2437789Sahrens 			if (ZTOV(tzp)->v_type != VDIR) {
2438789Sahrens 				error = ENOTDIR;
2439789Sahrens 				goto out;
2440789Sahrens 			}
2441789Sahrens 		} else {
2442789Sahrens 			if (ZTOV(tzp)->v_type == VDIR) {
2443789Sahrens 				error = EISDIR;
2444789Sahrens 				goto out;
2445789Sahrens 			}
2446789Sahrens 		}
2447789Sahrens 		/*
2448789Sahrens 		 * POSIX dictates that when the source and target
2449789Sahrens 		 * entries refer to the same file object, rename
2450789Sahrens 		 * must do nothing and exit without error.
2451789Sahrens 		 */
2452789Sahrens 		if (szp->z_id == tzp->z_id) {
2453789Sahrens 			error = 0;
2454789Sahrens 			goto out;
2455789Sahrens 		}
2456789Sahrens 	}
2457789Sahrens 
24584863Spraks 	vnevent_rename_src(ZTOV(szp), sdvp, snm);
2459789Sahrens 	if (tzp)
24604863Spraks 		vnevent_rename_dest(ZTOV(tzp), tdvp, tnm);
24614863Spraks 
24624863Spraks 	/*
24634863Spraks 	 * notify the target directory if it is not the same
24644863Spraks 	 * as source directory.
24654863Spraks 	 */
24664863Spraks 	if (tdvp != sdvp) {
24674863Spraks 		vnevent_rename_dest_dir(tdvp);
24684863Spraks 	}
2469789Sahrens 
2470789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
2471789Sahrens 	dmu_tx_hold_bonus(tx, szp->z_id);	/* nlink changes */
2472789Sahrens 	dmu_tx_hold_bonus(tx, sdzp->z_id);	/* nlink changes */
24731544Seschrock 	dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
24741544Seschrock 	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
24751544Seschrock 	if (sdzp != tdzp)
2476789Sahrens 		dmu_tx_hold_bonus(tx, tdzp->z_id);	/* nlink changes */
24771544Seschrock 	if (tzp)
24781544Seschrock 		dmu_tx_hold_bonus(tx, tzp->z_id);	/* parent changes */
24793461Sahrens 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
2480789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
2481789Sahrens 	if (error) {
2482789Sahrens 		if (zl != NULL)
2483789Sahrens 			zfs_rename_unlock(&zl);
2484789Sahrens 		zfs_dirent_unlock(sdl);
2485789Sahrens 		zfs_dirent_unlock(tdl);
2486789Sahrens 		VN_RELE(ZTOV(szp));
2487789Sahrens 		if (tzp)
2488789Sahrens 			VN_RELE(ZTOV(tzp));
2489789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
24902113Sahrens 			dmu_tx_wait(tx);
24912113Sahrens 			dmu_tx_abort(tx);
2492789Sahrens 			goto top;
2493789Sahrens 		}
24942113Sahrens 		dmu_tx_abort(tx);
2495789Sahrens 		ZFS_EXIT(zfsvfs);
2496789Sahrens 		return (error);
2497789Sahrens 	}
2498789Sahrens 
2499789Sahrens 	if (tzp)	/* Attempt to remove the existing target */
2500789Sahrens 		error = zfs_link_destroy(tdl, tzp, tx, 0, NULL);
2501789Sahrens 
2502789Sahrens 	if (error == 0) {
2503789Sahrens 		error = zfs_link_create(tdl, szp, tx, ZRENAMING);
2504789Sahrens 		if (error == 0) {
2505789Sahrens 			error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
2506789Sahrens 			ASSERT(error == 0);
25072638Sperrin 			zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
25082638Sperrin 			    sdl->dl_name, tdzp, tdl->dl_name, szp);
2509789Sahrens 		}
2510789Sahrens 	}
2511789Sahrens 
2512789Sahrens 	dmu_tx_commit(tx);
2513789Sahrens out:
2514789Sahrens 	if (zl != NULL)
2515789Sahrens 		zfs_rename_unlock(&zl);
2516789Sahrens 
2517789Sahrens 	zfs_dirent_unlock(sdl);
2518789Sahrens 	zfs_dirent_unlock(tdl);
2519789Sahrens 
2520789Sahrens 	VN_RELE(ZTOV(szp));
2521789Sahrens 	if (tzp)
2522789Sahrens 		VN_RELE(ZTOV(tzp));
2523789Sahrens 
2524789Sahrens 	ZFS_EXIT(zfsvfs);
2525789Sahrens 	return (error);
2526789Sahrens }
2527789Sahrens 
2528789Sahrens /*
2529789Sahrens  * Insert the indicated symbolic reference entry into the directory.
2530789Sahrens  *
2531789Sahrens  *	IN:	dvp	- Directory to contain new symbolic link.
2532789Sahrens  *		link	- Name for new symlink entry.
2533789Sahrens  *		vap	- Attributes of new entry.
2534789Sahrens  *		target	- Target path of new symlink.
2535789Sahrens  *		cr	- credentials of caller.
2536789Sahrens  *
2537789Sahrens  *	RETURN:	0 if success
2538789Sahrens  *		error code if failure
2539789Sahrens  *
2540789Sahrens  * Timestamps:
2541789Sahrens  *	dvp - ctime|mtime updated
2542789Sahrens  */
2543789Sahrens static int
2544789Sahrens zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr)
2545789Sahrens {
2546789Sahrens 	znode_t		*zp, *dzp = VTOZ(dvp);
2547789Sahrens 	zfs_dirlock_t	*dl;
2548789Sahrens 	dmu_tx_t	*tx;
2549789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
2550*5326Sek110237 	zilog_t		*zilog;
2551789Sahrens 	uint64_t	zoid;
2552789Sahrens 	int		len = strlen(link);
2553789Sahrens 	int		error;
2554789Sahrens 
2555789Sahrens 	ASSERT(vap->va_type == VLNK);
2556789Sahrens 
2557*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
2558*5326Sek110237 	zilog = zfsvfs->z_log;
2559789Sahrens top:
2560789Sahrens 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
2561789Sahrens 		ZFS_EXIT(zfsvfs);
2562789Sahrens 		return (error);
2563789Sahrens 	}
2564789Sahrens 
2565789Sahrens 	if (len > MAXPATHLEN) {
2566789Sahrens 		ZFS_EXIT(zfsvfs);
2567789Sahrens 		return (ENAMETOOLONG);
2568789Sahrens 	}
2569789Sahrens 
2570789Sahrens 	/*
2571789Sahrens 	 * Attempt to lock directory; fail if entry already exists.
2572789Sahrens 	 */
2573789Sahrens 	if (error = zfs_dirent_lock(&dl, dzp, name, &zp, ZNEW)) {
2574789Sahrens 		ZFS_EXIT(zfsvfs);
2575789Sahrens 		return (error);
2576789Sahrens 	}
2577789Sahrens 
2578789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
2579789Sahrens 	dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
2580789Sahrens 	dmu_tx_hold_bonus(tx, dzp->z_id);
25811544Seschrock 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
2582789Sahrens 	if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE)
2583789Sahrens 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE);
2584789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
2585789Sahrens 	if (error) {
2586789Sahrens 		zfs_dirent_unlock(dl);
2587789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
25882113Sahrens 			dmu_tx_wait(tx);
25892113Sahrens 			dmu_tx_abort(tx);
2590789Sahrens 			goto top;
2591789Sahrens 		}
25922113Sahrens 		dmu_tx_abort(tx);
2593789Sahrens 		ZFS_EXIT(zfsvfs);
2594789Sahrens 		return (error);
2595789Sahrens 	}
2596789Sahrens 
2597789Sahrens 	dmu_buf_will_dirty(dzp->z_dbuf, tx);
2598789Sahrens 
2599789Sahrens 	/*
2600789Sahrens 	 * Create a new object for the symlink.
2601789Sahrens 	 * Put the link content into bonus buffer if it will fit;
2602789Sahrens 	 * otherwise, store it just like any other file data.
2603789Sahrens 	 */
2604789Sahrens 	zoid = 0;
2605789Sahrens 	if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) {
2606789Sahrens 		zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, len);
2607789Sahrens 		if (len != 0)
2608789Sahrens 			bcopy(link, zp->z_phys + 1, len);
2609789Sahrens 	} else {
2610789Sahrens 		dmu_buf_t *dbp;
26111669Sperrin 
2612789Sahrens 		zfs_mknode(dzp, vap, &zoid, tx, cr, 0, &zp, 0);
2613789Sahrens 
26141669Sperrin 		/*
26151669Sperrin 		 * Nothing can access the znode yet so no locking needed
26161669Sperrin 		 * for growing the znode's blocksize.
26171669Sperrin 		 */
26181669Sperrin 		zfs_grow_blocksize(zp, len, tx);
2619789Sahrens 
26201544Seschrock 		VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, zoid, 0, FTAG, &dbp));
2621789Sahrens 		dmu_buf_will_dirty(dbp, tx);
2622789Sahrens 
2623789Sahrens 		ASSERT3U(len, <=, dbp->db_size);
2624789Sahrens 		bcopy(link, dbp->db_data, len);
26251544Seschrock 		dmu_buf_rele(dbp, FTAG);
2626789Sahrens 	}
2627789Sahrens 	zp->z_phys->zp_size = len;
2628789Sahrens 
2629789Sahrens 	/*
2630789Sahrens 	 * Insert the new object into the directory.
2631789Sahrens 	 */
2632789Sahrens 	(void) zfs_link_create(dl, zp, tx, ZNEW);
2633789Sahrens out:
2634789Sahrens 	if (error == 0)
26352638Sperrin 		zfs_log_symlink(zilog, tx, TX_SYMLINK, dzp, zp, name, link);
2636789Sahrens 
2637789Sahrens 	dmu_tx_commit(tx);
2638789Sahrens 
2639789Sahrens 	zfs_dirent_unlock(dl);
2640789Sahrens 
2641789Sahrens 	VN_RELE(ZTOV(zp));
2642789Sahrens 
2643789Sahrens 	ZFS_EXIT(zfsvfs);
2644789Sahrens 	return (error);
2645789Sahrens }
2646789Sahrens 
2647789Sahrens /*
2648789Sahrens  * Return, in the buffer contained in the provided uio structure,
2649789Sahrens  * the symbolic path referred to by vp.
2650789Sahrens  *
2651789Sahrens  *	IN:	vp	- vnode of symbolic link.
2652789Sahrens  *		uoip	- structure to contain the link path.
2653789Sahrens  *		cr	- credentials of caller.
2654789Sahrens  *
2655789Sahrens  *	OUT:	uio	- structure to contain the link path.
2656789Sahrens  *
2657789Sahrens  *	RETURN:	0 if success
2658789Sahrens  *		error code if failure
2659789Sahrens  *
2660789Sahrens  * Timestamps:
2661789Sahrens  *	vp - atime updated
2662789Sahrens  */
2663789Sahrens /* ARGSUSED */
2664789Sahrens static int
2665789Sahrens zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr)
2666789Sahrens {
2667789Sahrens 	znode_t		*zp = VTOZ(vp);
2668789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2669789Sahrens 	size_t		bufsz;
2670789Sahrens 	int		error;
2671789Sahrens 
2672*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
2673789Sahrens 
2674789Sahrens 	bufsz = (size_t)zp->z_phys->zp_size;
2675789Sahrens 	if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) {
2676789Sahrens 		error = uiomove(zp->z_phys + 1,
2677789Sahrens 		    MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
2678789Sahrens 	} else {
26791544Seschrock 		dmu_buf_t *dbp;
26801544Seschrock 		error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp);
26811544Seschrock 		if (error) {
2682789Sahrens 			ZFS_EXIT(zfsvfs);
2683789Sahrens 			return (error);
2684789Sahrens 		}
2685789Sahrens 		error = uiomove(dbp->db_data,
2686789Sahrens 		    MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio);
26871544Seschrock 		dmu_buf_rele(dbp, FTAG);
2688789Sahrens 	}
2689789Sahrens 
2690789Sahrens 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
2691789Sahrens 	ZFS_EXIT(zfsvfs);
2692789Sahrens 	return (error);
2693789Sahrens }
2694789Sahrens 
2695789Sahrens /*
2696789Sahrens  * Insert a new entry into directory tdvp referencing svp.
2697789Sahrens  *
2698789Sahrens  *	IN:	tdvp	- Directory to contain new entry.
2699789Sahrens  *		svp	- vnode of new entry.
2700789Sahrens  *		name	- name of new entry.
2701789Sahrens  *		cr	- credentials of caller.
2702789Sahrens  *
2703789Sahrens  *	RETURN:	0 if success
2704789Sahrens  *		error code if failure
2705789Sahrens  *
2706789Sahrens  * Timestamps:
2707789Sahrens  *	tdvp - ctime|mtime updated
2708789Sahrens  *	 svp - ctime updated
2709789Sahrens  */
2710789Sahrens /* ARGSUSED */
2711789Sahrens static int
2712789Sahrens zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr)
2713789Sahrens {
2714789Sahrens 	znode_t		*dzp = VTOZ(tdvp);
2715789Sahrens 	znode_t		*tzp, *szp;
2716789Sahrens 	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
2717*5326Sek110237 	zilog_t		*zilog;
2718789Sahrens 	zfs_dirlock_t	*dl;
2719789Sahrens 	dmu_tx_t	*tx;
2720789Sahrens 	vnode_t		*realvp;
2721789Sahrens 	int		error;
2722789Sahrens 
2723789Sahrens 	ASSERT(tdvp->v_type == VDIR);
2724789Sahrens 
2725*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, dzp);
2726*5326Sek110237 	zilog = zfsvfs->z_log;
2727789Sahrens 
2728789Sahrens 	if (VOP_REALVP(svp, &realvp) == 0)
2729789Sahrens 		svp = realvp;
2730789Sahrens 
2731789Sahrens 	if (svp->v_vfsp != tdvp->v_vfsp) {
2732789Sahrens 		ZFS_EXIT(zfsvfs);
2733789Sahrens 		return (EXDEV);
2734789Sahrens 	}
2735789Sahrens 
2736789Sahrens 	szp = VTOZ(svp);
2737*5326Sek110237 	if (!szp->z_dbuf_held) {
2738*5326Sek110237 		ZFS_EXIT(zfsvfs);
2739*5326Sek110237 		return (EIO);
2740*5326Sek110237 	}
2741789Sahrens top:
2742789Sahrens 	/*
2743789Sahrens 	 * We do not support links between attributes and non-attributes
2744789Sahrens 	 * because of the potential security risk of creating links
2745789Sahrens 	 * into "normal" file space in order to circumvent restrictions
2746789Sahrens 	 * imposed in attribute space.
2747789Sahrens 	 */
2748789Sahrens 	if ((szp->z_phys->zp_flags & ZFS_XATTR) !=
2749789Sahrens 	    (dzp->z_phys->zp_flags & ZFS_XATTR)) {
2750789Sahrens 		ZFS_EXIT(zfsvfs);
2751789Sahrens 		return (EINVAL);
2752789Sahrens 	}
2753789Sahrens 
2754789Sahrens 	/*
2755789Sahrens 	 * POSIX dictates that we return EPERM here.
2756789Sahrens 	 * Better choices include ENOTSUP or EISDIR.
2757789Sahrens 	 */
2758789Sahrens 	if (svp->v_type == VDIR) {
2759789Sahrens 		ZFS_EXIT(zfsvfs);
2760789Sahrens 		return (EPERM);
2761789Sahrens 	}
2762789Sahrens 
2763789Sahrens 	if ((uid_t)szp->z_phys->zp_uid != crgetuid(cr) &&
2764789Sahrens 	    secpolicy_basic_link(cr) != 0) {
2765789Sahrens 		ZFS_EXIT(zfsvfs);
2766789Sahrens 		return (EPERM);
2767789Sahrens 	}
2768789Sahrens 
2769789Sahrens 	if (error = zfs_zaccess(dzp, ACE_ADD_FILE, cr)) {
2770789Sahrens 		ZFS_EXIT(zfsvfs);
2771789Sahrens 		return (error);
2772789Sahrens 	}
2773789Sahrens 
2774789Sahrens 	/*
2775789Sahrens 	 * Attempt to lock directory; fail if entry already exists.
2776789Sahrens 	 */
2777789Sahrens 	if (error = zfs_dirent_lock(&dl, dzp, name, &tzp, ZNEW)) {
2778789Sahrens 		ZFS_EXIT(zfsvfs);
2779789Sahrens 		return (error);
2780789Sahrens 	}
2781789Sahrens 
2782789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
2783789Sahrens 	dmu_tx_hold_bonus(tx, szp->z_id);
27841544Seschrock 	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
2785789Sahrens 	error = dmu_tx_assign(tx, zfsvfs->z_assign);
2786789Sahrens 	if (error) {
2787789Sahrens 		zfs_dirent_unlock(dl);
2788789Sahrens 		if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
27892113Sahrens 			dmu_tx_wait(tx);
27902113Sahrens 			dmu_tx_abort(tx);
2791789Sahrens 			goto top;
2792789Sahrens 		}
27932113Sahrens 		dmu_tx_abort(tx);
2794789Sahrens 		ZFS_EXIT(zfsvfs);
2795789Sahrens 		return (error);
2796789Sahrens 	}
2797789Sahrens 
2798789Sahrens 	error = zfs_link_create(dl, szp, tx, 0);
2799789Sahrens 
2800789Sahrens 	if (error == 0)
28012638Sperrin 		zfs_log_link(zilog, tx, TX_LINK, dzp, szp, name);
2802789Sahrens 
2803789Sahrens 	dmu_tx_commit(tx);
2804789Sahrens 
2805789Sahrens 	zfs_dirent_unlock(dl);
2806789Sahrens 
28074863Spraks 	if (error == 0) {
28084863Spraks 		vnevent_link(svp);
28094863Spraks 	}
28104863Spraks 
2811789Sahrens 	ZFS_EXIT(zfsvfs);
2812789Sahrens 	return (error);
2813789Sahrens }
2814789Sahrens 
2815789Sahrens /*
2816789Sahrens  * zfs_null_putapage() is used when the file system has been force
2817789Sahrens  * unmounted. It just drops the pages.
2818789Sahrens  */
2819789Sahrens /* ARGSUSED */
2820789Sahrens static int
2821789Sahrens zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
2822789Sahrens 		size_t *lenp, int flags, cred_t *cr)
2823789Sahrens {
2824789Sahrens 	pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
2825789Sahrens 	return (0);
2826789Sahrens }
2827789Sahrens 
28282688Smaybee /*
28292688Smaybee  * Push a page out to disk, klustering if possible.
28302688Smaybee  *
28312688Smaybee  *	IN:	vp	- file to push page to.
28322688Smaybee  *		pp	- page to push.
28332688Smaybee  *		flags	- additional flags.
28342688Smaybee  *		cr	- credentials of caller.
28352688Smaybee  *
28362688Smaybee  *	OUT:	offp	- start of range pushed.
28372688Smaybee  *		lenp	- len of range pushed.
28382688Smaybee  *
28392688Smaybee  *	RETURN:	0 if success
28402688Smaybee  *		error code if failure
28412688Smaybee  *
28422688Smaybee  * NOTE: callers must have locked the page to be pushed.  On
28432688Smaybee  * exit, the page (and all other pages in the kluster) must be
28442688Smaybee  * unlocked.
28452688Smaybee  */
2846789Sahrens /* ARGSUSED */
2847789Sahrens static int
2848789Sahrens zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
2849789Sahrens 		size_t *lenp, int flags, cred_t *cr)
2850789Sahrens {
2851789Sahrens 	znode_t		*zp = VTOZ(vp);
2852789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2853789Sahrens 	zilog_t		*zilog = zfsvfs->z_log;
2854789Sahrens 	dmu_tx_t	*tx;
28551669Sperrin 	rl_t		*rl;
28562688Smaybee 	u_offset_t	off, koff;
28572688Smaybee 	size_t		len, klen;
28584709Smaybee 	uint64_t	filesz;
2859789Sahrens 	int		err;
2860789Sahrens 
28614709Smaybee 	filesz = zp->z_phys->zp_size;
28622688Smaybee 	off = pp->p_offset;
28632688Smaybee 	len = PAGESIZE;
28642688Smaybee 	/*
28652688Smaybee 	 * If our blocksize is bigger than the page size, try to kluster
28662688Smaybee 	 * muiltiple pages so that we write a full block (thus avoiding
28672688Smaybee 	 * a read-modify-write).
28682688Smaybee 	 */
28694709Smaybee 	if (off < filesz && zp->z_blksz > PAGESIZE) {
28702688Smaybee 		if (!ISP2(zp->z_blksz)) {
28712688Smaybee 			/* Only one block in the file. */
28722688Smaybee 			klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
28732688Smaybee 			koff = 0;
28742688Smaybee 		} else {
28752688Smaybee 			klen = zp->z_blksz;
28762688Smaybee 			koff = P2ALIGN(off, (u_offset_t)klen);
28772688Smaybee 		}
28782688Smaybee 		ASSERT(koff <= filesz);
28792688Smaybee 		if (koff + klen > filesz)
28802688Smaybee 			klen = P2ROUNDUP(filesz - koff, (uint64_t)PAGESIZE);
28812688Smaybee 		pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
28822688Smaybee 	}
28832688Smaybee 	ASSERT3U(btop(len), ==, btopr(len));
2884789Sahrens top:
28852688Smaybee 	rl = zfs_range_lock(zp, off, len, RL_WRITER);
28861819Smaybee 	/*
28871819Smaybee 	 * Can't push pages past end-of-file.
28881819Smaybee 	 */
28894709Smaybee 	filesz = zp->z_phys->zp_size;
28904709Smaybee 	if (off >= filesz) {
28914709Smaybee 		/* ignore all pages */
28922688Smaybee 		err = 0;
28932688Smaybee 		goto out;
28944709Smaybee 	} else if (off + len > filesz) {
28954709Smaybee 		int npages = btopr(filesz - off);
28962688Smaybee 		page_t *trunc;
28972688Smaybee 
28982688Smaybee 		page_list_break(&pp, &trunc, npages);
28994709Smaybee 		/* ignore pages past end of file */
29002688Smaybee 		if (trunc)
29014709Smaybee 			pvn_write_done(trunc, flags);
29024709Smaybee 		len = filesz - off;
29031819Smaybee 	}
2904789Sahrens 
2905789Sahrens 	tx = dmu_tx_create(zfsvfs->z_os);
2906789Sahrens 	dmu_tx_hold_write(tx, zp->z_id, off, len);
2907789Sahrens 	dmu_tx_hold_bonus(tx, zp->z_id);
2908789Sahrens 	err = dmu_tx_assign(tx, zfsvfs->z_assign);
2909789Sahrens 	if (err != 0) {
2910789Sahrens 		if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) {
29112688Smaybee 			zfs_range_unlock(rl);
29122113Sahrens 			dmu_tx_wait(tx);
29132113Sahrens 			dmu_tx_abort(tx);
29142688Smaybee 			err = 0;
2915789Sahrens 			goto top;
2916789Sahrens 		}
29172113Sahrens 		dmu_tx_abort(tx);
2918789Sahrens 		goto out;
2919789Sahrens 	}
2920789Sahrens 
29212688Smaybee 	if (zp->z_blksz <= PAGESIZE) {
29222688Smaybee 		caddr_t va = ppmapin(pp, PROT_READ, (caddr_t)-1);
29232688Smaybee 		ASSERT3U(len, <=, PAGESIZE);
29242688Smaybee 		dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
29252688Smaybee 		ppmapout(va);
29262688Smaybee 	} else {
29272688Smaybee 		err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
29282688Smaybee 	}
29292688Smaybee 
29302688Smaybee 	if (err == 0) {
29312688Smaybee 		zfs_time_stamper(zp, CONTENT_MODIFIED, tx);
29323638Sbillm 		zfs_log_write(zilog, tx, TX_WRITE, zp, off, len, 0);
29332688Smaybee 		dmu_tx_commit(tx);
29342688Smaybee 	}
29352688Smaybee 
29362688Smaybee out:
29372237Smaybee 	zfs_range_unlock(rl);
29384709Smaybee 	pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
2939789Sahrens 	if (offp)
2940789Sahrens 		*offp = off;
2941789Sahrens 	if (lenp)
2942789Sahrens 		*lenp = len;
2943789Sahrens 
2944789Sahrens 	return (err);
2945789Sahrens }
2946789Sahrens 
2947789Sahrens /*
2948789Sahrens  * Copy the portion of the file indicated from pages into the file.
2949789Sahrens  * The pages are stored in a page list attached to the files vnode.
2950789Sahrens  *
2951789Sahrens  *	IN:	vp	- vnode of file to push page data to.
2952789Sahrens  *		off	- position in file to put data.
2953789Sahrens  *		len	- amount of data to write.
2954789Sahrens  *		flags	- flags to control the operation.
2955789Sahrens  *		cr	- credentials of caller.
2956789Sahrens  *
2957789Sahrens  *	RETURN:	0 if success
2958789Sahrens  *		error code if failure
2959789Sahrens  *
2960789Sahrens  * Timestamps:
2961789Sahrens  *	vp - ctime|mtime updated
2962789Sahrens  */
2963789Sahrens static int
2964789Sahrens zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr)
2965789Sahrens {
2966789Sahrens 	znode_t		*zp = VTOZ(vp);
2967789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
2968789Sahrens 	page_t		*pp;
2969789Sahrens 	size_t		io_len;
2970789Sahrens 	u_offset_t	io_off;
29711669Sperrin 	uint64_t	filesz;
2972789Sahrens 	int		error = 0;
2973789Sahrens 
2974*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
2975789Sahrens 
2976789Sahrens 	ASSERT(zp->z_dbuf_held && zp->z_phys);
2977789Sahrens 
2978789Sahrens 	if (len == 0) {
2979789Sahrens 		/*
2980789Sahrens 		 * Search the entire vp list for pages >= off.
2981789Sahrens 		 */
2982789Sahrens 		error = pvn_vplist_dirty(vp, (u_offset_t)off, zfs_putapage,
2983789Sahrens 		    flags, cr);
29841472Sperrin 		goto out;
2985789Sahrens 	}
2986789Sahrens 
29871669Sperrin 	filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */
29881669Sperrin 	if (off > filesz) {
2989789Sahrens 		/* past end of file */
2990789Sahrens 		ZFS_EXIT(zfsvfs);
2991789Sahrens 		return (0);
2992789Sahrens 	}
2993789Sahrens 
29941669Sperrin 	len = MIN(len, filesz - off);
2995789Sahrens 
29961472Sperrin 	for (io_off = off; io_off < off + len; io_off += io_len) {
2997789Sahrens 		if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
29981669Sperrin 			pp = page_lookup(vp, io_off,
29994339Sperrin 			    (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
3000789Sahrens 		} else {
3001789Sahrens 			pp = page_lookup_nowait(vp, io_off,
30024339Sperrin 			    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
3003789Sahrens 		}
3004789Sahrens 
3005789Sahrens 		if (pp != NULL && pvn_getdirty(pp, flags)) {
3006789Sahrens 			int err;
3007789Sahrens 
3008789Sahrens 			/*
3009789Sahrens 			 * Found a dirty page to push
3010789Sahrens 			 */
30111669Sperrin 			err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
30121669Sperrin 			if (err)
3013789Sahrens 				error = err;
3014789Sahrens 		} else {
3015789Sahrens 			io_len = PAGESIZE;
3016789Sahrens 		}
3017789Sahrens 	}
30181472Sperrin out:
30192638Sperrin 	if ((flags & B_ASYNC) == 0)
30202638Sperrin 		zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id);
3021789Sahrens 	ZFS_EXIT(zfsvfs);
3022789Sahrens 	return (error);
3023789Sahrens }
3024789Sahrens 
3025789Sahrens void
3026789Sahrens zfs_inactive(vnode_t *vp, cred_t *cr)
3027789Sahrens {
3028789Sahrens 	znode_t	*zp = VTOZ(vp);
3029789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3030789Sahrens 	int error;
3031789Sahrens 
3032*5326Sek110237 	rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
3033*5326Sek110237 	if (zp->z_dbuf_held == 0) {
3034789Sahrens 		if (vn_has_cached_data(vp)) {
3035789Sahrens 			(void) pvn_vplist_dirty(vp, 0, zfs_null_putapage,
3036789Sahrens 			    B_INVAL, cr);
3037789Sahrens 		}
3038789Sahrens 
30391544Seschrock 		mutex_enter(&zp->z_lock);
3040789Sahrens 		vp->v_count = 0; /* count arrives as 1 */
30411544Seschrock 		if (zp->z_dbuf == NULL) {
30421544Seschrock 			mutex_exit(&zp->z_lock);
30431544Seschrock 			zfs_znode_free(zp);
30441544Seschrock 		} else {
30451544Seschrock 			mutex_exit(&zp->z_lock);
30461544Seschrock 		}
3047*5326Sek110237 		rw_exit(&zfsvfs->z_teardown_inactive_lock);
3048789Sahrens 		VFS_RELE(zfsvfs->z_vfs);
3049789Sahrens 		return;
3050789Sahrens 	}
3051789Sahrens 
3052789Sahrens 	/*
3053789Sahrens 	 * Attempt to push any data in the page cache.  If this fails
3054789Sahrens 	 * we will get kicked out later in zfs_zinactive().
3055789Sahrens 	 */
30561298Sperrin 	if (vn_has_cached_data(vp)) {
30571298Sperrin 		(void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC,
30581298Sperrin 		    cr);
30591298Sperrin 	}
3060789Sahrens 
30613461Sahrens 	if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3062789Sahrens 		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
3063789Sahrens 
3064789Sahrens 		dmu_tx_hold_bonus(tx, zp->z_id);
3065789Sahrens 		error = dmu_tx_assign(tx, TXG_WAIT);
3066789Sahrens 		if (error) {
3067789Sahrens 			dmu_tx_abort(tx);
3068789Sahrens 		} else {
3069789Sahrens 			dmu_buf_will_dirty(zp->z_dbuf, tx);
3070789Sahrens 			mutex_enter(&zp->z_lock);
3071789Sahrens 			zp->z_atime_dirty = 0;
3072789Sahrens 			mutex_exit(&zp->z_lock);
3073789Sahrens 			dmu_tx_commit(tx);
3074789Sahrens 		}
3075789Sahrens 	}
3076789Sahrens 
3077789Sahrens 	zfs_zinactive(zp);
3078*5326Sek110237 	rw_exit(&zfsvfs->z_teardown_inactive_lock);
3079789Sahrens }
3080789Sahrens 
3081789Sahrens /*
3082789Sahrens  * Bounds-check the seek operation.
3083789Sahrens  *
3084789Sahrens  *	IN:	vp	- vnode seeking within
3085789Sahrens  *		ooff	- old file offset
3086789Sahrens  *		noffp	- pointer to new file offset
3087789Sahrens  *
3088789Sahrens  *	RETURN:	0 if success
3089789Sahrens  *		EINVAL if new offset invalid
3090789Sahrens  */
3091789Sahrens /* ARGSUSED */
3092789Sahrens static int
3093789Sahrens zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp)
3094789Sahrens {
3095789Sahrens 	if (vp->v_type == VDIR)
3096789Sahrens 		return (0);
3097789Sahrens 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
3098789Sahrens }
3099789Sahrens 
3100789Sahrens /*
3101789Sahrens  * Pre-filter the generic locking function to trap attempts to place
3102789Sahrens  * a mandatory lock on a memory mapped file.
3103789Sahrens  */
3104789Sahrens static int
3105789Sahrens zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
3106789Sahrens     flk_callback_t *flk_cbp, cred_t *cr)
3107789Sahrens {
3108789Sahrens 	znode_t *zp = VTOZ(vp);
3109789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3110789Sahrens 	int error;
3111789Sahrens 
3112*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3113789Sahrens 
3114789Sahrens 	/*
31151544Seschrock 	 * We are following the UFS semantics with respect to mapcnt
31161544Seschrock 	 * here: If we see that the file is mapped already, then we will
31171544Seschrock 	 * return an error, but we don't worry about races between this
31181544Seschrock 	 * function and zfs_map().
3119789Sahrens 	 */
31201544Seschrock 	if (zp->z_mapcnt > 0 && MANDMODE((mode_t)zp->z_phys->zp_mode)) {
3121789Sahrens 		ZFS_EXIT(zfsvfs);
3122789Sahrens 		return (EAGAIN);
3123789Sahrens 	}
3124789Sahrens 	error = fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr);
3125789Sahrens 	ZFS_EXIT(zfsvfs);
3126789Sahrens 	return (error);
3127789Sahrens }
3128789Sahrens 
3129789Sahrens /*
3130789Sahrens  * If we can't find a page in the cache, we will create a new page
3131789Sahrens  * and fill it with file data.  For efficiency, we may try to fill
31321669Sperrin  * multiple pages at once (klustering).
3133789Sahrens  */
3134789Sahrens static int
3135789Sahrens zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
3136789Sahrens     caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
3137789Sahrens {
3138789Sahrens 	znode_t *zp = VTOZ(vp);
3139789Sahrens 	page_t *pp, *cur_pp;
3140789Sahrens 	objset_t *os = zp->z_zfsvfs->z_os;
3141789Sahrens 	caddr_t va;
3142789Sahrens 	u_offset_t io_off, total;
3143789Sahrens 	uint64_t oid = zp->z_id;
3144789Sahrens 	size_t io_len;
31451669Sperrin 	uint64_t filesz;
3146789Sahrens 	int err;
3147789Sahrens 
3148789Sahrens 	/*
3149789Sahrens 	 * If we are only asking for a single page don't bother klustering.
3150789Sahrens 	 */
31511669Sperrin 	filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */
31522688Smaybee 	if (off >= filesz)
31532688Smaybee 		return (EFAULT);
31542688Smaybee 	if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
3155789Sahrens 		io_off = off;
3156789Sahrens 		io_len = PAGESIZE;
3157789Sahrens 		pp = page_create_va(vp, io_off, io_len, PG_WAIT, seg, addr);
3158789Sahrens 	} else {
3159789Sahrens 		/*
3160789Sahrens 		 * Try to fill a kluster of pages (a blocks worth).
3161789Sahrens 		 */
3162789Sahrens 		size_t klen;
3163789Sahrens 		u_offset_t koff;
3164789Sahrens 
3165789Sahrens 		if (!ISP2(zp->z_blksz)) {
3166789Sahrens 			/* Only one block in the file. */
3167789Sahrens 			klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
3168789Sahrens 			koff = 0;
3169789Sahrens 		} else {
31703131Sgw25295 			/*
31713131Sgw25295 			 * It would be ideal to align our offset to the
31723131Sgw25295 			 * blocksize but doing so has resulted in some
31733131Sgw25295 			 * strange application crashes. For now, we
31743131Sgw25295 			 * leave the offset as is and only adjust the
31753131Sgw25295 			 * length if we are off the end of the file.
31763131Sgw25295 			 */
31773131Sgw25295 			koff = off;
3178789Sahrens 			klen = plsz;
3179789Sahrens 		}
31801819Smaybee 		ASSERT(koff <= filesz);
31811819Smaybee 		if (koff + klen > filesz)
31821819Smaybee 			klen = P2ROUNDUP(filesz, (uint64_t)PAGESIZE) - koff;
31832688Smaybee 		ASSERT3U(off, >=, koff);
31842688Smaybee 		ASSERT3U(off, <, koff + klen);
3185789Sahrens 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
31864339Sperrin 		    &io_len, koff, klen, 0);
3187789Sahrens 	}
3188789Sahrens 	if (pp == NULL) {
3189789Sahrens 		/*
3190789Sahrens 		 * Some other thread entered the page before us.
3191789Sahrens 		 * Return to zfs_getpage to retry the lookup.
3192789Sahrens 		 */
3193789Sahrens 		*pl = NULL;
3194789Sahrens 		return (0);
3195789Sahrens 	}
3196789Sahrens 
3197789Sahrens 	/*
3198789Sahrens 	 * Fill the pages in the kluster.
3199789Sahrens 	 */
3200789Sahrens 	cur_pp = pp;
3201789Sahrens 	for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
32022688Smaybee 		ASSERT3U(io_off, ==, cur_pp->p_offset);
3203789Sahrens 		va = ppmapin(cur_pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
32041544Seschrock 		err = dmu_read(os, oid, io_off, PAGESIZE, va);
3205789Sahrens 		ppmapout(va);
3206789Sahrens 		if (err) {
3207789Sahrens 			/* On error, toss the entire kluster */
3208789Sahrens 			pvn_read_done(pp, B_ERROR);
3209789Sahrens 			return (err);
3210789Sahrens 		}
3211789Sahrens 		cur_pp = cur_pp->p_next;
3212789Sahrens 	}
3213789Sahrens out:
3214789Sahrens 	/*
3215789Sahrens 	 * Fill in the page list array from the kluster.  If
3216789Sahrens 	 * there are too many pages in the kluster, return
3217789Sahrens 	 * as many pages as possible starting from the desired
3218789Sahrens 	 * offset `off'.
3219789Sahrens 	 * NOTE: the page list will always be null terminated.
3220789Sahrens 	 */
3221789Sahrens 	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
3222789Sahrens 
3223789Sahrens 	return (0);
3224789Sahrens }
3225789Sahrens 
3226789Sahrens /*
3227789Sahrens  * Return pointers to the pages for the file region [off, off + len]
3228789Sahrens  * in the pl array.  If plsz is greater than len, this function may
3229789Sahrens  * also return page pointers from before or after the specified
3230789Sahrens  * region (i.e. some region [off', off' + plsz]).  These additional
3231789Sahrens  * pages are only returned if they are already in the cache, or were
3232789Sahrens  * created as part of a klustered read.
3233789Sahrens  *
3234789Sahrens  *	IN:	vp	- vnode of file to get data from.
3235789Sahrens  *		off	- position in file to get data from.
3236789Sahrens  *		len	- amount of data to retrieve.
3237789Sahrens  *		plsz	- length of provided page list.
3238789Sahrens  *		seg	- segment to obtain pages for.
3239789Sahrens  *		addr	- virtual address of fault.
3240789Sahrens  *		rw	- mode of created pages.
3241789Sahrens  *		cr	- credentials of caller.
3242789Sahrens  *
3243789Sahrens  *	OUT:	protp	- protection mode of created pages.
3244789Sahrens  *		pl	- list of pages created.
3245789Sahrens  *
3246789Sahrens  *	RETURN:	0 if success
3247789Sahrens  *		error code if failure
3248789Sahrens  *
3249789Sahrens  * Timestamps:
3250789Sahrens  *	vp - atime updated
3251789Sahrens  */
3252789Sahrens /* ARGSUSED */
3253789Sahrens static int
3254789Sahrens zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3255789Sahrens 	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3256789Sahrens 	enum seg_rw rw, cred_t *cr)
3257789Sahrens {
3258789Sahrens 	znode_t		*zp = VTOZ(vp);
3259789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3260789Sahrens 	page_t		*pp, **pl0 = pl;
32612752Sperrin 	int		need_unlock = 0, err = 0;
32622752Sperrin 	offset_t	orig_off;
3263789Sahrens 
3264*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3265789Sahrens 
3266789Sahrens 	if (protp)
3267789Sahrens 		*protp = PROT_ALL;
3268789Sahrens 
3269789Sahrens 	ASSERT(zp->z_dbuf_held && zp->z_phys);
3270789Sahrens 
3271789Sahrens 	/* no faultahead (for now) */
3272789Sahrens 	if (pl == NULL) {
3273789Sahrens 		ZFS_EXIT(zfsvfs);
3274789Sahrens 		return (0);
3275789Sahrens 	}
3276789Sahrens 
3277789Sahrens 	/* can't fault past EOF */
3278789Sahrens 	if (off >= zp->z_phys->zp_size) {
3279789Sahrens 		ZFS_EXIT(zfsvfs);
3280789Sahrens 		return (EFAULT);
3281789Sahrens 	}
32822752Sperrin 	orig_off = off;
3283789Sahrens 
3284789Sahrens 	/*
3285789Sahrens 	 * If we already own the lock, then we must be page faulting
3286789Sahrens 	 * in the middle of a write to this file (i.e., we are writing
3287789Sahrens 	 * to this file using data from a mapped region of the file).
3288789Sahrens 	 */
32892752Sperrin 	if (rw_owner(&zp->z_map_lock) != curthread) {
3290789Sahrens 		rw_enter(&zp->z_map_lock, RW_WRITER);
3291789Sahrens 		need_unlock = TRUE;
3292789Sahrens 	}
3293789Sahrens 
3294789Sahrens 	/*
3295789Sahrens 	 * Loop through the requested range [off, off + len] looking
3296789Sahrens 	 * for pages.  If we don't find a page, we will need to create
3297789Sahrens 	 * a new page and fill it with data from the file.
3298789Sahrens 	 */
3299789Sahrens 	while (len > 0) {
3300789Sahrens 		if (plsz < PAGESIZE)
3301789Sahrens 			break;
3302789Sahrens 		if (pp = page_lookup(vp, off, SE_SHARED)) {
3303789Sahrens 			*pl++ = pp;
3304789Sahrens 			off += PAGESIZE;
3305789Sahrens 			addr += PAGESIZE;
3306789Sahrens 			len -= PAGESIZE;
3307789Sahrens 			plsz -= PAGESIZE;
3308789Sahrens 		} else {
3309789Sahrens 			err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw);
33102752Sperrin 			if (err)
33112752Sperrin 				goto out;
3312789Sahrens 			/*
3313789Sahrens 			 * klustering may have changed our region
3314789Sahrens 			 * to be block aligned.
3315789Sahrens 			 */
3316789Sahrens 			if (((pp = *pl) != 0) && (off != pp->p_offset)) {
3317789Sahrens 				int delta = off - pp->p_offset;
3318789Sahrens 				len += delta;
3319789Sahrens 				off -= delta;
3320789Sahrens 				addr -= delta;
3321789Sahrens 			}
3322789Sahrens 			while (*pl) {
3323789Sahrens 				pl++;
3324789Sahrens 				off += PAGESIZE;
3325789Sahrens 				addr += PAGESIZE;
3326789Sahrens 				plsz -= PAGESIZE;
3327789Sahrens 				if (len > PAGESIZE)
3328789Sahrens 					len -= PAGESIZE;
3329789Sahrens 				else
3330789Sahrens 					len = 0;
3331789Sahrens 			}
3332789Sahrens 		}
3333789Sahrens 	}
3334789Sahrens 
3335789Sahrens 	/*
3336789Sahrens 	 * Fill out the page array with any pages already in the cache.
3337789Sahrens 	 */
3338789Sahrens 	while (plsz > 0) {
3339789Sahrens 		pp = page_lookup_nowait(vp, off, SE_SHARED);
3340789Sahrens 		if (pp == NULL)
3341789Sahrens 			break;
3342789Sahrens 		*pl++ = pp;
3343789Sahrens 		off += PAGESIZE;
3344789Sahrens 		plsz -= PAGESIZE;
3345789Sahrens 	}
3346789Sahrens 
3347789Sahrens 	ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
3348789Sahrens out:
33492752Sperrin 	/*
33502752Sperrin 	 * We can't grab the range lock for the page as reader which would
33512752Sperrin 	 * stop truncation as this leads to deadlock. So we need to recheck
33522752Sperrin 	 * the file size.
33532752Sperrin 	 */
33542752Sperrin 	if (orig_off >= zp->z_phys->zp_size)
33552752Sperrin 		err = EFAULT;
33562752Sperrin 	if (err) {
33572752Sperrin 		/*
33582752Sperrin 		 * Release any pages we have previously locked.
33592752Sperrin 		 */
33602752Sperrin 		while (pl > pl0)
33612752Sperrin 			page_unlock(*--pl);
33622752Sperrin 	}
33632752Sperrin 
3364789Sahrens 	*pl = NULL;
3365789Sahrens 
3366789Sahrens 	if (need_unlock)
3367789Sahrens 		rw_exit(&zp->z_map_lock);
3368789Sahrens 
3369789Sahrens 	ZFS_EXIT(zfsvfs);
3370789Sahrens 	return (err);
3371789Sahrens }
3372789Sahrens 
33731544Seschrock /*
33741544Seschrock  * Request a memory map for a section of a file.  This code interacts
33751544Seschrock  * with common code and the VM system as follows:
33761544Seschrock  *
33771544Seschrock  *	common code calls mmap(), which ends up in smmap_common()
33781544Seschrock  *
33791544Seschrock  *	this calls VOP_MAP(), which takes you into (say) zfs
33801544Seschrock  *
33811544Seschrock  *	zfs_map() calls as_map(), passing segvn_create() as the callback
33821544Seschrock  *
33831544Seschrock  *	segvn_create() creates the new segment and calls VOP_ADDMAP()
33841544Seschrock  *
33851544Seschrock  *	zfs_addmap() updates z_mapcnt
33861544Seschrock  */
3387789Sahrens static int
3388789Sahrens zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
3389789Sahrens     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
3390789Sahrens {
3391789Sahrens 	znode_t *zp = VTOZ(vp);
3392789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3393789Sahrens 	segvn_crargs_t	vn_a;
3394789Sahrens 	int		error;
3395789Sahrens 
3396*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3397789Sahrens 
3398789Sahrens 	if (vp->v_flag & VNOMAP) {
3399789Sahrens 		ZFS_EXIT(zfsvfs);
3400789Sahrens 		return (ENOSYS);
3401789Sahrens 	}
3402789Sahrens 
3403789Sahrens 	if (off < 0 || len > MAXOFFSET_T - off) {
3404789Sahrens 		ZFS_EXIT(zfsvfs);
3405789Sahrens 		return (ENXIO);
3406789Sahrens 	}
3407789Sahrens 
3408789Sahrens 	if (vp->v_type != VREG) {
3409789Sahrens 		ZFS_EXIT(zfsvfs);
3410789Sahrens 		return (ENODEV);
3411789Sahrens 	}
3412789Sahrens 
3413789Sahrens 	/*
3414789Sahrens 	 * If file is locked, disallow mapping.
3415789Sahrens 	 */
34161544Seschrock 	if (MANDMODE((mode_t)zp->z_phys->zp_mode) && vn_has_flocks(vp)) {
34171544Seschrock 		ZFS_EXIT(zfsvfs);
34181544Seschrock 		return (EAGAIN);
3419789Sahrens 	}
3420789Sahrens 
3421789Sahrens 	as_rangelock(as);
3422789Sahrens 	if ((flags & MAP_FIXED) == 0) {
3423789Sahrens 		map_addr(addrp, len, off, 1, flags);
3424789Sahrens 		if (*addrp == NULL) {
3425789Sahrens 			as_rangeunlock(as);
3426789Sahrens 			ZFS_EXIT(zfsvfs);
3427789Sahrens 			return (ENOMEM);
3428789Sahrens 		}
3429789Sahrens 	} else {
3430789Sahrens 		/*
3431789Sahrens 		 * User specified address - blow away any previous mappings
3432789Sahrens 		 */
3433789Sahrens 		(void) as_unmap(as, *addrp, len);
3434789Sahrens 	}
3435789Sahrens 
3436789Sahrens 	vn_a.vp = vp;
3437789Sahrens 	vn_a.offset = (u_offset_t)off;
3438789Sahrens 	vn_a.type = flags & MAP_TYPE;
3439789Sahrens 	vn_a.prot = prot;
3440789Sahrens 	vn_a.maxprot = maxprot;
3441789Sahrens 	vn_a.cred = cr;
3442789Sahrens 	vn_a.amp = NULL;
3443789Sahrens 	vn_a.flags = flags & ~MAP_TYPE;
34441417Skchow 	vn_a.szc = 0;
34451417Skchow 	vn_a.lgrp_mem_policy_flags = 0;
3446789Sahrens 
3447789Sahrens 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
3448789Sahrens 
3449789Sahrens 	as_rangeunlock(as);
3450789Sahrens 	ZFS_EXIT(zfsvfs);
3451789Sahrens 	return (error);
3452789Sahrens }
3453789Sahrens 
3454789Sahrens /* ARGSUSED */
3455789Sahrens static int
3456789Sahrens zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
3457789Sahrens     size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
3458789Sahrens {
34591544Seschrock 	uint64_t pages = btopr(len);
34601544Seschrock 
34611544Seschrock 	atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
3462789Sahrens 	return (0);
3463789Sahrens }
3464789Sahrens 
34651773Seschrock /*
34661773Seschrock  * The reason we push dirty pages as part of zfs_delmap() is so that we get a
34671773Seschrock  * more accurate mtime for the associated file.  Since we don't have a way of
34681773Seschrock  * detecting when the data was actually modified, we have to resort to
34691773Seschrock  * heuristics.  If an explicit msync() is done, then we mark the mtime when the
34701773Seschrock  * last page is pushed.  The problem occurs when the msync() call is omitted,
34711773Seschrock  * which by far the most common case:
34721773Seschrock  *
34731773Seschrock  * 	open()
34741773Seschrock  * 	mmap()
34751773Seschrock  * 	<modify memory>
34761773Seschrock  * 	munmap()
34771773Seschrock  * 	close()
34781773Seschrock  * 	<time lapse>
34791773Seschrock  * 	putpage() via fsflush
34801773Seschrock  *
34811773Seschrock  * If we wait until fsflush to come along, we can have a modification time that
34821773Seschrock  * is some arbitrary point in the future.  In order to prevent this in the
34831773Seschrock  * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is
34841773Seschrock  * torn down.
34851773Seschrock  */
3486789Sahrens /* ARGSUSED */
3487789Sahrens static int
3488789Sahrens zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
3489789Sahrens     size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
3490789Sahrens {
34911544Seschrock 	uint64_t pages = btopr(len);
34921544Seschrock 
34931544Seschrock 	ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
34941544Seschrock 	atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
34951773Seschrock 
34961773Seschrock 	if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
34971773Seschrock 	    vn_has_cached_data(vp))
34981773Seschrock 		(void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr);
34991773Seschrock 
3500789Sahrens 	return (0);
3501789Sahrens }
3502789Sahrens 
3503789Sahrens /*
3504789Sahrens  * Free or allocate space in a file.  Currently, this function only
3505789Sahrens  * supports the `F_FREESP' command.  However, this command is somewhat
3506789Sahrens  * misnamed, as its functionality includes the ability to allocate as
3507789Sahrens  * well as free space.
3508789Sahrens  *
3509789Sahrens  *	IN:	vp	- vnode of file to free data in.
3510789Sahrens  *		cmd	- action to take (only F_FREESP supported).
3511789Sahrens  *		bfp	- section of file to free/alloc.
3512789Sahrens  *		flag	- current file open mode flags.
3513789Sahrens  *		offset	- current file offset.
3514789Sahrens  *		cr	- credentials of caller [UNUSED].
3515789Sahrens  *
3516789Sahrens  *	RETURN:	0 if success
3517789Sahrens  *		error code if failure
3518789Sahrens  *
3519789Sahrens  * Timestamps:
3520789Sahrens  *	vp - ctime|mtime updated
3521789Sahrens  */
3522789Sahrens /* ARGSUSED */
3523789Sahrens static int
3524789Sahrens zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
3525789Sahrens     offset_t offset, cred_t *cr, caller_context_t *ct)
3526789Sahrens {
3527789Sahrens 	znode_t		*zp = VTOZ(vp);
3528789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3529789Sahrens 	uint64_t	off, len;
3530789Sahrens 	int		error;
3531789Sahrens 
3532*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3533789Sahrens 
3534789Sahrens top:
3535789Sahrens 	if (cmd != F_FREESP) {
3536789Sahrens 		ZFS_EXIT(zfsvfs);
3537789Sahrens 		return (EINVAL);
3538789Sahrens 	}
3539789Sahrens 
3540789Sahrens 	if (error = convoff(vp, bfp, 0, offset)) {
3541789Sahrens 		ZFS_EXIT(zfsvfs);
3542789Sahrens 		return (error);
3543789Sahrens 	}
3544789Sahrens 
3545789Sahrens 	if (bfp->l_len < 0) {
3546789Sahrens 		ZFS_EXIT(zfsvfs);
3547789Sahrens 		return (EINVAL);
3548789Sahrens 	}
3549789Sahrens 
3550789Sahrens 	off = bfp->l_start;
35511669Sperrin 	len = bfp->l_len; /* 0 means from off to end of file */
35521878Smaybee 
35531878Smaybee 	do {
35541878Smaybee 		error = zfs_freesp(zp, off, len, flag, TRUE);
35552113Sahrens 		/* NB: we already did dmu_tx_wait() if necessary */
35561878Smaybee 	} while (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT);
3557789Sahrens 
3558789Sahrens 	ZFS_EXIT(zfsvfs);
3559789Sahrens 	return (error);
3560789Sahrens }
3561789Sahrens 
3562789Sahrens static int
3563789Sahrens zfs_fid(vnode_t *vp, fid_t *fidp)
3564789Sahrens {
3565789Sahrens 	znode_t		*zp = VTOZ(vp);
3566789Sahrens 	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
3567*5326Sek110237 	uint32_t	gen;
3568789Sahrens 	uint64_t	object = zp->z_id;
3569789Sahrens 	zfid_short_t	*zfid;
3570789Sahrens 	int		size, i;
3571789Sahrens 
3572*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3573*5326Sek110237 	gen = (uint32_t)zp->z_gen;
3574789Sahrens 
3575789Sahrens 	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
3576789Sahrens 	if (fidp->fid_len < size) {
3577789Sahrens 		fidp->fid_len = size;
35781512Sek110237 		ZFS_EXIT(zfsvfs);
3579789Sahrens 		return (ENOSPC);
3580789Sahrens 	}
3581789Sahrens 
3582789Sahrens 	zfid = (zfid_short_t *)fidp;
3583789Sahrens 
3584789Sahrens 	zfid->zf_len = size;
3585789Sahrens 
3586789Sahrens 	for (i = 0; i < sizeof (zfid->zf_object); i++)
3587789Sahrens 		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
3588789Sahrens 
3589789Sahrens 	/* Must have a non-zero generation number to distinguish from .zfs */
3590789Sahrens 	if (gen == 0)
3591789Sahrens 		gen = 1;
3592789Sahrens 	for (i = 0; i < sizeof (zfid->zf_gen); i++)
3593789Sahrens 		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
3594789Sahrens 
3595789Sahrens 	if (size == LONG_FID_LEN) {
3596789Sahrens 		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
3597789Sahrens 		zfid_long_t	*zlfid;
3598789Sahrens 
3599789Sahrens 		zlfid = (zfid_long_t *)fidp;
3600789Sahrens 
3601789Sahrens 		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
3602789Sahrens 			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
3603789Sahrens 
3604789Sahrens 		/* XXX - this should be the generation number for the objset */
3605789Sahrens 		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
3606789Sahrens 			zlfid->zf_setgen[i] = 0;
3607789Sahrens 	}
3608789Sahrens 
3609789Sahrens 	ZFS_EXIT(zfsvfs);
3610789Sahrens 	return (0);
3611789Sahrens }
3612789Sahrens 
3613789Sahrens static int
3614789Sahrens zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr)
3615789Sahrens {
3616789Sahrens 	znode_t		*zp, *xzp;
3617789Sahrens 	zfsvfs_t	*zfsvfs;
3618789Sahrens 	zfs_dirlock_t	*dl;
3619789Sahrens 	int		error;
3620789Sahrens 
3621789Sahrens 	switch (cmd) {
3622789Sahrens 	case _PC_LINK_MAX:
3623789Sahrens 		*valp = ULONG_MAX;
3624789Sahrens 		return (0);
3625789Sahrens 
3626789Sahrens 	case _PC_FILESIZEBITS:
3627789Sahrens 		*valp = 64;
3628789Sahrens 		return (0);
3629789Sahrens 
3630789Sahrens 	case _PC_XATTR_EXISTS:
3631789Sahrens 		zp = VTOZ(vp);
3632789Sahrens 		zfsvfs = zp->z_zfsvfs;
3633*5326Sek110237 		ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3634789Sahrens 		*valp = 0;
3635789Sahrens 		error = zfs_dirent_lock(&dl, zp, "", &xzp,
3636789Sahrens 		    ZXATTR | ZEXISTS | ZSHARED);
3637789Sahrens 		if (error == 0) {
3638789Sahrens 			zfs_dirent_unlock(dl);
3639789Sahrens 			if (!zfs_dirempty(xzp))
3640789Sahrens 				*valp = 1;
3641789Sahrens 			VN_RELE(ZTOV(xzp));
3642789Sahrens 		} else if (error == ENOENT) {
3643789Sahrens 			/*
3644789Sahrens 			 * If there aren't extended attributes, it's the
3645789Sahrens 			 * same as having zero of them.
3646789Sahrens 			 */
3647789Sahrens 			error = 0;
3648789Sahrens 		}
3649789Sahrens 		ZFS_EXIT(zfsvfs);
3650789Sahrens 		return (error);
3651789Sahrens 
3652789Sahrens 	case _PC_ACL_ENABLED:
3653789Sahrens 		*valp = _ACL_ACE_ENABLED;
3654789Sahrens 		return (0);
3655789Sahrens 
3656789Sahrens 	case _PC_MIN_HOLE_SIZE:
3657789Sahrens 		*valp = (ulong_t)SPA_MINBLOCKSIZE;
3658789Sahrens 		return (0);
3659789Sahrens 
3660789Sahrens 	default:
3661789Sahrens 		return (fs_pathconf(vp, cmd, valp, cr));
3662789Sahrens 	}
3663789Sahrens }
3664789Sahrens 
3665789Sahrens /*ARGSUSED*/
3666789Sahrens static int
3667789Sahrens zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr)
3668789Sahrens {
3669789Sahrens 	znode_t *zp = VTOZ(vp);
3670789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3671789Sahrens 	int error;
3672789Sahrens 
3673*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3674789Sahrens 	error = zfs_getacl(zp, vsecp, cr);
3675789Sahrens 	ZFS_EXIT(zfsvfs);
3676789Sahrens 
3677789Sahrens 	return (error);
3678789Sahrens }
3679789Sahrens 
3680789Sahrens /*ARGSUSED*/
3681789Sahrens static int
3682789Sahrens zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr)
3683789Sahrens {
3684789Sahrens 	znode_t *zp = VTOZ(vp);
3685789Sahrens 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3686789Sahrens 	int error;
3687789Sahrens 
3688*5326Sek110237 	ZFS_ENTER_VERIFY_ZP(zfsvfs, zp);
3689789Sahrens 	error = zfs_setacl(zp, vsecp, cr);
3690789Sahrens 	ZFS_EXIT(zfsvfs);
3691789Sahrens 	return (error);
3692789Sahrens }
3693789Sahrens 
3694789Sahrens /*
3695789Sahrens  * Predeclare these here so that the compiler assumes that
3696789Sahrens  * this is an "old style" function declaration that does
3697789Sahrens  * not include arguments => we won't get type mismatch errors
3698789Sahrens  * in the initializations that follow.
3699789Sahrens  */
3700789Sahrens static int zfs_inval();
3701789Sahrens static int zfs_isdir();
3702789Sahrens 
3703789Sahrens static int
3704789Sahrens zfs_inval()
3705789Sahrens {
3706789Sahrens 	return (EINVAL);
3707789Sahrens }
3708789Sahrens 
3709789Sahrens static int
3710789Sahrens zfs_isdir()
3711789Sahrens {
3712789Sahrens 	return (EISDIR);
3713789Sahrens }
3714789Sahrens /*
3715789Sahrens  * Directory vnode operations template
3716789Sahrens  */
3717789Sahrens vnodeops_t *zfs_dvnodeops;
3718789Sahrens const fs_operation_def_t zfs_dvnodeops_template[] = {
37193898Srsb 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
37203898Srsb 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
37213898Srsb 	VOPNAME_READ,		{ .error = zfs_isdir },
37223898Srsb 	VOPNAME_WRITE,		{ .error = zfs_isdir },
37233898Srsb 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
37243898Srsb 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
37253898Srsb 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
37263898Srsb 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
37273898Srsb 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
37283898Srsb 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
37293898Srsb 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
37303898Srsb 	VOPNAME_LINK,		{ .vop_link = zfs_link },
37313898Srsb 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
37323898Srsb 	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
37333898Srsb 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
37343898Srsb 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
37353898Srsb 	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
37363898Srsb 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
37373898Srsb 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
37383898Srsb 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
37393898Srsb 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
37403898Srsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
37413898Srsb 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
37423898Srsb 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
37434863Spraks 	VOPNAME_VNEVENT, 	{ .vop_vnevent = fs_vnevent_support },
37443898Srsb 	NULL,			NULL
3745789Sahrens };
3746789Sahrens 
3747789Sahrens /*
3748789Sahrens  * Regular file vnode operations template
3749789Sahrens  */
3750789Sahrens vnodeops_t *zfs_fvnodeops;
3751789Sahrens const fs_operation_def_t zfs_fvnodeops_template[] = {
37523898Srsb 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
37533898Srsb 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
37543898Srsb 	VOPNAME_READ,		{ .vop_read = zfs_read },
37553898Srsb 	VOPNAME_WRITE,		{ .vop_write = zfs_write },
37563898Srsb 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
37573898Srsb 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
37583898Srsb 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
37593898Srsb 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
37603898Srsb 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
37613898Srsb 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
37623898Srsb 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
37633898Srsb 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
37643898Srsb 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
37653898Srsb 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
37663898Srsb 	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
37673898Srsb 	VOPNAME_SPACE,		{ .vop_space = zfs_space },
37683898Srsb 	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
37693898Srsb 	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
37703898Srsb 	VOPNAME_MAP,		{ .vop_map = zfs_map },
37713898Srsb 	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
37723898Srsb 	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
37733898Srsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
37743898Srsb 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
37753898Srsb 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
37763898Srsb 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
37773898Srsb 	NULL,			NULL
3778789Sahrens };
3779789Sahrens 
3780789Sahrens /*
3781789Sahrens  * Symbolic link vnode operations template
3782789Sahrens  */
3783789Sahrens vnodeops_t *zfs_symvnodeops;
3784789Sahrens const fs_operation_def_t zfs_symvnodeops_template[] = {
37853898Srsb 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
37863898Srsb 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
37873898Srsb 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
37883898Srsb 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
37893898Srsb 	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
37903898Srsb 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
37913898Srsb 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
37923898Srsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
37933898Srsb 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
37943898Srsb 	NULL,			NULL
3795789Sahrens };
3796789Sahrens 
3797789Sahrens /*
3798789Sahrens  * Extended attribute directory vnode operations template
3799789Sahrens  *	This template is identical to the directory vnodes
3800789Sahrens  *	operation template except for restricted operations:
3801789Sahrens  *		VOP_MKDIR()
3802789Sahrens  *		VOP_SYMLINK()
3803789Sahrens  * Note that there are other restrictions embedded in:
3804789Sahrens  *	zfs_create()	- restrict type to VREG
3805789Sahrens  *	zfs_link()	- no links into/out of attribute space
3806789Sahrens  *	zfs_rename()	- no moves into/out of attribute space
3807789Sahrens  */
3808789Sahrens vnodeops_t *zfs_xdvnodeops;
3809789Sahrens const fs_operation_def_t zfs_xdvnodeops_template[] = {
38103898Srsb 	VOPNAME_OPEN,		{ .vop_open = zfs_open },
38113898Srsb 	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
38123898Srsb 	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
38133898Srsb 	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
38143898Srsb 	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
38153898Srsb 	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
38163898Srsb 	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
38173898Srsb 	VOPNAME_CREATE,		{ .vop_create = zfs_create },
38183898Srsb 	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
38193898Srsb 	VOPNAME_LINK,		{ .vop_link = zfs_link },
38203898Srsb 	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
38213898Srsb 	VOPNAME_MKDIR,		{ .error = zfs_inval },
38223898Srsb 	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
38233898Srsb 	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
38243898Srsb 	VOPNAME_SYMLINK,	{ .error = zfs_inval },
38253898Srsb 	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
38263898Srsb 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
38273898Srsb 	VOPNAME_FID,		{ .vop_fid = zfs_fid },
38283898Srsb 	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
38293898Srsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
38303898Srsb 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
38313898Srsb 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
38323898Srsb 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
38333898Srsb 	NULL,			NULL
3834789Sahrens };
3835789Sahrens 
3836789Sahrens /*
3837789Sahrens  * Error vnode operations template
3838789Sahrens  */
3839789Sahrens vnodeops_t *zfs_evnodeops;
3840789Sahrens const fs_operation_def_t zfs_evnodeops_template[] = {
38413898Srsb 	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
38423898Srsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
38433898Srsb 	NULL,			NULL
3844789Sahrens };
3845