xref: /onnv-gate/usr/src/uts/common/fs/nfs/nfs4_rnode.c (revision 11888:542e7ffc22d6)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52265Sjwahlig  * Common Development and Distribution License (the "License").
62265Sjwahlig  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
2211549SMarcel.Telka@Sun.COM  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
270Sstevel@tonic-gate  *  	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
280Sstevel@tonic-gate  *	All Rights Reserved
290Sstevel@tonic-gate  */
300Sstevel@tonic-gate 
310Sstevel@tonic-gate 
320Sstevel@tonic-gate #include <sys/param.h>
330Sstevel@tonic-gate #include <sys/types.h>
340Sstevel@tonic-gate #include <sys/systm.h>
350Sstevel@tonic-gate #include <sys/cred.h>
360Sstevel@tonic-gate #include <sys/proc.h>
370Sstevel@tonic-gate #include <sys/user.h>
380Sstevel@tonic-gate #include <sys/time.h>
390Sstevel@tonic-gate #include <sys/buf.h>
400Sstevel@tonic-gate #include <sys/vfs.h>
410Sstevel@tonic-gate #include <sys/vnode.h>
420Sstevel@tonic-gate #include <sys/socket.h>
430Sstevel@tonic-gate #include <sys/uio.h>
440Sstevel@tonic-gate #include <sys/tiuser.h>
450Sstevel@tonic-gate #include <sys/swap.h>
460Sstevel@tonic-gate #include <sys/errno.h>
470Sstevel@tonic-gate #include <sys/debug.h>
480Sstevel@tonic-gate #include <sys/kmem.h>
490Sstevel@tonic-gate #include <sys/kstat.h>
500Sstevel@tonic-gate #include <sys/cmn_err.h>
510Sstevel@tonic-gate #include <sys/vtrace.h>
520Sstevel@tonic-gate #include <sys/session.h>
530Sstevel@tonic-gate #include <sys/dnlc.h>
540Sstevel@tonic-gate #include <sys/bitmap.h>
550Sstevel@tonic-gate #include <sys/acl.h>
560Sstevel@tonic-gate #include <sys/ddi.h>
570Sstevel@tonic-gate #include <sys/pathname.h>
580Sstevel@tonic-gate #include <sys/flock.h>
590Sstevel@tonic-gate #include <sys/dirent.h>
600Sstevel@tonic-gate #include <sys/flock.h>
610Sstevel@tonic-gate #include <sys/callb.h>
6210368SThomas.Haynes@Sun.COM #include <sys/sdt.h>
630Sstevel@tonic-gate 
64*11888SPavel.Filipensky@Sun.COM #include <vm/pvn.h>
65*11888SPavel.Filipensky@Sun.COM 
660Sstevel@tonic-gate #include <rpc/types.h>
670Sstevel@tonic-gate #include <rpc/xdr.h>
680Sstevel@tonic-gate #include <rpc/auth.h>
690Sstevel@tonic-gate #include <rpc/rpcsec_gss.h>
700Sstevel@tonic-gate #include <rpc/clnt.h>
710Sstevel@tonic-gate 
720Sstevel@tonic-gate #include <nfs/nfs.h>
730Sstevel@tonic-gate #include <nfs/nfs_clnt.h>
740Sstevel@tonic-gate #include <nfs/nfs_acl.h>
750Sstevel@tonic-gate 
760Sstevel@tonic-gate #include <nfs/nfs4.h>
770Sstevel@tonic-gate #include <nfs/rnode4.h>
780Sstevel@tonic-gate #include <nfs/nfs4_clnt.h>
790Sstevel@tonic-gate 
800Sstevel@tonic-gate /*
810Sstevel@tonic-gate  * The hash queues for the access to active and cached rnodes
820Sstevel@tonic-gate  * are organized as doubly linked lists.  A reader/writer lock
830Sstevel@tonic-gate  * for each hash bucket is used to control access and to synchronize
840Sstevel@tonic-gate  * lookups, additions, and deletions from the hash queue.
850Sstevel@tonic-gate  *
860Sstevel@tonic-gate  * The rnode freelist is organized as a doubly linked list with
870Sstevel@tonic-gate  * a head pointer.  Additions and deletions are synchronized via
880Sstevel@tonic-gate  * a single mutex.
890Sstevel@tonic-gate  *
900Sstevel@tonic-gate  * In order to add an rnode to the free list, it must be hashed into
910Sstevel@tonic-gate  * a hash queue and the exclusive lock to the hash queue be held.
920Sstevel@tonic-gate  * If an rnode is not hashed into a hash queue, then it is destroyed
930Sstevel@tonic-gate  * because it represents no valuable information that can be reused
940Sstevel@tonic-gate  * about the file.  The exclusive lock to the hash queue must be
950Sstevel@tonic-gate  * held in order to prevent a lookup in the hash queue from finding
960Sstevel@tonic-gate  * the rnode and using it and assuming that the rnode is not on the
970Sstevel@tonic-gate  * freelist.  The lookup in the hash queue will have the hash queue
980Sstevel@tonic-gate  * locked, either exclusive or shared.
990Sstevel@tonic-gate  *
1000Sstevel@tonic-gate  * The vnode reference count for each rnode is not allowed to drop
1010Sstevel@tonic-gate  * below 1.  This prevents external entities, such as the VM
1020Sstevel@tonic-gate  * subsystem, from acquiring references to vnodes already on the
1030Sstevel@tonic-gate  * freelist and then trying to place them back on the freelist
1040Sstevel@tonic-gate  * when their reference is released.  This means that when an
1050Sstevel@tonic-gate  * rnode is looked up in the hash queues, then either the rnode
1065331Samw  * is removed from the freelist and that reference is transferred to
1070Sstevel@tonic-gate  * the new reference or the vnode reference count must be incremented
1080Sstevel@tonic-gate  * accordingly.  The mutex for the freelist must be held in order to
1090Sstevel@tonic-gate  * accurately test to see if the rnode is on the freelist or not.
1100Sstevel@tonic-gate  * The hash queue lock might be held shared and it is possible that
1110Sstevel@tonic-gate  * two different threads may race to remove the rnode from the
1120Sstevel@tonic-gate  * freelist.  This race can be resolved by holding the mutex for the
1130Sstevel@tonic-gate  * freelist.  Please note that the mutex for the freelist does not
1140Sstevel@tonic-gate  * need to be held if the rnode is not on the freelist.  It can not be
1150Sstevel@tonic-gate  * placed on the freelist due to the requirement that the thread
1160Sstevel@tonic-gate  * putting the rnode on the freelist must hold the exclusive lock
1170Sstevel@tonic-gate  * to the hash queue and the thread doing the lookup in the hash
1180Sstevel@tonic-gate  * queue is holding either a shared or exclusive lock to the hash
1190Sstevel@tonic-gate  * queue.
1200Sstevel@tonic-gate  *
1210Sstevel@tonic-gate  * The lock ordering is:
1220Sstevel@tonic-gate  *
1230Sstevel@tonic-gate  *	hash bucket lock -> vnode lock
124331Ssamf  *	hash bucket lock -> freelist lock -> r_statelock
1250Sstevel@tonic-gate  */
1260Sstevel@tonic-gate r4hashq_t *rtable4;
1270Sstevel@tonic-gate 
1280Sstevel@tonic-gate static kmutex_t rp4freelist_lock;
1290Sstevel@tonic-gate static rnode4_t *rp4freelist = NULL;
1300Sstevel@tonic-gate static long rnode4_new = 0;
1310Sstevel@tonic-gate int rtable4size;
1320Sstevel@tonic-gate static int rtable4mask;
1330Sstevel@tonic-gate static struct kmem_cache *rnode4_cache;
1340Sstevel@tonic-gate static int rnode4_hashlen = 4;
1350Sstevel@tonic-gate 
1360Sstevel@tonic-gate static void	r4inactive(rnode4_t *, cred_t *);
1370Sstevel@tonic-gate static vnode_t	*make_rnode4(nfs4_sharedfh_t *, r4hashq_t *, struct vfs *,
1380Sstevel@tonic-gate 		    struct vnodeops *,
1390Sstevel@tonic-gate 		    int (*)(vnode_t *, page_t *, u_offset_t *, size_t *, int,
1400Sstevel@tonic-gate 		    cred_t *),
1410Sstevel@tonic-gate 		    int *, cred_t *);
1420Sstevel@tonic-gate static void	rp4_rmfree(rnode4_t *);
1430Sstevel@tonic-gate int		nfs4_free_data_reclaim(rnode4_t *);
1440Sstevel@tonic-gate static int	nfs4_active_data_reclaim(rnode4_t *);
1450Sstevel@tonic-gate static int	nfs4_free_reclaim(void);
1460Sstevel@tonic-gate static int	nfs4_active_reclaim(void);
1470Sstevel@tonic-gate static int	nfs4_rnode_reclaim(void);
1480Sstevel@tonic-gate static void	nfs4_reclaim(void *);
1490Sstevel@tonic-gate static int	isrootfh(nfs4_sharedfh_t *, rnode4_t *);
1500Sstevel@tonic-gate static void	uninit_rnode4(rnode4_t *);
1510Sstevel@tonic-gate static void	destroy_rnode4(rnode4_t *);
1525302Sth199096 static void	r4_stub_set(rnode4_t *, nfs4_stub_type_t);
1530Sstevel@tonic-gate 
1540Sstevel@tonic-gate #ifdef DEBUG
1550Sstevel@tonic-gate static int r4_check_for_dups = 0; /* Flag to enable dup rnode detection. */
1560Sstevel@tonic-gate static int nfs4_rnode_debug = 0;
1570Sstevel@tonic-gate /* if nonzero, kmem_cache_free() rnodes rather than place on freelist */
1580Sstevel@tonic-gate static int nfs4_rnode_nofreelist = 0;
1590Sstevel@tonic-gate /* give messages on colliding shared filehandles */
1600Sstevel@tonic-gate static void	r4_dup_check(rnode4_t *, vfs_t *);
1610Sstevel@tonic-gate #endif
1620Sstevel@tonic-gate 
1630Sstevel@tonic-gate /*
1642265Sjwahlig  * If the vnode has pages, run the list and check for any that are
1652265Sjwahlig  * still dangling.  We call this routine before putting an rnode on
1662265Sjwahlig  * the free list.
1672265Sjwahlig  */
1682265Sjwahlig static int
nfs4_dross_pages(vnode_t * vp)1692265Sjwahlig nfs4_dross_pages(vnode_t *vp)
1702265Sjwahlig {
1712265Sjwahlig 	page_t *pp;
1722265Sjwahlig 	kmutex_t *vphm;
1732265Sjwahlig 
1742265Sjwahlig 	vphm = page_vnode_mutex(vp);
1752265Sjwahlig 	mutex_enter(vphm);
1762265Sjwahlig 	if ((pp = vp->v_pages) != NULL) {
1772265Sjwahlig 		do {
178*11888SPavel.Filipensky@Sun.COM 			if (pp->p_hash != PVN_VPLIST_HASH_TAG &&
179*11888SPavel.Filipensky@Sun.COM 			    pp->p_fsdata != C_NOCOMMIT) {
1802265Sjwahlig 				mutex_exit(vphm);
1812265Sjwahlig 				return (1);
1822265Sjwahlig 			}
1832265Sjwahlig 		} while ((pp = pp->p_vpnext) != vp->v_pages);
1842265Sjwahlig 	}
1852265Sjwahlig 	mutex_exit(vphm);
1862265Sjwahlig 
1872265Sjwahlig 	return (0);
1882265Sjwahlig }
1892265Sjwahlig 
1902265Sjwahlig /*
1912265Sjwahlig  * Flush any pages left on this rnode.
1920Sstevel@tonic-gate  */
1930Sstevel@tonic-gate static void
r4flushpages(rnode4_t * rp,cred_t * cr)1942265Sjwahlig r4flushpages(rnode4_t *rp, cred_t *cr)
1950Sstevel@tonic-gate {
1960Sstevel@tonic-gate 	vnode_t *vp;
1970Sstevel@tonic-gate 	int error;
1980Sstevel@tonic-gate 
1990Sstevel@tonic-gate 	/*
2000Sstevel@tonic-gate 	 * Before freeing anything, wait until all asynchronous
2010Sstevel@tonic-gate 	 * activity is done on this rnode.  This will allow all
2020Sstevel@tonic-gate 	 * asynchronous read ahead and write behind i/o's to
2030Sstevel@tonic-gate 	 * finish.
2040Sstevel@tonic-gate 	 */
2050Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2060Sstevel@tonic-gate 	while (rp->r_count > 0)
2070Sstevel@tonic-gate 		cv_wait(&rp->r_cv, &rp->r_statelock);
2080Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2090Sstevel@tonic-gate 
2100Sstevel@tonic-gate 	/*
2110Sstevel@tonic-gate 	 * Flush and invalidate all pages associated with the vnode.
2120Sstevel@tonic-gate 	 */
2130Sstevel@tonic-gate 	vp = RTOV4(rp);
2140Sstevel@tonic-gate 	if (nfs4_has_pages(vp)) {
2150Sstevel@tonic-gate 		ASSERT(vp->v_type != VCHR);
2160Sstevel@tonic-gate 		if ((rp->r_flags & R4DIRTY) && !rp->r_error) {
2175331Samw 			error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr, NULL);
2180Sstevel@tonic-gate 			if (error && (error == ENOSPC || error == EDQUOT)) {
2190Sstevel@tonic-gate 				mutex_enter(&rp->r_statelock);
2200Sstevel@tonic-gate 				if (!rp->r_error)
2210Sstevel@tonic-gate 					rp->r_error = error;
2220Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
2230Sstevel@tonic-gate 			}
2240Sstevel@tonic-gate 		}
2250Sstevel@tonic-gate 		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
2260Sstevel@tonic-gate 	}
2272265Sjwahlig }
2282265Sjwahlig 
2292265Sjwahlig /*
2302265Sjwahlig  * Free the resources associated with an rnode.
2312265Sjwahlig  */
2322265Sjwahlig static void
r4inactive(rnode4_t * rp,cred_t * cr)2332265Sjwahlig r4inactive(rnode4_t *rp, cred_t *cr)
2342265Sjwahlig {
2352265Sjwahlig 	vnode_t *vp;
2362265Sjwahlig 	char *contents;
2372265Sjwahlig 	int size;
2382265Sjwahlig 	vsecattr_t *vsp;
2392265Sjwahlig 	vnode_t *xattr;
2402265Sjwahlig 
2412265Sjwahlig 	r4flushpages(rp, cr);
2422265Sjwahlig 
2432265Sjwahlig 	vp = RTOV4(rp);
2440Sstevel@tonic-gate 
2450Sstevel@tonic-gate 	/*
2460Sstevel@tonic-gate 	 * Free any held caches which may be
2470Sstevel@tonic-gate 	 * associated with this rnode.
2480Sstevel@tonic-gate 	 */
2490Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2500Sstevel@tonic-gate 	contents = rp->r_symlink.contents;
2510Sstevel@tonic-gate 	size = rp->r_symlink.size;
2520Sstevel@tonic-gate 	rp->r_symlink.contents = NULL;
2530Sstevel@tonic-gate 	vsp = rp->r_secattr;
2540Sstevel@tonic-gate 	rp->r_secattr = NULL;
2550Sstevel@tonic-gate 	xattr = rp->r_xattr_dir;
2560Sstevel@tonic-gate 	rp->r_xattr_dir = NULL;
2570Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2580Sstevel@tonic-gate 
2590Sstevel@tonic-gate 	/*
2600Sstevel@tonic-gate 	 * Free the access cache entries.
2610Sstevel@tonic-gate 	 */
2620Sstevel@tonic-gate 	(void) nfs4_access_purge_rp(rp);
2630Sstevel@tonic-gate 
2640Sstevel@tonic-gate 	/*
2650Sstevel@tonic-gate 	 * Free the readdir cache entries.
2660Sstevel@tonic-gate 	 */
2670Sstevel@tonic-gate 	nfs4_purge_rddir_cache(vp);
2680Sstevel@tonic-gate 
2690Sstevel@tonic-gate 	/*
2700Sstevel@tonic-gate 	 * Free the symbolic link cache.
2710Sstevel@tonic-gate 	 */
2720Sstevel@tonic-gate 	if (contents != NULL) {
2730Sstevel@tonic-gate 
2740Sstevel@tonic-gate 		kmem_free((void *)contents, size);
2750Sstevel@tonic-gate 	}
2760Sstevel@tonic-gate 
2770Sstevel@tonic-gate 	/*
2780Sstevel@tonic-gate 	 * Free any cached ACL.
2790Sstevel@tonic-gate 	 */
2800Sstevel@tonic-gate 	if (vsp != NULL)
2810Sstevel@tonic-gate 		nfs4_acl_free_cache(vsp);
2820Sstevel@tonic-gate 
2830Sstevel@tonic-gate 	/*
2840Sstevel@tonic-gate 	 * Release the cached xattr_dir
2850Sstevel@tonic-gate 	 */
2860Sstevel@tonic-gate 	if (xattr != NULL)
2870Sstevel@tonic-gate 		VN_RELE(xattr);
2880Sstevel@tonic-gate }
2890Sstevel@tonic-gate 
2900Sstevel@tonic-gate /*
2910Sstevel@tonic-gate  * We have seen a case that the fh passed in is for "." which
2920Sstevel@tonic-gate  * should be a VROOT node, however, the fh is different from the
2930Sstevel@tonic-gate  * root fh stored in the mntinfo4_t. The invalid fh might be
2940Sstevel@tonic-gate  * from a misbehaved server and will panic the client system at
2950Sstevel@tonic-gate  * a later time. To avoid the panic, we drop the bad fh, use
2960Sstevel@tonic-gate  * the root fh from mntinfo4_t, and print an error message
2970Sstevel@tonic-gate  * for attention.
2980Sstevel@tonic-gate  */
nfs4_sharedfh_t *
badrootfh_check(nfs4_sharedfh_t *fh, nfs4_fname_t *nm, mntinfo4_t *mi,
    int *wasbad)
{
	char *s;

	*wasbad = 0;
	s = fn_name(nm);	/* MAXNAMELEN buffer; freed below */
	ASSERT(strcmp(s, "..") != 0);

	/*
	 * A fh for "." must match the root fh saved in the mntinfo4_t
	 * at mount time.  If it doesn't, the server handed back a bad
	 * root fh; substitute mi_rootfh and tell the caller via
	 * *wasbad so it can skip caching attributes for it.
	 */
	if ((s[0] == '.' && s[1] == '\0') && fh &&
	    !SFH4_SAME(mi->mi_rootfh, fh)) {
#ifdef DEBUG
		nfs4_fhandle_t fhandle;

		zcmn_err(mi->mi_zone->zone_id, CE_WARN,
		    "Server %s returns a different "
		    "root filehandle for the path %s:",
		    mi->mi_curr_serv->sv_hostname,
		    mi->mi_curr_serv->sv_path);

		/* print the bad fh */
		fhandle.fh_len = fh->sfh_fh.nfs_fh4_len;
		bcopy(fh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);

		/* print mi_rootfh */
		fhandle.fh_len = mi->mi_rootfh->sfh_fh.nfs_fh4_len;
		bcopy(mi->mi_rootfh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);
#endif
		/* use mi_rootfh instead; fh will be rele by the caller */
		fh = mi->mi_rootfh;
		*wasbad = 1;
	}

	kmem_free(s, MAXNAMELEN);
	return (fh);
}
3400Sstevel@tonic-gate 
/*
 * Cache the attributes from garp (if any) on vp after a node lookup or
 * creation.  newnode is non-zero when vp was just created by
 * make_rnode4() under the hash bucket lock.
 *
 * Lock contract: called with rtable4[index].r_lock held; EVERY path
 * below releases it (note the rw_exit() in each branch) before
 * returning, so the caller must not touch the bucket lock afterward.
 */
void
r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
    hrtime_t t, cred_t *cr, int index)
{
	int is_stub;
	vattr_t *attr;
	/*
	 * Don't add to attrcache if time overflow, but
	 * no need to check because either attr is null or the time
	 * values in it were processed by nfs4_time_ntov(), which checks
	 * for time overflows.
	 */
	attr = garp ? &garp->n4g_va : NULL;

	if (attr) {
		if (!newnode) {
			/*
			 * Existing node: drop the bucket lock first,
			 * then let nfs4_attr_cache() do full
			 * consistency checking against the old attrs.
			 */
			rw_exit(&rtable4[index].r_lock);
#ifdef DEBUG
			if (vp->v_type != attr->va_type &&
			    vp->v_type != VNON && attr->va_type != VNON) {
				zcmn_err(VTOMI4(vp)->mi_zone->zone_id, CE_WARN,
				    "makenfs4node: type (%d) doesn't "
				    "match type of found node at %p (%d)",
				    attr->va_type, (void *)vp, vp->v_type);
			}
#endif
			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
		} else {
			rnode4_t *rp = VTOR4(vp);

			vp->v_type = attr->va_type;
			vp->v_rdev = attr->va_rdev;

			/*
			 * Turn this object into a "stub" object if we
			 * crossed an underlying server fs boundary.
			 * To make this check, during mount we save the
			 * fsid of the server object being mounted.
			 * Here we compare this object's server fsid
			 * with the fsid we saved at mount.  If they
			 * are different, we crossed server fs boundary.
			 *
			 * The stub type is set (or not) at rnode
			 * creation time and it never changes for life
			 * of the rnode.
			 *
			 * This stub will be for a mirror-mount, rather than
			 * a referral (the latter also sets R4SRVSTUB).
			 *
			 * The stub type is also set during RO failover,
			 * nfs4_remap_file().
			 *
			 * We don't bother with taking r_state_lock to
			 * set the stub type because this is a new rnode
			 * and we're holding the hash bucket r_lock RW_WRITER.
			 * No other thread could have obtained access
			 * to this rnode.
			 */
			is_stub = 0;
			if (garp->n4g_fsid_valid) {
				fattr4_fsid ga_fsid = garp->n4g_fsid;
				servinfo4_t *svp = rp->r_server;

				rp->r_srv_fsid = ga_fsid;

				/* sv_fsid is read under sv_lock */
				(void) nfs_rw_enter_sig(&svp->sv_lock,
				    RW_READER, 0);
				if (!FATTR4_FSID_EQ(&ga_fsid, &svp->sv_fsid))
					is_stub = 1;
				nfs_rw_exit(&svp->sv_lock);
			}

			if (is_stub)
				r4_stub_mirrormount(rp);
			else
				r4_stub_none(rp);

			/* Can not cache partial attr */
			if (attr->va_mask == AT_ALL)
				nfs4_attrcache_noinval(vp, garp, t);
			else
				PURGE_ATTRCACHE4(vp);

			rw_exit(&rtable4[index].r_lock);
		}
	} else {
		/* No attributes: a brand-new node has nothing to cache. */
		if (newnode) {
			PURGE_ATTRCACHE4(vp);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}
4330Sstevel@tonic-gate 
4340Sstevel@tonic-gate /*
4350Sstevel@tonic-gate  * Find or create an rnode based primarily on filehandle.  To be
4360Sstevel@tonic-gate  * used when dvp (vnode for parent directory) is not available;
4370Sstevel@tonic-gate  * otherwise, makenfs4node() should be used.
4380Sstevel@tonic-gate  *
4390Sstevel@tonic-gate  * The nfs4_fname_t argument *npp is consumed and nulled out.
4400Sstevel@tonic-gate  */
4410Sstevel@tonic-gate 
vnode_t *
makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
    nfs4_fname_t **npp, nfs4_ga_res_t *garp,
    mntinfo4_t *mi, cred_t *cr, hrtime_t t)
{
	vfs_t *vfsp = mi->mi_vfsp;
	int newnode = 0;
	vnode_t *vp;
	rnode4_t *rp;
	svnode_t *svp;
	nfs4_fname_t *name, *svpname;
	int index;

	ASSERT(npp && *npp);
	name = *npp;
	*npp = NULL;	/* the caller's fname reference is consumed here */

	index = rtable4hash(sfh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	/* make_rnode4() may upgrade the bucket lock to exclusive */
	vp = make_rnode4(sfh, &rtable4[index], vfsp,
	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	if (newnode) {
		/* fresh rnode: init its shadow-vnode list and names */
		svp->sv_forw = svp->sv_back = svp;
		svp->sv_name = name;
		if (psfh != NULL)
			sfh4_hold(psfh);
		svp->sv_dfh = psfh;
	} else {
		/*
		 * It is possible that due to a server
		 * side rename fnames have changed.
		 * update the fname here.
		 */
		mutex_enter(&rp->r_svlock);
		svpname = svp->sv_name;
		if (svp->sv_name != name) {
			svp->sv_name = name;
			mutex_exit(&rp->r_svlock);
			/* drop the stale name (after releasing r_svlock) */
			fn_rele(&svpname);
		} else {
			mutex_exit(&rp->r_svlock);
			/* name unchanged; drop the reference we consumed */
			fn_rele(&name);
		}
	}

	/* r4_do_attrcache() releases the hash bucket lock */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}
4970Sstevel@tonic-gate 
4980Sstevel@tonic-gate /*
4990Sstevel@tonic-gate  * Find or create a vnode for the given filehandle, filesystem, parent, and
5000Sstevel@tonic-gate  * name.  The reference to nm is consumed, so the caller must first do an
5010Sstevel@tonic-gate  * fn_hold() if it wants to continue using nm after this call.
5020Sstevel@tonic-gate  */
5030Sstevel@tonic-gate vnode_t *
makenfs4node(nfs4_sharedfh_t * fh,nfs4_ga_res_t * garp,struct vfs * vfsp,hrtime_t t,cred_t * cr,vnode_t * dvp,nfs4_fname_t * nm)5040Sstevel@tonic-gate makenfs4node(nfs4_sharedfh_t *fh, nfs4_ga_res_t *garp, struct vfs *vfsp,
5055302Sth199096     hrtime_t t, cred_t *cr, vnode_t *dvp, nfs4_fname_t *nm)
5060Sstevel@tonic-gate {
5070Sstevel@tonic-gate 	vnode_t *vp;
5080Sstevel@tonic-gate 	int newnode;
5090Sstevel@tonic-gate 	int index;
5100Sstevel@tonic-gate 	mntinfo4_t *mi = VFTOMI4(vfsp);
5110Sstevel@tonic-gate 	int had_badfh = 0;
5120Sstevel@tonic-gate 	rnode4_t *rp;
5130Sstevel@tonic-gate 
5140Sstevel@tonic-gate 	ASSERT(dvp != NULL);
5150Sstevel@tonic-gate 
5160Sstevel@tonic-gate 	fh = badrootfh_check(fh, nm, mi, &had_badfh);
5170Sstevel@tonic-gate 
5180Sstevel@tonic-gate 	index = rtable4hash(fh);
5190Sstevel@tonic-gate 	rw_enter(&rtable4[index].r_lock, RW_READER);
5200Sstevel@tonic-gate 
5210Sstevel@tonic-gate 	/*
5220Sstevel@tonic-gate 	 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
5230Sstevel@tonic-gate 	 */
5240Sstevel@tonic-gate 	vp = make_rnode4(fh, &rtable4[index], vfsp, nfs4_vnodeops,
5250Sstevel@tonic-gate 	    nfs4_putapage, &newnode, cr);
5260Sstevel@tonic-gate 
5270Sstevel@tonic-gate 	rp = VTOR4(vp);
5280Sstevel@tonic-gate 	sv_activate(&vp, dvp, &nm, newnode);
5290Sstevel@tonic-gate 	if (dvp->v_flag & V_XATTRDIR) {
5300Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
5310Sstevel@tonic-gate 		rp->r_flags |= R4ISXATTR;
5320Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
5330Sstevel@tonic-gate 	}
5340Sstevel@tonic-gate 
5350Sstevel@tonic-gate 	/* if getting a bad file handle, do not cache the attributes. */
5360Sstevel@tonic-gate 	if (had_badfh) {
5370Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
5380Sstevel@tonic-gate 		return (vp);
5390Sstevel@tonic-gate 	}
5400Sstevel@tonic-gate 
5410Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
5420Sstevel@tonic-gate 	r4_do_attrcache(vp, garp, newnode, t, cr, index);
5430Sstevel@tonic-gate 	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);
5440Sstevel@tonic-gate 
5450Sstevel@tonic-gate 	return (vp);
5460Sstevel@tonic-gate }
5470Sstevel@tonic-gate 
5480Sstevel@tonic-gate /*
5490Sstevel@tonic-gate  * Hash on address of filehandle object.
5500Sstevel@tonic-gate  * XXX totally untuned.
5510Sstevel@tonic-gate  */
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate int
rtable4hash(nfs4_sharedfh_t * fh)5540Sstevel@tonic-gate rtable4hash(nfs4_sharedfh_t *fh)
5550Sstevel@tonic-gate {
5560Sstevel@tonic-gate 	return (((uintptr_t)fh / sizeof (*fh)) & rtable4mask);
5570Sstevel@tonic-gate }
5580Sstevel@tonic-gate 
5590Sstevel@tonic-gate /*
5600Sstevel@tonic-gate  * Find or create the vnode for the given filehandle and filesystem.
5610Sstevel@tonic-gate  * *newnode is set to zero if the vnode already existed; non-zero if it had
5620Sstevel@tonic-gate  * to be created.
5630Sstevel@tonic-gate  *
5640Sstevel@tonic-gate  * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
5650Sstevel@tonic-gate  */
5660Sstevel@tonic-gate 
5670Sstevel@tonic-gate static vnode_t *
make_rnode4(nfs4_sharedfh_t * fh,r4hashq_t * rhtp,struct vfs * vfsp,struct vnodeops * vops,int (* putapage)(vnode_t *,page_t *,u_offset_t *,size_t *,int,cred_t *),int * newnode,cred_t * cr)5680Sstevel@tonic-gate make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
5690Sstevel@tonic-gate     struct vnodeops *vops,
5700Sstevel@tonic-gate     int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
5710Sstevel@tonic-gate     int *newnode, cred_t *cr)
5720Sstevel@tonic-gate {
5730Sstevel@tonic-gate 	rnode4_t *rp;
5740Sstevel@tonic-gate 	rnode4_t *trp;
5750Sstevel@tonic-gate 	vnode_t *vp;
5760Sstevel@tonic-gate 	mntinfo4_t *mi;
5770Sstevel@tonic-gate 
5780Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&rhtp->r_lock));
5790Sstevel@tonic-gate 
5800Sstevel@tonic-gate 	mi = VFTOMI4(vfsp);
5810Sstevel@tonic-gate 
5820Sstevel@tonic-gate start:
5830Sstevel@tonic-gate 	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
5840Sstevel@tonic-gate 		vp = RTOV4(rp);
5850Sstevel@tonic-gate 		*newnode = 0;
5860Sstevel@tonic-gate 		return (vp);
5870Sstevel@tonic-gate 	}
5880Sstevel@tonic-gate 	rw_exit(&rhtp->r_lock);
5890Sstevel@tonic-gate 
5900Sstevel@tonic-gate 	mutex_enter(&rp4freelist_lock);
5910Sstevel@tonic-gate 
5920Sstevel@tonic-gate 	if (rp4freelist != NULL && rnode4_new >= nrnode) {
5930Sstevel@tonic-gate 		rp = rp4freelist;
5940Sstevel@tonic-gate 		rp4_rmfree(rp);
5950Sstevel@tonic-gate 		mutex_exit(&rp4freelist_lock);
5960Sstevel@tonic-gate 
5970Sstevel@tonic-gate 		vp = RTOV4(rp);
5980Sstevel@tonic-gate 
5990Sstevel@tonic-gate 		if (rp->r_flags & R4HASHED) {
6000Sstevel@tonic-gate 			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
6010Sstevel@tonic-gate 			mutex_enter(&vp->v_lock);
6020Sstevel@tonic-gate 			if (vp->v_count > 1) {
6030Sstevel@tonic-gate 				vp->v_count--;
6040Sstevel@tonic-gate 				mutex_exit(&vp->v_lock);
6050Sstevel@tonic-gate 				rw_exit(&rp->r_hashq->r_lock);
6060Sstevel@tonic-gate 				rw_enter(&rhtp->r_lock, RW_READER);
6070Sstevel@tonic-gate 				goto start;
6080Sstevel@tonic-gate 			}
6090Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
6100Sstevel@tonic-gate 			rp4_rmhash_locked(rp);
6110Sstevel@tonic-gate 			rw_exit(&rp->r_hashq->r_lock);
6120Sstevel@tonic-gate 		}
6130Sstevel@tonic-gate 
6140Sstevel@tonic-gate 		r4inactive(rp, cr);
6150Sstevel@tonic-gate 
6160Sstevel@tonic-gate 		mutex_enter(&vp->v_lock);
6170Sstevel@tonic-gate 		if (vp->v_count > 1) {
6180Sstevel@tonic-gate 			vp->v_count--;
6190Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
6200Sstevel@tonic-gate 			rw_enter(&rhtp->r_lock, RW_READER);
6210Sstevel@tonic-gate 			goto start;
6220Sstevel@tonic-gate 		}
6230Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
6240Sstevel@tonic-gate 		vn_invalid(vp);
6250Sstevel@tonic-gate 
6260Sstevel@tonic-gate 		/*
6270Sstevel@tonic-gate 		 * destroy old locks before bzero'ing and
6280Sstevel@tonic-gate 		 * recreating the locks below.
6290Sstevel@tonic-gate 		 */
6300Sstevel@tonic-gate 		uninit_rnode4(rp);
6310Sstevel@tonic-gate 
6320Sstevel@tonic-gate 		/*
6330Sstevel@tonic-gate 		 * Make sure that if rnode is recycled then
6340Sstevel@tonic-gate 		 * VFS count is decremented properly before
6350Sstevel@tonic-gate 		 * reuse.
6360Sstevel@tonic-gate 		 */
6370Sstevel@tonic-gate 		VFS_RELE(vp->v_vfsp);
6380Sstevel@tonic-gate 		vn_reinit(vp);
6390Sstevel@tonic-gate 	} else {
6400Sstevel@tonic-gate 		vnode_t *new_vp;
6410Sstevel@tonic-gate 
6420Sstevel@tonic-gate 		mutex_exit(&rp4freelist_lock);
6430Sstevel@tonic-gate 
6440Sstevel@tonic-gate 		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
6450Sstevel@tonic-gate 		new_vp = vn_alloc(KM_SLEEP);
6460Sstevel@tonic-gate 
6470Sstevel@tonic-gate 		atomic_add_long((ulong_t *)&rnode4_new, 1);
6480Sstevel@tonic-gate #ifdef DEBUG
6490Sstevel@tonic-gate 		clstat4_debug.nrnode.value.ui64++;
6500Sstevel@tonic-gate #endif
6510Sstevel@tonic-gate 		vp = new_vp;
6520Sstevel@tonic-gate 	}
6530Sstevel@tonic-gate 
6540Sstevel@tonic-gate 	bzero(rp, sizeof (*rp));
6550Sstevel@tonic-gate 	rp->r_vnode = vp;
6560Sstevel@tonic-gate 	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
6570Sstevel@tonic-gate 	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
6580Sstevel@tonic-gate 	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
6590Sstevel@tonic-gate 	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
6600Sstevel@tonic-gate 	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
6610Sstevel@tonic-gate 	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
6620Sstevel@tonic-gate 	rp->created_v4 = 0;
6630Sstevel@tonic-gate 	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
6640Sstevel@tonic-gate 	    offsetof(nfs4_open_stream_t, os_node));
6650Sstevel@tonic-gate 	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
6660Sstevel@tonic-gate 	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
6670Sstevel@tonic-gate 	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
6680Sstevel@tonic-gate 	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
6690Sstevel@tonic-gate 	rp->r_flags = R4READDIRWATTR;
6700Sstevel@tonic-gate 	rp->r_fh = fh;
6710Sstevel@tonic-gate 	rp->r_hashq = rhtp;
6720Sstevel@tonic-gate 	sfh4_hold(rp->r_fh);
6730Sstevel@tonic-gate 	rp->r_server = mi->mi_curr_serv;
6740Sstevel@tonic-gate 	rp->r_deleg_type = OPEN_DELEGATE_NONE;
6750Sstevel@tonic-gate 	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
6760Sstevel@tonic-gate 	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);
6770Sstevel@tonic-gate 
6780Sstevel@tonic-gate 	rddir4_cache_create(rp);
6790Sstevel@tonic-gate 	rp->r_putapage = putapage;
6800Sstevel@tonic-gate 	vn_setops(vp, vops);
6810Sstevel@tonic-gate 	vp->v_data = (caddr_t)rp;
6820Sstevel@tonic-gate 	vp->v_vfsp = vfsp;
6830Sstevel@tonic-gate 	VFS_HOLD(vfsp);
6840Sstevel@tonic-gate 	vp->v_type = VNON;
685*11888SPavel.Filipensky@Sun.COM 	vp->v_flag |= VMODSORT;
6860Sstevel@tonic-gate 	if (isrootfh(fh, rp))
6870Sstevel@tonic-gate 		vp->v_flag = VROOT;
6880Sstevel@tonic-gate 	vn_exists(vp);
6890Sstevel@tonic-gate 
6900Sstevel@tonic-gate 	/*
6910Sstevel@tonic-gate 	 * There is a race condition if someone else
6920Sstevel@tonic-gate 	 * alloc's the rnode while no locks are held, so we
6930Sstevel@tonic-gate 	 * check again and recover if found.
6940Sstevel@tonic-gate 	 */
6950Sstevel@tonic-gate 	rw_enter(&rhtp->r_lock, RW_WRITER);
6960Sstevel@tonic-gate 	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
6970Sstevel@tonic-gate 		vp = RTOV4(trp);
6980Sstevel@tonic-gate 		*newnode = 0;
6990Sstevel@tonic-gate 		rw_exit(&rhtp->r_lock);
7000Sstevel@tonic-gate 		rp4_addfree(rp, cr);
7010Sstevel@tonic-gate 		rw_enter(&rhtp->r_lock, RW_READER);
7020Sstevel@tonic-gate 		return (vp);
7030Sstevel@tonic-gate 	}
7040Sstevel@tonic-gate 	rp4_addhash(rp);
7050Sstevel@tonic-gate 	*newnode = 1;
7060Sstevel@tonic-gate 	return (vp);
7070Sstevel@tonic-gate }
7080Sstevel@tonic-gate 
/*
 * Release all state held inside an rnode (open streams, readdir cache,
 * shadow vnode state, filehandle reference, locks and CVs) in
 * preparation for either recycling it (make_rnode4) or freeing it
 * (destroy_rnode4).
 *
 * Caller must guarantee the rnode is completely idle: its vnode holds
 * the last reference and the rnode is neither hashed nor on the
 * freelist (asserted below).
 */
static void
uninit_rnode4(rnode4_t *rp)
{
	vnode_t *vp = RTOV4(rp);

	ASSERT(rp != NULL);
	ASSERT(vp != NULL);
	ASSERT(vp->v_count == 1);
	ASSERT(rp->r_count == 0);
	ASSERT(rp->r_mapcnt == 0);
	if (rp->r_flags & R4LODANGLERS) {
		/* flush any lock owners still hanging off this rnode */
		nfs4_flush_lock_owners(rp);
	}
	ASSERT(rp->r_lo_head.lo_next_rnode == &rp->r_lo_head);
	ASSERT(rp->r_lo_head.lo_prev_rnode == &rp->r_lo_head);
	ASSERT(!(rp->r_flags & R4HASHED));
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
	nfs4_clear_open_streams(rp);
	list_destroy(&rp->r_open_streams);

	/*
	 * Destroy the rddir cache first since we need to grab the r_statelock.
	 */
	mutex_enter(&rp->r_statelock);
	rddir4_cache_destroy(rp);
	mutex_exit(&rp->r_statelock);
	sv_uninit(&rp->r_svnode);
	sfh4_rele(&rp->r_fh);
	/*
	 * NOTE(review): r_svlock (initialized in make_rnode4) is not
	 * destroyed here — presumably harmless for adaptive mutexes,
	 * but worth confirming this asymmetry is intentional.
	 */
	nfs_rw_destroy(&rp->r_rwlock);
	nfs_rw_destroy(&rp->r_lkserlock);
	mutex_destroy(&rp->r_statelock);
	mutex_destroy(&rp->r_statev4_lock);
	mutex_destroy(&rp->r_os_lock);
	cv_destroy(&rp->r_cv);
	cv_destroy(&rp->r_commit.c_cv);
	nfs_rw_destroy(&rp->r_deleg_recall_lock);
	if (rp->r_flags & R4DELMAPLIST)
		list_destroy(&rp->r_indelmap);
}
7480Sstevel@tonic-gate 
7490Sstevel@tonic-gate /*
7500Sstevel@tonic-gate  * Put an rnode on the free list.
7510Sstevel@tonic-gate  *
7520Sstevel@tonic-gate  * Rnodes which were allocated above and beyond the normal limit
7530Sstevel@tonic-gate  * are immediately freed.
7540Sstevel@tonic-gate  */
void
rp4_addfree(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	vnode_t *xattr;
	struct vfs *vfsp;

	vp = RTOV4(rp);
	ASSERT(vp->v_count >= 1);
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);

	/*
	 * If we have too many rnodes allocated and there are no
	 * references to this rnode, or if the rnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the rnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
#ifdef DEBUG
	    (nfs4_rnode_nofreelist != 0) ||
#endif
	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Someone else grabbed a reference while
				 * we weren't holding v_lock; just drop
				 * ours and let that holder dispose of
				 * the rnode.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				return;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		/*
		 * Make sure we don't have a delegation on this rnode
		 * before destroying it.
		 */
		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
			(void) nfs4delegreturn(rp,
			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		}

		r4inactive(rp, cr);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * rnode is not in the rnode hash queues; one
		 * way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the rnode was marked
		 * with R4DIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to r4inactive.  The i/o may have been completed,
		 * thus allowing r4inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the rnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the rnode or placing it on the
		 * rnode freelist.  If there are no other references,
		 * then the rnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		destroy_rnode4(rp);
		return;
	}

	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the rnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this rnode to the freelist.
	 */
again:
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&rp->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Make sure we don't put an rnode with a delegation
	 * on the free list.
	 */
	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
		/* delegreturn can block; drop the hash lock and retry */
		rw_exit(&rp->r_hashq->r_lock);
		(void) nfs4delegreturn(rp,
		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		goto again;
	}

	/*
	 * Now that we have the hash queue lock, and we know there
	 * are not anymore references on the vnode, check to make
	 * sure there aren't any open streams still on the rnode.
	 * If so, drop the hash queue lock, remove the open streams,
	 * and recheck the v_count.
	 */
	mutex_enter(&rp->r_os_lock);
	if (list_head(&rp->r_open_streams) != NULL) {
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);
		if (nfs_zone() != VTOMI4(vp)->mi_zone)
			nfs4_clear_open_streams(rp);
		else
			(void) nfs4close_all(vp, cr);
		goto again;
	}
	mutex_exit(&rp->r_os_lock);

	/*
	 * Before we put it on the freelist, make sure there are no pages.
	 * If there are, flush and commit all of the dirty and
	 * uncommitted pages, assuming the file system isn't read only.
	 */
	if (!(vp->v_vfsp->vfs_flag & VFS_RDONLY) && nfs4_dross_pages(vp)) {
		rw_exit(&rp->r_hashq->r_lock);
		r4flushpages(rp, cr);
		goto again;
	}

	/*
	 * Before we put it on the freelist, make sure there is no
	 * active xattr directory cached, the freelist will not
	 * have its entries r4inactive'd if there is still an active
	 * rnode, thus nothing in the freelist can hold another
	 * rnode active.
	 */
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * If there is no cached data or metadata for this file, then
	 * put the rnode on the front of the freelist so that it will
	 * be reused before other rnodes which may have cached data or
	 * metadata associated with them.
	 */
	mutex_enter(&rp4freelist_lock);
	if (rp4freelist == NULL) {
		rp->r_freef = rp;
		rp->r_freeb = rp;
		rp4freelist = rp;
	} else {
		/* insert at the tail of the circular freelist */
		rp->r_freef = rp4freelist;
		rp->r_freeb = rp4freelist->r_freeb;
		rp4freelist->r_freeb->r_freef = rp;
		rp4freelist->r_freeb = rp;
		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
		    rp->r_symlink.contents == NULL && rp->r_secattr == NULL)
			rp4freelist = rp;
	}
	mutex_exit(&rp4freelist_lock);

	rw_exit(&rp->r_hashq->r_lock);

	/* release the cached xattr dir outside all locks */
	if (xattr)
		VN_RELE(xattr);
}
9350Sstevel@tonic-gate 
9360Sstevel@tonic-gate /*
9370Sstevel@tonic-gate  * Remove an rnode from the free list.
9380Sstevel@tonic-gate  *
9390Sstevel@tonic-gate  * The caller must be holding rp4freelist_lock and the rnode
9400Sstevel@tonic-gate  * must be on the freelist.
9410Sstevel@tonic-gate  */
9420Sstevel@tonic-gate static void
rp4_rmfree(rnode4_t * rp)9430Sstevel@tonic-gate rp4_rmfree(rnode4_t *rp)
9440Sstevel@tonic-gate {
9450Sstevel@tonic-gate 
9460Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&rp4freelist_lock));
9470Sstevel@tonic-gate 	ASSERT(rp->r_freef != NULL && rp->r_freeb != NULL);
9480Sstevel@tonic-gate 
9490Sstevel@tonic-gate 	if (rp == rp4freelist) {
9500Sstevel@tonic-gate 		rp4freelist = rp->r_freef;
9510Sstevel@tonic-gate 		if (rp == rp4freelist)
9520Sstevel@tonic-gate 			rp4freelist = NULL;
9530Sstevel@tonic-gate 	}
9540Sstevel@tonic-gate 	rp->r_freeb->r_freef = rp->r_freef;
9550Sstevel@tonic-gate 	rp->r_freef->r_freeb = rp->r_freeb;
9560Sstevel@tonic-gate 
9570Sstevel@tonic-gate 	rp->r_freef = rp->r_freeb = NULL;
9580Sstevel@tonic-gate }
9590Sstevel@tonic-gate 
9600Sstevel@tonic-gate /*
9610Sstevel@tonic-gate  * Put a rnode in the hash table.
9620Sstevel@tonic-gate  *
9630Sstevel@tonic-gate  * The caller must be holding the exclusive hash queue lock
9640Sstevel@tonic-gate  */
9650Sstevel@tonic-gate void
rp4_addhash(rnode4_t * rp)9660Sstevel@tonic-gate rp4_addhash(rnode4_t *rp)
9670Sstevel@tonic-gate {
9680Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
9690Sstevel@tonic-gate 	ASSERT(!(rp->r_flags & R4HASHED));
9700Sstevel@tonic-gate 
9710Sstevel@tonic-gate #ifdef DEBUG
9720Sstevel@tonic-gate 	r4_dup_check(rp, RTOV4(rp)->v_vfsp);
9730Sstevel@tonic-gate #endif
9740Sstevel@tonic-gate 
9750Sstevel@tonic-gate 	rp->r_hashf = rp->r_hashq->r_hashf;
9760Sstevel@tonic-gate 	rp->r_hashq->r_hashf = rp;
9770Sstevel@tonic-gate 	rp->r_hashb = (rnode4_t *)rp->r_hashq;
9780Sstevel@tonic-gate 	rp->r_hashf->r_hashb = rp;
9790Sstevel@tonic-gate 
9800Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
9810Sstevel@tonic-gate 	rp->r_flags |= R4HASHED;
9820Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
9830Sstevel@tonic-gate }
9840Sstevel@tonic-gate 
9850Sstevel@tonic-gate /*
9860Sstevel@tonic-gate  * Remove a rnode from the hash table.
9870Sstevel@tonic-gate  *
9880Sstevel@tonic-gate  * The caller must be holding the hash queue lock.
9890Sstevel@tonic-gate  */
9900Sstevel@tonic-gate void
rp4_rmhash_locked(rnode4_t * rp)9910Sstevel@tonic-gate rp4_rmhash_locked(rnode4_t *rp)
9920Sstevel@tonic-gate {
9930Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
9940Sstevel@tonic-gate 	ASSERT(rp->r_flags & R4HASHED);
9950Sstevel@tonic-gate 
9960Sstevel@tonic-gate 	rp->r_hashb->r_hashf = rp->r_hashf;
9970Sstevel@tonic-gate 	rp->r_hashf->r_hashb = rp->r_hashb;
9980Sstevel@tonic-gate 
9990Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
10000Sstevel@tonic-gate 	rp->r_flags &= ~R4HASHED;
10010Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
10020Sstevel@tonic-gate }
10030Sstevel@tonic-gate 
10040Sstevel@tonic-gate /*
10050Sstevel@tonic-gate  * Remove a rnode from the hash table.
10060Sstevel@tonic-gate  *
10070Sstevel@tonic-gate  * The caller must not be holding the hash queue lock.
10080Sstevel@tonic-gate  */
10090Sstevel@tonic-gate void
rp4_rmhash(rnode4_t * rp)10100Sstevel@tonic-gate rp4_rmhash(rnode4_t *rp)
10110Sstevel@tonic-gate {
10120Sstevel@tonic-gate 	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
10130Sstevel@tonic-gate 	rp4_rmhash_locked(rp);
10140Sstevel@tonic-gate 	rw_exit(&rp->r_hashq->r_lock);
10150Sstevel@tonic-gate }
10160Sstevel@tonic-gate 
10170Sstevel@tonic-gate /*
10180Sstevel@tonic-gate  * Lookup a rnode by fhandle.  Ignores rnodes that had failed recovery.
10190Sstevel@tonic-gate  * Returns NULL if no match.  If an rnode is returned, the reference count
10200Sstevel@tonic-gate  * on the master vnode is incremented.
10210Sstevel@tonic-gate  *
10220Sstevel@tonic-gate  * The caller must be holding the hash queue lock, either shared or exclusive.
10230Sstevel@tonic-gate  */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
		vp = RTOV4(rp);
		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {

			/* skip entries that failed recovery (see header) */
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4RECOVERR) {
				mutex_exit(&rp->r_statelock);
				continue;
			}
			mutex_exit(&rp->r_statelock);
#ifdef DEBUG
			r4_dup_check(rp, vfsp);
#endif
			if (rp->r_freef != NULL) {
				mutex_enter(&rp4freelist_lock);
				/*
				 * If the rnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 *
				 * (r_freef is rechecked under
				 * rp4freelist_lock: it may have changed
				 * since the unlocked test above.)
				 */
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
				} else {
					mutex_exit(&rp4freelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);

			/*
			 * if root vnode, set v_flag to indicate that
			 */
			if (isrootfh(fh, rp)) {
				if (!(vp->v_flag & VROOT)) {
					mutex_enter(&vp->v_lock);
					vp->v_flag |= VROOT;
					mutex_exit(&vp->v_lock);
				}
			}
			return (rp);
		}
	}
	return (NULL);
}
10780Sstevel@tonic-gate 
10790Sstevel@tonic-gate /*
10800Sstevel@tonic-gate  * Lookup an rnode by fhandle. Just a wrapper for r4find()
10810Sstevel@tonic-gate  * that assumes the caller hasn't already got the lock
10820Sstevel@tonic-gate  * on the hash bucket.
10830Sstevel@tonic-gate  */
10840Sstevel@tonic-gate rnode4_t *
r4find_unlocked(nfs4_sharedfh_t * fh,struct vfs * vfsp)10850Sstevel@tonic-gate r4find_unlocked(nfs4_sharedfh_t *fh, struct vfs *vfsp)
10860Sstevel@tonic-gate {
10870Sstevel@tonic-gate 	rnode4_t *rp;
10880Sstevel@tonic-gate 	int index;
10890Sstevel@tonic-gate 
10900Sstevel@tonic-gate 	index = rtable4hash(fh);
10910Sstevel@tonic-gate 	rw_enter(&rtable4[index].r_lock, RW_READER);
10920Sstevel@tonic-gate 	rp = r4find(&rtable4[index], fh, vfsp);
10930Sstevel@tonic-gate 	rw_exit(&rtable4[index].r_lock);
10940Sstevel@tonic-gate 
10950Sstevel@tonic-gate 	return (rp);
10960Sstevel@tonic-gate }
10970Sstevel@tonic-gate 
/*
 * Returns >0 if there is an active vnode belonging to this vfs in the
 * rtable4 cache.
 *
 * Several of these checks are done without holding the usual
 * locks.  This is safe because destroy_rtable4(), rp4_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any rnodes.
 */
11070Sstevel@tonic-gate int
check_rtable4(struct vfs * vfsp)11080Sstevel@tonic-gate check_rtable4(struct vfs *vfsp)
11090Sstevel@tonic-gate {
11100Sstevel@tonic-gate 	rnode4_t *rp;
11110Sstevel@tonic-gate 	vnode_t *vp;
111210368SThomas.Haynes@Sun.COM 	int busy = NFSV4_RTABLE4_OK;
11130Sstevel@tonic-gate 	int index;
11140Sstevel@tonic-gate 
11150Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
11160Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_READER);
11170Sstevel@tonic-gate 
11180Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
11190Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
11200Sstevel@tonic-gate 		    rp = rp->r_hashf) {
11210Sstevel@tonic-gate 
11220Sstevel@tonic-gate 			vp = RTOV4(rp);
11230Sstevel@tonic-gate 			if (vp->v_vfsp == vfsp) {
11240Sstevel@tonic-gate 				if (rp->r_freef == NULL) {
112510368SThomas.Haynes@Sun.COM 					busy = NFSV4_RTABLE4_NOT_FREE_LIST;
11260Sstevel@tonic-gate 				} else if (nfs4_has_pages(vp) &&
11275302Sth199096 				    (rp->r_flags & R4DIRTY)) {
112810368SThomas.Haynes@Sun.COM 					busy = NFSV4_RTABLE4_DIRTY_PAGES;
11290Sstevel@tonic-gate 				} else if (rp->r_count > 0) {
113010368SThomas.Haynes@Sun.COM 					busy = NFSV4_RTABLE4_POS_R_COUNT;
11310Sstevel@tonic-gate 				}
11320Sstevel@tonic-gate 
113310368SThomas.Haynes@Sun.COM 				if (busy != NFSV4_RTABLE4_OK) {
11340Sstevel@tonic-gate #ifdef DEBUG
11350Sstevel@tonic-gate 					char *path;
11360Sstevel@tonic-gate 
11370Sstevel@tonic-gate 					path = fn_path(rp->r_svnode.sv_name);
113810368SThomas.Haynes@Sun.COM 					DTRACE_NFSV4_3(rnode__e__debug,
113910368SThomas.Haynes@Sun.COM 					    int, busy, char *, path,
114010368SThomas.Haynes@Sun.COM 					    rnode4_t *, rp);
11410Sstevel@tonic-gate 					kmem_free(path, strlen(path)+1);
11420Sstevel@tonic-gate #endif
11430Sstevel@tonic-gate 					rw_exit(&rtable4[index].r_lock);
114410368SThomas.Haynes@Sun.COM 					return (busy);
11450Sstevel@tonic-gate 				}
11460Sstevel@tonic-gate 			}
11470Sstevel@tonic-gate 		}
11480Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
11490Sstevel@tonic-gate 	}
115010368SThomas.Haynes@Sun.COM 	return (busy);
11510Sstevel@tonic-gate }
11520Sstevel@tonic-gate 
11530Sstevel@tonic-gate /*
11540Sstevel@tonic-gate  * Destroy inactive vnodes from the hash queues which
11550Sstevel@tonic-gate  * belong to this vfs. All of the vnodes should be inactive.
11565302Sth199096  * It is essential that we destroy all rnodes in case of
11570Sstevel@tonic-gate  * forced unmount as well as in normal unmount case.
11580Sstevel@tonic-gate  */
11590Sstevel@tonic-gate 
void
destroy_rtable4(struct vfs *vfsp, cred_t *cr)
{
	int index;
	vnode_t *vp;
	rnode4_t *rp, *r_hashf, *rlist;

	rlist = NULL;

	/*
	 * Pass 1: walk every hash bucket; any rnode of this vfs that is
	 * sitting on the freelist is pulled off both the freelist and
	 * the hash chain.  Its (now unused) r_hashf pointer is reused to
	 * string the victims onto a private list (rlist).
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_WRITER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = r_hashf) {
			/* save the hash pointer before destroying */
			r_hashf = rp->r_hashf;

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				mutex_enter(&rp4freelist_lock);
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
					rp4_rmhash_locked(rp);
					rp->r_hashf = rlist;
					rlist = rp;
				} else
					mutex_exit(&rp4freelist_lock);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}

	/*
	 * Pass 2: with no hash-bucket locks held, release each
	 * collected rnode.
	 */
	for (rp = rlist; rp != NULL; rp = r_hashf) {
		r_hashf = rp->r_hashf;
		/*
		 * This call to rp4_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, cr);
	}
}
12030Sstevel@tonic-gate 
12040Sstevel@tonic-gate /*
12050Sstevel@tonic-gate  * This routine destroys all the resources of an rnode
12060Sstevel@tonic-gate  * and finally the rnode itself.
12070Sstevel@tonic-gate  */
12080Sstevel@tonic-gate static void
destroy_rnode4(rnode4_t * rp)12090Sstevel@tonic-gate destroy_rnode4(rnode4_t *rp)
12100Sstevel@tonic-gate {
12110Sstevel@tonic-gate 	vnode_t *vp;
12120Sstevel@tonic-gate 	vfs_t *vfsp;
12130Sstevel@tonic-gate 
12140Sstevel@tonic-gate 	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);
12150Sstevel@tonic-gate 
12160Sstevel@tonic-gate 	vp = RTOV4(rp);
12170Sstevel@tonic-gate 	vfsp = vp->v_vfsp;
12180Sstevel@tonic-gate 
12190Sstevel@tonic-gate 	uninit_rnode4(rp);
12200Sstevel@tonic-gate 	atomic_add_long((ulong_t *)&rnode4_new, -1);
12210Sstevel@tonic-gate #ifdef DEBUG
12220Sstevel@tonic-gate 	clstat4_debug.nrnode.value.ui64--;
12230Sstevel@tonic-gate #endif
12240Sstevel@tonic-gate 	kmem_cache_free(rnode4_cache, rp);
12250Sstevel@tonic-gate 	vn_invalid(vp);
12260Sstevel@tonic-gate 	vn_free(vp);
12270Sstevel@tonic-gate 	VFS_RELE(vfsp);
12280Sstevel@tonic-gate }
12290Sstevel@tonic-gate 
12300Sstevel@tonic-gate /*
12310Sstevel@tonic-gate  * Invalidate the attributes on all rnodes forcing the next getattr
12320Sstevel@tonic-gate  * to go over the wire.  Used to flush stale uid and gid mappings.
12330Sstevel@tonic-gate  * Maybe done on a per vfsp, or all rnodes (vfsp == NULL)
12340Sstevel@tonic-gate  */
12350Sstevel@tonic-gate void
nfs4_rnode_invalidate(struct vfs * vfsp)12360Sstevel@tonic-gate nfs4_rnode_invalidate(struct vfs *vfsp)
12370Sstevel@tonic-gate {
12380Sstevel@tonic-gate 	int index;
12390Sstevel@tonic-gate 	rnode4_t *rp;
12400Sstevel@tonic-gate 	vnode_t *vp;
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 	/*
12430Sstevel@tonic-gate 	 * Walk the hash queues looking for rnodes.
12440Sstevel@tonic-gate 	 */
12450Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
12460Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_READER);
12470Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
12480Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
12490Sstevel@tonic-gate 		    rp = rp->r_hashf) {
12500Sstevel@tonic-gate 			vp = RTOV4(rp);
12510Sstevel@tonic-gate 			if (vfsp != NULL && vp->v_vfsp != vfsp)
12520Sstevel@tonic-gate 				continue;
12530Sstevel@tonic-gate 
12540Sstevel@tonic-gate 			if (!mutex_tryenter(&rp->r_statelock))
12550Sstevel@tonic-gate 				continue;
12560Sstevel@tonic-gate 
12570Sstevel@tonic-gate 			/*
12580Sstevel@tonic-gate 			 * Expire the attributes by resetting the change
12590Sstevel@tonic-gate 			 * and attr timeout.
12600Sstevel@tonic-gate 			 */
12610Sstevel@tonic-gate 			rp->r_change = 0;
12620Sstevel@tonic-gate 			PURGE_ATTRCACHE4_LOCKED(rp);
12630Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
12640Sstevel@tonic-gate 		}
12650Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
12660Sstevel@tonic-gate 	}
12670Sstevel@tonic-gate }
12680Sstevel@tonic-gate 
/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 *
 * vfsp == NULL means flush every mounted NFSv4 filesystem; otherwise
 * only vnodes belonging to 'vfsp' are considered.  Pages are pushed
 * asynchronously (B_ASYNC) using credential 'cr'.
 */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp, **vplist;
	long num, cnt;

	/*
	 * Check to see whether there is anything to do.
	 */
	num = rnode4_new;
	if (num == 0)
		return;

	/*
	 * Allocate a slot for all currently active rnodes on the
	 * supposition that they all may need flushing.
	 * 'num' is a snapshot; rnodes created after this point may not
	 * fit in the list (handled by the cnt == num check below).
	 */
	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
	cnt = 0;

	/*
	 * Walk the hash queues looking for rnodes with page
	 * lists associated with them.  Make a list of these
	 * files.  Each selected vnode is held (VN_HOLD) so it
	 * stays valid after the bucket lock is dropped.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/*
			 * Don't bother sync'ing a vp if it
			 * is part of virtual swap device or
			 * if VFS is read-only
			 */
			if (IS_SWAPVP(vp) || vn_is_readonly(vp))
				continue;
			/*
			 * If flushing all mounted file systems or
			 * the vnode belongs to this vfs, has pages
			 * and is marked as either dirty or mmap'd,
			 * hold and add this vnode to the list of
			 * vnodes to flush.
			 */
			if ((vfsp == NULL || vp->v_vfsp == vfsp) &&
			    nfs4_has_pages(vp) &&
			    ((rp->r_flags & R4DIRTY) || rp->r_mapcnt > 0)) {
				VN_HOLD(vp);
				vplist[cnt++] = vp;
				if (cnt == num) {
					/*
					 * List is full (more rnodes exist
					 * than when 'num' was sampled);
					 * flush what we have so far.
					 */
					rw_exit(&rtable4[index].r_lock);
					goto toomany;
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
toomany:

	/*
	 * Flush and release all of the files on the list.
	 */
	while (cnt-- > 0) {
		vp = vplist[cnt];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
		VN_RELE(vp);
	}

	/*
	 * Free the space allocated to hold the list.
	 */
	kmem_free(vplist, num * sizeof (*vplist));
}
13490Sstevel@tonic-gate 
13500Sstevel@tonic-gate int
nfs4_free_data_reclaim(rnode4_t * rp)13510Sstevel@tonic-gate nfs4_free_data_reclaim(rnode4_t *rp)
13520Sstevel@tonic-gate {
13530Sstevel@tonic-gate 	char *contents;
13540Sstevel@tonic-gate 	vnode_t *xattr;
13550Sstevel@tonic-gate 	int size;
13560Sstevel@tonic-gate 	vsecattr_t *vsp;
13570Sstevel@tonic-gate 	int freed;
13580Sstevel@tonic-gate 	bool_t rdc = FALSE;
13590Sstevel@tonic-gate 
13600Sstevel@tonic-gate 	/*
13610Sstevel@tonic-gate 	 * Free any held caches which may
13620Sstevel@tonic-gate 	 * be associated with this rnode.
13630Sstevel@tonic-gate 	 */
13640Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
13650Sstevel@tonic-gate 	if (rp->r_dir != NULL)
13660Sstevel@tonic-gate 		rdc = TRUE;
13670Sstevel@tonic-gate 	contents = rp->r_symlink.contents;
13680Sstevel@tonic-gate 	size = rp->r_symlink.size;
13690Sstevel@tonic-gate 	rp->r_symlink.contents = NULL;
13700Sstevel@tonic-gate 	vsp = rp->r_secattr;
13710Sstevel@tonic-gate 	rp->r_secattr = NULL;
13720Sstevel@tonic-gate 	xattr = rp->r_xattr_dir;
13730Sstevel@tonic-gate 	rp->r_xattr_dir = NULL;
13740Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
13750Sstevel@tonic-gate 
13760Sstevel@tonic-gate 	/*
13770Sstevel@tonic-gate 	 * Free the access cache entries.
13780Sstevel@tonic-gate 	 */
13790Sstevel@tonic-gate 	freed = nfs4_access_purge_rp(rp);
13800Sstevel@tonic-gate 
13810Sstevel@tonic-gate 	if (rdc == FALSE && contents == NULL && vsp == NULL && xattr == NULL)
13820Sstevel@tonic-gate 		return (freed);
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	/*
13850Sstevel@tonic-gate 	 * Free the readdir cache entries, incompletely if we can't block.
13860Sstevel@tonic-gate 	 */
13870Sstevel@tonic-gate 	nfs4_purge_rddir_cache(RTOV4(rp));
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	/*
13900Sstevel@tonic-gate 	 * Free the symbolic link cache.
13910Sstevel@tonic-gate 	 */
13920Sstevel@tonic-gate 	if (contents != NULL) {
13930Sstevel@tonic-gate 
13940Sstevel@tonic-gate 		kmem_free((void *)contents, size);
13950Sstevel@tonic-gate 	}
13960Sstevel@tonic-gate 
13970Sstevel@tonic-gate 	/*
13980Sstevel@tonic-gate 	 * Free any cached ACL.
13990Sstevel@tonic-gate 	 */
14000Sstevel@tonic-gate 	if (vsp != NULL)
14010Sstevel@tonic-gate 		nfs4_acl_free_cache(vsp);
14020Sstevel@tonic-gate 
14030Sstevel@tonic-gate 	/*
14040Sstevel@tonic-gate 	 * Release the xattr directory vnode
14050Sstevel@tonic-gate 	 */
14060Sstevel@tonic-gate 	if (xattr != NULL)
14070Sstevel@tonic-gate 		VN_RELE(xattr);
14080Sstevel@tonic-gate 
14090Sstevel@tonic-gate 	return (1);
14100Sstevel@tonic-gate }
14110Sstevel@tonic-gate 
/*
 * Reclaim data caches from an active (still hashed) rnode.  Uses
 * mutex_tryenter so the kmem reclaim path never blocks on a busy
 * rnode; returns 0 immediately if the statelock is contended.
 *
 * Returns non-zero if anything was freed.
 */
static int
nfs4_active_data_reclaim(rnode4_t *rp)
{
	char *contents;
	vnode_t *xattr = NULL;
	int size;
	vsecattr_t *vsp;
	int freed;
	bool_t rdc = FALSE;

	/*
	 * Free any held credentials and caches which
	 * may be associated with this rnode.
	 */
	if (!mutex_tryenter(&rp->r_statelock))
		return (0);
	/* Detach the caches under the lock; free them after dropping it. */
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	if (rp->r_dir != NULL)
		rdc = TRUE;
	/*
	 * To avoid a deadlock, do not free r_xattr_dir cache if it is hashed
	 * on the same r_hashq queue. We are not mandated to free all caches.
	 * VN_RELE(rp->r_xattr_dir) will be done sometime later - e.g. when the
	 * rnode 'rp' is freed or put on the free list.
	 *
	 * We will retain NFS4_XATTR_DIR_NOTSUPP because:
	 * - it has no associated rnode4_t (its v_data is NULL),
	 * - it is preallocated statically and will never go away,
	 * so we cannot save anything by releasing it.
	 */
	if (rp->r_xattr_dir && rp->r_xattr_dir != NFS4_XATTR_DIR_NOTSUPP &&
	    VTOR4(rp->r_xattr_dir)->r_hashq != rp->r_hashq) {
		xattr = rp->r_xattr_dir;
		rp->r_xattr_dir = NULL;
	}
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	freed = nfs4_access_purge_rp(rp);

	/* Nothing else detached?  Report what the access purge did. */
	if (contents == NULL && vsp == NULL && rdc == FALSE && xattr == NULL)
		return (freed);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	nfs4_purge_rddir_cache(RTOV4(rp));

	/*
	 * Release the xattr directory vnode
	 */
	if (xattr != NULL)
		VN_RELE(xattr);

	return (1);
}
14850Sstevel@tonic-gate 
14860Sstevel@tonic-gate static int
nfs4_free_reclaim(void)14870Sstevel@tonic-gate nfs4_free_reclaim(void)
14880Sstevel@tonic-gate {
14890Sstevel@tonic-gate 	int freed;
14900Sstevel@tonic-gate 	rnode4_t *rp;
14910Sstevel@tonic-gate 
14920Sstevel@tonic-gate #ifdef DEBUG
14930Sstevel@tonic-gate 	clstat4_debug.f_reclaim.value.ui64++;
14940Sstevel@tonic-gate #endif
14950Sstevel@tonic-gate 	freed = 0;
14960Sstevel@tonic-gate 	mutex_enter(&rp4freelist_lock);
14970Sstevel@tonic-gate 	rp = rp4freelist;
14980Sstevel@tonic-gate 	if (rp != NULL) {
14990Sstevel@tonic-gate 		do {
15000Sstevel@tonic-gate 			if (nfs4_free_data_reclaim(rp))
15010Sstevel@tonic-gate 				freed = 1;
15020Sstevel@tonic-gate 		} while ((rp = rp->r_freef) != rp4freelist);
15030Sstevel@tonic-gate 	}
15040Sstevel@tonic-gate 	mutex_exit(&rp4freelist_lock);
15050Sstevel@tonic-gate 	return (freed);
15060Sstevel@tonic-gate }
15070Sstevel@tonic-gate 
15080Sstevel@tonic-gate static int
nfs4_active_reclaim(void)15090Sstevel@tonic-gate nfs4_active_reclaim(void)
15100Sstevel@tonic-gate {
15110Sstevel@tonic-gate 	int freed;
15120Sstevel@tonic-gate 	int index;
15130Sstevel@tonic-gate 	rnode4_t *rp;
15140Sstevel@tonic-gate 
15150Sstevel@tonic-gate #ifdef DEBUG
15160Sstevel@tonic-gate 	clstat4_debug.a_reclaim.value.ui64++;
15170Sstevel@tonic-gate #endif
15180Sstevel@tonic-gate 	freed = 0;
15190Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
15200Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_READER);
15210Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
15220Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
15230Sstevel@tonic-gate 		    rp = rp->r_hashf) {
15240Sstevel@tonic-gate 			if (nfs4_active_data_reclaim(rp))
15250Sstevel@tonic-gate 				freed = 1;
15260Sstevel@tonic-gate 		}
15270Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
15280Sstevel@tonic-gate 	}
15290Sstevel@tonic-gate 	return (freed);
15300Sstevel@tonic-gate }
15310Sstevel@tonic-gate 
/*
 * kmem reclaim stage 3: destroy rnodes sitting on the free list
 * outright, removing them from the hash table first.
 */
static int
nfs4_rnode_reclaim(void)
{
	int freed;
	rnode4_t *rp;
	vnode_t *vp;

#ifdef DEBUG
	clstat4_debug.r_reclaim.value.ui64++;
#endif
	freed = 0;
	mutex_enter(&rp4freelist_lock);
	while ((rp = rp4freelist) != NULL) {
		/* Unlink from the free list; drop the lock to do real work. */
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);
		if (rp->r_flags & R4HASHED) {
			vp = RTOV4(rp);
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Someone re-referenced the vnode while it
				 * sat on the free list; just drop the free
				 * list's hold and leave it alone.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				mutex_enter(&rp4freelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}
		/*
		 * This call to rp_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, CRED());
		mutex_enter(&rp4freelist_lock);
	}
	mutex_exit(&rp4freelist_lock);
	/*
	 * NOTE(review): 'freed' is never set above, so this function
	 * always returns 0; its only caller discards the value.  Kept
	 * as-is to preserve existing behavior.
	 */
	return (freed);
}
15730Sstevel@tonic-gate 
/*ARGSUSED*/
/*
 * kmem_cache reclaim callback for the rnode4 cache.  Try the cheapest
 * reclamation strategies first and stop as soon as one makes progress:
 * free-list data caches, then active-rnode data caches, and finally
 * whole rnodes off the free list.
 */
static void
nfs4_reclaim(void *cdrarg)
{
#ifdef DEBUG
	clstat4_debug.reclaim.value.ui64++;
#endif
	/* Short-circuit: stop at the first stage that frees anything. */
	if (nfs4_free_reclaim() || nfs4_active_reclaim())
		return;

	(void) nfs4_rnode_reclaim();
}
15890Sstevel@tonic-gate 
15900Sstevel@tonic-gate /*
15910Sstevel@tonic-gate  * Returns the clientid4 to use for the given mntinfo4.  Note that the
15920Sstevel@tonic-gate  * clientid can change if the caller drops mi_recovlock.
15930Sstevel@tonic-gate  */
15940Sstevel@tonic-gate 
15950Sstevel@tonic-gate clientid4
mi2clientid(mntinfo4_t * mi)15960Sstevel@tonic-gate mi2clientid(mntinfo4_t *mi)
15970Sstevel@tonic-gate {
15980Sstevel@tonic-gate 	nfs4_server_t	*sp;
15990Sstevel@tonic-gate 	clientid4	clientid = 0;
16000Sstevel@tonic-gate 
16010Sstevel@tonic-gate 	/* this locks down sp if it is found */
16020Sstevel@tonic-gate 	sp = find_nfs4_server(mi);
16030Sstevel@tonic-gate 	if (sp != NULL) {
16040Sstevel@tonic-gate 		clientid = sp->clientid;
16050Sstevel@tonic-gate 		mutex_exit(&sp->s_lock);
16060Sstevel@tonic-gate 		nfs4_server_rele(sp);
16070Sstevel@tonic-gate 	}
16080Sstevel@tonic-gate 	return (clientid);
16090Sstevel@tonic-gate }
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate /*
16120Sstevel@tonic-gate  * Return the current lease time for the server associated with the given
16130Sstevel@tonic-gate  * file.  Note that the lease time could change immediately after this
16140Sstevel@tonic-gate  * call.
16150Sstevel@tonic-gate  */
16160Sstevel@tonic-gate 
16170Sstevel@tonic-gate time_t
r2lease_time(rnode4_t * rp)16180Sstevel@tonic-gate r2lease_time(rnode4_t *rp)
16190Sstevel@tonic-gate {
16200Sstevel@tonic-gate 	nfs4_server_t	*sp;
16210Sstevel@tonic-gate 	time_t		lease_time;
16220Sstevel@tonic-gate 	mntinfo4_t	*mi = VTOMI4(RTOV4(rp));
16230Sstevel@tonic-gate 
16240Sstevel@tonic-gate 	(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);
16250Sstevel@tonic-gate 
16260Sstevel@tonic-gate 	/* this locks down sp if it is found */
16270Sstevel@tonic-gate 	sp = find_nfs4_server(VTOMI4(RTOV4(rp)));
16280Sstevel@tonic-gate 
16290Sstevel@tonic-gate 	if (VTOMI4(RTOV4(rp))->mi_vfsp->vfs_flag & VFS_UNMOUNTED) {
16300Sstevel@tonic-gate 		if (sp != NULL) {
16310Sstevel@tonic-gate 			mutex_exit(&sp->s_lock);
16320Sstevel@tonic-gate 			nfs4_server_rele(sp);
16330Sstevel@tonic-gate 		}
16340Sstevel@tonic-gate 		nfs_rw_exit(&mi->mi_recovlock);
16350Sstevel@tonic-gate 		return (1);		/* 1 second */
16360Sstevel@tonic-gate 	}
16370Sstevel@tonic-gate 
16380Sstevel@tonic-gate 	ASSERT(sp != NULL);
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 	lease_time = sp->s_lease_time;
16410Sstevel@tonic-gate 
16420Sstevel@tonic-gate 	mutex_exit(&sp->s_lock);
16430Sstevel@tonic-gate 	nfs4_server_rele(sp);
16440Sstevel@tonic-gate 	nfs_rw_exit(&mi->mi_recovlock);
16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate 	return (lease_time);
16470Sstevel@tonic-gate }
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate /*
16500Sstevel@tonic-gate  * Return a list with information about all the known open instances for
16510Sstevel@tonic-gate  * a filesystem. The caller must call r4releopenlist() when done with the
16520Sstevel@tonic-gate  * list.
16530Sstevel@tonic-gate  *
16540Sstevel@tonic-gate  * We are safe at looking at os_valid and os_pending_close across dropping
16550Sstevel@tonic-gate  * the 'os_sync_lock' to count up the number of open streams and then
16560Sstevel@tonic-gate  * allocate memory for the osp list due to:
16570Sstevel@tonic-gate  *	-Looking at os_pending_close is safe since this routine is
16580Sstevel@tonic-gate  *	only called via recovery, and os_pending_close can only be set via
16590Sstevel@tonic-gate  *	a non-recovery operation (which are all blocked when recovery
16600Sstevel@tonic-gate  *	is active).
16610Sstevel@tonic-gate  *
16620Sstevel@tonic-gate  *	-Examining os_valid is safe since non-recovery operations, which
16630Sstevel@tonic-gate  *	could potentially switch os_valid to 0, are blocked (via
16640Sstevel@tonic-gate  *	nfs4_start_fop) and recovery is single-threaded per mntinfo4_t
16650Sstevel@tonic-gate  *	(which means we are the only recovery thread potentially acting
16660Sstevel@tonic-gate  *	on this open stream).
16670Sstevel@tonic-gate  */
16680Sstevel@tonic-gate 
/*
 * Build and return the open-instance list for 'mi' (see block comment
 * above for the locking rationale).  Each nfs4_opinst_t holds the vnode
 * and a referenced array of its valid open streams; delegations found
 * along the way are discarded (recovery info is saved first).
 */
nfs4_opinst_t *
r4mkopenlist(mntinfo4_t *mi)
{
	nfs4_opinst_t *reopenlist, *rep;
	rnode4_t *rp;
	vnode_t *vp;
	vfs_t *vfsp = mi->mi_vfsp;
	int numosp;
	nfs4_open_stream_t *osp;
	int index;
	open_delegation_type4 dtype;
	int hold_vnode;

	reopenlist = NULL;

	/* Walk every hash bucket looking for rnodes of this filesystem. */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp != vfsp)
				continue;
			hold_vnode = 0;

			mutex_enter(&rp->r_os_lock);

			/* Count the number of valid open_streams of the file */
			numosp = 0;
			for (osp = list_head(&rp->r_open_streams); osp != NULL;
			    osp = list_next(&rp->r_open_streams, osp)) {
				mutex_enter(&osp->os_sync_lock);
				if (osp->os_valid && !osp->os_pending_close)
					numosp++;
				mutex_exit(&osp->os_sync_lock);
			}

			/* Fill in the valid open streams per vp */
			if (numosp > 0) {
				int j;

				hold_vnode = 1;

				/*
				 * Add a new open instance to the list
				 */
				rep = kmem_zalloc(sizeof (*reopenlist),
				    KM_SLEEP);
				rep->re_next = reopenlist;
				reopenlist = rep;

				rep->re_vp = vp;
				rep->re_osp = kmem_zalloc(
				    numosp * sizeof (*(rep->re_osp)),
				    KM_SLEEP);
				rep->re_numosp = numosp;

				/*
				 * Second pass: take a reference on each
				 * valid open stream and record it.
				 */
				j = 0;
				for (osp = list_head(&rp->r_open_streams);
				    osp != NULL;
				    osp = list_next(&rp->r_open_streams, osp)) {

					mutex_enter(&osp->os_sync_lock);
					if (osp->os_valid &&
					    !osp->os_pending_close) {
						osp->os_ref_count++;
						rep->re_osp[j] = osp;
						j++;
					}
					mutex_exit(&osp->os_sync_lock);
				}
				/*
				 * Assuming valid osp(s) stays valid between
				 * the time obtaining j and numosp.
				 */
				ASSERT(j == numosp);
			}

			mutex_exit(&rp->r_os_lock);
			/* do this here to keep v_lock > r_os_lock */
			if (hold_vnode)
				VN_HOLD(vp);
			mutex_enter(&rp->r_statev4_lock);
			if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
				/*
				 * If this rnode holds a delegation,
				 * but if there are no valid open streams,
				 * then just discard the delegation
				 * without doing delegreturn.
				 */
				if (numosp > 0)
					rp->r_deleg_needs_recovery =
					    rp->r_deleg_type;
			}
			/* Save the delegation type for use outside the lock */
			dtype = rp->r_deleg_type;
			mutex_exit(&rp->r_statev4_lock);

			/*
			 * If we have a delegation then get rid of it.
			 * We've set rp->r_deleg_needs_recovery so we have
			 * enough information to recover.
			 */
			if (dtype != OPEN_DELEGATE_NONE) {
				(void) nfs4delegreturn(rp, NFS4_DR_DISCARD);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (reopenlist);
}
17810Sstevel@tonic-gate 
17820Sstevel@tonic-gate /*
178311291SRobert.Thurlow@Sun.COM  * Given a filesystem id, check to see if any rnodes
178411291SRobert.Thurlow@Sun.COM  * within this fsid reside in the rnode cache, other
178511291SRobert.Thurlow@Sun.COM  * than one we know about.
178611291SRobert.Thurlow@Sun.COM  *
178711291SRobert.Thurlow@Sun.COM  * Return 1 if an rnode is found, 0 otherwise
178811291SRobert.Thurlow@Sun.COM  */
178911291SRobert.Thurlow@Sun.COM int
r4find_by_fsid(mntinfo4_t * mi,fattr4_fsid * moved_fsid)179011291SRobert.Thurlow@Sun.COM r4find_by_fsid(mntinfo4_t *mi, fattr4_fsid *moved_fsid)
179111291SRobert.Thurlow@Sun.COM {
179211291SRobert.Thurlow@Sun.COM 	rnode4_t *rp;
179311291SRobert.Thurlow@Sun.COM 	vnode_t *vp;
179411291SRobert.Thurlow@Sun.COM 	vfs_t *vfsp = mi->mi_vfsp;
179511291SRobert.Thurlow@Sun.COM 	fattr4_fsid *fsid;
179611291SRobert.Thurlow@Sun.COM 	int index, found = 0;
179711291SRobert.Thurlow@Sun.COM 
179811291SRobert.Thurlow@Sun.COM 	for (index = 0; index < rtable4size; index++) {
179911291SRobert.Thurlow@Sun.COM 		rw_enter(&rtable4[index].r_lock, RW_READER);
180011291SRobert.Thurlow@Sun.COM 		for (rp = rtable4[index].r_hashf;
180111291SRobert.Thurlow@Sun.COM 		    rp != (rnode4_t *)(&rtable4[index]);
180211291SRobert.Thurlow@Sun.COM 		    rp = rp->r_hashf) {
180311291SRobert.Thurlow@Sun.COM 
180411291SRobert.Thurlow@Sun.COM 			vp = RTOV4(rp);
180511291SRobert.Thurlow@Sun.COM 			if (vp->v_vfsp != vfsp)
180611291SRobert.Thurlow@Sun.COM 				continue;
180711291SRobert.Thurlow@Sun.COM 
180811291SRobert.Thurlow@Sun.COM 			/*
180911291SRobert.Thurlow@Sun.COM 			 * XXX there might be a case where a
181011291SRobert.Thurlow@Sun.COM 			 * replicated fs may have the same fsid
181111291SRobert.Thurlow@Sun.COM 			 * across two different servers. This
181211291SRobert.Thurlow@Sun.COM 			 * check isn't good enough in that case
181311291SRobert.Thurlow@Sun.COM 			 */
181411291SRobert.Thurlow@Sun.COM 			fsid = &rp->r_srv_fsid;
181511291SRobert.Thurlow@Sun.COM 			if (FATTR4_FSID_EQ(moved_fsid, fsid)) {
181611291SRobert.Thurlow@Sun.COM 				found = 1;
181711291SRobert.Thurlow@Sun.COM 				break;
181811291SRobert.Thurlow@Sun.COM 			}
181911291SRobert.Thurlow@Sun.COM 		}
182011291SRobert.Thurlow@Sun.COM 		rw_exit(&rtable4[index].r_lock);
182111291SRobert.Thurlow@Sun.COM 
182211291SRobert.Thurlow@Sun.COM 		if (found)
182311291SRobert.Thurlow@Sun.COM 			break;
182411291SRobert.Thurlow@Sun.COM 	}
182511291SRobert.Thurlow@Sun.COM 	return (found);
182611291SRobert.Thurlow@Sun.COM }
182711291SRobert.Thurlow@Sun.COM 
182811291SRobert.Thurlow@Sun.COM /*
18290Sstevel@tonic-gate  * Release the list of open instance references.
18300Sstevel@tonic-gate  */
18310Sstevel@tonic-gate 
18320Sstevel@tonic-gate void
r4releopenlist(nfs4_opinst_t * reopenp)18330Sstevel@tonic-gate r4releopenlist(nfs4_opinst_t *reopenp)
18340Sstevel@tonic-gate {
18350Sstevel@tonic-gate 	nfs4_opinst_t *rep, *next;
18360Sstevel@tonic-gate 	int i;
18370Sstevel@tonic-gate 
18380Sstevel@tonic-gate 	for (rep = reopenp; rep; rep = next) {
18390Sstevel@tonic-gate 		next = rep->re_next;
18400Sstevel@tonic-gate 
18410Sstevel@tonic-gate 		for (i = 0; i < rep->re_numosp; i++)
18425302Sth199096 			open_stream_rele(rep->re_osp[i], VTOR4(rep->re_vp));
18430Sstevel@tonic-gate 
18440Sstevel@tonic-gate 		VN_RELE(rep->re_vp);
18450Sstevel@tonic-gate 		kmem_free(rep->re_osp,
18460Sstevel@tonic-gate 		    rep->re_numosp * sizeof (*(rep->re_osp)));
18470Sstevel@tonic-gate 
18480Sstevel@tonic-gate 		kmem_free(rep, sizeof (*rep));
18490Sstevel@tonic-gate 	}
18500Sstevel@tonic-gate }
18510Sstevel@tonic-gate 
18520Sstevel@tonic-gate int
nfs4_rnode_init(void)18530Sstevel@tonic-gate nfs4_rnode_init(void)
18540Sstevel@tonic-gate {
18550Sstevel@tonic-gate 	ulong_t nrnode4_max;
18560Sstevel@tonic-gate 	int i;
18570Sstevel@tonic-gate 
18580Sstevel@tonic-gate 	/*
18590Sstevel@tonic-gate 	 * Compute the size of the rnode4 hash table
18600Sstevel@tonic-gate 	 */
18610Sstevel@tonic-gate 	if (nrnode <= 0)
18620Sstevel@tonic-gate 		nrnode = ncsize;
18630Sstevel@tonic-gate 	nrnode4_max =
18640Sstevel@tonic-gate 	    (ulong_t)((kmem_maxavail() >> 2) / sizeof (struct rnode4));
18650Sstevel@tonic-gate 	if (nrnode > nrnode4_max || (nrnode == 0 && ncsize == 0)) {
18660Sstevel@tonic-gate 		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
18670Sstevel@tonic-gate 		    "setting nrnode to max value of %ld", nrnode4_max);
18680Sstevel@tonic-gate 		nrnode = nrnode4_max;
18690Sstevel@tonic-gate 	}
18700Sstevel@tonic-gate 	rtable4size = 1 << highbit(nrnode / rnode4_hashlen);
18710Sstevel@tonic-gate 	rtable4mask = rtable4size - 1;
18720Sstevel@tonic-gate 
18730Sstevel@tonic-gate 	/*
18740Sstevel@tonic-gate 	 * Allocate and initialize the hash buckets
18750Sstevel@tonic-gate 	 */
18760Sstevel@tonic-gate 	rtable4 = kmem_alloc(rtable4size * sizeof (*rtable4), KM_SLEEP);
18770Sstevel@tonic-gate 	for (i = 0; i < rtable4size; i++) {
18780Sstevel@tonic-gate 		rtable4[i].r_hashf = (rnode4_t *)(&rtable4[i]);
18790Sstevel@tonic-gate 		rtable4[i].r_hashb = (rnode4_t *)(&rtable4[i]);
18800Sstevel@tonic-gate 		rw_init(&rtable4[i].r_lock, NULL, RW_DEFAULT, NULL);
18810Sstevel@tonic-gate 	}
18820Sstevel@tonic-gate 
18830Sstevel@tonic-gate 	rnode4_cache = kmem_cache_create("rnode4_cache", sizeof (rnode4_t),
18840Sstevel@tonic-gate 	    0, NULL, NULL, nfs4_reclaim, NULL, NULL, 0);
18850Sstevel@tonic-gate 
18860Sstevel@tonic-gate 	return (0);
18870Sstevel@tonic-gate }
18880Sstevel@tonic-gate 
18890Sstevel@tonic-gate int
nfs4_rnode_fini(void)18900Sstevel@tonic-gate nfs4_rnode_fini(void)
18910Sstevel@tonic-gate {
18920Sstevel@tonic-gate 	int i;
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate 	/*
18950Sstevel@tonic-gate 	 * Deallocate the rnode hash queues
18960Sstevel@tonic-gate 	 */
18970Sstevel@tonic-gate 	kmem_cache_destroy(rnode4_cache);
18980Sstevel@tonic-gate 
18990Sstevel@tonic-gate 	for (i = 0; i < rtable4size; i++)
19000Sstevel@tonic-gate 		rw_destroy(&rtable4[i].r_lock);
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate 	kmem_free(rtable4, rtable4size * sizeof (*rtable4));
19030Sstevel@tonic-gate 
19040Sstevel@tonic-gate 	return (0);
19050Sstevel@tonic-gate }
19060Sstevel@tonic-gate 
19070Sstevel@tonic-gate /*
19080Sstevel@tonic-gate  * Return non-zero if the given filehandle refers to the root filehandle
19090Sstevel@tonic-gate  * for the given rnode.
19100Sstevel@tonic-gate  */
19110Sstevel@tonic-gate 
19120Sstevel@tonic-gate static int
isrootfh(nfs4_sharedfh_t * fh,rnode4_t * rp)19130Sstevel@tonic-gate isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp)
19140Sstevel@tonic-gate {
19150Sstevel@tonic-gate 	int isroot;
19160Sstevel@tonic-gate 
19170Sstevel@tonic-gate 	isroot = 0;
19180Sstevel@tonic-gate 	if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh))
19190Sstevel@tonic-gate 		isroot = 1;
19200Sstevel@tonic-gate 
19210Sstevel@tonic-gate 	return (isroot);
19220Sstevel@tonic-gate }
19230Sstevel@tonic-gate 
19245302Sth199096 /*
19255302Sth199096  * The r4_stub_* routines assume that the rnode is newly activated, and
19265302Sth199096  * that the caller either holds the hash bucket r_lock for this rnode as
19275302Sth199096  * RW_WRITER, or holds r_statelock.
19285302Sth199096  */
19295302Sth199096 static void
r4_stub_set(rnode4_t * rp,nfs4_stub_type_t type)19305302Sth199096 r4_stub_set(rnode4_t *rp, nfs4_stub_type_t type)
19315302Sth199096 {
19325302Sth199096 	vnode_t *vp = RTOV4(rp);
19335302Sth199096 	krwlock_t *hash_lock = &rp->r_hashq->r_lock;
19345302Sth199096 
19355302Sth199096 	ASSERT(RW_WRITE_HELD(hash_lock) || MUTEX_HELD(&rp->r_statelock));
19365302Sth199096 
19375302Sth199096 	rp->r_stub_type = type;
19385302Sth199096 
19395302Sth199096 	/*
19405302Sth199096 	 * Safely switch this vnode to the trigger vnodeops.
19415302Sth199096 	 *
19425302Sth199096 	 * Currently, we don't ever switch a trigger vnode back to using
19435302Sth199096 	 * "regular" v4 vnodeops. NFS4_STUB_NONE is only used to note that
19445302Sth199096 	 * a new v4 object is not a trigger, and it will already have the
19455302Sth199096 	 * correct v4 vnodeops by default. So, no "else" case required here.
19465302Sth199096 	 */
19475302Sth199096 	if (type != NFS4_STUB_NONE)
19485302Sth199096 		vn_setops(vp, nfs4_trigger_vnodeops);
19495302Sth199096 }
19505302Sth199096 
/*
 * Mark 'rp' as a mirror-mount trigger stub; r4_stub_set switches the
 * vnode to the trigger vnodeops.  Caller must satisfy r4_stub_set's
 * locking requirements.
 */
void
r4_stub_mirrormount(rnode4_t *rp)
{
	r4_stub_set(rp, NFS4_STUB_MIRRORMOUNT);
}
19565302Sth199096 
/*
 * Mark 'rp' as a referral trigger stub.  Fires a DTrace probe with the
 * vnode before switching to the trigger vnodeops via r4_stub_set.
 */
void
r4_stub_referral(rnode4_t *rp)
{
	DTRACE_PROBE1(nfs4clnt__func__referral__moved,
	    vnode_t *, RTOV4(rp));
	r4_stub_set(rp, NFS4_STUB_REFERRAL);
}
196411291SRobert.Thurlow@Sun.COM 
/*
 * Note that 'rp' is not a trigger stub.  r4_stub_set leaves the
 * vnodeops unchanged for NFS4_STUB_NONE.
 */
void
r4_stub_none(rnode4_t *rp)
{
	r4_stub_set(rp, NFS4_STUB_NONE);
}
19705302Sth199096 
19710Sstevel@tonic-gate #ifdef DEBUG
19720Sstevel@tonic-gate 
19730Sstevel@tonic-gate /*
19740Sstevel@tonic-gate  * Look in the rnode table for other rnodes that have the same filehandle.
19750Sstevel@tonic-gate  * Assume the lock is held for the hash chain of checkrp
19760Sstevel@tonic-gate  */
19770Sstevel@tonic-gate 
19780Sstevel@tonic-gate static void
r4_dup_check(rnode4_t * checkrp,vfs_t * vfsp)19790Sstevel@tonic-gate r4_dup_check(rnode4_t *checkrp, vfs_t *vfsp)
19800Sstevel@tonic-gate {
19810Sstevel@tonic-gate 	rnode4_t *rp;
19820Sstevel@tonic-gate 	vnode_t *tvp;
19830Sstevel@tonic-gate 	nfs4_fhandle_t fh, fh2;
19840Sstevel@tonic-gate 	int index;
19850Sstevel@tonic-gate 
19860Sstevel@tonic-gate 	if (!r4_check_for_dups)
19870Sstevel@tonic-gate 		return;
19880Sstevel@tonic-gate 
19890Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&checkrp->r_hashq->r_lock));
19900Sstevel@tonic-gate 
19910Sstevel@tonic-gate 	sfh4_copyval(checkrp->r_fh, &fh);
19920Sstevel@tonic-gate 
19930Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
19940Sstevel@tonic-gate 
19950Sstevel@tonic-gate 		if (&rtable4[index] != checkrp->r_hashq)
19960Sstevel@tonic-gate 			rw_enter(&rtable4[index].r_lock, RW_READER);
19970Sstevel@tonic-gate 
19980Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
19990Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
20000Sstevel@tonic-gate 		    rp = rp->r_hashf) {
20010Sstevel@tonic-gate 
20020Sstevel@tonic-gate 			if (rp == checkrp)
20030Sstevel@tonic-gate 				continue;
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 			tvp = RTOV4(rp);
20060Sstevel@tonic-gate 			if (tvp->v_vfsp != vfsp)
20070Sstevel@tonic-gate 				continue;
20080Sstevel@tonic-gate 
20090Sstevel@tonic-gate 			sfh4_copyval(rp->r_fh, &fh2);
20100Sstevel@tonic-gate 			if (nfs4cmpfhandle(&fh, &fh2) == 0) {
20110Sstevel@tonic-gate 				cmn_err(CE_PANIC, "rnodes with same fs, fh "
20120Sstevel@tonic-gate 				    "(%p, %p)", (void *)checkrp, (void *)rp);
20130Sstevel@tonic-gate 			}
20140Sstevel@tonic-gate 		}
20150Sstevel@tonic-gate 
20160Sstevel@tonic-gate 		if (&rtable4[index] != checkrp->r_hashq)
20170Sstevel@tonic-gate 			rw_exit(&rtable4[index].r_lock);
20180Sstevel@tonic-gate 	}
20190Sstevel@tonic-gate }
20200Sstevel@tonic-gate 
20210Sstevel@tonic-gate #endif /* DEBUG */
2022