xref: /onnv-gate/usr/src/uts/common/nfs/nfs_clnt.h (revision 13096:b02331b7b26d)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
511507SVallish.Vaidyeshwara@Sun.COM  * Common Development and Distribution License (the "License").
611507SVallish.Vaidyeshwara@Sun.COM  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*13096SJordan.Vaughan@Sun.com  * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate  */
240Sstevel@tonic-gate 
250Sstevel@tonic-gate /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
260Sstevel@tonic-gate /*	  All Rights Reserved  	*/
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #ifndef	_NFS_NFS_CLNT_H
290Sstevel@tonic-gate #define	_NFS_NFS_CLNT_H
300Sstevel@tonic-gate 
310Sstevel@tonic-gate #include <sys/utsname.h>
320Sstevel@tonic-gate #include <sys/kstat.h>
330Sstevel@tonic-gate #include <sys/time.h>
340Sstevel@tonic-gate #include <vm/page.h>
350Sstevel@tonic-gate #include <sys/thread.h>
360Sstevel@tonic-gate #include <nfs/rnode.h>
370Sstevel@tonic-gate #include <sys/list.h>
3811507SVallish.Vaidyeshwara@Sun.COM #include <sys/condvar_impl.h>
39*13096SJordan.Vaughan@Sun.com #include <sys/zone.h>
400Sstevel@tonic-gate 
410Sstevel@tonic-gate #ifdef	__cplusplus
420Sstevel@tonic-gate extern "C" {
430Sstevel@tonic-gate #endif
440Sstevel@tonic-gate 
#define	HOSTNAMESZ	32	/* max bytes of a server hostname kept */
#define	ACREGMIN	3	/* min secs to hold cached file attr */
#define	ACREGMAX	60	/* max secs to hold cached file attr */
#define	ACDIRMIN	30	/* min secs to hold cached dir attr */
#define	ACDIRMAX	60	/* max secs to hold cached dir attr */
#define	ACMINMAX	3600	/* 1 hr is longest min timeout */
#define	ACMAXMAX	36000	/* 10 hr is longest max timeout */

#define	NFS_CALLTYPES	3	/* Lookups, Reads, Writes */

/*
 * rfscall() flags
 */
#define	RFSCALL_SOFT	0x00000001	/* Do op as if fs was soft-mounted */

/*
 * Fake errno passed back from rfscall to indicate transfer size adjustment
 */
#define	ENFS_TRYAGAIN	999
640Sstevel@tonic-gate 
/*
 * The NFS specific async_reqs structure. iotype is grouped to support two
 * types of async thread pools, please read comments section of mntinfo_t
 * definition for more information. Care should be taken while adding new
 * members to this group.
 */

enum iotype {
	/*
	 * The first three entries are the "page op" types; they must stay
	 * first and contiguous because NFS_ASYNC_PGOPS_TYPES below is
	 * defined as NFS_COMMIT + 1.
	 */
	NFS_PUTAPAGE,
	NFS_PAGEIO,
	NFS_COMMIT,
	NFS_READ_AHEAD,
	NFS_READDIR,
	NFS_INACTIVE,
	NFS_ASYNC_TYPES		/* count of request types, not a real type */
};
/* Number of iotype values that are page ops (depends on ordering above). */
#define	NFS_ASYNC_PGOPS_TYPES	(NFS_COMMIT + 1)

/*
 * NFS async requests queue type.
 */

enum ioqtype {
	NFS_ASYNC_QUEUE,	/* queue serviced by the general worker pool */
	NFS_ASYNC_PGOPS_QUEUE,	/* queue serviced by page-op-only workers */
	NFS_MAX_ASYNC_QUEUES	/* count of queues, not a real queue */
};

/*
 * Number of NFS async threads operating exclusively on page op requests.
 */
#define	NUM_ASYNC_PGOPS_THREADS	0x2
970Sstevel@tonic-gate 
/* Arguments for an NFS_READ_AHEAD async request. */
struct nfs_async_read_req {
	void (*readahead)();		/* pointer to readahead function */
	u_offset_t blkoff;		/* offset in file */
	struct seg *seg;		/* segment to do i/o to */
	caddr_t addr;			/* address to do i/o to */
};
1040Sstevel@tonic-gate 
/* Arguments for an NFS_PUTAPAGE or NFS_PAGEIO async request. */
struct nfs_pageio_req {
	int (*pageio)();		/* pointer to pageio function */
	page_t *pp;			/* page list */
	u_offset_t io_off;		/* offset in file */
	uint_t io_len;			/* size of request */
	int flags;			/* B_* pageio flags, passed through */
};
1120Sstevel@tonic-gate 
/* Arguments for an NFS_READDIR async request. */
struct nfs_readdir_req {
	int (*readdir)();		/* pointer to readdir function */
	struct rddir_cache *rdc;	/* pointer to cache entry to fill */
};
1170Sstevel@tonic-gate 
/* Arguments for an NFS_COMMIT async request. */
struct nfs_commit_req {
	void (*commit)();		/* pointer to commit function */
	page_t *plist;			/* page list */
	offset3 offset;			/* starting offset */
	count3 count;			/* size of range to be committed */
};
1240Sstevel@tonic-gate 
/* Arguments for an NFS_INACTIVE async request. */
struct nfs_inactive_req {
	void (*inactive)();		/* pointer to inactive function */
};
1280Sstevel@tonic-gate 
/*
 * One queued async I/O request.  Requests are singly linked through
 * a_next onto the per-mount queues (mi_async_reqs[] in mntinfo_t),
 * keyed by a_io; a_args holds the per-type arguments selected by a_io.
 */
struct nfs_async_reqs {
	struct nfs_async_reqs *a_next;	/* pointer to next arg struct */
#ifdef DEBUG
	kthread_t *a_queuer;		/* thread id of queueing thread */
#endif
	struct vnode *a_vp;		/* vnode pointer */
	struct cred *a_cred;		/* cred pointer */
	enum iotype a_io;		/* i/o type */
	union {
		struct nfs_async_read_req a_read_args;
		struct nfs_pageio_req a_pageio_args;
		struct nfs_readdir_req a_readdir_args;
		struct nfs_commit_req a_commit_args;
		struct nfs_inactive_req a_inactive_args;
	} a_args;
};
1450Sstevel@tonic-gate 
/* NFS_READ_AHEAD argument accessors */
#define	a_nfs_readahead a_args.a_read_args.readahead
#define	a_nfs_blkoff a_args.a_read_args.blkoff
#define	a_nfs_seg a_args.a_read_args.seg
#define	a_nfs_addr a_args.a_read_args.addr

/* NFS_PUTAPAGE / NFS_PAGEIO argument accessors (share a_pageio_args) */
#define	a_nfs_putapage a_args.a_pageio_args.pageio
#define	a_nfs_pageio a_args.a_pageio_args.pageio
#define	a_nfs_pp a_args.a_pageio_args.pp
#define	a_nfs_off a_args.a_pageio_args.io_off
#define	a_nfs_len a_args.a_pageio_args.io_len
#define	a_nfs_flags a_args.a_pageio_args.flags

/* NFS_READDIR argument accessors */
#define	a_nfs_readdir a_args.a_readdir_args.readdir
#define	a_nfs_rdc a_args.a_readdir_args.rdc

/* NFS_COMMIT argument accessors */
#define	a_nfs_commit a_args.a_commit_args.commit
#define	a_nfs_plist a_args.a_commit_args.plist
#define	a_nfs_offset a_args.a_commit_args.offset
#define	a_nfs_count a_args.a_commit_args.count

/* NFS_INACTIVE argument accessor */
#define	a_nfs_inactive a_args.a_inactive_args.inactive
1670Sstevel@tonic-gate 
/*
 * Due to the way the address space callbacks are used to execute a delmap,
 * we must keep track of how many times the same thread has called
 * VOP_DELMAP()->nfs_delmap()/nfs3_delmap().  This is done by having a list of
 * nfs_delmapcall_t's associated with each rnode_t.  This list is protected
 * by the rnode_t's r_statelock.  The individual elements do not need to be
 * protected as they will only ever be created, modified and destroyed by
 * one thread (the call_id).
 * See nfs_delmap()/nfs3_delmap() for further explanation.
 */
typedef struct nfs_delmapcall {
	kthread_t	*call_id;	/* thread that owns this entry */
	int		error;	/* error from delmap */
	list_node_t	call_node;	/* linkage into rnode's delmap list */
} nfs_delmapcall_t;
1830Sstevel@tonic-gate 
/*
 * delmap address space callback args: a snapshot of the VOP_DELMAP()
 * arguments, carried to the async callback.
 */
typedef struct nfs_delmap_args {
	vnode_t			*vp;	/* vnode being unmapped */
	offset_t		off;	/* file offset of the mapping */
	caddr_t			addr;	/* user address of the mapping */
	size_t			len;	/* length of the mapping */
	uint_t			prot;	/* current protection */
	uint_t			maxprot;	/* max protection */
	uint_t			flags;	/* mapping flags */
	cred_t			*cr;	/* credentials of caller */
	nfs_delmapcall_t	*caller; /* to retrieve errors from the cb */
} nfs_delmap_args_t;
1980Sstevel@tonic-gate 
1990Sstevel@tonic-gate #ifdef _KERNEL
2000Sstevel@tonic-gate extern nfs_delmapcall_t	*nfs_init_delmapcall(void);
2010Sstevel@tonic-gate extern void	nfs_free_delmapcall(nfs_delmapcall_t *);
2020Sstevel@tonic-gate extern int	nfs_find_and_delete_delmapcall(rnode_t *, int *errp);
2030Sstevel@tonic-gate #endif /* _KERNEL */
2040Sstevel@tonic-gate 
2050Sstevel@tonic-gate /*
2060Sstevel@tonic-gate  * The following structures, chhead and chtab,  make up the client handle
2070Sstevel@tonic-gate  * cache.  chhead represents a quadruple(RPC program, RPC version, Protocol
2080Sstevel@tonic-gate  * Family, and Transport).  For example, a chhead entry could represent
2090Sstevel@tonic-gate  * NFS/V3/IPv4/TCP requests.  chhead nodes are linked together as a singly
2100Sstevel@tonic-gate  * linked list and is referenced from chtable.
2110Sstevel@tonic-gate  *
2120Sstevel@tonic-gate  * chtab represents an allocated client handle bound to a particular
2130Sstevel@tonic-gate  * quadruple. These nodes chain down from a chhead node.  chtab
2140Sstevel@tonic-gate  * entries which are on the chain are considered free, so a thread may simply
2150Sstevel@tonic-gate  * unlink the first node without traversing the chain.  When the thread is
2160Sstevel@tonic-gate  * completed with its request, it puts the chtab node back on the chain.
2170Sstevel@tonic-gate  */
/* Head of one client-handle cache chain; one per (prog, vers, fmly, dev). */
typedef struct chhead {
	struct chhead *ch_next;	/* next quadruple */
	struct chtab *ch_list;	/* pointer to free client handle(s) */
	uint64_t ch_timesused;	/* times this quadruple was requested */
	rpcprog_t ch_prog;	/* RPC program number */
	rpcvers_t ch_vers;	/* RPC version number */
	dev_t ch_dev;		/* pseudo device number (i.e. /dev/udp) */
	char *ch_protofmly;	/* protocol (i.e. NC_INET, NC_LOOPBACK) */
} chhead_t;
2270Sstevel@tonic-gate 
/* One cached client handle; on its chhead's ch_list when free. */
typedef struct chtab {
	struct chtab *ch_list;	/* next free client handle */
	struct chhead *ch_head;	/* associated quadruple */
	time_t ch_freed;	/* timestamp when freed */
	CLIENT *ch_client;	/* pointer to client handle */
} chtab_t;
2340Sstevel@tonic-gate 
/*
 * clinfo is a structure which encapsulates data that is needed to
 * obtain a client handle from the cache
 */
typedef struct clinfo {
	rpcprog_t cl_prog;	/* RPC program number */
	rpcvers_t cl_vers;	/* RPC version number */
	uint_t cl_readsize;	/* transfer size */
	int cl_retrans;		/* times to retry request */
	uint_t cl_flags;	/* info flags */
} clinfo_t;
2460Sstevel@tonic-gate 
/*
 * Failover information, passed opaquely through rfscall().  The function
 * pointers let rfscall() re-derive a filehandle after failing over to a
 * different server.
 */
typedef struct failinfo {
	struct vnode	*vp;	/* vnode the call operates on */
	caddr_t		fhp;	/* filehandle to patch on failover */
	void (*copyproc)(caddr_t, vnode_t *);	/* copy fh from vnode */
	int (*lookupproc)(vnode_t *, char *, vnode_t **, struct pathname *,
			int, vnode_t *, struct cred *, int);	/* re-lookup */
	int (*xattrdirproc)(vnode_t *, vnode_t **, bool_t, cred_t *, int);
} failinfo_t;
2580Sstevel@tonic-gate 
/*
 * Static server information, one entry per server in a mount's
 * (possibly replicated) server list.
 *
 * These fields are protected by sv_lock:
 *	sv_flags
 */
typedef struct servinfo {
	struct knetconfig *sv_knconf;   /* bound TLI fd */
	struct knetconfig *sv_origknconf;	/* For RDMA save orig knconf */
	struct netbuf	sv_addr;	/* server's address */
	nfs_fhandle	sv_fhandle;	/* this server's filehandle */
	struct sec_data *sv_secdata;	/* security data for rpcsec module */
	char	*sv_hostname;		/* server's hostname */
	int	sv_hostnamelen;		/* server's hostname length */
	uint_t	sv_flags;		/* see below */
	struct servinfo	*sv_next;	/* next in list */
	kmutex_t sv_lock;		/* protects sv_flags */
} servinfo_t;
2770Sstevel@tonic-gate 
2780Sstevel@tonic-gate /*
2790Sstevel@tonic-gate  * The values for sv_flags.
2800Sstevel@tonic-gate  */
2810Sstevel@tonic-gate #define	SV_ROOT_STALE	0x1		/* root vnode got ESTALE */
2820Sstevel@tonic-gate 
/*
 * Switch from RDMA knconf to original mount knconf.
 *
 * The parameter is parenthesized so the macro expands correctly if the
 * caller passes an expression (e.g. a cast or conditional) rather than
 * a simple identifier.
 */

#define	ORIG_KNCONF(mi) ((mi)->mi_curr_serv->sv_origknconf ? \
	(mi)->mi_curr_serv->sv_origknconf : (mi)->mi_curr_serv->sv_knconf)
2890Sstevel@tonic-gate 
290*13096SJordan.Vaughan@Sun.com #if	defined(_KERNEL)
2910Sstevel@tonic-gate /*
2920Sstevel@tonic-gate  * NFS private data per mounted file system
2930Sstevel@tonic-gate  *	The mi_lock mutex protects the following fields:
2940Sstevel@tonic-gate  *		mi_flags
2950Sstevel@tonic-gate  *		mi_printed
2960Sstevel@tonic-gate  *		mi_down
2970Sstevel@tonic-gate  *		mi_tsize
2980Sstevel@tonic-gate  *		mi_stsize
2990Sstevel@tonic-gate  *		mi_curread
3000Sstevel@tonic-gate  *		mi_curwrite
3010Sstevel@tonic-gate  *		mi_timers
3020Sstevel@tonic-gate  *		mi_curr_serv
3030Sstevel@tonic-gate  *		mi_readers
3040Sstevel@tonic-gate  *		mi_klmconfig
3050Sstevel@tonic-gate  *
3060Sstevel@tonic-gate  *	The mi_async_lock mutex protects the following fields:
3070Sstevel@tonic-gate  *		mi_async_reqs
3080Sstevel@tonic-gate  *		mi_async_req_count
3090Sstevel@tonic-gate  *		mi_async_tail
31011507SVallish.Vaidyeshwara@Sun.COM  *		mi_async_curr[NFS_MAX_ASYNC_QUEUES]
3110Sstevel@tonic-gate  *		mi_async_clusters
3120Sstevel@tonic-gate  *		mi_async_init_clusters
31311507SVallish.Vaidyeshwara@Sun.COM  *		mi_threads[NFS_MAX_ASYNC_QUEUES]
3140Sstevel@tonic-gate  *		mi_manager_thread
3150Sstevel@tonic-gate  *
3160Sstevel@tonic-gate  *	Normally the netconfig information for the mount comes from
3170Sstevel@tonic-gate  *	mi_curr_serv and mi_klmconfig is NULL.  If NLM calls need to use a
3180Sstevel@tonic-gate  *	different transport, mi_klmconfig contains the necessary netconfig
3190Sstevel@tonic-gate  *	information.
3200Sstevel@tonic-gate  *
3210Sstevel@tonic-gate  *	'mi_zone' is initialized at structure creation time, and never
3220Sstevel@tonic-gate  *	changes; it may be read without a lock.
3230Sstevel@tonic-gate  *
3240Sstevel@tonic-gate  *	mi_zone_node is linkage into the mi4_globals.mig_list, and is
3250Sstevel@tonic-gate  *	protected by mi4_globals.mig_list_lock.
3260Sstevel@tonic-gate  *
3270Sstevel@tonic-gate  *	Locking order:
3280Sstevel@tonic-gate  *	  mi_globals::mig_lock > mi_async_lock > mi_lock
3290Sstevel@tonic-gate  */
typedef struct mntinfo {
	kmutex_t	mi_lock;	/* protects mntinfo fields */
	struct servinfo *mi_servers;    /* server list */
	struct servinfo *mi_curr_serv;  /* current server */
	kcondvar_t	mi_failover_cv;	/* failover synchronization */
	int		mi_readers;	/* failover - users of mi_curr_serv */
	struct vfs	*mi_vfsp;	/* back pointer to vfs */
	enum vtype	mi_type;	/* file type of the root vnode */
	uint_t		mi_flags;	/* see MI_* flags below */
	uint_t		mi_tsize;	/* max read transfer size (bytes) */
	uint_t		mi_stsize;	/* max write transfer size (bytes) */
	int		mi_timeo;	/* initial timeout in 10th sec */
	int		mi_retrans;	/* times to retry request */
	hrtime_t	mi_acregmin;	/* min time to hold cached file attr */
	hrtime_t	mi_acregmax;	/* max time to hold cached file attr */
	hrtime_t	mi_acdirmin;	/* min time to hold cached dir attr */
	hrtime_t	mi_acdirmax;	/* max time to hold cached dir attr */
	len_t		mi_maxfilesize; /* for pathconf _PC_FILESIZEBITS */
	/*
	 * Extra fields for congestion control, one per NFS call type,
	 * plus one global one.
	 */
	struct rpc_timers mi_timers[NFS_CALLTYPES+1];
	int		mi_curread;	/* current read size */
	int		mi_curwrite;	/* current write size */
	/*
	 * Async I/O management
	 * We have 2 pools of threads working on async I/O:
	 *	(i) Threads which work on all async queues. Default number of
	 *	threads in this queue is 8. Threads in this pool work on async
	 *	queue pointed by mi_async_curr[NFS_ASYNC_QUEUE]. Number of
	 *	active threads in this pool is tracked by
	 *	mi_threads[NFS_ASYNC_QUEUE].
	 *	(ii) Threads which work only on page op async queues.
	 *	Page ops queue comprises of NFS_PUTAPAGE, NFS_PAGEIO &
	 *	NFS_COMMIT. Default number of threads in this queue is 2
	 *	(NUM_ASYNC_PGOPS_THREADS). Threads in this pool work on async
	 *	queue pointed by mi_async_curr[NFS_ASYNC_PGOPS_QUEUE]. Number
	 *	of active threads in this pool is tracked by
	 *	mi_threads[NFS_ASYNC_PGOPS_QUEUE].
	 */
	struct nfs_async_reqs *mi_async_reqs[NFS_ASYNC_TYPES];
	struct nfs_async_reqs *mi_async_tail[NFS_ASYNC_TYPES];
	struct nfs_async_reqs **mi_async_curr[NFS_MAX_ASYNC_QUEUES];
						/* current async queue */
	uint_t		mi_async_clusters[NFS_ASYNC_TYPES];
	uint_t		mi_async_init_clusters;
	uint_t		mi_async_req_count; /* # outstanding work requests */
	kcondvar_t	mi_async_reqs_cv; /* signaled when there's work */
	ushort_t	mi_threads[NFS_MAX_ASYNC_QUEUES];
					/* number of active async threads */
	ushort_t	mi_max_threads;	/* max number of async worker threads */
	kthread_t	*mi_manager_thread;  /* async manager thread */
	kcondvar_t	mi_async_cv; /* signaled when the last worker dies */
	kcondvar_t	mi_async_work_cv[NFS_MAX_ASYNC_QUEUES];
					/* tell workers to work */
	kmutex_t	mi_async_lock;	/* lock to protect async list */
	/*
	 * Other stuff
	 */
	struct pathcnf *mi_pathconf;	/* static pathconf kludge */
	rpcprog_t	mi_prog;	/* RPC program number */
	rpcvers_t	mi_vers;	/* RPC program version number */
	char		**mi_rfsnames;	/* mapping to proc names */
	kstat_named_t	*mi_reqs;	/* count of requests */
	uchar_t		*mi_call_type;	/* dynamic retrans call types */
	uchar_t		*mi_ss_call_type;	/* semisoft call type */
	uchar_t		*mi_timer_type;	/* dynamic retrans timer types */
	clock_t		mi_printftime;	/* last error printf time */
	/*
	 * ACL entries
	 */
	char		**mi_aclnames;	/* mapping to proc names */
	kstat_named_t	*mi_aclreqs;	/* count of acl requests */
	uchar_t		*mi_acl_call_type; /* dynamic retrans call types */
	uchar_t		*mi_acl_ss_call_type; /* semisoft call types */
	uchar_t		*mi_acl_timer_type; /* dynamic retrans timer types */
	/*
	 * Client Side Failover stats
	 */
	uint_t		mi_noresponse;	/* server not responding count */
	uint_t		mi_failover; 	/* failover to new server count */
	uint_t		mi_remap;	/* remap to new server count */
	/*
	 * Kstat statistics
	 */
	struct kstat	*mi_io_kstats;	/* I/O statistics */
	struct kstat	*mi_ro_kstats;	/* read-only mntinfo statistics */
	struct knetconfig *mi_klmconfig; /* netconfig for NLM, if different */
	/*
	 * Zones support.
	 */
	struct zone	*mi_zone;	/* Zone in which FS is mounted */
	zone_ref_t	mi_zone_ref;	/* Reference to aforementioned zone */
	list_node_t	mi_zone_node;	/* Linkage into per-zone mi list */
	/*
	 * Serializes threads in failover_remap.
	 * Need to acquire this lock first in failover_remap() function
	 * before acquiring any other rnode lock.
	 */
	kmutex_t	mi_remap_lock;
} mntinfo_t;
432*13096SJordan.Vaughan@Sun.com #endif	/* _KERNEL */
4330Sstevel@tonic-gate 
/*
 * vfs pointer to mount info.
 * NOTE(review): assumes vfs_data holds a mntinfo_t, i.e. the vfs is an
 * NFS mount — callers must not apply this to other filesystems.
 */
#define	VFTOMI(vfsp)	((mntinfo_t *)((vfsp)->vfs_data))

/*
 * vnode pointer to mount info (same NFS-only assumption as VFTOMI).
 */
#define	VTOMI(vp)	((mntinfo_t *)(((vp)->v_vfsp)->vfs_data))
4430Sstevel@tonic-gate 
/*
 * The values for mi_flags (bit flags; mi_flags is protected by mi_lock).
 */
#define	MI_HARD		0x1		/* hard or soft mount */
#define	MI_PRINTED	0x2		/* not responding message printed */
#define	MI_INT		0x4		/* interrupts allowed on hard mount */
#define	MI_DOWN		0x8		/* server is down */
#define	MI_NOAC		0x10		/* don't cache attributes */
#define	MI_NOCTO	0x20		/* no close-to-open consistency */
#define	MI_DYNAMIC	0x40		/* dynamic transfer size adjustment */
#define	MI_LLOCK	0x80		/* local locking only (no lockmgr) */
#define	MI_GRPID	0x100		/* System V group id inheritance */
#define	MI_RPCTIMESYNC	0x200		/* RPC time sync */
#define	MI_LINK		0x400		/* server supports link */
#define	MI_SYMLINK	0x800		/* server supports symlink */
#define	MI_READDIRONLY	0x1000		/* use readdir instead of readdirplus */
#define	MI_ACL		0x2000		/* server supports NFS_ACL */
#define	MI_BINDINPROG	0x4000		/* binding to server is changing */
#define	MI_LOOPBACK	0x8000		/* Set if this is a loopback mount */
#define	MI_SEMISOFT	0x10000		/* soft reads, hard modify */
#define	MI_NOPRINT	0x20000		/* don't print messages */
#define	MI_DIRECTIO	0x40000		/* do direct I/O */
#define	MI_EXTATTR	0x80000		/* server supports extended attrs */
#define	MI_ASYNC_MGR_STOP	0x100000	/* tell async mgr to die */
#define	MI_DEAD		0x200000	/* mount has been terminated */
4690Sstevel@tonic-gate 
/*
 * Read-only mntinfo statistics.
 * NOTE(review): this layout mirrors selected mntinfo_t fields and appears
 * to be exported to userland (via kstat) — confirm consumers before
 * reordering or resizing any member.
 */
struct mntinfo_kstat {
	char		mik_proto[KNC_STRSIZE];	/* transport protocol name */
	uint32_t	mik_vers;	/* NFS protocol version */
	uint_t		mik_flags;	/* mi_flags snapshot */
	uint_t		mik_secmod;	/* security mode */
	uint32_t	mik_curread;	/* current read size */
	uint32_t	mik_curwrite;	/* current write size */
	int		mik_timeo;	/* initial timeout, 10ths of a sec */
	int		mik_retrans;	/* times to retry request */
	uint_t		mik_acregmin;	/* min secs to hold cached file attr */
	uint_t		mik_acregmax;	/* max secs to hold cached file attr */
	uint_t		mik_acdirmin;	/* min secs to hold cached dir attr */
	uint_t		mik_acdirmax;	/* max secs to hold cached dir attr */
	struct {
		uint32_t srtt;		/* smoothed round-trip time */
		uint32_t deviate;	/* round-trip deviation */
		uint32_t rtxcur;	/* current retransmit timeout */
	} mik_timers[NFS_CALLTYPES+1];	/* one per call type + global */
	uint32_t	mik_noresponse;	/* server not responding count */
	uint32_t	mik_failover;	/* failover to new server count */
	uint32_t	mik_remap;	/* remap to new server count */
	char		mik_curserver[SYS_NMLN];	/* current server name */
};
4960Sstevel@tonic-gate 
/*
 * Wake up one sleeping async worker thread: prefer a waiter from the
 * general-purpose pool, falling back to the page-ops-only pool.
 *
 * Wrapped in do/while (0) so the macro behaves as a single statement and
 * is safe in unbraced if/else bodies (CERT PRE10-C); the original bare
 * brace block would have attached a caller's "else" to the wrong "if".
 */
#define	NFS_WAKE_ASYNC_WORKER(work_cv)	do {				\
	if (CV_HAS_WAITERS(&work_cv[NFS_ASYNC_QUEUE]))			\
		cv_signal(&work_cv[NFS_ASYNC_QUEUE]);			\
	else if (CV_HAS_WAITERS(&work_cv[NFS_ASYNC_PGOPS_QUEUE]))	\
		cv_signal(&work_cv[NFS_ASYNC_PGOPS_QUEUE]);		\
} while (0)
50611507SVallish.Vaidyeshwara@Sun.COM 
/*
 * Wake every async worker thread in both pools (e.g. at teardown).
 * do/while (0) makes the multi-statement macro a single statement.
 */
#define	NFS_WAKEALL_ASYNC_WORKERS(work_cv) do {				\
	cv_broadcast(&work_cv[NFS_ASYNC_QUEUE]);			\
	cv_broadcast(&work_cv[NFS_ASYNC_PGOPS_QUEUE]);			\
} while (0)
51111507SVallish.Vaidyeshwara@Sun.COM 
/*
 * Mark cached attributes as timed out
 *
 * The caller must not be holding the rnode r_statelock mutex.
 * do/while (0) makes the multi-statement macro a single statement,
 * safe inside unbraced if/else bodies (CERT PRE10-C).
 */
#define	PURGE_ATTRCACHE(vp)	do {				\
	rnode_t *rp = VTOR(vp);					\
	mutex_enter(&rp->r_statelock);				\
	PURGE_ATTRCACHE_LOCKED(rp);				\
	mutex_exit(&rp->r_statelock);				\
} while (0)
5230Sstevel@tonic-gate 
/*
 * Mark cached attributes as timed out; caller holds r_statelock.
 * Setting r_attrtime to "now" makes ATTRCACHE_VALID() false from here on.
 * do/while (0) makes the multi-statement macro a single statement.
 */
#define	PURGE_ATTRCACHE_LOCKED(rp)	do {			\
	ASSERT(MUTEX_HELD(&rp->r_statelock));			\
	rp->r_attrtime = gethrtime();				\
	rp->r_mtime = rp->r_attrtime;				\
} while (0)
5290Sstevel@tonic-gate 
/*
 * Is the attribute cache valid?  True while the current time is still
 * before the expiry time stamped into r_attrtime.
 */
#define	ATTRCACHE_VALID(vp)	(gethrtime() < VTOR(vp)->r_attrtime)
5340Sstevel@tonic-gate 
/*
 * Flags to indicate whether to purge the DNLC for non-directory vnodes
 * in a call to nfs_purge_caches.
 */
#define	NFS_NOPURGE_DNLC	0	/* leave DNLC entries alone */
#define	NFS_PURGE_DNLC		1	/* purge DNLC entries too */
5410Sstevel@tonic-gate 
/*
 * If returned error is ESTALE flush all caches: mark the rnode stale,
 * record the first error, invalidate cached pages and purge the caches.
 * If the stale vnode is this server's root, also flag the servinfo so
 * failover logic can react.
 *
 * Wrapped in do/while (0): the original expanded to a bare "if"
 * statement, so a caller's "else" could silently bind to the macro's
 * internal "if" (dangling-else hazard, CERT PRE10-C).
 */
#define	PURGE_STALE_FH(error, vp, cr)	do {			\
	if ((error) == ESTALE) {				\
		struct rnode *rp = VTOR(vp);			\
		if (vp->v_flag & VROOT) {			\
			servinfo_t *svp = rp->r_server;		\
			mutex_enter(&svp->sv_lock);		\
			svp->sv_flags |= SV_ROOT_STALE;		\
			mutex_exit(&svp->sv_lock);		\
		}						\
		mutex_enter(&rp->r_statelock);			\
		rp->r_flags |= RSTALE;				\
		if (!rp->r_error)				\
			rp->r_error = (error);			\
		mutex_exit(&rp->r_statelock);			\
		if (vn_has_cached_data(vp))			\
			nfs_invalidate_pages((vp), (u_offset_t)0, (cr)); \
		nfs_purge_caches((vp), NFS_PURGE_DNLC, (cr));	\
	}							\
} while (0)
5630Sstevel@tonic-gate 
/*
 * Is the data cache valid?
 *
 * Swap files are always considered valid.  Otherwise the cache is
 * valid only when both the modification time (a timestruc_t, compared
 * to full nanosecond precision) and the file size match the cached
 * attributes.  (The old comment's claim about "no attributes
 * (attrtime == 0)" did not match the code: attrtime is never examined
 * here.)  Caller should be holding the rnode r_statelock mutex.
 */
#define	CACHE_VALID(rp, mtime, fsize)				\
	((RTOV((rp))->v_flag & VISSWAP) == VISSWAP ||		\
	(((mtime).tv_sec == (rp)->r_attr.va_mtime.tv_sec &&	\
	(mtime).tv_nsec == (rp)->r_attr.va_mtime.tv_nsec) &&	\
	((fsize) == (rp)->r_attr.va_size)))
5760Sstevel@tonic-gate 
/*
 * Macro to detect forced unmount or a zone shutdown.
 *
 * True when the vfs has been (forcibly) unmounted, or when the
 * calling process's zone has reached or passed the shutting-down
 * state; either condition means the file system is going away.
 */
#define	FS_OR_ZONE_GONE(vfsp) \
	(((vfsp)->vfs_flag & VFS_UNMOUNTED) || \
	zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)
5830Sstevel@tonic-gate 
/*
 * Convert NFS tunables to hrtime_t units, seconds to nanoseconds.
 */
#define	SEC2HR(sec)	((sec) * (long long)NANOSEC)	/* seconds -> ns */
#define	HR2SEC(hr)	((hr) / (long long)NANOSEC)	/* ns -> seconds (truncating) */
5890Sstevel@tonic-gate 
/*
 * Structure to identify owner of a PC file share reservation.
 */
struct nfs_owner {
	int	magic;		/* magic uniquifying number (NFS_OWNER_MAGIC) */
	char	hname[16];	/* first 16 bytes of hostname */
	char	lowner[8];	/* local owner from fcntl */
};
5980Sstevel@tonic-gate 
/*
 * Values for magic.
 */
#define	NFS_OWNER_MAGIC	0x1D81E		/* identifies a struct nfs_owner */
6030Sstevel@tonic-gate 
/*
 * Support for extended attributes: reserved names used to represent
 * the extended-attribute directory internally.
 */
#define	XATTR_DIR_NAME	"/@/"		/* used for DNLC entries */
#define	XATTR_RPATH	"ExTaTtR"	/* used for r_path for failover */
6090Sstevel@tonic-gate 
/*
 * Short hand for checking to see whether the file system was mounted
 * interruptible or not: nonzero when MI_INT is set in the mount's
 * mntinfo flags (presumably set by the "intr" mount option -- confirm
 * in the mount code).
 */
#define	INTR(vp)	(VTOMI(vp)->mi_flags & MI_INT)
6150Sstevel@tonic-gate 
/*
 * Short hand for checking whether failover is enabled or not: a mount
 * has failover available when its servinfo list holds more than one
 * entry, i.e. the first entry's sv_next link is non-NULL.
 * (mi) is parenthesized so any mntinfo pointer expression (e.g.
 * &foo or a ternary) expands correctly.
 */
#define	FAILOVER_MOUNT(mi)	((mi)->mi_servers->sv_next)
6200Sstevel@tonic-gate 
/*
 * How long will async threads wait for additional work.
 */
#define	NFS_ASYNC_TIMEOUT	(60 * 1 * hz)	/* 1 minute, in clock ticks (hz = ticks/sec) */
6250Sstevel@tonic-gate 
#ifdef _KERNEL
/* Obtain an RPC client handle (and its chtab entry) for a server. */
extern int	clget(clinfo_t *, servinfo_t *, cred_t *, CLIENT **,
		    struct chtab **);
/* Release a client handle obtained via clget(). */
extern void	clfree(CLIENT *, struct chtab *);
/* Add a mntinfo to the per-zone mount list. */
extern void	nfs_mi_zonelist_add(mntinfo_t *);
/* Tear down and free a mntinfo. */
extern void	nfs_free_mi(mntinfo_t *);
/* Initialize per-mount kstats for an NFS vfs. */
extern void	nfs_mnt_kstat_init(struct vfs *);
#endif
6340Sstevel@tonic-gate 
/*
 * Per-zone data for managing client handles.  Included here solely for the
 * benefit of MDB.
 */
/*
 * client side statistics (exported as kstats)
 */
struct clstat {
	kstat_named_t	calls;			/* client requests */
	kstat_named_t	badcalls;		/* rpc failures */
	kstat_named_t	clgets;			/* client handle gets */
	kstat_named_t	cltoomany;		/* client handle cache misses */
/* the remaining counters are maintained only in DEBUG kernels */
#ifdef DEBUG
	kstat_named_t	clalloc;		/* number of client handles */
	kstat_named_t	noresponse;		/* server not responding cnt */
	kstat_named_t	failover;		/* server failover count */
	kstat_named_t	remap;			/* server remap count */
#endif
};
6540Sstevel@tonic-gate 
/*
 * Per-zone NFS client state: the client-handle cache, its lock, the
 * owning zone, list linkage, and the per-zone statistics above.
 */
struct nfs_clnt {
	struct chhead	*nfscl_chtable;		/* client handle cache */
	kmutex_t	nfscl_chtable_lock;	/* protects nfscl_chtable */
	zoneid_t	nfscl_zoneid;		/* zone this state belongs to */
	list_node_t	nfscl_node;		/* linkage for list of per-zone instances */
	struct clstat	nfscl_stat;		/* per-zone client statistics */
};
6620Sstevel@tonic-gate 
6630Sstevel@tonic-gate #ifdef	__cplusplus
6640Sstevel@tonic-gate }
6650Sstevel@tonic-gate #endif
6660Sstevel@tonic-gate 
6670Sstevel@tonic-gate #endif	/* _NFS_NFS_CLNT_H */
668