xref: /onnv-gate/usr/src/uts/common/rpc/clnt_cots.c (revision 4457:24e5834a136e)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51676Sjpk  * Common Development and Distribution License (the "License").
61676Sjpk  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*4457Svv149972  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
270Sstevel@tonic-gate  * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
280Sstevel@tonic-gate  *		All Rights Reserved
290Sstevel@tonic-gate  */
300Sstevel@tonic-gate 
310Sstevel@tonic-gate /*
320Sstevel@tonic-gate  * Portions of this source code were derived from Berkeley 4.3 BSD
330Sstevel@tonic-gate  * under license from the Regents of the University of California.
340Sstevel@tonic-gate  */
350Sstevel@tonic-gate 
360Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
370Sstevel@tonic-gate 
380Sstevel@tonic-gate /*
390Sstevel@tonic-gate  * Implements a kernel based, client side RPC over Connection Oriented
400Sstevel@tonic-gate  * Transports (COTS).
410Sstevel@tonic-gate  */
420Sstevel@tonic-gate 
430Sstevel@tonic-gate /*
440Sstevel@tonic-gate  * Much of this file has been re-written to let NFS work better over slow
450Sstevel@tonic-gate  * transports. A description follows.
460Sstevel@tonic-gate  *
470Sstevel@tonic-gate  * One of the annoying things about kRPC/COTS is that it will temporarily
480Sstevel@tonic-gate  * create more than one connection between a client and server. This
490Sstevel@tonic-gate  * happens because when a connection is made, the end-points entry in the
500Sstevel@tonic-gate  * linked list of connections (headed by cm_hd), is removed so that other
510Sstevel@tonic-gate  * threads don't mess with it. Went ahead and bit the bullet by keeping
520Sstevel@tonic-gate  * the endpoint on the connection list and introducing state bits,
530Sstevel@tonic-gate  * condition variables etc. to the connection entry data structure (struct
540Sstevel@tonic-gate  * cm_xprt).
550Sstevel@tonic-gate  *
560Sstevel@tonic-gate  * Here is a summary of the changes to cm-xprt:
570Sstevel@tonic-gate  *
580Sstevel@tonic-gate  *	x_ctime is the timestamp of when the endpoint was last
590Sstevel@tonic-gate  *	connected or disconnected. If an end-point is ever disconnected
600Sstevel@tonic-gate  *	or re-connected, then any outstanding RPC request is presumed
610Sstevel@tonic-gate  *	lost, telling clnt_cots_kcallit that it needs to re-send the
620Sstevel@tonic-gate  *	request, not just wait for the original request's reply to
630Sstevel@tonic-gate  *	arrive.
640Sstevel@tonic-gate  *
650Sstevel@tonic-gate  *	x_thread flag which tells us if a thread is doing a connection attempt.
660Sstevel@tonic-gate  *
670Sstevel@tonic-gate  *	x_waitdis flag which tells us we are waiting a disconnect ACK.
680Sstevel@tonic-gate  *
690Sstevel@tonic-gate  *	x_needdis flag which tells us we need to send a T_DISCONN_REQ
700Sstevel@tonic-gate  *	to kill the connection.
710Sstevel@tonic-gate  *
720Sstevel@tonic-gate  *	x_needrel flag which tells us we need to send a T_ORDREL_REQ to
730Sstevel@tonic-gate  *	gracefully close the connection.
740Sstevel@tonic-gate  *
 *	#defined bitmasks for all the b_* bits so that more
 *	efficient (and at times less clumsy) masks can be used to
 *	manipulate state in cases where multiple bits have to be
 *	set/cleared/checked in the same critical section.
790Sstevel@tonic-gate  *
800Sstevel@tonic-gate  *	x_conn_cv and x_dis-_cv are new condition variables to let
810Sstevel@tonic-gate  *	threads knows when the connection attempt is done, and to let
820Sstevel@tonic-gate  *	the connecting thread know when the disconnect handshake is
830Sstevel@tonic-gate  *	done.
840Sstevel@tonic-gate  *
850Sstevel@tonic-gate  * Added the CONN_HOLD() macro so that all reference holds have the same
860Sstevel@tonic-gate  * look and feel.
870Sstevel@tonic-gate  *
880Sstevel@tonic-gate  * In the private (cku_private) portion of the client handle,
890Sstevel@tonic-gate  *
 *	cku_flags replaces cku_sent, a boolean. cku_flags keeps
 *	track of whether a request has been sent, and whether the
 *	client handle's call record is on the dispatch list (so that
 *	the reply can be matched by XID to the right client handle).
940Sstevel@tonic-gate  *	The idea of CKU_ONQUEUE is that we can exit clnt_cots_kcallit()
950Sstevel@tonic-gate  *	and still have the response find the right client handle so
960Sstevel@tonic-gate  *	that the retry of CLNT_CALL() gets the result. Testing, found
970Sstevel@tonic-gate  *	situations where if the timeout was increased, performance
980Sstevel@tonic-gate  *	degraded. This was due to us hitting a window where the thread
990Sstevel@tonic-gate  *	was back in rfscall() (probably printing server not responding)
1000Sstevel@tonic-gate  *	while the response came back but no place to put it.
1010Sstevel@tonic-gate  *
 *	cku_ctime is just a cache of x_ctime. If they match,
 *	clnt_cots_kcallit() won't send a retry (unless the maximum
 *	receive count limit has been reached). If they don't match, then
 *	we assume the request has been lost, and a retry of the request
 *	is needed.
1070Sstevel@tonic-gate  *
1080Sstevel@tonic-gate  *	cku_recv_attempts counts the number of receive count attempts
1090Sstevel@tonic-gate  *	after one try is sent on the wire.
1100Sstevel@tonic-gate  *
1110Sstevel@tonic-gate  * Added the clnt_delay() routine so that interruptible and
1120Sstevel@tonic-gate  * noninterruptible delays are possible.
1130Sstevel@tonic-gate  *
 * CLNT_MIN_TIMEOUT has been bumped to 10 seconds from 3. This is used to
 * control how long the client delays before returning after getting
 * ECONNREFUSED. At 3 seconds, 8 client threads per mount really does bash
 * a server that may be booting and not yet started nfsd.
1180Sstevel@tonic-gate  *
1190Sstevel@tonic-gate  * CLNT_MAXRECV_WITHOUT_RETRY is a new macro (value of 3) (with a tunable)
1200Sstevel@tonic-gate  * Why don't we just wait forever (receive an infinite # of times)?
1210Sstevel@tonic-gate  * Because the server may have rebooted. More insidious is that some
1220Sstevel@tonic-gate  * servers (ours) will drop NFS/TCP requests in some cases. This is bad,
1230Sstevel@tonic-gate  * but it is a reality.
1240Sstevel@tonic-gate  *
 * The case of a server doing orderly release really messes up the
 * client's recovery, especially if the server's TCP implementation is
 * buggy.  It was found that the kRPC/COTS client was breaking some
 * TPI rules, such as not waiting for the acknowledgement of a
 * T_DISCON_REQ (hence the added case statements T_ERROR_ACK, T_OK_ACK and
 * T_DISCON_REQ in clnt_dispatch_notifyall()).
1310Sstevel@tonic-gate  *
1320Sstevel@tonic-gate  * One of things that we've seen is that a kRPC TCP endpoint goes into
1330Sstevel@tonic-gate  * TIMEWAIT and a thus a reconnect takes a long time to satisfy because
1340Sstevel@tonic-gate  * that the TIMEWAIT state takes a while to finish.  If a server sends a
1350Sstevel@tonic-gate  * T_ORDREL_IND, there is little point in an RPC client doing a
1360Sstevel@tonic-gate  * T_ORDREL_REQ, because the RPC request isn't going to make it (the
1370Sstevel@tonic-gate  * server is saying that it won't accept any more data). So kRPC was
1380Sstevel@tonic-gate  * changed to send a T_DISCON_REQ when we get a T_ORDREL_IND. So now the
1390Sstevel@tonic-gate  * connection skips the TIMEWAIT state and goes straight to a bound state
1400Sstevel@tonic-gate  * that kRPC can quickly switch to connected.
1410Sstevel@tonic-gate  *
1420Sstevel@tonic-gate  * Code that issues TPI request must use waitforack() to wait for the
1430Sstevel@tonic-gate  * corresponding ack (assuming there is one) in any future modifications.
1440Sstevel@tonic-gate  * This works around problems that may be introduced by breaking TPI rules
1450Sstevel@tonic-gate  * (by submitting new calls before earlier requests have been acked) in the
1460Sstevel@tonic-gate  * case of a signal or other early return.  waitforack() depends on
1470Sstevel@tonic-gate  * clnt_dispatch_notifyconn() to issue the wakeup when the ack
1480Sstevel@tonic-gate  * arrives, so adding new TPI calls may require corresponding changes
1490Sstevel@tonic-gate  * to clnt_dispatch_notifyconn(). Presently, the timeout period is based on
1500Sstevel@tonic-gate  * CLNT_MIN_TIMEOUT which is 10 seconds. If you modify this value, be sure
1510Sstevel@tonic-gate  * not to set it too low or TPI ACKS will be lost.
1520Sstevel@tonic-gate  */
1530Sstevel@tonic-gate 
1540Sstevel@tonic-gate #include <sys/param.h>
1550Sstevel@tonic-gate #include <sys/types.h>
1560Sstevel@tonic-gate #include <sys/user.h>
1570Sstevel@tonic-gate #include <sys/systm.h>
1580Sstevel@tonic-gate #include <sys/sysmacros.h>
1590Sstevel@tonic-gate #include <sys/proc.h>
1600Sstevel@tonic-gate #include <sys/socket.h>
1610Sstevel@tonic-gate #include <sys/file.h>
1620Sstevel@tonic-gate #include <sys/stream.h>
1630Sstevel@tonic-gate #include <sys/strsubr.h>
1640Sstevel@tonic-gate #include <sys/stropts.h>
1650Sstevel@tonic-gate #include <sys/strsun.h>
1660Sstevel@tonic-gate #include <sys/timod.h>
1670Sstevel@tonic-gate #include <sys/tiuser.h>
1680Sstevel@tonic-gate #include <sys/tihdr.h>
1690Sstevel@tonic-gate #include <sys/t_kuser.h>
1700Sstevel@tonic-gate #include <sys/fcntl.h>
1710Sstevel@tonic-gate #include <sys/errno.h>
1720Sstevel@tonic-gate #include <sys/kmem.h>
1730Sstevel@tonic-gate #include <sys/debug.h>
1740Sstevel@tonic-gate #include <sys/systm.h>
1750Sstevel@tonic-gate #include <sys/kstat.h>
1760Sstevel@tonic-gate #include <sys/t_lock.h>
1770Sstevel@tonic-gate #include <sys/ddi.h>
1780Sstevel@tonic-gate #include <sys/cmn_err.h>
1790Sstevel@tonic-gate #include <sys/time.h>
1800Sstevel@tonic-gate #include <sys/isa_defs.h>
1810Sstevel@tonic-gate #include <sys/callb.h>
1820Sstevel@tonic-gate #include <sys/sunddi.h>
1830Sstevel@tonic-gate #include <sys/atomic.h>
1840Sstevel@tonic-gate 
1850Sstevel@tonic-gate #include <netinet/in.h>
1860Sstevel@tonic-gate #include <netinet/tcp.h>
1870Sstevel@tonic-gate 
1880Sstevel@tonic-gate #include <rpc/types.h>
1890Sstevel@tonic-gate #include <rpc/xdr.h>
1900Sstevel@tonic-gate #include <rpc/auth.h>
1910Sstevel@tonic-gate #include <rpc/clnt.h>
1920Sstevel@tonic-gate #include <rpc/rpc_msg.h>
1930Sstevel@tonic-gate 
#define	COTS_DEFAULT_ALLOCSIZE	2048	/* default output mblk length (cku_outbuflen) */

#define	WIRE_HDR_SIZE	20	/* serialized call header, sans proc number */
#define	MSG_OFFSET	128	/* offset of call into the mblk */

/* IPv6 address-to-presentation helper (defined elsewhere in the kernel). */
const char *kinet_ntop6(uchar_t *, char *, size_t);

/*
 * Forward declarations for the COTS client operations; these populate
 * the tcp_ops vector below.
 */
static int	clnt_cots_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void(*)(int, int, caddr_t), caddr_t, uint32_t);
static enum clnt_stat	clnt_cots_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void	clnt_cots_kabort(CLIENT *);
static void	clnt_cots_kerror(CLIENT *, struct rpc_err *);
static bool_t	clnt_cots_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static void	clnt_cots_kdestroy(CLIENT *);
static bool_t	clnt_cots_kcontrol(CLIENT *, int, char *);
2100Sstevel@tonic-gate 
2110Sstevel@tonic-gate 
/*
 * List of transports managed by the connection manager.  One entry per
 * client-side COTS endpoint; the list is headed by cm_hd and protected
 * by connmgr_lock (see the globals below).
 */
struct cm_xprt {
	TIUSER		*x_tiptr;	/* transport handle */
	queue_t		*x_wq;		/* send queue */
	clock_t		x_time;		/* last time we handed this xprt out */
	clock_t		x_ctime;	/* time we went to CONNECTED */
	int		x_tidu_size;    /* TIDU size of this transport */
	union {
	    struct {
		unsigned int
#ifdef	_BIT_FIELDS_HTOL
		b_closing:	1,	/* we've sent a ord rel on this conn */
		b_dead:		1,	/* transport is closed or disconn */
		b_doomed:	1,	/* too many conns, let this go idle */
		b_connected:	1,	/* this connection is connected */

		b_ordrel:	1,	/* do an orderly release? */
		b_thread:	1,	/* thread doing connect */
		b_waitdis:	1,	/* waiting for disconnect ACK */
		b_needdis:	1,	/* need T_DISCON_REQ */

		b_needrel:	1,	/* need T_ORDREL_REQ */
		b_early_disc:	1,	/* got a T_ORDREL_IND or T_DISCON_IND */
					/* disconnect during connect */

		b_pad:		22;

#endif

#ifdef	_BIT_FIELDS_LTOH
		/* Same bits as above, listed in reverse for LTOH layout. */
		b_pad:		22,

		b_early_disc:	1,	/* got a T_ORDREL_IND or T_DISCON_IND */
					/* disconnect during connect */
		b_needrel:	1,	/* need T_ORDREL_REQ */

		b_needdis:	1,	/* need T_DISCON_REQ */
		b_waitdis:	1,	/* waiting for disconnect ACK */
		b_thread:	1,	/* thread doing connect */
		b_ordrel:	1,	/* do an orderly release? */

		b_connected:	1,	/* this connection is connected */
		b_doomed:	1,	/* too many conns, let this go idle */
		b_dead:		1,	/* transport is closed or disconn */
		b_closing:	1;	/* we've sent a ord rel on this conn */
#endif
	    } bit;	    unsigned int word;

/* Shorthands for accessing the individual state bits. */
#define	x_closing	x_state.bit.b_closing
#define	x_dead		x_state.bit.b_dead
#define	x_doomed	x_state.bit.b_doomed
#define	x_connected	x_state.bit.b_connected

#define	x_ordrel	x_state.bit.b_ordrel
#define	x_thread	x_state.bit.b_thread
#define	x_waitdis	x_state.bit.b_waitdis
#define	x_needdis	x_state.bit.b_needdis

#define	x_needrel	x_state.bit.b_needrel
#define	x_early_disc    x_state.bit.b_early_disc

/* Whole-word view of the state, for multi-bit mask operations. */
#define	x_state_flags	x_state.word

/* Bit masks corresponding to the b_* fields above (HTOL order). */
#define	X_CLOSING	0x80000000
#define	X_DEAD		0x40000000
#define	X_DOOMED	0x20000000
#define	X_CONNECTED	0x10000000

#define	X_ORDREL	0x08000000
#define	X_THREAD	0x04000000
#define	X_WAITDIS	0x02000000
#define	X_NEEDDIS	0x01000000

#define	X_NEEDREL	0x00800000
#define	X_EARLYDISC	0x00400000

/* Combined mask of the closing/dead/doomed bits. */
#define	X_BADSTATES	(X_CLOSING | X_DEAD | X_DOOMED)

	}		x_state;
	int		x_ref;		/* number of users of this xprt */
	int		x_family;	/* address family of transport */
	dev_t		x_rdev;		/* device number of transport */
	struct cm_xprt	*x_next;	/* next entry on the cm_hd list */

	struct netbuf	x_server;	/* destination address */
	struct netbuf	x_src;		/* src address (for retries) */
	kmutex_t	x_lock;		/* lock on this entry */
	kcondvar_t	x_cv;		/* to signal when can be closed */
	kcondvar_t	x_conn_cv;	/* to signal when connection attempt */
					/* is complete */
	kstat_t		*x_ksp;		/* per-connection kstat */

	kcondvar_t	x_dis_cv;	/* to signal when disconnect attempt */
					/* is complete */
	zoneid_t	x_zoneid;	/* zone this xprt belongs to */
};
3080Sstevel@tonic-gate 
/*
 * Per-connection kstat counters.  Field order must match the
 * cm_kstat_template initializer below.
 */
typedef struct cm_kstat_xprt {
	kstat_named_t	x_wq;		/* "write_queue" */
	kstat_named_t	x_server;	/* "server" (address string) */
	kstat_named_t	x_family;	/* "addr_family" */
	kstat_named_t	x_rdev;		/* "device" */
	kstat_named_t	x_time;		/* "time_stamp" */
	kstat_named_t	x_state;	/* "status" */
	kstat_named_t	x_ref;		/* "ref_count" */
	kstat_named_t	x_port;		/* "port" */
} cm_kstat_xprt_t;

/* Name/type template for each connection's kstat data. */
static cm_kstat_xprt_t cm_kstat_template = {
	{ "write_queue", KSTAT_DATA_UINT32 },
	{ "server",	KSTAT_DATA_STRING },
	{ "addr_family", KSTAT_DATA_UINT32 },
	{ "device",	KSTAT_DATA_UINT32 },
	{ "time_stamp",	KSTAT_DATA_UINT32 },
	{ "status",	KSTAT_DATA_UINT32 },
	{ "ref_count",	KSTAT_DATA_INT32 },
	{ "port",	KSTAT_DATA_UINT32 },
};
3300Sstevel@tonic-gate 
/*
 * Take a counted reference on a connection entry, under the entry's own
 * lock (x_lock protects x_ref).  The inverse of this is connmgr_release().
 */
#define	CONN_HOLD(Cm_entry)	{\
	mutex_enter(&(Cm_entry)->x_lock);	\
	(Cm_entry)->x_ref++;	\
	mutex_exit(&(Cm_entry)->x_lock);	\
}
3390Sstevel@tonic-gate 
3400Sstevel@tonic-gate 
/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_cots_kcreate, and freed by clnt_cots_kdestroy.
 */
typedef struct cku_private_s {
	CLIENT			cku_client;	/* client handle */
	calllist_t		cku_call;	/* for dispatching calls */
	struct rpc_err		cku_err;	/* error status */

	struct netbuf		cku_srcaddr;	/* source address for retries */
	int			cku_addrfmly;  /* for binding port */
	struct netbuf		cku_addr;	/* remote address */
	dev_t			cku_device;	/* device to use */
	uint_t			cku_flags;	/* CKU_* bits below */
#define	CKU_ONQUEUE		0x1	/* call record is on the dispatch list */
#define	CKU_SENT		0x2	/* request has been sent */

	bool_t			cku_progress;	/* for CLSET_PROGRESS */
	uint32_t		cku_xid;	/* current XID */
	clock_t			cku_ctime;	/* time stamp of when */
						/* connection was created */
	uint_t			cku_recv_attempts; /* receive attempts since */
						/* last send on the wire */
	XDR			cku_outxdr;	/* xdr routine for output */
	XDR			cku_inxdr;	/* xdr routine for input */
	char			cku_rpchdr[WIRE_HDR_SIZE + 4];
						/* pre-serialized rpc header */

	uint_t			cku_outbuflen;	/* default output mblk length */
	struct cred		*cku_cred;	/* credentials */
	bool_t			cku_nodelayonerr;
						/* for CLSET_NODELAYONERR */
	int			cku_useresvport; /* Use reserved port */
	struct rpc_cots_client	*cku_stats;	/* stats for zone */
} cku_private_t;
3750Sstevel@tonic-gate 
/*
 * Forward declarations for the connection manager and dispatch helpers.
 */
static struct cm_xprt *connmgr_wrapconnect(struct cm_xprt *,
	const struct timeval *, struct netbuf *, int, struct netbuf *,
	struct rpc_err *, bool_t, bool_t);

static bool_t	connmgr_connect(struct cm_xprt *, queue_t *, struct netbuf *,
				int, calllist_t *, int *, bool_t reconnect,
				const struct timeval *, bool_t);

static bool_t	connmgr_setopt(queue_t *, int, int, calllist_t *);
static void	connmgr_sndrel(struct cm_xprt *);
static void	connmgr_snddis(struct cm_xprt *);
static void	connmgr_close(struct cm_xprt *);
static void	connmgr_release(struct cm_xprt *);	/* inverse of CONN_HOLD() */
static struct cm_xprt *connmgr_wrapget(struct netbuf *, const struct timeval *,
	cku_private_t *);

static struct cm_xprt *connmgr_get(struct netbuf *, const struct timeval *,
	struct netbuf *, int, struct netbuf *, struct rpc_err *, dev_t,
	bool_t, int);

static void connmgr_cancelconn(struct cm_xprt *);
static enum clnt_stat connmgr_cwait(struct cm_xprt *, const struct timeval *,
	bool_t);
static void connmgr_dis_and_wait(struct cm_xprt *);

static void	clnt_dispatch_send(queue_t *, mblk_t *, calllist_t *, uint_t,
					uint_t);

/* Interruptible or noninterruptible delay (see the file header comment). */
static int clnt_delay(clock_t, bool_t);

/* Wait for a TPI ack; the wakeup comes via clnt_dispatch_notifyconn(). */
static int waitforack(calllist_t *, t_scalar_t, const struct timeval *, bool_t);
4070Sstevel@tonic-gate 
/*
 * Operations vector for TCP/IP based RPC.  Entries are positional and
 * must stay in the order struct clnt_ops declares them.
 */
static struct clnt_ops tcp_ops = {
	clnt_cots_kcallit,	/* do rpc call */
	clnt_cots_kabort,	/* abort call */
	clnt_cots_kerror,	/* return error status */
	clnt_cots_kfreeres,	/* free results */
	clnt_cots_kdestroy,	/* destroy rpc handle */
	clnt_cots_kcontrol,	/* the ioctl() of rpc */
	clnt_cots_ksettimers,	/* set retry timers */
};
4200Sstevel@tonic-gate 
static int rpc_kstat_instance = 0;  /* keeps the current instance */
				/* number for the next kstat_create */

/* Head of the connection manager's transport list; see clnt_zone_destroy(). */
static struct cm_xprt *cm_hd = NULL;
static kmutex_t connmgr_lock;	/* for connection mngr's list of transports */

extern kmutex_t clnt_max_msg_lock;

static calllist_t *clnt_pending = NULL;
extern kmutex_t clnt_pending_lock;

static int clnt_cots_hash_size = DEFAULT_HASH_SIZE;

static call_table_t *cots_call_ht;	/* dispatch table: replies are */
					/* matched to calls by XID */

/*
 * Per-zone COTS client statistics.  The struct layout and the
 * cots_rcstat_tmpl initializer must stay in sync.
 */
static const struct rpc_cots_client {
	kstat_named_t	rccalls;
	kstat_named_t	rcbadcalls;
	kstat_named_t	rcbadxids;
	kstat_named_t	rctimeouts;
	kstat_named_t	rcnewcreds;
	kstat_named_t	rcbadverfs;
	kstat_named_t	rctimers;
	kstat_named_t	rccantconn;
	kstat_named_t	rcnomem;
	kstat_named_t	rcintrs;
} cots_rcstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "badxids",	KSTAT_DATA_UINT64 },
	{ "timeouts",	KSTAT_DATA_UINT64 },
	{ "newcreds",	KSTAT_DATA_UINT64 },
	{ "badverfs",	KSTAT_DATA_UINT64 },
	{ "timers",	KSTAT_DATA_UINT64 },
	{ "cantconn",	KSTAT_DATA_UINT64 },
	{ "nomem",	KSTAT_DATA_UINT64 },
	{ "interrupts", KSTAT_DATA_UINT64 }
};

/* Atomically bump one of the cots_rcstat counters. */
#define	COTSRCSTAT_INCR(p, x)	\
	atomic_add_64(&(p)->x.value.ui64, 1)

#define	CLNT_MAX_CONNS	1	/* concurrent connections between clnt/srvr */
static int clnt_max_conns = CLNT_MAX_CONNS;

#define	CLNT_MIN_TIMEOUT	10	/* seconds to wait after we get a */
					/* connection reset */
#define	CLNT_MIN_CONNTIMEOUT	5	/* seconds to wait for a connection */


/* Tunable copies of the two minimums above. */
static int clnt_cots_min_tout = CLNT_MIN_TIMEOUT;
static int clnt_cots_min_conntout = CLNT_MIN_CONNTIMEOUT;

/*
 * Limit the number of times we will attempt to receive a reply without
 * re-sending a response.
 */
#define	CLNT_MAXRECV_WITHOUT_RETRY	3
static uint_t clnt_cots_maxrecv	= CLNT_MAXRECV_WITHOUT_RETRY;

uint_t *clnt_max_msg_sizep;	/* rpcmod's message-size sanity check */
void (*clnt_stop_idle)(queue_t *wq);

/* Convert between the public CLIENT handle and our private data. */
#define	ptoh(p)		(&((p)->cku_client))
#define	htop(h)		((cku_private_t *)((h)->cl_private))

/*
 * Times to retry
 */
#define	REFRESHES	2	/* authentication refreshes */

/*
 * The following is used to determine the global default behavior for
 * COTS when binding to a local port.
 *
 * If the value is set to 1 the default will be to select a reserved
 * (aka privileged) port, if the value is zero the default will be to
 * use non-reserved ports.  Users of kRPC may override this by using
 * CLNT_CONTROL() and CLSET_BINDRESVPORT.
 */
static int clnt_cots_do_bindresvport = 1;

static zone_key_t zone_cots_key;	/* key for zone-specific data */
5040Sstevel@tonic-gate 
5050Sstevel@tonic-gate /*
5060Sstevel@tonic-gate  * We need to do this after all kernel threads in the zone have exited.
5070Sstevel@tonic-gate  */
5080Sstevel@tonic-gate /* ARGSUSED */
5090Sstevel@tonic-gate static void
5100Sstevel@tonic-gate clnt_zone_destroy(zoneid_t zoneid, void *unused)
5110Sstevel@tonic-gate {
5120Sstevel@tonic-gate 	struct cm_xprt **cmp;
5130Sstevel@tonic-gate 	struct cm_xprt *cm_entry;
5140Sstevel@tonic-gate 	struct cm_xprt *freelist = NULL;
5150Sstevel@tonic-gate 
5160Sstevel@tonic-gate 	mutex_enter(&connmgr_lock);
5170Sstevel@tonic-gate 	cmp = &cm_hd;
5180Sstevel@tonic-gate 	while ((cm_entry = *cmp) != NULL) {
5190Sstevel@tonic-gate 		if (cm_entry->x_zoneid == zoneid) {
5200Sstevel@tonic-gate 			*cmp = cm_entry->x_next;
5210Sstevel@tonic-gate 			cm_entry->x_next = freelist;
5220Sstevel@tonic-gate 			freelist = cm_entry;
5230Sstevel@tonic-gate 		} else {
5240Sstevel@tonic-gate 			cmp = &cm_entry->x_next;
5250Sstevel@tonic-gate 		}
5260Sstevel@tonic-gate 	}
5270Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
5280Sstevel@tonic-gate 	while ((cm_entry = freelist) != NULL) {
5290Sstevel@tonic-gate 		freelist = cm_entry->x_next;
5300Sstevel@tonic-gate 		connmgr_close(cm_entry);
5310Sstevel@tonic-gate 	}
5320Sstevel@tonic-gate }
5330Sstevel@tonic-gate 
5340Sstevel@tonic-gate int
5350Sstevel@tonic-gate clnt_cots_kcreate(dev_t dev, struct netbuf *addr, int family, rpcprog_t prog,
5360Sstevel@tonic-gate 	rpcvers_t vers, uint_t max_msgsize, cred_t *cred, CLIENT **ncl)
5370Sstevel@tonic-gate {
5380Sstevel@tonic-gate 	CLIENT *h;
5390Sstevel@tonic-gate 	cku_private_t *p;
5400Sstevel@tonic-gate 	struct rpc_msg call_msg;
5410Sstevel@tonic-gate 	struct rpcstat *rpcstat;
5420Sstevel@tonic-gate 
5430Sstevel@tonic-gate 	RPCLOG(8, "clnt_cots_kcreate: prog %u\n", prog);
5440Sstevel@tonic-gate 
545766Scarlsonj 	rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
5460Sstevel@tonic-gate 	ASSERT(rpcstat != NULL);
5470Sstevel@tonic-gate 
5480Sstevel@tonic-gate 	/* Allocate and intialize the client handle. */
5490Sstevel@tonic-gate 	p = kmem_zalloc(sizeof (*p), KM_SLEEP);
5500Sstevel@tonic-gate 
5510Sstevel@tonic-gate 	h = ptoh(p);
5520Sstevel@tonic-gate 
5530Sstevel@tonic-gate 	h->cl_private = (caddr_t)p;
5540Sstevel@tonic-gate 	h->cl_auth = authkern_create();
5550Sstevel@tonic-gate 	h->cl_ops = &tcp_ops;
5560Sstevel@tonic-gate 
5570Sstevel@tonic-gate 	cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
5580Sstevel@tonic-gate 	mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);
5590Sstevel@tonic-gate 
5600Sstevel@tonic-gate 	/*
5610Sstevel@tonic-gate 	 * If the current sanity check size in rpcmod is smaller
5620Sstevel@tonic-gate 	 * than the size needed, then increase the sanity check.
5630Sstevel@tonic-gate 	 */
5640Sstevel@tonic-gate 	if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
5650Sstevel@tonic-gate 	    max_msgsize > *clnt_max_msg_sizep) {
5660Sstevel@tonic-gate 		mutex_enter(&clnt_max_msg_lock);
5670Sstevel@tonic-gate 		if (max_msgsize > *clnt_max_msg_sizep)
5680Sstevel@tonic-gate 			*clnt_max_msg_sizep = max_msgsize;
5690Sstevel@tonic-gate 		mutex_exit(&clnt_max_msg_lock);
5700Sstevel@tonic-gate 	}
5710Sstevel@tonic-gate 
5720Sstevel@tonic-gate 	p->cku_outbuflen = COTS_DEFAULT_ALLOCSIZE;
5730Sstevel@tonic-gate 
5740Sstevel@tonic-gate 	/* Preserialize the call message header */
5750Sstevel@tonic-gate 
5760Sstevel@tonic-gate 	call_msg.rm_xid = 0;
5770Sstevel@tonic-gate 	call_msg.rm_direction = CALL;
5780Sstevel@tonic-gate 	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
5790Sstevel@tonic-gate 	call_msg.rm_call.cb_prog = prog;
5800Sstevel@tonic-gate 	call_msg.rm_call.cb_vers = vers;
5810Sstevel@tonic-gate 
5820Sstevel@tonic-gate 	xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, WIRE_HDR_SIZE, XDR_ENCODE);
5830Sstevel@tonic-gate 
5840Sstevel@tonic-gate 	if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
5850Sstevel@tonic-gate 		RPCLOG0(1, "clnt_cots_kcreate - Fatal header serialization "
5860Sstevel@tonic-gate 		    "error\n");
5870Sstevel@tonic-gate 		auth_destroy(h->cl_auth);
5880Sstevel@tonic-gate 		kmem_free(p, sizeof (cku_private_t));
5890Sstevel@tonic-gate 		RPCLOG0(1, "clnt_cots_kcreate: create failed error EINVAL\n");
5900Sstevel@tonic-gate 		return (EINVAL);		/* XXX */
5910Sstevel@tonic-gate 	}
5920Sstevel@tonic-gate 
5930Sstevel@tonic-gate 	/*
5940Sstevel@tonic-gate 	 * The zalloc initialized the fields below.
5950Sstevel@tonic-gate 	 * p->cku_xid = 0;
5960Sstevel@tonic-gate 	 * p->cku_flags = 0;
5970Sstevel@tonic-gate 	 * p->cku_srcaddr.len = 0;
5980Sstevel@tonic-gate 	 * p->cku_srcaddr.maxlen = 0;
5990Sstevel@tonic-gate 	 */
6000Sstevel@tonic-gate 
6010Sstevel@tonic-gate 	p->cku_cred = cred;
6020Sstevel@tonic-gate 	p->cku_device = dev;
6030Sstevel@tonic-gate 	p->cku_addrfmly = family;
6040Sstevel@tonic-gate 	p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
6050Sstevel@tonic-gate 	p->cku_addr.maxlen = addr->maxlen;
6060Sstevel@tonic-gate 	p->cku_addr.len = addr->len;
6070Sstevel@tonic-gate 	bcopy(addr->buf, p->cku_addr.buf, addr->len);
6080Sstevel@tonic-gate 	p->cku_stats = rpcstat->rpc_cots_client;
6090Sstevel@tonic-gate 	p->cku_useresvport = -1; /* value is has not been set */
6100Sstevel@tonic-gate 
6110Sstevel@tonic-gate 	*ncl = h;
6120Sstevel@tonic-gate 	return (0);
6130Sstevel@tonic-gate }
6140Sstevel@tonic-gate 
/*ARGSUSED*/
/*
 * No-op abort entry point.  Present to fill the CLNT_ABORT slot of the
 * COTS client ops vector (tcp_ops); there is nothing to abort here.
 */
static void
clnt_cots_kabort(CLIENT *h)
{
}
6200Sstevel@tonic-gate 
6210Sstevel@tonic-gate /*
6220Sstevel@tonic-gate  * Return error info on this handle.
6230Sstevel@tonic-gate  */
6240Sstevel@tonic-gate static void
6250Sstevel@tonic-gate clnt_cots_kerror(CLIENT *h, struct rpc_err *err)
6260Sstevel@tonic-gate {
6270Sstevel@tonic-gate 	/* LINTED pointer alignment */
6280Sstevel@tonic-gate 	cku_private_t *p = htop(h);
6290Sstevel@tonic-gate 
6300Sstevel@tonic-gate 	*err = p->cku_err;
6310Sstevel@tonic-gate }
6320Sstevel@tonic-gate 
6330Sstevel@tonic-gate static bool_t
6340Sstevel@tonic-gate clnt_cots_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
6350Sstevel@tonic-gate {
6360Sstevel@tonic-gate 	/* LINTED pointer alignment */
6370Sstevel@tonic-gate 	cku_private_t *p = htop(h);
6380Sstevel@tonic-gate 	XDR *xdrs;
6390Sstevel@tonic-gate 
6400Sstevel@tonic-gate 	xdrs = &(p->cku_outxdr);
6410Sstevel@tonic-gate 	xdrs->x_op = XDR_FREE;
6420Sstevel@tonic-gate 	return ((*xdr_res)(xdrs, res_ptr));
6430Sstevel@tonic-gate }
6440Sstevel@tonic-gate 
6450Sstevel@tonic-gate static bool_t
6460Sstevel@tonic-gate clnt_cots_kcontrol(CLIENT *h, int cmd, char *arg)
6470Sstevel@tonic-gate {
6480Sstevel@tonic-gate 	cku_private_t *p = htop(h);
6490Sstevel@tonic-gate 
6500Sstevel@tonic-gate 	switch (cmd) {
6510Sstevel@tonic-gate 	case CLSET_PROGRESS:
6520Sstevel@tonic-gate 		p->cku_progress = TRUE;
6530Sstevel@tonic-gate 		return (TRUE);
6540Sstevel@tonic-gate 
6550Sstevel@tonic-gate 	case CLSET_XID:
6560Sstevel@tonic-gate 		if (arg == NULL)
6570Sstevel@tonic-gate 			return (FALSE);
6580Sstevel@tonic-gate 
6590Sstevel@tonic-gate 		p->cku_xid = *((uint32_t *)arg);
6600Sstevel@tonic-gate 		return (TRUE);
6610Sstevel@tonic-gate 
6620Sstevel@tonic-gate 	case CLGET_XID:
6630Sstevel@tonic-gate 		if (arg == NULL)
6640Sstevel@tonic-gate 			return (FALSE);
6650Sstevel@tonic-gate 
6660Sstevel@tonic-gate 		*((uint32_t *)arg) = p->cku_xid;
6670Sstevel@tonic-gate 		return (TRUE);
6680Sstevel@tonic-gate 
6690Sstevel@tonic-gate 	case CLSET_NODELAYONERR:
6700Sstevel@tonic-gate 		if (arg == NULL)
6710Sstevel@tonic-gate 			return (FALSE);
6720Sstevel@tonic-gate 
6730Sstevel@tonic-gate 		if (*((bool_t *)arg) == TRUE) {
6740Sstevel@tonic-gate 			p->cku_nodelayonerr = TRUE;
6750Sstevel@tonic-gate 			return (TRUE);
6760Sstevel@tonic-gate 		}
6770Sstevel@tonic-gate 		if (*((bool_t *)arg) == FALSE) {
6780Sstevel@tonic-gate 			p->cku_nodelayonerr = FALSE;
6790Sstevel@tonic-gate 			return (TRUE);
6800Sstevel@tonic-gate 		}
6810Sstevel@tonic-gate 		return (FALSE);
6820Sstevel@tonic-gate 
6830Sstevel@tonic-gate 	case CLGET_NODELAYONERR:
6840Sstevel@tonic-gate 		if (arg == NULL)
6850Sstevel@tonic-gate 			return (FALSE);
6860Sstevel@tonic-gate 
6870Sstevel@tonic-gate 		*((bool_t *)arg) = p->cku_nodelayonerr;
6880Sstevel@tonic-gate 		return (TRUE);
6890Sstevel@tonic-gate 
6900Sstevel@tonic-gate 	case CLSET_BINDRESVPORT:
6910Sstevel@tonic-gate 		if (arg == NULL)
6920Sstevel@tonic-gate 			return (FALSE);
6930Sstevel@tonic-gate 
6940Sstevel@tonic-gate 		if (*(int *)arg != 1 && *(int *)arg != 0)
6950Sstevel@tonic-gate 			return (FALSE);
6960Sstevel@tonic-gate 
6970Sstevel@tonic-gate 		p->cku_useresvport = *(int *)arg;
6980Sstevel@tonic-gate 
6990Sstevel@tonic-gate 		return (TRUE);
7000Sstevel@tonic-gate 
7010Sstevel@tonic-gate 	case CLGET_BINDRESVPORT:
7020Sstevel@tonic-gate 		if (arg == NULL)
7030Sstevel@tonic-gate 			return (FALSE);
7040Sstevel@tonic-gate 
7050Sstevel@tonic-gate 		*(int *)arg = p->cku_useresvport;
7060Sstevel@tonic-gate 
7070Sstevel@tonic-gate 		return (TRUE);
7080Sstevel@tonic-gate 
7090Sstevel@tonic-gate 	default:
7100Sstevel@tonic-gate 		return (FALSE);
7110Sstevel@tonic-gate 	}
7120Sstevel@tonic-gate }
7130Sstevel@tonic-gate 
7140Sstevel@tonic-gate /*
7150Sstevel@tonic-gate  * Destroy rpc handle.  Frees the space used for output buffer,
7160Sstevel@tonic-gate  * private data, and handle structure.
7170Sstevel@tonic-gate  */
7180Sstevel@tonic-gate static void
7190Sstevel@tonic-gate clnt_cots_kdestroy(CLIENT *h)
7200Sstevel@tonic-gate {
7210Sstevel@tonic-gate 	/* LINTED pointer alignment */
7220Sstevel@tonic-gate 	cku_private_t *p = htop(h);
7230Sstevel@tonic-gate 	calllist_t *call = &p->cku_call;
7240Sstevel@tonic-gate 
7250Sstevel@tonic-gate 	RPCLOG(8, "clnt_cots_kdestroy h: %p\n", (void *)h);
7260Sstevel@tonic-gate 	RPCLOG(8, "clnt_cots_kdestroy h: xid=0x%x\n", p->cku_xid);
7270Sstevel@tonic-gate 
7280Sstevel@tonic-gate 	if (p->cku_flags & CKU_ONQUEUE) {
7290Sstevel@tonic-gate 		RPCLOG(64, "clnt_cots_kdestroy h: removing call for xid 0x%x "
7300Sstevel@tonic-gate 		    "from dispatch list\n", p->cku_xid);
7310Sstevel@tonic-gate 		call_table_remove(call);
7320Sstevel@tonic-gate 	}
7330Sstevel@tonic-gate 
7340Sstevel@tonic-gate 	if (call->call_reply)
7350Sstevel@tonic-gate 		freemsg(call->call_reply);
7360Sstevel@tonic-gate 	cv_destroy(&call->call_cv);
7370Sstevel@tonic-gate 	mutex_destroy(&call->call_lock);
7380Sstevel@tonic-gate 
7390Sstevel@tonic-gate 	kmem_free(p->cku_srcaddr.buf, p->cku_srcaddr.maxlen);
7400Sstevel@tonic-gate 	kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
7410Sstevel@tonic-gate 	kmem_free(p, sizeof (*p));
7420Sstevel@tonic-gate }
7430Sstevel@tonic-gate 
/*
 * NOTE(review): clnt_cots_pulls is not referenced in this part of the
 * file; presumably a debug/observability counter — confirm before
 * removing.
 */
static int clnt_cots_pulls;
#define	RM_HDR_SIZE	4	/* record mark header size */
7460Sstevel@tonic-gate 
7470Sstevel@tonic-gate /*
7480Sstevel@tonic-gate  * Call remote procedure.
7490Sstevel@tonic-gate  */
7500Sstevel@tonic-gate static enum clnt_stat
7510Sstevel@tonic-gate clnt_cots_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
7520Sstevel@tonic-gate     caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp, struct timeval wait)
7530Sstevel@tonic-gate {
7540Sstevel@tonic-gate 	/* LINTED pointer alignment */
7550Sstevel@tonic-gate 	cku_private_t *p = htop(h);
7560Sstevel@tonic-gate 	calllist_t *call = &p->cku_call;
7570Sstevel@tonic-gate 	XDR *xdrs;
7580Sstevel@tonic-gate 	struct rpc_msg reply_msg;
7590Sstevel@tonic-gate 	mblk_t *mp;
7600Sstevel@tonic-gate #ifdef	RPCDEBUG
7610Sstevel@tonic-gate 	clock_t time_sent;
7620Sstevel@tonic-gate #endif
7630Sstevel@tonic-gate 	struct netbuf *retryaddr;
7640Sstevel@tonic-gate 	struct cm_xprt *cm_entry = NULL;
7650Sstevel@tonic-gate 	queue_t *wq;
7660Sstevel@tonic-gate 	int len;
7670Sstevel@tonic-gate 	int mpsize;
7680Sstevel@tonic-gate 	int refreshes = REFRESHES;
7690Sstevel@tonic-gate 	int interrupted;
7700Sstevel@tonic-gate 	int tidu_size;
7710Sstevel@tonic-gate 	enum clnt_stat status;
7720Sstevel@tonic-gate 	struct timeval cwait;
7730Sstevel@tonic-gate 	bool_t delay_first = FALSE;
7740Sstevel@tonic-gate 	clock_t ticks;
7750Sstevel@tonic-gate 
7760Sstevel@tonic-gate 	RPCLOG(2, "clnt_cots_kcallit, procnum %u\n", procnum);
7770Sstevel@tonic-gate 	COTSRCSTAT_INCR(p->cku_stats, rccalls);
7780Sstevel@tonic-gate 
7790Sstevel@tonic-gate 	RPCLOG(2, "clnt_cots_kcallit: wait.tv_sec: %ld\n", wait.tv_sec);
7800Sstevel@tonic-gate 	RPCLOG(2, "clnt_cots_kcallit: wait.tv_usec: %ld\n", wait.tv_usec);
7810Sstevel@tonic-gate 
7820Sstevel@tonic-gate 	/*
7830Sstevel@tonic-gate 	 * Bug ID 1240234:
7840Sstevel@tonic-gate 	 * Look out for zero length timeouts. We don't want to
7850Sstevel@tonic-gate 	 * wait zero seconds for a connection to be established.
7860Sstevel@tonic-gate 	 */
7870Sstevel@tonic-gate 	if (wait.tv_sec < clnt_cots_min_conntout) {
7880Sstevel@tonic-gate 		cwait.tv_sec = clnt_cots_min_conntout;
7890Sstevel@tonic-gate 		cwait.tv_usec = 0;
7900Sstevel@tonic-gate 		RPCLOG(8, "clnt_cots_kcallit: wait.tv_sec (%ld) too low,",
7910Sstevel@tonic-gate 		    wait.tv_sec);
7920Sstevel@tonic-gate 		RPCLOG(8, " setting to: %d\n", clnt_cots_min_conntout);
7930Sstevel@tonic-gate 	} else {
7940Sstevel@tonic-gate 		cwait = wait;
7950Sstevel@tonic-gate 	}
7960Sstevel@tonic-gate 
7970Sstevel@tonic-gate call_again:
7980Sstevel@tonic-gate 	if (cm_entry) {
7990Sstevel@tonic-gate 		connmgr_release(cm_entry);
8000Sstevel@tonic-gate 		cm_entry = NULL;
8010Sstevel@tonic-gate 	}
8020Sstevel@tonic-gate 
8030Sstevel@tonic-gate 	mp = NULL;
8040Sstevel@tonic-gate 
8050Sstevel@tonic-gate 	/*
8060Sstevel@tonic-gate 	 * If the call is not a retry, allocate a new xid and cache it
8070Sstevel@tonic-gate 	 * for future retries.
8080Sstevel@tonic-gate 	 * Bug ID 1246045:
8090Sstevel@tonic-gate 	 * Treat call as a retry for purposes of binding the source
8100Sstevel@tonic-gate 	 * port only if we actually attempted to send anything on
8110Sstevel@tonic-gate 	 * the previous call.
8120Sstevel@tonic-gate 	 */
8130Sstevel@tonic-gate 	if (p->cku_xid == 0) {
8140Sstevel@tonic-gate 		p->cku_xid = alloc_xid();
8150Sstevel@tonic-gate 		/*
8160Sstevel@tonic-gate 		 * We need to ASSERT here that our xid != 0 because this
8170Sstevel@tonic-gate 		 * determines whether or not our call record gets placed on
8180Sstevel@tonic-gate 		 * the hash table or the linked list.  By design, we mandate
8190Sstevel@tonic-gate 		 * that RPC calls over cots must have xid's != 0, so we can
8200Sstevel@tonic-gate 		 * ensure proper management of the hash table.
8210Sstevel@tonic-gate 		 */
8220Sstevel@tonic-gate 		ASSERT(p->cku_xid != 0);
8230Sstevel@tonic-gate 
8240Sstevel@tonic-gate 		retryaddr = NULL;
8250Sstevel@tonic-gate 		p->cku_flags &= ~CKU_SENT;
8260Sstevel@tonic-gate 
8270Sstevel@tonic-gate 		if (p->cku_flags & CKU_ONQUEUE) {
8280Sstevel@tonic-gate 			RPCLOG(8, "clnt_cots_kcallit: new call, dequeuing old"
8290Sstevel@tonic-gate 			    " one (%p)\n", (void *)call);
8300Sstevel@tonic-gate 			call_table_remove(call);
8310Sstevel@tonic-gate 			p->cku_flags &= ~CKU_ONQUEUE;
8320Sstevel@tonic-gate 			RPCLOG(64, "clnt_cots_kcallit: removing call from "
8330Sstevel@tonic-gate 			    "dispatch list because xid was zero (now 0x%x)\n",
8340Sstevel@tonic-gate 			    p->cku_xid);
8350Sstevel@tonic-gate 		}
8360Sstevel@tonic-gate 
8370Sstevel@tonic-gate 		if (call->call_reply != NULL) {
8380Sstevel@tonic-gate 			freemsg(call->call_reply);
8390Sstevel@tonic-gate 			call->call_reply = NULL;
8400Sstevel@tonic-gate 		}
8410Sstevel@tonic-gate 	} else if (p->cku_srcaddr.buf == NULL || p->cku_srcaddr.len == 0) {
8420Sstevel@tonic-gate 		retryaddr = NULL;
8430Sstevel@tonic-gate 
8440Sstevel@tonic-gate 	} else if (p->cku_flags & CKU_SENT) {
8450Sstevel@tonic-gate 		retryaddr = &p->cku_srcaddr;
8460Sstevel@tonic-gate 
8470Sstevel@tonic-gate 	} else {
8480Sstevel@tonic-gate 		/*
8490Sstevel@tonic-gate 		 * Bug ID 1246045: Nothing was sent, so set retryaddr to
8500Sstevel@tonic-gate 		 * NULL and let connmgr_get() bind to any source port it
8510Sstevel@tonic-gate 		 * can get.
8520Sstevel@tonic-gate 		 */
8530Sstevel@tonic-gate 		retryaddr = NULL;
8540Sstevel@tonic-gate 	}
8550Sstevel@tonic-gate 
8560Sstevel@tonic-gate 	RPCLOG(64, "clnt_cots_kcallit: xid = 0x%x", p->cku_xid);
8570Sstevel@tonic-gate 	RPCLOG(64, " flags = 0x%x\n", p->cku_flags);
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate 	p->cku_err.re_status = RPC_TIMEDOUT;
8600Sstevel@tonic-gate 	p->cku_err.re_errno = p->cku_err.re_terrno = 0;
8610Sstevel@tonic-gate 
8620Sstevel@tonic-gate 	cm_entry = connmgr_wrapget(retryaddr, &cwait, p);
8630Sstevel@tonic-gate 
8640Sstevel@tonic-gate 	if (cm_entry == NULL) {
8650Sstevel@tonic-gate 		RPCLOG(1, "clnt_cots_kcallit: can't connect status %s\n",
8660Sstevel@tonic-gate 		    clnt_sperrno(p->cku_err.re_status));
8670Sstevel@tonic-gate 
8680Sstevel@tonic-gate 		/*
8690Sstevel@tonic-gate 		 * The reasons why we fail to create a connection are
8700Sstevel@tonic-gate 		 * varied. In most cases we don't want the caller to
8710Sstevel@tonic-gate 		 * immediately retry. This could have one or more
8720Sstevel@tonic-gate 		 * bad effects. This includes flooding the net with
8730Sstevel@tonic-gate 		 * connect requests to ports with no listener; a hard
8740Sstevel@tonic-gate 		 * kernel loop due to all the "reserved" TCP ports being
8750Sstevel@tonic-gate 		 * in use.
8760Sstevel@tonic-gate 		 */
8770Sstevel@tonic-gate 		delay_first = TRUE;
8780Sstevel@tonic-gate 
8790Sstevel@tonic-gate 		/*
8800Sstevel@tonic-gate 		 * Even if we end up returning EINTR, we still count a
8810Sstevel@tonic-gate 		 * a "can't connect", because the connection manager
8820Sstevel@tonic-gate 		 * might have been committed to waiting for or timing out on
8830Sstevel@tonic-gate 		 * a connection.
8840Sstevel@tonic-gate 		 */
8850Sstevel@tonic-gate 		COTSRCSTAT_INCR(p->cku_stats, rccantconn);
8860Sstevel@tonic-gate 		switch (p->cku_err.re_status) {
8870Sstevel@tonic-gate 		case RPC_INTR:
8880Sstevel@tonic-gate 			p->cku_err.re_errno = EINTR;
8890Sstevel@tonic-gate 
8900Sstevel@tonic-gate 			/*
8910Sstevel@tonic-gate 			 * No need to delay because a UNIX signal(2)
8920Sstevel@tonic-gate 			 * interrupted us. The caller likely won't
8930Sstevel@tonic-gate 			 * retry the CLNT_CALL() and even if it does,
8940Sstevel@tonic-gate 			 * we assume the caller knows what it is doing.
8950Sstevel@tonic-gate 			 */
8960Sstevel@tonic-gate 			delay_first = FALSE;
8970Sstevel@tonic-gate 			break;
8980Sstevel@tonic-gate 
8990Sstevel@tonic-gate 		case RPC_TIMEDOUT:
9000Sstevel@tonic-gate 			p->cku_err.re_errno = ETIMEDOUT;
9010Sstevel@tonic-gate 
9020Sstevel@tonic-gate 			/*
9030Sstevel@tonic-gate 			 * No need to delay because timed out already
9040Sstevel@tonic-gate 			 * on the connection request and assume that the
9050Sstevel@tonic-gate 			 * transport time out is longer than our minimum
9060Sstevel@tonic-gate 			 * timeout, or least not too much smaller.
9070Sstevel@tonic-gate 			 */
9080Sstevel@tonic-gate 			delay_first = FALSE;
9090Sstevel@tonic-gate 			break;
9100Sstevel@tonic-gate 
9110Sstevel@tonic-gate 		case RPC_SYSTEMERROR:
9120Sstevel@tonic-gate 		case RPC_TLIERROR:
9130Sstevel@tonic-gate 			/*
9140Sstevel@tonic-gate 			 * We want to delay here because a transient
9150Sstevel@tonic-gate 			 * system error has a better chance of going away
9160Sstevel@tonic-gate 			 * if we delay a bit. If it's not transient, then
9170Sstevel@tonic-gate 			 * we don't want end up in a hard kernel loop
9180Sstevel@tonic-gate 			 * due to retries.
9190Sstevel@tonic-gate 			 */
9200Sstevel@tonic-gate 			ASSERT(p->cku_err.re_errno != 0);
9210Sstevel@tonic-gate 			break;
9220Sstevel@tonic-gate 
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate 		case RPC_CANTCONNECT:
9250Sstevel@tonic-gate 			/*
9260Sstevel@tonic-gate 			 * RPC_CANTCONNECT is set on T_ERROR_ACK which
9270Sstevel@tonic-gate 			 * implies some error down in the TCP layer or
9280Sstevel@tonic-gate 			 * below. If cku_nodelayonerror is set then we
9290Sstevel@tonic-gate 			 * assume the caller knows not to try too hard.
9300Sstevel@tonic-gate 			 */
9310Sstevel@tonic-gate 			RPCLOG0(8, "clnt_cots_kcallit: connection failed,");
9320Sstevel@tonic-gate 			RPCLOG0(8, " re_status=RPC_CANTCONNECT,");
9330Sstevel@tonic-gate 			RPCLOG(8, " re_errno=%d,", p->cku_err.re_errno);
9340Sstevel@tonic-gate 			RPCLOG(8, " cku_nodelayonerr=%d", p->cku_nodelayonerr);
9350Sstevel@tonic-gate 			if (p->cku_nodelayonerr == TRUE)
9360Sstevel@tonic-gate 				delay_first = FALSE;
9370Sstevel@tonic-gate 
9380Sstevel@tonic-gate 			p->cku_err.re_errno = EIO;
9390Sstevel@tonic-gate 
9400Sstevel@tonic-gate 			break;
9410Sstevel@tonic-gate 
9420Sstevel@tonic-gate 		case RPC_XPRTFAILED:
9430Sstevel@tonic-gate 			/*
9440Sstevel@tonic-gate 			 * We want to delay here because we likely
9450Sstevel@tonic-gate 			 * got a refused connection.
9460Sstevel@tonic-gate 			 */
947*4457Svv149972 			if (p->cku_err.re_errno == 0)
948*4457Svv149972 				p->cku_err.re_errno = EIO;
949*4457Svv149972 
950*4457Svv149972 			RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
951*4457Svv149972 			    p->cku_err.re_errno);
952*4457Svv149972 
953*4457Svv149972 			break;
9540Sstevel@tonic-gate 
9550Sstevel@tonic-gate 		default:
9560Sstevel@tonic-gate 			/*
9570Sstevel@tonic-gate 			 * We delay here because it is better to err
9580Sstevel@tonic-gate 			 * on the side of caution. If we got here then
9590Sstevel@tonic-gate 			 * status could have been RPC_SUCCESS, but we
9600Sstevel@tonic-gate 			 * know that we did not get a connection, so
9610Sstevel@tonic-gate 			 * force the rpc status to RPC_CANTCONNECT.
9620Sstevel@tonic-gate 			 */
9630Sstevel@tonic-gate 			p->cku_err.re_status = RPC_CANTCONNECT;
9640Sstevel@tonic-gate 			p->cku_err.re_errno = EIO;
9650Sstevel@tonic-gate 			break;
9660Sstevel@tonic-gate 		}
9670Sstevel@tonic-gate 		if (delay_first == TRUE)
9680Sstevel@tonic-gate 			ticks = clnt_cots_min_tout * drv_usectohz(1000000);
9690Sstevel@tonic-gate 		goto cots_done;
9700Sstevel@tonic-gate 	}
9710Sstevel@tonic-gate 
9720Sstevel@tonic-gate 	/*
9730Sstevel@tonic-gate 	 * If we've never sent any request on this connection (send count
9740Sstevel@tonic-gate 	 * is zero, or the connection has been reset), cache the
9750Sstevel@tonic-gate 	 * the connection's create time and send a request (possibly a retry)
9760Sstevel@tonic-gate 	 */
9770Sstevel@tonic-gate 	if ((p->cku_flags & CKU_SENT) == 0 ||
9780Sstevel@tonic-gate 	    p->cku_ctime != cm_entry->x_ctime) {
9790Sstevel@tonic-gate 		p->cku_ctime = cm_entry->x_ctime;
9800Sstevel@tonic-gate 
9810Sstevel@tonic-gate 	} else if ((p->cku_flags & CKU_SENT) && (p->cku_flags & CKU_ONQUEUE) &&
9820Sstevel@tonic-gate 	    (call->call_reply != NULL ||
9830Sstevel@tonic-gate 	    p->cku_recv_attempts < clnt_cots_maxrecv)) {
9840Sstevel@tonic-gate 
9850Sstevel@tonic-gate 		/*
9860Sstevel@tonic-gate 		 * If we've sent a request and our call is on the dispatch
9870Sstevel@tonic-gate 		 * queue and we haven't made too many receive attempts, then
9880Sstevel@tonic-gate 		 * don't re-send, just receive.
9890Sstevel@tonic-gate 		 */
9900Sstevel@tonic-gate 		p->cku_recv_attempts++;
9910Sstevel@tonic-gate 		goto read_again;
9920Sstevel@tonic-gate 	}
9930Sstevel@tonic-gate 
9940Sstevel@tonic-gate 	/*
9950Sstevel@tonic-gate 	 * Now we create the RPC request in a STREAMS message.  We have to do
9960Sstevel@tonic-gate 	 * this after the call to connmgr_get so that we have the correct
9970Sstevel@tonic-gate 	 * TIDU size for the transport.
9980Sstevel@tonic-gate 	 */
9990Sstevel@tonic-gate 	tidu_size = cm_entry->x_tidu_size;
10000Sstevel@tonic-gate 	len = MSG_OFFSET + MAX(tidu_size, RM_HDR_SIZE + WIRE_HDR_SIZE);
10010Sstevel@tonic-gate 
10020Sstevel@tonic-gate 	while ((mp = allocb(len, BPRI_MED)) == NULL) {
10030Sstevel@tonic-gate 		if (strwaitbuf(len, BPRI_MED)) {
10040Sstevel@tonic-gate 			p->cku_err.re_status = RPC_SYSTEMERROR;
10050Sstevel@tonic-gate 			p->cku_err.re_errno = ENOSR;
10060Sstevel@tonic-gate 			COTSRCSTAT_INCR(p->cku_stats, rcnomem);
10070Sstevel@tonic-gate 			goto cots_done;
10080Sstevel@tonic-gate 		}
10090Sstevel@tonic-gate 	}
10100Sstevel@tonic-gate 	xdrs = &p->cku_outxdr;
10110Sstevel@tonic-gate 	xdrmblk_init(xdrs, mp, XDR_ENCODE, tidu_size);
10120Sstevel@tonic-gate 	mpsize = MBLKSIZE(mp);
10130Sstevel@tonic-gate 	ASSERT(mpsize >= len);
10140Sstevel@tonic-gate 	ASSERT(mp->b_rptr == mp->b_datap->db_base);
10150Sstevel@tonic-gate 
10160Sstevel@tonic-gate 	/*
10170Sstevel@tonic-gate 	 * If the size of mblk is not appreciably larger than what we
10180Sstevel@tonic-gate 	 * asked, then resize the mblk to exactly len bytes. The reason for
10190Sstevel@tonic-gate 	 * this: suppose len is 1600 bytes, the tidu is 1460 bytes
10200Sstevel@tonic-gate 	 * (from TCP over ethernet), and the arguments to the RPC require
10210Sstevel@tonic-gate 	 * 2800 bytes. Ideally we want the protocol to render two
10220Sstevel@tonic-gate 	 * ~1400 byte segments over the wire. However if allocb() gives us a 2k
10230Sstevel@tonic-gate 	 * mblk, and we allocate a second mblk for the remainder, the protocol
10240Sstevel@tonic-gate 	 * module may generate 3 segments over the wire:
10250Sstevel@tonic-gate 	 * 1460 bytes for the first, 448 (2048 - 1600) for the second, and
10260Sstevel@tonic-gate 	 * 892 for the third. If we "waste" 448 bytes in the first mblk,
10270Sstevel@tonic-gate 	 * the XDR encoding will generate two ~1400 byte mblks, and the
10280Sstevel@tonic-gate 	 * protocol module is more likely to produce properly sized segments.
10290Sstevel@tonic-gate 	 */
10300Sstevel@tonic-gate 	if ((mpsize >> 1) <= len)
10310Sstevel@tonic-gate 		mp->b_rptr += (mpsize - len);
10320Sstevel@tonic-gate 
10330Sstevel@tonic-gate 	/*
10340Sstevel@tonic-gate 	 * Adjust b_rptr to reserve space for the non-data protocol headers
10350Sstevel@tonic-gate 	 * any downstream modules might like to add, and for the
10360Sstevel@tonic-gate 	 * record marking header.
10370Sstevel@tonic-gate 	 */
10380Sstevel@tonic-gate 	mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE);
10390Sstevel@tonic-gate 
10400Sstevel@tonic-gate 	if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
10410Sstevel@tonic-gate 		/* Copy in the preserialized RPC header information. */
10420Sstevel@tonic-gate 		bcopy(p->cku_rpchdr, mp->b_rptr, WIRE_HDR_SIZE);
10430Sstevel@tonic-gate 
10440Sstevel@tonic-gate 		/* Use XDR_SETPOS() to set the b_wptr to past the RPC header. */
10450Sstevel@tonic-gate 		XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base +
10460Sstevel@tonic-gate 		    WIRE_HDR_SIZE));
10470Sstevel@tonic-gate 
10480Sstevel@tonic-gate 		ASSERT((mp->b_wptr - mp->b_rptr) == WIRE_HDR_SIZE);
10490Sstevel@tonic-gate 
10500Sstevel@tonic-gate 		/* Serialize the procedure number and the arguments. */
10510Sstevel@tonic-gate 		if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
10520Sstevel@tonic-gate 		    (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
10530Sstevel@tonic-gate 		    (!(*xdr_args)(xdrs, argsp))) {
10540Sstevel@tonic-gate 			p->cku_err.re_status = RPC_CANTENCODEARGS;
10550Sstevel@tonic-gate 			p->cku_err.re_errno = EIO;
10560Sstevel@tonic-gate 			goto cots_done;
10570Sstevel@tonic-gate 		}
10580Sstevel@tonic-gate 
10590Sstevel@tonic-gate 		(*(uint32_t *)(mp->b_rptr)) = p->cku_xid;
10600Sstevel@tonic-gate 	} else {
10610Sstevel@tonic-gate 		uint32_t *uproc = (uint32_t *)&p->cku_rpchdr[WIRE_HDR_SIZE];
10620Sstevel@tonic-gate 		IXDR_PUT_U_INT32(uproc, procnum);
10630Sstevel@tonic-gate 
10640Sstevel@tonic-gate 		(*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;
10650Sstevel@tonic-gate 
10660Sstevel@tonic-gate 		/* Use XDR_SETPOS() to set the b_wptr. */
10670Sstevel@tonic-gate 		XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base));
10680Sstevel@tonic-gate 
10690Sstevel@tonic-gate 		/* Serialize the procedure number and the arguments. */
10700Sstevel@tonic-gate 		if (!AUTH_WRAP(h->cl_auth, p->cku_rpchdr, WIRE_HDR_SIZE+4,
10710Sstevel@tonic-gate 		    xdrs, xdr_args, argsp)) {
10720Sstevel@tonic-gate 			p->cku_err.re_status = RPC_CANTENCODEARGS;
10730Sstevel@tonic-gate 			p->cku_err.re_errno = EIO;
10740Sstevel@tonic-gate 			goto cots_done;
10750Sstevel@tonic-gate 		}
10760Sstevel@tonic-gate 	}
10770Sstevel@tonic-gate 
10780Sstevel@tonic-gate 	RPCLOG(2, "clnt_cots_kcallit: connected, sending call, tidu_size %d\n",
10790Sstevel@tonic-gate 	    tidu_size);
10800Sstevel@tonic-gate 
10810Sstevel@tonic-gate 	wq = cm_entry->x_wq;
10820Sstevel@tonic-gate 	clnt_dispatch_send(wq, mp, call, p->cku_xid,
10830Sstevel@tonic-gate 				(p->cku_flags & CKU_ONQUEUE));
10840Sstevel@tonic-gate 
10850Sstevel@tonic-gate 	RPCLOG(64, "clnt_cots_kcallit: sent call for xid 0x%x\n",
10860Sstevel@tonic-gate 		(uint_t)p->cku_xid);
10870Sstevel@tonic-gate 	p->cku_flags = (CKU_ONQUEUE|CKU_SENT);
10880Sstevel@tonic-gate 	p->cku_recv_attempts = 1;
10890Sstevel@tonic-gate 
10900Sstevel@tonic-gate #ifdef	RPCDEBUG
10910Sstevel@tonic-gate 	time_sent = lbolt;
10920Sstevel@tonic-gate #endif
10930Sstevel@tonic-gate 
10940Sstevel@tonic-gate 	/*
10950Sstevel@tonic-gate 	 * Wait for a reply or a timeout.  If there is no error or timeout,
10960Sstevel@tonic-gate 	 * (both indicated by call_status), call->call_reply will contain
10970Sstevel@tonic-gate 	 * the RPC reply message.
10980Sstevel@tonic-gate 	 */
10990Sstevel@tonic-gate read_again:
11000Sstevel@tonic-gate 	mutex_enter(&call->call_lock);
11010Sstevel@tonic-gate 	interrupted = 0;
11020Sstevel@tonic-gate 	if (call->call_status == RPC_TIMEDOUT) {
11030Sstevel@tonic-gate 		/*
11040Sstevel@tonic-gate 		 * Indicate that the lwp is not to be stopped while waiting
11050Sstevel@tonic-gate 		 * for this network traffic.  This is to avoid deadlock while
11060Sstevel@tonic-gate 		 * debugging a process via /proc and also to avoid recursive
11070Sstevel@tonic-gate 		 * mutex_enter()s due to NFS page faults while stopping
11080Sstevel@tonic-gate 		 * (NFS holds locks when it calls here).
11090Sstevel@tonic-gate 		 */
11100Sstevel@tonic-gate 		clock_t cv_wait_ret;
11110Sstevel@tonic-gate 		clock_t timout;
11120Sstevel@tonic-gate 		clock_t oldlbolt;
11130Sstevel@tonic-gate 
11140Sstevel@tonic-gate 		klwp_t *lwp = ttolwp(curthread);
11150Sstevel@tonic-gate 
11160Sstevel@tonic-gate 		if (lwp != NULL)
11170Sstevel@tonic-gate 			lwp->lwp_nostop++;
11180Sstevel@tonic-gate 
11190Sstevel@tonic-gate 		oldlbolt = lbolt;
11200Sstevel@tonic-gate 		timout = wait.tv_sec * drv_usectohz(1000000) +
11210Sstevel@tonic-gate 		    drv_usectohz(wait.tv_usec) + oldlbolt;
11220Sstevel@tonic-gate 		/*
11230Sstevel@tonic-gate 		 * Iterate until the call_status is changed to something
11240Sstevel@tonic-gate 		 * other that RPC_TIMEDOUT, or if cv_timedwait_sig() returns
11250Sstevel@tonic-gate 		 * something <=0 zero. The latter means that we timed
11260Sstevel@tonic-gate 		 * out.
11270Sstevel@tonic-gate 		 */
11280Sstevel@tonic-gate 		if (h->cl_nosignal)
11290Sstevel@tonic-gate 			while ((cv_wait_ret = cv_timedwait(&call->call_cv,
11300Sstevel@tonic-gate 			    &call->call_lock, timout)) > 0 &&
11310Sstevel@tonic-gate 			    call->call_status == RPC_TIMEDOUT);
11320Sstevel@tonic-gate 		else
11330Sstevel@tonic-gate 			while ((cv_wait_ret = cv_timedwait_sig(
11340Sstevel@tonic-gate 			    &call->call_cv,
11350Sstevel@tonic-gate 			    &call->call_lock, timout)) > 0 &&
11360Sstevel@tonic-gate 			    call->call_status == RPC_TIMEDOUT);
11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate 		switch (cv_wait_ret) {
11390Sstevel@tonic-gate 		case 0:
11400Sstevel@tonic-gate 			/*
11410Sstevel@tonic-gate 			 * If we got out of the above loop with
11420Sstevel@tonic-gate 			 * cv_timedwait_sig() returning 0, then we were
11430Sstevel@tonic-gate 			 * interrupted regardless what call_status is.
11440Sstevel@tonic-gate 			 */
11450Sstevel@tonic-gate 			interrupted = 1;
11460Sstevel@tonic-gate 			break;
11470Sstevel@tonic-gate 		case -1:
11480Sstevel@tonic-gate 			/* cv_timedwait_sig() timed out */
11490Sstevel@tonic-gate 			break;
11500Sstevel@tonic-gate 		default:
11510Sstevel@tonic-gate 
11520Sstevel@tonic-gate 			/*
11530Sstevel@tonic-gate 			 * We were cv_signaled(). If we didn't
11540Sstevel@tonic-gate 			 * get a successful call_status and returned
11550Sstevel@tonic-gate 			 * before time expired, delay up to clnt_cots_min_tout
11560Sstevel@tonic-gate 			 * seconds so that the caller doesn't immediately
11570Sstevel@tonic-gate 			 * try to call us again and thus force the
11580Sstevel@tonic-gate 			 * same condition that got us here (such
11590Sstevel@tonic-gate 			 * as a RPC_XPRTFAILED due to the server not
11600Sstevel@tonic-gate 			 * listening on the end-point.
11610Sstevel@tonic-gate 			 */
11620Sstevel@tonic-gate 			if (call->call_status != RPC_SUCCESS) {
11630Sstevel@tonic-gate 				clock_t curlbolt;
11640Sstevel@tonic-gate 				clock_t diff;
11650Sstevel@tonic-gate 
11660Sstevel@tonic-gate 				curlbolt = ddi_get_lbolt();
11670Sstevel@tonic-gate 				ticks = clnt_cots_min_tout *
11680Sstevel@tonic-gate 				    drv_usectohz(1000000);
11690Sstevel@tonic-gate 				diff = curlbolt - oldlbolt;
11700Sstevel@tonic-gate 				if (diff < ticks) {
11710Sstevel@tonic-gate 					delay_first = TRUE;
11720Sstevel@tonic-gate 					if (diff > 0)
11730Sstevel@tonic-gate 						ticks -= diff;
11740Sstevel@tonic-gate 				}
11750Sstevel@tonic-gate 			}
11760Sstevel@tonic-gate 			break;
11770Sstevel@tonic-gate 		}
11780Sstevel@tonic-gate 
11790Sstevel@tonic-gate 		if (lwp != NULL)
11800Sstevel@tonic-gate 			lwp->lwp_nostop--;
11810Sstevel@tonic-gate 	}
11820Sstevel@tonic-gate 	/*
11830Sstevel@tonic-gate 	 * Get the reply message, if any.  This will be freed at the end
11840Sstevel@tonic-gate 	 * whether or not an error occurred.
11850Sstevel@tonic-gate 	 */
11860Sstevel@tonic-gate 	mp = call->call_reply;
11870Sstevel@tonic-gate 	call->call_reply = NULL;
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate 	/*
11900Sstevel@tonic-gate 	 * call_err is the error info when the call is on dispatch queue.
11910Sstevel@tonic-gate 	 * cku_err is the error info returned to the caller.
11920Sstevel@tonic-gate 	 * Sync cku_err with call_err for local message processing.
11930Sstevel@tonic-gate 	 */
11940Sstevel@tonic-gate 
11950Sstevel@tonic-gate 	status = call->call_status;
11960Sstevel@tonic-gate 	p->cku_err = call->call_err;
11970Sstevel@tonic-gate 	mutex_exit(&call->call_lock);
11980Sstevel@tonic-gate 
11990Sstevel@tonic-gate 	if (status != RPC_SUCCESS) {
12000Sstevel@tonic-gate 		switch (status) {
12010Sstevel@tonic-gate 		case RPC_TIMEDOUT:
12020Sstevel@tonic-gate 			if (interrupted) {
12030Sstevel@tonic-gate 				COTSRCSTAT_INCR(p->cku_stats, rcintrs);
12040Sstevel@tonic-gate 				p->cku_err.re_status = RPC_INTR;
12050Sstevel@tonic-gate 				p->cku_err.re_errno = EINTR;
12060Sstevel@tonic-gate 				RPCLOG(1, "clnt_cots_kcallit: xid 0x%x",
12070Sstevel@tonic-gate 				    p->cku_xid);
12080Sstevel@tonic-gate 				RPCLOG(1, "signal interrupted at %ld", lbolt);
12090Sstevel@tonic-gate 				RPCLOG(1, ", was sent at %ld\n", time_sent);
12100Sstevel@tonic-gate 			} else {
12110Sstevel@tonic-gate 				COTSRCSTAT_INCR(p->cku_stats, rctimeouts);
12120Sstevel@tonic-gate 				p->cku_err.re_errno = ETIMEDOUT;
12130Sstevel@tonic-gate 				RPCLOG(1, "clnt_cots_kcallit: timed out at %ld",
12140Sstevel@tonic-gate 				    lbolt);
12150Sstevel@tonic-gate 				RPCLOG(1, ", was sent at %ld\n", time_sent);
12160Sstevel@tonic-gate 			}
12170Sstevel@tonic-gate 			break;
12180Sstevel@tonic-gate 
12190Sstevel@tonic-gate 		case RPC_XPRTFAILED:
12200Sstevel@tonic-gate 			if (p->cku_err.re_errno == 0)
12210Sstevel@tonic-gate 				p->cku_err.re_errno = EIO;
12220Sstevel@tonic-gate 
12230Sstevel@tonic-gate 			RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
12240Sstevel@tonic-gate 			    p->cku_err.re_errno);
12250Sstevel@tonic-gate 			break;
12260Sstevel@tonic-gate 
12270Sstevel@tonic-gate 		case RPC_SYSTEMERROR:
12280Sstevel@tonic-gate 			ASSERT(p->cku_err.re_errno);
12290Sstevel@tonic-gate 			RPCLOG(1, "clnt_cots_kcallit: system error: %d\n",
12300Sstevel@tonic-gate 			    p->cku_err.re_errno);
12310Sstevel@tonic-gate 			break;
12320Sstevel@tonic-gate 
12330Sstevel@tonic-gate 		default:
12340Sstevel@tonic-gate 			p->cku_err.re_status = RPC_SYSTEMERROR;
12350Sstevel@tonic-gate 			p->cku_err.re_errno = EIO;
12360Sstevel@tonic-gate 			RPCLOG(1, "clnt_cots_kcallit: error: %s\n",
12370Sstevel@tonic-gate 			    clnt_sperrno(status));
12380Sstevel@tonic-gate 			break;
12390Sstevel@tonic-gate 		}
12400Sstevel@tonic-gate 		if (p->cku_err.re_status != RPC_TIMEDOUT) {
12410Sstevel@tonic-gate 
12420Sstevel@tonic-gate 			if (p->cku_flags & CKU_ONQUEUE) {
12430Sstevel@tonic-gate 				call_table_remove(call);
12440Sstevel@tonic-gate 				p->cku_flags &= ~CKU_ONQUEUE;
12450Sstevel@tonic-gate 			}
12460Sstevel@tonic-gate 
12470Sstevel@tonic-gate 			RPCLOG(64, "clnt_cots_kcallit: non TIMEOUT so xid 0x%x "
12480Sstevel@tonic-gate 			    "taken off dispatch list\n", p->cku_xid);
12490Sstevel@tonic-gate 			if (call->call_reply) {
12500Sstevel@tonic-gate 				freemsg(call->call_reply);
12510Sstevel@tonic-gate 				call->call_reply = NULL;
12520Sstevel@tonic-gate 			}
12530Sstevel@tonic-gate 		} else if (wait.tv_sec != 0) {
12540Sstevel@tonic-gate 			/*
12550Sstevel@tonic-gate 			 * We've sent the request over TCP and so we have
12560Sstevel@tonic-gate 			 * every reason to believe it will get
12570Sstevel@tonic-gate 			 * delivered. In which case returning a timeout is not
12580Sstevel@tonic-gate 			 * appropriate.
12590Sstevel@tonic-gate 			 */
12600Sstevel@tonic-gate 			if (p->cku_progress == TRUE &&
12610Sstevel@tonic-gate 			    p->cku_recv_attempts < clnt_cots_maxrecv) {
12620Sstevel@tonic-gate 				p->cku_err.re_status = RPC_INPROGRESS;
12630Sstevel@tonic-gate 			}
12640Sstevel@tonic-gate 		}
12650Sstevel@tonic-gate 		goto cots_done;
12660Sstevel@tonic-gate 	}
12670Sstevel@tonic-gate 
12680Sstevel@tonic-gate 	xdrs = &p->cku_inxdr;
12690Sstevel@tonic-gate 	xdrmblk_init(xdrs, mp, XDR_DECODE, 0);
12700Sstevel@tonic-gate 
12710Sstevel@tonic-gate 	reply_msg.rm_direction = REPLY;
12720Sstevel@tonic-gate 	reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
12730Sstevel@tonic-gate 	reply_msg.acpted_rply.ar_stat = SUCCESS;
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	reply_msg.acpted_rply.ar_verf = _null_auth;
12760Sstevel@tonic-gate 	/*
12770Sstevel@tonic-gate 	 *  xdr_results will be done in AUTH_UNWRAP.
12780Sstevel@tonic-gate 	 */
12790Sstevel@tonic-gate 	reply_msg.acpted_rply.ar_results.where = NULL;
12800Sstevel@tonic-gate 	reply_msg.acpted_rply.ar_results.proc = xdr_void;
12810Sstevel@tonic-gate 
12820Sstevel@tonic-gate 	if (xdr_replymsg(xdrs, &reply_msg)) {
12830Sstevel@tonic-gate 		enum clnt_stat re_status;
12840Sstevel@tonic-gate 
12850Sstevel@tonic-gate 		_seterr_reply(&reply_msg, &p->cku_err);
12860Sstevel@tonic-gate 
12870Sstevel@tonic-gate 		re_status = p->cku_err.re_status;
12880Sstevel@tonic-gate 		if (re_status == RPC_SUCCESS) {
12890Sstevel@tonic-gate 			/*
12900Sstevel@tonic-gate 			 * Reply is good, check auth.
12910Sstevel@tonic-gate 			 */
12920Sstevel@tonic-gate 			if (!AUTH_VALIDATE(h->cl_auth,
1293571Srg137905 					&reply_msg.acpted_rply.ar_verf)) {
12940Sstevel@tonic-gate 				COTSRCSTAT_INCR(p->cku_stats, rcbadverfs);
12950Sstevel@tonic-gate 				RPCLOG0(1, "clnt_cots_kcallit: validation "
1296571Srg137905 					"failure\n");
12970Sstevel@tonic-gate 				freemsg(mp);
12980Sstevel@tonic-gate 				(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
12990Sstevel@tonic-gate 				mutex_enter(&call->call_lock);
13000Sstevel@tonic-gate 				if (call->call_reply == NULL)
13010Sstevel@tonic-gate 					call->call_status = RPC_TIMEDOUT;
13020Sstevel@tonic-gate 				mutex_exit(&call->call_lock);
13030Sstevel@tonic-gate 				goto read_again;
13040Sstevel@tonic-gate 			} else if (!AUTH_UNWRAP(h->cl_auth, xdrs,
1305571Srg137905 						xdr_results, resultsp)) {
13060Sstevel@tonic-gate 				RPCLOG0(1, "clnt_cots_kcallit: validation "
1307571Srg137905 					"failure (unwrap)\n");
13080Sstevel@tonic-gate 				p->cku_err.re_status = RPC_CANTDECODERES;
13090Sstevel@tonic-gate 				p->cku_err.re_errno = EIO;
13100Sstevel@tonic-gate 			}
13110Sstevel@tonic-gate 		} else {
13120Sstevel@tonic-gate 			/* set errno in case we can't recover */
13130Sstevel@tonic-gate 			if (re_status != RPC_VERSMISMATCH &&
13140Sstevel@tonic-gate 			    re_status != RPC_AUTHERROR &&
13150Sstevel@tonic-gate 			    re_status != RPC_PROGVERSMISMATCH)
13160Sstevel@tonic-gate 				p->cku_err.re_errno = EIO;
13170Sstevel@tonic-gate 
13180Sstevel@tonic-gate 			if (re_status == RPC_AUTHERROR) {
13190Sstevel@tonic-gate 				/*
1320589Srg137905 				 * Maybe our credential need to be refreshed
13210Sstevel@tonic-gate 				 */
1322571Srg137905 				if (cm_entry) {
1323571Srg137905 					/*
1324571Srg137905 					 * There is the potential that the
1325571Srg137905 					 * cm_entry has/will be marked dead,
1326589Srg137905 					 * so drop the connection altogether,
1327589Srg137905 					 * force REFRESH to establish new
1328589Srg137905 					 * connection.
1329571Srg137905 					 */
1330589Srg137905 					connmgr_cancelconn(cm_entry);
1331571Srg137905 					cm_entry = NULL;
13320Sstevel@tonic-gate 				}
13330Sstevel@tonic-gate 
1334571Srg137905 				if ((refreshes > 0) &&
1335571Srg137905 				    AUTH_REFRESH(h->cl_auth, &reply_msg,
1336571Srg137905 						p->cku_cred)) {
1337571Srg137905 					refreshes--;
1338571Srg137905 					(void) xdr_rpc_free_verifier(xdrs,
1339571Srg137905 								&reply_msg);
1340571Srg137905 					freemsg(mp);
1341571Srg137905 					mp = NULL;
1342571Srg137905 
1343571Srg137905 					if (p->cku_flags & CKU_ONQUEUE) {
1344571Srg137905 						call_table_remove(call);
1345571Srg137905 						p->cku_flags &= ~CKU_ONQUEUE;
1346571Srg137905 					}
1347571Srg137905 
1348571Srg137905 					RPCLOG(64,
1349571Srg137905 					    "clnt_cots_kcallit: AUTH_ERROR, xid"
1350571Srg137905 					    " 0x%x removed off dispatch list\n",
1351571Srg137905 					    p->cku_xid);
1352571Srg137905 					if (call->call_reply) {
1353571Srg137905 						freemsg(call->call_reply);
1354571Srg137905 						call->call_reply = NULL;
1355571Srg137905 					}
1356571Srg137905 
1357571Srg137905 					COTSRCSTAT_INCR(p->cku_stats,
1358571Srg137905 							rcbadcalls);
1359571Srg137905 					COTSRCSTAT_INCR(p->cku_stats,
1360571Srg137905 							rcnewcreds);
1361571Srg137905 					goto call_again;
1362589Srg137905 				}
1363589Srg137905 
13640Sstevel@tonic-gate 				/*
13650Sstevel@tonic-gate 				 * We have used the client handle to
13660Sstevel@tonic-gate 				 * do an AUTH_REFRESH and the RPC status may
13670Sstevel@tonic-gate 				 * be set to RPC_SUCCESS; Let's make sure to
13680Sstevel@tonic-gate 				 * set it to RPC_AUTHERROR.
13690Sstevel@tonic-gate 				 */
13700Sstevel@tonic-gate 				p->cku_err.re_status = RPC_AUTHERROR;
1371589Srg137905 
13720Sstevel@tonic-gate 				/*
13730Sstevel@tonic-gate 				 * Map recoverable and unrecoverable
13740Sstevel@tonic-gate 				 * authentication errors to appropriate errno
13750Sstevel@tonic-gate 				 */
13760Sstevel@tonic-gate 				switch (p->cku_err.re_why) {
1377342Snd150628 				case AUTH_TOOWEAK:
1378342Snd150628 					/*
1379571Srg137905 					 * This could be a failure where the
1380571Srg137905 					 * server requires use of a reserved
1381571Srg137905 					 * port,  check and optionally set the
1382571Srg137905 					 * client handle useresvport trying
1383571Srg137905 					 * one more time. Next go round we
1384571Srg137905 					 * fall out with the tooweak error.
1385342Snd150628 					 */
1386342Snd150628 					if (p->cku_useresvport != 1) {
1387342Snd150628 						p->cku_useresvport = 1;
1388342Snd150628 						p->cku_xid = 0;
1389342Snd150628 						(void) xdr_rpc_free_verifier
1390342Snd150628 							    (xdrs, &reply_msg);
1391342Snd150628 						freemsg(mp);
1392342Snd150628 						goto call_again;
1393342Snd150628 					}
1394342Snd150628 					/* FALLTHRU */
13950Sstevel@tonic-gate 				case AUTH_BADCRED:
13960Sstevel@tonic-gate 				case AUTH_BADVERF:
13970Sstevel@tonic-gate 				case AUTH_INVALIDRESP:
13980Sstevel@tonic-gate 				case AUTH_FAILED:
13990Sstevel@tonic-gate 				case RPCSEC_GSS_NOCRED:
14000Sstevel@tonic-gate 				case RPCSEC_GSS_FAILED:
14010Sstevel@tonic-gate 						p->cku_err.re_errno = EACCES;
14020Sstevel@tonic-gate 						break;
14030Sstevel@tonic-gate 				case AUTH_REJECTEDCRED:
14040Sstevel@tonic-gate 				case AUTH_REJECTEDVERF:
14050Sstevel@tonic-gate 				default:	p->cku_err.re_errno = EIO;
14060Sstevel@tonic-gate 						break;
14070Sstevel@tonic-gate 				}
14080Sstevel@tonic-gate 				RPCLOG(1, "clnt_cots_kcallit : authentication"
14090Sstevel@tonic-gate 				    " failed with RPC_AUTHERROR of type %d\n",
14100Sstevel@tonic-gate 				    (int)p->cku_err.re_why);
14110Sstevel@tonic-gate 			}
14120Sstevel@tonic-gate 		}
14130Sstevel@tonic-gate 	} else {
14140Sstevel@tonic-gate 		/* reply didn't decode properly. */
14150Sstevel@tonic-gate 		p->cku_err.re_status = RPC_CANTDECODERES;
14160Sstevel@tonic-gate 		p->cku_err.re_errno = EIO;
14170Sstevel@tonic-gate 		RPCLOG0(1, "clnt_cots_kcallit: decode failure\n");
14180Sstevel@tonic-gate 	}
14190Sstevel@tonic-gate 
14200Sstevel@tonic-gate 	(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
14210Sstevel@tonic-gate 
14220Sstevel@tonic-gate 	if (p->cku_flags & CKU_ONQUEUE) {
14230Sstevel@tonic-gate 		call_table_remove(call);
14240Sstevel@tonic-gate 		p->cku_flags &= ~CKU_ONQUEUE;
14250Sstevel@tonic-gate 	}
14260Sstevel@tonic-gate 
14270Sstevel@tonic-gate 	RPCLOG(64, "clnt_cots_kcallit: xid 0x%x taken off dispatch list",
14280Sstevel@tonic-gate 	    p->cku_xid);
14290Sstevel@tonic-gate 	RPCLOG(64, " status is %s\n", clnt_sperrno(p->cku_err.re_status));
14300Sstevel@tonic-gate cots_done:
14310Sstevel@tonic-gate 	if (cm_entry)
14320Sstevel@tonic-gate 		connmgr_release(cm_entry);
14330Sstevel@tonic-gate 
14340Sstevel@tonic-gate 	if (mp != NULL)
14350Sstevel@tonic-gate 		freemsg(mp);
14360Sstevel@tonic-gate 	if ((p->cku_flags & CKU_ONQUEUE) == 0 && call->call_reply) {
14370Sstevel@tonic-gate 		freemsg(call->call_reply);
14380Sstevel@tonic-gate 		call->call_reply = NULL;
14390Sstevel@tonic-gate 	}
14400Sstevel@tonic-gate 	if (p->cku_err.re_status != RPC_SUCCESS) {
14410Sstevel@tonic-gate 		RPCLOG0(1, "clnt_cots_kcallit: tail-end failure\n");
14420Sstevel@tonic-gate 		COTSRCSTAT_INCR(p->cku_stats, rcbadcalls);
14430Sstevel@tonic-gate 	}
14440Sstevel@tonic-gate 
14450Sstevel@tonic-gate 	/*
14460Sstevel@tonic-gate 	 * No point in delaying if the zone is going away.
14470Sstevel@tonic-gate 	 */
14480Sstevel@tonic-gate 	if (delay_first == TRUE &&
14490Sstevel@tonic-gate 	    !(zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)) {
14500Sstevel@tonic-gate 		if (clnt_delay(ticks, h->cl_nosignal) == EINTR) {
14510Sstevel@tonic-gate 			p->cku_err.re_errno = EINTR;
14520Sstevel@tonic-gate 			p->cku_err.re_status = RPC_INTR;
14530Sstevel@tonic-gate 		}
14540Sstevel@tonic-gate 	}
14550Sstevel@tonic-gate 	return (p->cku_err.re_status);
14560Sstevel@tonic-gate }
14570Sstevel@tonic-gate 
14580Sstevel@tonic-gate /*
14590Sstevel@tonic-gate  * Kinit routine for cots.  This sets up the correct operations in
14600Sstevel@tonic-gate  * the client handle, as the handle may have previously been a clts
14610Sstevel@tonic-gate  * handle, and clears the xid field so there is no way a new call
14620Sstevel@tonic-gate  * could be mistaken for a retry.  It also sets in the handle the
14630Sstevel@tonic-gate  * information that is passed at create/kinit time but needed at
14640Sstevel@tonic-gate  * call time, as cots creates the transport at call time - device,
14650Sstevel@tonic-gate  * address of the server, protocol family.
14660Sstevel@tonic-gate  */
14670Sstevel@tonic-gate void
14680Sstevel@tonic-gate clnt_cots_kinit(CLIENT *h, dev_t dev, int family, struct netbuf *addr,
14690Sstevel@tonic-gate 	int max_msgsize, cred_t *cred)
14700Sstevel@tonic-gate {
14710Sstevel@tonic-gate 	/* LINTED pointer alignment */
14720Sstevel@tonic-gate 	cku_private_t *p = htop(h);
14730Sstevel@tonic-gate 	calllist_t *call = &p->cku_call;
14740Sstevel@tonic-gate 
14750Sstevel@tonic-gate 	h->cl_ops = &tcp_ops;
14760Sstevel@tonic-gate 	if (p->cku_flags & CKU_ONQUEUE) {
14770Sstevel@tonic-gate 		call_table_remove(call);
14780Sstevel@tonic-gate 		p->cku_flags &= ~CKU_ONQUEUE;
14790Sstevel@tonic-gate 		RPCLOG(64, "clnt_cots_kinit: removing call for xid 0x%x from"
14800Sstevel@tonic-gate 		    " dispatch list\n", p->cku_xid);
14810Sstevel@tonic-gate 	}
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate 	if (call->call_reply != NULL) {
14840Sstevel@tonic-gate 		freemsg(call->call_reply);
14850Sstevel@tonic-gate 		call->call_reply = NULL;
14860Sstevel@tonic-gate 	}
14870Sstevel@tonic-gate 
14880Sstevel@tonic-gate 	call->call_bucket = NULL;
14890Sstevel@tonic-gate 	call->call_hash = 0;
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate 	/*
14920Sstevel@tonic-gate 	 * We don't clear cku_flags here, because clnt_cots_kcallit()
14930Sstevel@tonic-gate 	 * takes care of handling the cku_flags reset.
14940Sstevel@tonic-gate 	 */
14950Sstevel@tonic-gate 	p->cku_xid = 0;
14960Sstevel@tonic-gate 	p->cku_device = dev;
14970Sstevel@tonic-gate 	p->cku_addrfmly = family;
14980Sstevel@tonic-gate 	p->cku_cred = cred;
14990Sstevel@tonic-gate 
15000Sstevel@tonic-gate 	if (p->cku_addr.maxlen < addr->len) {
15010Sstevel@tonic-gate 		if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
15020Sstevel@tonic-gate 			kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
15030Sstevel@tonic-gate 		p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
15040Sstevel@tonic-gate 		p->cku_addr.maxlen = addr->maxlen;
15050Sstevel@tonic-gate 	}
15060Sstevel@tonic-gate 
15070Sstevel@tonic-gate 	p->cku_addr.len = addr->len;
15080Sstevel@tonic-gate 	bcopy(addr->buf, p->cku_addr.buf, addr->len);
15090Sstevel@tonic-gate 
15100Sstevel@tonic-gate 	/*
15110Sstevel@tonic-gate 	 * If the current sanity check size in rpcmod is smaller
15120Sstevel@tonic-gate 	 * than the size needed, then increase the sanity check.
15130Sstevel@tonic-gate 	 */
15140Sstevel@tonic-gate 	if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
15150Sstevel@tonic-gate 	    max_msgsize > *clnt_max_msg_sizep) {
15160Sstevel@tonic-gate 		mutex_enter(&clnt_max_msg_lock);
15170Sstevel@tonic-gate 		if (max_msgsize > *clnt_max_msg_sizep)
15180Sstevel@tonic-gate 			*clnt_max_msg_sizep = max_msgsize;
15190Sstevel@tonic-gate 		mutex_exit(&clnt_max_msg_lock);
15200Sstevel@tonic-gate 	}
15210Sstevel@tonic-gate }
15220Sstevel@tonic-gate 
15230Sstevel@tonic-gate /*
15240Sstevel@tonic-gate  * ksettimers is a no-op for cots, with the exception of setting the xid.
15250Sstevel@tonic-gate  */
15260Sstevel@tonic-gate /* ARGSUSED */
15270Sstevel@tonic-gate static int
15280Sstevel@tonic-gate clnt_cots_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
15290Sstevel@tonic-gate 	int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg,
15300Sstevel@tonic-gate 	uint32_t xid)
15310Sstevel@tonic-gate {
15320Sstevel@tonic-gate 	/* LINTED pointer alignment */
15330Sstevel@tonic-gate 	cku_private_t *p = htop(h);
15340Sstevel@tonic-gate 
15350Sstevel@tonic-gate 	if (xid)
15360Sstevel@tonic-gate 		p->cku_xid = xid;
15370Sstevel@tonic-gate 	COTSRCSTAT_INCR(p->cku_stats, rctimers);
15380Sstevel@tonic-gate 	return (0);
15390Sstevel@tonic-gate }
15400Sstevel@tonic-gate 
15410Sstevel@tonic-gate extern void rpc_poptimod(struct vnode *);
15420Sstevel@tonic-gate extern int kstr_push(struct vnode *, char *);
15430Sstevel@tonic-gate 
15440Sstevel@tonic-gate int
15450Sstevel@tonic-gate conn_kstat_update(kstat_t *ksp, int rw)
15460Sstevel@tonic-gate {
15470Sstevel@tonic-gate 	struct cm_xprt *cm_entry;
15480Sstevel@tonic-gate 	struct cm_kstat_xprt *cm_ksp_data;
15490Sstevel@tonic-gate 	uchar_t *b;
15500Sstevel@tonic-gate 	char *fbuf;
15510Sstevel@tonic-gate 
15520Sstevel@tonic-gate 	if (rw == KSTAT_WRITE)
15530Sstevel@tonic-gate 		return (EACCES);
15540Sstevel@tonic-gate 	if (ksp == NULL || ksp->ks_private == NULL)
15550Sstevel@tonic-gate 		return (EIO);
15560Sstevel@tonic-gate 	cm_entry  = (struct cm_xprt *)ksp->ks_private;
15570Sstevel@tonic-gate 	cm_ksp_data = (struct cm_kstat_xprt *)ksp->ks_data;
15580Sstevel@tonic-gate 
15590Sstevel@tonic-gate 	cm_ksp_data->x_wq.value.ui32 = (uint32_t)(uintptr_t)cm_entry->x_wq;
15600Sstevel@tonic-gate 	cm_ksp_data->x_family.value.ui32 = cm_entry->x_family;
15610Sstevel@tonic-gate 	cm_ksp_data->x_rdev.value.ui32 = (uint32_t)cm_entry->x_rdev;
15620Sstevel@tonic-gate 	cm_ksp_data->x_time.value.ui32 = cm_entry->x_time;
15630Sstevel@tonic-gate 	cm_ksp_data->x_ref.value.ui32 = cm_entry->x_ref;
15640Sstevel@tonic-gate 	cm_ksp_data->x_state.value.ui32 = cm_entry->x_state_flags;
15650Sstevel@tonic-gate 
15660Sstevel@tonic-gate 	if (cm_entry->x_server.buf) {
1567457Sbmc 		fbuf = cm_ksp_data->x_server.value.str.addr.ptr;
15680Sstevel@tonic-gate 		if (cm_entry->x_family == AF_INET &&
15690Sstevel@tonic-gate 		    cm_entry->x_server.len ==
15700Sstevel@tonic-gate 		    sizeof (struct sockaddr_in)) {
15710Sstevel@tonic-gate 			struct sockaddr_in  *sa;
15720Sstevel@tonic-gate 			sa = (struct sockaddr_in *)
15730Sstevel@tonic-gate 				cm_entry->x_server.buf;
15740Sstevel@tonic-gate 			b = (uchar_t *)&sa->sin_addr;
15750Sstevel@tonic-gate 			(void) sprintf(fbuf,
15760Sstevel@tonic-gate 			    "%03d.%03d.%03d.%03d", b[0] & 0xFF, b[1] & 0xFF,
15770Sstevel@tonic-gate 			    b[2] & 0xFF, b[3] & 0xFF);
15780Sstevel@tonic-gate 			cm_ksp_data->x_port.value.ui32 =
15790Sstevel@tonic-gate 				(uint32_t)sa->sin_port;
15800Sstevel@tonic-gate 		} else if (cm_entry->x_family == AF_INET6 &&
15810Sstevel@tonic-gate 				cm_entry->x_server.len >=
15820Sstevel@tonic-gate 				sizeof (struct sockaddr_in6)) {
15830Sstevel@tonic-gate 			/* extract server IP address & port */
15840Sstevel@tonic-gate 			struct sockaddr_in6 *sin6;
15850Sstevel@tonic-gate 			sin6 = (struct sockaddr_in6 *)cm_entry->x_server.buf;
15860Sstevel@tonic-gate 			(void) kinet_ntop6((uchar_t *)&sin6->sin6_addr, fbuf,
15870Sstevel@tonic-gate 				INET6_ADDRSTRLEN);
15880Sstevel@tonic-gate 			cm_ksp_data->x_port.value.ui32 = sin6->sin6_port;
15890Sstevel@tonic-gate 		} else {
15900Sstevel@tonic-gate 			struct sockaddr_in  *sa;
15910Sstevel@tonic-gate 
15920Sstevel@tonic-gate 			sa = (struct sockaddr_in *)cm_entry->x_server.buf;
15930Sstevel@tonic-gate 			b = (uchar_t *)&sa->sin_addr;
15940Sstevel@tonic-gate 			(void) sprintf(fbuf,
15950Sstevel@tonic-gate 			    "%03d.%03d.%03d.%03d", b[0] & 0xFF, b[1] & 0xFF,
15960Sstevel@tonic-gate 			    b[2] & 0xFF, b[3] & 0xFF);
15970Sstevel@tonic-gate 		}
15980Sstevel@tonic-gate 		KSTAT_NAMED_STR_BUFLEN(&cm_ksp_data->x_server) =
15990Sstevel@tonic-gate 			strlen(fbuf) + 1;
16000Sstevel@tonic-gate 	}
16010Sstevel@tonic-gate 
16020Sstevel@tonic-gate 	return (0);
16030Sstevel@tonic-gate }
16040Sstevel@tonic-gate 
16050Sstevel@tonic-gate 
16060Sstevel@tonic-gate /*
16070Sstevel@tonic-gate  * We want a version of delay which is interruptible by a UNIX signal
16080Sstevel@tonic-gate  * Return EINTR if an interrupt occured.
16090Sstevel@tonic-gate  */
16100Sstevel@tonic-gate static int
16110Sstevel@tonic-gate clnt_delay(clock_t ticks, bool_t nosignal)
16120Sstevel@tonic-gate {
16130Sstevel@tonic-gate 	if (nosignal == TRUE) {
16140Sstevel@tonic-gate 		delay(ticks);
16150Sstevel@tonic-gate 		return (0);
16160Sstevel@tonic-gate 	}
16170Sstevel@tonic-gate 	return (delay_sig(ticks));
16180Sstevel@tonic-gate }
16190Sstevel@tonic-gate 
16200Sstevel@tonic-gate /*
16210Sstevel@tonic-gate  * Wait for a connection until a timeout, or until we are
16220Sstevel@tonic-gate  * signalled that there has been a connection state change.
16230Sstevel@tonic-gate  */
16240Sstevel@tonic-gate static enum clnt_stat
16250Sstevel@tonic-gate connmgr_cwait(struct cm_xprt *cm_entry, const struct timeval *waitp,
16260Sstevel@tonic-gate 	bool_t nosignal)
16270Sstevel@tonic-gate {
16280Sstevel@tonic-gate 	bool_t interrupted;
16290Sstevel@tonic-gate 	clock_t timout, cv_stat;
16300Sstevel@tonic-gate 	enum clnt_stat clstat;
16310Sstevel@tonic-gate 	unsigned int old_state;
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&connmgr_lock));
16340Sstevel@tonic-gate 	/*
16350Sstevel@tonic-gate 	 * We wait for the transport connection to be made, or an
16360Sstevel@tonic-gate 	 * indication that it could not be made.
16370Sstevel@tonic-gate 	 */
16380Sstevel@tonic-gate 	clstat = RPC_TIMEDOUT;
16390Sstevel@tonic-gate 	interrupted = FALSE;
16400Sstevel@tonic-gate 
16410Sstevel@tonic-gate 	old_state = cm_entry->x_state_flags;
16420Sstevel@tonic-gate 	/*
16430Sstevel@tonic-gate 	 * Now loop until cv_timedwait{_sig} returns because of
16440Sstevel@tonic-gate 	 * a signal(0) or timeout(-1) or cv_signal(>0). But it may be
16450Sstevel@tonic-gate 	 * cv_signalled for various other reasons too. So loop
16460Sstevel@tonic-gate 	 * until there is a state change on the connection.
16470Sstevel@tonic-gate 	 */
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate 	timout = waitp->tv_sec * drv_usectohz(1000000) +
16500Sstevel@tonic-gate 	    drv_usectohz(waitp->tv_usec) + lbolt;
16510Sstevel@tonic-gate 
16520Sstevel@tonic-gate 	if (nosignal) {
16530Sstevel@tonic-gate 		while ((cv_stat = cv_timedwait(&cm_entry->x_conn_cv,
16540Sstevel@tonic-gate 		    &connmgr_lock, timout)) > 0 &&
16550Sstevel@tonic-gate 		    cm_entry->x_state_flags == old_state)
16560Sstevel@tonic-gate 			;
16570Sstevel@tonic-gate 	} else {
16580Sstevel@tonic-gate 		while ((cv_stat = cv_timedwait_sig(&cm_entry->x_conn_cv,
16590Sstevel@tonic-gate 		    &connmgr_lock, timout)) > 0 &&
16600Sstevel@tonic-gate 		    cm_entry->x_state_flags == old_state)
16610Sstevel@tonic-gate 			;
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate 		if (cv_stat == 0) /* got intr signal? */
16640Sstevel@tonic-gate 			interrupted = TRUE;
16650Sstevel@tonic-gate 	}
16660Sstevel@tonic-gate 
16670Sstevel@tonic-gate 	if ((cm_entry->x_state_flags & (X_BADSTATES|X_CONNECTED)) ==
16680Sstevel@tonic-gate 	    X_CONNECTED) {
16690Sstevel@tonic-gate 		clstat = RPC_SUCCESS;
16700Sstevel@tonic-gate 	} else {
16710Sstevel@tonic-gate 		if (interrupted == TRUE)
16720Sstevel@tonic-gate 			clstat = RPC_INTR;
16730Sstevel@tonic-gate 		RPCLOG(1, "connmgr_cwait: can't connect, error: %s\n",
16740Sstevel@tonic-gate 		    clnt_sperrno(clstat));
16750Sstevel@tonic-gate 	}
16760Sstevel@tonic-gate 
16770Sstevel@tonic-gate 	return (clstat);
16780Sstevel@tonic-gate }
16790Sstevel@tonic-gate 
16800Sstevel@tonic-gate /*
16810Sstevel@tonic-gate  * Primary interface for how RPC grabs a connection.
16820Sstevel@tonic-gate  */
16830Sstevel@tonic-gate static struct cm_xprt *
16840Sstevel@tonic-gate connmgr_wrapget(
16850Sstevel@tonic-gate 	struct netbuf *retryaddr,
16860Sstevel@tonic-gate 	const struct timeval *waitp,
16870Sstevel@tonic-gate 	cku_private_t *p)
16880Sstevel@tonic-gate {
16890Sstevel@tonic-gate 	struct cm_xprt *cm_entry;
16900Sstevel@tonic-gate 
16910Sstevel@tonic-gate 	cm_entry = connmgr_get(retryaddr, waitp, &p->cku_addr, p->cku_addrfmly,
16920Sstevel@tonic-gate 	    &p->cku_srcaddr, &p->cku_err, p->cku_device,
16930Sstevel@tonic-gate 	    p->cku_client.cl_nosignal, p->cku_useresvport);
16940Sstevel@tonic-gate 
16950Sstevel@tonic-gate 	if (cm_entry == NULL) {
16960Sstevel@tonic-gate 		/*
16970Sstevel@tonic-gate 		 * Re-map the call status to RPC_INTR if the err code is
16980Sstevel@tonic-gate 		 * EINTR. This can happen if calls status is RPC_TLIERROR.
16990Sstevel@tonic-gate 		 * However, don't re-map if signalling has been turned off.
17000Sstevel@tonic-gate 		 * XXX Really need to create a separate thread whenever
17010Sstevel@tonic-gate 		 * there isn't an existing connection.
17020Sstevel@tonic-gate 		 */
17030Sstevel@tonic-gate 		if (p->cku_err.re_errno == EINTR) {
17040Sstevel@tonic-gate 			if (p->cku_client.cl_nosignal == TRUE)
17050Sstevel@tonic-gate 				p->cku_err.re_errno = EIO;
17060Sstevel@tonic-gate 			else
17070Sstevel@tonic-gate 				p->cku_err.re_status = RPC_INTR;
17080Sstevel@tonic-gate 		}
17090Sstevel@tonic-gate 	}
17100Sstevel@tonic-gate 
17110Sstevel@tonic-gate 	return (cm_entry);
17120Sstevel@tonic-gate }
17130Sstevel@tonic-gate 
17140Sstevel@tonic-gate /*
17150Sstevel@tonic-gate  * Obtains a transport to the server specified in addr.  If a suitable transport
17160Sstevel@tonic-gate  * does not already exist in the list of cached transports, a new connection
17170Sstevel@tonic-gate  * is created, connected, and added to the list. The connection is for sending
17180Sstevel@tonic-gate  * only - the reply message may come back on another transport connection.
17190Sstevel@tonic-gate  */
17200Sstevel@tonic-gate static struct cm_xprt *
17210Sstevel@tonic-gate connmgr_get(
17220Sstevel@tonic-gate 	struct netbuf	*retryaddr,
17230Sstevel@tonic-gate 	const struct timeval	*waitp,	/* changed to a ptr to converse stack */
17240Sstevel@tonic-gate 	struct netbuf	*destaddr,
17250Sstevel@tonic-gate 	int		addrfmly,
17260Sstevel@tonic-gate 	struct netbuf	*srcaddr,
17270Sstevel@tonic-gate 	struct rpc_err	*rpcerr,
17280Sstevel@tonic-gate 	dev_t		device,
17290Sstevel@tonic-gate 	bool_t		nosignal,
17300Sstevel@tonic-gate 	int		useresvport)
17310Sstevel@tonic-gate {
17320Sstevel@tonic-gate 	struct cm_xprt *cm_entry;
17330Sstevel@tonic-gate 	struct cm_xprt *lru_entry;
17340Sstevel@tonic-gate 	struct cm_xprt **cmp;
17350Sstevel@tonic-gate 	queue_t *wq;
17360Sstevel@tonic-gate 	TIUSER *tiptr;
17370Sstevel@tonic-gate 	int i;
17380Sstevel@tonic-gate 	int retval;
17390Sstevel@tonic-gate 	clock_t prev_time;
17400Sstevel@tonic-gate 	int tidu_size;
17410Sstevel@tonic-gate 	bool_t	connected;
1742766Scarlsonj 	zoneid_t zoneid = rpc_zoneid();
17430Sstevel@tonic-gate 
17440Sstevel@tonic-gate 	/*
17450Sstevel@tonic-gate 	 * If the call is not a retry, look for a transport entry that
17460Sstevel@tonic-gate 	 * goes to the server of interest.
17470Sstevel@tonic-gate 	 */
17480Sstevel@tonic-gate 	mutex_enter(&connmgr_lock);
17490Sstevel@tonic-gate 
17500Sstevel@tonic-gate 	if (retryaddr == NULL) {
17510Sstevel@tonic-gate use_new_conn:
17520Sstevel@tonic-gate 		i = 0;
17530Sstevel@tonic-gate 		cm_entry = lru_entry = NULL;
17540Sstevel@tonic-gate 		prev_time = lbolt;
17550Sstevel@tonic-gate 
17560Sstevel@tonic-gate 		cmp = &cm_hd;
17570Sstevel@tonic-gate 		while ((cm_entry = *cmp) != NULL) {
17580Sstevel@tonic-gate 			ASSERT(cm_entry != cm_entry->x_next);
17590Sstevel@tonic-gate 			/*
17600Sstevel@tonic-gate 			 * Garbage collect conections that are marked
17610Sstevel@tonic-gate 			 * for needs disconnect.
17620Sstevel@tonic-gate 			 */
17630Sstevel@tonic-gate 			if (cm_entry->x_needdis) {
1764154Sshepler 				CONN_HOLD(cm_entry);
17650Sstevel@tonic-gate 				connmgr_dis_and_wait(cm_entry);
1766154Sshepler 				connmgr_release(cm_entry);
17670Sstevel@tonic-gate 				/*
17680Sstevel@tonic-gate 				 * connmgr_lock could have been
17690Sstevel@tonic-gate 				 * dropped for the disconnect
17700Sstevel@tonic-gate 				 * processing so start over.
17710Sstevel@tonic-gate 				 */
17720Sstevel@tonic-gate 				goto use_new_conn;
17730Sstevel@tonic-gate 			}
17740Sstevel@tonic-gate 
17750Sstevel@tonic-gate 			/*
17760Sstevel@tonic-gate 			 * Garbage collect the dead connections that have
17770Sstevel@tonic-gate 			 * no threads working on them.
17780Sstevel@tonic-gate 			 */
17790Sstevel@tonic-gate 			if ((cm_entry->x_state_flags & (X_DEAD|X_THREAD)) ==
17800Sstevel@tonic-gate 			    X_DEAD) {
17813017Smaheshvs 				mutex_enter(&cm_entry->x_lock);
17823017Smaheshvs 				if (cm_entry->x_ref != 0) {
17833017Smaheshvs 					/*
17843017Smaheshvs 					 * Currently in use.
17853017Smaheshvs 					 * Cleanup later.
17863017Smaheshvs 					 */
17873017Smaheshvs 					cmp = &cm_entry->x_next;
17883017Smaheshvs 					mutex_exit(&cm_entry->x_lock);
17893017Smaheshvs 					continue;
17903017Smaheshvs 				}
17913017Smaheshvs 				mutex_exit(&cm_entry->x_lock);
17920Sstevel@tonic-gate 				*cmp = cm_entry->x_next;
17930Sstevel@tonic-gate 				mutex_exit(&connmgr_lock);
17940Sstevel@tonic-gate 				connmgr_close(cm_entry);
17950Sstevel@tonic-gate 				mutex_enter(&connmgr_lock);
17960Sstevel@tonic-gate 				goto use_new_conn;
17970Sstevel@tonic-gate 			}
17980Sstevel@tonic-gate 
17990Sstevel@tonic-gate 
18000Sstevel@tonic-gate 			if ((cm_entry->x_state_flags & X_BADSTATES) == 0 &&
18010Sstevel@tonic-gate 			    cm_entry->x_zoneid == zoneid &&
18020Sstevel@tonic-gate 			    cm_entry->x_rdev == device &&
18030Sstevel@tonic-gate 			    destaddr->len == cm_entry->x_server.len &&
18040Sstevel@tonic-gate 			    bcmp(destaddr->buf, cm_entry->x_server.buf,
18050Sstevel@tonic-gate 			    destaddr->len) == 0) {
18060Sstevel@tonic-gate 				/*
18070Sstevel@tonic-gate 				 * If the matching entry isn't connected,
18080Sstevel@tonic-gate 				 * attempt to reconnect it.
18090Sstevel@tonic-gate 				 */
18100Sstevel@tonic-gate 				if (cm_entry->x_connected == FALSE) {
18110Sstevel@tonic-gate 					/*
18120Sstevel@tonic-gate 					 * We don't go through trying
18130Sstevel@tonic-gate 					 * to find the least recently
18140Sstevel@tonic-gate 					 * used connected because
18150Sstevel@tonic-gate 					 * connmgr_reconnect() briefly
18160Sstevel@tonic-gate 					 * dropped the connmgr_lock,
18170Sstevel@tonic-gate 					 * allowing a window for our
18180Sstevel@tonic-gate 					 * accounting to be messed up.
18190Sstevel@tonic-gate 					 * In any case, a re-connected
18200Sstevel@tonic-gate 					 * connection is as good as
18210Sstevel@tonic-gate 					 * a LRU connection.
18220Sstevel@tonic-gate 					 */
18230Sstevel@tonic-gate 					return (connmgr_wrapconnect(cm_entry,
18240Sstevel@tonic-gate 					    waitp, destaddr, addrfmly, srcaddr,
18250Sstevel@tonic-gate 					    rpcerr, TRUE, nosignal));
18260Sstevel@tonic-gate 				}
18270Sstevel@tonic-gate 				i++;
18280Sstevel@tonic-gate 				if (cm_entry->x_time - prev_time <= 0 ||
18290Sstevel@tonic-gate 				    lru_entry == NULL) {
18300Sstevel@tonic-gate 					prev_time = cm_entry->x_time;
18310Sstevel@tonic-gate 					lru_entry = cm_entry;
18320Sstevel@tonic-gate 				}
18330Sstevel@tonic-gate 			}
18340Sstevel@tonic-gate 			cmp = &cm_entry->x_next;
18350Sstevel@tonic-gate 		}
18360Sstevel@tonic-gate 
18370Sstevel@tonic-gate 		if (i > clnt_max_conns) {
18380Sstevel@tonic-gate 			RPCLOG(8, "connmgr_get: too many conns, dooming entry"
18390Sstevel@tonic-gate 			    " %p\n", (void *)lru_entry->x_tiptr);
18400Sstevel@tonic-gate 			lru_entry->x_doomed = TRUE;
18410Sstevel@tonic-gate 			goto use_new_conn;
18420Sstevel@tonic-gate 		}
18430Sstevel@tonic-gate 
18440Sstevel@tonic-gate 		/*
18450Sstevel@tonic-gate 		 * If we are at the maximum number of connections to
18460Sstevel@tonic-gate 		 * the server, hand back the least recently used one.
18470Sstevel@tonic-gate 		 */
18480Sstevel@tonic-gate 		if (i == clnt_max_conns) {
18490Sstevel@tonic-gate 			/*
18500Sstevel@tonic-gate 			 * Copy into the handle the source address of
18510Sstevel@tonic-gate 			 * the connection, which we will use in case of
18520Sstevel@tonic-gate 			 * a later retry.
18530Sstevel@tonic-gate 			 */
18540Sstevel@tonic-gate 			if (srcaddr->len != lru_entry->x_src.len) {
18550Sstevel@tonic-gate 				if (srcaddr->len > 0)
18560Sstevel@tonic-gate 					kmem_free(srcaddr->buf,
18570Sstevel@tonic-gate 					    srcaddr->maxlen);
18580Sstevel@tonic-gate 				srcaddr->buf = kmem_zalloc(
18590Sstevel@tonic-gate 				    lru_entry->x_src.len, KM_SLEEP);
18600Sstevel@tonic-gate 				srcaddr->maxlen = srcaddr->len =
18610Sstevel@tonic-gate 				    lru_entry->x_src.len;
18620Sstevel@tonic-gate 			}
18630Sstevel@tonic-gate 			bcopy(lru_entry->x_src.buf, srcaddr->buf, srcaddr->len);
18640Sstevel@tonic-gate 			RPCLOG(2, "connmgr_get: call going out on %p\n",
18650Sstevel@tonic-gate 			    (void *)lru_entry);
18660Sstevel@tonic-gate 			lru_entry->x_time = lbolt;
18670Sstevel@tonic-gate 			CONN_HOLD(lru_entry);
18680Sstevel@tonic-gate 			mutex_exit(&connmgr_lock);
18690Sstevel@tonic-gate 			return (lru_entry);
18700Sstevel@tonic-gate 		}
18710Sstevel@tonic-gate 
18720Sstevel@tonic-gate 	} else {
18730Sstevel@tonic-gate 		/*
18740Sstevel@tonic-gate 		 * This is the retry case (retryaddr != NULL).  Retries must
18750Sstevel@tonic-gate 		 * be sent on the same source port as the original call.
18760Sstevel@tonic-gate 		 */
18770Sstevel@tonic-gate 
18780Sstevel@tonic-gate 		/*
18790Sstevel@tonic-gate 		 * Walk the list looking for a connection with a source address
18800Sstevel@tonic-gate 		 * that matches the retry address.
18810Sstevel@tonic-gate 		 */
18820Sstevel@tonic-gate 		cmp = &cm_hd;
18830Sstevel@tonic-gate 		while ((cm_entry = *cmp) != NULL) {
18840Sstevel@tonic-gate 			ASSERT(cm_entry != cm_entry->x_next);
18850Sstevel@tonic-gate 			if (zoneid != cm_entry->x_zoneid ||
18860Sstevel@tonic-gate 			    device != cm_entry->x_rdev ||
18870Sstevel@tonic-gate 			    retryaddr->len != cm_entry->x_src.len ||
18880Sstevel@tonic-gate 			    bcmp(retryaddr->buf, cm_entry->x_src.buf,
18890Sstevel@tonic-gate 				    retryaddr->len) != 0) {
18900Sstevel@tonic-gate 				cmp = &cm_entry->x_next;
18910Sstevel@tonic-gate 				continue;
18920Sstevel@tonic-gate 			}
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate 			/*
18950Sstevel@tonic-gate 			 * Sanity check: if the connection with our source
18960Sstevel@tonic-gate 			 * port is going to some other server, something went
18970Sstevel@tonic-gate 			 * wrong, as we never delete connections (i.e. release
18980Sstevel@tonic-gate 			 * ports) unless they have been idle.  In this case,
18990Sstevel@tonic-gate 			 * it is probably better to send the call out using
19000Sstevel@tonic-gate 			 * a new source address than to fail it altogether,
19010Sstevel@tonic-gate 			 * since that port may never be released.
19020Sstevel@tonic-gate 			 */
19030Sstevel@tonic-gate 			if (destaddr->len != cm_entry->x_server.len ||
19040Sstevel@tonic-gate 				bcmp(destaddr->buf, cm_entry->x_server.buf,
19050Sstevel@tonic-gate 					destaddr->len) != 0) {
19060Sstevel@tonic-gate 				RPCLOG(1, "connmgr_get: tiptr %p"
19070Sstevel@tonic-gate 				    " is going to a different server"
19080Sstevel@tonic-gate 				    " with the port that belongs"
19090Sstevel@tonic-gate 				    " to us!\n", (void *)cm_entry->x_tiptr);
19100Sstevel@tonic-gate 				retryaddr = NULL;
19110Sstevel@tonic-gate 				goto use_new_conn;
19120Sstevel@tonic-gate 			}
19130Sstevel@tonic-gate 
19140Sstevel@tonic-gate 			/*
19150Sstevel@tonic-gate 			 * If the connection of interest is not connected and we
19160Sstevel@tonic-gate 			 * can't reconnect it, then the server is probably
19170Sstevel@tonic-gate 			 * still down.  Return NULL to the caller and let it
19180Sstevel@tonic-gate 			 * retry later if it wants to.  We have a delay so the
19190Sstevel@tonic-gate 			 * machine doesn't go into a tight retry loop.  If the
19200Sstevel@tonic-gate 			 * entry was already connected, or the reconnected was
19210Sstevel@tonic-gate 			 * successful, return this entry.
19220Sstevel@tonic-gate 			 */
19230Sstevel@tonic-gate 			if (cm_entry->x_connected == FALSE) {
19240Sstevel@tonic-gate 				return (connmgr_wrapconnect(cm_entry,
19250Sstevel@tonic-gate 				    waitp, destaddr, addrfmly, NULL,
19260Sstevel@tonic-gate 				    rpcerr, TRUE, nosignal));
19270Sstevel@tonic-gate 			} else {
19280Sstevel@tonic-gate 				CONN_HOLD(cm_entry);
19290Sstevel@tonic-gate 
19300Sstevel@tonic-gate 				cm_entry->x_time = lbolt;
19310Sstevel@tonic-gate 				mutex_exit(&connmgr_lock);
19320Sstevel@tonic-gate 				RPCLOG(2, "connmgr_get: found old "
19330Sstevel@tonic-gate 				    "transport %p for retry\n",
19340Sstevel@tonic-gate 				    (void *)cm_entry);
19350Sstevel@tonic-gate 				return (cm_entry);
19360Sstevel@tonic-gate 			}
19370Sstevel@tonic-gate 		}
19380Sstevel@tonic-gate 
19390Sstevel@tonic-gate 		/*
19400Sstevel@tonic-gate 		 * We cannot find an entry in the list for this retry.
19410Sstevel@tonic-gate 		 * Either the entry has been removed temporarily to be
19420Sstevel@tonic-gate 		 * reconnected by another thread, or the original call
19430Sstevel@tonic-gate 		 * got a port but never got connected,
19440Sstevel@tonic-gate 		 * and hence the transport never got put in the
19450Sstevel@tonic-gate 		 * list.  Fall through to the "create new connection" code -
19460Sstevel@tonic-gate 		 * the former case will fail there trying to rebind the port,
19470Sstevel@tonic-gate 		 * and the later case (and any other pathological cases) will
19480Sstevel@tonic-gate 		 * rebind and reconnect and not hang the client machine.
19490Sstevel@tonic-gate 		 */
19500Sstevel@tonic-gate 		RPCLOG0(8, "connmgr_get: no entry in list for retry\n");
19510Sstevel@tonic-gate 	}
19520Sstevel@tonic-gate 	/*
19530Sstevel@tonic-gate 	 * Set up a transport entry in the connection manager's list.
19540Sstevel@tonic-gate 	 */
19550Sstevel@tonic-gate 	cm_entry = (struct cm_xprt *)
19560Sstevel@tonic-gate 	    kmem_zalloc(sizeof (struct cm_xprt), KM_SLEEP);
19570Sstevel@tonic-gate 
19580Sstevel@tonic-gate 	cm_entry->x_server.buf = kmem_zalloc(destaddr->len, KM_SLEEP);
19590Sstevel@tonic-gate 	bcopy(destaddr->buf, cm_entry->x_server.buf, destaddr->len);
19600Sstevel@tonic-gate 	cm_entry->x_server.len = cm_entry->x_server.maxlen = destaddr->len;
19610Sstevel@tonic-gate 
19620Sstevel@tonic-gate 	cm_entry->x_state_flags = X_THREAD;
19630Sstevel@tonic-gate 	cm_entry->x_ref = 1;
19640Sstevel@tonic-gate 	cm_entry->x_family = addrfmly;
19650Sstevel@tonic-gate 	cm_entry->x_rdev = device;
19660Sstevel@tonic-gate 	cm_entry->x_zoneid = zoneid;
19670Sstevel@tonic-gate 	mutex_init(&cm_entry->x_lock, NULL, MUTEX_DEFAULT, NULL);
19680Sstevel@tonic-gate 	cv_init(&cm_entry->x_cv, NULL, CV_DEFAULT, NULL);
19690Sstevel@tonic-gate 	cv_init(&cm_entry->x_conn_cv, NULL, CV_DEFAULT, NULL);
19700Sstevel@tonic-gate 	cv_init(&cm_entry->x_dis_cv, NULL, CV_DEFAULT, NULL);
19710Sstevel@tonic-gate 
19720Sstevel@tonic-gate 	/*
19730Sstevel@tonic-gate 	 * Note that we add this partially initialized entry to the
19740Sstevel@tonic-gate 	 * connection list. This is so that we don't have connections to
19750Sstevel@tonic-gate 	 * the same server.
19760Sstevel@tonic-gate 	 *
19770Sstevel@tonic-gate 	 * Note that x_src is not initialized at this point. This is because
19780Sstevel@tonic-gate 	 * retryaddr might be NULL in which case x_src is whatever
19790Sstevel@tonic-gate 	 * t_kbind/bindresvport gives us. If another thread wants a
19800Sstevel@tonic-gate 	 * connection to the same server, seemingly we have an issue, but we
19810Sstevel@tonic-gate 	 * don't. If the other thread comes in with retryaddr == NULL, then it
19820Sstevel@tonic-gate 	 * will never look at x_src, and it will end up waiting in
19830Sstevel@tonic-gate 	 * connmgr_cwait() for the first thread to finish the connection
19840Sstevel@tonic-gate 	 * attempt. If the other thread comes in with retryaddr != NULL, then
19850Sstevel@tonic-gate 	 * that means there was a request sent on a connection, in which case
19860Sstevel@tonic-gate 	 * the the connection should already exist. Thus the first thread
19870Sstevel@tonic-gate 	 * never gets here ... it finds the connection it its server in the
19880Sstevel@tonic-gate 	 * connection list.
19890Sstevel@tonic-gate 	 *
19900Sstevel@tonic-gate 	 * But even if theory is wrong, in the retryaddr != NULL case, the 2nd
19910Sstevel@tonic-gate 	 * thread will skip us because x_src.len == 0.
19920Sstevel@tonic-gate 	 */
19930Sstevel@tonic-gate 	cm_entry->x_next = cm_hd;
19940Sstevel@tonic-gate 	cm_hd = cm_entry;
19950Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
19960Sstevel@tonic-gate 
19970Sstevel@tonic-gate 	/*
19980Sstevel@tonic-gate 	 * Either we didn't find an entry to the server of interest, or we
19990Sstevel@tonic-gate 	 * don't have the maximum number of connections to that server -
20000Sstevel@tonic-gate 	 * create a new connection.
20010Sstevel@tonic-gate 	 */
20020Sstevel@tonic-gate 	RPCLOG0(8, "connmgr_get: creating new connection\n");
20030Sstevel@tonic-gate 	rpcerr->re_status = RPC_TLIERROR;
20040Sstevel@tonic-gate 
20051676Sjpk 	i = t_kopen(NULL, device, FREAD|FWRITE|FNDELAY, &tiptr, zone_kcred());
20060Sstevel@tonic-gate 	if (i) {
20070Sstevel@tonic-gate 		RPCLOG(1, "connmgr_get: can't open cots device, error %d\n", i);
20080Sstevel@tonic-gate 		rpcerr->re_errno = i;
20090Sstevel@tonic-gate 		connmgr_cancelconn(cm_entry);
20100Sstevel@tonic-gate 		return (NULL);
20110Sstevel@tonic-gate 	}
20120Sstevel@tonic-gate 	rpc_poptimod(tiptr->fp->f_vnode);
20130Sstevel@tonic-gate 
20140Sstevel@tonic-gate 	if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
20150Sstevel@tonic-gate 			K_TO_K, kcred, &retval)) {
20160Sstevel@tonic-gate 		RPCLOG(1, "connmgr_get: can't push cots module, %d\n", i);
20170Sstevel@tonic-gate 		(void) t_kclose(tiptr, 1);
20180Sstevel@tonic-gate 		rpcerr->re_errno = i;
20190Sstevel@tonic-gate 		connmgr_cancelconn(cm_entry);
20200Sstevel@tonic-gate 		return (NULL);
20210Sstevel@tonic-gate 	}
20220Sstevel@tonic-gate 
20230Sstevel@tonic-gate 	if (i = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
20240Sstevel@tonic-gate 		kcred, &retval)) {
20250Sstevel@tonic-gate 		RPCLOG(1, "connmgr_get: can't set client status with cots "
20260Sstevel@tonic-gate 		    "module, %d\n", i);
20270Sstevel@tonic-gate 		(void) t_kclose(tiptr, 1);
20280Sstevel@tonic-gate 		rpcerr->re_errno = i;
20290Sstevel@tonic-gate 		connmgr_cancelconn(cm_entry);
20300Sstevel@tonic-gate 		return (NULL);
20310Sstevel@tonic-gate 	}
20320Sstevel@tonic-gate 
20330Sstevel@tonic-gate 	mutex_enter(&connmgr_lock);
20340Sstevel@tonic-gate 
20350Sstevel@tonic-gate 	wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;
20360Sstevel@tonic-gate 	cm_entry->x_wq = wq;
20370Sstevel@tonic-gate 
20380Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
20390Sstevel@tonic-gate 
20400Sstevel@tonic-gate 	if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
20410Sstevel@tonic-gate 			K_TO_K, kcred, &retval)) {
20420Sstevel@tonic-gate 		RPCLOG(1, "connmgr_get: can't push timod, %d\n", i);
20430Sstevel@tonic-gate 		(void) t_kclose(tiptr, 1);
20440Sstevel@tonic-gate 		rpcerr->re_errno = i;
20450Sstevel@tonic-gate 		connmgr_cancelconn(cm_entry);
20460Sstevel@tonic-gate 		return (NULL);
20470Sstevel@tonic-gate 	}
20480Sstevel@tonic-gate 
20490Sstevel@tonic-gate 	/*
20500Sstevel@tonic-gate 	 * If the caller has not specified reserved port usage then
20510Sstevel@tonic-gate 	 * take the system default.
20520Sstevel@tonic-gate 	 */
20530Sstevel@tonic-gate 	if (useresvport == -1)
20540Sstevel@tonic-gate 		useresvport = clnt_cots_do_bindresvport;
20550Sstevel@tonic-gate 
20560Sstevel@tonic-gate 	if ((useresvport || retryaddr != NULL) &&
20570Sstevel@tonic-gate 	    (addrfmly == AF_INET || addrfmly == AF_INET6)) {
20580Sstevel@tonic-gate 		bool_t alloc_src = FALSE;
20590Sstevel@tonic-gate 
20600Sstevel@tonic-gate 		if (srcaddr->len != destaddr->len) {
20610Sstevel@tonic-gate 			kmem_free(srcaddr->buf, srcaddr->maxlen);
20620Sstevel@tonic-gate 			srcaddr->buf = kmem_zalloc(destaddr->len, KM_SLEEP);
20630Sstevel@tonic-gate 			srcaddr->maxlen = destaddr->len;
20640Sstevel@tonic-gate 			srcaddr->len = destaddr->len;
20650Sstevel@tonic-gate 			alloc_src = TRUE;
20660Sstevel@tonic-gate 		}
20670Sstevel@tonic-gate 
20680Sstevel@tonic-gate 		if ((i = bindresvport(tiptr, retryaddr, srcaddr, TRUE)) != 0) {
20690Sstevel@tonic-gate 			(void) t_kclose(tiptr, 1);
20700Sstevel@tonic-gate 			RPCLOG(1, "connmgr_get: couldn't bind, retryaddr: "
20710Sstevel@tonic-gate 				"%p\n", (void *)retryaddr);
20720Sstevel@tonic-gate 
20730Sstevel@tonic-gate 			/*
20740Sstevel@tonic-gate 			 * 1225408: If we allocated a source address, then it
20750Sstevel@tonic-gate 			 * is either garbage or all zeroes. In that case
20760Sstevel@tonic-gate 			 * we need to clear srcaddr.
20770Sstevel@tonic-gate 			 */
20780Sstevel@tonic-gate 			if (alloc_src == TRUE) {
20790Sstevel@tonic-gate 				kmem_free(srcaddr->buf, srcaddr->maxlen);
20800Sstevel@tonic-gate 				srcaddr->maxlen = srcaddr->len = 0;
20810Sstevel@tonic-gate 				srcaddr->buf = NULL;
20820Sstevel@tonic-gate 			}
20830Sstevel@tonic-gate 			rpcerr->re_errno = i;
20840Sstevel@tonic-gate 			connmgr_cancelconn(cm_entry);
20850Sstevel@tonic-gate 			return (NULL);
20860Sstevel@tonic-gate 		}
20870Sstevel@tonic-gate 	} else {
20880Sstevel@tonic-gate 		if ((i = t_kbind(tiptr, NULL, NULL)) != 0) {
20890Sstevel@tonic-gate 			RPCLOG(1, "clnt_cots_kcreate: t_kbind: %d\n", i);
20900Sstevel@tonic-gate 			(void) t_kclose(tiptr, 1);
20910Sstevel@tonic-gate 			rpcerr->re_errno = i;
20920Sstevel@tonic-gate 			connmgr_cancelconn(cm_entry);
20930Sstevel@tonic-gate 			return (NULL);
20940Sstevel@tonic-gate 		}
20950Sstevel@tonic-gate 	}
20960Sstevel@tonic-gate 
20970Sstevel@tonic-gate 	{
20980Sstevel@tonic-gate 		/*
20990Sstevel@tonic-gate 		 * Keep the kernel stack lean. Don't move this call
21000Sstevel@tonic-gate 		 * declaration to the top of this function because a
21010Sstevel@tonic-gate 		 * call is declared in connmgr_wrapconnect()
21020Sstevel@tonic-gate 		 */
21030Sstevel@tonic-gate 		calllist_t call;
21040Sstevel@tonic-gate 
21050Sstevel@tonic-gate 		bzero(&call, sizeof (call));
21060Sstevel@tonic-gate 		cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);
21070Sstevel@tonic-gate 
21080Sstevel@tonic-gate 		/*
21090Sstevel@tonic-gate 		 * This is a bound end-point so don't close it's stream.
21100Sstevel@tonic-gate 		 */
21110Sstevel@tonic-gate 		connected = connmgr_connect(cm_entry, wq, destaddr, addrfmly,
21120Sstevel@tonic-gate 						&call, &tidu_size, FALSE, waitp,
21130Sstevel@tonic-gate 						nosignal);
21140Sstevel@tonic-gate 		*rpcerr = call.call_err;
21150Sstevel@tonic-gate 		cv_destroy(&call.call_cv);
21160Sstevel@tonic-gate 
21170Sstevel@tonic-gate 	}
21180Sstevel@tonic-gate 
21190Sstevel@tonic-gate 	mutex_enter(&connmgr_lock);
21200Sstevel@tonic-gate 
21210Sstevel@tonic-gate 	/*
21220Sstevel@tonic-gate 	 * Set up a transport entry in the connection manager's list.
21230Sstevel@tonic-gate 	 */
21240Sstevel@tonic-gate 	cm_entry->x_src.buf = kmem_zalloc(srcaddr->len, KM_SLEEP);
21250Sstevel@tonic-gate 	bcopy(srcaddr->buf, cm_entry->x_src.buf, srcaddr->len);
21260Sstevel@tonic-gate 	cm_entry->x_src.len = cm_entry->x_src.maxlen = srcaddr->len;
21270Sstevel@tonic-gate 
21280Sstevel@tonic-gate 	cm_entry->x_tiptr = tiptr;
21290Sstevel@tonic-gate 	cm_entry->x_time = lbolt;
21300Sstevel@tonic-gate 
21310Sstevel@tonic-gate 	if (tiptr->tp_info.servtype == T_COTS_ORD)
21320Sstevel@tonic-gate 		cm_entry->x_ordrel = TRUE;
21330Sstevel@tonic-gate 	else
21340Sstevel@tonic-gate 		cm_entry->x_ordrel = FALSE;
21350Sstevel@tonic-gate 
21360Sstevel@tonic-gate 	cm_entry->x_tidu_size = tidu_size;
21370Sstevel@tonic-gate 
2138*4457Svv149972 	if (cm_entry->x_early_disc) {
2139*4457Svv149972 		/*
2140*4457Svv149972 		 * We need to check if a disconnect request has come
2141*4457Svv149972 		 * while we are connected, if so, then we need to
2142*4457Svv149972 		 * set rpcerr->re_status appropriately before returning
2143*4457Svv149972 		 * NULL to caller.
2144*4457Svv149972 		 */
2145*4457Svv149972 		if (rpcerr->re_status == RPC_SUCCESS)
2146*4457Svv149972 			rpcerr->re_status = RPC_XPRTFAILED;
21470Sstevel@tonic-gate 		cm_entry->x_connected = FALSE;
2148*4457Svv149972 	} else
21490Sstevel@tonic-gate 		cm_entry->x_connected = connected;
21500Sstevel@tonic-gate 
21510Sstevel@tonic-gate 	/*
21520Sstevel@tonic-gate 	 * There could be a discrepancy here such that
21530Sstevel@tonic-gate 	 * x_early_disc is TRUE yet connected is TRUE as well
21540Sstevel@tonic-gate 	 * and the connection is actually connected. In that case
21550Sstevel@tonic-gate 	 * lets be conservative and declare the connection as not
21560Sstevel@tonic-gate 	 * connected.
21570Sstevel@tonic-gate 	 */
21580Sstevel@tonic-gate 	cm_entry->x_early_disc = FALSE;
21590Sstevel@tonic-gate 	cm_entry->x_needdis = (cm_entry->x_connected == FALSE);
21600Sstevel@tonic-gate 	cm_entry->x_ctime = lbolt;
21610Sstevel@tonic-gate 
21620Sstevel@tonic-gate 	/*
21630Sstevel@tonic-gate 	 * Notify any threads waiting that the connection attempt is done.
21640Sstevel@tonic-gate 	 */
21650Sstevel@tonic-gate 	cm_entry->x_thread = FALSE;
21660Sstevel@tonic-gate 	cv_broadcast(&cm_entry->x_conn_cv);
21670Sstevel@tonic-gate 
21680Sstevel@tonic-gate 	if (cm_entry->x_connected == FALSE) {
2169*4457Svv149972 		mutex_exit(&connmgr_lock);
21700Sstevel@tonic-gate 		connmgr_release(cm_entry);
21710Sstevel@tonic-gate 		return (NULL);
21720Sstevel@tonic-gate 	}
2173*4457Svv149972 
2174*4457Svv149972 	mutex_exit(&connmgr_lock);
2175*4457Svv149972 
21760Sstevel@tonic-gate 	return (cm_entry);
21770Sstevel@tonic-gate }
21780Sstevel@tonic-gate 
/*
 * Keep the cm_xprt entry on the connection list when making a connection.
 * This is to prevent multiple connections to a slow server from appearing.
 * We use the bit field x_thread to tell if a thread is doing a connection
 * which keeps other interested threads from messing with connection.
 * Those other threads just wait if x_thread is set.
 *
 * If x_thread is not set, then we do the actual work of connecting via
 * connmgr_connect().
 *
 * mutex convention: called with connmgr_lock held, returns with it released.
 *
 * On success, returns cm_entry with an extra reference held (CONN_HOLD);
 * the caller is responsible for the matching connmgr_release(). On failure,
 * drops that reference and returns NULL with rpcerr->re_status set.
 * If srcaddr is non-NULL, it is updated with the source address of the
 * connection for use in a later retry.
 */
static struct cm_xprt *
connmgr_wrapconnect(
	struct cm_xprt	*cm_entry,
	const struct timeval	*waitp,
	struct netbuf	*destaddr,
	int		addrfmly,
	struct netbuf	*srcaddr,
	struct rpc_err	*rpcerr,
	bool_t		reconnect,
	bool_t		nosignal)
{
	ASSERT(MUTEX_HELD(&connmgr_lock));
	/*
	 * Hold this entry as we are about to drop connmgr_lock.
	 */
	CONN_HOLD(cm_entry);

	/*
	 * If there is a thread already making a connection for us, then
	 * wait for it to complete the connection.
	 */
	if (cm_entry->x_thread == TRUE) {
		rpcerr->re_status = connmgr_cwait(cm_entry, waitp, nosignal);

		if (rpcerr->re_status != RPC_SUCCESS) {
			mutex_exit(&connmgr_lock);
			connmgr_release(cm_entry);
			return (NULL);
		}
	} else {
		bool_t connected;
		calllist_t call;

		/* Claim the connection attempt for this thread. */
		cm_entry->x_thread = TRUE;

		/*
		 * Send any pending orderly release first, pausing one
		 * second after each.  connmgr_sndrel() evidently drops
		 * connmgr_lock (it is reacquired below each iteration).
		 */
		while (cm_entry->x_needrel == TRUE) {
			cm_entry->x_needrel = FALSE;

			connmgr_sndrel(cm_entry);
			delay(drv_usectohz(1000000));

			mutex_enter(&connmgr_lock);
		}

		/*
		 * If we need to send a T_DISCON_REQ, send one.
		 */
		connmgr_dis_and_wait(cm_entry);

		mutex_exit(&connmgr_lock);

		/*
		 * The calllist is declared here (not at function scope)
		 * to keep the kernel stack lean on the waiter path above.
		 */
		bzero(&call, sizeof (call));
		cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);

		connected = connmgr_connect(cm_entry, cm_entry->x_wq,
					    destaddr, addrfmly, &call,
					    &cm_entry->x_tidu_size,
					    reconnect, waitp, nosignal);

		*rpcerr = call.call_err;
		cv_destroy(&call.call_cv);

		mutex_enter(&connmgr_lock);


		if (cm_entry->x_early_disc) {
			/*
			 * We need to check if a disconnect request has come
			 * while we are connected, if so, then we need to
			 * set rpcerr->re_status appropriately before returning
			 * NULL to caller.
			 */
			if (rpcerr->re_status == RPC_SUCCESS)
				rpcerr->re_status = RPC_XPRTFAILED;
			cm_entry->x_connected = FALSE;
		} else
			cm_entry->x_connected = connected;

		/*
		 * There could be a discrepancy here such that
		 * x_early_disc is TRUE yet connected is TRUE as well
		 * and the connection is actually connected. In that case
		 * lets be conservative and declare the connection as not
		 * connected.
		 */

		cm_entry->x_early_disc = FALSE;
		cm_entry->x_needdis = (cm_entry->x_connected == FALSE);


		/*
		 * connmgr_connect() may have given up before the connection
		 * actually timed out. So ensure that before the next
		 * connection attempt we do a disconnect.
		 */
		cm_entry->x_ctime = lbolt;
		cm_entry->x_thread = FALSE;

		/* Wake threads blocked in connmgr_cwait() on this entry. */
		cv_broadcast(&cm_entry->x_conn_cv);

		if (cm_entry->x_connected == FALSE) {
			mutex_exit(&connmgr_lock);
			connmgr_release(cm_entry);
			return (NULL);
		}
	}

	if (srcaddr != NULL) {
		/*
		 * Copy into the handle the
		 * source address of the
		 * connection, which we will use
		 * in case of a later retry.
		 */
		if (srcaddr->len != cm_entry->x_src.len) {
			if (srcaddr->maxlen > 0)
				kmem_free(srcaddr->buf, srcaddr->maxlen);
			srcaddr->buf = kmem_zalloc(cm_entry->x_src.len,
			    KM_SLEEP);
			srcaddr->maxlen = srcaddr->len =
			    cm_entry->x_src.len;
		}
		bcopy(cm_entry->x_src.buf, srcaddr->buf, srcaddr->len);
	}
	/* Timestamp the entry for LRU accounting in connmgr_get(). */
	cm_entry->x_time = lbolt;
	mutex_exit(&connmgr_lock);
	return (cm_entry);
}
23190Sstevel@tonic-gate 
23200Sstevel@tonic-gate /*
23210Sstevel@tonic-gate  * If we need to send a T_DISCON_REQ, send one.
23220Sstevel@tonic-gate  */
23230Sstevel@tonic-gate static void
23240Sstevel@tonic-gate connmgr_dis_and_wait(struct cm_xprt *cm_entry)
23250Sstevel@tonic-gate {
23260Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&connmgr_lock));
23270Sstevel@tonic-gate 	for (;;) {
23280Sstevel@tonic-gate 		while (cm_entry->x_needdis == TRUE) {
23290Sstevel@tonic-gate 			RPCLOG(8, "connmgr_dis_and_wait: need "
23300Sstevel@tonic-gate 				"T_DISCON_REQ for connection 0x%p\n",
23310Sstevel@tonic-gate 				(void *)cm_entry);
23320Sstevel@tonic-gate 			cm_entry->x_needdis = FALSE;
23330Sstevel@tonic-gate 			cm_entry->x_waitdis = TRUE;
23340Sstevel@tonic-gate 
23350Sstevel@tonic-gate 			connmgr_snddis(cm_entry);
23360Sstevel@tonic-gate 
23370Sstevel@tonic-gate 			mutex_enter(&connmgr_lock);
23380Sstevel@tonic-gate 		}
23390Sstevel@tonic-gate 
23400Sstevel@tonic-gate 		if (cm_entry->x_waitdis == TRUE) {
23410Sstevel@tonic-gate 			clock_t curlbolt;
23420Sstevel@tonic-gate 			clock_t timout;
23430Sstevel@tonic-gate 
23440Sstevel@tonic-gate 			RPCLOG(8, "connmgr_dis_and_wait waiting for "
23450Sstevel@tonic-gate 				"T_DISCON_REQ's ACK for connection %p\n",
23460Sstevel@tonic-gate 				(void *)cm_entry);
23470Sstevel@tonic-gate 			curlbolt = ddi_get_lbolt();
23480Sstevel@tonic-gate 
23490Sstevel@tonic-gate 			timout = clnt_cots_min_conntout *
23500Sstevel@tonic-gate 				drv_usectohz(1000000) + curlbolt;
23510Sstevel@tonic-gate 
23520Sstevel@tonic-gate 			/*
23530Sstevel@tonic-gate 			 * The TPI spec says that the T_DISCON_REQ
23540Sstevel@tonic-gate 			 * will get acknowledged, but in practice
23550Sstevel@tonic-gate 			 * the ACK may never get sent. So don't
23560Sstevel@tonic-gate 			 * block forever.
23570Sstevel@tonic-gate 			 */
23580Sstevel@tonic-gate 			(void) cv_timedwait(&cm_entry->x_dis_cv,
23590Sstevel@tonic-gate 					    &connmgr_lock, timout);
23600Sstevel@tonic-gate 		}
23610Sstevel@tonic-gate 		/*
23620Sstevel@tonic-gate 		 * If we got the ACK, break. If we didn't,
23630Sstevel@tonic-gate 		 * then send another T_DISCON_REQ.
23640Sstevel@tonic-gate 		 */
23650Sstevel@tonic-gate 		if (cm_entry->x_waitdis == FALSE) {
23660Sstevel@tonic-gate 			break;
23670Sstevel@tonic-gate 		} else {
23680Sstevel@tonic-gate 			RPCLOG(8, "connmgr_dis_and_wait: did"
23690Sstevel@tonic-gate 				"not get T_DISCON_REQ's ACK for "
23700Sstevel@tonic-gate 				"connection  %p\n", (void *)cm_entry);
23710Sstevel@tonic-gate 			cm_entry->x_needdis = TRUE;
23720Sstevel@tonic-gate 		}
23730Sstevel@tonic-gate 	}
23740Sstevel@tonic-gate }
23750Sstevel@tonic-gate 
/*
 * Abort a pending connection attempt on cm_entry.  Marks the entry dead,
 * wakes any threads waiting on it, and drops this thread's reference.
 */
static void
connmgr_cancelconn(struct cm_xprt *cm_entry)
{
	/*
	 * Mark the connection table entry as dead; the next thread that
	 * goes through connmgr_release() will notice this and deal with it.
	 */
	mutex_enter(&connmgr_lock);
	cm_entry->x_dead = TRUE;

	/*
	 * Notify any threads waiting for the connection that it isn't
	 * going to happen.
	 */
	cm_entry->x_thread = FALSE;
	cv_broadcast(&cm_entry->x_conn_cv);
	mutex_exit(&connmgr_lock);

	/* Drop the reference taken by this (failed) connection attempt. */
	connmgr_release(cm_entry);
}
23960Sstevel@tonic-gate 
/*
 * Tear down a connection entry: wait for all references to drain, close
 * the underlying transport endpoint, delete the entry's kstat (including
 * the server-address string allocated by the kstat update function),
 * destroy the synchronization objects, and free the address buffers and
 * the entry itself.  The entry must already be off the connection list;
 * cm_entry is invalid after this returns.
 */
static void
connmgr_close(struct cm_xprt *cm_entry)
{
	mutex_enter(&cm_entry->x_lock);
	while (cm_entry->x_ref != 0) {
		/*
		 * Must be a noninterruptible wait.
		 */
		cv_wait(&cm_entry->x_cv, &cm_entry->x_lock);
	}

	if (cm_entry->x_tiptr != NULL)
		(void) t_kclose(cm_entry->x_tiptr, 1);

	mutex_exit(&cm_entry->x_lock);
	if (cm_entry->x_ksp != NULL) {
		/*
		 * Detach the kstat from this soon-to-be-freed entry under
		 * connmgr_lock so the update function sees a NULL private
		 * pointer.
		 */
		mutex_enter(&connmgr_lock);
		cm_entry->x_ksp->ks_private = NULL;
		mutex_exit(&connmgr_lock);

		/*
		 * Must free the buffer we allocated for the
		 * server address in the update function
		 */
		if (((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
		    x_server.value.str.addr.ptr != NULL)
			kmem_free(((struct cm_kstat_xprt *)(cm_entry->x_ksp->
			    ks_data))->x_server.value.str.addr.ptr,
				    INET6_ADDRSTRLEN);
		kmem_free(cm_entry->x_ksp->ks_data,
			    cm_entry->x_ksp->ks_data_size);
		kstat_delete(cm_entry->x_ksp);
	}

	mutex_destroy(&cm_entry->x_lock);
	cv_destroy(&cm_entry->x_cv);
	cv_destroy(&cm_entry->x_conn_cv);
	cv_destroy(&cm_entry->x_dis_cv);

	if (cm_entry->x_server.buf != NULL)
		kmem_free(cm_entry->x_server.buf, cm_entry->x_server.maxlen);
	if (cm_entry->x_src.buf != NULL)
		kmem_free(cm_entry->x_src.buf, cm_entry->x_src.maxlen);
	kmem_free(cm_entry, sizeof (struct cm_xprt));
}
24420Sstevel@tonic-gate 
24430Sstevel@tonic-gate /*
24440Sstevel@tonic-gate  * Called by KRPC after sending the call message to release the connection
24450Sstevel@tonic-gate  * it was using.
24460Sstevel@tonic-gate  */
24470Sstevel@tonic-gate static void
24480Sstevel@tonic-gate connmgr_release(struct cm_xprt *cm_entry)
24490Sstevel@tonic-gate {
24500Sstevel@tonic-gate 	mutex_enter(&cm_entry->x_lock);
24510Sstevel@tonic-gate 	cm_entry->x_ref--;
24520Sstevel@tonic-gate 	if (cm_entry->x_ref == 0)
24530Sstevel@tonic-gate 		cv_signal(&cm_entry->x_cv);
24540Sstevel@tonic-gate 	mutex_exit(&cm_entry->x_lock);
24550Sstevel@tonic-gate }
24560Sstevel@tonic-gate 
24570Sstevel@tonic-gate /*
24580Sstevel@tonic-gate  * Given an open stream, connect to the remote.  Returns true if connected,
24590Sstevel@tonic-gate  * false otherwise.
24600Sstevel@tonic-gate  */
24610Sstevel@tonic-gate static bool_t
24620Sstevel@tonic-gate connmgr_connect(
24630Sstevel@tonic-gate 	struct cm_xprt		*cm_entry,
24640Sstevel@tonic-gate 	queue_t			*wq,
24650Sstevel@tonic-gate 	struct netbuf		*addr,
24660Sstevel@tonic-gate 	int			addrfmly,
24670Sstevel@tonic-gate 	calllist_t 		*e,
24680Sstevel@tonic-gate 	int 			*tidu_ptr,
24690Sstevel@tonic-gate 	bool_t 			reconnect,
24700Sstevel@tonic-gate 	const struct timeval 	*waitp,
24710Sstevel@tonic-gate 	bool_t 			nosignal)
24720Sstevel@tonic-gate {
24730Sstevel@tonic-gate 	mblk_t *mp;
24740Sstevel@tonic-gate 	struct T_conn_req *tcr;
24750Sstevel@tonic-gate 	struct T_info_ack *tinfo;
24760Sstevel@tonic-gate 	int interrupted, error;
24770Sstevel@tonic-gate 	int tidu_size, kstat_instance;
24780Sstevel@tonic-gate 
24790Sstevel@tonic-gate 	/* if it's a reconnect, flush any lingering data messages */
24800Sstevel@tonic-gate 	if (reconnect)
24810Sstevel@tonic-gate 		(void) putctl1(wq, M_FLUSH, FLUSHRW);
24820Sstevel@tonic-gate 
24830Sstevel@tonic-gate 	mp = allocb(sizeof (*tcr) + addr->len, BPRI_LO);
24840Sstevel@tonic-gate 	if (mp == NULL) {
24850Sstevel@tonic-gate 		/*
24860Sstevel@tonic-gate 		 * This is unfortunate, but we need to look up the stats for
24870Sstevel@tonic-gate 		 * this zone to increment the "memory allocation failed"
24880Sstevel@tonic-gate 		 * counter.  curproc->p_zone is safe since we're initiating a
24890Sstevel@tonic-gate 		 * connection and not in some strange streams context.
24900Sstevel@tonic-gate 		 */
24910Sstevel@tonic-gate 		struct rpcstat *rpcstat;
24920Sstevel@tonic-gate 
2493766Scarlsonj 		rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
24940Sstevel@tonic-gate 		ASSERT(rpcstat != NULL);
24950Sstevel@tonic-gate 
24960Sstevel@tonic-gate 		RPCLOG0(1, "connmgr_connect: cannot alloc mp for "
24970Sstevel@tonic-gate 		    "sending conn request\n");
24980Sstevel@tonic-gate 		COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcnomem);
24990Sstevel@tonic-gate 		e->call_status = RPC_SYSTEMERROR;
25000Sstevel@tonic-gate 		e->call_reason = ENOSR;
25010Sstevel@tonic-gate 		return (FALSE);
25020Sstevel@tonic-gate 	}
25030Sstevel@tonic-gate 
25040Sstevel@tonic-gate 	mp->b_datap->db_type = M_PROTO;
25050Sstevel@tonic-gate 	tcr = (struct T_conn_req *)mp->b_rptr;
25060Sstevel@tonic-gate 	bzero(tcr, sizeof (*tcr));
25070Sstevel@tonic-gate 	tcr->PRIM_type = T_CONN_REQ;
25080Sstevel@tonic-gate 	tcr->DEST_length = addr->len;
25090Sstevel@tonic-gate 	tcr->DEST_offset = sizeof (struct T_conn_req);
25100Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + sizeof (*tcr);
25110Sstevel@tonic-gate 
25120Sstevel@tonic-gate 	bcopy(addr->buf, mp->b_wptr, tcr->DEST_length);
25130Sstevel@tonic-gate 	mp->b_wptr += tcr->DEST_length;
25140Sstevel@tonic-gate 
25150Sstevel@tonic-gate 	RPCLOG(8, "connmgr_connect: sending conn request on queue "
25160Sstevel@tonic-gate 	    "%p", (void *)wq);
25170Sstevel@tonic-gate 	RPCLOG(8, " call %p\n", (void *)wq);
25180Sstevel@tonic-gate 	/*
25190Sstevel@tonic-gate 	 * We use the entry in the handle that is normally used for
25200Sstevel@tonic-gate 	 * waiting for RPC replies to wait for the connection accept.
25210Sstevel@tonic-gate 	 */
25220Sstevel@tonic-gate 	clnt_dispatch_send(wq, mp, e, 0, 0);
25230Sstevel@tonic-gate 
25240Sstevel@tonic-gate 	mutex_enter(&clnt_pending_lock);
25250Sstevel@tonic-gate 
25260Sstevel@tonic-gate 	/*
25270Sstevel@tonic-gate 	 * We wait for the transport connection to be made, or an
25280Sstevel@tonic-gate 	 * indication that it could not be made.
25290Sstevel@tonic-gate 	 */
25300Sstevel@tonic-gate 	interrupted = 0;
25310Sstevel@tonic-gate 
25320Sstevel@tonic-gate 	/*
25330Sstevel@tonic-gate 	 * waitforack should have been called with T_OK_ACK, but the
25340Sstevel@tonic-gate 	 * present implementation needs to be passed T_INFO_ACK to
25350Sstevel@tonic-gate 	 * work correctly.
25360Sstevel@tonic-gate 	 */
25370Sstevel@tonic-gate 	error = waitforack(e, T_INFO_ACK, waitp, nosignal);
25380Sstevel@tonic-gate 	if (error == EINTR)
25390Sstevel@tonic-gate 		interrupted = 1;
25400Sstevel@tonic-gate 	if (zone_status_get(curproc->p_zone) >= ZONE_IS_EMPTY) {
25410Sstevel@tonic-gate 		/*
25420Sstevel@tonic-gate 		 * No time to lose; we essentially have been signaled to
25430Sstevel@tonic-gate 		 * quit.
25440Sstevel@tonic-gate 		 */
25450Sstevel@tonic-gate 		interrupted = 1;
25460Sstevel@tonic-gate 	}
25470Sstevel@tonic-gate #ifdef RPCDEBUG
25480Sstevel@tonic-gate 	if (error == ETIME)
25490Sstevel@tonic-gate 		RPCLOG0(8, "connmgr_connect: giving up "
25500Sstevel@tonic-gate 		    "on connection attempt; "
25510Sstevel@tonic-gate 		    "clnt_dispatch notifyconn "
25520Sstevel@tonic-gate 		    "diagnostic 'no one waiting for "
25530Sstevel@tonic-gate 		    "connection' should not be "
25540Sstevel@tonic-gate 		    "unexpected\n");
25550Sstevel@tonic-gate #endif
25560Sstevel@tonic-gate 	if (e->call_prev)
25570Sstevel@tonic-gate 		e->call_prev->call_next = e->call_next;
25580Sstevel@tonic-gate 	else
25590Sstevel@tonic-gate 		clnt_pending = e->call_next;
25600Sstevel@tonic-gate 	if (e->call_next)
25610Sstevel@tonic-gate 		e->call_next->call_prev = e->call_prev;
25620Sstevel@tonic-gate 	mutex_exit(&clnt_pending_lock);
25630Sstevel@tonic-gate 
25640Sstevel@tonic-gate 	if (e->call_status != RPC_SUCCESS || error != 0) {
25650Sstevel@tonic-gate 		if (interrupted)
25660Sstevel@tonic-gate 			e->call_status = RPC_INTR;
25670Sstevel@tonic-gate 		else if (error == ETIME)
25680Sstevel@tonic-gate 			e->call_status = RPC_TIMEDOUT;
25690Sstevel@tonic-gate 		else if (error == EPROTO)
25700Sstevel@tonic-gate 			e->call_status = RPC_SYSTEMERROR;
25710Sstevel@tonic-gate 
25720Sstevel@tonic-gate 		RPCLOG(8, "connmgr_connect: can't connect, status: "
25730Sstevel@tonic-gate 		    "%s\n", clnt_sperrno(e->call_status));
25740Sstevel@tonic-gate 
25750Sstevel@tonic-gate 		if (e->call_reply) {
25760Sstevel@tonic-gate 			freemsg(e->call_reply);
25770Sstevel@tonic-gate 			e->call_reply = NULL;
25780Sstevel@tonic-gate 		}
25790Sstevel@tonic-gate 
25800Sstevel@tonic-gate 		return (FALSE);
25810Sstevel@tonic-gate 	}
25820Sstevel@tonic-gate 	/*
25830Sstevel@tonic-gate 	 * The result of the "connection accept" is a T_info_ack
25840Sstevel@tonic-gate 	 * in the call_reply field.
25850Sstevel@tonic-gate 	 */
25860Sstevel@tonic-gate 	ASSERT(e->call_reply != NULL);
25870Sstevel@tonic-gate 	mp = e->call_reply;
25880Sstevel@tonic-gate 	e->call_reply = NULL;
25890Sstevel@tonic-gate 	tinfo = (struct T_info_ack *)mp->b_rptr;
25900Sstevel@tonic-gate 
25910Sstevel@tonic-gate 	tidu_size = tinfo->TIDU_size;
25920Sstevel@tonic-gate 	tidu_size -= (tidu_size % BYTES_PER_XDR_UNIT);
25930Sstevel@tonic-gate 	if (tidu_size > COTS_DEFAULT_ALLOCSIZE || (tidu_size <= 0))
25940Sstevel@tonic-gate 		tidu_size = COTS_DEFAULT_ALLOCSIZE;
25950Sstevel@tonic-gate 	*tidu_ptr = tidu_size;
25960Sstevel@tonic-gate 
25970Sstevel@tonic-gate 	freemsg(mp);
25980Sstevel@tonic-gate 
25990Sstevel@tonic-gate 	/*
26000Sstevel@tonic-gate 	 * Set up the pertinent options.  NODELAY is so the transport doesn't
26010Sstevel@tonic-gate 	 * buffer up RPC messages on either end.  This may not be valid for
26020Sstevel@tonic-gate 	 * all transports.  Failure to set this option is not cause to
26030Sstevel@tonic-gate 	 * bail out so we return success anyway.  Note that lack of NODELAY
26040Sstevel@tonic-gate 	 * or some other way to flush the message on both ends will cause
26050Sstevel@tonic-gate 	 * lots of retries and terrible performance.
26060Sstevel@tonic-gate 	 */
26070Sstevel@tonic-gate 	if (addrfmly == AF_INET || addrfmly == AF_INET6) {
26080Sstevel@tonic-gate 		(void) connmgr_setopt(wq, IPPROTO_TCP, TCP_NODELAY, e);
26090Sstevel@tonic-gate 		if (e->call_status == RPC_XPRTFAILED)
26100Sstevel@tonic-gate 			return (FALSE);
26110Sstevel@tonic-gate 	}
26120Sstevel@tonic-gate 
26130Sstevel@tonic-gate 	/*
26140Sstevel@tonic-gate 	 * Since we have a connection, we now need to figure out if
26150Sstevel@tonic-gate 	 * we need to create a kstat. If x_ksp is not NULL then we
26160Sstevel@tonic-gate 	 * are reusing a connection and so we do not need to create
26170Sstevel@tonic-gate 	 * another kstat -- lets just return.
26180Sstevel@tonic-gate 	 */
26190Sstevel@tonic-gate 	if (cm_entry->x_ksp != NULL)
26200Sstevel@tonic-gate 		return (TRUE);
26210Sstevel@tonic-gate 
26220Sstevel@tonic-gate 	/*
26230Sstevel@tonic-gate 	 * We need to increment rpc_kstat_instance atomically to prevent
26240Sstevel@tonic-gate 	 * two kstats being created with the same instance.
26250Sstevel@tonic-gate 	 */
26260Sstevel@tonic-gate 	kstat_instance = atomic_add_32_nv((uint32_t *)&rpc_kstat_instance, 1);
26270Sstevel@tonic-gate 
26280Sstevel@tonic-gate 	if ((cm_entry->x_ksp = kstat_create_zone("unix", kstat_instance,
26290Sstevel@tonic-gate 	    "rpc_cots_connections", "rpc", KSTAT_TYPE_NAMED,
26300Sstevel@tonic-gate 	    (uint_t)(sizeof (cm_kstat_xprt_t) / sizeof (kstat_named_t)),
26310Sstevel@tonic-gate 	    KSTAT_FLAG_VIRTUAL, cm_entry->x_zoneid)) == NULL) {
26320Sstevel@tonic-gate 		return (TRUE);
26330Sstevel@tonic-gate 	    }
26340Sstevel@tonic-gate 
26350Sstevel@tonic-gate 	cm_entry->x_ksp->ks_lock = &connmgr_lock;
26360Sstevel@tonic-gate 	cm_entry->x_ksp->ks_private = cm_entry;
26370Sstevel@tonic-gate 	cm_entry->x_ksp->ks_data_size = ((INET6_ADDRSTRLEN * sizeof (char))
26380Sstevel@tonic-gate 					    + sizeof (cm_kstat_template));
26390Sstevel@tonic-gate 	cm_entry->x_ksp->ks_data = kmem_alloc(cm_entry->x_ksp->ks_data_size,
26400Sstevel@tonic-gate 					    KM_SLEEP);
26410Sstevel@tonic-gate 	bcopy(&cm_kstat_template, cm_entry->x_ksp->ks_data,
26420Sstevel@tonic-gate 	    cm_entry->x_ksp->ks_data_size);
26430Sstevel@tonic-gate 	((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
2644457Sbmc 		    x_server.value.str.addr.ptr =
26450Sstevel@tonic-gate 		    kmem_alloc(INET6_ADDRSTRLEN, KM_SLEEP);
26460Sstevel@tonic-gate 
26470Sstevel@tonic-gate 	cm_entry->x_ksp->ks_update = conn_kstat_update;
26480Sstevel@tonic-gate 	kstat_install(cm_entry->x_ksp);
26490Sstevel@tonic-gate 	return (TRUE);
26500Sstevel@tonic-gate }
26510Sstevel@tonic-gate 
26520Sstevel@tonic-gate /*
26530Sstevel@tonic-gate  * Called by connmgr_connect to set an option on the new stream.
26540Sstevel@tonic-gate  */
26550Sstevel@tonic-gate static bool_t
26560Sstevel@tonic-gate connmgr_setopt(queue_t *wq, int level, int name, calllist_t *e)
26570Sstevel@tonic-gate {
26580Sstevel@tonic-gate 	mblk_t *mp;
26590Sstevel@tonic-gate 	struct opthdr *opt;
26600Sstevel@tonic-gate 	struct T_optmgmt_req *tor;
26610Sstevel@tonic-gate 	struct timeval waitp;
26620Sstevel@tonic-gate 	int error;
26630Sstevel@tonic-gate 
26640Sstevel@tonic-gate 	mp = allocb(sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
26650Sstevel@tonic-gate 	    sizeof (int), BPRI_LO);
26660Sstevel@tonic-gate 	if (mp == NULL) {
26670Sstevel@tonic-gate 		RPCLOG0(1, "connmgr_setopt: cannot alloc mp for option "
26680Sstevel@tonic-gate 		    "request\n");
26690Sstevel@tonic-gate 		return (FALSE);
26700Sstevel@tonic-gate 	}
26710Sstevel@tonic-gate 
26720Sstevel@tonic-gate 	mp->b_datap->db_type = M_PROTO;
26730Sstevel@tonic-gate 	tor = (struct T_optmgmt_req *)(mp->b_rptr);
26740Sstevel@tonic-gate 	tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
26750Sstevel@tonic-gate 	tor->MGMT_flags = T_NEGOTIATE;
26760Sstevel@tonic-gate 	tor->OPT_length = sizeof (struct opthdr) + sizeof (int);
26770Sstevel@tonic-gate 	tor->OPT_offset = sizeof (struct T_optmgmt_req);
26780Sstevel@tonic-gate 
26790Sstevel@tonic-gate 	opt = (struct opthdr *)(mp->b_rptr + sizeof (struct T_optmgmt_req));
26800Sstevel@tonic-gate 	opt->level = level;
26810Sstevel@tonic-gate 	opt->name = name;
26820Sstevel@tonic-gate 	opt->len = sizeof (int);
26830Sstevel@tonic-gate 	*(int *)((char *)opt + sizeof (*opt)) = 1;
26840Sstevel@tonic-gate 	mp->b_wptr += sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
26850Sstevel@tonic-gate 	    sizeof (int);
26860Sstevel@tonic-gate 
26870Sstevel@tonic-gate 	/*
26880Sstevel@tonic-gate 	 * We will use this connection regardless
26890Sstevel@tonic-gate 	 * of whether or not the option is settable.
26900Sstevel@tonic-gate 	 */
26910Sstevel@tonic-gate 	clnt_dispatch_send(wq, mp, e, 0, 0);
26920Sstevel@tonic-gate 	mutex_enter(&clnt_pending_lock);
26930Sstevel@tonic-gate 
26940Sstevel@tonic-gate 	waitp.tv_sec = clnt_cots_min_conntout;
26950Sstevel@tonic-gate 	waitp.tv_usec = 0;
26960Sstevel@tonic-gate 	error = waitforack(e, T_OPTMGMT_ACK, &waitp, 1);
26970Sstevel@tonic-gate 
26980Sstevel@tonic-gate 	if (e->call_prev)
26990Sstevel@tonic-gate 		e->call_prev->call_next = e->call_next;
27000Sstevel@tonic-gate 	else
27010Sstevel@tonic-gate 		clnt_pending = e->call_next;
27020Sstevel@tonic-gate 	if (e->call_next)
27030Sstevel@tonic-gate 		e->call_next->call_prev = e->call_prev;
27040Sstevel@tonic-gate 	mutex_exit(&clnt_pending_lock);
27050Sstevel@tonic-gate 
27060Sstevel@tonic-gate 	if (e->call_reply != NULL) {
27070Sstevel@tonic-gate 		freemsg(e->call_reply);
27080Sstevel@tonic-gate 		e->call_reply = NULL;
27090Sstevel@tonic-gate 	}
27100Sstevel@tonic-gate 
27110Sstevel@tonic-gate 	if (e->call_status != RPC_SUCCESS || error != 0) {
27120Sstevel@tonic-gate 		RPCLOG(1, "connmgr_setopt: can't set option: %d\n", name);
27130Sstevel@tonic-gate 		return (FALSE);
27140Sstevel@tonic-gate 	}
27150Sstevel@tonic-gate 	RPCLOG(8, "connmgr_setopt: successfully set option: %d\n", name);
27160Sstevel@tonic-gate 	return (TRUE);
27170Sstevel@tonic-gate }
27180Sstevel@tonic-gate 
27190Sstevel@tonic-gate #ifdef	DEBUG
27200Sstevel@tonic-gate 
27210Sstevel@tonic-gate /*
27220Sstevel@tonic-gate  * This is a knob to let us force code coverage in allocation failure
27230Sstevel@tonic-gate  * case.
27240Sstevel@tonic-gate  */
27250Sstevel@tonic-gate static int	connmgr_failsnd;
27260Sstevel@tonic-gate #define	CONN_SND_ALLOC(Size, Pri)	\
27270Sstevel@tonic-gate 	((connmgr_failsnd-- > 0) ? NULL : allocb(Size, Pri))
27280Sstevel@tonic-gate 
27290Sstevel@tonic-gate #else
27300Sstevel@tonic-gate 
27310Sstevel@tonic-gate #define	CONN_SND_ALLOC(Size, Pri)	allocb(Size, Pri)
27320Sstevel@tonic-gate 
27330Sstevel@tonic-gate #endif
27340Sstevel@tonic-gate 
27350Sstevel@tonic-gate /*
27360Sstevel@tonic-gate  * Sends an orderly release on the specified queue.
27370Sstevel@tonic-gate  * Entered with connmgr_lock. Exited without connmgr_lock
27380Sstevel@tonic-gate  */
27390Sstevel@tonic-gate static void
27400Sstevel@tonic-gate connmgr_sndrel(struct cm_xprt *cm_entry)
27410Sstevel@tonic-gate {
27420Sstevel@tonic-gate 	struct T_ordrel_req *torr;
27430Sstevel@tonic-gate 	mblk_t *mp;
27440Sstevel@tonic-gate 	queue_t *q = cm_entry->x_wq;
27450Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&connmgr_lock));
27460Sstevel@tonic-gate 	mp = CONN_SND_ALLOC(sizeof (struct T_ordrel_req), BPRI_LO);
27470Sstevel@tonic-gate 	if (mp == NULL) {
27480Sstevel@tonic-gate 		cm_entry->x_needrel = TRUE;
27490Sstevel@tonic-gate 		mutex_exit(&connmgr_lock);
27500Sstevel@tonic-gate 		RPCLOG(1, "connmgr_sndrel: cannot alloc mp for sending ordrel "
27510Sstevel@tonic-gate 			"to queue %p\n", (void *)q);
27520Sstevel@tonic-gate 		return;
27530Sstevel@tonic-gate 	}
27540Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
27550Sstevel@tonic-gate 
27560Sstevel@tonic-gate 	mp->b_datap->db_type = M_PROTO;
27570Sstevel@tonic-gate 	torr = (struct T_ordrel_req *)(mp->b_rptr);
27580Sstevel@tonic-gate 	torr->PRIM_type = T_ORDREL_REQ;
27590Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_req);
27600Sstevel@tonic-gate 
27610Sstevel@tonic-gate 	RPCLOG(8, "connmgr_sndrel: sending ordrel to queue %p\n", (void *)q);
27620Sstevel@tonic-gate 	put(q, mp);
27630Sstevel@tonic-gate }
27640Sstevel@tonic-gate 
27650Sstevel@tonic-gate /*
27660Sstevel@tonic-gate  * Sends an disconnect on the specified queue.
27670Sstevel@tonic-gate  * Entered with connmgr_lock. Exited without connmgr_lock
27680Sstevel@tonic-gate  */
27690Sstevel@tonic-gate static void
27700Sstevel@tonic-gate connmgr_snddis(struct cm_xprt *cm_entry)
27710Sstevel@tonic-gate {
27720Sstevel@tonic-gate 	struct T_discon_req *tdis;
27730Sstevel@tonic-gate 	mblk_t *mp;
27740Sstevel@tonic-gate 	queue_t *q = cm_entry->x_wq;
27750Sstevel@tonic-gate 
27760Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&connmgr_lock));
27770Sstevel@tonic-gate 	mp = CONN_SND_ALLOC(sizeof (*tdis), BPRI_LO);
27780Sstevel@tonic-gate 	if (mp == NULL) {
27790Sstevel@tonic-gate 		cm_entry->x_needdis = TRUE;
27800Sstevel@tonic-gate 		mutex_exit(&connmgr_lock);
27810Sstevel@tonic-gate 		RPCLOG(1, "connmgr_snddis: cannot alloc mp for sending discon "
27820Sstevel@tonic-gate 		    "to queue %p\n", (void *)q);
27830Sstevel@tonic-gate 		return;
27840Sstevel@tonic-gate 	}
27850Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
27860Sstevel@tonic-gate 
27870Sstevel@tonic-gate 	mp->b_datap->db_type = M_PROTO;
27880Sstevel@tonic-gate 	tdis = (struct T_discon_req *)mp->b_rptr;
27890Sstevel@tonic-gate 	tdis->PRIM_type = T_DISCON_REQ;
27900Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + sizeof (*tdis);
27910Sstevel@tonic-gate 
27920Sstevel@tonic-gate 	RPCLOG(8, "connmgr_snddis: sending discon to queue %p\n", (void *)q);
27930Sstevel@tonic-gate 	put(q, mp);
27940Sstevel@tonic-gate }
27950Sstevel@tonic-gate 
27960Sstevel@tonic-gate /*
27970Sstevel@tonic-gate  * Sets up the entry for receiving replies, and calls rpcmod's write put proc
27980Sstevel@tonic-gate  * (through put) to send the call.
27990Sstevel@tonic-gate  */
28000Sstevel@tonic-gate static void
28010Sstevel@tonic-gate clnt_dispatch_send(queue_t *q, mblk_t *mp, calllist_t *e, uint_t xid,
28020Sstevel@tonic-gate 			uint_t queue_flag)
28030Sstevel@tonic-gate {
28040Sstevel@tonic-gate 	ASSERT(e != NULL);
28050Sstevel@tonic-gate 
28060Sstevel@tonic-gate 	e->call_status = RPC_TIMEDOUT;	/* optimistic, eh? */
28070Sstevel@tonic-gate 	e->call_reason = 0;
28080Sstevel@tonic-gate 	e->call_wq = q;
28090Sstevel@tonic-gate 	e->call_xid = xid;
28100Sstevel@tonic-gate 	e->call_notified = FALSE;
28110Sstevel@tonic-gate 
28120Sstevel@tonic-gate 	/*
28130Sstevel@tonic-gate 	 * If queue_flag is set then the calllist_t is already on the hash
28140Sstevel@tonic-gate 	 * queue.  In this case just send the message and return.
28150Sstevel@tonic-gate 	 */
28160Sstevel@tonic-gate 	if (queue_flag) {
28170Sstevel@tonic-gate 		put(q, mp);
28180Sstevel@tonic-gate 		return;
28190Sstevel@tonic-gate 	}
28200Sstevel@tonic-gate 
28210Sstevel@tonic-gate 	/*
28220Sstevel@tonic-gate 	 * Set up calls for RPC requests (with XID != 0) on the hash
28230Sstevel@tonic-gate 	 * queue for fast lookups and place other calls (i.e.
28240Sstevel@tonic-gate 	 * connection management) on the linked list.
28250Sstevel@tonic-gate 	 */
28260Sstevel@tonic-gate 	if (xid != 0) {
28270Sstevel@tonic-gate 		RPCLOG(64, "clnt_dispatch_send: putting xid 0x%x on "
28280Sstevel@tonic-gate 			"dispatch list\n", xid);
28290Sstevel@tonic-gate 		e->call_hash = call_hash(xid, clnt_cots_hash_size);
28300Sstevel@tonic-gate 		e->call_bucket = &cots_call_ht[e->call_hash];
28310Sstevel@tonic-gate 		call_table_enter(e);
28320Sstevel@tonic-gate 	} else {
28330Sstevel@tonic-gate 		mutex_enter(&clnt_pending_lock);
28340Sstevel@tonic-gate 		if (clnt_pending)
28350Sstevel@tonic-gate 			clnt_pending->call_prev = e;
28360Sstevel@tonic-gate 		e->call_next = clnt_pending;
28370Sstevel@tonic-gate 		e->call_prev = NULL;
28380Sstevel@tonic-gate 		clnt_pending = e;
28390Sstevel@tonic-gate 		mutex_exit(&clnt_pending_lock);
28400Sstevel@tonic-gate 	}
28410Sstevel@tonic-gate 
28420Sstevel@tonic-gate 	put(q, mp);
28430Sstevel@tonic-gate }
28440Sstevel@tonic-gate 
28450Sstevel@tonic-gate /*
28460Sstevel@tonic-gate  * Called by rpcmod to notify a client with a clnt_pending call that its reply
28470Sstevel@tonic-gate  * has arrived.  If we can't find a client waiting for this reply, we log
28480Sstevel@tonic-gate  * the error and return.
28490Sstevel@tonic-gate  */
28500Sstevel@tonic-gate bool_t
28510Sstevel@tonic-gate clnt_dispatch_notify(mblk_t *mp, zoneid_t zoneid)
28520Sstevel@tonic-gate {
28530Sstevel@tonic-gate 	calllist_t *e = NULL;
28540Sstevel@tonic-gate 	call_table_t *chtp;
28550Sstevel@tonic-gate 	uint32_t xid;
28560Sstevel@tonic-gate 	uint_t hash;
28570Sstevel@tonic-gate 
28580Sstevel@tonic-gate 	if ((IS_P2ALIGNED(mp->b_rptr, sizeof (uint32_t))) &&
28590Sstevel@tonic-gate 	    (mp->b_wptr - mp->b_rptr) >= sizeof (xid))
28600Sstevel@tonic-gate 		xid = *((uint32_t *)mp->b_rptr);
28610Sstevel@tonic-gate 	else {
28620Sstevel@tonic-gate 		int i = 0;
28630Sstevel@tonic-gate 		unsigned char *p = (unsigned char *)&xid;
28640Sstevel@tonic-gate 		unsigned char *rptr;
28650Sstevel@tonic-gate 		mblk_t *tmp = mp;
28660Sstevel@tonic-gate 
28670Sstevel@tonic-gate 		/*
28680Sstevel@tonic-gate 		 * Copy the xid, byte-by-byte into xid.
28690Sstevel@tonic-gate 		 */
28700Sstevel@tonic-gate 		while (tmp) {
28710Sstevel@tonic-gate 			rptr = tmp->b_rptr;
28720Sstevel@tonic-gate 			while (rptr < tmp->b_wptr) {
28730Sstevel@tonic-gate 				*p++ = *rptr++;
28740Sstevel@tonic-gate 				if (++i >= sizeof (xid))
28750Sstevel@tonic-gate 					goto done_xid_copy;
28760Sstevel@tonic-gate 			}
28770Sstevel@tonic-gate 			tmp = tmp->b_cont;
28780Sstevel@tonic-gate 		}
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate 		/*
28810Sstevel@tonic-gate 		 * If we got here, we ran out of mblk space before the
28820Sstevel@tonic-gate 		 * xid could be copied.
28830Sstevel@tonic-gate 		 */
28840Sstevel@tonic-gate 		ASSERT(tmp == NULL && i < sizeof (xid));
28850Sstevel@tonic-gate 
28860Sstevel@tonic-gate 		RPCLOG0(1,
28870Sstevel@tonic-gate 		    "clnt_dispatch_notify: message less than size of xid\n");
28880Sstevel@tonic-gate 		return (FALSE);
28890Sstevel@tonic-gate 
28900Sstevel@tonic-gate 	}
28910Sstevel@tonic-gate done_xid_copy:
28920Sstevel@tonic-gate 
28930Sstevel@tonic-gate 	hash = call_hash(xid, clnt_cots_hash_size);
28940Sstevel@tonic-gate 	chtp = &cots_call_ht[hash];
28950Sstevel@tonic-gate 	/* call_table_find returns with the hash bucket locked */
28960Sstevel@tonic-gate 	call_table_find(chtp, xid, e);
28970Sstevel@tonic-gate 
28980Sstevel@tonic-gate 	if (e != NULL) {
28990Sstevel@tonic-gate 		/*
29000Sstevel@tonic-gate 		 * Found thread waiting for this reply
29010Sstevel@tonic-gate 		 */
29020Sstevel@tonic-gate 		mutex_enter(&e->call_lock);
29030Sstevel@tonic-gate 		if (e->call_reply)
29040Sstevel@tonic-gate 			/*
29050Sstevel@tonic-gate 			 * This can happen under the following scenario:
29060Sstevel@tonic-gate 			 * clnt_cots_kcallit() times out on the response,
29070Sstevel@tonic-gate 			 * rfscall() repeats the CLNT_CALL() with
29080Sstevel@tonic-gate 			 * the same xid, clnt_cots_kcallit() sends the retry,
29090Sstevel@tonic-gate 			 * thereby putting the clnt handle on the pending list,
29100Sstevel@tonic-gate 			 * the first response arrives, signalling the thread
29110Sstevel@tonic-gate 			 * in clnt_cots_kcallit(). Before that thread is
29120Sstevel@tonic-gate 			 * dispatched, the second response arrives as well,
29130Sstevel@tonic-gate 			 * and clnt_dispatch_notify still finds the handle on
29140Sstevel@tonic-gate 			 * the pending list, with call_reply set. So free the
29150Sstevel@tonic-gate 			 * old reply now.
29160Sstevel@tonic-gate 			 *
29170Sstevel@tonic-gate 			 * It is also possible for a response intended for
29180Sstevel@tonic-gate 			 * an RPC call with a different xid to reside here.
29190Sstevel@tonic-gate 			 * This can happen if the thread that owned this
29200Sstevel@tonic-gate 			 * client handle prior to the current owner bailed
29210Sstevel@tonic-gate 			 * out and left its call record on the dispatch
29220Sstevel@tonic-gate 			 * queue.  A window exists where the response can
29230Sstevel@tonic-gate 			 * arrive before the current owner dispatches its
29240Sstevel@tonic-gate 			 * RPC call.
29250Sstevel@tonic-gate 			 *
29260Sstevel@tonic-gate 			 * In any case, this is the very last point where we
29270Sstevel@tonic-gate 			 * can safely check the call_reply field before
29280Sstevel@tonic-gate 			 * placing the new response there.
29290Sstevel@tonic-gate 			 */
29300Sstevel@tonic-gate 			freemsg(e->call_reply);
29310Sstevel@tonic-gate 		e->call_reply = mp;
29320Sstevel@tonic-gate 		e->call_status = RPC_SUCCESS;
29330Sstevel@tonic-gate 		e->call_notified = TRUE;
29340Sstevel@tonic-gate 		cv_signal(&e->call_cv);
29350Sstevel@tonic-gate 		mutex_exit(&e->call_lock);
29360Sstevel@tonic-gate 		mutex_exit(&chtp->ct_lock);
29370Sstevel@tonic-gate 		return (TRUE);
29380Sstevel@tonic-gate 	} else {
29390Sstevel@tonic-gate 		zone_t *zone;
29400Sstevel@tonic-gate 		struct rpcstat *rpcstat;
29410Sstevel@tonic-gate 
29420Sstevel@tonic-gate 		mutex_exit(&chtp->ct_lock);
29430Sstevel@tonic-gate 		RPCLOG(65, "clnt_dispatch_notify: no caller for reply 0x%x\n",
29440Sstevel@tonic-gate 		    xid);
29450Sstevel@tonic-gate 		/*
29460Sstevel@tonic-gate 		 * This is unfortunate, but we need to lookup the zone so we
29470Sstevel@tonic-gate 		 * can increment its "rcbadxids" counter.
29480Sstevel@tonic-gate 		 */
29490Sstevel@tonic-gate 		zone = zone_find_by_id(zoneid);
29500Sstevel@tonic-gate 		if (zone == NULL) {
29510Sstevel@tonic-gate 			/*
29520Sstevel@tonic-gate 			 * The zone went away...
29530Sstevel@tonic-gate 			 */
29540Sstevel@tonic-gate 			return (FALSE);
29550Sstevel@tonic-gate 		}
29560Sstevel@tonic-gate 		rpcstat = zone_getspecific(rpcstat_zone_key, zone);
29570Sstevel@tonic-gate 		if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
29580Sstevel@tonic-gate 			/*
29590Sstevel@tonic-gate 			 * Not interested
29600Sstevel@tonic-gate 			 */
29610Sstevel@tonic-gate 			zone_rele(zone);
29620Sstevel@tonic-gate 			return (FALSE);
29630Sstevel@tonic-gate 		}
29640Sstevel@tonic-gate 		COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcbadxids);
29650Sstevel@tonic-gate 		zone_rele(zone);
29660Sstevel@tonic-gate 	}
29670Sstevel@tonic-gate 	return (FALSE);
29680Sstevel@tonic-gate }
29690Sstevel@tonic-gate 
29700Sstevel@tonic-gate /*
29710Sstevel@tonic-gate  * Called by rpcmod when a non-data indication arrives.  The ones in which we
29720Sstevel@tonic-gate  * are interested are connection indications and options acks.  We dispatch
29730Sstevel@tonic-gate  * based on the queue the indication came in on.  If we are not interested in
29740Sstevel@tonic-gate  * what came in, we return false to rpcmod, who will then pass it upstream.
29750Sstevel@tonic-gate  */
29760Sstevel@tonic-gate bool_t
29770Sstevel@tonic-gate clnt_dispatch_notifyconn(queue_t *q, mblk_t *mp)
29780Sstevel@tonic-gate {
29790Sstevel@tonic-gate 	calllist_t *e;
29800Sstevel@tonic-gate 	int type;
29810Sstevel@tonic-gate 
29820Sstevel@tonic-gate 	ASSERT((q->q_flag & QREADR) == 0);
29830Sstevel@tonic-gate 
29840Sstevel@tonic-gate 	type = ((union T_primitives *)mp->b_rptr)->type;
29850Sstevel@tonic-gate 	RPCLOG(8, "clnt_dispatch_notifyconn: prim type: [%s]\n",
29860Sstevel@tonic-gate 	    rpc_tpiprim2name(type));
29870Sstevel@tonic-gate 	mutex_enter(&clnt_pending_lock);
29880Sstevel@tonic-gate 	for (e = clnt_pending; /* NO CONDITION */; e = e->call_next) {
29890Sstevel@tonic-gate 		if (e == NULL) {
29900Sstevel@tonic-gate 			mutex_exit(&clnt_pending_lock);
29910Sstevel@tonic-gate 			RPCLOG(1, "clnt_dispatch_notifyconn: no one waiting "
29920Sstevel@tonic-gate 			    "for connection on queue 0x%p\n", (void *)q);
29930Sstevel@tonic-gate 			return (FALSE);
29940Sstevel@tonic-gate 		}
29950Sstevel@tonic-gate 		if (e->call_wq == q)
29960Sstevel@tonic-gate 			break;
29970Sstevel@tonic-gate 	}
29980Sstevel@tonic-gate 
29990Sstevel@tonic-gate 	switch (type) {
30000Sstevel@tonic-gate 	case T_CONN_CON:
30010Sstevel@tonic-gate 		/*
30020Sstevel@tonic-gate 		 * The transport is now connected, send a T_INFO_REQ to get
30030Sstevel@tonic-gate 		 * the tidu size.
30040Sstevel@tonic-gate 		 */
30050Sstevel@tonic-gate 		mutex_exit(&clnt_pending_lock);
30060Sstevel@tonic-gate 		ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >=
30070Sstevel@tonic-gate 			sizeof (struct T_info_req));
30080Sstevel@tonic-gate 		mp->b_rptr = mp->b_datap->db_base;
30090Sstevel@tonic-gate 		((union T_primitives *)mp->b_rptr)->type = T_INFO_REQ;
30100Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + sizeof (struct T_info_req);
30110Sstevel@tonic-gate 		mp->b_datap->db_type = M_PCPROTO;
30120Sstevel@tonic-gate 		put(q, mp);
30130Sstevel@tonic-gate 		return (TRUE);
30140Sstevel@tonic-gate 	case T_INFO_ACK:
30150Sstevel@tonic-gate 	case T_OPTMGMT_ACK:
30160Sstevel@tonic-gate 		e->call_status = RPC_SUCCESS;
30170Sstevel@tonic-gate 		e->call_reply = mp;
30180Sstevel@tonic-gate 		e->call_notified = TRUE;
30190Sstevel@tonic-gate 		cv_signal(&e->call_cv);
30200Sstevel@tonic-gate 		break;
30210Sstevel@tonic-gate 	case T_ERROR_ACK:
30220Sstevel@tonic-gate 		e->call_status = RPC_CANTCONNECT;
30230Sstevel@tonic-gate 		e->call_reply = mp;
30240Sstevel@tonic-gate 		e->call_notified = TRUE;
30250Sstevel@tonic-gate 		cv_signal(&e->call_cv);
30260Sstevel@tonic-gate 		break;
30270Sstevel@tonic-gate 	case T_OK_ACK:
30280Sstevel@tonic-gate 		/*
30290Sstevel@tonic-gate 		 * Great, but we are really waiting for a T_CONN_CON
30300Sstevel@tonic-gate 		 */
30310Sstevel@tonic-gate 		freemsg(mp);
30320Sstevel@tonic-gate 		break;
30330Sstevel@tonic-gate 	default:
30340Sstevel@tonic-gate 		mutex_exit(&clnt_pending_lock);
30350Sstevel@tonic-gate 		RPCLOG(1, "clnt_dispatch_notifyconn: bad type %d\n", type);
30360Sstevel@tonic-gate 		return (FALSE);
30370Sstevel@tonic-gate 	}
30380Sstevel@tonic-gate 
30390Sstevel@tonic-gate 	mutex_exit(&clnt_pending_lock);
30400Sstevel@tonic-gate 	return (TRUE);
30410Sstevel@tonic-gate }
30420Sstevel@tonic-gate 
30430Sstevel@tonic-gate /*
30440Sstevel@tonic-gate  * Called by rpcmod when the transport is (or should be) going away.  Informs
30450Sstevel@tonic-gate  * all callers waiting for replies and marks the entry in the connection
30460Sstevel@tonic-gate  * manager's list as unconnected, and either closing (close handshake in
30470Sstevel@tonic-gate  * progress) or dead.
30480Sstevel@tonic-gate  */
30490Sstevel@tonic-gate void
30500Sstevel@tonic-gate clnt_dispatch_notifyall(queue_t *q, int32_t msg_type, int32_t reason)
30510Sstevel@tonic-gate {
30520Sstevel@tonic-gate 	calllist_t *e;
30530Sstevel@tonic-gate 	call_table_t *ctp;
30540Sstevel@tonic-gate 	struct cm_xprt *cm_entry;
30550Sstevel@tonic-gate 	int have_connmgr_lock;
30560Sstevel@tonic-gate 	int i;
30570Sstevel@tonic-gate 
30580Sstevel@tonic-gate 	ASSERT((q->q_flag & QREADR) == 0);
30590Sstevel@tonic-gate 
30600Sstevel@tonic-gate 	RPCLOG(1, "clnt_dispatch_notifyall on queue %p", (void *)q);
30610Sstevel@tonic-gate 	RPCLOG(1, " received a notifcation prim type [%s]",
30620Sstevel@tonic-gate 	    rpc_tpiprim2name(msg_type));
30630Sstevel@tonic-gate 	RPCLOG(1, " and reason %d\n", reason);
30640Sstevel@tonic-gate 
30650Sstevel@tonic-gate 	/*
30660Sstevel@tonic-gate 	 * Find the transport entry in the connection manager's list, close
30670Sstevel@tonic-gate 	 * the transport and delete the entry.  In the case where rpcmod's
30680Sstevel@tonic-gate 	 * idle timer goes off, it sends us a T_ORDREL_REQ, indicating we
30690Sstevel@tonic-gate 	 * should gracefully close the connection.
30700Sstevel@tonic-gate 	 */
30710Sstevel@tonic-gate 	have_connmgr_lock = 1;
30720Sstevel@tonic-gate 	mutex_enter(&connmgr_lock);
30730Sstevel@tonic-gate 	for (cm_entry = cm_hd; cm_entry; cm_entry = cm_entry->x_next) {
30740Sstevel@tonic-gate 		ASSERT(cm_entry != cm_entry->x_next);
30750Sstevel@tonic-gate 		if (cm_entry->x_wq == q) {
30760Sstevel@tonic-gate 			ASSERT(MUTEX_HELD(&connmgr_lock));
30770Sstevel@tonic-gate 			ASSERT(have_connmgr_lock == 1);
30780Sstevel@tonic-gate 			switch (msg_type) {
30790Sstevel@tonic-gate 			case T_ORDREL_REQ:
30800Sstevel@tonic-gate 
30810Sstevel@tonic-gate 				if (cm_entry->x_dead) {
30820Sstevel@tonic-gate 					RPCLOG(1, "idle timeout on dead "
30830Sstevel@tonic-gate 					    "connection: %p\n",
30840Sstevel@tonic-gate 					    (void *)cm_entry);
30850Sstevel@tonic-gate 					if (clnt_stop_idle != NULL)
30860Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
30870Sstevel@tonic-gate 					break;
30880Sstevel@tonic-gate 				}
30890Sstevel@tonic-gate 
30900Sstevel@tonic-gate 				/*
30910Sstevel@tonic-gate 				 * Only mark the connection as dead if it is
30920Sstevel@tonic-gate 				 * connected and idle.
30930Sstevel@tonic-gate 				 * An unconnected connection has probably
30940Sstevel@tonic-gate 				 * gone idle because the server is down,
30950Sstevel@tonic-gate 				 * and when it comes back up there will be
30960Sstevel@tonic-gate 				 * retries that need to use that connection.
30970Sstevel@tonic-gate 				 */
30980Sstevel@tonic-gate 				if (cm_entry->x_connected ||
30990Sstevel@tonic-gate 				    cm_entry->x_doomed) {
31000Sstevel@tonic-gate 				    if (cm_entry->x_ordrel) {
31010Sstevel@tonic-gate 					if (cm_entry->x_closing == TRUE) {
31020Sstevel@tonic-gate 					/*
31030Sstevel@tonic-gate 					 * The connection is obviously
31040Sstevel@tonic-gate 					 * wedged due to a bug or problem
31050Sstevel@tonic-gate 					 * with the transport. Mark it
31060Sstevel@tonic-gate 					 * as dead. Otherwise we can leak
31070Sstevel@tonic-gate 					 * connections.
31080Sstevel@tonic-gate 					 */
31090Sstevel@tonic-gate 					    cm_entry->x_dead = TRUE;
31100Sstevel@tonic-gate 					    mutex_exit(&connmgr_lock);
31110Sstevel@tonic-gate 					    have_connmgr_lock = 0;
31120Sstevel@tonic-gate 					    if (clnt_stop_idle != NULL)
31130Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
31140Sstevel@tonic-gate 					    break;
31150Sstevel@tonic-gate 					}
31160Sstevel@tonic-gate 					cm_entry->x_closing = TRUE;
31170Sstevel@tonic-gate 					connmgr_sndrel(cm_entry);
31180Sstevel@tonic-gate 					have_connmgr_lock = 0;
31190Sstevel@tonic-gate 				    } else {
31200Sstevel@tonic-gate 					cm_entry->x_dead = TRUE;
31210Sstevel@tonic-gate 					mutex_exit(&connmgr_lock);
31220Sstevel@tonic-gate 					have_connmgr_lock = 0;
31230Sstevel@tonic-gate 					if (clnt_stop_idle != NULL)
31240Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
31250Sstevel@tonic-gate 				    }
31260Sstevel@tonic-gate 				} else {
31270Sstevel@tonic-gate 					/*
31280Sstevel@tonic-gate 					 * We don't mark the connection
31290Sstevel@tonic-gate 					 * as dead, but we turn off the
31300Sstevel@tonic-gate 					 * idle timer.
31310Sstevel@tonic-gate 					 */
31320Sstevel@tonic-gate 					mutex_exit(&connmgr_lock);
31330Sstevel@tonic-gate 					have_connmgr_lock = 0;
31340Sstevel@tonic-gate 					if (clnt_stop_idle != NULL)
31350Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
31360Sstevel@tonic-gate 					RPCLOG(1, "clnt_dispatch_notifyall:"
31370Sstevel@tonic-gate 					    " ignoring timeout from rpcmod"
31380Sstevel@tonic-gate 					    " (q %p) because we are not "
31390Sstevel@tonic-gate 					    " connected\n", (void *)q);
31400Sstevel@tonic-gate 				}
31410Sstevel@tonic-gate 				break;
31420Sstevel@tonic-gate 			case T_ORDREL_IND:
31430Sstevel@tonic-gate 				/*
31440Sstevel@tonic-gate 				 * If this entry is marked closing, then we are
31450Sstevel@tonic-gate 				 * completing a close handshake, and the
31460Sstevel@tonic-gate 				 * connection is dead.  Otherwise, the server is
31470Sstevel@tonic-gate 				 * trying to close. Since the server will not
31480Sstevel@tonic-gate 				 * be sending any more RPC replies, we abort
31490Sstevel@tonic-gate 				 * the connection, including flushing
31500Sstevel@tonic-gate 				 * any RPC requests that are in-transit.
31510Sstevel@tonic-gate 				 */
31520Sstevel@tonic-gate 				if (cm_entry->x_closing) {
31530Sstevel@tonic-gate 					cm_entry->x_dead = TRUE;
31540Sstevel@tonic-gate 					mutex_exit(&connmgr_lock);
31550Sstevel@tonic-gate 					have_connmgr_lock = 0;
31560Sstevel@tonic-gate 					if (clnt_stop_idle != NULL)
31570Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
31580Sstevel@tonic-gate 				} else {
31590Sstevel@tonic-gate 					/*
31600Sstevel@tonic-gate 					 * if we're getting a disconnect
31610Sstevel@tonic-gate 					 * before we've finished our
31620Sstevel@tonic-gate 					 * connect attempt, mark it for
31630Sstevel@tonic-gate 					 * later processing
31640Sstevel@tonic-gate 					 */
31650Sstevel@tonic-gate 					if (cm_entry->x_thread)
31660Sstevel@tonic-gate 						cm_entry->x_early_disc = TRUE;
31670Sstevel@tonic-gate 					else
31680Sstevel@tonic-gate 						cm_entry->x_connected = FALSE;
31690Sstevel@tonic-gate 					cm_entry->x_waitdis = TRUE;
31700Sstevel@tonic-gate 					connmgr_snddis(cm_entry);
31710Sstevel@tonic-gate 					have_connmgr_lock = 0;
31720Sstevel@tonic-gate 				}
31730Sstevel@tonic-gate 				break;
31740Sstevel@tonic-gate 
31750Sstevel@tonic-gate 			case T_ERROR_ACK:
31760Sstevel@tonic-gate 			case T_OK_ACK:
31770Sstevel@tonic-gate 				cm_entry->x_waitdis = FALSE;
31780Sstevel@tonic-gate 				cv_signal(&cm_entry->x_dis_cv);
31790Sstevel@tonic-gate 				mutex_exit(&connmgr_lock);
31800Sstevel@tonic-gate 				return;
31810Sstevel@tonic-gate 
31820Sstevel@tonic-gate 			case T_DISCON_REQ:
31830Sstevel@tonic-gate 				if (cm_entry->x_thread)
31840Sstevel@tonic-gate 					cm_entry->x_early_disc = TRUE;
31850Sstevel@tonic-gate 				else
31860Sstevel@tonic-gate 					cm_entry->x_connected = FALSE;
31870Sstevel@tonic-gate 				cm_entry->x_waitdis = TRUE;
31880Sstevel@tonic-gate 
31890Sstevel@tonic-gate 				connmgr_snddis(cm_entry);
31900Sstevel@tonic-gate 				have_connmgr_lock = 0;
31910Sstevel@tonic-gate 				break;
31920Sstevel@tonic-gate 
31930Sstevel@tonic-gate 			case T_DISCON_IND:
31940Sstevel@tonic-gate 			default:
31950Sstevel@tonic-gate 				/*
31960Sstevel@tonic-gate 				 * if we're getting a disconnect before
31970Sstevel@tonic-gate 				 * we've finished our connect attempt,
31980Sstevel@tonic-gate 				 * mark it for later processing
31990Sstevel@tonic-gate 				 */
32000Sstevel@tonic-gate 				if (cm_entry->x_closing) {
32010Sstevel@tonic-gate 					cm_entry->x_dead = TRUE;
32020Sstevel@tonic-gate 					mutex_exit(&connmgr_lock);
32030Sstevel@tonic-gate 					have_connmgr_lock = 0;
32040Sstevel@tonic-gate 					if (clnt_stop_idle != NULL)
32050Sstevel@tonic-gate 						(*clnt_stop_idle)(q);
32060Sstevel@tonic-gate 				} else {
32070Sstevel@tonic-gate 					if (cm_entry->x_thread) {
32080Sstevel@tonic-gate 						cm_entry->x_early_disc = TRUE;
32090Sstevel@tonic-gate 					} else {
32100Sstevel@tonic-gate 						cm_entry->x_dead = TRUE;
32110Sstevel@tonic-gate 						cm_entry->x_connected = FALSE;
32120Sstevel@tonic-gate 					}
32130Sstevel@tonic-gate 				}
32140Sstevel@tonic-gate 				break;
32150Sstevel@tonic-gate 			}
32160Sstevel@tonic-gate 			break;
32170Sstevel@tonic-gate 		}
32180Sstevel@tonic-gate 	}
32190Sstevel@tonic-gate 
32200Sstevel@tonic-gate 	if (have_connmgr_lock)
32210Sstevel@tonic-gate 		mutex_exit(&connmgr_lock);
32220Sstevel@tonic-gate 
32230Sstevel@tonic-gate 	if (msg_type == T_ERROR_ACK || msg_type == T_OK_ACK) {
32240Sstevel@tonic-gate 		RPCLOG(1, "clnt_dispatch_notifyall: (wq %p) could not find "
32250Sstevel@tonic-gate 		    "connmgr entry for discon ack\n", (void *)q);
32260Sstevel@tonic-gate 		return;
32270Sstevel@tonic-gate 	}
32280Sstevel@tonic-gate 
32290Sstevel@tonic-gate 	/*
32300Sstevel@tonic-gate 	 * Then kick all the clnt_pending calls out of their wait.  There
32310Sstevel@tonic-gate 	 * should be no clnt_pending calls in the case of rpcmod's idle
32320Sstevel@tonic-gate 	 * timer firing.
32330Sstevel@tonic-gate 	 */
32340Sstevel@tonic-gate 	for (i = 0; i < clnt_cots_hash_size; i++) {
32350Sstevel@tonic-gate 		ctp = &cots_call_ht[i];
32360Sstevel@tonic-gate 		mutex_enter(&ctp->ct_lock);
32370Sstevel@tonic-gate 		for (e = ctp->ct_call_next;
32380Sstevel@tonic-gate 			e != (calllist_t *)ctp;
32390Sstevel@tonic-gate 			e = e->call_next) {
32400Sstevel@tonic-gate 			if (e->call_wq == q && e->call_notified == FALSE) {
32410Sstevel@tonic-gate 				RPCLOG(1,
32420Sstevel@tonic-gate 				"clnt_dispatch_notifyall for queue %p ",
32430Sstevel@tonic-gate 					(void *)q);
32440Sstevel@tonic-gate 				RPCLOG(1, "aborting clnt_pending call %p\n",
32450Sstevel@tonic-gate 					(void *)e);
32460Sstevel@tonic-gate 
32470Sstevel@tonic-gate 				if (msg_type == T_DISCON_IND)
32480Sstevel@tonic-gate 					e->call_reason = reason;
32490Sstevel@tonic-gate 				e->call_notified = TRUE;
32500Sstevel@tonic-gate 				e->call_status = RPC_XPRTFAILED;
32510Sstevel@tonic-gate 				cv_signal(&e->call_cv);
32520Sstevel@tonic-gate 			}
32530Sstevel@tonic-gate 		}
32540Sstevel@tonic-gate 		mutex_exit(&ctp->ct_lock);
32550Sstevel@tonic-gate 	}
32560Sstevel@tonic-gate 
32570Sstevel@tonic-gate 	mutex_enter(&clnt_pending_lock);
32580Sstevel@tonic-gate 	for (e = clnt_pending; e; e = e->call_next) {
32590Sstevel@tonic-gate 		/*
32600Sstevel@tonic-gate 		 * Only signal those RPC handles that haven't been
32610Sstevel@tonic-gate 		 * signalled yet. Otherwise we can get a bogus call_reason.
32620Sstevel@tonic-gate 		 * This can happen if thread A is making a call over a
32630Sstevel@tonic-gate 		 * connection. If the server is killed, it will cause
32640Sstevel@tonic-gate 		 * reset, and reason will default to EIO as a result of
32650Sstevel@tonic-gate 		 * a T_ORDREL_IND. Thread B then attempts to recreate
32660Sstevel@tonic-gate 		 * the connection but gets a T_DISCON_IND. If we set the
32670Sstevel@tonic-gate 		 * call_reason code for all threads, then if thread A
32680Sstevel@tonic-gate 		 * hasn't been dispatched yet, it will get the wrong
32690Sstevel@tonic-gate 		 * reason. The bogus call_reason can make it harder to
32700Sstevel@tonic-gate 		 * discriminate between calls that fail because the
32710Sstevel@tonic-gate 		 * connection attempt failed versus those where the call
32720Sstevel@tonic-gate 		 * may have been executed on the server.
32730Sstevel@tonic-gate 		 */
32740Sstevel@tonic-gate 		if (e->call_wq == q && e->call_notified == FALSE) {
32750Sstevel@tonic-gate 			RPCLOG(1, "clnt_dispatch_notifyall for queue %p ",
32760Sstevel@tonic-gate 			    (void *)q);
32770Sstevel@tonic-gate 			RPCLOG(1, " aborting clnt_pending call %p\n",
32780Sstevel@tonic-gate 			    (void *)e);
32790Sstevel@tonic-gate 
32800Sstevel@tonic-gate 			if (msg_type == T_DISCON_IND)
32810Sstevel@tonic-gate 				e->call_reason = reason;
32820Sstevel@tonic-gate 			e->call_notified = TRUE;
32830Sstevel@tonic-gate 			/*
32840Sstevel@tonic-gate 			 * Let the caller timeout, else he will retry
32850Sstevel@tonic-gate 			 * immediately.
32860Sstevel@tonic-gate 			 */
32870Sstevel@tonic-gate 			e->call_status = RPC_XPRTFAILED;
32880Sstevel@tonic-gate 
32890Sstevel@tonic-gate 			/*
32900Sstevel@tonic-gate 			 * We used to just signal those threads
32910Sstevel@tonic-gate 			 * waiting for a connection, (call_xid = 0).
32920Sstevel@tonic-gate 			 * That meant that threads waiting for a response
32930Sstevel@tonic-gate 			 * waited till their timeout expired. This
32940Sstevel@tonic-gate 			 * could be a long time if they've specified a
32950Sstevel@tonic-gate 			 * maximum timeout. (2^31 - 1). So we
32960Sstevel@tonic-gate 			 * Signal all threads now.
32970Sstevel@tonic-gate 			 */
32980Sstevel@tonic-gate 			cv_signal(&e->call_cv);
32990Sstevel@tonic-gate 		}
33000Sstevel@tonic-gate 	}
33010Sstevel@tonic-gate 	mutex_exit(&clnt_pending_lock);
33020Sstevel@tonic-gate }
33030Sstevel@tonic-gate 
33040Sstevel@tonic-gate 
33050Sstevel@tonic-gate /*ARGSUSED*/
33060Sstevel@tonic-gate /*
33070Sstevel@tonic-gate  * after resuming a system that's been suspended for longer than the
33080Sstevel@tonic-gate  * NFS server's idle timeout (svc_idle_timeout for Solaris 2), rfscall()
33090Sstevel@tonic-gate  * generates "NFS server X not responding" and "NFS server X ok" messages;
33100Sstevel@tonic-gate  * here we reset inet connections to cause a re-connect and avoid those
33110Sstevel@tonic-gate  * NFS messages.  see 4045054
33120Sstevel@tonic-gate  */
33130Sstevel@tonic-gate boolean_t
33140Sstevel@tonic-gate connmgr_cpr_reset(void *arg, int code)
33150Sstevel@tonic-gate {
33160Sstevel@tonic-gate 	struct cm_xprt *cxp;
33170Sstevel@tonic-gate 
33180Sstevel@tonic-gate 	if (code == CB_CODE_CPR_CHKPT)
33190Sstevel@tonic-gate 		return (B_TRUE);
33200Sstevel@tonic-gate 
33210Sstevel@tonic-gate 	if (mutex_tryenter(&connmgr_lock) == 0)
33220Sstevel@tonic-gate 		return (B_FALSE);
33230Sstevel@tonic-gate 	for (cxp = cm_hd; cxp; cxp = cxp->x_next) {
33240Sstevel@tonic-gate 		if ((cxp->x_family == AF_INET || cxp->x_family == AF_INET6) &&
33250Sstevel@tonic-gate 			cxp->x_connected == TRUE) {
33260Sstevel@tonic-gate 			if (cxp->x_thread)
33270Sstevel@tonic-gate 				cxp->x_early_disc = TRUE;
33280Sstevel@tonic-gate 			else
33290Sstevel@tonic-gate 				cxp->x_connected = FALSE;
33300Sstevel@tonic-gate 			cxp->x_needdis = TRUE;
33310Sstevel@tonic-gate 		}
33320Sstevel@tonic-gate 	}
33330Sstevel@tonic-gate 	mutex_exit(&connmgr_lock);
33340Sstevel@tonic-gate 	return (B_TRUE);
33350Sstevel@tonic-gate }
33360Sstevel@tonic-gate 
33370Sstevel@tonic-gate void
33380Sstevel@tonic-gate clnt_cots_stats_init(zoneid_t zoneid, struct rpc_cots_client **statsp)
33390Sstevel@tonic-gate {
33400Sstevel@tonic-gate 
33410Sstevel@tonic-gate 	*statsp = (struct rpc_cots_client *)rpcstat_zone_init_common(zoneid,
33420Sstevel@tonic-gate 	    "unix", "rpc_cots_client", (const kstat_named_t *)&cots_rcstat_tmpl,
33430Sstevel@tonic-gate 	    sizeof (cots_rcstat_tmpl));
33440Sstevel@tonic-gate }
33450Sstevel@tonic-gate 
33460Sstevel@tonic-gate void
33470Sstevel@tonic-gate clnt_cots_stats_fini(zoneid_t zoneid, struct rpc_cots_client **statsp)
33480Sstevel@tonic-gate {
33490Sstevel@tonic-gate 	rpcstat_zone_fini_common(zoneid, "unix", "rpc_cots_client");
33500Sstevel@tonic-gate 	kmem_free(*statsp, sizeof (cots_rcstat_tmpl));
33510Sstevel@tonic-gate }
33520Sstevel@tonic-gate 
33530Sstevel@tonic-gate void
33540Sstevel@tonic-gate clnt_cots_init(void)
33550Sstevel@tonic-gate {
33560Sstevel@tonic-gate 	mutex_init(&connmgr_lock, NULL, MUTEX_DEFAULT, NULL);
33570Sstevel@tonic-gate 	mutex_init(&clnt_pending_lock, NULL, MUTEX_DEFAULT, NULL);
33580Sstevel@tonic-gate 
33590Sstevel@tonic-gate 	if (clnt_cots_hash_size < DEFAULT_MIN_HASH_SIZE)
33600Sstevel@tonic-gate 		clnt_cots_hash_size = DEFAULT_MIN_HASH_SIZE;
33610Sstevel@tonic-gate 
33620Sstevel@tonic-gate 	cots_call_ht = call_table_init(clnt_cots_hash_size);
33630Sstevel@tonic-gate 	zone_key_create(&zone_cots_key, NULL, NULL, clnt_zone_destroy);
33640Sstevel@tonic-gate }
33650Sstevel@tonic-gate 
33660Sstevel@tonic-gate void
33670Sstevel@tonic-gate clnt_cots_fini(void)
33680Sstevel@tonic-gate {
33690Sstevel@tonic-gate 	(void) zone_key_delete(zone_cots_key);
33700Sstevel@tonic-gate }
33710Sstevel@tonic-gate 
33720Sstevel@tonic-gate /*
33730Sstevel@tonic-gate  * Wait for TPI ack, returns success only if expected ack is received
33740Sstevel@tonic-gate  * within timeout period.
33750Sstevel@tonic-gate  */
33760Sstevel@tonic-gate 
33770Sstevel@tonic-gate static int
33780Sstevel@tonic-gate waitforack(calllist_t *e, t_scalar_t ack_prim, const struct timeval *waitp,
33790Sstevel@tonic-gate     bool_t nosignal)
33800Sstevel@tonic-gate {
33810Sstevel@tonic-gate 	union T_primitives *tpr;
33820Sstevel@tonic-gate 	clock_t timout;
33830Sstevel@tonic-gate 	int cv_stat = 1;
33840Sstevel@tonic-gate 
33850Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&clnt_pending_lock));
33860Sstevel@tonic-gate 	while (e->call_reply == NULL) {
33870Sstevel@tonic-gate 		if (waitp != NULL) {
33880Sstevel@tonic-gate 			timout = waitp->tv_sec * drv_usectohz(MICROSEC) +
33890Sstevel@tonic-gate 			    drv_usectohz(waitp->tv_usec) + lbolt;
33900Sstevel@tonic-gate 			if (nosignal)
33910Sstevel@tonic-gate 				cv_stat = cv_timedwait(&e->call_cv,
33920Sstevel@tonic-gate 				    &clnt_pending_lock, timout);
33930Sstevel@tonic-gate 			else
33940Sstevel@tonic-gate 				cv_stat = cv_timedwait_sig(&e->call_cv,
33950Sstevel@tonic-gate 				    &clnt_pending_lock, timout);
33960Sstevel@tonic-gate 		} else {
33970Sstevel@tonic-gate 			if (nosignal)
33980Sstevel@tonic-gate 				cv_wait(&e->call_cv, &clnt_pending_lock);
33990Sstevel@tonic-gate 			else
34000Sstevel@tonic-gate 				cv_stat = cv_wait_sig(&e->call_cv,
34010Sstevel@tonic-gate 				    &clnt_pending_lock);
34020Sstevel@tonic-gate 		}
34030Sstevel@tonic-gate 		if (cv_stat == -1)
34040Sstevel@tonic-gate 			return (ETIME);
34050Sstevel@tonic-gate 		if (cv_stat == 0)
34060Sstevel@tonic-gate 			return (EINTR);
34070Sstevel@tonic-gate 	}
34080Sstevel@tonic-gate 	tpr = (union T_primitives *)e->call_reply->b_rptr;
34090Sstevel@tonic-gate 	if (tpr->type == ack_prim)
34100Sstevel@tonic-gate 		return (0); /* Success */
34110Sstevel@tonic-gate 
34120Sstevel@tonic-gate 	if (tpr->type == T_ERROR_ACK) {
34130Sstevel@tonic-gate 		if (tpr->error_ack.TLI_error == TSYSERR)
34140Sstevel@tonic-gate 			return (tpr->error_ack.UNIX_error);
34150Sstevel@tonic-gate 		else
34160Sstevel@tonic-gate 			return (t_tlitosyserr(tpr->error_ack.TLI_error));
34170Sstevel@tonic-gate 	}
34180Sstevel@tonic-gate 
34190Sstevel@tonic-gate 	return (EPROTO); /* unknown or unexpected primitive */
34200Sstevel@tonic-gate }
3421