1*0Sstevel@tonic-gate /*
2*0Sstevel@tonic-gate  * CDDL HEADER START
3*0Sstevel@tonic-gate  *
4*0Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*0Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*0Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*0Sstevel@tonic-gate  * with the License.
8*0Sstevel@tonic-gate  *
9*0Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*0Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*0Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*0Sstevel@tonic-gate  * and limitations under the License.
13*0Sstevel@tonic-gate  *
14*0Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*0Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*0Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*0Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*0Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*0Sstevel@tonic-gate  *
20*0Sstevel@tonic-gate  * CDDL HEADER END
21*0Sstevel@tonic-gate  */
22*0Sstevel@tonic-gate /*
23*0Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*0Sstevel@tonic-gate  * Use is subject to license terms.
25*0Sstevel@tonic-gate  */
26*0Sstevel@tonic-gate /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27*0Sstevel@tonic-gate /* All Rights Reserved */
28*0Sstevel@tonic-gate /*
29*0Sstevel@tonic-gate  * Portions of this source code were derived from Berkeley
30*0Sstevel@tonic-gate  * 4.3 BSD under license from the Regents of the University of
31*0Sstevel@tonic-gate  * California.
32*0Sstevel@tonic-gate  */
33*0Sstevel@tonic-gate 
34*0Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
35*0Sstevel@tonic-gate 
36*0Sstevel@tonic-gate /*
37*0Sstevel@tonic-gate  * clnt_vc.c
38*0Sstevel@tonic-gate  *
39*0Sstevel@tonic-gate  * Implements a connectionful client side RPC.
40*0Sstevel@tonic-gate  *
41*0Sstevel@tonic-gate  * Connectionful RPC supports 'batched calls'.
 * A sequence of calls may be batched-up in a send buffer. The rpc call
 * returns immediately to the client even though the call was not necessarily
44*0Sstevel@tonic-gate  * sent. The batching occurs if the results' xdr routine is NULL (0) AND
45*0Sstevel@tonic-gate  * the rpc timeout value is zero (see clnt.h, rpc).
46*0Sstevel@tonic-gate  *
47*0Sstevel@tonic-gate  * Clients should NOT casually batch calls that in fact return results; that
48*0Sstevel@tonic-gate  * is the server side should be aware that a call is batched and not produce
49*0Sstevel@tonic-gate  * any return message. Batched calls that produce many result messages can
50*0Sstevel@tonic-gate  * deadlock (netlock) the client and the server....
51*0Sstevel@tonic-gate  */
52*0Sstevel@tonic-gate 
53*0Sstevel@tonic-gate 
54*0Sstevel@tonic-gate #include "mt.h"
55*0Sstevel@tonic-gate #include "rpc_mt.h"
56*0Sstevel@tonic-gate #include <assert.h>
57*0Sstevel@tonic-gate #include <rpc/rpc.h>
58*0Sstevel@tonic-gate #include <rpc/trace.h>
59*0Sstevel@tonic-gate #include <errno.h>
60*0Sstevel@tonic-gate #include <sys/byteorder.h>
61*0Sstevel@tonic-gate #include <sys/mkdev.h>
62*0Sstevel@tonic-gate #include <sys/poll.h>
63*0Sstevel@tonic-gate #include <syslog.h>
64*0Sstevel@tonic-gate #include <stdlib.h>
65*0Sstevel@tonic-gate #include <unistd.h>
66*0Sstevel@tonic-gate #include <netinet/tcp.h>
67*0Sstevel@tonic-gate 
#define	MCALL_MSG_SIZE 24
#define	SECS_TO_MS 1000
/*
 * WARNING: USECS_TO_MS is deliberately NOT parenthesized.  It only works
 * when used as a right-hand multiplier, e.g. "usec * USECS_TO_MS", which
 * evaluates left-to-right as (usec * 1) / 1000.  Writing it as (1/1000)
 * would make it integer 0, and "USECS_TO_MS * usec" is always 0.
 */
#define	USECS_TO_MS 1/1000
#ifndef MIN
#define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif
74*0Sstevel@tonic-gate 
75*0Sstevel@tonic-gate extern int __rpc_timeval_to_msec();
76*0Sstevel@tonic-gate extern int __rpc_compress_pollfd(int, pollfd_t *, pollfd_t *);
77*0Sstevel@tonic-gate extern bool_t xdr_opaque_auth();
78*0Sstevel@tonic-gate extern bool_t __rpc_gss_wrap();
79*0Sstevel@tonic-gate extern bool_t __rpc_gss_unwrap();
80*0Sstevel@tonic-gate 
81*0Sstevel@tonic-gate CLIENT *_clnt_vc_create_timed(int, struct netbuf *, rpcprog_t,
82*0Sstevel@tonic-gate 		rpcvers_t, uint_t, uint_t, const struct timeval *);
83*0Sstevel@tonic-gate 
84*0Sstevel@tonic-gate static struct clnt_ops	*clnt_vc_ops();
85*0Sstevel@tonic-gate #ifdef __STDC__
86*0Sstevel@tonic-gate static int		read_vc(void *, caddr_t, int);
87*0Sstevel@tonic-gate static int		write_vc(void *, caddr_t, int);
88*0Sstevel@tonic-gate #else
89*0Sstevel@tonic-gate static int		read_vc();
90*0Sstevel@tonic-gate static int		write_vc();
91*0Sstevel@tonic-gate #endif
92*0Sstevel@tonic-gate static int		t_rcvall();
93*0Sstevel@tonic-gate static bool_t		time_not_ok();
94*0Sstevel@tonic-gate static bool_t		set_up_connection();
95*0Sstevel@tonic-gate 
96*0Sstevel@tonic-gate struct ct_data;
97*0Sstevel@tonic-gate static bool_t		set_io_mode(struct ct_data *ct, int ioMode);
98*0Sstevel@tonic-gate 
99*0Sstevel@tonic-gate /*
100*0Sstevel@tonic-gate  * Lock table handle used by various MT sync. routines
101*0Sstevel@tonic-gate  */
102*0Sstevel@tonic-gate static mutex_t	vctbl_lock = DEFAULTMUTEX;
103*0Sstevel@tonic-gate static void	*vctbl = NULL;
104*0Sstevel@tonic-gate 
105*0Sstevel@tonic-gate static const char clnt_vc_errstr[] = "%s : %s";
106*0Sstevel@tonic-gate static const char clnt_vc_str[] = "clnt_vc_create";
107*0Sstevel@tonic-gate static const char clnt_read_vc_str[] = "read_vc";
108*0Sstevel@tonic-gate static const char __no_mem_str[] = "out of memory";
109*0Sstevel@tonic-gate static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
110*0Sstevel@tonic-gate static const char no_nonblock_str[] = "could not set transport blocking mode";
111*0Sstevel@tonic-gate 
/*
 * Private data structure stashed in a CLIENT handle's cl_private field.
 * Describes one connection-oriented (vc) transport endpoint, the
 * pre-marshalled call header, and the buffering state used by the
 * non-blocking I/O mode.
 */
struct ct_data {
	int		ct_fd;		/* connection's fd */
	bool_t		ct_closeit;	/* close it on destroy */
	int		ct_tsdu;	/* size of tsdu */
	int		ct_wait;	/* wait interval in milliseconds */
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;	/* remote addr */
	struct rpc_err	ct_error;	/* per-handle RPC error status */
	char		ct_mcall[MCALL_MSG_SIZE]; /* marshalled callmsg */
	uint_t		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;	/* XDR stream */

	/* NON STANDARD INFO - 00-08-31 */
	bool_t		ct_is_oneway; /* True if the current call is oneway. */
	bool_t		ct_is_blocking;	/* current blocking mode of ct_fd */
	ushort_t	ct_io_mode;	/* RPC_CL_BLOCKING or RPC_CL_NONBLOCKING */
	ushort_t	ct_blocking_mode; /* flush policy, e.g. RPC_CL_BLOCKING_FLUSH */
	uint_t		ct_bufferSize; /* Total size of the buffer. */
	uint_t		ct_bufferPendingSize; /* Size of unsent data. */
	char 		*ct_buffer; /* Pointer to the buffer. */
	char 		*ct_bufferWritePtr; /* Ptr to the first free byte. */
	char 		*ct_bufferReadPtr; /* Ptr to the first byte of data. */
};
138*0Sstevel@tonic-gate 
139*0Sstevel@tonic-gate struct nb_reg_node {
140*0Sstevel@tonic-gate 	struct nb_reg_node *next;
141*0Sstevel@tonic-gate 	struct ct_data *ct;
142*0Sstevel@tonic-gate };
143*0Sstevel@tonic-gate 
144*0Sstevel@tonic-gate static struct nb_reg_node *nb_first = (struct nb_reg_node *)&nb_first;
145*0Sstevel@tonic-gate static struct nb_reg_node *nb_free  = (struct nb_reg_node *)&nb_free;
146*0Sstevel@tonic-gate 
147*0Sstevel@tonic-gate static bool_t exit_handler_set = FALSE;
148*0Sstevel@tonic-gate 
149*0Sstevel@tonic-gate static mutex_t nb_list_mutex = DEFAULTMUTEX;
150*0Sstevel@tonic-gate 
151*0Sstevel@tonic-gate 
152*0Sstevel@tonic-gate /* Define some macros to manage the linked list. */
153*0Sstevel@tonic-gate #define	LIST_ISEMPTY(l) (l == (struct nb_reg_node *)&l)
154*0Sstevel@tonic-gate #define	LIST_CLR(l) (l = (struct nb_reg_node *)&l)
155*0Sstevel@tonic-gate #define	LIST_ADD(l, node) (node->next = l->next, l = node)
156*0Sstevel@tonic-gate #define	LIST_EXTRACT(l, node) (node = l, l = l->next)
157*0Sstevel@tonic-gate #define	LIST_FOR_EACH(l, node) \
158*0Sstevel@tonic-gate 	for (node = l; node != (struct nb_reg_node *)&l; node = node->next)
159*0Sstevel@tonic-gate 
160*0Sstevel@tonic-gate 
161*0Sstevel@tonic-gate /* Default size of the IO buffer used in non blocking mode */
162*0Sstevel@tonic-gate #define	DEFAULT_PENDING_ZONE_MAX_SIZE (16*1024)
163*0Sstevel@tonic-gate 
164*0Sstevel@tonic-gate static int nb_send(struct ct_data *ct, void *buff,
165*0Sstevel@tonic-gate     unsigned  int  nbytes);
166*0Sstevel@tonic-gate 
167*0Sstevel@tonic-gate static int do_flush(struct ct_data *ct, uint_t flush_mode);
168*0Sstevel@tonic-gate 
169*0Sstevel@tonic-gate static bool_t set_flush_mode(struct ct_data *ct,
170*0Sstevel@tonic-gate     int P_mode);
171*0Sstevel@tonic-gate 
172*0Sstevel@tonic-gate static bool_t set_blocking_connection(struct ct_data *ct,
173*0Sstevel@tonic-gate     bool_t blocking);
174*0Sstevel@tonic-gate 
175*0Sstevel@tonic-gate static int register_nb(struct ct_data *ct);
176*0Sstevel@tonic-gate static int unregister_nb(struct ct_data *ct);
177*0Sstevel@tonic-gate 
178*0Sstevel@tonic-gate 
179*0Sstevel@tonic-gate /*
180*0Sstevel@tonic-gate  * Change the mode of the underlying fd.
181*0Sstevel@tonic-gate  */
182*0Sstevel@tonic-gate static bool_t
183*0Sstevel@tonic-gate set_blocking_connection(struct ct_data *ct, bool_t blocking)
184*0Sstevel@tonic-gate {
185*0Sstevel@tonic-gate 	int flag;
186*0Sstevel@tonic-gate 
187*0Sstevel@tonic-gate 	/*
188*0Sstevel@tonic-gate 	 * If the underlying fd is already in the required mode,
189*0Sstevel@tonic-gate 	 * avoid the syscall.
190*0Sstevel@tonic-gate 	 */
191*0Sstevel@tonic-gate 	if (ct->ct_is_blocking == blocking)
192*0Sstevel@tonic-gate 		return (TRUE);
193*0Sstevel@tonic-gate 
194*0Sstevel@tonic-gate 	if ((flag = fcntl(ct->ct_fd, F_GETFL, 0)) < 0) {
195*0Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
196*0Sstevel@tonic-gate 		    no_fcntl_getfl_str);
197*0Sstevel@tonic-gate 		return (FALSE);
198*0Sstevel@tonic-gate 	}
199*0Sstevel@tonic-gate 
200*0Sstevel@tonic-gate 	flag = blocking? flag&~O_NONBLOCK : flag|O_NONBLOCK;
201*0Sstevel@tonic-gate 	if (fcntl(ct->ct_fd, F_SETFL, flag) != 0) {
202*0Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
203*0Sstevel@tonic-gate 		    no_nonblock_str);
204*0Sstevel@tonic-gate 		return (FALSE);
205*0Sstevel@tonic-gate 	}
206*0Sstevel@tonic-gate 	ct->ct_is_blocking = blocking;
207*0Sstevel@tonic-gate 	return (TRUE);
208*0Sstevel@tonic-gate }
209*0Sstevel@tonic-gate 
210*0Sstevel@tonic-gate /*
211*0Sstevel@tonic-gate  * Create a client handle for a connection.
212*0Sstevel@tonic-gate  * Default options are set, which the user can change using clnt_control()'s.
213*0Sstevel@tonic-gate  * The rpc/vc package does buffering similar to stdio, so the client
214*0Sstevel@tonic-gate  * must pick send and receive buffer sizes, 0 => use the default.
215*0Sstevel@tonic-gate  * NB: fd is copied into a private area.
216*0Sstevel@tonic-gate  * NB: The rpch->cl_auth is set null authentication. Caller may wish to
217*0Sstevel@tonic-gate  * set this something more useful.
218*0Sstevel@tonic-gate  *
219*0Sstevel@tonic-gate  * fd should be open and bound.
220*0Sstevel@tonic-gate  */
221*0Sstevel@tonic-gate CLIENT *
222*0Sstevel@tonic-gate clnt_vc_create(fd, svcaddr, prog, vers, sendsz, recvsz)
223*0Sstevel@tonic-gate 	int fd;				/* open file descriptor */
224*0Sstevel@tonic-gate 	struct netbuf *svcaddr;		/* servers address */
225*0Sstevel@tonic-gate 	rpcprog_t prog;			/* program number */
226*0Sstevel@tonic-gate 	rpcvers_t vers;			/* version number */
227*0Sstevel@tonic-gate 	uint_t sendsz;			/* buffer recv size */
228*0Sstevel@tonic-gate 	uint_t recvsz;			/* buffer send size */
229*0Sstevel@tonic-gate {
230*0Sstevel@tonic-gate 	return (_clnt_vc_create_timed(fd, svcaddr, prog, vers, sendsz,
231*0Sstevel@tonic-gate 			recvsz, NULL));
232*0Sstevel@tonic-gate }
233*0Sstevel@tonic-gate 
234*0Sstevel@tonic-gate /*
235*0Sstevel@tonic-gate  * This has the same definition as clnt_vc_create(), except it
236*0Sstevel@tonic-gate  * takes an additional parameter - a pointer to a timeval structure.
237*0Sstevel@tonic-gate  *
238*0Sstevel@tonic-gate  * Not a public interface. This is for clnt_create_timed,
239*0Sstevel@tonic-gate  * clnt_create_vers_timed, clnt_tp_create_timed to pass down the timeout
240*0Sstevel@tonic-gate  * value to control a tcp connection attempt.
241*0Sstevel@tonic-gate  * (for bug 4049792: clnt_create_timed does not time out)
242*0Sstevel@tonic-gate  *
243*0Sstevel@tonic-gate  * If tp is NULL, use default timeout to set up the connection.
244*0Sstevel@tonic-gate  */
245*0Sstevel@tonic-gate CLIENT *
246*0Sstevel@tonic-gate _clnt_vc_create_timed(fd, svcaddr, prog, vers, sendsz, recvsz, tp)
247*0Sstevel@tonic-gate 	int fd;				/* open file descriptor */
248*0Sstevel@tonic-gate 	struct netbuf *svcaddr;		/* servers address */
249*0Sstevel@tonic-gate 	rpcprog_t prog;			/* program number */
250*0Sstevel@tonic-gate 	rpcvers_t vers;			/* version number */
251*0Sstevel@tonic-gate 	uint_t sendsz;			/* buffer recv size */
252*0Sstevel@tonic-gate 	uint_t recvsz;			/* buffer send size */
253*0Sstevel@tonic-gate 	const struct timeval *tp;	/* connection timeout */
254*0Sstevel@tonic-gate {
255*0Sstevel@tonic-gate 	CLIENT *cl;			/* client handle */
256*0Sstevel@tonic-gate 	struct ct_data *ct;		/* private data */
257*0Sstevel@tonic-gate 	struct timeval now;
258*0Sstevel@tonic-gate 	struct rpc_msg call_msg;
259*0Sstevel@tonic-gate 	struct t_info tinfo;
260*0Sstevel@tonic-gate 	int flag;
261*0Sstevel@tonic-gate 
262*0Sstevel@tonic-gate 	trace5(TR_clnt_vc_create, 0, prog, vers, sendsz, recvsz);
263*0Sstevel@tonic-gate 
264*0Sstevel@tonic-gate 	cl = (CLIENT *)mem_alloc(sizeof (*cl));
265*0Sstevel@tonic-gate 	ct = (struct ct_data *)mem_alloc(sizeof (*ct));
266*0Sstevel@tonic-gate 	if ((cl == (CLIENT *)NULL) || (ct == (struct ct_data *)NULL)) {
267*0Sstevel@tonic-gate 		(void) syslog(LOG_ERR, clnt_vc_errstr,
268*0Sstevel@tonic-gate 				clnt_vc_str, __no_mem_str);
269*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
270*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = errno;
271*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
272*0Sstevel@tonic-gate 		goto err;
273*0Sstevel@tonic-gate 	}
274*0Sstevel@tonic-gate 	ct->ct_addr.buf = NULL;
275*0Sstevel@tonic-gate 
276*0Sstevel@tonic-gate 	sig_mutex_lock(&vctbl_lock);
277*0Sstevel@tonic-gate 
278*0Sstevel@tonic-gate 	if ((vctbl == NULL) && ((vctbl = rpc_fd_init()) == NULL)) {
279*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
280*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = errno;
281*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
282*0Sstevel@tonic-gate 		sig_mutex_unlock(&vctbl_lock);
283*0Sstevel@tonic-gate 		goto err;
284*0Sstevel@tonic-gate 	}
285*0Sstevel@tonic-gate 
286*0Sstevel@tonic-gate 	ct->ct_io_mode = RPC_CL_BLOCKING;
287*0Sstevel@tonic-gate 	ct->ct_blocking_mode = RPC_CL_BLOCKING_FLUSH;
288*0Sstevel@tonic-gate 
289*0Sstevel@tonic-gate 	ct->ct_buffer = NULL;	/* We allocate the buffer when needed. */
290*0Sstevel@tonic-gate 	ct->ct_bufferSize = DEFAULT_PENDING_ZONE_MAX_SIZE;
291*0Sstevel@tonic-gate 	ct->ct_bufferPendingSize = 0;
292*0Sstevel@tonic-gate 	ct->ct_bufferWritePtr = NULL;
293*0Sstevel@tonic-gate 	ct->ct_bufferReadPtr = NULL;
294*0Sstevel@tonic-gate 
295*0Sstevel@tonic-gate 	/* Check the current state of the fd. */
296*0Sstevel@tonic-gate 	if ((flag = fcntl(fd, F_GETFL, 0)) < 0) {
297*0Sstevel@tonic-gate 		(void) syslog(LOG_ERR, "_clnt_vc_create_timed : %s",
298*0Sstevel@tonic-gate 		    no_fcntl_getfl_str);
299*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
300*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = errno;
301*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
302*0Sstevel@tonic-gate 		sig_mutex_unlock(&vctbl_lock);
303*0Sstevel@tonic-gate 		goto err;
304*0Sstevel@tonic-gate 	}
305*0Sstevel@tonic-gate 	ct->ct_is_blocking = flag&O_NONBLOCK? FALSE:TRUE;
306*0Sstevel@tonic-gate 
307*0Sstevel@tonic-gate 	if (set_up_connection(fd, svcaddr, ct, tp) == FALSE) {
308*0Sstevel@tonic-gate 		sig_mutex_unlock(&vctbl_lock);
309*0Sstevel@tonic-gate 		goto err;
310*0Sstevel@tonic-gate 	}
311*0Sstevel@tonic-gate 	sig_mutex_unlock(&vctbl_lock);
312*0Sstevel@tonic-gate 
313*0Sstevel@tonic-gate 	/*
314*0Sstevel@tonic-gate 	 * Set up other members of private data struct
315*0Sstevel@tonic-gate 	 */
316*0Sstevel@tonic-gate 	ct->ct_fd = fd;
317*0Sstevel@tonic-gate 	/*
318*0Sstevel@tonic-gate 	 * The actual value will be set by clnt_call or clnt_control
319*0Sstevel@tonic-gate 	 */
320*0Sstevel@tonic-gate 	ct->ct_wait = 30000;
321*0Sstevel@tonic-gate 	ct->ct_waitset = FALSE;
322*0Sstevel@tonic-gate 	/*
323*0Sstevel@tonic-gate 	 * By default, closeit is always FALSE. It is users responsibility
324*0Sstevel@tonic-gate 	 * to do a t_close on it, else the user may use clnt_control
325*0Sstevel@tonic-gate 	 * to let clnt_destroy do it for him/her.
326*0Sstevel@tonic-gate 	 */
327*0Sstevel@tonic-gate 	ct->ct_closeit = FALSE;
328*0Sstevel@tonic-gate 
329*0Sstevel@tonic-gate 	/*
330*0Sstevel@tonic-gate 	 * Initialize call message
331*0Sstevel@tonic-gate 	 */
332*0Sstevel@tonic-gate 	(void) gettimeofday(&now, (struct timezone *)0);
333*0Sstevel@tonic-gate 	call_msg.rm_xid = getpid() ^ now.tv_sec ^ now.tv_usec;
334*0Sstevel@tonic-gate 	call_msg.rm_call.cb_prog = prog;
335*0Sstevel@tonic-gate 	call_msg.rm_call.cb_vers = vers;
336*0Sstevel@tonic-gate 
337*0Sstevel@tonic-gate 	/*
338*0Sstevel@tonic-gate 	 * pre-serialize the static part of the call msg and stash it away
339*0Sstevel@tonic-gate 	 */
340*0Sstevel@tonic-gate 	xdrmem_create(&(ct->ct_xdrs), ct->ct_mcall, MCALL_MSG_SIZE, XDR_ENCODE);
341*0Sstevel@tonic-gate 	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
342*0Sstevel@tonic-gate 		goto err;
343*0Sstevel@tonic-gate 	}
344*0Sstevel@tonic-gate 	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
345*0Sstevel@tonic-gate 	XDR_DESTROY(&(ct->ct_xdrs));
346*0Sstevel@tonic-gate 
347*0Sstevel@tonic-gate 	if (t_getinfo(fd, &tinfo) == -1) {
348*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
349*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = t_errno;
350*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
351*0Sstevel@tonic-gate 		goto err;
352*0Sstevel@tonic-gate 	}
353*0Sstevel@tonic-gate 	/*
354*0Sstevel@tonic-gate 	 * Find the receive and the send size
355*0Sstevel@tonic-gate 	 */
356*0Sstevel@tonic-gate 	sendsz = __rpc_get_t_size((int)sendsz, tinfo.tsdu);
357*0Sstevel@tonic-gate 	recvsz = __rpc_get_t_size((int)recvsz, tinfo.tsdu);
358*0Sstevel@tonic-gate 	if ((sendsz == 0) || (recvsz == 0)) {
359*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
360*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
361*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
362*0Sstevel@tonic-gate 		goto err;
363*0Sstevel@tonic-gate 	}
364*0Sstevel@tonic-gate 	ct->ct_tsdu = tinfo.tsdu;
365*0Sstevel@tonic-gate 	/*
366*0Sstevel@tonic-gate 	 * Create a client handle which uses xdrrec for serialization
367*0Sstevel@tonic-gate 	 * and authnone for authentication.
368*0Sstevel@tonic-gate 	 */
369*0Sstevel@tonic-gate 	ct->ct_xdrs.x_ops = NULL;
370*0Sstevel@tonic-gate 	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz, (caddr_t)ct,
371*0Sstevel@tonic-gate 			read_vc, write_vc);
372*0Sstevel@tonic-gate 	if (ct->ct_xdrs.x_ops == NULL) {
373*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
374*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = 0;
375*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = ENOMEM;
376*0Sstevel@tonic-gate 		goto err;
377*0Sstevel@tonic-gate 	}
378*0Sstevel@tonic-gate 	cl->cl_ops = clnt_vc_ops();
379*0Sstevel@tonic-gate 	cl->cl_private = (caddr_t)ct;
380*0Sstevel@tonic-gate 	cl->cl_auth = authnone_create();
381*0Sstevel@tonic-gate 	cl->cl_tp = (char *)NULL;
382*0Sstevel@tonic-gate 	cl->cl_netid = (char *)NULL;
383*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_create, 1, prog, vers);
384*0Sstevel@tonic-gate 	return (cl);
385*0Sstevel@tonic-gate 
386*0Sstevel@tonic-gate err:
387*0Sstevel@tonic-gate 	if (cl) {
388*0Sstevel@tonic-gate 		if (ct) {
389*0Sstevel@tonic-gate 			if (ct->ct_addr.len)
390*0Sstevel@tonic-gate 				mem_free(ct->ct_addr.buf, ct->ct_addr.len);
391*0Sstevel@tonic-gate 			mem_free((caddr_t)ct, sizeof (struct ct_data));
392*0Sstevel@tonic-gate 		}
393*0Sstevel@tonic-gate 		mem_free((caddr_t)cl, sizeof (CLIENT));
394*0Sstevel@tonic-gate 	}
395*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_create, 1, prog, vers);
396*0Sstevel@tonic-gate 	return ((CLIENT *)NULL);
397*0Sstevel@tonic-gate }
398*0Sstevel@tonic-gate 
399*0Sstevel@tonic-gate #define	TCPOPT_BUFSIZE 128
400*0Sstevel@tonic-gate 
401*0Sstevel@tonic-gate /*
402*0Sstevel@tonic-gate  * Set tcp connection timeout value.
403*0Sstevel@tonic-gate  * Retun 0 for success, -1 for failure.
404*0Sstevel@tonic-gate  */
405*0Sstevel@tonic-gate static int
406*0Sstevel@tonic-gate _set_tcp_conntime(int fd, int optval)
407*0Sstevel@tonic-gate {
408*0Sstevel@tonic-gate 	struct t_optmgmt req, res;
409*0Sstevel@tonic-gate 	struct opthdr *opt;
410*0Sstevel@tonic-gate 	int *ip;
411*0Sstevel@tonic-gate 	char buf[TCPOPT_BUFSIZE];
412*0Sstevel@tonic-gate 
413*0Sstevel@tonic-gate 	opt = (struct opthdr *)buf;
414*0Sstevel@tonic-gate 	opt->level =  IPPROTO_TCP;
415*0Sstevel@tonic-gate 	opt->name = TCP_CONN_ABORT_THRESHOLD;
416*0Sstevel@tonic-gate 	opt->len = sizeof (int);
417*0Sstevel@tonic-gate 
418*0Sstevel@tonic-gate 	req.flags = T_NEGOTIATE;
419*0Sstevel@tonic-gate 	req.opt.len = sizeof (struct opthdr) + opt->len;
420*0Sstevel@tonic-gate 	req.opt.buf = (char *)opt;
421*0Sstevel@tonic-gate 	ip = (int *)((char *)buf + sizeof (struct opthdr));
422*0Sstevel@tonic-gate 	*ip = optval;
423*0Sstevel@tonic-gate 
424*0Sstevel@tonic-gate 	res.flags = 0;
425*0Sstevel@tonic-gate 	res.opt.buf = (char *)buf;
426*0Sstevel@tonic-gate 	res.opt.maxlen = sizeof (buf);
427*0Sstevel@tonic-gate 	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
428*0Sstevel@tonic-gate 		return (-1);
429*0Sstevel@tonic-gate 	}
430*0Sstevel@tonic-gate 	return (0);
431*0Sstevel@tonic-gate }
432*0Sstevel@tonic-gate 
/*
 * Get the current TCP connection abort threshold (the connection
 * timeout), in milliseconds.
 * Returns the current threshold value on success, -1 on failure.
 * (Note: success returns the value itself, not 0.)
 */
static int
_get_tcp_conntime(int fd)
{
	struct t_optmgmt req, res;
	struct opthdr *opt;
	int *ip, retval;
	char buf[TCPOPT_BUFSIZE];

	/* Build the option header; the value slot is an int of 0. */
	opt = (struct opthdr *)buf;
	opt->level =  IPPROTO_TCP;
	opt->name = TCP_CONN_ABORT_THRESHOLD;
	opt->len = sizeof (int);

	req.flags = T_CURRENT;	/* query only, do not negotiate */
	req.opt.len = sizeof (struct opthdr) + opt->len;
	req.opt.buf = (char *)opt;
	ip = (int *)((char *)buf + sizeof (struct opthdr));
	*ip = 0;

	res.flags = 0;
	res.opt.buf = (char *)buf;
	res.opt.maxlen = sizeof (buf);
	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
		return (-1);
	}

	/* The current value is returned in the option buffer. */
	ip = (int *)((char *)buf + sizeof (struct opthdr));
	retval = *ip;
	return (retval);
}
467*0Sstevel@tonic-gate 
468*0Sstevel@tonic-gate static bool_t
469*0Sstevel@tonic-gate set_up_connection(fd, svcaddr, ct, tp)
470*0Sstevel@tonic-gate 	int fd;
471*0Sstevel@tonic-gate 	struct netbuf *svcaddr;		/* servers address */
472*0Sstevel@tonic-gate 	struct ct_data *ct;
473*0Sstevel@tonic-gate 	struct timeval *tp;
474*0Sstevel@tonic-gate {
475*0Sstevel@tonic-gate 	int state;
476*0Sstevel@tonic-gate 	struct t_call sndcallstr, *rcvcall;
477*0Sstevel@tonic-gate 	int nconnect;
478*0Sstevel@tonic-gate 	bool_t connected, do_rcv_connect;
479*0Sstevel@tonic-gate 	int curr_time = 0;
480*0Sstevel@tonic-gate 
481*0Sstevel@tonic-gate 	ct->ct_addr.len = 0;
482*0Sstevel@tonic-gate 	state = t_getstate(fd);
483*0Sstevel@tonic-gate 	if (state == -1) {
484*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_TLIERROR;
485*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_errno = 0;
486*0Sstevel@tonic-gate 		rpc_createerr.cf_error.re_terrno = t_errno;
487*0Sstevel@tonic-gate 		return (FALSE);
488*0Sstevel@tonic-gate 	}
489*0Sstevel@tonic-gate 
490*0Sstevel@tonic-gate #ifdef DEBUG
491*0Sstevel@tonic-gate 	fprintf(stderr, "set_up_connection: state = %d\n", state);
492*0Sstevel@tonic-gate #endif
493*0Sstevel@tonic-gate 	switch (state) {
494*0Sstevel@tonic-gate 	case T_IDLE:
495*0Sstevel@tonic-gate 		if (svcaddr == (struct netbuf *)NULL) {
496*0Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
497*0Sstevel@tonic-gate 			return (FALSE);
498*0Sstevel@tonic-gate 		}
499*0Sstevel@tonic-gate 		/*
500*0Sstevel@tonic-gate 		 * Connect only if state is IDLE and svcaddr known
501*0Sstevel@tonic-gate 		 */
502*0Sstevel@tonic-gate /* LINTED pointer alignment */
503*0Sstevel@tonic-gate 		rcvcall = (struct t_call *)t_alloc(fd, T_CALL, T_OPT|T_ADDR);
504*0Sstevel@tonic-gate 		if (rcvcall == NULL) {
505*0Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
506*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
507*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
508*0Sstevel@tonic-gate 			return (FALSE);
509*0Sstevel@tonic-gate 		}
510*0Sstevel@tonic-gate 		rcvcall->udata.maxlen = 0;
511*0Sstevel@tonic-gate 		sndcallstr.addr = *svcaddr;
512*0Sstevel@tonic-gate 		sndcallstr.opt.len = 0;
513*0Sstevel@tonic-gate 		sndcallstr.udata.len = 0;
514*0Sstevel@tonic-gate 		/*
515*0Sstevel@tonic-gate 		 * Even NULL could have sufficed for rcvcall, because
516*0Sstevel@tonic-gate 		 * the address returned is same for all cases except
517*0Sstevel@tonic-gate 		 * for the gateway case, and hence required.
518*0Sstevel@tonic-gate 		 */
519*0Sstevel@tonic-gate 		connected = FALSE;
520*0Sstevel@tonic-gate 		do_rcv_connect = FALSE;
521*0Sstevel@tonic-gate 
522*0Sstevel@tonic-gate 		/*
523*0Sstevel@tonic-gate 		 * If there is a timeout value specified, we will try to
524*0Sstevel@tonic-gate 		 * reset the tcp connection timeout. If the transport does
525*0Sstevel@tonic-gate 		 * not support the TCP_CONN_ABORT_THRESHOLD option or fails
526*0Sstevel@tonic-gate 		 * for other reason, default timeout will be used.
527*0Sstevel@tonic-gate 		 */
528*0Sstevel@tonic-gate 		if (tp != NULL) {
529*0Sstevel@tonic-gate 		    int ms;
530*0Sstevel@tonic-gate 
531*0Sstevel@tonic-gate 		    /* TCP_CONN_ABORT_THRESHOLD takes int value in millisecs */
532*0Sstevel@tonic-gate 		    ms = tp->tv_sec * SECS_TO_MS + tp->tv_usec * USECS_TO_MS;
533*0Sstevel@tonic-gate 		    if (((curr_time = _get_tcp_conntime(fd)) != -1) &&
534*0Sstevel@tonic-gate 			(_set_tcp_conntime(fd, ms) == 0)) {
535*0Sstevel@tonic-gate #ifdef DEBUG
536*0Sstevel@tonic-gate 			fprintf(stderr, "set_up_connection: set tcp ");
537*0Sstevel@tonic-gate 			fprintf(stderr, "connection timeout to %d ms\n", ms);
538*0Sstevel@tonic-gate #endif
539*0Sstevel@tonic-gate 		    }
540*0Sstevel@tonic-gate 		}
541*0Sstevel@tonic-gate 
542*0Sstevel@tonic-gate 		for (nconnect = 0; nconnect < 3; nconnect++) {
543*0Sstevel@tonic-gate 			if (t_connect(fd, &sndcallstr, rcvcall) != -1) {
544*0Sstevel@tonic-gate 				connected = TRUE;
545*0Sstevel@tonic-gate 				break;
546*0Sstevel@tonic-gate 			}
547*0Sstevel@tonic-gate 			if (!(t_errno == TSYSERR && errno == EINTR)) {
548*0Sstevel@tonic-gate 				break;
549*0Sstevel@tonic-gate 			}
550*0Sstevel@tonic-gate 			if ((state = t_getstate(fd)) == T_OUTCON) {
551*0Sstevel@tonic-gate 				do_rcv_connect = TRUE;
552*0Sstevel@tonic-gate 				break;
553*0Sstevel@tonic-gate 			}
554*0Sstevel@tonic-gate 			if (state != T_IDLE) {
555*0Sstevel@tonic-gate 				break;
556*0Sstevel@tonic-gate 			}
557*0Sstevel@tonic-gate 		}
558*0Sstevel@tonic-gate 		if (do_rcv_connect) {
559*0Sstevel@tonic-gate 			do {
560*0Sstevel@tonic-gate 				if (t_rcvconnect(fd, rcvcall) != -1) {
561*0Sstevel@tonic-gate 					connected = TRUE;
562*0Sstevel@tonic-gate 					break;
563*0Sstevel@tonic-gate 				}
564*0Sstevel@tonic-gate 			} while (t_errno == TSYSERR && errno == EINTR);
565*0Sstevel@tonic-gate 		}
566*0Sstevel@tonic-gate 
567*0Sstevel@tonic-gate 		/*
568*0Sstevel@tonic-gate 		 * Set the connection timeout back to its old value.
569*0Sstevel@tonic-gate 		 */
570*0Sstevel@tonic-gate 		if (curr_time) {
571*0Sstevel@tonic-gate 			_set_tcp_conntime(fd, curr_time);
572*0Sstevel@tonic-gate 		}
573*0Sstevel@tonic-gate 
574*0Sstevel@tonic-gate 		if (!connected) {
575*0Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
576*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
577*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
578*0Sstevel@tonic-gate 			(void) t_free((char *)rcvcall, T_CALL);
579*0Sstevel@tonic-gate #ifdef DEBUG
580*0Sstevel@tonic-gate 			fprintf(stderr, "clnt_vc: t_connect error %d\n",
581*0Sstevel@tonic-gate 				rpc_createerr.cf_error.re_terrno);
582*0Sstevel@tonic-gate #endif
583*0Sstevel@tonic-gate 			return (FALSE);
584*0Sstevel@tonic-gate 		}
585*0Sstevel@tonic-gate 
586*0Sstevel@tonic-gate 		/* Free old area if allocated */
587*0Sstevel@tonic-gate 		if (ct->ct_addr.buf)
588*0Sstevel@tonic-gate 			free(ct->ct_addr.buf);
589*0Sstevel@tonic-gate 		ct->ct_addr = rcvcall->addr;	/* To get the new address */
590*0Sstevel@tonic-gate 		/* So that address buf does not get freed */
591*0Sstevel@tonic-gate 		rcvcall->addr.buf = NULL;
592*0Sstevel@tonic-gate 		(void) t_free((char *)rcvcall, T_CALL);
593*0Sstevel@tonic-gate 		break;
594*0Sstevel@tonic-gate 	case T_DATAXFER:
595*0Sstevel@tonic-gate 	case T_OUTCON:
596*0Sstevel@tonic-gate 		if (svcaddr == (struct netbuf *)NULL) {
597*0Sstevel@tonic-gate 			/*
598*0Sstevel@tonic-gate 			 * svcaddr could also be NULL in cases where the
599*0Sstevel@tonic-gate 			 * client is already bound and connected.
600*0Sstevel@tonic-gate 			 */
601*0Sstevel@tonic-gate 			ct->ct_addr.len = 0;
602*0Sstevel@tonic-gate 		} else {
603*0Sstevel@tonic-gate 			ct->ct_addr.buf = malloc(svcaddr->len);
604*0Sstevel@tonic-gate 			if (ct->ct_addr.buf == (char *)NULL) {
605*0Sstevel@tonic-gate 				(void) syslog(LOG_ERR, clnt_vc_errstr,
606*0Sstevel@tonic-gate 					clnt_vc_str, __no_mem_str);
607*0Sstevel@tonic-gate 				rpc_createerr.cf_stat = RPC_SYSTEMERROR;
608*0Sstevel@tonic-gate 				rpc_createerr.cf_error.re_errno = errno;
609*0Sstevel@tonic-gate 				rpc_createerr.cf_error.re_terrno = 0;
610*0Sstevel@tonic-gate 				return (FALSE);
611*0Sstevel@tonic-gate 			}
612*0Sstevel@tonic-gate 			(void) memcpy(ct->ct_addr.buf, svcaddr->buf,
613*0Sstevel@tonic-gate 					(int)svcaddr->len);
614*0Sstevel@tonic-gate 			ct->ct_addr.len = ct->ct_addr.maxlen = svcaddr->len;
615*0Sstevel@tonic-gate 		}
616*0Sstevel@tonic-gate 		break;
617*0Sstevel@tonic-gate 	default:
618*0Sstevel@tonic-gate 		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
619*0Sstevel@tonic-gate 		return (FALSE);
620*0Sstevel@tonic-gate 	}
621*0Sstevel@tonic-gate 	return (TRUE);
622*0Sstevel@tonic-gate }
623*0Sstevel@tonic-gate 
/*
 * Two-way RPC over the connection-oriented transport.  Marshals the
 * prefabricated call header (ct_mcall), the procedure number,
 * credentials and arguments, ships the record, then loops decoding
 * reply records until one with a matching transaction id arrives.
 * On an authentication error the credentials are refreshed and the
 * whole call retried (at most twice).  Returns the RPC status, also
 * recorded in the per-thread rpc_callerr.
 */
static enum clnt_stat
clnt_vc_call(cl, proc, xdr_args, args_ptr, xdr_results, results_ptr, timeout)
	CLIENT *cl;		/* client handle */
	rpcproc_t proc;		/* procedure number */
	xdrproc_t xdr_args;	/* xdr routine for the arguments */
	caddr_t args_ptr;	/* pointer to the arguments */
	xdrproc_t xdr_results;	/* xdr routine for the results (0 = batch) */
	caddr_t results_ptr;	/* pointer to the results */
	struct timeval timeout;	/* time to wait for the reply */
{
/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	uint32_t x_id;
/* LINTED pointer alignment */
	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */
	bool_t shipnow;
	int refreshes = 2;	/* max credential-refresh retries */

	trace3(TR_clnt_vc_call, 0, cl, proc);

	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
		rpc_callerr.re_status = RPC_FAILED;
		rpc_callerr.re_errno = errno;
		/*
		 * NOTE(review): unlock is issued even though the lock was
		 * not acquired -- presumably rpc_fd_unlock() tolerates
		 * that; confirm before changing this path.
		 */
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (RPC_FAILED);
	}

	ct->ct_is_oneway = FALSE;
	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
		/* Drain any queued one-way data before a two-way call. */
		if (do_flush(ct, RPC_CL_BLOCKING_FLUSH) != 0) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (RPC_FAILED);  /* XXX */
		}
	}

	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = __rpc_timeval_to_msec(&timeout);
	} else {
		/* A CLSET_TIMEOUT setting overrides the per-call timeout. */
		timeout.tv_sec = (ct->ct_wait / 1000);
		timeout.tv_usec = (ct->ct_wait % 1000) * 1000;
	}

	/*
	 * A null results routine together with a zero timeout requests a
	 * batched call: buffer the request without flushing it.
	 */
	shipnow = ((xdr_results == (xdrproc_t)0) && (timeout.tv_sec == 0) &&
	    (timeout.tv_usec == 0)) ? FALSE : TRUE;
call_again:
	xdrs->x_op = XDR_ENCODE;
	rpc_callerr.re_status = RPC_SUCCESS;
	/*
	 * Due to little endian byte order, it is necessary to convert to host
	 * format before decrementing xid.
	 */
	x_id = ntohl(*msg_x_id) - 1;
	*msg_x_id = htonl(x_id);

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		/* Header bytes, proc number, credentials, then arguments. */
		if ((! XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
		    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (! xdr_args(xdrs, args_ptr))) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			trace3(TR_clnt_vc_call, 1, cl, proc);
			return (rpc_callerr.re_status);
		}
	} else {
		/*
		 * RPCSEC_GSS: the proc number is appended to the prebuilt
		 * header and the whole request is handed to
		 * __rpc_gss_wrap() for protection.
		 */
/* LINTED pointer alignment */
		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
		IXDR_PUT_U_INT32(u, proc);
		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			trace3(TR_clnt_vc_call, 1, cl, proc);
			return (rpc_callerr.re_status);
		}
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		trace3(TR_clnt_vc_call, 1, cl, proc);
		return (rpc_callerr.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		/* Batched call: buffered but not sent; report success. */
		rpc_fd_unlock(vctbl, ct->ct_fd);
		trace3(TR_clnt_vc_call, 1, cl, proc);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		trace3(TR_clnt_vc_call, 1, cl, proc);
		return (rpc_callerr.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	/*CONSTANTCONDITION*/
	while (TRUE) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			trace3(TR_clnt_vc_call, 1, cl, proc);
			return (rpc_callerr.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			/*
			 * Decode failed without recording an error:
			 * treat the record as stale and keep reading.
			 */
			if (rpc_callerr.re_status == RPC_SUCCESS)
				continue;
			rpc_fd_unlock(vctbl, ct->ct_fd);
			trace3(TR_clnt_vc_call, 1, cl, proc);
			return (rpc_callerr.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
	    (reply_msg.acpted_rply.ar_stat == SUCCESS))
		rpc_callerr.re_status = RPC_SUCCESS;
	else
		__seterr_reply(&reply_msg, &(rpc_callerr));

	if (rpc_callerr.re_status == RPC_SUCCESS) {
		/* Verify the server's verifier, then decode the results. */
		if (! AUTH_VALIDATE(cl->cl_auth,
				&reply_msg.acpted_rply.ar_verf)) {
			rpc_callerr.re_status = RPC_AUTHERROR;
			rpc_callerr.re_why = AUTH_INVALIDRESP;
		} else if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
			if (!(*xdr_results)(xdrs, results_ptr)) {
				if (rpc_callerr.re_status == RPC_SUCCESS)
				    rpc_callerr.re_status = RPC_CANTDECODERES;
			}
		} else if (!__rpc_gss_unwrap(cl->cl_auth, xdrs, xdr_results,
							results_ptr)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTDECODERES;
		}
	}	/* end successful completion */
	/*
	 * If unsuccesful AND error is an authentication error
	 * then refresh credentials and try again, else break
	 */
	else if (rpc_callerr.re_status == RPC_AUTHERROR) {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
		else
			/*
			 * We are setting rpc_callerr here given that libnsl
			 * is not reentrant thereby reinitializing the TSD.
			 * If not set here then success could be returned even
			 * though refresh failed.
			 */
			rpc_callerr.re_status = RPC_AUTHERROR;
	} /* end of unsuccessful completion */
	/* free verifier ... */
	if (reply_msg.rm_reply.rp_stat == MSG_ACCEPTED &&
			reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
		xdrs->x_op = XDR_FREE;
		(void) xdr_opaque_auth(xdrs, &(reply_msg.acpted_rply.ar_verf));
	}
	rpc_fd_unlock(vctbl, ct->ct_fd);
	trace3(TR_clnt_vc_call, 1, cl, proc);
	return (rpc_callerr.re_status);
}
806*0Sstevel@tonic-gate 
807*0Sstevel@tonic-gate static enum clnt_stat
808*0Sstevel@tonic-gate clnt_vc_send(cl, proc, xdr_args, args_ptr)
809*0Sstevel@tonic-gate 	CLIENT *cl;
810*0Sstevel@tonic-gate 	rpcproc_t proc;
811*0Sstevel@tonic-gate 	xdrproc_t xdr_args;
812*0Sstevel@tonic-gate 	caddr_t args_ptr;
813*0Sstevel@tonic-gate {
814*0Sstevel@tonic-gate /* LINTED pointer alignment */
815*0Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
816*0Sstevel@tonic-gate 	XDR *xdrs = &(ct->ct_xdrs);
817*0Sstevel@tonic-gate 	uint32_t x_id;
818*0Sstevel@tonic-gate /* LINTED pointer alignment */
819*0Sstevel@tonic-gate 	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */
820*0Sstevel@tonic-gate 
821*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_send, 0, cl, proc);
822*0Sstevel@tonic-gate 
823*0Sstevel@tonic-gate 	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
824*0Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_FAILED;
825*0Sstevel@tonic-gate 		rpc_callerr.re_errno = errno;
826*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
827*0Sstevel@tonic-gate 		return (RPC_FAILED);
828*0Sstevel@tonic-gate 	}
829*0Sstevel@tonic-gate 
830*0Sstevel@tonic-gate 	ct->ct_is_oneway = TRUE;
831*0Sstevel@tonic-gate 
832*0Sstevel@tonic-gate 	xdrs->x_op = XDR_ENCODE;
833*0Sstevel@tonic-gate 	rpc_callerr.re_status = RPC_SUCCESS;
834*0Sstevel@tonic-gate 	/*
835*0Sstevel@tonic-gate 	 * Due to little endian byte order, it is necessary to convert to host
836*0Sstevel@tonic-gate 	 * format before decrementing xid.
837*0Sstevel@tonic-gate 	 */
838*0Sstevel@tonic-gate 	x_id = ntohl(*msg_x_id) - 1;
839*0Sstevel@tonic-gate 	*msg_x_id = htonl(x_id);
840*0Sstevel@tonic-gate 
841*0Sstevel@tonic-gate 	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
842*0Sstevel@tonic-gate 		if ((! XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
843*0Sstevel@tonic-gate 		    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
844*0Sstevel@tonic-gate 		    (! AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
845*0Sstevel@tonic-gate 		    (! xdr_args(xdrs, args_ptr))) {
846*0Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
847*0Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
848*0Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
849*0Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
850*0Sstevel@tonic-gate 			trace3(TR_clnt_vc_send, 1, cl, proc);
851*0Sstevel@tonic-gate 			return (rpc_callerr.re_status);
852*0Sstevel@tonic-gate 		}
853*0Sstevel@tonic-gate 	} else {
854*0Sstevel@tonic-gate /* LINTED pointer alignment */
855*0Sstevel@tonic-gate 		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
856*0Sstevel@tonic-gate 		IXDR_PUT_U_INT32(u, proc);
857*0Sstevel@tonic-gate 		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
858*0Sstevel@tonic-gate 		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
859*0Sstevel@tonic-gate 			if (rpc_callerr.re_status == RPC_SUCCESS)
860*0Sstevel@tonic-gate 				rpc_callerr.re_status = RPC_CANTENCODEARGS;
861*0Sstevel@tonic-gate 			(void) xdrrec_endofrecord(xdrs, TRUE);
862*0Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
863*0Sstevel@tonic-gate 			trace3(TR_clnt_vc_send, 1, cl, proc);
864*0Sstevel@tonic-gate 			return (rpc_callerr.re_status);
865*0Sstevel@tonic-gate 		}
866*0Sstevel@tonic-gate 	}
867*0Sstevel@tonic-gate 
868*0Sstevel@tonic-gate 	/*
869*0Sstevel@tonic-gate 	 * Do not need to check errors, as the following code does
870*0Sstevel@tonic-gate 	 * not depend on the successful completion of the call.
871*0Sstevel@tonic-gate 	 * An error, if any occurs, is reported through
872*0Sstevel@tonic-gate 	 * rpc_callerr.re_status.
873*0Sstevel@tonic-gate 	 */
874*0Sstevel@tonic-gate 	xdrrec_endofrecord(xdrs, TRUE);
875*0Sstevel@tonic-gate 
876*0Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
877*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_call, 1, cl, proc);
878*0Sstevel@tonic-gate 	return (rpc_callerr.re_status);
879*0Sstevel@tonic-gate }
880*0Sstevel@tonic-gate 
881*0Sstevel@tonic-gate static void
882*0Sstevel@tonic-gate clnt_vc_geterr(cl, errp)
883*0Sstevel@tonic-gate 	CLIENT *cl;
884*0Sstevel@tonic-gate 	struct rpc_err *errp;
885*0Sstevel@tonic-gate {
886*0Sstevel@tonic-gate 	trace2(TR_clnt_vc_geterr, 0, cl);
887*0Sstevel@tonic-gate 	*errp = rpc_callerr;
888*0Sstevel@tonic-gate 	trace2(TR_clnt_vc_geterr, 1, cl);
889*0Sstevel@tonic-gate }
890*0Sstevel@tonic-gate 
/*
 * Free results previously decoded by clnt_vc_call() by running the
 * result xdr routine in XDR_FREE mode.  Returns that routine's status.
 */
static bool_t
clnt_vc_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;		/* client handle */
	xdrproc_t xdr_res;	/* xdr routine that decoded the results */
	caddr_t res_ptr;	/* pointer to the results to free */
{
/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	bool_t dummy;

	trace2(TR_clnt_vc_freeres, 0, cl);
	/*
	 * NOTE(review): unlike the other ops, the rpc_fd_lock() return
	 * value is ignored here and freeing proceeds regardless --
	 * confirm this is intentional before changing.
	 */
	rpc_fd_lock(vctbl, ct->ct_fd);
	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	rpc_fd_unlock(vctbl, ct->ct_fd);
	trace2(TR_clnt_vc_freeres, 1, cl);
	return (dummy);
}
910*0Sstevel@tonic-gate 
/*
 * Abort is a deliberate no-op for this transport; the function exists
 * only to fill the CLIENT ops vector.  Only the trace probes fire.
 */
static void
clnt_vc_abort(void)
{
	trace1(TR_clnt_vc_abort, 0);
	trace1(TR_clnt_vc_abort, 1);
}
917*0Sstevel@tonic-gate 
918*0Sstevel@tonic-gate /*ARGSUSED*/
919*0Sstevel@tonic-gate static bool_t
920*0Sstevel@tonic-gate clnt_vc_control(cl, request, info)
921*0Sstevel@tonic-gate 	CLIENT *cl;
922*0Sstevel@tonic-gate 	int request;
923*0Sstevel@tonic-gate 	char *info;
924*0Sstevel@tonic-gate {
925*0Sstevel@tonic-gate 	bool_t ret;
926*0Sstevel@tonic-gate /* LINTED pointer alignment */
927*0Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
928*0Sstevel@tonic-gate 
929*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_control, 0, cl, request);
930*0Sstevel@tonic-gate 
931*0Sstevel@tonic-gate 	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
932*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
933*0Sstevel@tonic-gate 		return (RPC_FAILED);
934*0Sstevel@tonic-gate 	}
935*0Sstevel@tonic-gate 
936*0Sstevel@tonic-gate 	switch (request) {
937*0Sstevel@tonic-gate 	case CLSET_FD_CLOSE:
938*0Sstevel@tonic-gate 		ct->ct_closeit = TRUE;
939*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
940*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
941*0Sstevel@tonic-gate 		return (TRUE);
942*0Sstevel@tonic-gate 	case CLSET_FD_NCLOSE:
943*0Sstevel@tonic-gate 		ct->ct_closeit = FALSE;
944*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
945*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
946*0Sstevel@tonic-gate 		return (TRUE);
947*0Sstevel@tonic-gate 	case CLFLUSH:
948*0Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
949*0Sstevel@tonic-gate 			int res;
950*0Sstevel@tonic-gate 			res = do_flush(ct, (info == NULL ||
951*0Sstevel@tonic-gate 			    *(int *)info == RPC_CL_DEFAULT_FLUSH)?
952*0Sstevel@tonic-gate 			    ct->ct_blocking_mode: *(int *)info);
953*0Sstevel@tonic-gate 			ret = (0 == res);
954*0Sstevel@tonic-gate 		}
955*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
956*0Sstevel@tonic-gate 		return (ret);
957*0Sstevel@tonic-gate 	}
958*0Sstevel@tonic-gate 
959*0Sstevel@tonic-gate 	/* for other requests which use info */
960*0Sstevel@tonic-gate 	if (info == NULL) {
961*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
962*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
963*0Sstevel@tonic-gate 		return (FALSE);
964*0Sstevel@tonic-gate 	}
965*0Sstevel@tonic-gate 	switch (request) {
966*0Sstevel@tonic-gate 	case CLSET_TIMEOUT:
967*0Sstevel@tonic-gate /* LINTED pointer alignment */
968*0Sstevel@tonic-gate 		if (time_not_ok((struct timeval *)info)) {
969*0Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
970*0Sstevel@tonic-gate 			trace3(TR_clnt_vc_control, 1, cl, request);
971*0Sstevel@tonic-gate 			return (FALSE);
972*0Sstevel@tonic-gate 		}
973*0Sstevel@tonic-gate /* LINTED pointer alignment */
974*0Sstevel@tonic-gate 		ct->ct_wait = __rpc_timeval_to_msec((struct timeval *)info);
975*0Sstevel@tonic-gate 		ct->ct_waitset = TRUE;
976*0Sstevel@tonic-gate 		break;
977*0Sstevel@tonic-gate 	case CLGET_TIMEOUT:
978*0Sstevel@tonic-gate /* LINTED pointer alignment */
979*0Sstevel@tonic-gate 		((struct timeval *)info)->tv_sec = ct->ct_wait / 1000;
980*0Sstevel@tonic-gate /* LINTED pointer alignment */
981*0Sstevel@tonic-gate 		((struct timeval *)info)->tv_usec =
982*0Sstevel@tonic-gate 			(ct->ct_wait % 1000) * 1000;
983*0Sstevel@tonic-gate 		break;
984*0Sstevel@tonic-gate 	case CLGET_SERVER_ADDR:	/* For compatibility only */
985*0Sstevel@tonic-gate 		(void) memcpy(info, ct->ct_addr.buf, (int)ct->ct_addr.len);
986*0Sstevel@tonic-gate 		break;
987*0Sstevel@tonic-gate 	case CLGET_FD:
988*0Sstevel@tonic-gate /* LINTED pointer alignment */
989*0Sstevel@tonic-gate 		*(int *)info = ct->ct_fd;
990*0Sstevel@tonic-gate 		break;
991*0Sstevel@tonic-gate 	case CLGET_SVC_ADDR:
992*0Sstevel@tonic-gate 		/* The caller should not free this memory area */
993*0Sstevel@tonic-gate /* LINTED pointer alignment */
994*0Sstevel@tonic-gate 		*(struct netbuf *)info = ct->ct_addr;
995*0Sstevel@tonic-gate 		break;
996*0Sstevel@tonic-gate 	case CLSET_SVC_ADDR:		/* set to new address */
997*0Sstevel@tonic-gate #ifdef undef
998*0Sstevel@tonic-gate 		/*
999*0Sstevel@tonic-gate 		 * XXX: once the t_snddis(), followed by t_connect() starts to
1000*0Sstevel@tonic-gate 		 * work, this ifdef should be removed.  CLIENT handle reuse
1001*0Sstevel@tonic-gate 		 * would then be possible for COTS as well.
1002*0Sstevel@tonic-gate 		 */
1003*0Sstevel@tonic-gate 		if (t_snddis(ct->ct_fd, NULL) == -1) {
1004*0Sstevel@tonic-gate 			rpc_createerr.cf_stat = RPC_TLIERROR;
1005*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_terrno = t_errno;
1006*0Sstevel@tonic-gate 			rpc_createerr.cf_error.re_errno = errno;
1007*0Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
1008*0Sstevel@tonic-gate 			trace3(TR_clnt_vc_control, 1, cl, request);
1009*0Sstevel@tonic-gate 			return (FALSE);
1010*0Sstevel@tonic-gate 		}
1011*0Sstevel@tonic-gate 		ret = set_up_connection(ct->ct_fd, (struct netbuf *)info,
1012*0Sstevel@tonic-gate 			ct, NULL));
1013*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
1014*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
1015*0Sstevel@tonic-gate 		return (ret);
1016*0Sstevel@tonic-gate #else
1017*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
1018*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
1019*0Sstevel@tonic-gate 		return (FALSE);
1020*0Sstevel@tonic-gate #endif
1021*0Sstevel@tonic-gate 	case CLGET_XID:
1022*0Sstevel@tonic-gate 		/*
1023*0Sstevel@tonic-gate 		 * use the knowledge that xid is the
1024*0Sstevel@tonic-gate 		 * first element in the call structure
1025*0Sstevel@tonic-gate 		 * This will get the xid of the PREVIOUS call
1026*0Sstevel@tonic-gate 		 */
1027*0Sstevel@tonic-gate /* LINTED pointer alignment */
1028*0Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)ct->ct_mcall);
1029*0Sstevel@tonic-gate 		break;
1030*0Sstevel@tonic-gate 	case CLSET_XID:
1031*0Sstevel@tonic-gate 		/* This will set the xid of the NEXT call */
1032*0Sstevel@tonic-gate /* LINTED pointer alignment */
1033*0Sstevel@tonic-gate 		*(uint32_t *)ct->ct_mcall =  htonl(*(uint32_t *)info + 1);
1034*0Sstevel@tonic-gate 		/* increment by 1 as clnt_vc_call() decrements once */
1035*0Sstevel@tonic-gate 		break;
1036*0Sstevel@tonic-gate 	case CLGET_VERS:
1037*0Sstevel@tonic-gate 		/*
1038*0Sstevel@tonic-gate 		 * This RELIES on the information that, in the call body,
1039*0Sstevel@tonic-gate 		 * the version number field is the fifth field from the
1040*0Sstevel@tonic-gate 		 * begining of the RPC header. MUST be changed if the
1041*0Sstevel@tonic-gate 		 * call_struct is changed
1042*0Sstevel@tonic-gate 		 */
1043*0Sstevel@tonic-gate /* LINTED pointer alignment */
1044*0Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
1045*0Sstevel@tonic-gate 						4 * BYTES_PER_XDR_UNIT));
1046*0Sstevel@tonic-gate 		break;
1047*0Sstevel@tonic-gate 
1048*0Sstevel@tonic-gate 	case CLSET_VERS:
1049*0Sstevel@tonic-gate /* LINTED pointer alignment */
1050*0Sstevel@tonic-gate 		*(uint32_t *)(ct->ct_mcall + 4 * BYTES_PER_XDR_UNIT) =
1051*0Sstevel@tonic-gate /* LINTED pointer alignment */
1052*0Sstevel@tonic-gate 			htonl(*(uint32_t *)info);
1053*0Sstevel@tonic-gate 		break;
1054*0Sstevel@tonic-gate 
1055*0Sstevel@tonic-gate 	case CLGET_PROG:
1056*0Sstevel@tonic-gate 		/*
1057*0Sstevel@tonic-gate 		 * This RELIES on the information that, in the call body,
1058*0Sstevel@tonic-gate 		 * the program number field is the fourth field from the
1059*0Sstevel@tonic-gate 		 * begining of the RPC header. MUST be changed if the
1060*0Sstevel@tonic-gate 		 * call_struct is changed
1061*0Sstevel@tonic-gate 		 */
1062*0Sstevel@tonic-gate /* LINTED pointer alignment */
1063*0Sstevel@tonic-gate 		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
1064*0Sstevel@tonic-gate 						3 * BYTES_PER_XDR_UNIT));
1065*0Sstevel@tonic-gate 		break;
1066*0Sstevel@tonic-gate 
1067*0Sstevel@tonic-gate 	case CLSET_PROG:
1068*0Sstevel@tonic-gate /* LINTED pointer alignment */
1069*0Sstevel@tonic-gate 		*(uint32_t *)(ct->ct_mcall + 3 * BYTES_PER_XDR_UNIT) =
1070*0Sstevel@tonic-gate /* LINTED pointer alignment */
1071*0Sstevel@tonic-gate 			htonl(*(uint32_t *)info);
1072*0Sstevel@tonic-gate 		break;
1073*0Sstevel@tonic-gate 
1074*0Sstevel@tonic-gate 	case CLSET_IO_MODE:
1075*0Sstevel@tonic-gate 		if (!set_io_mode(ct, *(int *)info)) {
1076*0Sstevel@tonic-gate 		    rpc_fd_unlock(vctbl, ct->ct_fd);
1077*0Sstevel@tonic-gate 		    return (FALSE);
1078*0Sstevel@tonic-gate 		}
1079*0Sstevel@tonic-gate 		break;
1080*0Sstevel@tonic-gate 	case CLSET_FLUSH_MODE:
1081*0Sstevel@tonic-gate 		/* Set a specific FLUSH_MODE */
1082*0Sstevel@tonic-gate 		if (!set_flush_mode(ct, *(int *)info)) {
1083*0Sstevel@tonic-gate 		    rpc_fd_unlock(vctbl, ct->ct_fd);
1084*0Sstevel@tonic-gate 		    return (FALSE);
1085*0Sstevel@tonic-gate 		}
1086*0Sstevel@tonic-gate 		break;
1087*0Sstevel@tonic-gate 	case CLGET_FLUSH_MODE:
1088*0Sstevel@tonic-gate 		*(rpcflushmode_t *)info = ct->ct_blocking_mode;
1089*0Sstevel@tonic-gate 		break;
1090*0Sstevel@tonic-gate 
1091*0Sstevel@tonic-gate 	case CLGET_IO_MODE:
1092*0Sstevel@tonic-gate 		*(rpciomode_t *)info = ct->ct_io_mode;
1093*0Sstevel@tonic-gate 		break;
1094*0Sstevel@tonic-gate 
1095*0Sstevel@tonic-gate 	case CLGET_CURRENT_REC_SIZE:
1096*0Sstevel@tonic-gate 		/*
1097*0Sstevel@tonic-gate 		 * Returns the current amount of memory allocated
1098*0Sstevel@tonic-gate 		 * to pending requests
1099*0Sstevel@tonic-gate 		 */
1100*0Sstevel@tonic-gate 		*(int *)info = ct->ct_bufferPendingSize;
1101*0Sstevel@tonic-gate 		break;
1102*0Sstevel@tonic-gate 
1103*0Sstevel@tonic-gate 	case CLSET_CONNMAXREC_SIZE:
1104*0Sstevel@tonic-gate 		/* Cannot resize the buffer if it is used. */
1105*0Sstevel@tonic-gate 		if (ct->ct_bufferPendingSize != 0) {
1106*0Sstevel@tonic-gate 			rpc_fd_unlock(vctbl, ct->ct_fd);
1107*0Sstevel@tonic-gate 			return (FALSE);
1108*0Sstevel@tonic-gate 		}
1109*0Sstevel@tonic-gate 		/*
1110*0Sstevel@tonic-gate 		 * If the new size is equal to the current size,
1111*0Sstevel@tonic-gate 		 * there is nothing to do.
1112*0Sstevel@tonic-gate 		 */
1113*0Sstevel@tonic-gate 		if (ct->ct_bufferSize == *(uint_t *)info)
1114*0Sstevel@tonic-gate 			break;
1115*0Sstevel@tonic-gate 
1116*0Sstevel@tonic-gate 		ct->ct_bufferSize = *(uint_t *)info;
1117*0Sstevel@tonic-gate 		if (ct->ct_buffer) {
1118*0Sstevel@tonic-gate 			free(ct->ct_buffer);
1119*0Sstevel@tonic-gate 			ct->ct_buffer = NULL;
1120*0Sstevel@tonic-gate 			ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = NULL;
1121*0Sstevel@tonic-gate 		}
1122*0Sstevel@tonic-gate 		break;
1123*0Sstevel@tonic-gate 
1124*0Sstevel@tonic-gate 	case CLGET_CONNMAXREC_SIZE:
1125*0Sstevel@tonic-gate 		/*
1126*0Sstevel@tonic-gate 		 * Returns the size of buffer allocated
1127*0Sstevel@tonic-gate 		 * to pending requests
1128*0Sstevel@tonic-gate 		 */
1129*0Sstevel@tonic-gate 		*(uint_t *)info = ct->ct_bufferSize;
1130*0Sstevel@tonic-gate 		break;
1131*0Sstevel@tonic-gate 
1132*0Sstevel@tonic-gate 	default:
1133*0Sstevel@tonic-gate 		rpc_fd_unlock(vctbl, ct->ct_fd);
1134*0Sstevel@tonic-gate 		trace3(TR_clnt_vc_control, 1, cl, request);
1135*0Sstevel@tonic-gate 		return (FALSE);
1136*0Sstevel@tonic-gate 	}
1137*0Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct->ct_fd);
1138*0Sstevel@tonic-gate 	trace3(TR_clnt_vc_control, 1, cl, request);
1139*0Sstevel@tonic-gate 	return (TRUE);
1140*0Sstevel@tonic-gate }
1141*0Sstevel@tonic-gate 
1142*0Sstevel@tonic-gate static void
1143*0Sstevel@tonic-gate clnt_vc_destroy(cl)
1144*0Sstevel@tonic-gate 	CLIENT *cl;
1145*0Sstevel@tonic-gate {
1146*0Sstevel@tonic-gate /* LINTED pointer alignment */
1147*0Sstevel@tonic-gate 	struct ct_data *ct = (struct ct_data *)cl->cl_private;
1148*0Sstevel@tonic-gate 	int ct_fd = ct->ct_fd;
1149*0Sstevel@tonic-gate 
1150*0Sstevel@tonic-gate 	trace2(TR_clnt_vc_destroy, 0, cl);
1151*0Sstevel@tonic-gate 	rpc_fd_lock(vctbl, ct_fd);
1152*0Sstevel@tonic-gate 
1153*0Sstevel@tonic-gate 	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
1154*0Sstevel@tonic-gate 	    do_flush(ct, RPC_CL_BLOCKING_FLUSH);
1155*0Sstevel@tonic-gate 	    unregister_nb(ct);
1156*0Sstevel@tonic-gate 	}
1157*0Sstevel@tonic-gate 
1158*0Sstevel@tonic-gate 	if (ct->ct_closeit)
1159*0Sstevel@tonic-gate 		(void) t_close(ct_fd);
1160*0Sstevel@tonic-gate 	XDR_DESTROY(&(ct->ct_xdrs));
1161*0Sstevel@tonic-gate 	if (ct->ct_addr.buf)
1162*0Sstevel@tonic-gate 		(void) free(ct->ct_addr.buf);
1163*0Sstevel@tonic-gate 	mem_free((caddr_t)ct, sizeof (struct ct_data));
1164*0Sstevel@tonic-gate 	if (cl->cl_netid && cl->cl_netid[0])
1165*0Sstevel@tonic-gate 		mem_free(cl->cl_netid, strlen(cl->cl_netid) +1);
1166*0Sstevel@tonic-gate 	if (cl->cl_tp && cl->cl_tp[0])
1167*0Sstevel@tonic-gate 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
1168*0Sstevel@tonic-gate 	mem_free((caddr_t)cl, sizeof (CLIENT));
1169*0Sstevel@tonic-gate 	rpc_fd_unlock(vctbl, ct_fd);
1170*0Sstevel@tonic-gate 	trace2(TR_clnt_vc_destroy, 1, cl);
1171*0Sstevel@tonic-gate }
1172*0Sstevel@tonic-gate 
1173*0Sstevel@tonic-gate /*
1174*0Sstevel@tonic-gate  * Interface between xdr serializer and vc connection.
1175*0Sstevel@tonic-gate  * Behaves like the system calls, read & write, but keeps some error state
1176*0Sstevel@tonic-gate  * around for the rpc level.
1177*0Sstevel@tonic-gate  */
1178*0Sstevel@tonic-gate static int
1179*0Sstevel@tonic-gate read_vc(void *ct_tmp, caddr_t buf, int len)
1180*0Sstevel@tonic-gate {
1181*0Sstevel@tonic-gate 	static pthread_key_t pfdp_key;
1182*0Sstevel@tonic-gate 	struct pollfd *pfdp;
1183*0Sstevel@tonic-gate 	int npfd;		/* total number of pfdp allocated */
1184*0Sstevel@tonic-gate 	struct ct_data *ct = ct_tmp;
1185*0Sstevel@tonic-gate 	struct timeval starttime;
1186*0Sstevel@tonic-gate 	struct timeval curtime;
1187*0Sstevel@tonic-gate 	struct timeval time_waited;
1188*0Sstevel@tonic-gate 	struct timeval timeout;
1189*0Sstevel@tonic-gate 	int poll_time;
1190*0Sstevel@tonic-gate 	int delta;
1191*0Sstevel@tonic-gate 
1192*0Sstevel@tonic-gate 	trace2(TR_read_vc, 0, len);
1193*0Sstevel@tonic-gate 
1194*0Sstevel@tonic-gate 	if (len == 0) {
1195*0Sstevel@tonic-gate 		trace2(TR_read_vc, 1, len);
1196*0Sstevel@tonic-gate 		return (0);
1197*0Sstevel@tonic-gate 	}
1198*0Sstevel@tonic-gate 
1199*0Sstevel@tonic-gate 	/*
1200*0Sstevel@tonic-gate 	 * Allocate just one the first time.  thr_get_storage() may
1201*0Sstevel@tonic-gate 	 * return a larger buffer, left over from the last time we were
1202*0Sstevel@tonic-gate 	 * here, but that's OK.  realloc() will deal with it properly.
1203*0Sstevel@tonic-gate 	 */
1204*0Sstevel@tonic-gate 	npfd = 1;
1205*0Sstevel@tonic-gate 	pfdp = thr_get_storage(&pfdp_key, sizeof (struct pollfd), free);
1206*0Sstevel@tonic-gate 	if (pfdp == NULL) {
1207*0Sstevel@tonic-gate 		(void) syslog(LOG_ERR, clnt_vc_errstr,
1208*0Sstevel@tonic-gate 			clnt_read_vc_str, __no_mem_str);
1209*0Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_SYSTEMERROR;
1210*0Sstevel@tonic-gate 		rpc_callerr.re_errno = errno;
1211*0Sstevel@tonic-gate 		rpc_callerr.re_terrno = 0;
1212*0Sstevel@tonic-gate 		trace2(TR_read_vc, 1, len);
1213*0Sstevel@tonic-gate 		return (-1);
1214*0Sstevel@tonic-gate 	}
1215*0Sstevel@tonic-gate 
1216*0Sstevel@tonic-gate 	/*
1217*0Sstevel@tonic-gate 	 *	N.B.:  slot 0 in the pollfd array is reserved for the file
1218*0Sstevel@tonic-gate 	 *	descriptor we're really interested in (as opposed to the
1219*0Sstevel@tonic-gate 	 *	callback descriptors).
1220*0Sstevel@tonic-gate 	 */
1221*0Sstevel@tonic-gate 	pfdp[0].fd = ct->ct_fd;
1222*0Sstevel@tonic-gate 	pfdp[0].events = MASKVAL;
1223*0Sstevel@tonic-gate 	pfdp[0].revents = 0;
1224*0Sstevel@tonic-gate 	poll_time = ct->ct_wait;
1225*0Sstevel@tonic-gate 	if (gettimeofday(&starttime, (struct timezone *)NULL) == -1) {
1226*0Sstevel@tonic-gate 		syslog(LOG_ERR, "Unable to get time of day: %m");
1227*0Sstevel@tonic-gate 		return (-1);
1228*0Sstevel@tonic-gate 	}
1229*0Sstevel@tonic-gate 
1230*0Sstevel@tonic-gate 	for (;;) {
1231*0Sstevel@tonic-gate 		extern void (*_svc_getreqset_proc)();
1232*0Sstevel@tonic-gate 		extern pollfd_t *svc_pollfd;
1233*0Sstevel@tonic-gate 		extern int svc_max_pollfd;
1234*0Sstevel@tonic-gate 		int fds;
1235*0Sstevel@tonic-gate 
1236*0Sstevel@tonic-gate 		/* VARIABLES PROTECTED BY svc_fd_lock: svc_pollfd */
1237*0Sstevel@tonic-gate 
1238*0Sstevel@tonic-gate 		if (_svc_getreqset_proc) {
1239*0Sstevel@tonic-gate 			sig_rw_rdlock(&svc_fd_lock);
1240*0Sstevel@tonic-gate 
1241*0Sstevel@tonic-gate 			/* reallocate pfdp to svc_max_pollfd +1 */
1242*0Sstevel@tonic-gate 			if (npfd != (svc_max_pollfd + 1)) {
1243*0Sstevel@tonic-gate 				struct pollfd *tmp_pfdp = realloc(pfdp,
1244*0Sstevel@tonic-gate 						sizeof (struct pollfd) *
1245*0Sstevel@tonic-gate 						(svc_max_pollfd + 1));
1246*0Sstevel@tonic-gate 				if (tmp_pfdp == NULL) {
1247*0Sstevel@tonic-gate 					sig_rw_unlock(&svc_fd_lock);
1248*0Sstevel@tonic-gate 					(void) syslog(LOG_ERR, clnt_vc_errstr,
1249*0Sstevel@tonic-gate 						clnt_read_vc_str, __no_mem_str);
1250*0Sstevel@tonic-gate 					rpc_callerr.re_status = RPC_SYSTEMERROR;
1251*0Sstevel@tonic-gate 					rpc_callerr.re_errno = errno;
1252*0Sstevel@tonic-gate 					rpc_callerr.re_terrno = 0;
1253*0Sstevel@tonic-gate 					trace2(TR_read_vc, 1, len);
1254*0Sstevel@tonic-gate 					return (-1);
1255*0Sstevel@tonic-gate 				}
1256*0Sstevel@tonic-gate 
1257*0Sstevel@tonic-gate 				pfdp = tmp_pfdp;
1258*0Sstevel@tonic-gate 				npfd = svc_max_pollfd + 1;
1259*0Sstevel@tonic-gate 				pthread_setspecific(pfdp_key, pfdp);
1260*0Sstevel@tonic-gate 			}
1261*0Sstevel@tonic-gate 			if (npfd > 1)
1262*0Sstevel@tonic-gate 				(void) memcpy(&pfdp[1], svc_pollfd,
1263*0Sstevel@tonic-gate 				    sizeof (struct pollfd) * (npfd - 1));
1264*0Sstevel@tonic-gate 
1265*0Sstevel@tonic-gate 			sig_rw_unlock(&svc_fd_lock);
1266*0Sstevel@tonic-gate 		} else {
1267*0Sstevel@tonic-gate 			npfd = 1;	/* don't forget about pfdp[0] */
1268*0Sstevel@tonic-gate 		}
1269*0Sstevel@tonic-gate 
1270*0Sstevel@tonic-gate 		switch (fds = poll(pfdp, npfd, poll_time)) {
1271*0Sstevel@tonic-gate 		case 0:
1272*0Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_TIMEDOUT;
1273*0Sstevel@tonic-gate 			trace2(TR_read_vc, 1, len);
1274*0Sstevel@tonic-gate 			return (-1);
1275*0Sstevel@tonic-gate 
1276*0Sstevel@tonic-gate 		case -1:
1277*0Sstevel@tonic-gate 			if (errno != EINTR)
1278*0Sstevel@tonic-gate 				continue;
1279*0Sstevel@tonic-gate 			else {
1280*0Sstevel@tonic-gate 				/*
1281*0Sstevel@tonic-gate 				 * interrupted by another signal,
1282*0Sstevel@tonic-gate 				 * update time_waited
1283*0Sstevel@tonic-gate 				 */
1284*0Sstevel@tonic-gate 
1285*0Sstevel@tonic-gate 				if (gettimeofday(&curtime,
1286*0Sstevel@tonic-gate 				(struct timezone *)NULL) == -1) {
1287*0Sstevel@tonic-gate 					syslog(LOG_ERR,
1288*0Sstevel@tonic-gate 					    "Unable to get time of day:  %m");
1289*0Sstevel@tonic-gate 					errno = 0;
1290*0Sstevel@tonic-gate 					continue;
1291*0Sstevel@tonic-gate 				};
1292*0Sstevel@tonic-gate 				delta = (curtime.tv_sec -
1293*0Sstevel@tonic-gate 						starttime.tv_sec) * 1000 +
1294*0Sstevel@tonic-gate 					(curtime.tv_usec -
1295*0Sstevel@tonic-gate 						starttime.tv_usec) / 1000;
1296*0Sstevel@tonic-gate 				poll_time -= delta;
1297*0Sstevel@tonic-gate 				if (poll_time < 0) {
1298*0Sstevel@tonic-gate 					rpc_callerr.re_status =
1299*0Sstevel@tonic-gate 						RPC_TIMEDOUT;
1300*0Sstevel@tonic-gate 					errno = 0;
1301*0Sstevel@tonic-gate 					trace2(TR_read_vc, 1, len);
1302*0Sstevel@tonic-gate 					return (-1);
1303*0Sstevel@tonic-gate 				} else {
1304*0Sstevel@tonic-gate 					errno = 0; /* reset it */
1305*0Sstevel@tonic-gate 					continue;
1306*0Sstevel@tonic-gate 				}
1307*0Sstevel@tonic-gate 			}
1308*0Sstevel@tonic-gate 		}
1309*0Sstevel@tonic-gate 
1310*0Sstevel@tonic-gate 		if (pfdp[0].revents == 0) {
1311*0Sstevel@tonic-gate 			/* must be for server side of the house */
1312*0Sstevel@tonic-gate 			(*_svc_getreqset_proc)(&pfdp[1], fds);
1313*0Sstevel@tonic-gate 			continue;	/* do poll again */
1314*0Sstevel@tonic-gate 		}
1315*0Sstevel@tonic-gate 
1316*0Sstevel@tonic-gate 		if (pfdp[0].revents & POLLNVAL) {
1317*0Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTRECV;
1318*0Sstevel@tonic-gate 			/*
1319*0Sstevel@tonic-gate 			 *	Note:  we're faking errno here because we
1320*0Sstevel@tonic-gate 			 *	previously would have expected select() to
1321*0Sstevel@tonic-gate 			 *	return -1 with errno EBADF.  Poll(BA_OS)
1322*0Sstevel@tonic-gate 			 *	returns 0 and sets the POLLNVAL revents flag
1323*0Sstevel@tonic-gate 			 *	instead.
1324*0Sstevel@tonic-gate 			 */
1325*0Sstevel@tonic-gate 			rpc_callerr.re_errno = errno = EBADF;
1326*0Sstevel@tonic-gate 			trace2(TR_read_vc, 1, len);
1327*0Sstevel@tonic-gate 			return (-1);
1328*0Sstevel@tonic-gate 		}
1329*0Sstevel@tonic-gate 
1330*0Sstevel@tonic-gate 		if (pfdp[0].revents & (POLLERR | POLLHUP)) {
1331*0Sstevel@tonic-gate 			rpc_callerr.re_status = RPC_CANTRECV;
1332*0Sstevel@tonic-gate 			rpc_callerr.re_errno = errno = EPIPE;
1333*0Sstevel@tonic-gate 			trace2(TR_read_vc, 1, len);
1334*0Sstevel@tonic-gate 			return (-1);
1335*0Sstevel@tonic-gate 		}
1336*0Sstevel@tonic-gate 		break;
1337*0Sstevel@tonic-gate 	}
1338*0Sstevel@tonic-gate 
1339*0Sstevel@tonic-gate 	switch (len = t_rcvall(ct->ct_fd, buf, len)) {
1340*0Sstevel@tonic-gate 	case 0:
1341*0Sstevel@tonic-gate 		/* premature eof */
1342*0Sstevel@tonic-gate 		rpc_callerr.re_errno = ENOLINK;
1343*0Sstevel@tonic-gate 		rpc_callerr.re_terrno = 0;
1344*0Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_CANTRECV;
1345*0Sstevel@tonic-gate 		len = -1;	/* it's really an error */
1346*0Sstevel@tonic-gate 		break;
1347*0Sstevel@tonic-gate 
1348*0Sstevel@tonic-gate 	case -1:
1349*0Sstevel@tonic-gate 		rpc_callerr.re_terrno = t_errno;
1350*0Sstevel@tonic-gate 		rpc_callerr.re_errno = 0;
1351*0Sstevel@tonic-gate 		rpc_callerr.re_status = RPC_CANTRECV;
1352*0Sstevel@tonic-gate 		break;
1353*0Sstevel@tonic-gate 	}
1354*0Sstevel@tonic-gate 	trace2(TR_read_vc, 1, len);
1355*0Sstevel@tonic-gate 	return (len);
1356*0Sstevel@tonic-gate }
1357*0Sstevel@tonic-gate 
/*
 * Write len bytes from buf to the virtual-circuit transport endpoint
 * described by ct_tmp (a struct ct_data *).
 *
 * Returns the number of bytes written, or a negative value on failure
 * (with rpc_callerr filled in):
 *	-1  send error (RPC_CANTSEND)
 *	-2  non-blocking store failure (RPC_CANTSTORE), from nb_send()
 */
static int
write_vc(ct_tmp, buf, len)
	void *ct_tmp;
	caddr_t buf;
	int len;
{
	int i, cnt;
	struct ct_data *ct = ct_tmp;
	int flag;
	int maxsz;

	trace2(TR_write_vc, 0, len);

	/* maximum transport service data unit; 0 or -1 means "no limit" */
	maxsz = ct->ct_tsdu;

	/* Handle the non-blocking mode */
	if (ct->ct_is_oneway && ct->ct_io_mode == RPC_CL_NONBLOCKING) {
		/*
		 * Test a special case here. If the length of the current
		 * write is greater than the transport data unit, and the
		 * mode is non blocking, we return RPC_CANTSEND.
		 * XXX  this is not very clean.
		 */
		if (maxsz > 0 && len > maxsz) {
			rpc_callerr.re_terrno = errno;
			rpc_callerr.re_errno = 0;
			rpc_callerr.re_status = RPC_CANTSEND;
			return (-1);
		}

		/* nb_send() buffers whatever cannot be sent immediately */
		len = nb_send(ct, buf, (unsigned)len);
		if (len == -1) {
			rpc_callerr.re_terrno = errno;
			rpc_callerr.re_errno = 0;
			rpc_callerr.re_status = RPC_CANTSEND;
		} else if (len == -2) {
			/* message could not even be stored in the buffer */
			rpc_callerr.re_terrno = 0;
			rpc_callerr.re_errno = 0;
			rpc_callerr.re_status = RPC_CANTSTORE;
		}
		trace2(TR_write_vc, 1, len);
		return (len);
	}

	/* No TSDU limit: send the whole buffer in one t_snd() call. */
	if ((maxsz == 0) || (maxsz == -1)) {
		/*
		 * T_snd may return -1 for error on connection (connection
		 * needs to be repaired/closed, and -2 for flow-control
		 * handling error (no operation to do, just wait and call
		 * T_Flush()).
		 */
		if ((len = t_snd(ct->ct_fd, buf, (unsigned)len, 0)) == -1) {
			rpc_callerr.re_terrno = t_errno;
			rpc_callerr.re_errno = 0;
			rpc_callerr.re_status = RPC_CANTSEND;
		}
		trace2(TR_write_vc, 1, len);
		return (len);
	}

	/*
	 * This for those transports which have a max size for data.
	 * Send in maxsz-sized pieces, setting T_MORE on all but the last.
	 */
	for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
		flag = cnt > maxsz ? T_MORE : 0;
		if ((i = t_snd(ct->ct_fd, buf, (unsigned)MIN(cnt, maxsz),
				flag)) == -1) {
			rpc_callerr.re_terrno = t_errno;
			rpc_callerr.re_errno = 0;
			rpc_callerr.re_status = RPC_CANTSEND;
			trace2(TR_write_vc, 1, len);
			return (-1);
		}
	}
	trace2(TR_write_vc, 1, len);
	return (len);
}
1435*0Sstevel@tonic-gate 
1436*0Sstevel@tonic-gate /*
1437*0Sstevel@tonic-gate  * Receive the required bytes of data, even if it is fragmented.
1438*0Sstevel@tonic-gate  */
1439*0Sstevel@tonic-gate static int
1440*0Sstevel@tonic-gate t_rcvall(fd, buf, len)
1441*0Sstevel@tonic-gate 	int fd;
1442*0Sstevel@tonic-gate 	char *buf;
1443*0Sstevel@tonic-gate 	int len;
1444*0Sstevel@tonic-gate {
1445*0Sstevel@tonic-gate 	int moreflag;
1446*0Sstevel@tonic-gate 	int final = 0;
1447*0Sstevel@tonic-gate 	int res;
1448*0Sstevel@tonic-gate 
1449*0Sstevel@tonic-gate 	trace3(TR_t_rcvall, 0, fd, len);
1450*0Sstevel@tonic-gate 	do {
1451*0Sstevel@tonic-gate 		moreflag = 0;
1452*0Sstevel@tonic-gate 		res = t_rcv(fd, buf, (unsigned)len, &moreflag);
1453*0Sstevel@tonic-gate 		if (res == -1) {
1454*0Sstevel@tonic-gate 			if (t_errno == TLOOK)
1455*0Sstevel@tonic-gate 				switch (t_look(fd)) {
1456*0Sstevel@tonic-gate 				case T_DISCONNECT:
1457*0Sstevel@tonic-gate 					t_rcvdis(fd, NULL);
1458*0Sstevel@tonic-gate 					t_snddis(fd, NULL);
1459*0Sstevel@tonic-gate 					trace3(TR_t_rcvall, 1, fd, len);
1460*0Sstevel@tonic-gate 					return (-1);
1461*0Sstevel@tonic-gate 				case T_ORDREL:
1462*0Sstevel@tonic-gate 				/* Received orderly release indication */
1463*0Sstevel@tonic-gate 					t_rcvrel(fd);
1464*0Sstevel@tonic-gate 				/* Send orderly release indicator */
1465*0Sstevel@tonic-gate 					(void) t_sndrel(fd);
1466*0Sstevel@tonic-gate 					trace3(TR_t_rcvall, 1, fd, len);
1467*0Sstevel@tonic-gate 					return (-1);
1468*0Sstevel@tonic-gate 				default:
1469*0Sstevel@tonic-gate 					trace3(TR_t_rcvall, 1, fd, len);
1470*0Sstevel@tonic-gate 					return (-1);
1471*0Sstevel@tonic-gate 				}
1472*0Sstevel@tonic-gate 		} else if (res == 0) {
1473*0Sstevel@tonic-gate 			trace3(TR_t_rcvall, 1, fd, len);
1474*0Sstevel@tonic-gate 			return (0);
1475*0Sstevel@tonic-gate 		}
1476*0Sstevel@tonic-gate 		final += res;
1477*0Sstevel@tonic-gate 		buf += res;
1478*0Sstevel@tonic-gate 		len -= res;
1479*0Sstevel@tonic-gate 	} while ((len > 0) && (moreflag & T_MORE));
1480*0Sstevel@tonic-gate 	trace3(TR_t_rcvall, 1, fd, len);
1481*0Sstevel@tonic-gate 	return (final);
1482*0Sstevel@tonic-gate }
1483*0Sstevel@tonic-gate 
1484*0Sstevel@tonic-gate static struct clnt_ops *
1485*0Sstevel@tonic-gate clnt_vc_ops(void)
1486*0Sstevel@tonic-gate {
1487*0Sstevel@tonic-gate 	static struct clnt_ops ops;
1488*0Sstevel@tonic-gate 	extern mutex_t	ops_lock;
1489*0Sstevel@tonic-gate 
1490*0Sstevel@tonic-gate 	/* VARIABLES PROTECTED BY ops_lock: ops */
1491*0Sstevel@tonic-gate 
1492*0Sstevel@tonic-gate 	trace1(TR_clnt_vc_ops, 0);
1493*0Sstevel@tonic-gate 	sig_mutex_lock(&ops_lock);
1494*0Sstevel@tonic-gate 	if (ops.cl_call == NULL) {
1495*0Sstevel@tonic-gate 		ops.cl_call = clnt_vc_call;
1496*0Sstevel@tonic-gate 		ops.cl_send = clnt_vc_send;
1497*0Sstevel@tonic-gate 		ops.cl_abort = clnt_vc_abort;
1498*0Sstevel@tonic-gate 		ops.cl_geterr = clnt_vc_geterr;
1499*0Sstevel@tonic-gate 		ops.cl_freeres = clnt_vc_freeres;
1500*0Sstevel@tonic-gate 		ops.cl_destroy = clnt_vc_destroy;
1501*0Sstevel@tonic-gate 		ops.cl_control = clnt_vc_control;
1502*0Sstevel@tonic-gate 	}
1503*0Sstevel@tonic-gate 	sig_mutex_unlock(&ops_lock);
1504*0Sstevel@tonic-gate 	trace1(TR_clnt_vc_ops, 1);
1505*0Sstevel@tonic-gate 	return (&ops);
1506*0Sstevel@tonic-gate }
1507*0Sstevel@tonic-gate 
1508*0Sstevel@tonic-gate /*
1509*0Sstevel@tonic-gate  * Make sure that the time is not garbage.   -1 value is disallowed.
1510*0Sstevel@tonic-gate  * Note this is different from time_not_ok in clnt_dg.c
1511*0Sstevel@tonic-gate  */
1512*0Sstevel@tonic-gate static bool_t
1513*0Sstevel@tonic-gate time_not_ok(t)
1514*0Sstevel@tonic-gate 	struct timeval *t;
1515*0Sstevel@tonic-gate {
1516*0Sstevel@tonic-gate 	trace1(TR_time_not_ok, 0);
1517*0Sstevel@tonic-gate 	trace1(TR_time_not_ok, 1);
1518*0Sstevel@tonic-gate 	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
1519*0Sstevel@tonic-gate 		t->tv_usec <= -1 || t->tv_usec > 1000000);
1520*0Sstevel@tonic-gate }
1521*0Sstevel@tonic-gate 
1522*0Sstevel@tonic-gate 
/*
 * Compute the # of bytes that remains until the end of the buffer,
 * i.e. from the circular-buffer position ct_<p> (bufferReadPtr or
 * bufferWritePtr) to the physical end of ct_buffer.  Assumes a local
 * "ct" (struct ct_data *) is in scope at the expansion site.
 */
#define	REMAIN_BYTES(p) (ct->ct_bufferSize-(ct->ct_##p - ct->ct_buffer))
1525*0Sstevel@tonic-gate 
1526*0Sstevel@tonic-gate static int
1527*0Sstevel@tonic-gate addInBuffer(struct ct_data *ct, char *dataToAdd, unsigned int nBytes)
1528*0Sstevel@tonic-gate {
1529*0Sstevel@tonic-gate 	if (NULL == ct->ct_buffer) {
1530*0Sstevel@tonic-gate 		/* Buffer not allocated yet. */
1531*0Sstevel@tonic-gate 		char *buffer;
1532*0Sstevel@tonic-gate 
1533*0Sstevel@tonic-gate 		buffer = (char *)malloc(ct->ct_bufferSize);
1534*0Sstevel@tonic-gate 		if (NULL == buffer) {
1535*0Sstevel@tonic-gate 			errno = ENOMEM;
1536*0Sstevel@tonic-gate 			return (-1);
1537*0Sstevel@tonic-gate 		}
1538*0Sstevel@tonic-gate 		memcpy(buffer, dataToAdd, nBytes);
1539*0Sstevel@tonic-gate 
1540*0Sstevel@tonic-gate 		ct->ct_buffer = buffer;
1541*0Sstevel@tonic-gate 		ct->ct_bufferReadPtr = buffer;
1542*0Sstevel@tonic-gate 		ct->ct_bufferWritePtr = buffer + nBytes;
1543*0Sstevel@tonic-gate 		ct->ct_bufferPendingSize = nBytes;
1544*0Sstevel@tonic-gate 	} else {
1545*0Sstevel@tonic-gate 		/*
1546*0Sstevel@tonic-gate 		 * For an already allocated buffer, two mem copies
1547*0Sstevel@tonic-gate 		 * might be needed, depending on the current
1548*0Sstevel@tonic-gate 		 * writing position.
1549*0Sstevel@tonic-gate 		 */
1550*0Sstevel@tonic-gate 
1551*0Sstevel@tonic-gate 		/* Compute the length of the first copy. */
1552*0Sstevel@tonic-gate 		int len = MIN(nBytes, REMAIN_BYTES(bufferWritePtr));
1553*0Sstevel@tonic-gate 
1554*0Sstevel@tonic-gate 		ct->ct_bufferPendingSize += nBytes;
1555*0Sstevel@tonic-gate 
1556*0Sstevel@tonic-gate 		memcpy(ct->ct_bufferWritePtr, dataToAdd, len);
1557*0Sstevel@tonic-gate 		ct->ct_bufferWritePtr += len;
1558*0Sstevel@tonic-gate 		nBytes -= len;
1559*0Sstevel@tonic-gate 		if (0 == nBytes) {
1560*0Sstevel@tonic-gate 			/* One memcopy needed. */
1561*0Sstevel@tonic-gate 
1562*0Sstevel@tonic-gate 			/*
1563*0Sstevel@tonic-gate 			 * If the write pointer is at the end of the buffer,
1564*0Sstevel@tonic-gate 			 * wrap it now.
1565*0Sstevel@tonic-gate 			 */
1566*0Sstevel@tonic-gate 			if (ct->ct_bufferWritePtr ==
1567*0Sstevel@tonic-gate 			    (ct->ct_buffer + ct->ct_bufferSize)) {
1568*0Sstevel@tonic-gate 				ct->ct_bufferWritePtr = ct->ct_buffer;
1569*0Sstevel@tonic-gate 			}
1570*0Sstevel@tonic-gate 		} else {
1571*0Sstevel@tonic-gate 			/* Two memcopy needed. */
1572*0Sstevel@tonic-gate 			dataToAdd += len;
1573*0Sstevel@tonic-gate 
1574*0Sstevel@tonic-gate 			/*
1575*0Sstevel@tonic-gate 			 * Copy the remaining data to the beginning of the
1576*0Sstevel@tonic-gate 			 * buffer
1577*0Sstevel@tonic-gate 			 */
1578*0Sstevel@tonic-gate 			memcpy(ct->ct_buffer, dataToAdd, nBytes);
1579*0Sstevel@tonic-gate 			ct->ct_bufferWritePtr = ct->ct_buffer + nBytes;
1580*0Sstevel@tonic-gate 		}
1581*0Sstevel@tonic-gate 	}
1582*0Sstevel@tonic-gate 	return (0);
1583*0Sstevel@tonic-gate }
1584*0Sstevel@tonic-gate 
1585*0Sstevel@tonic-gate static void
1586*0Sstevel@tonic-gate getFromBuffer(struct ct_data *ct, char **data, unsigned int *nBytes)
1587*0Sstevel@tonic-gate {
1588*0Sstevel@tonic-gate 	int len = MIN(ct->ct_bufferPendingSize, REMAIN_BYTES(bufferReadPtr));
1589*0Sstevel@tonic-gate 	*data = ct->ct_bufferReadPtr;
1590*0Sstevel@tonic-gate 	*nBytes = len;
1591*0Sstevel@tonic-gate }
1592*0Sstevel@tonic-gate 
1593*0Sstevel@tonic-gate static void
1594*0Sstevel@tonic-gate consumeFromBuffer(struct ct_data *ct, unsigned int nBytes)
1595*0Sstevel@tonic-gate {
1596*0Sstevel@tonic-gate 	ct->ct_bufferPendingSize -= nBytes;
1597*0Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0) {
1598*0Sstevel@tonic-gate 		/*
1599*0Sstevel@tonic-gate 		 * If the buffer contains no data, we set the two pointers at
1600*0Sstevel@tonic-gate 		 * the beginning of the buffer (to miminize buffer wraps).
1601*0Sstevel@tonic-gate 		 */
1602*0Sstevel@tonic-gate 		ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = ct->ct_buffer;
1603*0Sstevel@tonic-gate 	} else {
1604*0Sstevel@tonic-gate 		ct->ct_bufferReadPtr += nBytes;
1605*0Sstevel@tonic-gate 		if (ct->ct_bufferReadPtr >
1606*0Sstevel@tonic-gate 		    ct->ct_buffer + ct->ct_bufferSize) {
1607*0Sstevel@tonic-gate 			ct->ct_bufferReadPtr -= ct->ct_bufferSize;
1608*0Sstevel@tonic-gate 		}
1609*0Sstevel@tonic-gate 	}
1610*0Sstevel@tonic-gate }
1611*0Sstevel@tonic-gate 
1612*0Sstevel@tonic-gate static int
1613*0Sstevel@tonic-gate iovFromBuffer(struct ct_data *ct, struct iovec *iov)
1614*0Sstevel@tonic-gate {
1615*0Sstevel@tonic-gate 	int l;
1616*0Sstevel@tonic-gate 
1617*0Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0)
1618*0Sstevel@tonic-gate 		return (0);
1619*0Sstevel@tonic-gate 
1620*0Sstevel@tonic-gate 	l = REMAIN_BYTES(bufferReadPtr);
1621*0Sstevel@tonic-gate 	if (l < ct->ct_bufferPendingSize) {
1622*0Sstevel@tonic-gate 		/* Buffer in two fragments. */
1623*0Sstevel@tonic-gate 		iov[0].iov_base = ct->ct_bufferReadPtr;
1624*0Sstevel@tonic-gate 		iov[0].iov_len  = l;
1625*0Sstevel@tonic-gate 
1626*0Sstevel@tonic-gate 		iov[1].iov_base = ct->ct_buffer;
1627*0Sstevel@tonic-gate 		iov[1].iov_len  = ct->ct_bufferPendingSize - l;
1628*0Sstevel@tonic-gate 		return (2);
1629*0Sstevel@tonic-gate 	} else {
1630*0Sstevel@tonic-gate 		/* Buffer in one fragment. */
1631*0Sstevel@tonic-gate 		iov[0].iov_base = ct->ct_bufferReadPtr;
1632*0Sstevel@tonic-gate 		iov[0].iov_len  = ct->ct_bufferPendingSize;
1633*0Sstevel@tonic-gate 		return (1);
1634*0Sstevel@tonic-gate 	}
1635*0Sstevel@tonic-gate }
1636*0Sstevel@tonic-gate 
1637*0Sstevel@tonic-gate static bool_t
1638*0Sstevel@tonic-gate set_flush_mode(struct ct_data *ct, int mode)
1639*0Sstevel@tonic-gate {
1640*0Sstevel@tonic-gate 	switch (mode) {
1641*0Sstevel@tonic-gate 	case RPC_CL_BLOCKING_FLUSH:
1642*0Sstevel@tonic-gate 		/* flush as most as possible without blocking */
1643*0Sstevel@tonic-gate 	case RPC_CL_BESTEFFORT_FLUSH:
1644*0Sstevel@tonic-gate 		/* flush the buffer completely (possibly blocking) */
1645*0Sstevel@tonic-gate 	case RPC_CL_DEFAULT_FLUSH:
1646*0Sstevel@tonic-gate 		/* flush according to the currently defined policy */
1647*0Sstevel@tonic-gate 		ct->ct_blocking_mode = mode;
1648*0Sstevel@tonic-gate 		return (TRUE);
1649*0Sstevel@tonic-gate 	default:
1650*0Sstevel@tonic-gate 		return (FALSE);
1651*0Sstevel@tonic-gate 	}
1652*0Sstevel@tonic-gate }
1653*0Sstevel@tonic-gate 
1654*0Sstevel@tonic-gate static bool_t
1655*0Sstevel@tonic-gate set_io_mode(struct ct_data *ct, int ioMode)
1656*0Sstevel@tonic-gate {
1657*0Sstevel@tonic-gate 	switch (ioMode) {
1658*0Sstevel@tonic-gate 	case RPC_CL_BLOCKING:
1659*0Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
1660*0Sstevel@tonic-gate 			if (NULL != ct->ct_buffer) {
1661*0Sstevel@tonic-gate 				/*
1662*0Sstevel@tonic-gate 				 * If a buffer was allocated for this
1663*0Sstevel@tonic-gate 				 * connection, flush it now, and free it.
1664*0Sstevel@tonic-gate 				 */
1665*0Sstevel@tonic-gate 				do_flush(ct, RPC_CL_BLOCKING_FLUSH);
1666*0Sstevel@tonic-gate 				free(ct->ct_buffer);
1667*0Sstevel@tonic-gate 				ct->ct_buffer = NULL;
1668*0Sstevel@tonic-gate 			}
1669*0Sstevel@tonic-gate 			unregister_nb(ct);
1670*0Sstevel@tonic-gate 			ct->ct_io_mode = ioMode;
1671*0Sstevel@tonic-gate 		}
1672*0Sstevel@tonic-gate 		break;
1673*0Sstevel@tonic-gate 	case RPC_CL_NONBLOCKING:
1674*0Sstevel@tonic-gate 		if (ct->ct_io_mode == RPC_CL_BLOCKING) {
1675*0Sstevel@tonic-gate 			if (-1 == register_nb(ct)) {
1676*0Sstevel@tonic-gate 				return (FALSE);
1677*0Sstevel@tonic-gate 			}
1678*0Sstevel@tonic-gate 			ct->ct_io_mode = ioMode;
1679*0Sstevel@tonic-gate 		}
1680*0Sstevel@tonic-gate 		break;
1681*0Sstevel@tonic-gate 	default:
1682*0Sstevel@tonic-gate 		return (FALSE);
1683*0Sstevel@tonic-gate 	}
1684*0Sstevel@tonic-gate 	return (TRUE);
1685*0Sstevel@tonic-gate }
1686*0Sstevel@tonic-gate 
/*
 * Flush the connection's buffered output according to flush_mode:
 *
 *	RPC_CL_BLOCKING_FLUSH	 switch the fd to blocking and loop until
 *				 every pending byte has been written;
 *	RPC_CL_BESTEFFORT_FLUSH	 switch the fd to non-blocking and make a
 *				 single write attempt, treating EWOULDBLOCK
 *				 as success-with-nothing-sent.
 *
 * Returns 0 on success (or nothing to do), -1 on error.
 */
static int
do_flush(struct ct_data *ct, uint_t flush_mode)
{
	int result;
	if (ct->ct_bufferPendingSize == 0) {
		return (0);
	}

	switch (flush_mode) {
	case RPC_CL_BLOCKING_FLUSH:
		if (!set_blocking_connection(ct, TRUE)) {
			return (-1);
		}
		while (ct->ct_bufferPendingSize > 0) {
			/*
			 * Wrapped data needs writev() over two fragments;
			 * contiguous data goes out via a single t_snd().
			 */
			if (REMAIN_BYTES(bufferReadPtr) <
			    ct->ct_bufferPendingSize) {
				struct iovec iov[2];
				iovFromBuffer(ct, iov);
				result = writev(ct->ct_fd, iov, 2);
			} else {
				result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
				    ct->ct_bufferPendingSize, 0);
			}
			if (result < 0) {
				return (-1);
			}
			/* partial writes are fine; loop sends the rest */
			consumeFromBuffer(ct, result);
		}

		break;

	case RPC_CL_BESTEFFORT_FLUSH:
		/* NOTE(review): set_blocking_connection result ignored here */
		set_blocking_connection(ct, FALSE);
		if (REMAIN_BYTES(bufferReadPtr) < ct->ct_bufferPendingSize) {
			struct iovec iov[2];
			iovFromBuffer(ct, iov);
			result = writev(ct->ct_fd, iov, 2);
		} else {
			result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
			    ct->ct_bufferPendingSize, 0);
		}
		if (result < 0) {
			if (errno != EWOULDBLOCK) {
				perror("flush");
				return (-1);
			}
			/* flow-controlled: best effort made, not an error */
			return (0);
		}
		if (result > 0)
			consumeFromBuffer(ct, result);
		break;
	}
	return (0);
}
1741*0Sstevel@tonic-gate 
1742*0Sstevel@tonic-gate /*
1743*0Sstevel@tonic-gate  * Non blocking send.
1744*0Sstevel@tonic-gate  */
1745*0Sstevel@tonic-gate 
1746*0Sstevel@tonic-gate static int
1747*0Sstevel@tonic-gate nb_send(struct ct_data *ct, void *buff, unsigned int nBytes)
1748*0Sstevel@tonic-gate {
1749*0Sstevel@tonic-gate 	int result;
1750*0Sstevel@tonic-gate 
1751*0Sstevel@tonic-gate 	if (!(ntohl(*(uint32_t *)buff) & 2^31)) {
1752*0Sstevel@tonic-gate 		return (-1);
1753*0Sstevel@tonic-gate 	}
1754*0Sstevel@tonic-gate 
1755*0Sstevel@tonic-gate 	/*
1756*0Sstevel@tonic-gate 	 * Check to see if the current message can be stored fully in the
1757*0Sstevel@tonic-gate 	 * buffer. We have to check this now because it may be impossible
1758*0Sstevel@tonic-gate 	 * to send any data, so the message must be stored in the buffer.
1759*0Sstevel@tonic-gate 	 */
1760*0Sstevel@tonic-gate 	if (nBytes > (ct->ct_bufferSize - ct->ct_bufferPendingSize)) {
1761*0Sstevel@tonic-gate 		/* Try to flush  (to free some space). */
1762*0Sstevel@tonic-gate 		do_flush(ct, RPC_CL_BESTEFFORT_FLUSH);
1763*0Sstevel@tonic-gate 
1764*0Sstevel@tonic-gate 		/* Can we store the message now ? */
1765*0Sstevel@tonic-gate 		if (nBytes > (ct->ct_bufferSize - ct->ct_bufferPendingSize))
1766*0Sstevel@tonic-gate 			return (-2);
1767*0Sstevel@tonic-gate 	}
1768*0Sstevel@tonic-gate 
1769*0Sstevel@tonic-gate 	set_blocking_connection(ct, FALSE);
1770*0Sstevel@tonic-gate 
1771*0Sstevel@tonic-gate 	/*
1772*0Sstevel@tonic-gate 	 * If there is no data pending, we can simply try
1773*0Sstevel@tonic-gate 	 * to send our data.
1774*0Sstevel@tonic-gate 	 */
1775*0Sstevel@tonic-gate 	if (ct->ct_bufferPendingSize == 0) {
1776*0Sstevel@tonic-gate 		result = t_snd(ct->ct_fd, buff, nBytes, 0);
1777*0Sstevel@tonic-gate 		if (result == -1) {
1778*0Sstevel@tonic-gate 			if (errno == EWOULDBLOCK) {
1779*0Sstevel@tonic-gate 				result = 0;
1780*0Sstevel@tonic-gate 			} else {
1781*0Sstevel@tonic-gate 				perror("send");
1782*0Sstevel@tonic-gate 				return (-1);
1783*0Sstevel@tonic-gate 			}
1784*0Sstevel@tonic-gate 		}
1785*0Sstevel@tonic-gate 		/*
1786*0Sstevel@tonic-gate 		 * If we have not sent all data, we must store them
1787*0Sstevel@tonic-gate 		 * in the buffer.
1788*0Sstevel@tonic-gate 		 */
1789*0Sstevel@tonic-gate 		if (result != nBytes) {
1790*0Sstevel@tonic-gate 			if (addInBuffer(ct, (char *)buff + result,
1791*0Sstevel@tonic-gate 			    nBytes - result) == -1) {
1792*0Sstevel@tonic-gate 				return (-1);
1793*0Sstevel@tonic-gate 			}
1794*0Sstevel@tonic-gate 		}
1795*0Sstevel@tonic-gate 	} else {
1796*0Sstevel@tonic-gate 		/*
1797*0Sstevel@tonic-gate 		 * Some data pending in the buffer.  We try to send
1798*0Sstevel@tonic-gate 		 * both buffer data and current message in one shot.
1799*0Sstevel@tonic-gate 		 */
1800*0Sstevel@tonic-gate 		struct iovec iov[3];
1801*0Sstevel@tonic-gate 		int i = iovFromBuffer(ct, &iov[0]);
1802*0Sstevel@tonic-gate 
1803*0Sstevel@tonic-gate 		iov[i].iov_base = buff;
1804*0Sstevel@tonic-gate 		iov[i].iov_len  = nBytes;
1805*0Sstevel@tonic-gate 
1806*0Sstevel@tonic-gate 		result = writev(ct->ct_fd, iov, i+1);
1807*0Sstevel@tonic-gate 		if (result == -1) {
1808*0Sstevel@tonic-gate 			if (errno == EWOULDBLOCK) {
1809*0Sstevel@tonic-gate 				/* No bytes sent */
1810*0Sstevel@tonic-gate 				result = 0;
1811*0Sstevel@tonic-gate 			} else {
1812*0Sstevel@tonic-gate 				return (-1);
1813*0Sstevel@tonic-gate 			}
1814*0Sstevel@tonic-gate 		}
1815*0Sstevel@tonic-gate 
1816*0Sstevel@tonic-gate 		/*
1817*0Sstevel@tonic-gate 		 * Add the bytes from the message
1818*0Sstevel@tonic-gate 		 * that we have not sent.
1819*0Sstevel@tonic-gate 		 */
1820*0Sstevel@tonic-gate 		if (result <= ct->ct_bufferPendingSize) {
1821*0Sstevel@tonic-gate 			/* No bytes from the message sent */
1822*0Sstevel@tonic-gate 			consumeFromBuffer(ct, result);
1823*0Sstevel@tonic-gate 			if (addInBuffer(ct, buff, nBytes) == -1) {
1824*0Sstevel@tonic-gate 				return (-1);
1825*0Sstevel@tonic-gate 			}
1826*0Sstevel@tonic-gate 		} else {
1827*0Sstevel@tonic-gate 			/*
1828*0Sstevel@tonic-gate 			 * Some bytes of the message are sent.
1829*0Sstevel@tonic-gate 			 * Compute the length of the message that has
1830*0Sstevel@tonic-gate 			 * been sent.
1831*0Sstevel@tonic-gate 			 */
1832*0Sstevel@tonic-gate 			int len = result - ct->ct_bufferPendingSize;
1833*0Sstevel@tonic-gate 
1834*0Sstevel@tonic-gate 			/* So, empty the buffer. */
1835*0Sstevel@tonic-gate 			ct->ct_bufferReadPtr = ct->ct_buffer;
1836*0Sstevel@tonic-gate 			ct->ct_bufferWritePtr = ct->ct_buffer;
1837*0Sstevel@tonic-gate 			ct->ct_bufferPendingSize = 0;
1838*0Sstevel@tonic-gate 
1839*0Sstevel@tonic-gate 			/* And add the remaining part of the message. */
1840*0Sstevel@tonic-gate 			if (len != nBytes) {
1841*0Sstevel@tonic-gate 				if (addInBuffer(ct, (char *)buff + len,
1842*0Sstevel@tonic-gate 					nBytes-len) == -1) {
1843*0Sstevel@tonic-gate 					return (-1);
1844*0Sstevel@tonic-gate 				}
1845*0Sstevel@tonic-gate 			}
1846*0Sstevel@tonic-gate 		}
1847*0Sstevel@tonic-gate 	}
1848*0Sstevel@tonic-gate 	return (nBytes);
1849*0Sstevel@tonic-gate }
1850*0Sstevel@tonic-gate 
1851*0Sstevel@tonic-gate static void
1852*0Sstevel@tonic-gate flush_registered_clients()
1853*0Sstevel@tonic-gate {
1854*0Sstevel@tonic-gate 	struct nb_reg_node *node;
1855*0Sstevel@tonic-gate 
1856*0Sstevel@tonic-gate 	if (LIST_ISEMPTY(nb_first)) {
1857*0Sstevel@tonic-gate 		return;
1858*0Sstevel@tonic-gate 	}
1859*0Sstevel@tonic-gate 
1860*0Sstevel@tonic-gate 	LIST_FOR_EACH(nb_first, node) {
1861*0Sstevel@tonic-gate 		do_flush(node->ct, RPC_CL_BLOCKING_FLUSH);
1862*0Sstevel@tonic-gate 	}
1863*0Sstevel@tonic-gate }
1864*0Sstevel@tonic-gate 
1865*0Sstevel@tonic-gate static int
1866*0Sstevel@tonic-gate allocate_chunk()
1867*0Sstevel@tonic-gate {
1868*0Sstevel@tonic-gate #define	CHUNK_SIZE 16
1869*0Sstevel@tonic-gate 	struct nb_reg_node *chk = (struct nb_reg_node *)
1870*0Sstevel@tonic-gate 	    malloc(sizeof (struct nb_reg_node) * CHUNK_SIZE);
1871*0Sstevel@tonic-gate 	struct nb_reg_node *n;
1872*0Sstevel@tonic-gate 	int i;
1873*0Sstevel@tonic-gate 
1874*0Sstevel@tonic-gate 	if (NULL == chk) {
1875*0Sstevel@tonic-gate 		return (-1);
1876*0Sstevel@tonic-gate 	}
1877*0Sstevel@tonic-gate 
1878*0Sstevel@tonic-gate 	n = chk;
1879*0Sstevel@tonic-gate 	for (i = 0; i < CHUNK_SIZE-1; ++i) {
1880*0Sstevel@tonic-gate 		n[i].next = &(n[i+1]);
1881*0Sstevel@tonic-gate 	}
1882*0Sstevel@tonic-gate 	n[CHUNK_SIZE-1].next = (struct nb_reg_node *)&nb_free;
1883*0Sstevel@tonic-gate 	nb_free = chk;
1884*0Sstevel@tonic-gate 	return (0);
1885*0Sstevel@tonic-gate }
1886*0Sstevel@tonic-gate 
1887*0Sstevel@tonic-gate static int
1888*0Sstevel@tonic-gate register_nb(struct ct_data *ct)
1889*0Sstevel@tonic-gate {
1890*0Sstevel@tonic-gate 	struct nb_reg_node *node;
1891*0Sstevel@tonic-gate 
1892*0Sstevel@tonic-gate 	mutex_lock(&nb_list_mutex);
1893*0Sstevel@tonic-gate 
1894*0Sstevel@tonic-gate 	if (LIST_ISEMPTY(nb_free) && (allocate_chunk() == -1)) {
1895*0Sstevel@tonic-gate 		mutex_unlock(&nb_list_mutex);
1896*0Sstevel@tonic-gate 		errno = ENOMEM;
1897*0Sstevel@tonic-gate 		return (-1);
1898*0Sstevel@tonic-gate 	}
1899*0Sstevel@tonic-gate 
1900*0Sstevel@tonic-gate 	if (!exit_handler_set) {
1901*0Sstevel@tonic-gate 		atexit(flush_registered_clients);
1902*0Sstevel@tonic-gate 		exit_handler_set = TRUE;
1903*0Sstevel@tonic-gate 	}
1904*0Sstevel@tonic-gate 	/* Get the first free node */
1905*0Sstevel@tonic-gate 	LIST_EXTRACT(nb_free, node);
1906*0Sstevel@tonic-gate 
1907*0Sstevel@tonic-gate 	node->ct = ct;
1908*0Sstevel@tonic-gate 
1909*0Sstevel@tonic-gate 	LIST_ADD(nb_first, node);
1910*0Sstevel@tonic-gate 	mutex_unlock(&nb_list_mutex);
1911*0Sstevel@tonic-gate 
1912*0Sstevel@tonic-gate 	return (0);
1913*0Sstevel@tonic-gate }
1914*0Sstevel@tonic-gate 
/*
 * Remove the registration node for connection ct from the active list
 * and return it to the free list.  Always returns 0; if ct is not on
 * the list, nothing is unlinked.
 */
static int
unregister_nb(struct ct_data *ct)
{
	struct nb_reg_node *node;

	mutex_lock(&nb_list_mutex);
	/* callers only unregister connections they previously registered */
	assert(! LIST_ISEMPTY(nb_first));

	/*
	 * Walk the list looking one node *ahead* (node->next) so we can
	 * splice the match out by rewriting node->next.
	 * NOTE(review): relies on LIST_FOR_EACH's iteration semantics
	 * (defined elsewhere) making node->next always valid here.
	 */
	node = nb_first;
	LIST_FOR_EACH(nb_first, node) {
		if (node->next->ct == ct) {
			/* Get the node to unregister. */
			struct nb_reg_node *n = node->next;
			node->next = n->next;

			n->ct = NULL;
			LIST_ADD(nb_free, n);
			break;
		}
	}
	mutex_unlock(&nb_list_mutex);
	return (0);
}
1938