/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * clnt_vc.c
 *
 * Implements a connectionful client side RPC.
 *
 * Connectionful RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 */
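
/*
 * Illustrative sketch (not part of this file's build) of how a caller
 * batches calls on a connection-oriented handle: a NULL results xdr
 * routine plus a zero timeout queues the call, and a later call with a
 * real results xdr routine and a non-zero timeout flushes the pipeline.
 * BATCH_PROC, the argument 'arg' and the handle 'clnt' are assumed.
 */
#if 0
	struct timeval zero = { 0, 0 };
	struct timeval wait = { 25, 0 };
	char *arg = "a batched argument";

	/* Queued in the send buffer; returns to the caller at once. */
	(void) clnt_call(clnt, BATCH_PROC, xdr_wrapstring, (caddr_t)&arg,
	    (xdrproc_t)0, NULL, zero);
	/* A normal call flushes everything queued ahead of it. */
	(void) clnt_call(clnt, NULLPROC, xdr_void, NULL, xdr_void, NULL,
	    wait);
#endif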


#include "mt.h"
#include "rpc_mt.h"
#include <assert.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <sys/byteorder.h>
#include <sys/mkdev.h>
#include <sys/poll.h>
#include <syslog.h>
#include <stdlib.h>
#include <unistd.h>
#include <netinet/tcp.h>

#define	MCALL_MSG_SIZE	24
#define	SECS_TO_MS	1000
#define	USECS_TO_MS	1/1000	/* unparenthesized on purpose: x * 1/1000 */
#ifndef MIN
#define	MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif

extern int __rpc_timeval_to_msec(struct timeval *);
extern int __rpc_compress_pollfd(int, pollfd_t *, pollfd_t *);
extern bool_t xdr_opaque_auth(XDR *, struct opaque_auth *);
extern bool_t __rpc_gss_wrap(AUTH *, char *, uint_t, XDR *, bool_t (*)(),
    caddr_t);
extern bool_t __rpc_gss_unwrap(AUTH *, XDR *, bool_t (*)(), caddr_t);
extern CLIENT *_clnt_vc_create_timed(int, struct netbuf *, rpcprog_t,
    rpcvers_t, uint_t, uint_t, const struct timeval *);

static struct clnt_ops	*clnt_vc_ops(void);
static int		read_vc(void *, caddr_t, int);
static int		write_vc(void *, caddr_t, int);
static int		t_rcvall(int, char *, int);
static bool_t		time_not_ok(struct timeval *);

struct ct_data;
static bool_t		set_up_connection(int, struct netbuf *,
    struct ct_data *, const struct timeval *);
static bool_t		set_io_mode(struct ct_data *, int);

/*
 * Lock table handle used by various MT sync. routines
 */
static mutex_t	vctbl_lock = DEFAULTMUTEX;
static void	*vctbl = NULL;

static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char clnt_read_vc_str[] = "read_vc";
static const char __no_mem_str[] = "out of memory";
static const char no_fcntl_getfl_str[] =
	"could not get status flags and modes";
static const char no_nonblock_str[] = "could not set transport blocking mode";

/*
 * Private data structure
 */
struct ct_data {
	int		ct_fd;		/* connection's fd */
	bool_t		ct_closeit;	/* close it on destroy */
	int		ct_tsdu;	/* size of tsdu */
	int		ct_wait;	/* wait interval in milliseconds */
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;	/* remote addr */
	struct rpc_err	ct_error;
	char		ct_mcall[MCALL_MSG_SIZE];	/* marshalled callmsg */
	uint_t		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;	/* XDR stream */

	/* NON STANDARD INFO - 00-08-31 */
	bool_t		ct_is_oneway;	/* True if the current call is oneway. */
	bool_t		ct_is_blocking;
	ushort_t	ct_io_mode;
	ushort_t	ct_blocking_mode;
	uint_t		ct_bufferSize;		/* Total size of the buffer. */
	uint_t		ct_bufferPendingSize;	/* Size of unsent data. */
	char		*ct_buffer;		/* Pointer to the buffer. */
	char		*ct_bufferWritePtr;	/* Ptr to the first free byte. */
	char		*ct_bufferReadPtr;	/* Ptr to the first byte of data. */
};

struct nb_reg_node {
	struct nb_reg_node	*next;
	struct ct_data		*ct;
};

static struct nb_reg_node *nb_first = (struct nb_reg_node *)&nb_first;
static struct nb_reg_node *nb_free = (struct nb_reg_node *)&nb_free;

static bool_t exit_handler_set = FALSE;

static mutex_t nb_list_mutex = DEFAULTMUTEX;


/* Define some macros to manage the linked list. */
#define	LIST_ISEMPTY(l)		(l == (struct nb_reg_node *)&l)
#define	LIST_CLR(l)		(l = (struct nb_reg_node *)&l)
#define	LIST_ADD(l, node)	(node->next = l->next, l = node)
#define	LIST_EXTRACT(l, node)	(node = l, l = l->next)
#define	LIST_FOR_EACH(l, node)	\
	for (node = l; node != (struct nb_reg_node *)&l; node = node->next)
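
/*
 * Illustrative sketch (not compiled) of how these macros cooperate:
 * nodes are recycled through nb_free and live registrations sit on
 * nb_first, both guarded by nb_list_mutex above.
 */
#if 0
	struct nb_reg_node *node;

	if (!LIST_ISEMPTY(nb_free)) {
		LIST_EXTRACT(nb_free, node);	/* pop a free node */
		node->ct = ct;
		LIST_ADD(nb_first, node);	/* register the handle */
	}
	LIST_FOR_EACH(nb_first, node)		/* walk the registrations */
		(void) do_flush(node->ct, RPC_CL_BLOCKING_FLUSH);
#endif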


/* Default size of the IO buffer used in non blocking mode */
#define	DEFAULT_PENDING_ZONE_MAX_SIZE	(16*1024)

static int	nb_send(struct ct_data *, void *, unsigned int);
static int	do_flush(struct ct_data *, uint_t);
static bool_t	set_flush_mode(struct ct_data *, int);
static bool_t	set_blocking_connection(struct ct_data *, bool_t);

static int	register_nb(struct ct_data *);
static int	unregister_nb(struct ct_data *);


/*
 * Change the mode of the underlying fd.
 */
static bool_t
set_blocking_connection(struct ct_data *ct, bool_t blocking)
{
	int flag;

	/*
	 * If the underlying fd is already in the required mode,
	 * avoid the syscall.
	 */
	if (ct->ct_is_blocking == blocking)
		return (TRUE);

	if ((flag = fcntl(ct->ct_fd, F_GETFL, 0)) < 0) {
		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
		    no_fcntl_getfl_str);
		return (FALSE);
	}

	flag = blocking ? flag & ~O_NONBLOCK : flag | O_NONBLOCK;
	if (fcntl(ct->ct_fd, F_SETFL, flag) != 0) {
		(void) syslog(LOG_ERR, "set_blocking_connection : %s",
		    no_nonblock_str);
		return (FALSE);
	}
	ct->ct_is_blocking = blocking;
	return (TRUE);
}

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  The caller may wish
 * to set this to something more useful.
 *
 * fd should be open and bound.
 */
CLIENT *
clnt_vc_create(const int fd, struct netbuf *svcaddr, const rpcprog_t prog,
	const rpcvers_t vers, const uint_t sendsz, const uint_t recvsz)
{
	return (_clnt_vc_create_timed(fd, svcaddr, prog, vers, sendsz,
	    recvsz, NULL));
}
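
/*
 * Illustrative sketch (not compiled): typical creation of a handle over
 * an already open and bound TLI endpoint.  Filling in 'svcaddr' (e.g.
 * via rpcb_getaddr()) is assumed to have been done by the caller, and
 * RPCBPROG/RPCBVERS merely stand in for a real program and version.
 */
#if 0
	int fd;
	CLIENT *clnt;
	struct netbuf svcaddr;

	fd = t_open("/dev/tcp", O_RDWR, NULL);
	(void) t_bind(fd, NULL, NULL);
	/* ... fill in svcaddr with the server's address ... */
	clnt = clnt_vc_create(fd, &svcaddr, RPCBPROG, RPCBVERS, 0, 0);
	if (clnt == NULL)
		clnt_pcreateerror("clnt_vc_create");
#endif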

/*
 * This has the same definition as clnt_vc_create(), except that it
 * takes an additional parameter - a pointer to a timeval structure.
 *
 * Not a public interface.  This is for clnt_create_timed,
 * clnt_create_vers_timed, clnt_tp_create_timed to pass down the timeout
 * value to control a tcp connection attempt.
 * (for bug 4049792: clnt_create_timed does not time out)
 *
 * If tp is NULL, use the default timeout to set up the connection.
 */
CLIENT *
_clnt_vc_create_timed(int fd, struct netbuf *svcaddr, rpcprog_t prog,
	rpcvers_t vers, uint_t sendsz, uint_t recvsz, const struct timeval *tp)
{
	CLIENT *cl;			/* client handle */
	struct ct_data *ct;		/* private data */
	struct timeval now;
	struct rpc_msg call_msg;
	struct t_info tinfo;
	int flag;

	cl = malloc(sizeof (*cl));
	ct = malloc(sizeof (*ct));
	if ((cl == NULL) || (ct == NULL)) {
		(void) syslog(LOG_ERR, clnt_vc_errstr,
		    clnt_vc_str, __no_mem_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		rpc_createerr.cf_error.re_terrno = 0;
		goto err;
	}
	ct->ct_addr.buf = NULL;

	/*
	 * The only use of vctbl_lock is for serializing the creation of
	 * vctbl.  Once created the lock needs to be released so we don't
	 * hold it across the set_up_connection() call and end up with a
	 * bunch of threads stuck waiting for the mutex.
	 */
	sig_mutex_lock(&vctbl_lock);

	if ((vctbl == NULL) && ((vctbl = rpc_fd_init()) == NULL)) {
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		rpc_createerr.cf_error.re_terrno = 0;
		sig_mutex_unlock(&vctbl_lock);
		goto err;
	}

	sig_mutex_unlock(&vctbl_lock);

	ct->ct_io_mode = RPC_CL_BLOCKING;
	ct->ct_blocking_mode = RPC_CL_BLOCKING_FLUSH;

	ct->ct_buffer = NULL;	/* We allocate the buffer when needed. */
	ct->ct_bufferSize = DEFAULT_PENDING_ZONE_MAX_SIZE;
	ct->ct_bufferPendingSize = 0;
	ct->ct_bufferWritePtr = NULL;
	ct->ct_bufferReadPtr = NULL;

	/* Check the current state of the fd. */
	if ((flag = fcntl(fd, F_GETFL, 0)) < 0) {
		(void) syslog(LOG_ERR, "_clnt_vc_create_timed : %s",
		    no_fcntl_getfl_str);
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_terrno = errno;
		rpc_createerr.cf_error.re_errno = 0;
		goto err;
	}
	ct->ct_is_blocking = flag & O_NONBLOCK ? FALSE : TRUE;

	if (set_up_connection(fd, svcaddr, ct, tp) == FALSE) {
		goto err;
	}

	/*
	 * Set up other members of private data struct
	 */
	ct->ct_fd = fd;
	/*
	 * The actual value will be set by clnt_call or clnt_control
	 */
	ct->ct_wait = 30000;
	ct->ct_waitset = FALSE;
	/*
	 * By default, closeit is always FALSE.  It is the user's
	 * responsibility to do a t_close on the fd, or the user may use
	 * clnt_control to let clnt_destroy do it.
	 */
	ct->ct_closeit = FALSE;

	/*
	 * Initialize call message
	 */
	(void) gettimeofday(&now, (struct timezone *)0);
	call_msg.rm_xid = getpid() ^ now.tv_sec ^ now.tv_usec;
	call_msg.rm_call.cb_prog = prog;
	call_msg.rm_call.cb_vers = vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_mcall, MCALL_MSG_SIZE, XDR_ENCODE);
	if (!xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		goto err;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));

	if (t_getinfo(fd, &tinfo) == -1) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_terrno = t_errno;
		rpc_createerr.cf_error.re_errno = 0;
		goto err;
	}
	/*
	 * Find the receive and the send size
	 */
	sendsz = __rpc_get_t_size((int)sendsz, tinfo.tsdu);
	recvsz = __rpc_get_t_size((int)recvsz, tinfo.tsdu);
	if ((sendsz == 0) || (recvsz == 0)) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_terrno = 0;
		rpc_createerr.cf_error.re_errno = 0;
		goto err;
	}
	ct->ct_tsdu = tinfo.tsdu;
	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	ct->ct_xdrs.x_ops = NULL;
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz, (caddr_t)ct,
	    read_vc, write_vc);
	if (ct->ct_xdrs.x_ops == NULL) {
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_terrno = 0;
		rpc_createerr.cf_error.re_errno = ENOMEM;
		goto err;
	}
	cl->cl_ops = clnt_vc_ops();
	cl->cl_private = (caddr_t)ct;
	cl->cl_auth = authnone_create();
	cl->cl_tp = NULL;
	cl->cl_netid = NULL;
	return (cl);

err:
	if (cl) {
		if (ct) {
			if (ct->ct_addr.len)
				free(ct->ct_addr.buf);
			free(ct);
		}
		free(cl);
	}
	return (NULL);
}

#define	TCPOPT_BUFSIZE	128

/*
 * Set the tcp connection timeout value.
 * Return 0 for success, -1 for failure.
 */
static int
_set_tcp_conntime(int fd, int optval)
{
	struct t_optmgmt req, res;
	struct opthdr *opt;
	int *ip;
	char buf[TCPOPT_BUFSIZE];

	/* LINTED pointer cast */
	opt = (struct opthdr *)buf;
	opt->level = IPPROTO_TCP;
	opt->name = TCP_CONN_ABORT_THRESHOLD;
	opt->len = sizeof (int);

	req.flags = T_NEGOTIATE;
	req.opt.len = sizeof (struct opthdr) + opt->len;
	req.opt.buf = (char *)opt;
	/* LINTED pointer cast */
	ip = (int *)((char *)buf + sizeof (struct opthdr));
	*ip = optval;

	res.flags = 0;
	res.opt.buf = (char *)buf;
	res.opt.maxlen = sizeof (buf);
	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
		return (-1);
	}
	return (0);
}

/*
 * Get the current tcp connection timeout value.
 * Return the timeout in milliseconds for success, -1 for failure.
 */
static int
_get_tcp_conntime(int fd)
{
	struct t_optmgmt req, res;
	struct opthdr *opt;
	int *ip, retval;
	char buf[TCPOPT_BUFSIZE];

	/* LINTED pointer cast */
	opt = (struct opthdr *)buf;
	opt->level = IPPROTO_TCP;
	opt->name = TCP_CONN_ABORT_THRESHOLD;
	opt->len = sizeof (int);

	req.flags = T_CURRENT;
	req.opt.len = sizeof (struct opthdr) + opt->len;
	req.opt.buf = (char *)opt;
	/* LINTED pointer cast */
	ip = (int *)((char *)buf + sizeof (struct opthdr));
	*ip = 0;

	res.flags = 0;
	res.opt.buf = (char *)buf;
	res.opt.maxlen = sizeof (buf);
	if (t_optmgmt(fd, &req, &res) < 0 || res.flags != T_SUCCESS) {
		return (-1);
	}

	/* LINTED pointer cast */
	ip = (int *)((char *)buf + sizeof (struct opthdr));
	retval = *ip;
	return (retval);
}

static bool_t
set_up_connection(int fd, struct netbuf *svcaddr, struct ct_data *ct,
	const struct timeval *tp)
{
	int state;
	struct t_call sndcallstr, *rcvcall;
	int nconnect;
	bool_t connected, do_rcv_connect;
	int curr_time = 0;

	ct->ct_addr.len = 0;
	state = t_getstate(fd);
	if (state == -1) {
		rpc_createerr.cf_stat = RPC_TLIERROR;
		rpc_createerr.cf_error.re_errno = 0;
		rpc_createerr.cf_error.re_terrno = t_errno;
		return (FALSE);
	}

#ifdef DEBUG
	fprintf(stderr, "set_up_connection: state = %d\n", state);
#endif
	switch (state) {
	case T_IDLE:
		if (svcaddr == NULL) {
			rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
			return (FALSE);
		}
		/*
		 * Connect only if state is IDLE and svcaddr known
		 */
		/* LINTED pointer alignment */
		rcvcall = (struct t_call *)t_alloc(fd, T_CALL, T_OPT|T_ADDR);
		if (rcvcall == NULL) {
			rpc_createerr.cf_stat = RPC_TLIERROR;
			rpc_createerr.cf_error.re_terrno = t_errno;
			rpc_createerr.cf_error.re_errno = errno;
			return (FALSE);
		}
		rcvcall->udata.maxlen = 0;
		sndcallstr.addr = *svcaddr;
		sndcallstr.opt.len = 0;
		sndcallstr.udata.len = 0;
		/*
		 * NULL could even have sufficed for rcvcall, because the
		 * address returned is the same in all cases except the
		 * gateway case, for which it is required.
		 */
		connected = FALSE;
		do_rcv_connect = FALSE;

		/*
		 * If there is a timeout value specified, we will try to
		 * reset the tcp connection timeout.  If the transport does
		 * not support the TCP_CONN_ABORT_THRESHOLD option or fails
		 * for some other reason, the default timeout will be used.
		 */
		if (tp != NULL) {
			int ms;

			/*
			 * TCP_CONN_ABORT_THRESHOLD takes an int value
			 * in milliseconds.
			 */
			ms = tp->tv_sec * SECS_TO_MS +
			    tp->tv_usec * USECS_TO_MS;
			if (((curr_time = _get_tcp_conntime(fd)) != -1) &&
			    (_set_tcp_conntime(fd, ms) == 0)) {
				/* EMPTY */
#ifdef DEBUG
				fprintf(stderr, "set_up_connection: set tcp ");
				fprintf(stderr,
				    "connection timeout to %d ms\n", ms);
#endif
			}
		}

		for (nconnect = 0; nconnect < 3; nconnect++) {
			if (t_connect(fd, &sndcallstr, rcvcall) != -1) {
				connected = TRUE;
				break;
			}
			if (t_errno == TLOOK) {
				switch (t_look(fd)) {
				case T_DISCONNECT:
					(void) t_rcvdis(fd,
					    (struct t_discon *)NULL);
					break;
				default:
					break;
				}
			} else if (!(t_errno == TSYSERR && errno == EINTR)) {
				break;
			}
			if ((state = t_getstate(fd)) == T_OUTCON) {
				do_rcv_connect = TRUE;
				break;
			}
			if (state != T_IDLE) {
				break;
			}
		}
		if (do_rcv_connect) {
			do {
				if (t_rcvconnect(fd, rcvcall) != -1) {
					connected = TRUE;
					break;
				}
			} while (t_errno == TSYSERR && errno == EINTR);
		}

		/*
		 * Set the connection timeout back to its old value.
		 */
		if (curr_time) {
			(void) _set_tcp_conntime(fd, curr_time);
		}

		if (!connected) {
			rpc_createerr.cf_stat = RPC_TLIERROR;
			rpc_createerr.cf_error.re_terrno = t_errno;
			rpc_createerr.cf_error.re_errno = errno;
			(void) t_free((char *)rcvcall, T_CALL);
#ifdef DEBUG
			fprintf(stderr, "clnt_vc: t_connect error %d\n",
			    rpc_createerr.cf_error.re_terrno);
#endif
			return (FALSE);
		}

		/* Free old area if allocated */
		if (ct->ct_addr.buf)
			free(ct->ct_addr.buf);
		ct->ct_addr = rcvcall->addr;	/* To get the new address */
		/* So that address buf does not get freed */
		rcvcall->addr.buf = NULL;
		(void) t_free((char *)rcvcall, T_CALL);
		break;
	case T_DATAXFER:
	case T_OUTCON:
		if (svcaddr == NULL) {
			/*
			 * svcaddr could also be NULL in cases where the
			 * client is already bound and connected.
			 */
			ct->ct_addr.len = 0;
		} else {
			ct->ct_addr.buf = malloc(svcaddr->len);
			if (ct->ct_addr.buf == NULL) {
				(void) syslog(LOG_ERR, clnt_vc_errstr,
				    clnt_vc_str, __no_mem_str);
				rpc_createerr.cf_stat = RPC_SYSTEMERROR;
				rpc_createerr.cf_error.re_errno = errno;
				rpc_createerr.cf_error.re_terrno = 0;
				return (FALSE);
			}
			(void) memcpy(ct->ct_addr.buf, svcaddr->buf,
			    (size_t)svcaddr->len);
			ct->ct_addr.len = ct->ct_addr.maxlen = svcaddr->len;
		}
		break;
	default:
		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
		return (FALSE);
	}
	return (TRUE);
}

static enum clnt_stat
clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, caddr_t args_ptr,
	xdrproc_t xdr_results, caddr_t results_ptr, struct timeval timeout)
{
	/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	struct rpc_msg reply_msg;
	uint32_t x_id;
	/* LINTED pointer alignment */
	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */
	bool_t shipnow;
	int refreshes = 2;

	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
		rpc_callerr.re_status = RPC_FAILED;
		rpc_callerr.re_errno = errno;
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (RPC_FAILED);
	}

	ct->ct_is_oneway = FALSE;
	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
		if (do_flush(ct, RPC_CL_BLOCKING_FLUSH) != 0) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (RPC_FAILED);	/* XXX */
		}
	}

	if (!ct->ct_waitset) {
		/* If time is not within limits, we ignore it. */
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = __rpc_timeval_to_msec(&timeout);
	} else {
		timeout.tv_sec = (ct->ct_wait / 1000);
		timeout.tv_usec = (ct->ct_wait % 1000) * 1000;
	}

	shipnow = ((xdr_results == (xdrproc_t)0) && (timeout.tv_sec == 0) &&
	    (timeout.tv_usec == 0)) ? FALSE : TRUE;
call_again:
	xdrs->x_op = XDR_ENCODE;
	rpc_callerr.re_status = RPC_SUCCESS;
	/*
	 * Due to little endian byte order, it is necessary to convert to host
	 * format before decrementing xid.
	 */
	x_id = ntohl(*msg_x_id) - 1;
	*msg_x_id = htonl(x_id);

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((!XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
		    (!XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
		    (!AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (!xdr_args(xdrs, args_ptr))) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
	} else {
		/* LINTED pointer alignment */
		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
		IXDR_PUT_U_INT32(u, proc);
		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
	}
	if (!xdrrec_endofrecord(xdrs, shipnow)) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (rpc_callerr.re_status = RPC_CANTSEND);
	}
	if (!shipnow) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (rpc_callerr.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	for (;;) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (!xdrrec_skiprecord(xdrs)) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
		/* now decode and validate the response header */
		if (!xdr_replymsg(xdrs, &reply_msg)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				continue;
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	if ((reply_msg.rm_reply.rp_stat == MSG_ACCEPTED) &&
	    (reply_msg.acpted_rply.ar_stat == SUCCESS))
		rpc_callerr.re_status = RPC_SUCCESS;
	else
		__seterr_reply(&reply_msg, &(rpc_callerr));

	if (rpc_callerr.re_status == RPC_SUCCESS) {
		if (!AUTH_VALIDATE(cl->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			rpc_callerr.re_status = RPC_AUTHERROR;
			rpc_callerr.re_why = AUTH_INVALIDRESP;
		} else if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
			if (!(*xdr_results)(xdrs, results_ptr)) {
				if (rpc_callerr.re_status == RPC_SUCCESS)
					rpc_callerr.re_status =
					    RPC_CANTDECODERES;
			}
		} else if (!__rpc_gss_unwrap(cl->cl_auth, xdrs, xdr_results,
		    results_ptr)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTDECODERES;
		}
	}	/* end successful completion */
	/*
	 * If unsuccessful AND error is an authentication error
	 * then refresh credentials and try again, else break
	 */
	else if (rpc_callerr.re_status == RPC_AUTHERROR) {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
			goto call_again;
		else
			/*
			 * We are setting rpc_callerr here given that libnsl
			 * is not reentrant, thereby reinitializing the TSD.
			 * If not set here then success could be returned even
			 * though refresh failed.
			 */
			rpc_callerr.re_status = RPC_AUTHERROR;
	}	/* end of unsuccessful completion */
	/* free verifier ... */
	if (reply_msg.rm_reply.rp_stat == MSG_ACCEPTED &&
	    reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
		xdrs->x_op = XDR_FREE;
		(void) xdr_opaque_auth(xdrs, &(reply_msg.acpted_rply.ar_verf));
	}
	rpc_fd_unlock(vctbl, ct->ct_fd);
	return (rpc_callerr.re_status);
}

static enum clnt_stat
clnt_vc_send(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, caddr_t args_ptr)
{
	/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	uint32_t x_id;
	/* LINTED pointer alignment */
	uint32_t *msg_x_id = (uint32_t *)(ct->ct_mcall);	/* yuk */

	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
		rpc_callerr.re_status = RPC_FAILED;
		rpc_callerr.re_errno = errno;
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (RPC_FAILED);
	}

	ct->ct_is_oneway = TRUE;

	xdrs->x_op = XDR_ENCODE;
	rpc_callerr.re_status = RPC_SUCCESS;
	/*
	 * Due to little endian byte order, it is necessary to convert to host
	 * format before decrementing xid.
	 */
	x_id = ntohl(*msg_x_id) - 1;
	*msg_x_id = htonl(x_id);

	if (cl->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		if ((!XDR_PUTBYTES(xdrs, ct->ct_mcall, ct->ct_mpos)) ||
		    (!XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
		    (!AUTH_MARSHALL(cl->cl_auth, xdrs)) ||
		    (!xdr_args(xdrs, args_ptr))) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
	} else {
		/* LINTED pointer alignment */
		uint32_t *u = (uint32_t *)&ct->ct_mcall[ct->ct_mpos];
		IXDR_PUT_U_INT32(u, proc);
		if (!__rpc_gss_wrap(cl->cl_auth, ct->ct_mcall,
		    ((char *)u) - ct->ct_mcall, xdrs, xdr_args, args_ptr)) {
			if (rpc_callerr.re_status == RPC_SUCCESS)
				rpc_callerr.re_status = RPC_CANTENCODEARGS;
			(void) xdrrec_endofrecord(xdrs, TRUE);
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (rpc_callerr.re_status);
		}
	}

	/*
	 * Do not need to check errors, as the following code does
	 * not depend on the successful completion of the call.
	 * An error, if any occurs, is reported through
	 * rpc_callerr.re_status.
	 */
	(void) xdrrec_endofrecord(xdrs, TRUE);

	rpc_fd_unlock(vctbl, ct->ct_fd);
	return (rpc_callerr.re_status);
}

/* ARGSUSED */
static void
clnt_vc_geterr(CLIENT *cl, struct rpc_err *errp)
{
	*errp = rpc_callerr;
}

static bool_t
clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, caddr_t res_ptr)
{
	/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	XDR *xdrs = &(ct->ct_xdrs);
	bool_t stat;

	(void) rpc_fd_lock(vctbl, ct->ct_fd);
	xdrs->x_op = XDR_FREE;
	stat = (*xdr_res)(xdrs, res_ptr);
	rpc_fd_unlock(vctbl, ct->ct_fd);
	return (stat);
}

static void
clnt_vc_abort(void)
{
}

/*ARGSUSED*/
static bool_t
clnt_vc_control(CLIENT *cl, int request, char *info)
{
	bool_t ret = FALSE;
	/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;

	if (rpc_fd_lock(vctbl, ct->ct_fd)) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (FALSE);
	}

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (TRUE);
	case CLFLUSH:
		if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
			int res;
			res = do_flush(ct, (info == NULL ||
			    /* LINTED pointer cast */
			    *(int *)info == RPC_CL_DEFAULT_FLUSH) ?
			    /* LINTED pointer cast */
			    ct->ct_blocking_mode : *(int *)info);
			ret = (0 == res);
		}
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (ret);
	}

	/* for other requests which use info */
	if (info == NULL) {
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		/* LINTED pointer alignment */
		if (time_not_ok((struct timeval *)info)) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (FALSE);
		}
		/* LINTED pointer alignment */
		ct->ct_wait = __rpc_timeval_to_msec((struct timeval *)info);
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		/* LINTED pointer alignment */
		((struct timeval *)info)->tv_sec = ct->ct_wait / 1000;
		/* LINTED pointer alignment */
		((struct timeval *)info)->tv_usec =
		    (ct->ct_wait % 1000) * 1000;
		break;
	case CLGET_SERVER_ADDR:	/* For compatibility only */
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		/* LINTED pointer alignment */
		*(int *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		/* LINTED pointer alignment */
		*(struct netbuf *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:	/* set to new address */
#ifdef undef
		/*
		 * XXX: once the t_snddis(), followed by t_connect() starts to
		 * work, this ifdef should be removed.  CLIENT handle reuse
		 * would then be possible for COTS as well.
		 */
		if (t_snddis(ct->ct_fd, NULL) == -1) {
			rpc_createerr.cf_stat = RPC_TLIERROR;
			rpc_createerr.cf_error.re_terrno = t_errno;
			rpc_createerr.cf_error.re_errno = errno;
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (FALSE);
		}
		ret = set_up_connection(ct->ct_fd, (struct netbuf *)info,
		    ct, NULL);
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (ret);
#else
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (FALSE);
#endif
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		/* LINTED pointer alignment */
		*(uint32_t *)info = ntohl(*(uint32_t *)ct->ct_mcall);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		/* LINTED pointer alignment */
		*(uint32_t *)ct->ct_mcall = htonl(*(uint32_t *)info + 1);
		/* increment by 1 as clnt_vc_call() decrements once */
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		/* LINTED pointer alignment */
		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		/* LINTED pointer alignment */
		*(uint32_t *)(ct->ct_mcall + 4 * BYTES_PER_XDR_UNIT) =
		    /* LINTED pointer alignment */
		    htonl(*(uint32_t *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		/* LINTED pointer alignment */
		*(uint32_t *)info = ntohl(*(uint32_t *)(ct->ct_mcall +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		/* LINTED pointer alignment */
		*(uint32_t *)(ct->ct_mcall + 3 * BYTES_PER_XDR_UNIT) =
		    /* LINTED pointer alignment */
		    htonl(*(uint32_t *)info);
		break;

	case CLSET_IO_MODE:
		/* LINTED pointer cast */
		if (!set_io_mode(ct, *(int *)info)) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (FALSE);
		}
		break;
	case CLSET_FLUSH_MODE:
		/* Set a specific FLUSH_MODE */
		/* LINTED pointer cast */
		if (!set_flush_mode(ct, *(int *)info)) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (FALSE);
		}
		break;
	case CLGET_FLUSH_MODE:
		/* LINTED pointer cast */
		*(rpcflushmode_t *)info = ct->ct_blocking_mode;
		break;

	case CLGET_IO_MODE:
		/* LINTED pointer cast */
		*(rpciomode_t *)info = ct->ct_io_mode;
		break;

	case CLGET_CURRENT_REC_SIZE:
		/*
		 * Returns the current amount of memory allocated
		 * to pending requests
		 */
		/* LINTED pointer cast */
		*(int *)info = ct->ct_bufferPendingSize;
		break;

	case CLSET_CONNMAXREC_SIZE:
		/* Cannot resize the buffer if it is used. */
		if (ct->ct_bufferPendingSize != 0) {
			rpc_fd_unlock(vctbl, ct->ct_fd);
			return (FALSE);
		}
		/*
		 * If the new size is equal to the current size,
		 * there is nothing to do.
		 */
		/* LINTED pointer cast */
		if (ct->ct_bufferSize == *(uint_t *)info)
			break;

		/* LINTED pointer cast */
		ct->ct_bufferSize = *(uint_t *)info;
		if (ct->ct_buffer) {
			free(ct->ct_buffer);
			ct->ct_buffer = NULL;
			ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = NULL;
		}
		break;

	case CLGET_CONNMAXREC_SIZE:
		/*
		 * Returns the size of buffer allocated
		 * to pending requests
		 */
		/* LINTED pointer cast */
		*(uint_t *)info = ct->ct_bufferSize;
		break;

	default:
		rpc_fd_unlock(vctbl, ct->ct_fd);
		return (FALSE);
	}
	rpc_fd_unlock(vctbl, ct->ct_fd);
	return (TRUE);
}
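
/*
 * Illustrative sketch (not compiled): switching a handle 'clnt' to the
 * non-blocking I/O mode handled above, then flushing explicitly with a
 * caller-chosen flush mode.
 */
#if 0
	int mode = RPC_CL_NONBLOCKING;
	int flush = RPC_CL_BESTEFFORT_FLUSH;

	if (!clnt_control(clnt, CLSET_IO_MODE, (char *)&mode)) {
		/* the handle stays in blocking mode */
	}
	/* one-way traffic may accumulate; push it out without blocking */
	(void) clnt_control(clnt, CLFLUSH, (char *)&flush);
#endif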

static void
clnt_vc_destroy(CLIENT *cl)
{
	/* LINTED pointer alignment */
	struct ct_data *ct = (struct ct_data *)cl->cl_private;
	int ct_fd = ct->ct_fd;

	(void) rpc_fd_lock(vctbl, ct_fd);

	if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
		(void) do_flush(ct, RPC_CL_BLOCKING_FLUSH);
		(void) unregister_nb(ct);
	}

	if (ct->ct_closeit)
		(void) t_close(ct_fd);
	XDR_DESTROY(&(ct->ct_xdrs));
	if (ct->ct_addr.buf)
		free(ct->ct_addr.buf);
	free(ct);
	if (cl->cl_netid && cl->cl_netid[0])
		free(cl->cl_netid);
	if (cl->cl_tp && cl->cl_tp[0])
		free(cl->cl_tp);
	free(cl);
	rpc_fd_unlock(vctbl, ct_fd);
}
11210Sstevel@tonic-gate
11220Sstevel@tonic-gate /*
11230Sstevel@tonic-gate * Interface between xdr serializer and vc connection.
11240Sstevel@tonic-gate * Behaves like the system calls, read & write, but keeps some error state
11250Sstevel@tonic-gate * around for the rpc level.
11260Sstevel@tonic-gate */
11270Sstevel@tonic-gate static int
read_vc(void * ct_tmp,caddr_t buf,int len)11280Sstevel@tonic-gate read_vc(void *ct_tmp, caddr_t buf, int len)
11290Sstevel@tonic-gate {
11303864Sraf static pthread_key_t pfdp_key = PTHREAD_ONCE_KEY_NP;
11310Sstevel@tonic-gate struct pollfd *pfdp;
11320Sstevel@tonic-gate int npfd; /* total number of pfdp allocated */
11330Sstevel@tonic-gate struct ct_data *ct = ct_tmp;
11340Sstevel@tonic-gate struct timeval starttime;
11350Sstevel@tonic-gate struct timeval curtime;
11360Sstevel@tonic-gate int poll_time;
11370Sstevel@tonic-gate int delta;
11380Sstevel@tonic-gate
1139132Srobinson if (len == 0)
11400Sstevel@tonic-gate return (0);
11410Sstevel@tonic-gate
11420Sstevel@tonic-gate /*
11430Sstevel@tonic-gate * Allocate just one the first time. thr_get_storage() may
11440Sstevel@tonic-gate * return a larger buffer, left over from the last time we were
11450Sstevel@tonic-gate * here, but that's OK. realloc() will deal with it properly.
11460Sstevel@tonic-gate */
11470Sstevel@tonic-gate npfd = 1;
11480Sstevel@tonic-gate pfdp = thr_get_storage(&pfdp_key, sizeof (struct pollfd), free);
11490Sstevel@tonic-gate if (pfdp == NULL) {
11500Sstevel@tonic-gate (void) syslog(LOG_ERR, clnt_vc_errstr,
11510Sstevel@tonic-gate clnt_read_vc_str, __no_mem_str);
11520Sstevel@tonic-gate rpc_callerr.re_status = RPC_SYSTEMERROR;
11530Sstevel@tonic-gate rpc_callerr.re_errno = errno;
11540Sstevel@tonic-gate rpc_callerr.re_terrno = 0;
11550Sstevel@tonic-gate return (-1);
11560Sstevel@tonic-gate }
11570Sstevel@tonic-gate
11580Sstevel@tonic-gate /*
11590Sstevel@tonic-gate * N.B.: slot 0 in the pollfd array is reserved for the file
11600Sstevel@tonic-gate * descriptor we're really interested in (as opposed to the
11610Sstevel@tonic-gate * callback descriptors).
11620Sstevel@tonic-gate */
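/*
 * For example, with two service descriptors registered, npfd ends up
 * as 3: pfdp[0] polls ct_fd while pfdp[1..2] mirror svc_pollfd, so a
 * single poll() serves both the client call and any incoming requests
 * directed at the server side of this process.
 */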
11630Sstevel@tonic-gate pfdp[0].fd = ct->ct_fd;
11640Sstevel@tonic-gate pfdp[0].events = MASKVAL;
11650Sstevel@tonic-gate pfdp[0].revents = 0;
11660Sstevel@tonic-gate poll_time = ct->ct_wait;
1167132Srobinson if (gettimeofday(&starttime, NULL) == -1) {
11680Sstevel@tonic-gate syslog(LOG_ERR, "Unable to get time of day: %m");
11690Sstevel@tonic-gate return (-1);
11700Sstevel@tonic-gate }
11710Sstevel@tonic-gate
11720Sstevel@tonic-gate for (;;) {
11730Sstevel@tonic-gate extern void (*_svc_getreqset_proc)();
11740Sstevel@tonic-gate extern pollfd_t *svc_pollfd;
11750Sstevel@tonic-gate extern int svc_max_pollfd;
11760Sstevel@tonic-gate int fds;
11770Sstevel@tonic-gate
11780Sstevel@tonic-gate /* VARIABLES PROTECTED BY svc_fd_lock: svc_pollfd */
11790Sstevel@tonic-gate
11800Sstevel@tonic-gate if (_svc_getreqset_proc) {
11810Sstevel@tonic-gate sig_rw_rdlock(&svc_fd_lock);
11820Sstevel@tonic-gate
11830Sstevel@tonic-gate /* reallocate pfdp to svc_max_pollfd + 1 */
11840Sstevel@tonic-gate if (npfd != (svc_max_pollfd + 1)) {
11850Sstevel@tonic-gate struct pollfd *tmp_pfdp = realloc(pfdp,
11860Sstevel@tonic-gate sizeof (struct pollfd) *
11870Sstevel@tonic-gate (svc_max_pollfd + 1));
11880Sstevel@tonic-gate if (tmp_pfdp == NULL) {
11890Sstevel@tonic-gate sig_rw_unlock(&svc_fd_lock);
11900Sstevel@tonic-gate (void) syslog(LOG_ERR, clnt_vc_errstr,
11910Sstevel@tonic-gate clnt_read_vc_str, __no_mem_str);
11920Sstevel@tonic-gate rpc_callerr.re_status = RPC_SYSTEMERROR;
11930Sstevel@tonic-gate rpc_callerr.re_errno = errno;
11940Sstevel@tonic-gate rpc_callerr.re_terrno = 0;
11950Sstevel@tonic-gate return (-1);
11960Sstevel@tonic-gate }
11970Sstevel@tonic-gate
11980Sstevel@tonic-gate pfdp = tmp_pfdp;
11990Sstevel@tonic-gate npfd = svc_max_pollfd + 1;
1200132Srobinson (void) pthread_setspecific(pfdp_key, pfdp);
12010Sstevel@tonic-gate }
12020Sstevel@tonic-gate if (npfd > 1)
12030Sstevel@tonic-gate (void) memcpy(&pfdp[1], svc_pollfd,
12040Sstevel@tonic-gate sizeof (struct pollfd) * (npfd - 1));
12050Sstevel@tonic-gate
12060Sstevel@tonic-gate sig_rw_unlock(&svc_fd_lock);
12070Sstevel@tonic-gate } else {
12080Sstevel@tonic-gate npfd = 1; /* don't forget about pfdp[0] */
12090Sstevel@tonic-gate }
12100Sstevel@tonic-gate
12110Sstevel@tonic-gate switch (fds = poll(pfdp, npfd, poll_time)) {
12120Sstevel@tonic-gate case 0:
12130Sstevel@tonic-gate rpc_callerr.re_status = RPC_TIMEDOUT;
12140Sstevel@tonic-gate return (-1);
12150Sstevel@tonic-gate
12160Sstevel@tonic-gate case -1:
12170Sstevel@tonic-gate if (errno != EINTR)
12180Sstevel@tonic-gate continue;
12190Sstevel@tonic-gate else {
12200Sstevel@tonic-gate /*
12210Sstevel@tonic-gate * poll() was interrupted by a signal;
12220Sstevel@tonic-gate * recompute the remaining poll time
12230Sstevel@tonic-gate */
12240Sstevel@tonic-gate
1225132Srobinson if (gettimeofday(&curtime, NULL) == -1) {
12260Sstevel@tonic-gate syslog(LOG_ERR,
12270Sstevel@tonic-gate "Unable to get time of day: %m");
12280Sstevel@tonic-gate errno = 0;
12290Sstevel@tonic-gate continue;
12300Sstevel@tonic-gate }
12310Sstevel@tonic-gate delta = (curtime.tv_sec -
12320Sstevel@tonic-gate starttime.tv_sec) * 1000 +
12330Sstevel@tonic-gate (curtime.tv_usec -
12340Sstevel@tonic-gate starttime.tv_usec) / 1000;
12350Sstevel@tonic-gate poll_time -= delta;
12360Sstevel@tonic-gate if (poll_time < 0) {
12370Sstevel@tonic-gate rpc_callerr.re_status =
12380Sstevel@tonic-gate RPC_TIMEDOUT;
12390Sstevel@tonic-gate errno = 0;
12400Sstevel@tonic-gate return (-1);
12410Sstevel@tonic-gate } else {
12420Sstevel@tonic-gate errno = 0; /* reset it */
12430Sstevel@tonic-gate continue;
12440Sstevel@tonic-gate }
12450Sstevel@tonic-gate }
12460Sstevel@tonic-gate }
12470Sstevel@tonic-gate
12480Sstevel@tonic-gate if (pfdp[0].revents == 0) {
12490Sstevel@tonic-gate /* must be for server side of the house */
12500Sstevel@tonic-gate (*_svc_getreqset_proc)(&pfdp[1], fds);
12510Sstevel@tonic-gate continue; /* do poll again */
12520Sstevel@tonic-gate }
12530Sstevel@tonic-gate
12540Sstevel@tonic-gate if (pfdp[0].revents & POLLNVAL) {
12550Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTRECV;
12560Sstevel@tonic-gate /*
12570Sstevel@tonic-gate * Note: we're faking errno here because we
12580Sstevel@tonic-gate * previously would have expected select() to
12590Sstevel@tonic-gate * return -1 with errno EBADF. Poll(BA_OS)
12600Sstevel@tonic-gate * returns 0 and sets the POLLNVAL revents flag
12610Sstevel@tonic-gate * instead.
12620Sstevel@tonic-gate */
12630Sstevel@tonic-gate rpc_callerr.re_errno = errno = EBADF;
12640Sstevel@tonic-gate return (-1);
12650Sstevel@tonic-gate }
12660Sstevel@tonic-gate
12670Sstevel@tonic-gate if (pfdp[0].revents & (POLLERR | POLLHUP)) {
12680Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTRECV;
12690Sstevel@tonic-gate rpc_callerr.re_errno = errno = EPIPE;
12700Sstevel@tonic-gate return (-1);
12710Sstevel@tonic-gate }
12720Sstevel@tonic-gate break;
12730Sstevel@tonic-gate }
12740Sstevel@tonic-gate
12750Sstevel@tonic-gate switch (len = t_rcvall(ct->ct_fd, buf, len)) {
12760Sstevel@tonic-gate case 0:
12770Sstevel@tonic-gate /* premature eof */
12780Sstevel@tonic-gate rpc_callerr.re_errno = ENOLINK;
12790Sstevel@tonic-gate rpc_callerr.re_terrno = 0;
12800Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTRECV;
12810Sstevel@tonic-gate len = -1; /* it's really an error */
12820Sstevel@tonic-gate break;
12830Sstevel@tonic-gate
12840Sstevel@tonic-gate case -1:
12850Sstevel@tonic-gate rpc_callerr.re_terrno = t_errno;
12860Sstevel@tonic-gate rpc_callerr.re_errno = 0;
12870Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTRECV;
12880Sstevel@tonic-gate break;
12890Sstevel@tonic-gate }
12900Sstevel@tonic-gate return (len);
12910Sstevel@tonic-gate }
12920Sstevel@tonic-gate
12930Sstevel@tonic-gate static int
1294132Srobinson write_vc(void *ct_tmp, caddr_t buf, int len)
12950Sstevel@tonic-gate {
12960Sstevel@tonic-gate int i, cnt;
12970Sstevel@tonic-gate struct ct_data *ct = ct_tmp;
12980Sstevel@tonic-gate int flag;
12990Sstevel@tonic-gate int maxsz;
13000Sstevel@tonic-gate
13010Sstevel@tonic-gate maxsz = ct->ct_tsdu;
13020Sstevel@tonic-gate
13030Sstevel@tonic-gate /* Handle the non-blocking mode */
13040Sstevel@tonic-gate if (ct->ct_is_oneway && ct->ct_io_mode == RPC_CL_NONBLOCKING) {
13050Sstevel@tonic-gate /*
13060Sstevel@tonic-gate * Test a special case here. If the length of the current
13070Sstevel@tonic-gate * write is greater than the transport data unit, and the
13080Sstevel@tonic-gate * mode is non-blocking, we return RPC_CANTSEND.
13090Sstevel@tonic-gate * XXX this is not very clean.
13100Sstevel@tonic-gate */
13110Sstevel@tonic-gate if (maxsz > 0 && len > maxsz) {
13120Sstevel@tonic-gate rpc_callerr.re_terrno = errno;
13130Sstevel@tonic-gate rpc_callerr.re_errno = 0;
13140Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTSEND;
13150Sstevel@tonic-gate return (-1);
13160Sstevel@tonic-gate }
13170Sstevel@tonic-gate
13180Sstevel@tonic-gate len = nb_send(ct, buf, (unsigned)len);
13190Sstevel@tonic-gate if (len == -1) {
13200Sstevel@tonic-gate rpc_callerr.re_terrno = errno;
13210Sstevel@tonic-gate rpc_callerr.re_errno = 0;
13220Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTSEND;
13230Sstevel@tonic-gate } else if (len == -2) {
13240Sstevel@tonic-gate rpc_callerr.re_terrno = 0;
13250Sstevel@tonic-gate rpc_callerr.re_errno = 0;
13260Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTSTORE;
13270Sstevel@tonic-gate }
13280Sstevel@tonic-gate return (len);
13290Sstevel@tonic-gate }
13300Sstevel@tonic-gate
13310Sstevel@tonic-gate if ((maxsz == 0) || (maxsz == -1)) {
13320Sstevel@tonic-gate /*
13330Sstevel@tonic-gate * t_snd() may return -1 for an error on the connection (the
13340Sstevel@tonic-gate * connection needs to be repaired/closed), or -2 for a
13350Sstevel@tonic-gate * flow-control handling error (nothing to do but wait and
13360Sstevel@tonic-gate * try the send again).
13370Sstevel@tonic-gate */
13380Sstevel@tonic-gate if ((len = t_snd(ct->ct_fd, buf, (unsigned)len, 0)) == -1) {
13390Sstevel@tonic-gate rpc_callerr.re_terrno = t_errno;
13400Sstevel@tonic-gate rpc_callerr.re_errno = 0;
13410Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTSEND;
13420Sstevel@tonic-gate }
13430Sstevel@tonic-gate return (len);
13440Sstevel@tonic-gate }
13450Sstevel@tonic-gate
13460Sstevel@tonic-gate /*
13470Sstevel@tonic-gate * This is for those transports that have a maximum size for data.
13480Sstevel@tonic-gate */
13490Sstevel@tonic-gate for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
13500Sstevel@tonic-gate flag = cnt > maxsz ? T_MORE : 0;
13510Sstevel@tonic-gate if ((i = t_snd(ct->ct_fd, buf, (unsigned)MIN(cnt, maxsz),
13520Sstevel@tonic-gate flag)) == -1) {
13530Sstevel@tonic-gate rpc_callerr.re_terrno = t_errno;
13540Sstevel@tonic-gate rpc_callerr.re_errno = 0;
13550Sstevel@tonic-gate rpc_callerr.re_status = RPC_CANTSEND;
13560Sstevel@tonic-gate return (-1);
13570Sstevel@tonic-gate }
13580Sstevel@tonic-gate }
13590Sstevel@tonic-gate return (len);
13600Sstevel@tonic-gate }
13610Sstevel@tonic-gate
13620Sstevel@tonic-gate /*
13630Sstevel@tonic-gate * Receive the required bytes of data, even if it is fragmented.
13640Sstevel@tonic-gate */
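/*
 * t_rcv() sets T_MORE in moreflag while more of the current TSDU
 * remains to be delivered, so the loop below keeps reading until
 * either the caller's len is satisfied or the fragment is exhausted.
 * For example, a 4000-byte request arriving over a transport that
 * delivers 1024 bytes per t_rcv() call completes in four iterations,
 * the first three returning with T_MORE set.
 */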
13650Sstevel@tonic-gate static int
1366132Srobinson t_rcvall(int fd, char *buf, int len)
13670Sstevel@tonic-gate {
13680Sstevel@tonic-gate int moreflag;
13690Sstevel@tonic-gate int final = 0;
13700Sstevel@tonic-gate int res;
13710Sstevel@tonic-gate
13720Sstevel@tonic-gate do {
13730Sstevel@tonic-gate moreflag = 0;
13740Sstevel@tonic-gate res = t_rcv(fd, buf, (unsigned)len, &moreflag);
13750Sstevel@tonic-gate if (res == -1) {
13760Sstevel@tonic-gate if (t_errno == TLOOK)
13770Sstevel@tonic-gate switch (t_look(fd)) {
13780Sstevel@tonic-gate case T_DISCONNECT:
1379132Srobinson (void) t_rcvdis(fd, NULL);
1380132Srobinson (void) t_snddis(fd, NULL);
13810Sstevel@tonic-gate return (-1);
13820Sstevel@tonic-gate case T_ORDREL:
13830Sstevel@tonic-gate /* Received orderly release indication */
1384132Srobinson (void) t_rcvrel(fd);
13850Sstevel@tonic-gate /* Send orderly release indicator */
13860Sstevel@tonic-gate (void) t_sndrel(fd);
13870Sstevel@tonic-gate return (-1);
13880Sstevel@tonic-gate default:
13890Sstevel@tonic-gate return (-1);
13900Sstevel@tonic-gate }
13910Sstevel@tonic-gate } else if (res == 0) {
13920Sstevel@tonic-gate return (0);
13930Sstevel@tonic-gate }
13940Sstevel@tonic-gate final += res;
13950Sstevel@tonic-gate buf += res;
13960Sstevel@tonic-gate len -= res;
13970Sstevel@tonic-gate } while ((len > 0) && (moreflag & T_MORE));
13980Sstevel@tonic-gate return (final);
13990Sstevel@tonic-gate }
14000Sstevel@tonic-gate
14010Sstevel@tonic-gate static struct clnt_ops *
14020Sstevel@tonic-gate clnt_vc_ops(void)
14030Sstevel@tonic-gate {
14040Sstevel@tonic-gate static struct clnt_ops ops;
14050Sstevel@tonic-gate extern mutex_t ops_lock;
14060Sstevel@tonic-gate
14070Sstevel@tonic-gate /* VARIABLES PROTECTED BY ops_lock: ops */
14080Sstevel@tonic-gate
14090Sstevel@tonic-gate sig_mutex_lock(&ops_lock);
14100Sstevel@tonic-gate if (ops.cl_call == NULL) {
14110Sstevel@tonic-gate ops.cl_call = clnt_vc_call;
14120Sstevel@tonic-gate ops.cl_send = clnt_vc_send;
14130Sstevel@tonic-gate ops.cl_abort = clnt_vc_abort;
14140Sstevel@tonic-gate ops.cl_geterr = clnt_vc_geterr;
14150Sstevel@tonic-gate ops.cl_freeres = clnt_vc_freeres;
14160Sstevel@tonic-gate ops.cl_destroy = clnt_vc_destroy;
14170Sstevel@tonic-gate ops.cl_control = clnt_vc_control;
14180Sstevel@tonic-gate }
14190Sstevel@tonic-gate sig_mutex_unlock(&ops_lock);
14200Sstevel@tonic-gate return (&ops);
14210Sstevel@tonic-gate }
14220Sstevel@tonic-gate
14230Sstevel@tonic-gate /*
14240Sstevel@tonic-gate * Make sure that the time is not garbage. A value of -1 is disallowed.
14250Sstevel@tonic-gate * Note: this is different from time_not_ok() in clnt_dg.c.
14260Sstevel@tonic-gate */
14270Sstevel@tonic-gate static bool_t
1428132Srobinson time_not_ok(struct timeval *t)
14290Sstevel@tonic-gate {
14300Sstevel@tonic-gate return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
14310Sstevel@tonic-gate t->tv_usec <= -1 || t->tv_usec > 1000000);
14320Sstevel@tonic-gate }
14330Sstevel@tonic-gate
14340Sstevel@tonic-gate
14350Sstevel@tonic-gate /* Compute the # of bytes that remain until the end of the buffer */
14360Sstevel@tonic-gate #define REMAIN_BYTES(p) (ct->ct_bufferSize-(ct->ct_##p - ct->ct_buffer))
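/*
 * Example: with ct_bufferSize == 8 and ct_bufferWritePtr sitting 5
 * bytes into ct_buffer, REMAIN_BYTES(bufferWritePtr) == 3, i.e. the
 * room left before the circular buffer must wrap back to the start.
 */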
14370Sstevel@tonic-gate
14380Sstevel@tonic-gate static int
14390Sstevel@tonic-gate addInBuffer(struct ct_data *ct, char *dataToAdd, unsigned int nBytes)
14400Sstevel@tonic-gate {
14410Sstevel@tonic-gate if (NULL == ct->ct_buffer) {
14420Sstevel@tonic-gate /* Buffer not allocated yet. */
14430Sstevel@tonic-gate char *buffer;
14440Sstevel@tonic-gate
1445132Srobinson buffer = malloc(ct->ct_bufferSize);
14460Sstevel@tonic-gate if (NULL == buffer) {
14470Sstevel@tonic-gate errno = ENOMEM;
14480Sstevel@tonic-gate return (-1);
14490Sstevel@tonic-gate }
1450132Srobinson (void) memcpy(buffer, dataToAdd, nBytes);
14510Sstevel@tonic-gate
14520Sstevel@tonic-gate ct->ct_buffer = buffer;
14530Sstevel@tonic-gate ct->ct_bufferReadPtr = buffer;
14540Sstevel@tonic-gate ct->ct_bufferWritePtr = buffer + nBytes;
14550Sstevel@tonic-gate ct->ct_bufferPendingSize = nBytes;
14560Sstevel@tonic-gate } else {
14570Sstevel@tonic-gate /*
14580Sstevel@tonic-gate * For an already allocated buffer, two memcpy()
14590Sstevel@tonic-gate * calls might be needed, depending on the current
14600Sstevel@tonic-gate * write position.
14610Sstevel@tonic-gate */
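/*
 * Illustration: with a 10-byte buffer and the write pointer 8 bytes
 * in, appending the 6 bytes "abcdef" takes two copies: "ab" fills the
 * tail of the buffer, then "cdef" wraps to the front, leaving the
 * write pointer 4 bytes into the buffer.
 */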
14620Sstevel@tonic-gate
14630Sstevel@tonic-gate /* Compute the length of the first copy. */
14640Sstevel@tonic-gate int len = MIN(nBytes, REMAIN_BYTES(bufferWritePtr));
14650Sstevel@tonic-gate
14660Sstevel@tonic-gate ct->ct_bufferPendingSize += nBytes;
14670Sstevel@tonic-gate
1468132Srobinson (void) memcpy(ct->ct_bufferWritePtr, dataToAdd, len);
14690Sstevel@tonic-gate ct->ct_bufferWritePtr += len;
14700Sstevel@tonic-gate nBytes -= len;
14710Sstevel@tonic-gate if (0 == nBytes) {
14720Sstevel@tonic-gate /* The single memcpy() was enough. */
14730Sstevel@tonic-gate
14740Sstevel@tonic-gate /*
14750Sstevel@tonic-gate * If the write pointer is at the end of the buffer,
14760Sstevel@tonic-gate * wrap it now.
14770Sstevel@tonic-gate */
14780Sstevel@tonic-gate if (ct->ct_bufferWritePtr ==
14790Sstevel@tonic-gate (ct->ct_buffer + ct->ct_bufferSize)) {
14800Sstevel@tonic-gate ct->ct_bufferWritePtr = ct->ct_buffer;
14810Sstevel@tonic-gate }
14820Sstevel@tonic-gate } else {
14830Sstevel@tonic-gate /* A second memcpy() is needed. */
14840Sstevel@tonic-gate dataToAdd += len;
14850Sstevel@tonic-gate
14860Sstevel@tonic-gate /*
14870Sstevel@tonic-gate * Copy the remaining data to the beginning of the
14880Sstevel@tonic-gate * buffer
14890Sstevel@tonic-gate */
1490132Srobinson (void) memcpy(ct->ct_buffer, dataToAdd, nBytes);
14910Sstevel@tonic-gate ct->ct_bufferWritePtr = ct->ct_buffer + nBytes;
14920Sstevel@tonic-gate }
14930Sstevel@tonic-gate }
14940Sstevel@tonic-gate return (0);
14950Sstevel@tonic-gate }
14960Sstevel@tonic-gate
14970Sstevel@tonic-gate static void
14980Sstevel@tonic-gate consumeFromBuffer(struct ct_data *ct, unsigned int nBytes)
14990Sstevel@tonic-gate {
15000Sstevel@tonic-gate ct->ct_bufferPendingSize -= nBytes;
15010Sstevel@tonic-gate if (ct->ct_bufferPendingSize == 0) {
15020Sstevel@tonic-gate /*
15030Sstevel@tonic-gate * If the buffer contains no data, we set the two pointers at
15040Sstevel@tonic-gate * the beginning of the buffer (to minimize buffer wraps).
15050Sstevel@tonic-gate */
15060Sstevel@tonic-gate ct->ct_bufferReadPtr = ct->ct_bufferWritePtr = ct->ct_buffer;
15070Sstevel@tonic-gate } else {
15080Sstevel@tonic-gate ct->ct_bufferReadPtr += nBytes;
15090Sstevel@tonic-gate if (ct->ct_bufferReadPtr >
15100Sstevel@tonic-gate ct->ct_buffer + ct->ct_bufferSize) {
15110Sstevel@tonic-gate ct->ct_bufferReadPtr -= ct->ct_bufferSize;
15120Sstevel@tonic-gate }
15130Sstevel@tonic-gate }
15140Sstevel@tonic-gate }
15150Sstevel@tonic-gate
15160Sstevel@tonic-gate static int
15170Sstevel@tonic-gate iovFromBuffer(struct ct_data *ct, struct iovec *iov)
15180Sstevel@tonic-gate {
15190Sstevel@tonic-gate int l;
15200Sstevel@tonic-gate
15210Sstevel@tonic-gate if (ct->ct_bufferPendingSize == 0)
15220Sstevel@tonic-gate return (0);
15230Sstevel@tonic-gate
15240Sstevel@tonic-gate l = REMAIN_BYTES(bufferReadPtr);
15250Sstevel@tonic-gate if (l < ct->ct_bufferPendingSize) {
15260Sstevel@tonic-gate /* Buffer in two fragments. */
15270Sstevel@tonic-gate iov[0].iov_base = ct->ct_bufferReadPtr;
15280Sstevel@tonic-gate iov[0].iov_len = l;
15290Sstevel@tonic-gate
15300Sstevel@tonic-gate iov[1].iov_base = ct->ct_buffer;
15310Sstevel@tonic-gate iov[1].iov_len = ct->ct_bufferPendingSize - l;
15320Sstevel@tonic-gate return (2);
15330Sstevel@tonic-gate } else {
15340Sstevel@tonic-gate /* Buffer in one fragment. */
15350Sstevel@tonic-gate iov[0].iov_base = ct->ct_bufferReadPtr;
15360Sstevel@tonic-gate iov[0].iov_len = ct->ct_bufferPendingSize;
15370Sstevel@tonic-gate return (1);
15380Sstevel@tonic-gate }
15390Sstevel@tonic-gate }
15400Sstevel@tonic-gate
15410Sstevel@tonic-gate static bool_t
15420Sstevel@tonic-gate set_flush_mode(struct ct_data *ct, int mode)
15430Sstevel@tonic-gate {
15440Sstevel@tonic-gate switch (mode) {
15450Sstevel@tonic-gate case RPC_CL_BLOCKING_FLUSH:
15460Sstevel@tonic-gate /* flush the buffer completely (possibly blocking) */
15470Sstevel@tonic-gate case RPC_CL_BESTEFFORT_FLUSH:
15480Sstevel@tonic-gate /* flush as much as possible without blocking */
15490Sstevel@tonic-gate case RPC_CL_DEFAULT_FLUSH:
15500Sstevel@tonic-gate /* flush according to the currently defined policy */
15510Sstevel@tonic-gate ct->ct_blocking_mode = mode;
15520Sstevel@tonic-gate return (TRUE);
15530Sstevel@tonic-gate default:
15540Sstevel@tonic-gate return (FALSE);
15550Sstevel@tonic-gate }
15560Sstevel@tonic-gate }
15570Sstevel@tonic-gate
15580Sstevel@tonic-gate static bool_t
15590Sstevel@tonic-gate set_io_mode(struct ct_data *ct, int ioMode)
15600Sstevel@tonic-gate {
15610Sstevel@tonic-gate switch (ioMode) {
15620Sstevel@tonic-gate case RPC_CL_BLOCKING:
15630Sstevel@tonic-gate if (ct->ct_io_mode == RPC_CL_NONBLOCKING) {
15640Sstevel@tonic-gate if (NULL != ct->ct_buffer) {
15650Sstevel@tonic-gate /*
15660Sstevel@tonic-gate * If a buffer was allocated for this
15670Sstevel@tonic-gate * connection, flush it now, and free it.
15680Sstevel@tonic-gate */
1569132Srobinson (void) do_flush(ct, RPC_CL_BLOCKING_FLUSH);
15700Sstevel@tonic-gate free(ct->ct_buffer);
15710Sstevel@tonic-gate ct->ct_buffer = NULL;
15720Sstevel@tonic-gate }
1573132Srobinson (void) unregister_nb(ct);
15740Sstevel@tonic-gate ct->ct_io_mode = ioMode;
15750Sstevel@tonic-gate }
15760Sstevel@tonic-gate break;
15770Sstevel@tonic-gate case RPC_CL_NONBLOCKING:
15780Sstevel@tonic-gate if (ct->ct_io_mode == RPC_CL_BLOCKING) {
15790Sstevel@tonic-gate if (-1 == register_nb(ct)) {
15800Sstevel@tonic-gate return (FALSE);
15810Sstevel@tonic-gate }
15820Sstevel@tonic-gate ct->ct_io_mode = ioMode;
15830Sstevel@tonic-gate }
15840Sstevel@tonic-gate break;
15850Sstevel@tonic-gate default:
15860Sstevel@tonic-gate return (FALSE);
15870Sstevel@tonic-gate }
15880Sstevel@tonic-gate return (TRUE);
15890Sstevel@tonic-gate }
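/*
 * Applications reach set_io_mode() through clnt_control(); a caller
 * that wants buffered, non-blocking sends would do something like
 * the following (sketch only; see the CLSET_IO_MODE case in
 * clnt_vc_control()):
 *
 *	int mode = RPC_CL_NONBLOCKING;
 *	(void) clnt_control(cl, CLSET_IO_MODE, (char *)&mode);
 */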
15900Sstevel@tonic-gate
15910Sstevel@tonic-gate static int
15920Sstevel@tonic-gate do_flush(struct ct_data *ct, uint_t flush_mode)
15930Sstevel@tonic-gate {
15940Sstevel@tonic-gate int result;
15950Sstevel@tonic-gate if (ct->ct_bufferPendingSize == 0) {
15960Sstevel@tonic-gate return (0);
15970Sstevel@tonic-gate }
15980Sstevel@tonic-gate
15990Sstevel@tonic-gate switch (flush_mode) {
16000Sstevel@tonic-gate case RPC_CL_BLOCKING_FLUSH:
16010Sstevel@tonic-gate if (!set_blocking_connection(ct, TRUE)) {
16020Sstevel@tonic-gate return (-1);
16030Sstevel@tonic-gate }
16040Sstevel@tonic-gate while (ct->ct_bufferPendingSize > 0) {
16050Sstevel@tonic-gate if (REMAIN_BYTES(bufferReadPtr) <
16060Sstevel@tonic-gate ct->ct_bufferPendingSize) {
16070Sstevel@tonic-gate struct iovec iov[2];
1608132Srobinson (void) iovFromBuffer(ct, iov);
16090Sstevel@tonic-gate result = writev(ct->ct_fd, iov, 2);
16100Sstevel@tonic-gate } else {
16110Sstevel@tonic-gate result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
16120Sstevel@tonic-gate ct->ct_bufferPendingSize, 0);
16130Sstevel@tonic-gate }
16140Sstevel@tonic-gate if (result < 0) {
16150Sstevel@tonic-gate return (-1);
16160Sstevel@tonic-gate }
16170Sstevel@tonic-gate consumeFromBuffer(ct, result);
16180Sstevel@tonic-gate }
16190Sstevel@tonic-gate
16200Sstevel@tonic-gate break;
16210Sstevel@tonic-gate
16220Sstevel@tonic-gate case RPC_CL_BESTEFFORT_FLUSH:
1623132Srobinson (void) set_blocking_connection(ct, FALSE);
16240Sstevel@tonic-gate if (REMAIN_BYTES(bufferReadPtr) < ct->ct_bufferPendingSize) {
16250Sstevel@tonic-gate struct iovec iov[2];
1626132Srobinson (void) iovFromBuffer(ct, iov);
16270Sstevel@tonic-gate result = writev(ct->ct_fd, iov, 2);
16280Sstevel@tonic-gate } else {
16290Sstevel@tonic-gate result = t_snd(ct->ct_fd, ct->ct_bufferReadPtr,
16300Sstevel@tonic-gate ct->ct_bufferPendingSize, 0);
16310Sstevel@tonic-gate }
16320Sstevel@tonic-gate if (result < 0) {
16330Sstevel@tonic-gate if (errno != EWOULDBLOCK) {
16340Sstevel@tonic-gate perror("flush");
16350Sstevel@tonic-gate return (-1);
16360Sstevel@tonic-gate }
16370Sstevel@tonic-gate return (0);
16380Sstevel@tonic-gate }
16390Sstevel@tonic-gate if (result > 0)
16400Sstevel@tonic-gate consumeFromBuffer(ct, result);
16410Sstevel@tonic-gate break;
16420Sstevel@tonic-gate }
16430Sstevel@tonic-gate return (0);
16440Sstevel@tonic-gate }
16450Sstevel@tonic-gate
16460Sstevel@tonic-gate /*
16470Sstevel@tonic-gate * Non-blocking send.
16480Sstevel@tonic-gate */
16490Sstevel@tonic-gate
16500Sstevel@tonic-gate static int
16510Sstevel@tonic-gate nb_send(struct ct_data *ct, void *buff, unsigned int nBytes)
16520Sstevel@tonic-gate {
16530Sstevel@tonic-gate int result;
16540Sstevel@tonic-gate
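/*
 * The first four bytes of buff are the RPC record mark; its
 * high-order bit flags the last fragment of a record. Refuse
 * to queue anything that is not a complete, final fragment.
 */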
16550Sstevel@tonic-gate if (!(ntohl(*(uint32_t *)buff) & 0x80000000U)) {
16560Sstevel@tonic-gate return (-1);
16570Sstevel@tonic-gate }
16580Sstevel@tonic-gate
16590Sstevel@tonic-gate /*
16600Sstevel@tonic-gate * Check to see if the current message can be stored fully in the
16610Sstevel@tonic-gate * buffer. We have to check this now because it may be impossible
16620Sstevel@tonic-gate * to send any data, so the message must be stored in the buffer.
16630Sstevel@tonic-gate */
16640Sstevel@tonic-gate if (nBytes > (ct->ct_bufferSize - ct->ct_bufferPendingSize)) {
16650Sstevel@tonic-gate /* Try to flush (to free some space). */
1666132Srobinson (void) do_flush(ct, RPC_CL_BESTEFFORT_FLUSH);
16670Sstevel@tonic-gate
16680Sstevel@tonic-gate /* Can we store the message now? */
16690Sstevel@tonic-gate if (nBytes > (ct->ct_bufferSize - ct->ct_bufferPendingSize))
16700Sstevel@tonic-gate return (-2);
16710Sstevel@tonic-gate }
16720Sstevel@tonic-gate
1673132Srobinson (void) set_blocking_connection(ct, FALSE);
16740Sstevel@tonic-gate
16750Sstevel@tonic-gate /*
16760Sstevel@tonic-gate * If there is no data pending, we can simply try
16770Sstevel@tonic-gate * to send our data.
16780Sstevel@tonic-gate */
16790Sstevel@tonic-gate if (ct->ct_bufferPendingSize == 0) {
16800Sstevel@tonic-gate result = t_snd(ct->ct_fd, buff, nBytes, 0);
16810Sstevel@tonic-gate if (result == -1) {
16820Sstevel@tonic-gate if (errno == EWOULDBLOCK) {
16830Sstevel@tonic-gate result = 0;
16840Sstevel@tonic-gate } else {
16850Sstevel@tonic-gate perror("send");
16860Sstevel@tonic-gate return (-1);
16870Sstevel@tonic-gate }
16880Sstevel@tonic-gate }
16890Sstevel@tonic-gate /*
16900Sstevel@tonic-gate * If we have not sent all of the data, we must
16910Sstevel@tonic-gate * store the remainder in the buffer.
16920Sstevel@tonic-gate */
16930Sstevel@tonic-gate if (result != nBytes) {
16940Sstevel@tonic-gate if (addInBuffer(ct, (char *)buff + result,
16950Sstevel@tonic-gate nBytes - result) == -1) {
16960Sstevel@tonic-gate return (-1);
16970Sstevel@tonic-gate }
16980Sstevel@tonic-gate }
16990Sstevel@tonic-gate } else {
17000Sstevel@tonic-gate /*
17010Sstevel@tonic-gate * Some data is pending in the buffer. We try to send
17020Sstevel@tonic-gate * both the buffered data and the current message in one shot.
17030Sstevel@tonic-gate */
17040Sstevel@tonic-gate struct iovec iov[3];
17050Sstevel@tonic-gate int i = iovFromBuffer(ct, &iov[0]);
17060Sstevel@tonic-gate
17070Sstevel@tonic-gate iov[i].iov_base = buff;
17080Sstevel@tonic-gate iov[i].iov_len = nBytes;
17090Sstevel@tonic-gate
17100Sstevel@tonic-gate result = writev(ct->ct_fd, iov, i+1);
17110Sstevel@tonic-gate if (result == -1) {
17120Sstevel@tonic-gate if (errno == EWOULDBLOCK) {
17130Sstevel@tonic-gate /* No bytes sent */
17140Sstevel@tonic-gate result = 0;
17150Sstevel@tonic-gate } else {
17160Sstevel@tonic-gate return (-1);
17170Sstevel@tonic-gate }
17180Sstevel@tonic-gate }
17190Sstevel@tonic-gate
17200Sstevel@tonic-gate /*
17210Sstevel@tonic-gate * Add the bytes from the message
17220Sstevel@tonic-gate * that we have not sent.
17230Sstevel@tonic-gate */
17240Sstevel@tonic-gate if (result <= ct->ct_bufferPendingSize) {
17250Sstevel@tonic-gate /* No bytes from the message sent */
17260Sstevel@tonic-gate consumeFromBuffer(ct, result);
17270Sstevel@tonic-gate if (addInBuffer(ct, buff, nBytes) == -1) {
17280Sstevel@tonic-gate return (-1);
17290Sstevel@tonic-gate }
17300Sstevel@tonic-gate } else {
17310Sstevel@tonic-gate /*
17320Sstevel@tonic-gate * Some bytes of the message were sent.
17330Sstevel@tonic-gate * Compute the length of the message that has
17340Sstevel@tonic-gate * been sent.
17350Sstevel@tonic-gate */
17360Sstevel@tonic-gate int len = result - ct->ct_bufferPendingSize;
17370Sstevel@tonic-gate
17380Sstevel@tonic-gate /* So, empty the buffer. */
17390Sstevel@tonic-gate ct->ct_bufferReadPtr = ct->ct_buffer;
17400Sstevel@tonic-gate ct->ct_bufferWritePtr = ct->ct_buffer;
17410Sstevel@tonic-gate ct->ct_bufferPendingSize = 0;
17420Sstevel@tonic-gate
17430Sstevel@tonic-gate /* And add the remaining part of the message. */
17440Sstevel@tonic-gate if (len != nBytes) {
17450Sstevel@tonic-gate if (addInBuffer(ct, (char *)buff + len,
17460Sstevel@tonic-gate nBytes-len) == -1) {
17470Sstevel@tonic-gate return (-1);
17480Sstevel@tonic-gate }
17490Sstevel@tonic-gate }
17500Sstevel@tonic-gate }
17510Sstevel@tonic-gate }
17520Sstevel@tonic-gate return (nBytes);
17530Sstevel@tonic-gate }
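/*
 * nb_send() returns nBytes when the message was sent or buffered in
 * its entirety, -1 on a transport error, and -2 when the message
 * could not even be stored; write_vc() maps these to RPC_CANTSEND
 * and RPC_CANTSTORE respectively.
 */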
17540Sstevel@tonic-gate
17550Sstevel@tonic-gate static void
1756132Srobinson flush_registered_clients(void)
17570Sstevel@tonic-gate {
17580Sstevel@tonic-gate struct nb_reg_node *node;
17590Sstevel@tonic-gate
17600Sstevel@tonic-gate if (LIST_ISEMPTY(nb_first)) {
17610Sstevel@tonic-gate return;
17620Sstevel@tonic-gate }
17630Sstevel@tonic-gate
17640Sstevel@tonic-gate LIST_FOR_EACH(nb_first, node) {
1765132Srobinson (void) do_flush(node->ct, RPC_CL_BLOCKING_FLUSH);
17660Sstevel@tonic-gate }
17670Sstevel@tonic-gate }
17680Sstevel@tonic-gate
17690Sstevel@tonic-gate static int
1770132Srobinson allocate_chunk(void)
17710Sstevel@tonic-gate {
17720Sstevel@tonic-gate #define CHUNK_SIZE 16
1773132Srobinson struct nb_reg_node *chk =
17740Sstevel@tonic-gate malloc(sizeof (struct nb_reg_node) * CHUNK_SIZE);
17750Sstevel@tonic-gate struct nb_reg_node *n;
17760Sstevel@tonic-gate int i;
17770Sstevel@tonic-gate
17780Sstevel@tonic-gate if (NULL == chk) {
17790Sstevel@tonic-gate return (-1);
17800Sstevel@tonic-gate }
17810Sstevel@tonic-gate
17820Sstevel@tonic-gate n = chk;
17830Sstevel@tonic-gate for (i = 0; i < CHUNK_SIZE-1; ++i) {
17840Sstevel@tonic-gate n[i].next = &(n[i+1]);
17850Sstevel@tonic-gate }
17860Sstevel@tonic-gate n[CHUNK_SIZE-1].next = (struct nb_reg_node *)&nb_free;
17870Sstevel@tonic-gate nb_free = chk;
17880Sstevel@tonic-gate return (0);
17890Sstevel@tonic-gate }
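/*
 * Note that each chunk is carved into CHUNK_SIZE nodes threaded onto
 * the nb_free list and is never returned to the system; the pool only
 * grows, which is acceptable since the nodes are tiny and are recycled
 * through nb_free by unregister_nb().
 */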
17900Sstevel@tonic-gate
17910Sstevel@tonic-gate static int
17920Sstevel@tonic-gate register_nb(struct ct_data *ct)
17930Sstevel@tonic-gate {
17940Sstevel@tonic-gate struct nb_reg_node *node;
17950Sstevel@tonic-gate
1796132Srobinson (void) mutex_lock(&nb_list_mutex);
17970Sstevel@tonic-gate
17980Sstevel@tonic-gate if (LIST_ISEMPTY(nb_free) && (allocate_chunk() == -1)) {
1799132Srobinson (void) mutex_unlock(&nb_list_mutex);
18000Sstevel@tonic-gate errno = ENOMEM;
18010Sstevel@tonic-gate return (-1);
18020Sstevel@tonic-gate }
18030Sstevel@tonic-gate
18040Sstevel@tonic-gate if (!exit_handler_set) {
1805132Srobinson (void) atexit(flush_registered_clients);
18060Sstevel@tonic-gate exit_handler_set = TRUE;
18070Sstevel@tonic-gate }
18080Sstevel@tonic-gate /* Get the first free node */
18090Sstevel@tonic-gate LIST_EXTRACT(nb_free, node);
18100Sstevel@tonic-gate
18110Sstevel@tonic-gate node->ct = ct;
18120Sstevel@tonic-gate
18130Sstevel@tonic-gate LIST_ADD(nb_first, node);
1814132Srobinson (void) mutex_unlock(&nb_list_mutex);
18150Sstevel@tonic-gate
18160Sstevel@tonic-gate return (0);
18170Sstevel@tonic-gate }
18180Sstevel@tonic-gate
18190Sstevel@tonic-gate static int
18200Sstevel@tonic-gate unregister_nb(struct ct_data *ct)
18210Sstevel@tonic-gate {
18220Sstevel@tonic-gate struct nb_reg_node *node;
18230Sstevel@tonic-gate
1824132Srobinson (void) mutex_lock(&nb_list_mutex);
1825132Srobinson assert(!LIST_ISEMPTY(nb_first));
18260Sstevel@tonic-gate
18270Sstevel@tonic-gate node = nb_first;
18280Sstevel@tonic-gate LIST_FOR_EACH(nb_first, node) {
18290Sstevel@tonic-gate if (node->next->ct == ct) {
18300Sstevel@tonic-gate /* Get the node to unregister. */
18310Sstevel@tonic-gate struct nb_reg_node *n = node->next;
18320Sstevel@tonic-gate node->next = n->next;
18330Sstevel@tonic-gate
18340Sstevel@tonic-gate n->ct = NULL;
18350Sstevel@tonic-gate LIST_ADD(nb_free, n);
18360Sstevel@tonic-gate break;
18370Sstevel@tonic-gate }
18380Sstevel@tonic-gate }
1839132Srobinson (void) mutex_unlock(&nb_list_mutex);
18400Sstevel@tonic-gate return (0);
18410Sstevel@tonic-gate }