/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
 * All Rights Reserved
 */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */


/*
 * Implements a kernel based, client side RPC over Connection Oriented
 * Transports (COTS).
 */

/*
 * Much of this file has been re-written to let NFS work better over slow
 * transports. A description follows.
 *
 * One of the annoying things about kRPC/COTS is that it will temporarily
 * create more than one connection between a client and server. This
 * happens because when a connection is made, the end-point's entry in the
 * linked list of connections (headed by cm_hd) is removed so that other
 * threads don't mess with it. The fix was to bite the bullet and keep
 * the endpoint on the connection list, introducing state bits,
 * condition variables, etc. to the connection entry data structure (struct
 * cm_xprt).
 *
 * Here is a summary of the changes to cm_xprt:
 *
 *	x_ctime is the timestamp of when the endpoint was last
 *	connected or disconnected. If an end-point is ever disconnected
 *	or re-connected, then any outstanding RPC request is presumed
 *	lost, telling clnt_cots_kcallit that it needs to re-send the
 *	request, not just wait for the original request's reply to
 *	arrive.
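 *
 *	For illustration, the re-send decision in clnt_cots_kcallit()
 *	essentially reduces to a timestamp comparison (a simplified sketch,
 *	not the literal code):
 *
 *		if ((p->cku_flags & CKU_SENT) == 0 ||
 *		    p->cku_ctime != cm_entry->x_ctime) {
 *			p->cku_ctime = cm_entry->x_ctime;
 *			... re-send rather than just wait for a reply ...
 *		}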
 *
 *	x_thread flag which tells us if a thread is doing a connection attempt.
 *
 *	x_waitdis flag which tells us we are waiting for a disconnect ACK.
 *
 *	x_needdis flag which tells us we need to send a T_DISCON_REQ
 *	to kill the connection.
 *
 *	x_needrel flag which tells us we need to send a T_ORDREL_REQ to
 *	gracefully close the connection.
 *
 *	#defined bitmasks for all the b_* bits so that more
 *	efficient (and at times less clumsy) masks can be used to
 *	manipulate state in cases where multiple bits have to be
 *	set/cleared/checked in the same critical section.
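 *
 *	For example (an illustrative sketch, not a quote of any one call
 *	site), a sanity check and a combined state change under x_lock
 *	might look like:
 *
 *		mutex_enter(&cm_entry->x_lock);
 *		if (cm_entry->x_state_flags & X_BADSTATES) {
 *			... skip this endpoint ...
 *		} else {
 *			cm_entry->x_state_flags |= (X_NEEDDIS | X_WAITDIS);
 *		}
 *		mutex_exit(&cm_entry->x_lock);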
 *
 *	x_conn_cv and x_dis_cv are new condition variables to let
 *	threads know when the connection attempt is done, and to let
 *	the connecting thread know when the disconnect handshake is
 *	done.
 *
 * Added the CONN_HOLD() macro so that all reference holds have the same
 * look and feel.
 *
 * In the private (cku_private) portion of the client handle,
 *
 *	cku_flags replaces cku_sent, a boolean. cku_flags keeps
 *	track of whether a request has been sent, and whether the
 *	client handle's call record is on the dispatch list (so that
 *	the reply can be matched by XID to the right client handle).
 *	The idea of CKU_ONQUEUE is that we can exit clnt_cots_kcallit()
 *	and still have the response find the right client handle so
 *	that the retry of CLNT_CALL() gets the result. Testing found
 *	situations where if the timeout was increased, performance
 *	degraded. This was due to us hitting a window where the thread
 *	was back in rfscall() (probably printing server not responding)
 *	while the response came back but there was no place to put it.
 *
 *	cku_ctime is just a cache of x_ctime. If they match,
 *	clnt_cots_kcallit() won't send a retry (unless the maximum
 *	receive count limit has been reached). If they don't match, then
 *	we assume the request has been lost, and a retry of the request
 *	is needed.
 *
 *	cku_recv_attempts counts the number of receive attempts made
 *	after one try is sent on the wire.
 *
 * Added the clnt_delay() routine so that interruptible and
 * noninterruptible delays are possible.
 *
 * CLNT_MIN_TIMEOUT has been bumped to 10 seconds from 3. This is used to
 * control how long the client delays before returning after getting
 * ECONNREFUSED. At 3 seconds, 8 client threads per mount really do bash
 * a server that may be booting and has not yet started nfsd.
 *
 * CLNT_MAXRECV_WITHOUT_RETRY is a new macro (value of 3, with a tunable).
 * Why don't we just wait forever (receive an infinite # of times)?
 * Because the server may have rebooted. More insidious is that some
 * servers (ours) will drop NFS/TCP requests in some cases. This is bad,
 * but it is a reality.
 *
 * The case of a server doing an orderly release really messes up the
 * client's recovery, especially if the server's TCP implementation is
 * buggy. It was found that the kRPC/COTS client was breaking some
 * TPI rules, such as not waiting for the acknowledgement of a
 * T_DISCON_REQ (hence the added case statements T_ERROR_ACK, T_OK_ACK and
 * T_DISCON_REQ in clnt_dispatch_notifyall()).
 *
 * One of the things that we've seen is that a kRPC TCP endpoint goes into
 * TIMEWAIT and thus a reconnect takes a long time to satisfy because
 * the TIMEWAIT state takes a while to finish. If a server sends a
 * T_ORDREL_IND, there is little point in an RPC client doing a
 * T_ORDREL_REQ, because the RPC request isn't going to make it (the
 * server is saying that it won't accept any more data). So kRPC was
 * changed to send a T_DISCON_REQ when we get a T_ORDREL_IND. So now the
 * connection skips the TIMEWAIT state and goes straight to a bound state
 * that kRPC can quickly switch to connected.
 *
 * Code that issues TPI requests must use waitforack() to wait for the
 * corresponding ack (assuming there is one) in any future modifications.
 * This works around problems that may be introduced by breaking TPI rules
 * (by submitting new calls before earlier requests have been acked) in the
 * case of a signal or other early return.  waitforack() depends on
 * clnt_dispatch_notifyconn() to issue the wakeup when the ack
 * arrives, so adding new TPI calls may require corresponding changes
 * to clnt_dispatch_notifyconn(). Presently, the timeout period is based on
 * CLNT_MIN_TIMEOUT which is 10 seconds. If you modify this value, be sure
 * not to set it too low or TPI ACKs will be lost.
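 *
 *	A hedged sketch of that pattern (the helper names are the ones
 *	declared in this file; the local variable names and exact arguments
 *	are illustrative and vary by call site):
 *
 *		... put the TPI request (e.g. T_DISCON_REQ) on the wq ...
 *		(void) waitforack(e, T_OK_ACK, &waitp, nosignal);
 *
 *	with clnt_dispatch_notifyconn() performing the wakeup once the
 *	corresponding ACK arrives.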
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/file.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/stropts.h>
#include <sys/strsun.h>
#include <sys/timod.h>
#include <sys/tiuser.h>
#include <sys/tihdr.h>
#include <sys/t_kuser.h>
#include <sys/fcntl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/kstat.h>
#include <sys/t_lock.h>
#include <sys/ddi.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/isa_defs.h>
#include <sys/callb.h>
#include <sys/sunddi.h>
#include <sys/atomic.h>
#include <sys/sdt.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>

#define	COTS_DEFAULT_ALLOCSIZE	2048

#define	WIRE_HDR_SIZE	20	/* serialized call header, sans proc number */
#define	MSG_OFFSET	128	/* offset of call into the mblk */

const char *kinet_ntop6(uchar_t *, char *, size_t);

static int	clnt_cots_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void(*)(int, int, caddr_t), caddr_t, uint32_t);
static enum clnt_stat	clnt_cots_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void	clnt_cots_kabort(CLIENT *);
static void	clnt_cots_kerror(CLIENT *, struct rpc_err *);
static bool_t	clnt_cots_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static void	clnt_cots_kdestroy(CLIENT *);
static bool_t	clnt_cots_kcontrol(CLIENT *, int, char *);


/* List of transports managed by the connection manager. */
struct cm_xprt {
	TIUSER		*x_tiptr;	/* transport handle */
	queue_t		*x_wq;		/* send queue */
	clock_t		x_time;		/* last time we handed this xprt out */
	clock_t		x_ctime;	/* time we went to CONNECTED */
	int		x_tidu_size;	/* TIDU size of this transport */
	union {
	    struct {
		unsigned int
#ifdef	_BIT_FIELDS_HTOL
		b_closing:	1,	/* we've sent a ord rel on this conn */
		b_dead:		1,	/* transport is closed or disconn */
		b_doomed:	1,	/* too many conns, let this go idle */
		b_connected:	1,	/* this connection is connected */

		b_ordrel:	1,	/* do an orderly release? */
		b_thread:	1,	/* thread doing connect */
		b_waitdis:	1,	/* waiting for disconnect ACK */
		b_needdis:	1,	/* need T_DISCON_REQ */

		b_needrel:	1,	/* need T_ORDREL_REQ */
		b_early_disc:	1,	/* got a T_ORDREL_IND or T_DISCON_IND */
					/* disconnect during connect */

		b_pad:		22;

#endif

#ifdef	_BIT_FIELDS_LTOH
		b_pad:		22,

		b_early_disc:	1,	/* got a T_ORDREL_IND or T_DISCON_IND */
					/* disconnect during connect */
		b_needrel:	1,	/* need T_ORDREL_REQ */

		b_needdis:	1,	/* need T_DISCON_REQ */
		b_waitdis:	1,	/* waiting for disconnect ACK */
		b_thread:	1,	/* thread doing connect */
		b_ordrel:	1,	/* do an orderly release? */

		b_connected:	1,	/* this connection is connected */
		b_doomed:	1,	/* too many conns, let this go idle */
		b_dead:		1,	/* transport is closed or disconn */
		b_closing:	1;	/* we've sent a ord rel on this conn */
#endif
	    } bit;	    unsigned int word;

#define	x_closing	x_state.bit.b_closing
#define	x_dead		x_state.bit.b_dead
#define	x_doomed	x_state.bit.b_doomed
#define	x_connected	x_state.bit.b_connected

#define	x_ordrel	x_state.bit.b_ordrel
#define	x_thread	x_state.bit.b_thread
#define	x_waitdis	x_state.bit.b_waitdis
#define	x_needdis	x_state.bit.b_needdis

#define	x_needrel	x_state.bit.b_needrel
#define	x_early_disc	x_state.bit.b_early_disc

#define	x_state_flags	x_state.word

#define	X_CLOSING	0x80000000
#define	X_DEAD		0x40000000
#define	X_DOOMED	0x20000000
#define	X_CONNECTED	0x10000000

#define	X_ORDREL	0x08000000
#define	X_THREAD	0x04000000
#define	X_WAITDIS	0x02000000
#define	X_NEEDDIS	0x01000000

#define	X_NEEDREL	0x00800000
#define	X_EARLYDISC	0x00400000

#define	X_BADSTATES	(X_CLOSING | X_DEAD | X_DOOMED)

	} x_state;
	int		x_ref;		/* number of users of this xprt */
	int		x_family;	/* address family of transport */
	dev_t		x_rdev;		/* device number of transport */
	struct cm_xprt	*x_next;

	struct netbuf	x_server;	/* destination address */
	struct netbuf	x_src;		/* src address (for retries) */
	kmutex_t	x_lock;		/* lock on this entry */
	kcondvar_t	x_cv;		/* to signal when can be closed */
	kcondvar_t	x_conn_cv;	/* to signal when connection attempt */
					/* is complete */
	kstat_t		*x_ksp;

	kcondvar_t	x_dis_cv;	/* to signal when disconnect attempt */
					/* is complete */
	zoneid_t	x_zoneid;	/* zone this xprt belongs to */
};

typedef struct cm_kstat_xprt {
	kstat_named_t	x_wq;
	kstat_named_t	x_server;
	kstat_named_t	x_family;
	kstat_named_t	x_rdev;
	kstat_named_t	x_time;
	kstat_named_t	x_state;
	kstat_named_t	x_ref;
	kstat_named_t	x_port;
} cm_kstat_xprt_t;

static cm_kstat_xprt_t cm_kstat_template = {
	{ "write_queue", KSTAT_DATA_UINT32 },
	{ "server", KSTAT_DATA_STRING },
	{ "addr_family", KSTAT_DATA_UINT32 },
	{ "device", KSTAT_DATA_UINT32 },
	{ "time_stamp", KSTAT_DATA_UINT32 },
	{ "status", KSTAT_DATA_UINT32 },
	{ "ref_count", KSTAT_DATA_INT32 },
	{ "port", KSTAT_DATA_UINT32 },
};

/*
 * The inverse of this is connmgr_release().
 */
#define	CONN_HOLD(Cm_entry)	{\
	mutex_enter(&(Cm_entry)->x_lock);	\
	(Cm_entry)->x_ref++;	\
	mutex_exit(&(Cm_entry)->x_lock);	\
}
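
/*
 * Illustrative usage only (not a quote of a specific call site): a caller
 * that keeps a cm_xprt pointer around is expected to pair the hold with a
 * release, e.g.
 *
 *	CONN_HOLD(cm_entry);
 *	... use cm_entry->x_wq, cm_entry->x_server, etc. ...
 *	connmgr_release(cm_entry);
 */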


/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_cots_kcreate, and freed by clnt_cots_kdestroy.
 */
typedef struct cku_private_s {
	CLIENT			cku_client;	/* client handle */
	calllist_t		cku_call;	/* for dispatching calls */
	struct rpc_err		cku_err;	/* error status */

	struct netbuf		cku_srcaddr;	/* source address for retries */
	int			cku_addrfmly;	/* for binding port */
	struct netbuf		cku_addr;	/* remote address */
	dev_t			cku_device;	/* device to use */
	uint_t			cku_flags;
#define	CKU_ONQUEUE		0x1
#define	CKU_SENT		0x2

	bool_t			cku_progress;	/* for CLSET_PROGRESS */
	uint32_t		cku_xid;	/* current XID */
	clock_t			cku_ctime;	/* time stamp of when */
						/* connection was created */
	uint_t			cku_recv_attempts;
	XDR			cku_outxdr;	/* xdr routine for output */
	XDR			cku_inxdr;	/* xdr routine for input */
	char			cku_rpchdr[WIRE_HDR_SIZE + 4];
						/* pre-serialized rpc header */

	uint_t			cku_outbuflen;	/* default output mblk length */
	struct cred		*cku_cred;	/* credentials */
	bool_t			cku_nodelayonerr;
						/* for CLSET_NODELAYONERR */
	int			cku_useresvport; /* Use reserved port */
	struct rpc_cots_client	*cku_stats;	/* stats for zone */
} cku_private_t;

static struct cm_xprt *connmgr_wrapconnect(struct cm_xprt *,
	const struct timeval *, struct netbuf *, int, struct netbuf *,
	struct rpc_err *, bool_t, bool_t, cred_t *);

static bool_t	connmgr_connect(struct cm_xprt *, queue_t *, struct netbuf *,
	int, calllist_t *, int *, bool_t reconnect,
	const struct timeval *, bool_t, cred_t *);

static void	*connmgr_opt_getoff(mblk_t *mp, t_uscalar_t offset,
	t_uscalar_t length, uint_t align_size);
static bool_t	connmgr_setbufsz(calllist_t *e, queue_t *wq, cred_t *cr);
static bool_t	connmgr_getopt_int(queue_t *wq, int level, int name, int *val,
	calllist_t *e, cred_t *cr);
static bool_t	connmgr_setopt_int(queue_t *wq, int level, int name, int val,
	calllist_t *e, cred_t *cr);
static bool_t	connmgr_setopt(queue_t *, int, int, calllist_t *, cred_t *cr);
static void	connmgr_sndrel(struct cm_xprt *);
static void	connmgr_snddis(struct cm_xprt *);
static void	connmgr_close(struct cm_xprt *);
static void	connmgr_release(struct cm_xprt *);
static struct cm_xprt *connmgr_wrapget(struct netbuf *, const struct timeval *,
	cku_private_t *);

static struct cm_xprt *connmgr_get(struct netbuf *, const struct timeval *,
	struct netbuf *, int, struct netbuf *, struct rpc_err *, dev_t,
	bool_t, int, cred_t *);

static void connmgr_cancelconn(struct cm_xprt *);
static enum clnt_stat connmgr_cwait(struct cm_xprt *, const struct timeval *,
	bool_t);
static void connmgr_dis_and_wait(struct cm_xprt *);

static int	clnt_dispatch_send(queue_t *, mblk_t *, calllist_t *, uint_t,
	uint_t);

static int clnt_delay(clock_t, bool_t);

static int waitforack(calllist_t *, t_scalar_t, const struct timeval *, bool_t);

/*
 * Operations vector for TCP/IP based RPC
 */
static struct clnt_ops tcp_ops = {
	clnt_cots_kcallit,	/* do rpc call */
	clnt_cots_kabort,	/* abort call */
	clnt_cots_kerror,	/* return error status */
	clnt_cots_kfreeres,	/* free results */
	clnt_cots_kdestroy,	/* destroy rpc handle */
	clnt_cots_kcontrol,	/* the ioctl() of rpc */
	clnt_cots_ksettimers,	/* set retry timers */
};

static int rpc_kstat_instance = 0;	/* keeps the current instance */
					/* number for the next kstat_create */

static struct cm_xprt *cm_hd = NULL;
static kmutex_t connmgr_lock;	/* for connection mngr's list of transports */

extern kmutex_t clnt_max_msg_lock;

static calllist_t *clnt_pending = NULL;
extern kmutex_t clnt_pending_lock;

static int clnt_cots_hash_size = DEFAULT_HASH_SIZE;

static call_table_t *cots_call_ht;

static const struct rpc_cots_client {
	kstat_named_t	rccalls;
	kstat_named_t	rcbadcalls;
	kstat_named_t	rcbadxids;
	kstat_named_t	rctimeouts;
	kstat_named_t	rcnewcreds;
	kstat_named_t	rcbadverfs;
	kstat_named_t	rctimers;
	kstat_named_t	rccantconn;
	kstat_named_t	rcnomem;
	kstat_named_t	rcintrs;
} cots_rcstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "badxids",	KSTAT_DATA_UINT64 },
	{ "timeouts",	KSTAT_DATA_UINT64 },
	{ "newcreds",	KSTAT_DATA_UINT64 },
	{ "badverfs",	KSTAT_DATA_UINT64 },
	{ "timers",	KSTAT_DATA_UINT64 },
	{ "cantconn",	KSTAT_DATA_UINT64 },
	{ "nomem",	KSTAT_DATA_UINT64 },
	{ "interrupts", KSTAT_DATA_UINT64 }
};

#define	COTSRCSTAT_INCR(p, x)	\
	atomic_add_64(&(p)->x.value.ui64, 1)

#define	CLNT_MAX_CONNS	1	/* concurrent connections between clnt/srvr */
int clnt_max_conns = CLNT_MAX_CONNS;

#define	CLNT_MIN_TIMEOUT	10	/* seconds to wait after we get a */
					/* connection reset */
#define	CLNT_MIN_CONNTIMEOUT	5	/* seconds to wait for a connection */


int clnt_cots_min_tout = CLNT_MIN_TIMEOUT;
int clnt_cots_min_conntout = CLNT_MIN_CONNTIMEOUT;

/*
 * Limit the number of times we will attempt to receive a reply without
 * re-sending the request.
 */
#define	CLNT_MAXRECV_WITHOUT_RETRY	3
uint_t clnt_cots_maxrecv = CLNT_MAXRECV_WITHOUT_RETRY;

uint_t *clnt_max_msg_sizep;
void (*clnt_stop_idle)(queue_t *wq);

#define	ptoh(p)		(&((p)->cku_client))
#define	htop(h)		((cku_private_t *)((h)->cl_private))

/*
 * Times to retry
 */
#define	REFRESHES	2	/* authentication refreshes */

/*
 * The following is used to determine the global default behavior for
 * COTS when binding to a local port.
 *
 * If the value is set to 1 the default will be to select a reserved
 * (aka privileged) port, if the value is zero the default will be to
 * use non-reserved ports.  Users of kRPC may override this by using
 * CLNT_CONTROL() and CLSET_BINDRESVPORT.
 */
int clnt_cots_do_bindresvport = 1;
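
/*
 * Hedged usage sketch (illustrative, not quoted from a particular caller):
 * a kRPC consumer that wants a non-reserved source port regardless of the
 * global default would do something like
 *
 *	int resv = 0;
 *	(void) CLNT_CONTROL(client, CLSET_BINDRESVPORT, (char *)&resv);
 *
 * which clnt_cots_kcontrol() below records in cku_useresvport.
 */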

static zone_key_t zone_cots_key;

/*
 * Default TCP send and receive buffer sizes for RPC connections.
 * These values can be tuned by /etc/system.
 */
int rpc_send_bufsz = 1024*1024;
int rpc_recv_bufsz = 1024*1024;
/*
 * To use the system-wide default for TCP send and receive buffer sizes,
 * use /etc/system to set rpc_default_tcp_bufsz to 1:
 *
 * set rpcmod:rpc_default_tcp_bufsz=1
 */
int rpc_default_tcp_bufsz = 0;
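
/*
 * For example (illustrative only; this assumes, as the example above does,
 * that this code is delivered in the rpcmod module), the explicit sizes can
 * be tuned the same way from /etc/system:
 *
 *	set rpcmod:rpc_send_bufsz=0x200000
 *	set rpcmod:rpc_recv_bufsz=0x200000
 */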
52510004Sdai.ngo@sun.com
52610004Sdai.ngo@sun.com /*
5270Sstevel@tonic-gate * We need to do this after all kernel threads in the zone have exited.
5280Sstevel@tonic-gate */
5290Sstevel@tonic-gate /* ARGSUSED */
5300Sstevel@tonic-gate static void
clnt_zone_destroy(zoneid_t zoneid,void * unused)5310Sstevel@tonic-gate clnt_zone_destroy(zoneid_t zoneid, void *unused)
5320Sstevel@tonic-gate {
5330Sstevel@tonic-gate struct cm_xprt **cmp;
5340Sstevel@tonic-gate struct cm_xprt *cm_entry;
5350Sstevel@tonic-gate struct cm_xprt *freelist = NULL;
5360Sstevel@tonic-gate
5370Sstevel@tonic-gate mutex_enter(&connmgr_lock);
5380Sstevel@tonic-gate cmp = &cm_hd;
5390Sstevel@tonic-gate while ((cm_entry = *cmp) != NULL) {
5400Sstevel@tonic-gate if (cm_entry->x_zoneid == zoneid) {
5410Sstevel@tonic-gate *cmp = cm_entry->x_next;
5420Sstevel@tonic-gate cm_entry->x_next = freelist;
5430Sstevel@tonic-gate freelist = cm_entry;
5440Sstevel@tonic-gate } else {
5450Sstevel@tonic-gate cmp = &cm_entry->x_next;
5460Sstevel@tonic-gate }
5470Sstevel@tonic-gate }
5480Sstevel@tonic-gate mutex_exit(&connmgr_lock);
5490Sstevel@tonic-gate while ((cm_entry = freelist) != NULL) {
5500Sstevel@tonic-gate freelist = cm_entry->x_next;
5510Sstevel@tonic-gate connmgr_close(cm_entry);
5520Sstevel@tonic-gate }
5530Sstevel@tonic-gate }

int
clnt_cots_kcreate(dev_t dev, struct netbuf *addr, int family, rpcprog_t prog,
	rpcvers_t vers, uint_t max_msgsize, cred_t *cred, CLIENT **ncl)
{
	CLIENT *h;
	cku_private_t *p;
	struct rpc_msg call_msg;
	struct rpcstat *rpcstat;

	RPCLOG(8, "clnt_cots_kcreate: prog %u\n", prog);

	rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
	ASSERT(rpcstat != NULL);

	/* Allocate and initialize the client handle. */
	p = kmem_zalloc(sizeof (*p), KM_SLEEP);

	h = ptoh(p);

	h->cl_private = (caddr_t)p;
	h->cl_auth = authkern_create();
	h->cl_ops = &tcp_ops;

	cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * If the current sanity check size in rpcmod is smaller
	 * than the size needed, then increase the sanity check.
	 */
	if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
	    max_msgsize > *clnt_max_msg_sizep) {
		mutex_enter(&clnt_max_msg_lock);
		if (max_msgsize > *clnt_max_msg_sizep)
			*clnt_max_msg_sizep = max_msgsize;
		mutex_exit(&clnt_max_msg_lock);
	}

	p->cku_outbuflen = COTS_DEFAULT_ALLOCSIZE;

	/* Preserialize the call message header */

	call_msg.rm_xid = 0;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = prog;
	call_msg.rm_call.cb_vers = vers;

	xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, WIRE_HDR_SIZE, XDR_ENCODE);

	if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
		RPCLOG0(1, "clnt_cots_kcreate - Fatal header serialization "
		    "error\n");
		auth_destroy(h->cl_auth);
		kmem_free(p, sizeof (cku_private_t));
		RPCLOG0(1, "clnt_cots_kcreate: create failed error EINVAL\n");
		return (EINVAL);		/* XXX */
	}

	/*
	 * The zalloc initialized the fields below.
	 * p->cku_xid = 0;
	 * p->cku_flags = 0;
	 * p->cku_srcaddr.len = 0;
	 * p->cku_srcaddr.maxlen = 0;
	 */

	p->cku_cred = cred;
	p->cku_device = dev;
	p->cku_addrfmly = family;
	p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
	p->cku_addr.maxlen = addr->maxlen;
	p->cku_addr.len = addr->len;
	bcopy(addr->buf, p->cku_addr.buf, addr->len);
	p->cku_stats = rpcstat->rpc_cots_client;
	p->cku_useresvport = -1; /* value has not been set */

	*ncl = h;
	return (0);
}

/*ARGSUSED*/
static void
clnt_cots_kabort(CLIENT *h)
{
}

/*
 * Return error info on this handle.
 */
static void
clnt_cots_kerror(CLIENT *h, struct rpc_err *err)
{
	/* LINTED pointer alignment */
	cku_private_t *p = htop(h);

	*err = p->cku_err;
}

static bool_t
clnt_cots_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
{
	/* LINTED pointer alignment */
	cku_private_t *p = htop(h);
	XDR *xdrs;

	xdrs = &(p->cku_outxdr);
	xdrs->x_op = XDR_FREE;
	return ((*xdr_res)(xdrs, res_ptr));
}

static bool_t
clnt_cots_kcontrol(CLIENT *h, int cmd, char *arg)
{
	cku_private_t *p = htop(h);

	switch (cmd) {
	case CLSET_PROGRESS:
		p->cku_progress = TRUE;
		return (TRUE);

	case CLSET_XID:
		if (arg == NULL)
			return (FALSE);

		p->cku_xid = *((uint32_t *)arg);
		return (TRUE);

	case CLGET_XID:
		if (arg == NULL)
			return (FALSE);

		*((uint32_t *)arg) = p->cku_xid;
		return (TRUE);

	case CLSET_NODELAYONERR:
		if (arg == NULL)
			return (FALSE);

		if (*((bool_t *)arg) == TRUE) {
			p->cku_nodelayonerr = TRUE;
			return (TRUE);
		}
		if (*((bool_t *)arg) == FALSE) {
			p->cku_nodelayonerr = FALSE;
			return (TRUE);
		}
		return (FALSE);

	case CLGET_NODELAYONERR:
		if (arg == NULL)
			return (FALSE);

		*((bool_t *)arg) = p->cku_nodelayonerr;
		return (TRUE);

	case CLSET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		if (*(int *)arg != 1 && *(int *)arg != 0)
			return (FALSE);

		p->cku_useresvport = *(int *)arg;

		return (TRUE);

	case CLGET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		*(int *)arg = p->cku_useresvport;

		return (TRUE);

	default:
		return (FALSE);
	}
}

/*
 * Destroy rpc handle.  Frees the space used for output buffer,
 * private data, and handle structure.
 */
static void
clnt_cots_kdestroy(CLIENT *h)
{
	/* LINTED pointer alignment */
	cku_private_t *p = htop(h);
	calllist_t *call = &p->cku_call;

	RPCLOG(8, "clnt_cots_kdestroy h: %p\n", (void *)h);
	RPCLOG(8, "clnt_cots_kdestroy h: xid=0x%x\n", p->cku_xid);

	if (p->cku_flags & CKU_ONQUEUE) {
		RPCLOG(64, "clnt_cots_kdestroy h: removing call for xid 0x%x "
		    "from dispatch list\n", p->cku_xid);
		call_table_remove(call);
	}

	if (call->call_reply)
		freemsg(call->call_reply);
	cv_destroy(&call->call_cv);
	mutex_destroy(&call->call_lock);

	kmem_free(p->cku_srcaddr.buf, p->cku_srcaddr.maxlen);
	kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
	kmem_free(p, sizeof (*p));
}

static int clnt_cots_pulls;
#define	RM_HDR_SIZE	4	/* record mark header size */

/*
 * Call remote procedure.
 */
static enum clnt_stat
clnt_cots_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
	caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
	struct timeval wait)
{
	/* LINTED pointer alignment */
	cku_private_t *p = htop(h);
	calllist_t *call = &p->cku_call;
	XDR *xdrs;
	struct rpc_msg reply_msg;
	mblk_t *mp;
#ifdef	RPCDEBUG
	clock_t time_sent;
#endif
	struct netbuf *retryaddr;
	struct cm_xprt *cm_entry = NULL;
	queue_t *wq;
	int len, waitsecs, max_waitsecs;
	int mpsize;
	int refreshes = REFRESHES;
	int interrupted;
	int tidu_size;
	enum clnt_stat status;
	struct timeval cwait;
	bool_t delay_first = FALSE;
	clock_t ticks, now;

	RPCLOG(2, "clnt_cots_kcallit, procnum %u\n", procnum);
	COTSRCSTAT_INCR(p->cku_stats, rccalls);

	RPCLOG(2, "clnt_cots_kcallit: wait.tv_sec: %ld\n", wait.tv_sec);
	RPCLOG(2, "clnt_cots_kcallit: wait.tv_usec: %ld\n", wait.tv_usec);
	/*
	 * Bug ID 1240234:
	 * Look out for zero length timeouts. We don't want to
	 * wait zero seconds for a connection to be established.
	 */
	if (wait.tv_sec < clnt_cots_min_conntout) {
		cwait.tv_sec = clnt_cots_min_conntout;
		cwait.tv_usec = 0;
		RPCLOG(8, "clnt_cots_kcallit: wait.tv_sec (%ld) too low,",
		    wait.tv_sec);
		RPCLOG(8, " setting to: %d\n", clnt_cots_min_conntout);
	} else {
		cwait = wait;
	}

call_again:
	if (cm_entry) {
		connmgr_release(cm_entry);
		cm_entry = NULL;
	}

	mp = NULL;

	/*
	 * If the call is not a retry, allocate a new xid and cache it
	 * for future retries.
	 * Bug ID 1246045:
	 * Treat call as a retry for purposes of binding the source
	 * port only if we actually attempted to send anything on
	 * the previous call.
	 */
	if (p->cku_xid == 0) {
		p->cku_xid = alloc_xid();
		call->call_zoneid = rpc_zoneid();

		/*
		 * We need to ASSERT here that our xid != 0 because this
		 * determines whether or not our call record gets placed on
		 * the hash table or the linked list.  By design, we mandate
		 * that RPC calls over cots must have xid's != 0, so we can
		 * ensure proper management of the hash table.
		 */
		ASSERT(p->cku_xid != 0);

		retryaddr = NULL;
		p->cku_flags &= ~CKU_SENT;

		if (p->cku_flags & CKU_ONQUEUE) {
			RPCLOG(8, "clnt_cots_kcallit: new call, dequeuing old"
			    " one (%p)\n", (void *)call);
			call_table_remove(call);
			p->cku_flags &= ~CKU_ONQUEUE;
			RPCLOG(64, "clnt_cots_kcallit: removing call from "
			    "dispatch list because xid was zero (now 0x%x)\n",
			    p->cku_xid);
		}

		if (call->call_reply != NULL) {
			freemsg(call->call_reply);
			call->call_reply = NULL;
		}
	} else if (p->cku_srcaddr.buf == NULL || p->cku_srcaddr.len == 0) {
		retryaddr = NULL;

	} else if (p->cku_flags & CKU_SENT) {
		retryaddr = &p->cku_srcaddr;

	} else {
		/*
		 * Bug ID 1246045: Nothing was sent, so set retryaddr to
		 * NULL and let connmgr_get() bind to any source port it
		 * can get.
		 */
		retryaddr = NULL;
	}

	RPCLOG(64, "clnt_cots_kcallit: xid = 0x%x", p->cku_xid);
	RPCLOG(64, " flags = 0x%x\n", p->cku_flags);

	p->cku_err.re_status = RPC_TIMEDOUT;
	p->cku_err.re_errno = p->cku_err.re_terrno = 0;

	cm_entry = connmgr_wrapget(retryaddr, &cwait, p);

	if (cm_entry == NULL) {
		RPCLOG(1, "clnt_cots_kcallit: can't connect status %s\n",
		    clnt_sperrno(p->cku_err.re_status));

		/*
		 * The reasons why we fail to create a connection are
		 * varied. In most cases we don't want the caller to
		 * immediately retry. This could have one or more
		 * bad effects. This includes flooding the net with
		 * connect requests to ports with no listener; a hard
		 * kernel loop due to all the "reserved" TCP ports being
		 * in use.
		 */
		delay_first = TRUE;

		/*
		 * Even if we end up returning EINTR, we still count a
		 * "can't connect", because the connection manager
		 * might have been committed to waiting for or timing out on
		 * a connection.
		 */
		COTSRCSTAT_INCR(p->cku_stats, rccantconn);
		switch (p->cku_err.re_status) {
		case RPC_INTR:
			p->cku_err.re_errno = EINTR;

			/*
			 * No need to delay because a UNIX signal(2)
			 * interrupted us. The caller likely won't
			 * retry the CLNT_CALL() and even if it does,
			 * we assume the caller knows what it is doing.
			 */
			delay_first = FALSE;
			break;

		case RPC_TIMEDOUT:
			p->cku_err.re_errno = ETIMEDOUT;

			/*
			 * No need to delay because we already timed out
			 * on the connection request, and we assume that
			 * the transport timeout is longer than our minimum
			 * timeout, or at least not too much smaller.
			 */
			delay_first = FALSE;
			break;

		case RPC_SYSTEMERROR:
		case RPC_TLIERROR:
			/*
			 * We want to delay here because a transient
			 * system error has a better chance of going away
			 * if we delay a bit. If it's not transient, then
			 * we don't want to end up in a hard kernel loop
			 * due to retries.
			 */
			ASSERT(p->cku_err.re_errno != 0);
			break;


		case RPC_CANTCONNECT:
			/*
			 * RPC_CANTCONNECT is set on T_ERROR_ACK which
			 * implies some error down in the TCP layer or
			 * below. If cku_nodelayonerr is set then we
			 * assume the caller knows not to try too hard.
			 */
			RPCLOG0(8, "clnt_cots_kcallit: connection failed,");
			RPCLOG0(8, " re_status=RPC_CANTCONNECT,");
			RPCLOG(8, " re_errno=%d,", p->cku_err.re_errno);
			RPCLOG(8, " cku_nodelayonerr=%d", p->cku_nodelayonerr);
			if (p->cku_nodelayonerr == TRUE)
				delay_first = FALSE;

			p->cku_err.re_errno = EIO;

			break;

		case RPC_XPRTFAILED:
			/*
			 * We want to delay here because we likely
			 * got a refused connection.
			 */
			if (p->cku_err.re_errno == 0)
				p->cku_err.re_errno = EIO;

			RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
			    p->cku_err.re_errno);

			break;

		default:
			/*
			 * We delay here because it is better to err
			 * on the side of caution. If we got here then
			 * status could have been RPC_SUCCESS, but we
			 * know that we did not get a connection, so
			 * force the rpc status to RPC_CANTCONNECT.
			 */
			p->cku_err.re_status = RPC_CANTCONNECT;
			p->cku_err.re_errno = EIO;
			break;
		}
		if (delay_first == TRUE)
			ticks = clnt_cots_min_tout * drv_usectohz(1000000);
		goto cots_done;
	}

	/*
	 * If we've never sent any request on this connection (send count
	 * is zero, or the connection has been reset), cache the
	 * connection's create time and send a request (possibly a retry).
	 */
	if ((p->cku_flags & CKU_SENT) == 0 ||
	    p->cku_ctime != cm_entry->x_ctime) {
		p->cku_ctime = cm_entry->x_ctime;

	} else if ((p->cku_flags & CKU_SENT) && (p->cku_flags & CKU_ONQUEUE) &&
	    (call->call_reply != NULL ||
	    p->cku_recv_attempts < clnt_cots_maxrecv)) {

		/*
		 * If we've sent a request and our call is on the dispatch
		 * queue and we haven't made too many receive attempts, then
		 * don't re-send, just receive.
		 */
		p->cku_recv_attempts++;
		goto read_again;
	}

	/*
	 * Now we create the RPC request in a STREAMS message.  We have to do
	 * this after the call to connmgr_get so that we have the correct
	 * TIDU size for the transport.
	 */
	tidu_size = cm_entry->x_tidu_size;
	len = MSG_OFFSET + MAX(tidu_size, RM_HDR_SIZE + WIRE_HDR_SIZE);

	while ((mp = allocb(len, BPRI_MED)) == NULL) {
		if (strwaitbuf(len, BPRI_MED)) {
			p->cku_err.re_status = RPC_SYSTEMERROR;
			p->cku_err.re_errno = ENOSR;
			COTSRCSTAT_INCR(p->cku_stats, rcnomem);
			goto cots_done;
		}
	}
	xdrs = &p->cku_outxdr;
	xdrmblk_init(xdrs, mp, XDR_ENCODE, tidu_size);
	mpsize = MBLKSIZE(mp);
	ASSERT(mpsize >= len);
	ASSERT(mp->b_rptr == mp->b_datap->db_base);

	/*
	 * If the size of mblk is not appreciably larger than what we
	 * asked, then resize the mblk to exactly len bytes. The reason for
	 * this: suppose len is 1600 bytes, the tidu is 1460 bytes
	 * (from TCP over ethernet), and the arguments to the RPC require
	 * 2800 bytes. Ideally we want the protocol to render two
	 * ~1400 byte segments over the wire. However if allocb() gives us a 2k
	 * mblk, and we allocate a second mblk for the remainder, the protocol
	 * module may generate 3 segments over the wire:
	 * 1460 bytes for the first, 448 (2048 - 1600) for the second, and
	 * 892 for the third. If we "waste" 448 bytes in the first mblk,
	 * the XDR encoding will generate two ~1400 byte mblks, and the
	 * protocol module is more likely to produce properly sized segments.
	 */
	if ((mpsize >> 1) <= len)
		mp->b_rptr += (mpsize - len);

	/*
	 * Adjust b_rptr to reserve space for the non-data protocol headers
	 * any downstream modules might like to add, and for the
	 * record marking header.
	 */
	mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE);

	if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
		/* Copy in the preserialized RPC header information. */
		bcopy(p->cku_rpchdr, mp->b_rptr, WIRE_HDR_SIZE);

		/* Use XDR_SETPOS() to set the b_wptr to past the RPC header. */
		XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base +
		    WIRE_HDR_SIZE));

		ASSERT((mp->b_wptr - mp->b_rptr) == WIRE_HDR_SIZE);

		/* Serialize the procedure number and the arguments. */
		if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
		    (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
		    (!(*xdr_args)(xdrs, argsp))) {
			p->cku_err.re_status = RPC_CANTENCODEARGS;
			p->cku_err.re_errno = EIO;
			goto cots_done;
		}

		(*(uint32_t *)(mp->b_rptr)) = p->cku_xid;
	} else {
		uint32_t *uproc = (uint32_t *)&p->cku_rpchdr[WIRE_HDR_SIZE];
		IXDR_PUT_U_INT32(uproc, procnum);

		(*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;

		/* Use XDR_SETPOS() to set the b_wptr. */
		XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base));

		/* Serialize the procedure number and the arguments. */
		if (!AUTH_WRAP(h->cl_auth, p->cku_rpchdr, WIRE_HDR_SIZE+4,
		    xdrs, xdr_args, argsp)) {
			p->cku_err.re_status = RPC_CANTENCODEARGS;
			p->cku_err.re_errno = EIO;
			goto cots_done;
		}
	}

	RPCLOG(2, "clnt_cots_kcallit: connected, sending call, tidu_size %d\n",
	    tidu_size);

11030Sstevel@tonic-gate wq = cm_entry->x_wq;
11049675Sdai.ngo@sun.com waitsecs = 0;
11059675Sdai.ngo@sun.com
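	/*
	 * Hand the request to the transport.  If the stream is flow
	 * controlled (clnt_dispatch_send() fails with ENOBUFS), the
	 * QFULL handling below backs off one second at a time and
	 * retries, until the call's timeout is consumed or the zone
	 * starts shutting down.
	 */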
11069675Sdai.ngo@sun.com dispatch_again:
11078205SSiddheshwar.Mahesh@Sun.COM status = clnt_dispatch_send(wq, mp, call, p->cku_xid,
11086403Sgt29601 (p->cku_flags & CKU_ONQUEUE));
11090Sstevel@tonic-gate
11109675Sdai.ngo@sun.com if ((status == RPC_CANTSEND) && (call->call_reason == ENOBUFS)) {
11119675Sdai.ngo@sun.com /*
11129675Sdai.ngo@sun.com * QFULL condition: allow some time for the queue to drain
11139675Sdai.ngo@sun.com * and try again. Give up after waiting for the full timeout
11149675Sdai.ngo@sun.com * specified for the call, or if the zone is going away.
11159675Sdai.ngo@sun.com */
11169675Sdai.ngo@sun.com max_waitsecs = wait.tv_sec ? wait.tv_sec : clnt_cots_min_tout;
11179675Sdai.ngo@sun.com if ((waitsecs++ < max_waitsecs) &&
11189675Sdai.ngo@sun.com !(zone_status_get(curproc->p_zone) >=
11199675Sdai.ngo@sun.com ZONE_IS_SHUTTING_DOWN)) {
11209675Sdai.ngo@sun.com
11219675Sdai.ngo@sun.com /* wait 1 sec for queue to drain */
11229675Sdai.ngo@sun.com if (clnt_delay(drv_usectohz(1000000),
11239675Sdai.ngo@sun.com h->cl_nosignal) == EINTR) {
11249675Sdai.ngo@sun.com p->cku_err.re_errno = EINTR;
11259675Sdai.ngo@sun.com p->cku_err.re_status = RPC_INTR;
11269675Sdai.ngo@sun.com
11279675Sdai.ngo@sun.com goto cots_done;
11289675Sdai.ngo@sun.com }
11299675Sdai.ngo@sun.com
11309675Sdai.ngo@sun.com /* and try again */
11319675Sdai.ngo@sun.com goto dispatch_again;
11329675Sdai.ngo@sun.com }
11338205SSiddheshwar.Mahesh@Sun.COM p->cku_err.re_status = status;
11349675Sdai.ngo@sun.com p->cku_err.re_errno = call->call_reason;
11358205SSiddheshwar.Mahesh@Sun.COM DTRACE_PROBE(krpc__e__clntcots__kcallit__cantsend);
11368205SSiddheshwar.Mahesh@Sun.COM
11378205SSiddheshwar.Mahesh@Sun.COM goto cots_done;
11388205SSiddheshwar.Mahesh@Sun.COM }
11398205SSiddheshwar.Mahesh@Sun.COM
11409675Sdai.ngo@sun.com if (waitsecs) {
11419675Sdai.ngo@sun.com /* adjust timeout to account for time waited to send */
11429675Sdai.ngo@sun.com wait.tv_sec -= waitsecs;
11439675Sdai.ngo@sun.com if (wait.tv_sec < 0) {
11449675Sdai.ngo@sun.com /* pick up reply on next retry */
11459675Sdai.ngo@sun.com wait.tv_sec = 0;
11469675Sdai.ngo@sun.com }
11479675Sdai.ngo@sun.com DTRACE_PROBE2(clnt_cots__sendwait, CLIENT *, h,
11489675Sdai.ngo@sun.com int, waitsecs);
11499675Sdai.ngo@sun.com }
11509675Sdai.ngo@sun.com
11510Sstevel@tonic-gate RPCLOG(64, "clnt_cots_kcallit: sent call for xid 0x%x\n",
11526403Sgt29601 (uint_t)p->cku_xid);
11530Sstevel@tonic-gate p->cku_flags = (CKU_ONQUEUE|CKU_SENT);
11540Sstevel@tonic-gate p->cku_recv_attempts = 1;
11550Sstevel@tonic-gate
11560Sstevel@tonic-gate #ifdef RPCDEBUG
115711066Srafael.vanoni@sun.com time_sent = ddi_get_lbolt();
11580Sstevel@tonic-gate #endif
11590Sstevel@tonic-gate
11600Sstevel@tonic-gate /*
11610Sstevel@tonic-gate * Wait for a reply or a timeout. If there is no error or timeout
11620Sstevel@tonic-gate * (both indicated by call_status), call->call_reply will contain
11630Sstevel@tonic-gate * the RPC reply message.
11640Sstevel@tonic-gate */
11650Sstevel@tonic-gate read_again:
11660Sstevel@tonic-gate mutex_enter(&call->call_lock);
11670Sstevel@tonic-gate interrupted = 0;
11680Sstevel@tonic-gate if (call->call_status == RPC_TIMEDOUT) {
11690Sstevel@tonic-gate /*
11700Sstevel@tonic-gate * Indicate that the lwp is not to be stopped while waiting
11710Sstevel@tonic-gate * for this network traffic. This is to avoid deadlock while
11720Sstevel@tonic-gate * debugging a process via /proc and also to avoid recursive
11730Sstevel@tonic-gate * mutex_enter()s due to NFS page faults while stopping
11740Sstevel@tonic-gate * (NFS holds locks when it calls here).
11750Sstevel@tonic-gate */
11760Sstevel@tonic-gate clock_t cv_wait_ret;
11770Sstevel@tonic-gate clock_t timout;
11780Sstevel@tonic-gate clock_t oldlbolt;
11790Sstevel@tonic-gate
11800Sstevel@tonic-gate klwp_t *lwp = ttolwp(curthread);
11810Sstevel@tonic-gate
11820Sstevel@tonic-gate if (lwp != NULL)
11830Sstevel@tonic-gate lwp->lwp_nostop++;
11840Sstevel@tonic-gate
118511066Srafael.vanoni@sun.com oldlbolt = ddi_get_lbolt();
11860Sstevel@tonic-gate timout = wait.tv_sec * drv_usectohz(1000000) +
11870Sstevel@tonic-gate drv_usectohz(wait.tv_usec) + oldlbolt;
11880Sstevel@tonic-gate /*
11890Sstevel@tonic-gate * Iterate until the call_status is changed to something
11900Sstevel@tonic-gate * other than RPC_TIMEDOUT, or until cv_timedwait_sig() returns
11910Sstevel@tonic-gate * something <= 0. The latter means that we were either
11920Sstevel@tonic-gate * interrupted (0) or timed out (-1).
11930Sstevel@tonic-gate */
11940Sstevel@tonic-gate if (h->cl_nosignal)
11950Sstevel@tonic-gate while ((cv_wait_ret = cv_timedwait(&call->call_cv,
11960Sstevel@tonic-gate &call->call_lock, timout)) > 0 &&
11976403Sgt29601 call->call_status == RPC_TIMEDOUT)
11986403Sgt29601 ;
11990Sstevel@tonic-gate else
12000Sstevel@tonic-gate while ((cv_wait_ret = cv_timedwait_sig(
12010Sstevel@tonic-gate &call->call_cv,
12020Sstevel@tonic-gate &call->call_lock, timout)) > 0 &&
12036403Sgt29601 call->call_status == RPC_TIMEDOUT)
12046403Sgt29601 ;
12050Sstevel@tonic-gate
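		/*
		 * cv_timedwait_sig() returns 0 if interrupted by a
		 * signal, -1 if the timeout expired, and a positive
		 * value if the condition variable was signalled.
		 */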
12060Sstevel@tonic-gate switch (cv_wait_ret) {
12070Sstevel@tonic-gate case 0:
12080Sstevel@tonic-gate /*
12090Sstevel@tonic-gate * If we got out of the above loop with
12100Sstevel@tonic-gate * cv_timedwait_sig() returning 0, then we were
12110Sstevel@tonic-gate * interrupted, regardless of what call_status is.
12120Sstevel@tonic-gate */
12130Sstevel@tonic-gate interrupted = 1;
12140Sstevel@tonic-gate break;
12150Sstevel@tonic-gate case -1:
12160Sstevel@tonic-gate /* cv_timedwait_sig() timed out */
12170Sstevel@tonic-gate break;
12180Sstevel@tonic-gate default:
12190Sstevel@tonic-gate
12200Sstevel@tonic-gate /*
12210Sstevel@tonic-gate * We were cv_signaled(). If we didn't
12220Sstevel@tonic-gate * get a successful call_status and returned
12230Sstevel@tonic-gate * before time expired, delay up to clnt_cots_min_tout
12240Sstevel@tonic-gate * seconds so that the caller doesn't immediately
12250Sstevel@tonic-gate * try to call us again and thus force the
12260Sstevel@tonic-gate * same condition that got us here (such
12270Sstevel@tonic-gate * as an RPC_XPRTFAILED due to the server not
12280Sstevel@tonic-gate * listening on the end-point).
12290Sstevel@tonic-gate */
12300Sstevel@tonic-gate if (call->call_status != RPC_SUCCESS) {
12310Sstevel@tonic-gate clock_t curlbolt;
12320Sstevel@tonic-gate clock_t diff;
12330Sstevel@tonic-gate
12340Sstevel@tonic-gate curlbolt = ddi_get_lbolt();
12350Sstevel@tonic-gate ticks = clnt_cots_min_tout *
12360Sstevel@tonic-gate drv_usectohz(1000000);
12370Sstevel@tonic-gate diff = curlbolt - oldlbolt;
12380Sstevel@tonic-gate if (diff < ticks) {
12390Sstevel@tonic-gate delay_first = TRUE;
12400Sstevel@tonic-gate if (diff > 0)
12410Sstevel@tonic-gate ticks -= diff;
12420Sstevel@tonic-gate }
12430Sstevel@tonic-gate }
12440Sstevel@tonic-gate break;
12450Sstevel@tonic-gate }
12460Sstevel@tonic-gate
12470Sstevel@tonic-gate if (lwp != NULL)
12480Sstevel@tonic-gate lwp->lwp_nostop--;
12490Sstevel@tonic-gate }
12500Sstevel@tonic-gate /*
12510Sstevel@tonic-gate * Get the reply message, if any. This will be freed at the end
12520Sstevel@tonic-gate * whether or not an error occurred.
12530Sstevel@tonic-gate */
12540Sstevel@tonic-gate mp = call->call_reply;
12550Sstevel@tonic-gate call->call_reply = NULL;
12560Sstevel@tonic-gate
12570Sstevel@tonic-gate /*
12580Sstevel@tonic-gate * call_err is the error info when the call is on dispatch queue.
12590Sstevel@tonic-gate * cku_err is the error info returned to the caller.
12600Sstevel@tonic-gate * Sync cku_err with call_err for local message processing.
12610Sstevel@tonic-gate */
12620Sstevel@tonic-gate
12630Sstevel@tonic-gate status = call->call_status;
12640Sstevel@tonic-gate p->cku_err = call->call_err;
12650Sstevel@tonic-gate mutex_exit(&call->call_lock);
12660Sstevel@tonic-gate
12670Sstevel@tonic-gate if (status != RPC_SUCCESS) {
12680Sstevel@tonic-gate switch (status) {
12690Sstevel@tonic-gate case RPC_TIMEDOUT:
127011066Srafael.vanoni@sun.com now = ddi_get_lbolt();
12710Sstevel@tonic-gate if (interrupted) {
12720Sstevel@tonic-gate COTSRCSTAT_INCR(p->cku_stats, rcintrs);
12730Sstevel@tonic-gate p->cku_err.re_status = RPC_INTR;
12740Sstevel@tonic-gate p->cku_err.re_errno = EINTR;
12750Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: xid 0x%x",
12760Sstevel@tonic-gate p->cku_xid);
127711066Srafael.vanoni@sun.com RPCLOG(1, "signal interrupted at %ld", now);
12780Sstevel@tonic-gate RPCLOG(1, ", was sent at %ld\n", time_sent);
12790Sstevel@tonic-gate } else {
12800Sstevel@tonic-gate COTSRCSTAT_INCR(p->cku_stats, rctimeouts);
12810Sstevel@tonic-gate p->cku_err.re_errno = ETIMEDOUT;
12820Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: timed out at %ld",
128311066Srafael.vanoni@sun.com now);
12840Sstevel@tonic-gate RPCLOG(1, ", was sent at %ld\n", time_sent);
12850Sstevel@tonic-gate }
12860Sstevel@tonic-gate break;
12870Sstevel@tonic-gate
12880Sstevel@tonic-gate case RPC_XPRTFAILED:
12890Sstevel@tonic-gate if (p->cku_err.re_errno == 0)
12900Sstevel@tonic-gate p->cku_err.re_errno = EIO;
12910Sstevel@tonic-gate
12920Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: transport failed: %d\n",
12930Sstevel@tonic-gate p->cku_err.re_errno);
12940Sstevel@tonic-gate break;
12950Sstevel@tonic-gate
12960Sstevel@tonic-gate case RPC_SYSTEMERROR:
12970Sstevel@tonic-gate ASSERT(p->cku_err.re_errno);
12980Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: system error: %d\n",
12990Sstevel@tonic-gate p->cku_err.re_errno);
13000Sstevel@tonic-gate break;
13010Sstevel@tonic-gate
13020Sstevel@tonic-gate default:
13030Sstevel@tonic-gate p->cku_err.re_status = RPC_SYSTEMERROR;
13040Sstevel@tonic-gate p->cku_err.re_errno = EIO;
13050Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: error: %s\n",
13060Sstevel@tonic-gate clnt_sperrno(status));
13070Sstevel@tonic-gate break;
13080Sstevel@tonic-gate }
13090Sstevel@tonic-gate if (p->cku_err.re_status != RPC_TIMEDOUT) {
13100Sstevel@tonic-gate
13110Sstevel@tonic-gate if (p->cku_flags & CKU_ONQUEUE) {
13120Sstevel@tonic-gate call_table_remove(call);
13130Sstevel@tonic-gate p->cku_flags &= ~CKU_ONQUEUE;
13140Sstevel@tonic-gate }
13150Sstevel@tonic-gate
13160Sstevel@tonic-gate RPCLOG(64, "clnt_cots_kcallit: non TIMEOUT so xid 0x%x "
13170Sstevel@tonic-gate "taken off dispatch list\n", p->cku_xid);
13180Sstevel@tonic-gate if (call->call_reply) {
13190Sstevel@tonic-gate freemsg(call->call_reply);
13200Sstevel@tonic-gate call->call_reply = NULL;
13210Sstevel@tonic-gate }
13220Sstevel@tonic-gate } else if (wait.tv_sec != 0) {
13230Sstevel@tonic-gate /*
13240Sstevel@tonic-gate * We've sent the request over TCP and so we have
13250Sstevel@tonic-gate * every reason to believe it will get
13260Sstevel@tonic-gate * delivered, in which case returning a timeout is not
13270Sstevel@tonic-gate * appropriate.
13280Sstevel@tonic-gate */
13290Sstevel@tonic-gate if (p->cku_progress == TRUE &&
13300Sstevel@tonic-gate p->cku_recv_attempts < clnt_cots_maxrecv) {
13310Sstevel@tonic-gate p->cku_err.re_status = RPC_INPROGRESS;
13320Sstevel@tonic-gate }
13330Sstevel@tonic-gate }
13340Sstevel@tonic-gate goto cots_done;
13350Sstevel@tonic-gate }
13360Sstevel@tonic-gate
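	/*
	 * Decode the reply directly from the received mblk chain.  The
	 * actual results are not decoded here; ar_results.proc is set
	 * to xdr_void and the caller's xdr_results is applied later by
	 * AUTH_UNWRAP().
	 */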
13370Sstevel@tonic-gate xdrs = &p->cku_inxdr;
13380Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_DECODE, 0);
13390Sstevel@tonic-gate
13400Sstevel@tonic-gate reply_msg.rm_direction = REPLY;
13410Sstevel@tonic-gate reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
13420Sstevel@tonic-gate reply_msg.acpted_rply.ar_stat = SUCCESS;
13430Sstevel@tonic-gate
13440Sstevel@tonic-gate reply_msg.acpted_rply.ar_verf = _null_auth;
13450Sstevel@tonic-gate /*
13460Sstevel@tonic-gate * xdr_results will be done in AUTH_UNWRAP.
13470Sstevel@tonic-gate */
13480Sstevel@tonic-gate reply_msg.acpted_rply.ar_results.where = NULL;
13490Sstevel@tonic-gate reply_msg.acpted_rply.ar_results.proc = xdr_void;
13500Sstevel@tonic-gate
13510Sstevel@tonic-gate if (xdr_replymsg(xdrs, &reply_msg)) {
13520Sstevel@tonic-gate enum clnt_stat re_status;
13530Sstevel@tonic-gate
13540Sstevel@tonic-gate _seterr_reply(&reply_msg, &p->cku_err);
13550Sstevel@tonic-gate
13560Sstevel@tonic-gate re_status = p->cku_err.re_status;
13570Sstevel@tonic-gate if (re_status == RPC_SUCCESS) {
13580Sstevel@tonic-gate /*
13590Sstevel@tonic-gate * Reply is good, check auth.
13600Sstevel@tonic-gate */
13610Sstevel@tonic-gate if (!AUTH_VALIDATE(h->cl_auth,
13626403Sgt29601 &reply_msg.acpted_rply.ar_verf)) {
13630Sstevel@tonic-gate COTSRCSTAT_INCR(p->cku_stats, rcbadverfs);
13640Sstevel@tonic-gate RPCLOG0(1, "clnt_cots_kcallit: validation "
13656403Sgt29601 "failure\n");
13660Sstevel@tonic-gate freemsg(mp);
13670Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
13680Sstevel@tonic-gate mutex_enter(&call->call_lock);
13690Sstevel@tonic-gate if (call->call_reply == NULL)
13700Sstevel@tonic-gate call->call_status = RPC_TIMEDOUT;
13710Sstevel@tonic-gate mutex_exit(&call->call_lock);
13720Sstevel@tonic-gate goto read_again;
13730Sstevel@tonic-gate } else if (!AUTH_UNWRAP(h->cl_auth, xdrs,
13746403Sgt29601 xdr_results, resultsp)) {
13750Sstevel@tonic-gate RPCLOG0(1, "clnt_cots_kcallit: validation "
13766403Sgt29601 "failure (unwrap)\n");
13770Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTDECODERES;
13780Sstevel@tonic-gate p->cku_err.re_errno = EIO;
13790Sstevel@tonic-gate }
13800Sstevel@tonic-gate } else {
13810Sstevel@tonic-gate /* set errno in case we can't recover */
13820Sstevel@tonic-gate if (re_status != RPC_VERSMISMATCH &&
13830Sstevel@tonic-gate re_status != RPC_AUTHERROR &&
13840Sstevel@tonic-gate re_status != RPC_PROGVERSMISMATCH)
13850Sstevel@tonic-gate p->cku_err.re_errno = EIO;
13860Sstevel@tonic-gate
13870Sstevel@tonic-gate if (re_status == RPC_AUTHERROR) {
13880Sstevel@tonic-gate /*
1389589Srg137905 * Maybe our credentials need to be refreshed
13900Sstevel@tonic-gate */
1391571Srg137905 if (cm_entry) {
1392571Srg137905 /*
1393571Srg137905 * There is the potential that the
1394571Srg137905 * cm_entry has been or will be marked dead,
1395589Srg137905 * so drop the connection altogether and
1396589Srg137905 * force AUTH_REFRESH to establish a new
1397589Srg137905 * connection.
1398571Srg137905 */
1399589Srg137905 connmgr_cancelconn(cm_entry);
1400571Srg137905 cm_entry = NULL;
14010Sstevel@tonic-gate }
14020Sstevel@tonic-gate
1403*11380SMarcel.Telka@Sun.COM (void) xdr_rpc_free_verifier(xdrs,
1404*11380SMarcel.Telka@Sun.COM &reply_msg);
1405*11380SMarcel.Telka@Sun.COM
1406*11380SMarcel.Telka@Sun.COM if (p->cku_flags & CKU_ONQUEUE) {
1407*11380SMarcel.Telka@Sun.COM call_table_remove(call);
1408*11380SMarcel.Telka@Sun.COM p->cku_flags &= ~CKU_ONQUEUE;
1409*11380SMarcel.Telka@Sun.COM }
1410*11380SMarcel.Telka@Sun.COM RPCLOG(64,
1411*11380SMarcel.Telka@Sun.COM "clnt_cots_kcallit: AUTH_ERROR, xid"
1412*11380SMarcel.Telka@Sun.COM " 0x%x removed off dispatch list\n",
1413*11380SMarcel.Telka@Sun.COM p->cku_xid);
1414*11380SMarcel.Telka@Sun.COM if (call->call_reply) {
1415*11380SMarcel.Telka@Sun.COM freemsg(call->call_reply);
1416*11380SMarcel.Telka@Sun.COM call->call_reply = NULL;
1417*11380SMarcel.Telka@Sun.COM }
1418*11380SMarcel.Telka@Sun.COM
1419571Srg137905 if ((refreshes > 0) &&
1420571Srg137905 AUTH_REFRESH(h->cl_auth, &reply_msg,
14216403Sgt29601 p->cku_cred)) {
1422571Srg137905 refreshes--;
1423571Srg137905 freemsg(mp);
1424571Srg137905 mp = NULL;
1425571Srg137905
1426571Srg137905 COTSRCSTAT_INCR(p->cku_stats,
14276403Sgt29601 rcbadcalls);
1428571Srg137905 COTSRCSTAT_INCR(p->cku_stats,
14296403Sgt29601 rcnewcreds);
1430571Srg137905 goto call_again;
1431589Srg137905 }
1432589Srg137905
14330Sstevel@tonic-gate /*
14340Sstevel@tonic-gate * We have used the client handle to
14350Sstevel@tonic-gate * do an AUTH_REFRESH and the RPC status may
14360Sstevel@tonic-gate * be set to RPC_SUCCESS; Let's make sure to
14370Sstevel@tonic-gate * set it to RPC_AUTHERROR.
14380Sstevel@tonic-gate */
14390Sstevel@tonic-gate p->cku_err.re_status = RPC_AUTHERROR;
1440589Srg137905
14410Sstevel@tonic-gate /*
14420Sstevel@tonic-gate * Map recoverable and unrecoverable
14430Sstevel@tonic-gate * authentication errors to appropriate errno
14440Sstevel@tonic-gate */
14450Sstevel@tonic-gate switch (p->cku_err.re_why) {
1446342Snd150628 case AUTH_TOOWEAK:
1447342Snd150628 /*
1448571Srg137905 * This could be a failure where the
1449571Srg137905 * server requires use of a reserved
1450571Srg137905 * port. Check and, if necessary, set
1451571Srg137905 * useresvport in the client handle and
1452571Srg137905 * try one more time. Next go-round we
1453571Srg137905 * fall out with the tooweak error.
1454342Snd150628 */
1455342Snd150628 if (p->cku_useresvport != 1) {
1456342Snd150628 p->cku_useresvport = 1;
1457342Snd150628 p->cku_xid = 0;
1458342Snd150628 freemsg(mp);
1459*11380SMarcel.Telka@Sun.COM mp = NULL;
1460342Snd150628 goto call_again;
1461342Snd150628 }
1462342Snd150628 /* FALLTHRU */
14630Sstevel@tonic-gate case AUTH_BADCRED:
14640Sstevel@tonic-gate case AUTH_BADVERF:
14650Sstevel@tonic-gate case AUTH_INVALIDRESP:
14660Sstevel@tonic-gate case AUTH_FAILED:
14670Sstevel@tonic-gate case RPCSEC_GSS_NOCRED:
14680Sstevel@tonic-gate case RPCSEC_GSS_FAILED:
14690Sstevel@tonic-gate p->cku_err.re_errno = EACCES;
14700Sstevel@tonic-gate break;
14710Sstevel@tonic-gate case AUTH_REJECTEDCRED:
14720Sstevel@tonic-gate case AUTH_REJECTEDVERF:
14730Sstevel@tonic-gate default: p->cku_err.re_errno = EIO;
14740Sstevel@tonic-gate break;
14750Sstevel@tonic-gate }
14760Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcallit: authentication"
14770Sstevel@tonic-gate " failed with RPC_AUTHERROR of type %d\n",
14780Sstevel@tonic-gate (int)p->cku_err.re_why);
1479*11380SMarcel.Telka@Sun.COM goto cots_done;
14800Sstevel@tonic-gate }
14810Sstevel@tonic-gate }
14820Sstevel@tonic-gate } else {
14830Sstevel@tonic-gate /* reply didn't decode properly. */
14840Sstevel@tonic-gate p->cku_err.re_status = RPC_CANTDECODERES;
14850Sstevel@tonic-gate p->cku_err.re_errno = EIO;
14860Sstevel@tonic-gate RPCLOG0(1, "clnt_cots_kcallit: decode failure\n");
14870Sstevel@tonic-gate }
14880Sstevel@tonic-gate
14890Sstevel@tonic-gate (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
14900Sstevel@tonic-gate
14910Sstevel@tonic-gate if (p->cku_flags & CKU_ONQUEUE) {
14920Sstevel@tonic-gate call_table_remove(call);
14930Sstevel@tonic-gate p->cku_flags &= ~CKU_ONQUEUE;
14940Sstevel@tonic-gate }
14950Sstevel@tonic-gate
14960Sstevel@tonic-gate RPCLOG(64, "clnt_cots_kcallit: xid 0x%x taken off dispatch list",
14970Sstevel@tonic-gate p->cku_xid);
14980Sstevel@tonic-gate RPCLOG(64, " status is %s\n", clnt_sperrno(p->cku_err.re_status));
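	/*
	 * Common exit path: release the connection entry, free the
	 * request and any unconsumed reply, count the failure if the
	 * call did not succeed, and optionally delay before returning
	 * so that an immediate retry doesn't hammer a dead end-point.
	 */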
14990Sstevel@tonic-gate cots_done:
15000Sstevel@tonic-gate if (cm_entry)
15010Sstevel@tonic-gate connmgr_release(cm_entry);
15020Sstevel@tonic-gate
15030Sstevel@tonic-gate if (mp != NULL)
15040Sstevel@tonic-gate freemsg(mp);
15050Sstevel@tonic-gate if ((p->cku_flags & CKU_ONQUEUE) == 0 && call->call_reply) {
15060Sstevel@tonic-gate freemsg(call->call_reply);
15070Sstevel@tonic-gate call->call_reply = NULL;
15080Sstevel@tonic-gate }
15090Sstevel@tonic-gate if (p->cku_err.re_status != RPC_SUCCESS) {
15100Sstevel@tonic-gate RPCLOG0(1, "clnt_cots_kcallit: tail-end failure\n");
15110Sstevel@tonic-gate COTSRCSTAT_INCR(p->cku_stats, rcbadcalls);
15120Sstevel@tonic-gate }
15130Sstevel@tonic-gate
15140Sstevel@tonic-gate /*
15150Sstevel@tonic-gate * No point in delaying if the zone is going away.
15160Sstevel@tonic-gate */
15170Sstevel@tonic-gate if (delay_first == TRUE &&
15180Sstevel@tonic-gate !(zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)) {
15190Sstevel@tonic-gate if (clnt_delay(ticks, h->cl_nosignal) == EINTR) {
15200Sstevel@tonic-gate p->cku_err.re_errno = EINTR;
15210Sstevel@tonic-gate p->cku_err.re_status = RPC_INTR;
15220Sstevel@tonic-gate }
15230Sstevel@tonic-gate }
15240Sstevel@tonic-gate return (p->cku_err.re_status);
15250Sstevel@tonic-gate }
15260Sstevel@tonic-gate
15270Sstevel@tonic-gate /*
15280Sstevel@tonic-gate * Kinit routine for cots. This sets up the correct operations in
15290Sstevel@tonic-gate * the client handle, as the handle may have previously been a clts
15300Sstevel@tonic-gate * handle, and clears the xid field so there is no way a new call
15310Sstevel@tonic-gate * could be mistaken for a retry. It also sets in the handle the
15320Sstevel@tonic-gate * information that is passed at create/kinit time but needed at
15330Sstevel@tonic-gate * call time, as cots creates the transport at call time - device,
15340Sstevel@tonic-gate * address of the server, protocol family.
15350Sstevel@tonic-gate */
15360Sstevel@tonic-gate void
15370Sstevel@tonic-gate clnt_cots_kinit(CLIENT *h, dev_t dev, int family, struct netbuf *addr,
15380Sstevel@tonic-gate int max_msgsize, cred_t *cred)
15390Sstevel@tonic-gate {
15400Sstevel@tonic-gate /* LINTED pointer alignment */
15410Sstevel@tonic-gate cku_private_t *p = htop(h);
15420Sstevel@tonic-gate calllist_t *call = &p->cku_call;
15430Sstevel@tonic-gate
15440Sstevel@tonic-gate h->cl_ops = &tcp_ops;
15450Sstevel@tonic-gate if (p->cku_flags & CKU_ONQUEUE) {
15460Sstevel@tonic-gate call_table_remove(call);
15470Sstevel@tonic-gate p->cku_flags &= ~CKU_ONQUEUE;
15480Sstevel@tonic-gate RPCLOG(64, "clnt_cots_kinit: removing call for xid 0x%x from"
15490Sstevel@tonic-gate " dispatch list\n", p->cku_xid);
15500Sstevel@tonic-gate }
15510Sstevel@tonic-gate
15520Sstevel@tonic-gate if (call->call_reply != NULL) {
15530Sstevel@tonic-gate freemsg(call->call_reply);
15540Sstevel@tonic-gate call->call_reply = NULL;
15550Sstevel@tonic-gate }
15560Sstevel@tonic-gate
15570Sstevel@tonic-gate call->call_bucket = NULL;
15580Sstevel@tonic-gate call->call_hash = 0;
15590Sstevel@tonic-gate
15600Sstevel@tonic-gate /*
15610Sstevel@tonic-gate * We don't clear cku_flags here, because clnt_cots_kcallit()
15620Sstevel@tonic-gate * takes care of handling the cku_flags reset.
15630Sstevel@tonic-gate */
15640Sstevel@tonic-gate p->cku_xid = 0;
15650Sstevel@tonic-gate p->cku_device = dev;
15660Sstevel@tonic-gate p->cku_addrfmly = family;
15670Sstevel@tonic-gate p->cku_cred = cred;
15680Sstevel@tonic-gate
15690Sstevel@tonic-gate if (p->cku_addr.maxlen < addr->len) {
15700Sstevel@tonic-gate if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
15710Sstevel@tonic-gate kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
15720Sstevel@tonic-gate p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
15730Sstevel@tonic-gate p->cku_addr.maxlen = addr->maxlen;
15740Sstevel@tonic-gate }
15750Sstevel@tonic-gate
15760Sstevel@tonic-gate p->cku_addr.len = addr->len;
15770Sstevel@tonic-gate bcopy(addr->buf, p->cku_addr.buf, addr->len);
15780Sstevel@tonic-gate
15790Sstevel@tonic-gate /*
15800Sstevel@tonic-gate * If the current sanity check size in rpcmod is smaller
15810Sstevel@tonic-gate * than the size needed, then increase the sanity check.
15820Sstevel@tonic-gate */
15830Sstevel@tonic-gate if (max_msgsize != 0 && clnt_max_msg_sizep != NULL &&
15840Sstevel@tonic-gate max_msgsize > *clnt_max_msg_sizep) {
15850Sstevel@tonic-gate mutex_enter(&clnt_max_msg_lock);
15860Sstevel@tonic-gate if (max_msgsize > *clnt_max_msg_sizep)
15870Sstevel@tonic-gate *clnt_max_msg_sizep = max_msgsize;
15880Sstevel@tonic-gate mutex_exit(&clnt_max_msg_lock);
15890Sstevel@tonic-gate }
15900Sstevel@tonic-gate }
15910Sstevel@tonic-gate
15920Sstevel@tonic-gate /*
15930Sstevel@tonic-gate * ksettimers is a no-op for cots, with the exception of setting the xid.
15940Sstevel@tonic-gate */
15950Sstevel@tonic-gate /* ARGSUSED */
15960Sstevel@tonic-gate static int
15970Sstevel@tonic-gate clnt_cots_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
15980Sstevel@tonic-gate int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg,
15990Sstevel@tonic-gate uint32_t xid)
16000Sstevel@tonic-gate {
16010Sstevel@tonic-gate /* LINTED pointer alignment */
16020Sstevel@tonic-gate cku_private_t *p = htop(h);
16030Sstevel@tonic-gate
16040Sstevel@tonic-gate if (xid)
16050Sstevel@tonic-gate p->cku_xid = xid;
16060Sstevel@tonic-gate COTSRCSTAT_INCR(p->cku_stats, rctimers);
16070Sstevel@tonic-gate return (0);
16080Sstevel@tonic-gate }
16090Sstevel@tonic-gate
16100Sstevel@tonic-gate extern void rpc_poptimod(struct vnode *);
16110Sstevel@tonic-gate extern int kstr_push(struct vnode *, char *);
16120Sstevel@tonic-gate
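/*
 * Update the per-connection kstat snapshot: copy the transport state
 * counters and format the server address (dotted-quad for AF_INET,
 * string form for AF_INET6) into the named-string kstat entry.
 */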
16130Sstevel@tonic-gate int
16140Sstevel@tonic-gate conn_kstat_update(kstat_t *ksp, int rw)
16150Sstevel@tonic-gate {
16160Sstevel@tonic-gate struct cm_xprt *cm_entry;
16170Sstevel@tonic-gate struct cm_kstat_xprt *cm_ksp_data;
16180Sstevel@tonic-gate uchar_t *b;
16190Sstevel@tonic-gate char *fbuf;
16200Sstevel@tonic-gate
16210Sstevel@tonic-gate if (rw == KSTAT_WRITE)
16220Sstevel@tonic-gate return (EACCES);
16230Sstevel@tonic-gate if (ksp == NULL || ksp->ks_private == NULL)
16240Sstevel@tonic-gate return (EIO);
16250Sstevel@tonic-gate cm_entry = (struct cm_xprt *)ksp->ks_private;
16260Sstevel@tonic-gate cm_ksp_data = (struct cm_kstat_xprt *)ksp->ks_data;
16270Sstevel@tonic-gate
16280Sstevel@tonic-gate cm_ksp_data->x_wq.value.ui32 = (uint32_t)(uintptr_t)cm_entry->x_wq;
16290Sstevel@tonic-gate cm_ksp_data->x_family.value.ui32 = cm_entry->x_family;
16300Sstevel@tonic-gate cm_ksp_data->x_rdev.value.ui32 = (uint32_t)cm_entry->x_rdev;
16310Sstevel@tonic-gate cm_ksp_data->x_time.value.ui32 = cm_entry->x_time;
16320Sstevel@tonic-gate cm_ksp_data->x_ref.value.ui32 = cm_entry->x_ref;
16330Sstevel@tonic-gate cm_ksp_data->x_state.value.ui32 = cm_entry->x_state_flags;
16340Sstevel@tonic-gate
16350Sstevel@tonic-gate if (cm_entry->x_server.buf) {
1636457Sbmc fbuf = cm_ksp_data->x_server.value.str.addr.ptr;
16370Sstevel@tonic-gate if (cm_entry->x_family == AF_INET &&
16380Sstevel@tonic-gate cm_entry->x_server.len ==
16390Sstevel@tonic-gate sizeof (struct sockaddr_in)) {
16400Sstevel@tonic-gate struct sockaddr_in *sa;
16410Sstevel@tonic-gate sa = (struct sockaddr_in *)
16420Sstevel@tonic-gate cm_entry->x_server.buf;
16430Sstevel@tonic-gate b = (uchar_t *)&sa->sin_addr;
16440Sstevel@tonic-gate (void) sprintf(fbuf,
16450Sstevel@tonic-gate "%03d.%03d.%03d.%03d", b[0] & 0xFF, b[1] & 0xFF,
16460Sstevel@tonic-gate b[2] & 0xFF, b[3] & 0xFF);
16470Sstevel@tonic-gate cm_ksp_data->x_port.value.ui32 =
16480Sstevel@tonic-gate (uint32_t)sa->sin_port;
16490Sstevel@tonic-gate } else if (cm_entry->x_family == AF_INET6 &&
16500Sstevel@tonic-gate cm_entry->x_server.len >=
16510Sstevel@tonic-gate sizeof (struct sockaddr_in6)) {
16520Sstevel@tonic-gate /* extract server IP address & port */
16530Sstevel@tonic-gate struct sockaddr_in6 *sin6;
16540Sstevel@tonic-gate sin6 = (struct sockaddr_in6 *)cm_entry->x_server.buf;
16550Sstevel@tonic-gate (void) kinet_ntop6((uchar_t *)&sin6->sin6_addr, fbuf,
16560Sstevel@tonic-gate INET6_ADDRSTRLEN);
16570Sstevel@tonic-gate cm_ksp_data->x_port.value.ui32 = sin6->sin6_port;
16580Sstevel@tonic-gate } else {
16590Sstevel@tonic-gate struct sockaddr_in *sa;
16600Sstevel@tonic-gate
16610Sstevel@tonic-gate sa = (struct sockaddr_in *)cm_entry->x_server.buf;
16620Sstevel@tonic-gate b = (uchar_t *)&sa->sin_addr;
16630Sstevel@tonic-gate (void) sprintf(fbuf,
16640Sstevel@tonic-gate "%03d.%03d.%03d.%03d", b[0] & 0xFF, b[1] & 0xFF,
16650Sstevel@tonic-gate b[2] & 0xFF, b[3] & 0xFF);
16660Sstevel@tonic-gate }
16670Sstevel@tonic-gate KSTAT_NAMED_STR_BUFLEN(&cm_ksp_data->x_server) =
16686403Sgt29601 strlen(fbuf) + 1;
16690Sstevel@tonic-gate }
16700Sstevel@tonic-gate
16710Sstevel@tonic-gate return (0);
16720Sstevel@tonic-gate }
16730Sstevel@tonic-gate
16740Sstevel@tonic-gate
16750Sstevel@tonic-gate /*
16760Sstevel@tonic-gate * We want a version of delay which is interruptible by a UNIX signal.
16770Sstevel@tonic-gate * Return EINTR if an interrupt occurred.
16780Sstevel@tonic-gate */
16790Sstevel@tonic-gate static int
16800Sstevel@tonic-gate clnt_delay(clock_t ticks, bool_t nosignal)
16810Sstevel@tonic-gate {
16820Sstevel@tonic-gate if (nosignal == TRUE) {
16830Sstevel@tonic-gate delay(ticks);
16840Sstevel@tonic-gate return (0);
16850Sstevel@tonic-gate }
16860Sstevel@tonic-gate return (delay_sig(ticks));
16870Sstevel@tonic-gate }
16880Sstevel@tonic-gate
16890Sstevel@tonic-gate /*
16900Sstevel@tonic-gate * Wait for a connection until a timeout, or until we are
16910Sstevel@tonic-gate * signalled that there has been a connection state change.
16920Sstevel@tonic-gate */
16930Sstevel@tonic-gate static enum clnt_stat
16940Sstevel@tonic-gate connmgr_cwait(struct cm_xprt *cm_entry, const struct timeval *waitp,
16950Sstevel@tonic-gate bool_t nosignal)
16960Sstevel@tonic-gate {
16970Sstevel@tonic-gate bool_t interrupted;
16980Sstevel@tonic-gate clock_t timout, cv_stat;
16990Sstevel@tonic-gate enum clnt_stat clstat;
17000Sstevel@tonic-gate unsigned int old_state;
17010Sstevel@tonic-gate
17020Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
17030Sstevel@tonic-gate /*
17040Sstevel@tonic-gate * We wait for the transport connection to be made, or an
17050Sstevel@tonic-gate * indication that it could not be made.
17060Sstevel@tonic-gate */
17070Sstevel@tonic-gate clstat = RPC_TIMEDOUT;
17080Sstevel@tonic-gate interrupted = FALSE;
17090Sstevel@tonic-gate
17100Sstevel@tonic-gate old_state = cm_entry->x_state_flags;
17110Sstevel@tonic-gate /*
17120Sstevel@tonic-gate * Now loop until cv_timedwait{_sig} returns because of
17130Sstevel@tonic-gate * a signal(0) or timeout(-1) or cv_signal(>0). But it may be
17140Sstevel@tonic-gate * cv_signalled for various other reasons too. So loop
17150Sstevel@tonic-gate * until there is a state change on the connection.
17160Sstevel@tonic-gate */
17170Sstevel@tonic-gate
17180Sstevel@tonic-gate timout = waitp->tv_sec * drv_usectohz(1000000) +
171911066Srafael.vanoni@sun.com drv_usectohz(waitp->tv_usec) + ddi_get_lbolt();
17200Sstevel@tonic-gate
17210Sstevel@tonic-gate if (nosignal) {
17220Sstevel@tonic-gate while ((cv_stat = cv_timedwait(&cm_entry->x_conn_cv,
17230Sstevel@tonic-gate &connmgr_lock, timout)) > 0 &&
17240Sstevel@tonic-gate cm_entry->x_state_flags == old_state)
17250Sstevel@tonic-gate ;
17260Sstevel@tonic-gate } else {
17270Sstevel@tonic-gate while ((cv_stat = cv_timedwait_sig(&cm_entry->x_conn_cv,
17280Sstevel@tonic-gate &connmgr_lock, timout)) > 0 &&
17290Sstevel@tonic-gate cm_entry->x_state_flags == old_state)
17300Sstevel@tonic-gate ;
17310Sstevel@tonic-gate
17320Sstevel@tonic-gate if (cv_stat == 0) /* got intr signal? */
17330Sstevel@tonic-gate interrupted = TRUE;
17340Sstevel@tonic-gate }
17350Sstevel@tonic-gate
17360Sstevel@tonic-gate if ((cm_entry->x_state_flags & (X_BADSTATES|X_CONNECTED)) ==
17370Sstevel@tonic-gate X_CONNECTED) {
17380Sstevel@tonic-gate clstat = RPC_SUCCESS;
17390Sstevel@tonic-gate } else {
17400Sstevel@tonic-gate if (interrupted == TRUE)
17410Sstevel@tonic-gate clstat = RPC_INTR;
17420Sstevel@tonic-gate RPCLOG(1, "connmgr_cwait: can't connect, error: %s\n",
17430Sstevel@tonic-gate clnt_sperrno(clstat));
17440Sstevel@tonic-gate }
17450Sstevel@tonic-gate
17460Sstevel@tonic-gate return (clstat);
17470Sstevel@tonic-gate }
17480Sstevel@tonic-gate
17490Sstevel@tonic-gate /*
17500Sstevel@tonic-gate * Primary interface for how RPC grabs a connection.
17510Sstevel@tonic-gate */
17520Sstevel@tonic-gate static struct cm_xprt *
17530Sstevel@tonic-gate connmgr_wrapget(
17540Sstevel@tonic-gate struct netbuf *retryaddr,
17550Sstevel@tonic-gate const struct timeval *waitp,
17560Sstevel@tonic-gate cku_private_t *p)
17570Sstevel@tonic-gate {
17580Sstevel@tonic-gate struct cm_xprt *cm_entry;
17590Sstevel@tonic-gate
17600Sstevel@tonic-gate cm_entry = connmgr_get(retryaddr, waitp, &p->cku_addr, p->cku_addrfmly,
17610Sstevel@tonic-gate &p->cku_srcaddr, &p->cku_err, p->cku_device,
17628778SErik.Nordmark@Sun.COM p->cku_client.cl_nosignal, p->cku_useresvport, p->cku_cred);
17630Sstevel@tonic-gate
17640Sstevel@tonic-gate if (cm_entry == NULL) {
17650Sstevel@tonic-gate /*
17660Sstevel@tonic-gate * Re-map the call status to RPC_INTR if the err code is
17670Sstevel@tonic-gate * EINTR. This can happen if the call status is RPC_TLIERROR.
17680Sstevel@tonic-gate * However, don't re-map if signalling has been turned off.
17690Sstevel@tonic-gate * XXX Really need to create a separate thread whenever
17700Sstevel@tonic-gate * there isn't an existing connection.
17710Sstevel@tonic-gate */
17720Sstevel@tonic-gate if (p->cku_err.re_errno == EINTR) {
17730Sstevel@tonic-gate if (p->cku_client.cl_nosignal == TRUE)
17740Sstevel@tonic-gate p->cku_err.re_errno = EIO;
17750Sstevel@tonic-gate else
17760Sstevel@tonic-gate p->cku_err.re_status = RPC_INTR;
17770Sstevel@tonic-gate }
17780Sstevel@tonic-gate }
17790Sstevel@tonic-gate
17800Sstevel@tonic-gate return (cm_entry);
17810Sstevel@tonic-gate }
17820Sstevel@tonic-gate
17830Sstevel@tonic-gate /*
17840Sstevel@tonic-gate * Obtains a transport to the server specified in addr. If a suitable transport
17850Sstevel@tonic-gate * does not already exist in the list of cached transports, a new connection
17860Sstevel@tonic-gate * is created, connected, and added to the list. The connection is for sending
17870Sstevel@tonic-gate * only - the reply message may come back on another transport connection.
17889806Sdai.ngo@sun.com *
17899806Sdai.ngo@sun.com * To implement round-robin load balancing with multiple client connections,
17909806Sdai.ngo@sun.com * the last entry on the list is always selected. Once the entry is selected
17919806Sdai.ngo@sun.com * it's re-inserted at the head of the list.
17920Sstevel@tonic-gate */
17930Sstevel@tonic-gate static struct cm_xprt *
17940Sstevel@tonic-gate connmgr_get(
17950Sstevel@tonic-gate struct netbuf *retryaddr,
17950Sstevel@tonic-gate const struct timeval *waitp, /* changed to a ptr to conserve stack */
17970Sstevel@tonic-gate struct netbuf *destaddr,
17980Sstevel@tonic-gate int addrfmly,
17990Sstevel@tonic-gate struct netbuf *srcaddr,
18000Sstevel@tonic-gate struct rpc_err *rpcerr,
18010Sstevel@tonic-gate dev_t device,
18020Sstevel@tonic-gate bool_t nosignal,
18038778SErik.Nordmark@Sun.COM int useresvport,
18048778SErik.Nordmark@Sun.COM cred_t *cr)
18050Sstevel@tonic-gate {
18060Sstevel@tonic-gate struct cm_xprt *cm_entry;
18070Sstevel@tonic-gate struct cm_xprt *lru_entry;
18089806Sdai.ngo@sun.com struct cm_xprt **cmp, **prev;
18090Sstevel@tonic-gate queue_t *wq;
18100Sstevel@tonic-gate TIUSER *tiptr;
18110Sstevel@tonic-gate int i;
18120Sstevel@tonic-gate int retval;
18130Sstevel@tonic-gate int tidu_size;
18140Sstevel@tonic-gate bool_t connected;
1815766Scarlsonj zoneid_t zoneid = rpc_zoneid();
18160Sstevel@tonic-gate
18170Sstevel@tonic-gate /*
18180Sstevel@tonic-gate * If the call is not a retry, look for a transport entry that
18190Sstevel@tonic-gate * goes to the server of interest.
18200Sstevel@tonic-gate */
18210Sstevel@tonic-gate mutex_enter(&connmgr_lock);
18220Sstevel@tonic-gate
18230Sstevel@tonic-gate if (retryaddr == NULL) {
18240Sstevel@tonic-gate use_new_conn:
18250Sstevel@tonic-gate i = 0;
18260Sstevel@tonic-gate cm_entry = lru_entry = NULL;
18279806Sdai.ngo@sun.com
18289806Sdai.ngo@sun.com prev = cmp = &cm_hd;
18290Sstevel@tonic-gate while ((cm_entry = *cmp) != NULL) {
18300Sstevel@tonic-gate ASSERT(cm_entry != cm_entry->x_next);
18310Sstevel@tonic-gate /*
18320Sstevel@tonic-gate * Garbage collect connections that are marked
18330Sstevel@tonic-gate * as needing a disconnect.
18340Sstevel@tonic-gate */
18350Sstevel@tonic-gate if (cm_entry->x_needdis) {
1836154Sshepler CONN_HOLD(cm_entry);
18370Sstevel@tonic-gate connmgr_dis_and_wait(cm_entry);
1838154Sshepler connmgr_release(cm_entry);
18390Sstevel@tonic-gate /*
18400Sstevel@tonic-gate * connmgr_lock could have been
18410Sstevel@tonic-gate * dropped for the disconnect
18420Sstevel@tonic-gate * processing so start over.
18430Sstevel@tonic-gate */
18440Sstevel@tonic-gate goto use_new_conn;
18450Sstevel@tonic-gate }
18460Sstevel@tonic-gate
18470Sstevel@tonic-gate /*
18480Sstevel@tonic-gate * Garbage collect the dead connections that have
18490Sstevel@tonic-gate * no threads working on them.
18500Sstevel@tonic-gate */
18510Sstevel@tonic-gate if ((cm_entry->x_state_flags & (X_DEAD|X_THREAD)) ==
18520Sstevel@tonic-gate X_DEAD) {
18533017Smaheshvs mutex_enter(&cm_entry->x_lock);
18543017Smaheshvs if (cm_entry->x_ref != 0) {
18553017Smaheshvs /*
18563017Smaheshvs * Currently in use.
18573017Smaheshvs * Cleanup later.
18583017Smaheshvs */
18593017Smaheshvs cmp = &cm_entry->x_next;
18603017Smaheshvs mutex_exit(&cm_entry->x_lock);
18613017Smaheshvs continue;
18623017Smaheshvs }
18633017Smaheshvs mutex_exit(&cm_entry->x_lock);
18640Sstevel@tonic-gate *cmp = cm_entry->x_next;
18650Sstevel@tonic-gate mutex_exit(&connmgr_lock);
18660Sstevel@tonic-gate connmgr_close(cm_entry);
18670Sstevel@tonic-gate mutex_enter(&connmgr_lock);
18680Sstevel@tonic-gate goto use_new_conn;
18690Sstevel@tonic-gate }
18700Sstevel@tonic-gate
18710Sstevel@tonic-gate
18720Sstevel@tonic-gate if ((cm_entry->x_state_flags & X_BADSTATES) == 0 &&
18730Sstevel@tonic-gate cm_entry->x_zoneid == zoneid &&
18740Sstevel@tonic-gate cm_entry->x_rdev == device &&
18750Sstevel@tonic-gate destaddr->len == cm_entry->x_server.len &&
18760Sstevel@tonic-gate bcmp(destaddr->buf, cm_entry->x_server.buf,
18770Sstevel@tonic-gate destaddr->len) == 0) {
18780Sstevel@tonic-gate /*
18790Sstevel@tonic-gate * If the matching entry isn't connected,
18800Sstevel@tonic-gate * attempt to reconnect it.
18810Sstevel@tonic-gate */
18820Sstevel@tonic-gate if (cm_entry->x_connected == FALSE) {
18830Sstevel@tonic-gate /*
18840Sstevel@tonic-gate * We don't go through trying
18850Sstevel@tonic-gate * to find the least recently
18860Sstevel@tonic-gate * used connection because
18870Sstevel@tonic-gate * connmgr_reconnect() briefly
18880Sstevel@tonic-gate * dropped the connmgr_lock,
18890Sstevel@tonic-gate * allowing a window for our
18900Sstevel@tonic-gate * accounting to be messed up.
18910Sstevel@tonic-gate * In any case, a re-connected
18920Sstevel@tonic-gate * connection is as good as
18930Sstevel@tonic-gate * a LRU connection.
18940Sstevel@tonic-gate * an LRU connection.
18950Sstevel@tonic-gate return (connmgr_wrapconnect(cm_entry,
18960Sstevel@tonic-gate waitp, destaddr, addrfmly, srcaddr,
18978778SErik.Nordmark@Sun.COM rpcerr, TRUE, nosignal, cr));
18980Sstevel@tonic-gate }
18990Sstevel@tonic-gate i++;
19009806Sdai.ngo@sun.com
19019806Sdai.ngo@sun.com /* keep track of the last entry */
19029806Sdai.ngo@sun.com lru_entry = cm_entry;
19039806Sdai.ngo@sun.com prev = cmp;
19040Sstevel@tonic-gate }
19050Sstevel@tonic-gate cmp = &cm_entry->x_next;
19060Sstevel@tonic-gate }
19070Sstevel@tonic-gate
19080Sstevel@tonic-gate if (i > clnt_max_conns) {
19090Sstevel@tonic-gate RPCLOG(8, "connmgr_get: too many conns, dooming entry"
19100Sstevel@tonic-gate " %p\n", (void *)lru_entry->x_tiptr);
19110Sstevel@tonic-gate lru_entry->x_doomed = TRUE;
19120Sstevel@tonic-gate goto use_new_conn;
19130Sstevel@tonic-gate }
19140Sstevel@tonic-gate
19150Sstevel@tonic-gate /*
19160Sstevel@tonic-gate * If we are at the maximum number of connections to
19170Sstevel@tonic-gate * the server, hand back the least recently used one.
19180Sstevel@tonic-gate */
19190Sstevel@tonic-gate if (i == clnt_max_conns) {
19200Sstevel@tonic-gate /*
19210Sstevel@tonic-gate * Copy into the handle the source address of
19220Sstevel@tonic-gate * the connection, which we will use in case of
19230Sstevel@tonic-gate * a later retry.
19240Sstevel@tonic-gate */
19250Sstevel@tonic-gate if (srcaddr->len != lru_entry->x_src.len) {
19260Sstevel@tonic-gate if (srcaddr->len > 0)
19270Sstevel@tonic-gate kmem_free(srcaddr->buf,
19280Sstevel@tonic-gate srcaddr->maxlen);
19290Sstevel@tonic-gate srcaddr->buf = kmem_zalloc(
19300Sstevel@tonic-gate lru_entry->x_src.len, KM_SLEEP);
19310Sstevel@tonic-gate srcaddr->maxlen = srcaddr->len =
19320Sstevel@tonic-gate lru_entry->x_src.len;
19330Sstevel@tonic-gate }
19340Sstevel@tonic-gate bcopy(lru_entry->x_src.buf, srcaddr->buf, srcaddr->len);
19350Sstevel@tonic-gate RPCLOG(2, "connmgr_get: call going out on %p\n",
19360Sstevel@tonic-gate (void *)lru_entry);
193711066Srafael.vanoni@sun.com lru_entry->x_time = ddi_get_lbolt();
19380Sstevel@tonic-gate CONN_HOLD(lru_entry);
19399806Sdai.ngo@sun.com
19409806Sdai.ngo@sun.com if ((i > 1) && (prev != &cm_hd)) {
19419806Sdai.ngo@sun.com /*
19429806Sdai.ngo@sun.com * remove and re-insert entry at head of list.
19439806Sdai.ngo@sun.com */
19449806Sdai.ngo@sun.com *prev = lru_entry->x_next;
19459806Sdai.ngo@sun.com lru_entry->x_next = cm_hd;
19469806Sdai.ngo@sun.com cm_hd = lru_entry;
19479806Sdai.ngo@sun.com }
19489806Sdai.ngo@sun.com
19490Sstevel@tonic-gate mutex_exit(&connmgr_lock);
19500Sstevel@tonic-gate return (lru_entry);
19510Sstevel@tonic-gate }
19520Sstevel@tonic-gate
19530Sstevel@tonic-gate } else {
19540Sstevel@tonic-gate /*
19550Sstevel@tonic-gate * This is the retry case (retryaddr != NULL). Retries must
19560Sstevel@tonic-gate * be sent on the same source port as the original call.
19570Sstevel@tonic-gate */
19580Sstevel@tonic-gate
19590Sstevel@tonic-gate /*
19600Sstevel@tonic-gate * Walk the list looking for a connection with a source address
19610Sstevel@tonic-gate * that matches the retry address.
19620Sstevel@tonic-gate */
19638806SGerald.Thornbrugh@Sun.COM start_retry_loop:
19640Sstevel@tonic-gate cmp = &cm_hd;
19650Sstevel@tonic-gate while ((cm_entry = *cmp) != NULL) {
19660Sstevel@tonic-gate ASSERT(cm_entry != cm_entry->x_next);
19678806SGerald.Thornbrugh@Sun.COM
19688806SGerald.Thornbrugh@Sun.COM /*
19698806SGerald.Thornbrugh@Sun.COM * Determine if this connection matches the passed-in
19708806SGerald.Thornbrugh@Sun.COM * retry address. If it does not match, advance
19718806SGerald.Thornbrugh@Sun.COM * to the next element on the list.
19728806SGerald.Thornbrugh@Sun.COM */
19730Sstevel@tonic-gate if (zoneid != cm_entry->x_zoneid ||
19740Sstevel@tonic-gate device != cm_entry->x_rdev ||
19750Sstevel@tonic-gate retryaddr->len != cm_entry->x_src.len ||
19760Sstevel@tonic-gate bcmp(retryaddr->buf, cm_entry->x_src.buf,
19776403Sgt29601 retryaddr->len) != 0) {
19780Sstevel@tonic-gate cmp = &cm_entry->x_next;
19790Sstevel@tonic-gate continue;
19800Sstevel@tonic-gate }
19818806SGerald.Thornbrugh@Sun.COM /*
19828806SGerald.Thornbrugh@Sun.COM * Garbage collect connections that are marked
19838806SGerald.Thornbrugh@Sun.COM * as needing a disconnect.
19848806SGerald.Thornbrugh@Sun.COM */
19858806SGerald.Thornbrugh@Sun.COM if (cm_entry->x_needdis) {
19868806SGerald.Thornbrugh@Sun.COM CONN_HOLD(cm_entry);
19878806SGerald.Thornbrugh@Sun.COM connmgr_dis_and_wait(cm_entry);
19888806SGerald.Thornbrugh@Sun.COM connmgr_release(cm_entry);
19898806SGerald.Thornbrugh@Sun.COM /*
19908806SGerald.Thornbrugh@Sun.COM * connmgr_lock could have been
19918806SGerald.Thornbrugh@Sun.COM * dropped for the disconnect
19928806SGerald.Thornbrugh@Sun.COM * processing so start over.
19938806SGerald.Thornbrugh@Sun.COM */
19948806SGerald.Thornbrugh@Sun.COM goto start_retry_loop;
19958806SGerald.Thornbrugh@Sun.COM }
19968806SGerald.Thornbrugh@Sun.COM /*
19978806SGerald.Thornbrugh@Sun.COM * Garbage collect the dead connections that have
19988806SGerald.Thornbrugh@Sun.COM * no threads working on them.
19998806SGerald.Thornbrugh@Sun.COM */
20008806SGerald.Thornbrugh@Sun.COM if ((cm_entry->x_state_flags & (X_DEAD|X_THREAD)) ==
20018806SGerald.Thornbrugh@Sun.COM X_DEAD) {
20028806SGerald.Thornbrugh@Sun.COM mutex_enter(&cm_entry->x_lock);
20038806SGerald.Thornbrugh@Sun.COM if (cm_entry->x_ref != 0) {
20048806SGerald.Thornbrugh@Sun.COM /*
20058806SGerald.Thornbrugh@Sun.COM * Currently in use.
20068806SGerald.Thornbrugh@Sun.COM * Cleanup later.
20078806SGerald.Thornbrugh@Sun.COM */
20088806SGerald.Thornbrugh@Sun.COM cmp = &cm_entry->x_next;
20098806SGerald.Thornbrugh@Sun.COM mutex_exit(&cm_entry->x_lock);
20108806SGerald.Thornbrugh@Sun.COM continue;
20118806SGerald.Thornbrugh@Sun.COM }
20128806SGerald.Thornbrugh@Sun.COM mutex_exit(&cm_entry->x_lock);
20138806SGerald.Thornbrugh@Sun.COM *cmp = cm_entry->x_next;
20148806SGerald.Thornbrugh@Sun.COM mutex_exit(&connmgr_lock);
20158806SGerald.Thornbrugh@Sun.COM connmgr_close(cm_entry);
20168806SGerald.Thornbrugh@Sun.COM mutex_enter(&connmgr_lock);
20178806SGerald.Thornbrugh@Sun.COM goto start_retry_loop;
20188806SGerald.Thornbrugh@Sun.COM }
20190Sstevel@tonic-gate
20200Sstevel@tonic-gate /*
20210Sstevel@tonic-gate * Sanity check: if the connection with our source
20220Sstevel@tonic-gate * port is going to some other server, something went
20230Sstevel@tonic-gate * wrong, as we never delete connections (i.e. release
20240Sstevel@tonic-gate * ports) unless they have been idle. In this case,
20250Sstevel@tonic-gate * it is probably better to send the call out using
20260Sstevel@tonic-gate * a new source address than to fail it altogether,
20270Sstevel@tonic-gate * since that port may never be released.
20280Sstevel@tonic-gate */
20290Sstevel@tonic-gate if (destaddr->len != cm_entry->x_server.len ||
20306403Sgt29601 bcmp(destaddr->buf, cm_entry->x_server.buf,
20316403Sgt29601 destaddr->len) != 0) {
20320Sstevel@tonic-gate RPCLOG(1, "connmgr_get: tiptr %p"
20330Sstevel@tonic-gate " is going to a different server"
20340Sstevel@tonic-gate " with the port that belongs"
20350Sstevel@tonic-gate " to us!\n", (void *)cm_entry->x_tiptr);
20360Sstevel@tonic-gate retryaddr = NULL;
20370Sstevel@tonic-gate goto use_new_conn;
20380Sstevel@tonic-gate }
20390Sstevel@tonic-gate
20400Sstevel@tonic-gate /*
20410Sstevel@tonic-gate * If the connection of interest is not connected and we
20420Sstevel@tonic-gate * can't reconnect it, then the server is probably
20430Sstevel@tonic-gate * still down. Return NULL to the caller and let it
20440Sstevel@tonic-gate * retry later if it wants to. We have a delay so the
20450Sstevel@tonic-gate * machine doesn't go into a tight retry loop. If the
20460Sstevel@tonic-gate * entry was already connected, or the reconnect was
20470Sstevel@tonic-gate * successful, return this entry.
20480Sstevel@tonic-gate */
20490Sstevel@tonic-gate if (cm_entry->x_connected == FALSE) {
20500Sstevel@tonic-gate return (connmgr_wrapconnect(cm_entry,
20510Sstevel@tonic-gate waitp, destaddr, addrfmly, NULL,
20528778SErik.Nordmark@Sun.COM rpcerr, TRUE, nosignal, cr));
20530Sstevel@tonic-gate } else {
20540Sstevel@tonic-gate CONN_HOLD(cm_entry);
20550Sstevel@tonic-gate
205611066Srafael.vanoni@sun.com cm_entry->x_time = ddi_get_lbolt();
20570Sstevel@tonic-gate mutex_exit(&connmgr_lock);
20580Sstevel@tonic-gate RPCLOG(2, "connmgr_get: found old "
20590Sstevel@tonic-gate "transport %p for retry\n",
20600Sstevel@tonic-gate (void *)cm_entry);
20610Sstevel@tonic-gate return (cm_entry);
20620Sstevel@tonic-gate }
20630Sstevel@tonic-gate }
20640Sstevel@tonic-gate
20650Sstevel@tonic-gate /*
20660Sstevel@tonic-gate * We cannot find an entry in the list for this retry.
20670Sstevel@tonic-gate * Either the entry has been removed temporarily to be
20680Sstevel@tonic-gate * reconnected by another thread, or the original call
20690Sstevel@tonic-gate * got a port but never got connected,
20700Sstevel@tonic-gate * and hence the transport never got put in the
20710Sstevel@tonic-gate * list. Fall through to the "create new connection" code -
20720Sstevel@tonic-gate * the former case will fail there trying to rebind the port,
20730Sstevel@tonic-gate * and the latter case (and any other pathological cases) will
20740Sstevel@tonic-gate * rebind and reconnect and not hang the client machine.
20750Sstevel@tonic-gate */
20760Sstevel@tonic-gate RPCLOG0(8, "connmgr_get: no entry in list for retry\n");
20770Sstevel@tonic-gate }
20780Sstevel@tonic-gate /*
20790Sstevel@tonic-gate * Set up a transport entry in the connection manager's list.
20800Sstevel@tonic-gate */
20810Sstevel@tonic-gate cm_entry = (struct cm_xprt *)
20820Sstevel@tonic-gate kmem_zalloc(sizeof (struct cm_xprt), KM_SLEEP);
20830Sstevel@tonic-gate
20840Sstevel@tonic-gate cm_entry->x_server.buf = kmem_zalloc(destaddr->len, KM_SLEEP);
20850Sstevel@tonic-gate bcopy(destaddr->buf, cm_entry->x_server.buf, destaddr->len);
20860Sstevel@tonic-gate cm_entry->x_server.len = cm_entry->x_server.maxlen = destaddr->len;
20870Sstevel@tonic-gate
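	/*
	 * X_THREAD marks this entry as having a connection attempt in
	 * progress; other threads that find it on the list will block
	 * in connmgr_cwait() until x_thread is cleared and x_conn_cv
	 * is broadcast below.
	 */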
20880Sstevel@tonic-gate cm_entry->x_state_flags = X_THREAD;
20890Sstevel@tonic-gate cm_entry->x_ref = 1;
20900Sstevel@tonic-gate cm_entry->x_family = addrfmly;
20910Sstevel@tonic-gate cm_entry->x_rdev = device;
20920Sstevel@tonic-gate cm_entry->x_zoneid = zoneid;
20930Sstevel@tonic-gate mutex_init(&cm_entry->x_lock, NULL, MUTEX_DEFAULT, NULL);
20940Sstevel@tonic-gate cv_init(&cm_entry->x_cv, NULL, CV_DEFAULT, NULL);
20950Sstevel@tonic-gate cv_init(&cm_entry->x_conn_cv, NULL, CV_DEFAULT, NULL);
20960Sstevel@tonic-gate cv_init(&cm_entry->x_dis_cv, NULL, CV_DEFAULT, NULL);
20970Sstevel@tonic-gate
20980Sstevel@tonic-gate /*
20990Sstevel@tonic-gate * Note that we add this partially initialized entry to the
21000Sstevel@tonic-gate * connection list. This is so that we don't end up with duplicate
21010Sstevel@tonic-gate * connections to the same server.
21020Sstevel@tonic-gate *
21030Sstevel@tonic-gate * Note that x_src is not initialized at this point. This is because
21040Sstevel@tonic-gate * retryaddr might be NULL in which case x_src is whatever
21050Sstevel@tonic-gate * t_kbind/bindresvport gives us. If another thread wants a
21060Sstevel@tonic-gate * connection to the same server, seemingly we have an issue, but we
21070Sstevel@tonic-gate * don't. If the other thread comes in with retryaddr == NULL, then it
21080Sstevel@tonic-gate * will never look at x_src, and it will end up waiting in
21090Sstevel@tonic-gate * connmgr_cwait() for the first thread to finish the connection
21100Sstevel@tonic-gate * attempt. If the other thread comes in with retryaddr != NULL, then
21110Sstevel@tonic-gate * that means there was a request sent on a connection, in which case
21120Sstevel@tonic-gate * the the connection should already exist. Thus the first thread
21130Sstevel@tonic-gate * the connection should already exist. Thus the first thread
21140Sstevel@tonic-gate * never gets here ... it finds the connection to its server in the
21150Sstevel@tonic-gate *
21160Sstevel@tonic-gate * But even if this theory is wrong, in the retryaddr != NULL case, the 2nd
21170Sstevel@tonic-gate * thread will skip us because x_src.len == 0.
21180Sstevel@tonic-gate */
21190Sstevel@tonic-gate cm_entry->x_next = cm_hd;
21200Sstevel@tonic-gate cm_hd = cm_entry;
21210Sstevel@tonic-gate mutex_exit(&connmgr_lock);
21220Sstevel@tonic-gate
21230Sstevel@tonic-gate /*
21240Sstevel@tonic-gate * Either we didn't find an entry to the server of interest, or we
21250Sstevel@tonic-gate * don't have the maximum number of connections to that server -
21260Sstevel@tonic-gate * create a new connection.
21270Sstevel@tonic-gate */
21280Sstevel@tonic-gate RPCLOG0(8, "connmgr_get: creating new connection\n");
21290Sstevel@tonic-gate rpcerr->re_status = RPC_TLIERROR;
21300Sstevel@tonic-gate
21311676Sjpk i = t_kopen(NULL, device, FREAD|FWRITE|FNDELAY, &tiptr, zone_kcred());
21320Sstevel@tonic-gate if (i) {
21330Sstevel@tonic-gate RPCLOG(1, "connmgr_get: can't open cots device, error %d\n", i);
21340Sstevel@tonic-gate rpcerr->re_errno = i;
21350Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
21360Sstevel@tonic-gate return (NULL);
21370Sstevel@tonic-gate }
21380Sstevel@tonic-gate rpc_poptimod(tiptr->fp->f_vnode);
21390Sstevel@tonic-gate
21400Sstevel@tonic-gate if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
21416403Sgt29601 K_TO_K, kcred, &retval)) {
21420Sstevel@tonic-gate RPCLOG(1, "connmgr_get: can't push cots module, %d\n", i);
21430Sstevel@tonic-gate (void) t_kclose(tiptr, 1);
21440Sstevel@tonic-gate rpcerr->re_errno = i;
21450Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
21460Sstevel@tonic-gate return (NULL);
21470Sstevel@tonic-gate }
21480Sstevel@tonic-gate
21490Sstevel@tonic-gate if (i = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
21506403Sgt29601 kcred, &retval)) {
21510Sstevel@tonic-gate RPCLOG(1, "connmgr_get: can't set client status with cots "
21520Sstevel@tonic-gate "module, %d\n", i);
21530Sstevel@tonic-gate (void) t_kclose(tiptr, 1);
21540Sstevel@tonic-gate rpcerr->re_errno = i;
21550Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
21560Sstevel@tonic-gate return (NULL);
21570Sstevel@tonic-gate }
21580Sstevel@tonic-gate
21590Sstevel@tonic-gate mutex_enter(&connmgr_lock);
21600Sstevel@tonic-gate
21610Sstevel@tonic-gate wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;
21620Sstevel@tonic-gate cm_entry->x_wq = wq;
21630Sstevel@tonic-gate
21640Sstevel@tonic-gate mutex_exit(&connmgr_lock);
21650Sstevel@tonic-gate
21660Sstevel@tonic-gate if (i = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
21676403Sgt29601 K_TO_K, kcred, &retval)) {
21680Sstevel@tonic-gate RPCLOG(1, "connmgr_get: can't push timod, %d\n", i);
21690Sstevel@tonic-gate (void) t_kclose(tiptr, 1);
21700Sstevel@tonic-gate rpcerr->re_errno = i;
21710Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
21720Sstevel@tonic-gate return (NULL);
21730Sstevel@tonic-gate }
21740Sstevel@tonic-gate
21750Sstevel@tonic-gate /*
21760Sstevel@tonic-gate * If the caller has not specified reserved port usage then
21770Sstevel@tonic-gate * take the system default.
21780Sstevel@tonic-gate */
21790Sstevel@tonic-gate if (useresvport == -1)
21800Sstevel@tonic-gate useresvport = clnt_cots_do_bindresvport;
21810Sstevel@tonic-gate
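	/*
	 * Bind the end-point.  A reserved port is bound when the caller
	 * asked for one or when this is a retry (which must go out on
	 * the original source port); otherwise let the transport pick
	 * an anonymous local address via t_kbind().
	 */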
21820Sstevel@tonic-gate if ((useresvport || retryaddr != NULL) &&
21830Sstevel@tonic-gate (addrfmly == AF_INET || addrfmly == AF_INET6)) {
21840Sstevel@tonic-gate bool_t alloc_src = FALSE;
21850Sstevel@tonic-gate
21860Sstevel@tonic-gate if (srcaddr->len != destaddr->len) {
21870Sstevel@tonic-gate kmem_free(srcaddr->buf, srcaddr->maxlen);
21880Sstevel@tonic-gate srcaddr->buf = kmem_zalloc(destaddr->len, KM_SLEEP);
21890Sstevel@tonic-gate srcaddr->maxlen = destaddr->len;
21900Sstevel@tonic-gate srcaddr->len = destaddr->len;
21910Sstevel@tonic-gate alloc_src = TRUE;
21920Sstevel@tonic-gate }
21930Sstevel@tonic-gate
21940Sstevel@tonic-gate if ((i = bindresvport(tiptr, retryaddr, srcaddr, TRUE)) != 0) {
21950Sstevel@tonic-gate (void) t_kclose(tiptr, 1);
21960Sstevel@tonic-gate RPCLOG(1, "connmgr_get: couldn't bind, retryaddr: "
21976403Sgt29601 "%p\n", (void *)retryaddr);
21980Sstevel@tonic-gate
21990Sstevel@tonic-gate /*
22000Sstevel@tonic-gate * 1225408: If we allocated a source address, then it
22010Sstevel@tonic-gate * is either garbage or all zeroes. In that case
22020Sstevel@tonic-gate * we need to clear srcaddr.
22030Sstevel@tonic-gate */
22040Sstevel@tonic-gate if (alloc_src == TRUE) {
22050Sstevel@tonic-gate kmem_free(srcaddr->buf, srcaddr->maxlen);
22060Sstevel@tonic-gate srcaddr->maxlen = srcaddr->len = 0;
22070Sstevel@tonic-gate srcaddr->buf = NULL;
22080Sstevel@tonic-gate }
22090Sstevel@tonic-gate rpcerr->re_errno = i;
22100Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
22110Sstevel@tonic-gate return (NULL);
22120Sstevel@tonic-gate }
22130Sstevel@tonic-gate } else {
22140Sstevel@tonic-gate if ((i = t_kbind(tiptr, NULL, NULL)) != 0) {
22150Sstevel@tonic-gate RPCLOG(1, "clnt_cots_kcreate: t_kbind: %d\n", i);
22160Sstevel@tonic-gate (void) t_kclose(tiptr, 1);
22170Sstevel@tonic-gate rpcerr->re_errno = i;
22180Sstevel@tonic-gate connmgr_cancelconn(cm_entry);
22190Sstevel@tonic-gate return (NULL);
22200Sstevel@tonic-gate }
22210Sstevel@tonic-gate }
22220Sstevel@tonic-gate
22230Sstevel@tonic-gate {
22240Sstevel@tonic-gate /*
22250Sstevel@tonic-gate 		 * Keep the kernel stack lean. Don't move this calllist_t
22260Sstevel@tonic-gate 		 * declaration to the top of this function because
22270Sstevel@tonic-gate 		 * connmgr_wrapconnect() declares one on its stack as well.
22280Sstevel@tonic-gate */
22290Sstevel@tonic-gate calllist_t call;
22300Sstevel@tonic-gate
22310Sstevel@tonic-gate bzero(&call, sizeof (call));
22320Sstevel@tonic-gate cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);
22330Sstevel@tonic-gate
22340Sstevel@tonic-gate /*
22350Sstevel@tonic-gate 		 * This is a bound end-point, so don't close its stream.
22360Sstevel@tonic-gate */
22370Sstevel@tonic-gate connected = connmgr_connect(cm_entry, wq, destaddr, addrfmly,
22388778SErik.Nordmark@Sun.COM &call, &tidu_size, FALSE, waitp, nosignal, cr);
22390Sstevel@tonic-gate *rpcerr = call.call_err;
22400Sstevel@tonic-gate cv_destroy(&call.call_cv);
22410Sstevel@tonic-gate
22420Sstevel@tonic-gate }
22430Sstevel@tonic-gate
22440Sstevel@tonic-gate mutex_enter(&connmgr_lock);
22450Sstevel@tonic-gate
22460Sstevel@tonic-gate /*
22470Sstevel@tonic-gate * Set up a transport entry in the connection manager's list.
22480Sstevel@tonic-gate */
22490Sstevel@tonic-gate cm_entry->x_src.buf = kmem_zalloc(srcaddr->len, KM_SLEEP);
22500Sstevel@tonic-gate bcopy(srcaddr->buf, cm_entry->x_src.buf, srcaddr->len);
22510Sstevel@tonic-gate cm_entry->x_src.len = cm_entry->x_src.maxlen = srcaddr->len;
22520Sstevel@tonic-gate
22530Sstevel@tonic-gate cm_entry->x_tiptr = tiptr;
225411066Srafael.vanoni@sun.com cm_entry->x_time = ddi_get_lbolt();
22550Sstevel@tonic-gate
22560Sstevel@tonic-gate if (tiptr->tp_info.servtype == T_COTS_ORD)
22570Sstevel@tonic-gate cm_entry->x_ordrel = TRUE;
22580Sstevel@tonic-gate else
22590Sstevel@tonic-gate cm_entry->x_ordrel = FALSE;
22600Sstevel@tonic-gate
22610Sstevel@tonic-gate cm_entry->x_tidu_size = tidu_size;
22620Sstevel@tonic-gate
22634457Svv149972 if (cm_entry->x_early_disc) {
22644457Svv149972 /*
22654457Svv149972 		 * A disconnect request may have arrived while we were
22664457Svv149972 		 * connecting. If so, set rpcerr->re_status
22674457Svv149972 		 * appropriately before returning NULL to the
22684457Svv149972 		 * caller.
22694457Svv149972 */
22704457Svv149972 if (rpcerr->re_status == RPC_SUCCESS)
22714457Svv149972 rpcerr->re_status = RPC_XPRTFAILED;
22720Sstevel@tonic-gate cm_entry->x_connected = FALSE;
22734457Svv149972 } else
22740Sstevel@tonic-gate cm_entry->x_connected = connected;
22750Sstevel@tonic-gate
22760Sstevel@tonic-gate /*
22770Sstevel@tonic-gate 	 * There could be a discrepancy here: x_early_disc may be
22780Sstevel@tonic-gate 	 * TRUE even though connected is TRUE and the connection
22790Sstevel@tonic-gate 	 * is actually up. In that case let's be conservative
22800Sstevel@tonic-gate 	 * and declare the connection as not
22810Sstevel@tonic-gate 	 * connected.
22820Sstevel@tonic-gate */
22830Sstevel@tonic-gate cm_entry->x_early_disc = FALSE;
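	/*
	 * If the connect attempt failed, make sure a T_DISCON_REQ goes
	 * out before this endpoint is used for another attempt.
	 */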
22840Sstevel@tonic-gate cm_entry->x_needdis = (cm_entry->x_connected == FALSE);
228511066Srafael.vanoni@sun.com cm_entry->x_ctime = ddi_get_lbolt();
22860Sstevel@tonic-gate
22870Sstevel@tonic-gate /*
22880Sstevel@tonic-gate * Notify any threads waiting that the connection attempt is done.
22890Sstevel@tonic-gate */
22900Sstevel@tonic-gate cm_entry->x_thread = FALSE;
22910Sstevel@tonic-gate cv_broadcast(&cm_entry->x_conn_cv);
22920Sstevel@tonic-gate
22930Sstevel@tonic-gate if (cm_entry->x_connected == FALSE) {
22944457Svv149972 mutex_exit(&connmgr_lock);
22950Sstevel@tonic-gate connmgr_release(cm_entry);
22960Sstevel@tonic-gate return (NULL);
22970Sstevel@tonic-gate }
22984457Svv149972
22994457Svv149972 mutex_exit(&connmgr_lock);
23004457Svv149972
23010Sstevel@tonic-gate return (cm_entry);
23020Sstevel@tonic-gate }
23030Sstevel@tonic-gate
23040Sstevel@tonic-gate /*
23050Sstevel@tonic-gate  * Keep the cm_xprt entry on the connection list while making a connection.
23060Sstevel@tonic-gate  * This prevents multiple connections to a slow server from being created.
23070Sstevel@tonic-gate  * The bit field x_thread indicates that a thread is making the connection,
23080Sstevel@tonic-gate  * which keeps other interested threads from touching it.
23090Sstevel@tonic-gate  * Those other threads just wait until x_thread is cleared.
23100Sstevel@tonic-gate *
23110Sstevel@tonic-gate * If x_thread is not set, then we do the actual work of connecting via
23120Sstevel@tonic-gate * connmgr_connect().
23130Sstevel@tonic-gate *
23140Sstevel@tonic-gate * mutex convention: called with connmgr_lock held, returns with it released.
23150Sstevel@tonic-gate */
23160Sstevel@tonic-gate static struct cm_xprt *
23170Sstevel@tonic-gate connmgr_wrapconnect(
23180Sstevel@tonic-gate struct cm_xprt *cm_entry,
23190Sstevel@tonic-gate const struct timeval *waitp,
23200Sstevel@tonic-gate struct netbuf *destaddr,
23210Sstevel@tonic-gate int addrfmly,
23220Sstevel@tonic-gate struct netbuf *srcaddr,
23230Sstevel@tonic-gate struct rpc_err *rpcerr,
23240Sstevel@tonic-gate bool_t reconnect,
23258778SErik.Nordmark@Sun.COM bool_t nosignal,
23268778SErik.Nordmark@Sun.COM cred_t *cr)
23270Sstevel@tonic-gate {
23280Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
23290Sstevel@tonic-gate /*
23300Sstevel@tonic-gate * Hold this entry as we are about to drop connmgr_lock.
23310Sstevel@tonic-gate */
23320Sstevel@tonic-gate CONN_HOLD(cm_entry);
23330Sstevel@tonic-gate
23340Sstevel@tonic-gate /*
23350Sstevel@tonic-gate * If there is a thread already making a connection for us, then
23360Sstevel@tonic-gate * wait for it to complete the connection.
23370Sstevel@tonic-gate */
23380Sstevel@tonic-gate if (cm_entry->x_thread == TRUE) {
23390Sstevel@tonic-gate rpcerr->re_status = connmgr_cwait(cm_entry, waitp, nosignal);
23400Sstevel@tonic-gate
23410Sstevel@tonic-gate if (rpcerr->re_status != RPC_SUCCESS) {
23420Sstevel@tonic-gate mutex_exit(&connmgr_lock);
23430Sstevel@tonic-gate connmgr_release(cm_entry);
23440Sstevel@tonic-gate return (NULL);
23450Sstevel@tonic-gate }
23460Sstevel@tonic-gate } else {
23470Sstevel@tonic-gate bool_t connected;
23480Sstevel@tonic-gate calllist_t call;
23490Sstevel@tonic-gate
23500Sstevel@tonic-gate cm_entry->x_thread = TRUE;
23510Sstevel@tonic-gate
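		/*
		 * Flush any pending orderly release first; connmgr_sndrel()
		 * drops connmgr_lock, so pause briefly (presumably to let the
		 * T_ORDREL_REQ go out) and re-take the lock before re-checking.
		 */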
23520Sstevel@tonic-gate while (cm_entry->x_needrel == TRUE) {
23530Sstevel@tonic-gate cm_entry->x_needrel = FALSE;
23540Sstevel@tonic-gate
23550Sstevel@tonic-gate connmgr_sndrel(cm_entry);
23560Sstevel@tonic-gate delay(drv_usectohz(1000000));
23570Sstevel@tonic-gate
23580Sstevel@tonic-gate mutex_enter(&connmgr_lock);
23590Sstevel@tonic-gate }
23600Sstevel@tonic-gate
23610Sstevel@tonic-gate /*
23620Sstevel@tonic-gate * If we need to send a T_DISCON_REQ, send one.
23630Sstevel@tonic-gate */
23640Sstevel@tonic-gate connmgr_dis_and_wait(cm_entry);
23650Sstevel@tonic-gate
23660Sstevel@tonic-gate mutex_exit(&connmgr_lock);
23670Sstevel@tonic-gate
23680Sstevel@tonic-gate bzero(&call, sizeof (call));
23690Sstevel@tonic-gate cv_init(&call.call_cv, NULL, CV_DEFAULT, NULL);
23700Sstevel@tonic-gate
23710Sstevel@tonic-gate connected = connmgr_connect(cm_entry, cm_entry->x_wq,
23726403Sgt29601 destaddr, addrfmly, &call, &cm_entry->x_tidu_size,
23738778SErik.Nordmark@Sun.COM reconnect, waitp, nosignal, cr);
23740Sstevel@tonic-gate
23750Sstevel@tonic-gate *rpcerr = call.call_err;
23760Sstevel@tonic-gate cv_destroy(&call.call_cv);
23770Sstevel@tonic-gate
23780Sstevel@tonic-gate mutex_enter(&connmgr_lock);
23790Sstevel@tonic-gate
23800Sstevel@tonic-gate
23814457Svv149972 if (cm_entry->x_early_disc) {
23824457Svv149972 /*
23834457Svv149972 			 * A disconnect request may have arrived while we were
23844457Svv149972 			 * connecting. If so, set rpcerr->re_status
23854457Svv149972 			 * appropriately before returning NULL to the
23864457Svv149972 			 * caller.
23874457Svv149972 */
23884457Svv149972 if (rpcerr->re_status == RPC_SUCCESS)
23894457Svv149972 rpcerr->re_status = RPC_XPRTFAILED;
23900Sstevel@tonic-gate cm_entry->x_connected = FALSE;
23914457Svv149972 } else
23920Sstevel@tonic-gate cm_entry->x_connected = connected;
23930Sstevel@tonic-gate
23940Sstevel@tonic-gate /*
23950Sstevel@tonic-gate 		 * There could be a discrepancy here: x_early_disc may be
23960Sstevel@tonic-gate 		 * TRUE even though connected is TRUE and the connection
23970Sstevel@tonic-gate 		 * is actually up. In that case let's be conservative
23980Sstevel@tonic-gate 		 * and declare the connection as not
23990Sstevel@tonic-gate 		 * connected.
24000Sstevel@tonic-gate */
24010Sstevel@tonic-gate
24020Sstevel@tonic-gate cm_entry->x_early_disc = FALSE;
24030Sstevel@tonic-gate cm_entry->x_needdis = (cm_entry->x_connected == FALSE);
24040Sstevel@tonic-gate
24050Sstevel@tonic-gate
24060Sstevel@tonic-gate /*
24070Sstevel@tonic-gate * connmgr_connect() may have given up before the connection
24080Sstevel@tonic-gate * actually timed out. So ensure that before the next
24090Sstevel@tonic-gate * connection attempt we do a disconnect.
24100Sstevel@tonic-gate */
241111066Srafael.vanoni@sun.com cm_entry->x_ctime = ddi_get_lbolt();
24120Sstevel@tonic-gate cm_entry->x_thread = FALSE;
24130Sstevel@tonic-gate
24140Sstevel@tonic-gate cv_broadcast(&cm_entry->x_conn_cv);
24150Sstevel@tonic-gate
24160Sstevel@tonic-gate if (cm_entry->x_connected == FALSE) {
24170Sstevel@tonic-gate mutex_exit(&connmgr_lock);
24180Sstevel@tonic-gate connmgr_release(cm_entry);
24190Sstevel@tonic-gate return (NULL);
24200Sstevel@tonic-gate }
24210Sstevel@tonic-gate }
24220Sstevel@tonic-gate
24230Sstevel@tonic-gate if (srcaddr != NULL) {
24240Sstevel@tonic-gate /*
24250Sstevel@tonic-gate * Copy into the handle the
24260Sstevel@tonic-gate * source address of the
24270Sstevel@tonic-gate * connection, which we will use
24280Sstevel@tonic-gate * in case of a later retry.
24290Sstevel@tonic-gate */
24300Sstevel@tonic-gate if (srcaddr->len != cm_entry->x_src.len) {
24310Sstevel@tonic-gate if (srcaddr->maxlen > 0)
24320Sstevel@tonic-gate kmem_free(srcaddr->buf, srcaddr->maxlen);
24330Sstevel@tonic-gate srcaddr->buf = kmem_zalloc(cm_entry->x_src.len,
24340Sstevel@tonic-gate KM_SLEEP);
24350Sstevel@tonic-gate srcaddr->maxlen = srcaddr->len =
24360Sstevel@tonic-gate cm_entry->x_src.len;
24370Sstevel@tonic-gate }
24380Sstevel@tonic-gate bcopy(cm_entry->x_src.buf, srcaddr->buf, srcaddr->len);
24390Sstevel@tonic-gate }
244011066Srafael.vanoni@sun.com cm_entry->x_time = ddi_get_lbolt();
24410Sstevel@tonic-gate mutex_exit(&connmgr_lock);
24420Sstevel@tonic-gate return (cm_entry);
24430Sstevel@tonic-gate }
24440Sstevel@tonic-gate
24450Sstevel@tonic-gate /*
24460Sstevel@tonic-gate * If we need to send a T_DISCON_REQ, send one.
24470Sstevel@tonic-gate */
24480Sstevel@tonic-gate static void
24490Sstevel@tonic-gate connmgr_dis_and_wait(struct cm_xprt *cm_entry)
24500Sstevel@tonic-gate {
24510Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
24520Sstevel@tonic-gate for (;;) {
24530Sstevel@tonic-gate while (cm_entry->x_needdis == TRUE) {
24540Sstevel@tonic-gate RPCLOG(8, "connmgr_dis_and_wait: need "
24556403Sgt29601 "T_DISCON_REQ for connection 0x%p\n",
24566403Sgt29601 (void *)cm_entry);
24570Sstevel@tonic-gate cm_entry->x_needdis = FALSE;
24580Sstevel@tonic-gate cm_entry->x_waitdis = TRUE;
24590Sstevel@tonic-gate
24600Sstevel@tonic-gate connmgr_snddis(cm_entry);
24610Sstevel@tonic-gate
24620Sstevel@tonic-gate mutex_enter(&connmgr_lock);
24630Sstevel@tonic-gate }
24640Sstevel@tonic-gate
24650Sstevel@tonic-gate if (cm_entry->x_waitdis == TRUE) {
24660Sstevel@tonic-gate clock_t timout;
24670Sstevel@tonic-gate
24680Sstevel@tonic-gate RPCLOG(8, "connmgr_dis_and_wait waiting for "
24696403Sgt29601 "T_DISCON_REQ's ACK for connection %p\n",
24706403Sgt29601 (void *)cm_entry);
247111066Srafael.vanoni@sun.com
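			/*
			 * Bound the wait for the T_DISCON_REQ ack to
			 * clnt_cots_min_conntout seconds, converted to ticks.
			 */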
247211066Srafael.vanoni@sun.com timout = clnt_cots_min_conntout * drv_usectohz(1000000);
24730Sstevel@tonic-gate
24740Sstevel@tonic-gate /*
24750Sstevel@tonic-gate * The TPI spec says that the T_DISCON_REQ
24760Sstevel@tonic-gate * will get acknowledged, but in practice
24770Sstevel@tonic-gate * the ACK may never get sent. So don't
24780Sstevel@tonic-gate * block forever.
24790Sstevel@tonic-gate */
248011066Srafael.vanoni@sun.com (void) cv_reltimedwait(&cm_entry->x_dis_cv,
248111066Srafael.vanoni@sun.com &connmgr_lock, timout, TR_CLOCK_TICK);
24820Sstevel@tonic-gate }
24830Sstevel@tonic-gate /*
24840Sstevel@tonic-gate * If we got the ACK, break. If we didn't,
24850Sstevel@tonic-gate * then send another T_DISCON_REQ.
24860Sstevel@tonic-gate */
24870Sstevel@tonic-gate if (cm_entry->x_waitdis == FALSE) {
24880Sstevel@tonic-gate break;
24890Sstevel@tonic-gate } else {
24900Sstevel@tonic-gate 				RPCLOG(8, "connmgr_dis_and_wait: did "
24916403Sgt29601 				    "not get T_DISCON_REQ's ACK for "
24926403Sgt29601 "connection %p\n", (void *)cm_entry);
24930Sstevel@tonic-gate cm_entry->x_needdis = TRUE;
24940Sstevel@tonic-gate }
24950Sstevel@tonic-gate }
24960Sstevel@tonic-gate }
24970Sstevel@tonic-gate
24980Sstevel@tonic-gate static void
24990Sstevel@tonic-gate connmgr_cancelconn(struct cm_xprt *cm_entry)
25000Sstevel@tonic-gate {
25010Sstevel@tonic-gate /*
25020Sstevel@tonic-gate * Mark the connection table entry as dead; the next thread that
25030Sstevel@tonic-gate * goes through connmgr_release() will notice this and deal with it.
25040Sstevel@tonic-gate */
25050Sstevel@tonic-gate mutex_enter(&connmgr_lock);
25060Sstevel@tonic-gate cm_entry->x_dead = TRUE;
25070Sstevel@tonic-gate
25080Sstevel@tonic-gate /*
25090Sstevel@tonic-gate * Notify any threads waiting for the connection that it isn't
25100Sstevel@tonic-gate * going to happen.
25110Sstevel@tonic-gate */
25120Sstevel@tonic-gate cm_entry->x_thread = FALSE;
25130Sstevel@tonic-gate cv_broadcast(&cm_entry->x_conn_cv);
25140Sstevel@tonic-gate mutex_exit(&connmgr_lock);
25150Sstevel@tonic-gate
25160Sstevel@tonic-gate connmgr_release(cm_entry);
25170Sstevel@tonic-gate }
25180Sstevel@tonic-gate
25190Sstevel@tonic-gate static void
25200Sstevel@tonic-gate connmgr_close(struct cm_xprt *cm_entry)
25210Sstevel@tonic-gate {
25220Sstevel@tonic-gate mutex_enter(&cm_entry->x_lock);
25230Sstevel@tonic-gate while (cm_entry->x_ref != 0) {
25240Sstevel@tonic-gate /*
25250Sstevel@tonic-gate * Must be a noninterruptible wait.
25260Sstevel@tonic-gate */
25270Sstevel@tonic-gate cv_wait(&cm_entry->x_cv, &cm_entry->x_lock);
25280Sstevel@tonic-gate }
25290Sstevel@tonic-gate
25300Sstevel@tonic-gate if (cm_entry->x_tiptr != NULL)
25310Sstevel@tonic-gate (void) t_kclose(cm_entry->x_tiptr, 1);
25320Sstevel@tonic-gate
25330Sstevel@tonic-gate mutex_exit(&cm_entry->x_lock);
25340Sstevel@tonic-gate if (cm_entry->x_ksp != NULL) {
25350Sstevel@tonic-gate mutex_enter(&connmgr_lock);
25360Sstevel@tonic-gate cm_entry->x_ksp->ks_private = NULL;
25370Sstevel@tonic-gate mutex_exit(&connmgr_lock);
25380Sstevel@tonic-gate
25390Sstevel@tonic-gate /*
25400Sstevel@tonic-gate * Must free the buffer we allocated for the
25410Sstevel@tonic-gate * server address in the update function
25420Sstevel@tonic-gate */
25430Sstevel@tonic-gate if (((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
2544457Sbmc x_server.value.str.addr.ptr != NULL)
25450Sstevel@tonic-gate kmem_free(((struct cm_kstat_xprt *)(cm_entry->x_ksp->
2546457Sbmc ks_data))->x_server.value.str.addr.ptr,
25476403Sgt29601 INET6_ADDRSTRLEN);
25480Sstevel@tonic-gate kmem_free(cm_entry->x_ksp->ks_data,
25496403Sgt29601 cm_entry->x_ksp->ks_data_size);
25500Sstevel@tonic-gate kstat_delete(cm_entry->x_ksp);
25510Sstevel@tonic-gate }
25520Sstevel@tonic-gate
25530Sstevel@tonic-gate mutex_destroy(&cm_entry->x_lock);
25540Sstevel@tonic-gate cv_destroy(&cm_entry->x_cv);
25550Sstevel@tonic-gate cv_destroy(&cm_entry->x_conn_cv);
25560Sstevel@tonic-gate cv_destroy(&cm_entry->x_dis_cv);
25570Sstevel@tonic-gate
25580Sstevel@tonic-gate if (cm_entry->x_server.buf != NULL)
25590Sstevel@tonic-gate kmem_free(cm_entry->x_server.buf, cm_entry->x_server.maxlen);
25600Sstevel@tonic-gate if (cm_entry->x_src.buf != NULL)
25610Sstevel@tonic-gate kmem_free(cm_entry->x_src.buf, cm_entry->x_src.maxlen);
25620Sstevel@tonic-gate kmem_free(cm_entry, sizeof (struct cm_xprt));
25630Sstevel@tonic-gate }
25640Sstevel@tonic-gate
25650Sstevel@tonic-gate /*
25660Sstevel@tonic-gate * Called by KRPC after sending the call message to release the connection
25670Sstevel@tonic-gate * it was using.
25680Sstevel@tonic-gate */
25690Sstevel@tonic-gate static void
25700Sstevel@tonic-gate connmgr_release(struct cm_xprt *cm_entry)
25710Sstevel@tonic-gate {
25720Sstevel@tonic-gate mutex_enter(&cm_entry->x_lock);
25730Sstevel@tonic-gate cm_entry->x_ref--;
25740Sstevel@tonic-gate if (cm_entry->x_ref == 0)
25750Sstevel@tonic-gate cv_signal(&cm_entry->x_cv);
25760Sstevel@tonic-gate mutex_exit(&cm_entry->x_lock);
25770Sstevel@tonic-gate }
25780Sstevel@tonic-gate
25790Sstevel@tonic-gate /*
258010004Sdai.ngo@sun.com * Set TCP receive and xmit buffer size for RPC connections.
258110004Sdai.ngo@sun.com */
258210004Sdai.ngo@sun.com static bool_t
258310004Sdai.ngo@sun.com connmgr_setbufsz(calllist_t *e, queue_t *wq, cred_t *cr)
258410004Sdai.ngo@sun.com {
258510004Sdai.ngo@sun.com int ok = FALSE;
258610004Sdai.ngo@sun.com int val;
258710004Sdai.ngo@sun.com
258810004Sdai.ngo@sun.com if (rpc_default_tcp_bufsz)
258910004Sdai.ngo@sun.com return (FALSE);
259010004Sdai.ngo@sun.com
259110004Sdai.ngo@sun.com /*
259210004Sdai.ngo@sun.com 	 * Only set a new buffer size if it is larger than the system
259310004Sdai.ngo@sun.com 	 * default buffer size. If a smaller buffer size is needed,
259410004Sdai.ngo@sun.com 	 * set rpc_default_tcp_bufsz to 1 in /etc/system.
259510004Sdai.ngo@sun.com */
259610004Sdai.ngo@sun.com ok = connmgr_getopt_int(wq, SOL_SOCKET, SO_RCVBUF, &val, e, cr);
259710004Sdai.ngo@sun.com if ((ok == TRUE) && (val < rpc_send_bufsz)) {
259810004Sdai.ngo@sun.com ok = connmgr_setopt_int(wq, SOL_SOCKET, SO_RCVBUF,
259910004Sdai.ngo@sun.com rpc_send_bufsz, e, cr);
260010004Sdai.ngo@sun.com DTRACE_PROBE2(krpc__i__connmgr_rcvbufsz,
260110004Sdai.ngo@sun.com int, ok, calllist_t *, e);
260210004Sdai.ngo@sun.com }
260310004Sdai.ngo@sun.com
260410004Sdai.ngo@sun.com ok = connmgr_getopt_int(wq, SOL_SOCKET, SO_SNDBUF, &val, e, cr);
260510004Sdai.ngo@sun.com if ((ok == TRUE) && (val < rpc_recv_bufsz)) {
260610004Sdai.ngo@sun.com ok = connmgr_setopt_int(wq, SOL_SOCKET, SO_SNDBUF,
260710004Sdai.ngo@sun.com rpc_recv_bufsz, e, cr);
260810004Sdai.ngo@sun.com DTRACE_PROBE2(krpc__i__connmgr_sndbufsz,
260910004Sdai.ngo@sun.com int, ok, calllist_t *, e);
261010004Sdai.ngo@sun.com }
261110004Sdai.ngo@sun.com return (TRUE);
261210004Sdai.ngo@sun.com }
261310004Sdai.ngo@sun.com
261410004Sdai.ngo@sun.com /*
26150Sstevel@tonic-gate * Given an open stream, connect to the remote. Returns true if connected,
26160Sstevel@tonic-gate * false otherwise.
26170Sstevel@tonic-gate */
26180Sstevel@tonic-gate static bool_t
26190Sstevel@tonic-gate connmgr_connect(
26200Sstevel@tonic-gate struct cm_xprt *cm_entry,
26210Sstevel@tonic-gate queue_t *wq,
26220Sstevel@tonic-gate struct netbuf *addr,
26230Sstevel@tonic-gate int addrfmly,
26240Sstevel@tonic-gate calllist_t *e,
26250Sstevel@tonic-gate int *tidu_ptr,
26260Sstevel@tonic-gate bool_t reconnect,
26270Sstevel@tonic-gate const struct timeval *waitp,
26288778SErik.Nordmark@Sun.COM bool_t nosignal,
26298778SErik.Nordmark@Sun.COM cred_t *cr)
26300Sstevel@tonic-gate {
26310Sstevel@tonic-gate mblk_t *mp;
26320Sstevel@tonic-gate struct T_conn_req *tcr;
26330Sstevel@tonic-gate struct T_info_ack *tinfo;
26340Sstevel@tonic-gate int interrupted, error;
26350Sstevel@tonic-gate int tidu_size, kstat_instance;
26360Sstevel@tonic-gate
26370Sstevel@tonic-gate /* if it's a reconnect, flush any lingering data messages */
26380Sstevel@tonic-gate if (reconnect)
26390Sstevel@tonic-gate (void) putctl1(wq, M_FLUSH, FLUSHRW);
26400Sstevel@tonic-gate
26418778SErik.Nordmark@Sun.COM /*
26428778SErik.Nordmark@Sun.COM * Note: if the receiver uses SCM_UCRED/getpeerucred the pid will
26438778SErik.Nordmark@Sun.COM * appear as -1.
26448778SErik.Nordmark@Sun.COM */
26458778SErik.Nordmark@Sun.COM mp = allocb_cred(sizeof (*tcr) + addr->len, cr, NOPID);
26460Sstevel@tonic-gate if (mp == NULL) {
26470Sstevel@tonic-gate /*
26480Sstevel@tonic-gate * This is unfortunate, but we need to look up the stats for
26490Sstevel@tonic-gate * this zone to increment the "memory allocation failed"
26500Sstevel@tonic-gate * counter. curproc->p_zone is safe since we're initiating a
26510Sstevel@tonic-gate * connection and not in some strange streams context.
26520Sstevel@tonic-gate */
26530Sstevel@tonic-gate struct rpcstat *rpcstat;
26540Sstevel@tonic-gate
2655766Scarlsonj rpcstat = zone_getspecific(rpcstat_zone_key, rpc_zone());
26560Sstevel@tonic-gate ASSERT(rpcstat != NULL);
26570Sstevel@tonic-gate
26580Sstevel@tonic-gate RPCLOG0(1, "connmgr_connect: cannot alloc mp for "
26590Sstevel@tonic-gate "sending conn request\n");
26600Sstevel@tonic-gate COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcnomem);
26610Sstevel@tonic-gate e->call_status = RPC_SYSTEMERROR;
26620Sstevel@tonic-gate e->call_reason = ENOSR;
26630Sstevel@tonic-gate return (FALSE);
26640Sstevel@tonic-gate }
26650Sstevel@tonic-gate
266610004Sdai.ngo@sun.com /* Set TCP buffer size for RPC connections if needed */
266710004Sdai.ngo@sun.com if (addrfmly == AF_INET || addrfmly == AF_INET6)
266810004Sdai.ngo@sun.com (void) connmgr_setbufsz(e, wq, cr);
266910004Sdai.ngo@sun.com
26700Sstevel@tonic-gate mp->b_datap->db_type = M_PROTO;
26710Sstevel@tonic-gate tcr = (struct T_conn_req *)mp->b_rptr;
26720Sstevel@tonic-gate bzero(tcr, sizeof (*tcr));
26730Sstevel@tonic-gate tcr->PRIM_type = T_CONN_REQ;
26740Sstevel@tonic-gate tcr->DEST_length = addr->len;
26750Sstevel@tonic-gate tcr->DEST_offset = sizeof (struct T_conn_req);
26760Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (*tcr);
26770Sstevel@tonic-gate
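	/*
	 * Append the destination address immediately after the T_conn_req
	 * header, matching the DEST_offset/DEST_length set above.
	 */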
26780Sstevel@tonic-gate bcopy(addr->buf, mp->b_wptr, tcr->DEST_length);
26790Sstevel@tonic-gate mp->b_wptr += tcr->DEST_length;
26800Sstevel@tonic-gate
26810Sstevel@tonic-gate RPCLOG(8, "connmgr_connect: sending conn request on queue "
26820Sstevel@tonic-gate "%p", (void *)wq);
26830Sstevel@tonic-gate RPCLOG(8, " call %p\n", (void *)wq);
26840Sstevel@tonic-gate /*
26850Sstevel@tonic-gate * We use the entry in the handle that is normally used for
26860Sstevel@tonic-gate * waiting for RPC replies to wait for the connection accept.
26870Sstevel@tonic-gate */
26888205SSiddheshwar.Mahesh@Sun.COM if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
26898205SSiddheshwar.Mahesh@Sun.COM DTRACE_PROBE(krpc__e__connmgr__connect__cantsend);
26908205SSiddheshwar.Mahesh@Sun.COM freemsg(mp);
26918205SSiddheshwar.Mahesh@Sun.COM return (FALSE);
26928205SSiddheshwar.Mahesh@Sun.COM }
26930Sstevel@tonic-gate
26940Sstevel@tonic-gate mutex_enter(&clnt_pending_lock);
26950Sstevel@tonic-gate
26960Sstevel@tonic-gate /*
26970Sstevel@tonic-gate * We wait for the transport connection to be made, or an
26980Sstevel@tonic-gate * indication that it could not be made.
26990Sstevel@tonic-gate */
27000Sstevel@tonic-gate interrupted = 0;
27010Sstevel@tonic-gate
27020Sstevel@tonic-gate /*
27030Sstevel@tonic-gate * waitforack should have been called with T_OK_ACK, but the
27040Sstevel@tonic-gate * present implementation needs to be passed T_INFO_ACK to
27050Sstevel@tonic-gate * work correctly.
27060Sstevel@tonic-gate */
27070Sstevel@tonic-gate error = waitforack(e, T_INFO_ACK, waitp, nosignal);
27080Sstevel@tonic-gate if (error == EINTR)
27090Sstevel@tonic-gate interrupted = 1;
27100Sstevel@tonic-gate if (zone_status_get(curproc->p_zone) >= ZONE_IS_EMPTY) {
27110Sstevel@tonic-gate /*
27120Sstevel@tonic-gate * No time to lose; we essentially have been signaled to
27130Sstevel@tonic-gate * quit.
27140Sstevel@tonic-gate */
27150Sstevel@tonic-gate interrupted = 1;
27160Sstevel@tonic-gate }
27170Sstevel@tonic-gate #ifdef RPCDEBUG
27180Sstevel@tonic-gate if (error == ETIME)
27190Sstevel@tonic-gate RPCLOG0(8, "connmgr_connect: giving up "
27200Sstevel@tonic-gate "on connection attempt; "
27210Sstevel@tonic-gate "clnt_dispatch notifyconn "
27220Sstevel@tonic-gate "diagnostic 'no one waiting for "
27230Sstevel@tonic-gate "connection' should not be "
27240Sstevel@tonic-gate "unexpected\n");
27250Sstevel@tonic-gate #endif
27260Sstevel@tonic-gate if (e->call_prev)
27270Sstevel@tonic-gate e->call_prev->call_next = e->call_next;
27280Sstevel@tonic-gate else
27290Sstevel@tonic-gate clnt_pending = e->call_next;
27300Sstevel@tonic-gate if (e->call_next)
27310Sstevel@tonic-gate e->call_next->call_prev = e->call_prev;
27320Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
27330Sstevel@tonic-gate
27340Sstevel@tonic-gate if (e->call_status != RPC_SUCCESS || error != 0) {
27350Sstevel@tonic-gate if (interrupted)
27360Sstevel@tonic-gate e->call_status = RPC_INTR;
27370Sstevel@tonic-gate else if (error == ETIME)
27380Sstevel@tonic-gate e->call_status = RPC_TIMEDOUT;
27399804SGerald.Thornbrugh@Sun.COM else if (error == EPROTO) {
27400Sstevel@tonic-gate e->call_status = RPC_SYSTEMERROR;
27419804SGerald.Thornbrugh@Sun.COM e->call_reason = EPROTO;
27429804SGerald.Thornbrugh@Sun.COM }
27430Sstevel@tonic-gate
27440Sstevel@tonic-gate RPCLOG(8, "connmgr_connect: can't connect, status: "
27450Sstevel@tonic-gate "%s\n", clnt_sperrno(e->call_status));
27460Sstevel@tonic-gate
27470Sstevel@tonic-gate if (e->call_reply) {
27480Sstevel@tonic-gate freemsg(e->call_reply);
27490Sstevel@tonic-gate e->call_reply = NULL;
27500Sstevel@tonic-gate }
27510Sstevel@tonic-gate
27520Sstevel@tonic-gate return (FALSE);
27530Sstevel@tonic-gate }
27540Sstevel@tonic-gate /*
27550Sstevel@tonic-gate * The result of the "connection accept" is a T_info_ack
27560Sstevel@tonic-gate * in the call_reply field.
27570Sstevel@tonic-gate */
27580Sstevel@tonic-gate ASSERT(e->call_reply != NULL);
27590Sstevel@tonic-gate mp = e->call_reply;
27600Sstevel@tonic-gate e->call_reply = NULL;
27610Sstevel@tonic-gate tinfo = (struct T_info_ack *)mp->b_rptr;
27620Sstevel@tonic-gate
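	/*
	 * Round the advertised TIDU size down to an XDR unit boundary and
	 * clamp nonsensical or oversized values to the default allocation
	 * size.
	 */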
27630Sstevel@tonic-gate tidu_size = tinfo->TIDU_size;
27640Sstevel@tonic-gate tidu_size -= (tidu_size % BYTES_PER_XDR_UNIT);
27650Sstevel@tonic-gate if (tidu_size > COTS_DEFAULT_ALLOCSIZE || (tidu_size <= 0))
27660Sstevel@tonic-gate tidu_size = COTS_DEFAULT_ALLOCSIZE;
27670Sstevel@tonic-gate *tidu_ptr = tidu_size;
27680Sstevel@tonic-gate
27690Sstevel@tonic-gate freemsg(mp);
27700Sstevel@tonic-gate
27710Sstevel@tonic-gate /*
27720Sstevel@tonic-gate * Set up the pertinent options. NODELAY is so the transport doesn't
27730Sstevel@tonic-gate * buffer up RPC messages on either end. This may not be valid for
27740Sstevel@tonic-gate * all transports. Failure to set this option is not cause to
27750Sstevel@tonic-gate * bail out so we return success anyway. Note that lack of NODELAY
27760Sstevel@tonic-gate * or some other way to flush the message on both ends will cause
27770Sstevel@tonic-gate * lots of retries and terrible performance.
27780Sstevel@tonic-gate */
27790Sstevel@tonic-gate if (addrfmly == AF_INET || addrfmly == AF_INET6) {
27808778SErik.Nordmark@Sun.COM (void) connmgr_setopt(wq, IPPROTO_TCP, TCP_NODELAY, e, cr);
27810Sstevel@tonic-gate if (e->call_status == RPC_XPRTFAILED)
27820Sstevel@tonic-gate return (FALSE);
27830Sstevel@tonic-gate }
27840Sstevel@tonic-gate
27850Sstevel@tonic-gate /*
27860Sstevel@tonic-gate * Since we have a connection, we now need to figure out if
27870Sstevel@tonic-gate * we need to create a kstat. If x_ksp is not NULL then we
27880Sstevel@tonic-gate * are reusing a connection and so we do not need to create
27890Sstevel@tonic-gate  * another kstat -- let's just return.
27900Sstevel@tonic-gate */
27910Sstevel@tonic-gate if (cm_entry->x_ksp != NULL)
27920Sstevel@tonic-gate return (TRUE);
27930Sstevel@tonic-gate
27940Sstevel@tonic-gate /*
27950Sstevel@tonic-gate * We need to increment rpc_kstat_instance atomically to prevent
27960Sstevel@tonic-gate * two kstats being created with the same instance.
27970Sstevel@tonic-gate */
27980Sstevel@tonic-gate kstat_instance = atomic_add_32_nv((uint32_t *)&rpc_kstat_instance, 1);
27990Sstevel@tonic-gate
28000Sstevel@tonic-gate if ((cm_entry->x_ksp = kstat_create_zone("unix", kstat_instance,
28010Sstevel@tonic-gate "rpc_cots_connections", "rpc", KSTAT_TYPE_NAMED,
28020Sstevel@tonic-gate (uint_t)(sizeof (cm_kstat_xprt_t) / sizeof (kstat_named_t)),
28030Sstevel@tonic-gate KSTAT_FLAG_VIRTUAL, cm_entry->x_zoneid)) == NULL) {
28040Sstevel@tonic-gate return (TRUE);
28056403Sgt29601 }
28060Sstevel@tonic-gate
28070Sstevel@tonic-gate cm_entry->x_ksp->ks_lock = &connmgr_lock;
28080Sstevel@tonic-gate cm_entry->x_ksp->ks_private = cm_entry;
28090Sstevel@tonic-gate cm_entry->x_ksp->ks_data_size = ((INET6_ADDRSTRLEN * sizeof (char))
28106403Sgt29601 + sizeof (cm_kstat_template));
28110Sstevel@tonic-gate cm_entry->x_ksp->ks_data = kmem_alloc(cm_entry->x_ksp->ks_data_size,
28126403Sgt29601 KM_SLEEP);
28130Sstevel@tonic-gate bcopy(&cm_kstat_template, cm_entry->x_ksp->ks_data,
28140Sstevel@tonic-gate cm_entry->x_ksp->ks_data_size);
28150Sstevel@tonic-gate ((struct cm_kstat_xprt *)(cm_entry->x_ksp->ks_data))->
28166403Sgt29601 x_server.value.str.addr.ptr =
28176403Sgt29601 kmem_alloc(INET6_ADDRSTRLEN, KM_SLEEP);
28180Sstevel@tonic-gate
28190Sstevel@tonic-gate cm_entry->x_ksp->ks_update = conn_kstat_update;
28200Sstevel@tonic-gate kstat_install(cm_entry->x_ksp);
28210Sstevel@tonic-gate return (TRUE);
28220Sstevel@tonic-gate }
28230Sstevel@tonic-gate
28240Sstevel@tonic-gate /*
282510004Sdai.ngo@sun.com * Verify that the specified offset falls within the mblk and
282610004Sdai.ngo@sun.com * that the resulting pointer is aligned.
282710004Sdai.ngo@sun.com * Returns NULL if not.
282810004Sdai.ngo@sun.com *
282910004Sdai.ngo@sun.com * code from fs/sockfs/socksubr.c
283010004Sdai.ngo@sun.com */
283110004Sdai.ngo@sun.com static void *
283210004Sdai.ngo@sun.com connmgr_opt_getoff(mblk_t *mp, t_uscalar_t offset,
283310004Sdai.ngo@sun.com t_uscalar_t length, uint_t align_size)
283410004Sdai.ngo@sun.com {
283510004Sdai.ngo@sun.com uintptr_t ptr1, ptr2;
283610004Sdai.ngo@sun.com
283710004Sdai.ngo@sun.com ASSERT(mp && mp->b_wptr >= mp->b_rptr);
283810004Sdai.ngo@sun.com ptr1 = (uintptr_t)mp->b_rptr + offset;
283910004Sdai.ngo@sun.com ptr2 = (uintptr_t)ptr1 + length;
284010004Sdai.ngo@sun.com if (ptr1 < (uintptr_t)mp->b_rptr || ptr2 > (uintptr_t)mp->b_wptr) {
284110004Sdai.ngo@sun.com return (NULL);
284210004Sdai.ngo@sun.com }
284310004Sdai.ngo@sun.com if ((ptr1 & (align_size - 1)) != 0) {
284410004Sdai.ngo@sun.com return (NULL);
284510004Sdai.ngo@sun.com }
284610004Sdai.ngo@sun.com return ((void *)ptr1);
284710004Sdai.ngo@sun.com }
284810004Sdai.ngo@sun.com
284910004Sdai.ngo@sun.com static bool_t
285010004Sdai.ngo@sun.com connmgr_getopt_int(queue_t *wq, int level, int name, int *val,
285110004Sdai.ngo@sun.com calllist_t *e, cred_t *cr)
285210004Sdai.ngo@sun.com {
285310004Sdai.ngo@sun.com mblk_t *mp;
285410004Sdai.ngo@sun.com struct opthdr *opt, *opt_res;
285510004Sdai.ngo@sun.com struct T_optmgmt_req *tor;
285610004Sdai.ngo@sun.com struct T_optmgmt_ack *opt_ack;
285710004Sdai.ngo@sun.com struct timeval waitp;
285810004Sdai.ngo@sun.com int error;
285910004Sdai.ngo@sun.com
286010004Sdai.ngo@sun.com mp = allocb_cred(sizeof (struct T_optmgmt_req) +
286110004Sdai.ngo@sun.com sizeof (struct opthdr) + sizeof (int), cr, NOPID);
286210004Sdai.ngo@sun.com if (mp == NULL)
286310004Sdai.ngo@sun.com return (FALSE);
286410004Sdai.ngo@sun.com
286510004Sdai.ngo@sun.com mp->b_datap->db_type = M_PROTO;
286610004Sdai.ngo@sun.com tor = (struct T_optmgmt_req *)(mp->b_rptr);
286710004Sdai.ngo@sun.com tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
286810004Sdai.ngo@sun.com tor->MGMT_flags = T_CURRENT;
286910004Sdai.ngo@sun.com tor->OPT_length = sizeof (struct opthdr) + sizeof (int);
287010004Sdai.ngo@sun.com tor->OPT_offset = sizeof (struct T_optmgmt_req);
287110004Sdai.ngo@sun.com
287210004Sdai.ngo@sun.com opt = (struct opthdr *)(mp->b_rptr + sizeof (struct T_optmgmt_req));
287310004Sdai.ngo@sun.com opt->level = level;
287410004Sdai.ngo@sun.com opt->name = name;
287510004Sdai.ngo@sun.com opt->len = sizeof (int);
287610004Sdai.ngo@sun.com mp->b_wptr += sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
287710004Sdai.ngo@sun.com sizeof (int);
287810004Sdai.ngo@sun.com
287910004Sdai.ngo@sun.com /*
288010004Sdai.ngo@sun.com * We will use this connection regardless
288110004Sdai.ngo@sun.com * of whether or not the option is readable.
288210004Sdai.ngo@sun.com */
288310004Sdai.ngo@sun.com if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
288410004Sdai.ngo@sun.com DTRACE_PROBE(krpc__e__connmgr__getopt__cantsend);
288510004Sdai.ngo@sun.com freemsg(mp);
288610004Sdai.ngo@sun.com return (FALSE);
288710004Sdai.ngo@sun.com }
288810004Sdai.ngo@sun.com
288910004Sdai.ngo@sun.com mutex_enter(&clnt_pending_lock);
289010004Sdai.ngo@sun.com
289110004Sdai.ngo@sun.com waitp.tv_sec = clnt_cots_min_conntout;
289210004Sdai.ngo@sun.com waitp.tv_usec = 0;
289310004Sdai.ngo@sun.com error = waitforack(e, T_OPTMGMT_ACK, &waitp, 1);
289410004Sdai.ngo@sun.com
289510004Sdai.ngo@sun.com if (e->call_prev)
289610004Sdai.ngo@sun.com e->call_prev->call_next = e->call_next;
289710004Sdai.ngo@sun.com else
289810004Sdai.ngo@sun.com clnt_pending = e->call_next;
289910004Sdai.ngo@sun.com if (e->call_next)
290010004Sdai.ngo@sun.com e->call_next->call_prev = e->call_prev;
290110004Sdai.ngo@sun.com mutex_exit(&clnt_pending_lock);
290210004Sdai.ngo@sun.com
290310004Sdai.ngo@sun.com /* get reply message */
290410004Sdai.ngo@sun.com mp = e->call_reply;
290510004Sdai.ngo@sun.com e->call_reply = NULL;
290610004Sdai.ngo@sun.com
290710004Sdai.ngo@sun.com if ((!mp) || (e->call_status != RPC_SUCCESS) || (error != 0)) {
290810004Sdai.ngo@sun.com
290910004Sdai.ngo@sun.com DTRACE_PROBE4(krpc__e__connmgr_getopt, int, name,
291010004Sdai.ngo@sun.com int, e->call_status, int, error, mblk_t *, mp);
291110004Sdai.ngo@sun.com
291210004Sdai.ngo@sun.com if (mp)
291310004Sdai.ngo@sun.com freemsg(mp);
291410004Sdai.ngo@sun.com return (FALSE);
291510004Sdai.ngo@sun.com }
291610004Sdai.ngo@sun.com
291710004Sdai.ngo@sun.com opt_ack = (struct T_optmgmt_ack *)mp->b_rptr;
291810004Sdai.ngo@sun.com opt_res = (struct opthdr *)connmgr_opt_getoff(mp, opt_ack->OPT_offset,
291910004Sdai.ngo@sun.com opt_ack->OPT_length, __TPI_ALIGN_SIZE);
292010004Sdai.ngo@sun.com
292110004Sdai.ngo@sun.com if (!opt_res) {
292210004Sdai.ngo@sun.com DTRACE_PROBE4(krpc__e__connmgr_optres, mblk_t *, mp, int, name,
292310004Sdai.ngo@sun.com int, opt_ack->OPT_offset, int, opt_ack->OPT_length);
292410004Sdai.ngo@sun.com freemsg(mp);
292510004Sdai.ngo@sun.com return (FALSE);
292610004Sdai.ngo@sun.com }
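	/* The option value immediately follows the opthdr. */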
292710004Sdai.ngo@sun.com *val = *(int *)&opt_res[1];
292810004Sdai.ngo@sun.com
292910004Sdai.ngo@sun.com DTRACE_PROBE2(connmgr_getopt__ok, int, name, int, *val);
293010004Sdai.ngo@sun.com
293110004Sdai.ngo@sun.com freemsg(mp);
293210004Sdai.ngo@sun.com return (TRUE);
293310004Sdai.ngo@sun.com }
293410004Sdai.ngo@sun.com
293510004Sdai.ngo@sun.com /*
29360Sstevel@tonic-gate * Called by connmgr_connect to set an option on the new stream.
29370Sstevel@tonic-gate */
29380Sstevel@tonic-gate static bool_t
293910004Sdai.ngo@sun.com connmgr_setopt_int(queue_t *wq, int level, int name, int val,
294010004Sdai.ngo@sun.com calllist_t *e, cred_t *cr)
29410Sstevel@tonic-gate {
29420Sstevel@tonic-gate mblk_t *mp;
29430Sstevel@tonic-gate struct opthdr *opt;
29440Sstevel@tonic-gate struct T_optmgmt_req *tor;
29450Sstevel@tonic-gate struct timeval waitp;
29460Sstevel@tonic-gate int error;
29470Sstevel@tonic-gate
29488778SErik.Nordmark@Sun.COM mp = allocb_cred(sizeof (struct T_optmgmt_req) +
29498778SErik.Nordmark@Sun.COM sizeof (struct opthdr) + sizeof (int), cr, NOPID);
29500Sstevel@tonic-gate if (mp == NULL) {
29510Sstevel@tonic-gate RPCLOG0(1, "connmgr_setopt: cannot alloc mp for option "
29520Sstevel@tonic-gate "request\n");
29530Sstevel@tonic-gate return (FALSE);
29540Sstevel@tonic-gate }
29550Sstevel@tonic-gate
29560Sstevel@tonic-gate mp->b_datap->db_type = M_PROTO;
29570Sstevel@tonic-gate tor = (struct T_optmgmt_req *)(mp->b_rptr);
29580Sstevel@tonic-gate tor->PRIM_type = T_SVR4_OPTMGMT_REQ;
29590Sstevel@tonic-gate tor->MGMT_flags = T_NEGOTIATE;
29600Sstevel@tonic-gate tor->OPT_length = sizeof (struct opthdr) + sizeof (int);
29610Sstevel@tonic-gate tor->OPT_offset = sizeof (struct T_optmgmt_req);
29620Sstevel@tonic-gate
29630Sstevel@tonic-gate opt = (struct opthdr *)(mp->b_rptr + sizeof (struct T_optmgmt_req));
29640Sstevel@tonic-gate opt->level = level;
29650Sstevel@tonic-gate opt->name = name;
29660Sstevel@tonic-gate opt->len = sizeof (int);
296710004Sdai.ngo@sun.com *(int *)((char *)opt + sizeof (*opt)) = val;
29680Sstevel@tonic-gate mp->b_wptr += sizeof (struct T_optmgmt_req) + sizeof (struct opthdr) +
29690Sstevel@tonic-gate sizeof (int);
29700Sstevel@tonic-gate
29710Sstevel@tonic-gate /*
29720Sstevel@tonic-gate * We will use this connection regardless
29730Sstevel@tonic-gate * of whether or not the option is settable.
29740Sstevel@tonic-gate */
29758205SSiddheshwar.Mahesh@Sun.COM if (clnt_dispatch_send(wq, mp, e, 0, 0) != RPC_SUCCESS) {
29768205SSiddheshwar.Mahesh@Sun.COM DTRACE_PROBE(krpc__e__connmgr__setopt__cantsend);
29778205SSiddheshwar.Mahesh@Sun.COM freemsg(mp);
29788205SSiddheshwar.Mahesh@Sun.COM return (FALSE);
29798205SSiddheshwar.Mahesh@Sun.COM }
29808205SSiddheshwar.Mahesh@Sun.COM
29810Sstevel@tonic-gate mutex_enter(&clnt_pending_lock);
29820Sstevel@tonic-gate
29830Sstevel@tonic-gate waitp.tv_sec = clnt_cots_min_conntout;
29840Sstevel@tonic-gate waitp.tv_usec = 0;
29850Sstevel@tonic-gate error = waitforack(e, T_OPTMGMT_ACK, &waitp, 1);
29860Sstevel@tonic-gate
29870Sstevel@tonic-gate if (e->call_prev)
29880Sstevel@tonic-gate e->call_prev->call_next = e->call_next;
29890Sstevel@tonic-gate else
29900Sstevel@tonic-gate clnt_pending = e->call_next;
29910Sstevel@tonic-gate if (e->call_next)
29920Sstevel@tonic-gate e->call_next->call_prev = e->call_prev;
29930Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
29940Sstevel@tonic-gate
29950Sstevel@tonic-gate if (e->call_reply != NULL) {
29960Sstevel@tonic-gate freemsg(e->call_reply);
29970Sstevel@tonic-gate e->call_reply = NULL;
29980Sstevel@tonic-gate }
29990Sstevel@tonic-gate
30000Sstevel@tonic-gate if (e->call_status != RPC_SUCCESS || error != 0) {
30010Sstevel@tonic-gate RPCLOG(1, "connmgr_setopt: can't set option: %d\n", name);
30020Sstevel@tonic-gate return (FALSE);
30030Sstevel@tonic-gate }
30040Sstevel@tonic-gate RPCLOG(8, "connmgr_setopt: successfully set option: %d\n", name);
30050Sstevel@tonic-gate return (TRUE);
30060Sstevel@tonic-gate }
30070Sstevel@tonic-gate
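/*
 * Convenience wrapper around connmgr_setopt_int() for boolean options
 * that are simply turned on (value 1).
 */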
300810004Sdai.ngo@sun.com static bool_t
300910004Sdai.ngo@sun.com connmgr_setopt(queue_t *wq, int level, int name, calllist_t *e, cred_t *cr)
301010004Sdai.ngo@sun.com {
301110004Sdai.ngo@sun.com return (connmgr_setopt_int(wq, level, name, 1, e, cr));
301210004Sdai.ngo@sun.com }
301310004Sdai.ngo@sun.com
30140Sstevel@tonic-gate #ifdef DEBUG
30150Sstevel@tonic-gate
30160Sstevel@tonic-gate /*
30170Sstevel@tonic-gate * This is a knob to let us force code coverage in allocation failure
30180Sstevel@tonic-gate * case.
30190Sstevel@tonic-gate */
30200Sstevel@tonic-gate static int connmgr_failsnd;
30210Sstevel@tonic-gate #define CONN_SND_ALLOC(Size, Pri) \
30220Sstevel@tonic-gate ((connmgr_failsnd-- > 0) ? NULL : allocb(Size, Pri))
30230Sstevel@tonic-gate
30240Sstevel@tonic-gate #else
30250Sstevel@tonic-gate
30260Sstevel@tonic-gate #define CONN_SND_ALLOC(Size, Pri) allocb(Size, Pri)
30270Sstevel@tonic-gate
30280Sstevel@tonic-gate #endif
30290Sstevel@tonic-gate
30300Sstevel@tonic-gate /*
30310Sstevel@tonic-gate * Sends an orderly release on the specified queue.
30320Sstevel@tonic-gate  * Entered with connmgr_lock held; exits with it released.
30330Sstevel@tonic-gate */
30340Sstevel@tonic-gate static void
30350Sstevel@tonic-gate connmgr_sndrel(struct cm_xprt *cm_entry)
30360Sstevel@tonic-gate {
30370Sstevel@tonic-gate struct T_ordrel_req *torr;
30380Sstevel@tonic-gate mblk_t *mp;
30390Sstevel@tonic-gate queue_t *q = cm_entry->x_wq;
30400Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
30410Sstevel@tonic-gate mp = CONN_SND_ALLOC(sizeof (struct T_ordrel_req), BPRI_LO);
30420Sstevel@tonic-gate if (mp == NULL) {
30430Sstevel@tonic-gate cm_entry->x_needrel = TRUE;
30440Sstevel@tonic-gate mutex_exit(&connmgr_lock);
30450Sstevel@tonic-gate RPCLOG(1, "connmgr_sndrel: cannot alloc mp for sending ordrel "
30466403Sgt29601 "to queue %p\n", (void *)q);
30470Sstevel@tonic-gate return;
30480Sstevel@tonic-gate }
30490Sstevel@tonic-gate mutex_exit(&connmgr_lock);
30500Sstevel@tonic-gate
30510Sstevel@tonic-gate mp->b_datap->db_type = M_PROTO;
30520Sstevel@tonic-gate torr = (struct T_ordrel_req *)(mp->b_rptr);
30530Sstevel@tonic-gate torr->PRIM_type = T_ORDREL_REQ;
30540Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_ordrel_req);
30550Sstevel@tonic-gate
30560Sstevel@tonic-gate RPCLOG(8, "connmgr_sndrel: sending ordrel to queue %p\n", (void *)q);
30570Sstevel@tonic-gate put(q, mp);
30580Sstevel@tonic-gate }
30590Sstevel@tonic-gate
30600Sstevel@tonic-gate /*
30610Sstevel@tonic-gate  * Sends a disconnect on the specified queue.
30620Sstevel@tonic-gate  * Entered with connmgr_lock held; exits with it released.
30630Sstevel@tonic-gate */
30640Sstevel@tonic-gate static void
30650Sstevel@tonic-gate connmgr_snddis(struct cm_xprt *cm_entry)
30660Sstevel@tonic-gate {
30670Sstevel@tonic-gate struct T_discon_req *tdis;
30680Sstevel@tonic-gate mblk_t *mp;
30690Sstevel@tonic-gate queue_t *q = cm_entry->x_wq;
30700Sstevel@tonic-gate
30710Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
30720Sstevel@tonic-gate mp = CONN_SND_ALLOC(sizeof (*tdis), BPRI_LO);
30730Sstevel@tonic-gate if (mp == NULL) {
30740Sstevel@tonic-gate cm_entry->x_needdis = TRUE;
30750Sstevel@tonic-gate mutex_exit(&connmgr_lock);
30760Sstevel@tonic-gate RPCLOG(1, "connmgr_snddis: cannot alloc mp for sending discon "
30770Sstevel@tonic-gate "to queue %p\n", (void *)q);
30780Sstevel@tonic-gate return;
30790Sstevel@tonic-gate }
30800Sstevel@tonic-gate mutex_exit(&connmgr_lock);
30810Sstevel@tonic-gate
30820Sstevel@tonic-gate mp->b_datap->db_type = M_PROTO;
30830Sstevel@tonic-gate tdis = (struct T_discon_req *)mp->b_rptr;
30840Sstevel@tonic-gate tdis->PRIM_type = T_DISCON_REQ;
30850Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (*tdis);
30860Sstevel@tonic-gate
30870Sstevel@tonic-gate RPCLOG(8, "connmgr_snddis: sending discon to queue %p\n", (void *)q);
30880Sstevel@tonic-gate put(q, mp);
30890Sstevel@tonic-gate }
30900Sstevel@tonic-gate
30910Sstevel@tonic-gate /*
30920Sstevel@tonic-gate * Sets up the entry for receiving replies, and calls rpcmod's write put proc
30930Sstevel@tonic-gate * (through put) to send the call.
30940Sstevel@tonic-gate */
30958205SSiddheshwar.Mahesh@Sun.COM static int
30960Sstevel@tonic-gate clnt_dispatch_send(queue_t *q, mblk_t *mp, calllist_t *e, uint_t xid,
30970Sstevel@tonic-gate uint_t queue_flag)
30980Sstevel@tonic-gate {
30990Sstevel@tonic-gate ASSERT(e != NULL);
31000Sstevel@tonic-gate
31010Sstevel@tonic-gate e->call_status = RPC_TIMEDOUT; /* optimistic, eh? */
31020Sstevel@tonic-gate e->call_reason = 0;
31030Sstevel@tonic-gate e->call_wq = q;
31040Sstevel@tonic-gate e->call_xid = xid;
31050Sstevel@tonic-gate e->call_notified = FALSE;
31060Sstevel@tonic-gate
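	/*
	 * If the stream is flow-controlled, fail the send immediately rather
	 * than queueing; the caller decides whether and when to retry.
	 */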
31078205SSiddheshwar.Mahesh@Sun.COM if (!canput(q)) {
31088205SSiddheshwar.Mahesh@Sun.COM e->call_status = RPC_CANTSEND;
31099675Sdai.ngo@sun.com e->call_reason = ENOBUFS;
31108205SSiddheshwar.Mahesh@Sun.COM return (RPC_CANTSEND);
31118205SSiddheshwar.Mahesh@Sun.COM }
31128205SSiddheshwar.Mahesh@Sun.COM
31130Sstevel@tonic-gate /*
31140Sstevel@tonic-gate * If queue_flag is set then the calllist_t is already on the hash
31150Sstevel@tonic-gate * queue. In this case just send the message and return.
31160Sstevel@tonic-gate */
31170Sstevel@tonic-gate if (queue_flag) {
31180Sstevel@tonic-gate put(q, mp);
31198205SSiddheshwar.Mahesh@Sun.COM return (RPC_SUCCESS);
31208205SSiddheshwar.Mahesh@Sun.COM
31210Sstevel@tonic-gate }
31220Sstevel@tonic-gate
31230Sstevel@tonic-gate /*
31240Sstevel@tonic-gate * Set up calls for RPC requests (with XID != 0) on the hash
31250Sstevel@tonic-gate * queue for fast lookups and place other calls (i.e.
31260Sstevel@tonic-gate * connection management) on the linked list.
31270Sstevel@tonic-gate */
31280Sstevel@tonic-gate if (xid != 0) {
31290Sstevel@tonic-gate RPCLOG(64, "clnt_dispatch_send: putting xid 0x%x on "
31306403Sgt29601 "dispatch list\n", xid);
31310Sstevel@tonic-gate e->call_hash = call_hash(xid, clnt_cots_hash_size);
31320Sstevel@tonic-gate e->call_bucket = &cots_call_ht[e->call_hash];
31330Sstevel@tonic-gate call_table_enter(e);
31340Sstevel@tonic-gate } else {
31350Sstevel@tonic-gate mutex_enter(&clnt_pending_lock);
31360Sstevel@tonic-gate if (clnt_pending)
31370Sstevel@tonic-gate clnt_pending->call_prev = e;
31380Sstevel@tonic-gate e->call_next = clnt_pending;
31390Sstevel@tonic-gate e->call_prev = NULL;
31400Sstevel@tonic-gate clnt_pending = e;
31410Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
31420Sstevel@tonic-gate }
31430Sstevel@tonic-gate
31440Sstevel@tonic-gate put(q, mp);
31458205SSiddheshwar.Mahesh@Sun.COM return (RPC_SUCCESS);
31460Sstevel@tonic-gate }
31470Sstevel@tonic-gate
31480Sstevel@tonic-gate /*
31490Sstevel@tonic-gate * Called by rpcmod to notify a client with a clnt_pending call that its reply
31500Sstevel@tonic-gate * has arrived. If we can't find a client waiting for this reply, we log
31510Sstevel@tonic-gate * the error and return.
31520Sstevel@tonic-gate */
31530Sstevel@tonic-gate bool_t
31540Sstevel@tonic-gate clnt_dispatch_notify(mblk_t *mp, zoneid_t zoneid)
31550Sstevel@tonic-gate {
31560Sstevel@tonic-gate calllist_t *e = NULL;
31570Sstevel@tonic-gate call_table_t *chtp;
31580Sstevel@tonic-gate uint32_t xid;
31590Sstevel@tonic-gate uint_t hash;
31600Sstevel@tonic-gate
31610Sstevel@tonic-gate if ((IS_P2ALIGNED(mp->b_rptr, sizeof (uint32_t))) &&
31620Sstevel@tonic-gate (mp->b_wptr - mp->b_rptr) >= sizeof (xid))
31630Sstevel@tonic-gate xid = *((uint32_t *)mp->b_rptr);
31640Sstevel@tonic-gate else {
31650Sstevel@tonic-gate int i = 0;
31660Sstevel@tonic-gate unsigned char *p = (unsigned char *)&xid;
31670Sstevel@tonic-gate unsigned char *rptr;
31680Sstevel@tonic-gate mblk_t *tmp = mp;
31690Sstevel@tonic-gate
31700Sstevel@tonic-gate /*
31710Sstevel@tonic-gate * Copy the xid, byte-by-byte into xid.
31720Sstevel@tonic-gate */
31730Sstevel@tonic-gate while (tmp) {
31740Sstevel@tonic-gate rptr = tmp->b_rptr;
31750Sstevel@tonic-gate while (rptr < tmp->b_wptr) {
31760Sstevel@tonic-gate *p++ = *rptr++;
31770Sstevel@tonic-gate if (++i >= sizeof (xid))
31780Sstevel@tonic-gate goto done_xid_copy;
31790Sstevel@tonic-gate }
31800Sstevel@tonic-gate tmp = tmp->b_cont;
31810Sstevel@tonic-gate }
31820Sstevel@tonic-gate
31830Sstevel@tonic-gate /*
31840Sstevel@tonic-gate * If we got here, we ran out of mblk space before the
31850Sstevel@tonic-gate * xid could be copied.
31860Sstevel@tonic-gate */
31870Sstevel@tonic-gate ASSERT(tmp == NULL && i < sizeof (xid));
31880Sstevel@tonic-gate
31890Sstevel@tonic-gate RPCLOG0(1,
31900Sstevel@tonic-gate "clnt_dispatch_notify: message less than size of xid\n");
31910Sstevel@tonic-gate return (FALSE);
31920Sstevel@tonic-gate
31930Sstevel@tonic-gate }
31940Sstevel@tonic-gate done_xid_copy:
31950Sstevel@tonic-gate
31960Sstevel@tonic-gate hash = call_hash(xid, clnt_cots_hash_size);
31970Sstevel@tonic-gate chtp = &cots_call_ht[hash];
31980Sstevel@tonic-gate /* call_table_find returns with the hash bucket locked */
31990Sstevel@tonic-gate call_table_find(chtp, xid, e);
32000Sstevel@tonic-gate
32010Sstevel@tonic-gate if (e != NULL) {
32020Sstevel@tonic-gate /*
32030Sstevel@tonic-gate * Found thread waiting for this reply
32040Sstevel@tonic-gate */
32050Sstevel@tonic-gate mutex_enter(&e->call_lock);
32066403Sgt29601
32076403Sgt29601 /*
32086403Sgt29601 		 * Verify that the reply is arriving in the same
32096403Sgt29601 		 * zone from which the request was sent.
32106403Sgt29601 */
32116403Sgt29601 if (e->call_zoneid != zoneid) {
32126403Sgt29601 mutex_exit(&e->call_lock);
32136403Sgt29601 mutex_exit(&chtp->ct_lock);
32149804SGerald.Thornbrugh@Sun.COM RPCLOG0(1, "clnt_dispatch_notify: incorrect zoneid\n");
32156403Sgt29601 return (FALSE);
32166403Sgt29601 }
32176403Sgt29601
32180Sstevel@tonic-gate if (e->call_reply)
32190Sstevel@tonic-gate /*
32200Sstevel@tonic-gate * This can happen under the following scenario:
32210Sstevel@tonic-gate * clnt_cots_kcallit() times out on the response,
32220Sstevel@tonic-gate * rfscall() repeats the CLNT_CALL() with
32230Sstevel@tonic-gate * the same xid, clnt_cots_kcallit() sends the retry,
32240Sstevel@tonic-gate * thereby putting the clnt handle on the pending list,
32250Sstevel@tonic-gate * the first response arrives, signalling the thread
32260Sstevel@tonic-gate * in clnt_cots_kcallit(). Before that thread is
32270Sstevel@tonic-gate * dispatched, the second response arrives as well,
32280Sstevel@tonic-gate * and clnt_dispatch_notify still finds the handle on
32290Sstevel@tonic-gate * the pending list, with call_reply set. So free the
32300Sstevel@tonic-gate * old reply now.
32310Sstevel@tonic-gate *
32320Sstevel@tonic-gate * It is also possible for a response intended for
32330Sstevel@tonic-gate * an RPC call with a different xid to reside here.
32340Sstevel@tonic-gate * This can happen if the thread that owned this
32350Sstevel@tonic-gate * client handle prior to the current owner bailed
32360Sstevel@tonic-gate * out and left its call record on the dispatch
32370Sstevel@tonic-gate * queue. A window exists where the response can
32380Sstevel@tonic-gate * arrive before the current owner dispatches its
32390Sstevel@tonic-gate * RPC call.
32400Sstevel@tonic-gate *
32410Sstevel@tonic-gate * In any case, this is the very last point where we
32420Sstevel@tonic-gate * can safely check the call_reply field before
32430Sstevel@tonic-gate * placing the new response there.
32440Sstevel@tonic-gate */
32450Sstevel@tonic-gate freemsg(e->call_reply);
32460Sstevel@tonic-gate e->call_reply = mp;
32470Sstevel@tonic-gate e->call_status = RPC_SUCCESS;
32480Sstevel@tonic-gate e->call_notified = TRUE;
32490Sstevel@tonic-gate cv_signal(&e->call_cv);
32500Sstevel@tonic-gate mutex_exit(&e->call_lock);
32510Sstevel@tonic-gate mutex_exit(&chtp->ct_lock);
32520Sstevel@tonic-gate return (TRUE);
32530Sstevel@tonic-gate } else {
32540Sstevel@tonic-gate zone_t *zone;
32550Sstevel@tonic-gate struct rpcstat *rpcstat;
32560Sstevel@tonic-gate
32570Sstevel@tonic-gate mutex_exit(&chtp->ct_lock);
32580Sstevel@tonic-gate RPCLOG(65, "clnt_dispatch_notify: no caller for reply 0x%x\n",
32590Sstevel@tonic-gate xid);
32600Sstevel@tonic-gate /*
32610Sstevel@tonic-gate * This is unfortunate, but we need to lookup the zone so we
32620Sstevel@tonic-gate * can increment its "rcbadxids" counter.
32630Sstevel@tonic-gate */
32640Sstevel@tonic-gate zone = zone_find_by_id(zoneid);
32650Sstevel@tonic-gate if (zone == NULL) {
32660Sstevel@tonic-gate /*
32670Sstevel@tonic-gate * The zone went away...
32680Sstevel@tonic-gate */
32690Sstevel@tonic-gate return (FALSE);
32700Sstevel@tonic-gate }
32710Sstevel@tonic-gate rpcstat = zone_getspecific(rpcstat_zone_key, zone);
32720Sstevel@tonic-gate if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
32730Sstevel@tonic-gate /*
32740Sstevel@tonic-gate * Not interested
32750Sstevel@tonic-gate */
32760Sstevel@tonic-gate zone_rele(zone);
32770Sstevel@tonic-gate return (FALSE);
32780Sstevel@tonic-gate }
32790Sstevel@tonic-gate COTSRCSTAT_INCR(rpcstat->rpc_cots_client, rcbadxids);
32800Sstevel@tonic-gate zone_rele(zone);
32810Sstevel@tonic-gate }
32820Sstevel@tonic-gate return (FALSE);
32830Sstevel@tonic-gate }
32840Sstevel@tonic-gate
32850Sstevel@tonic-gate /*
32860Sstevel@tonic-gate * Called by rpcmod when a non-data indication arrives. The ones in which we
32870Sstevel@tonic-gate * are interested are connection indications and options acks. We dispatch
32880Sstevel@tonic-gate * based on the queue the indication came in on. If we are not interested in
32890Sstevel@tonic-gate * what came in, we return false to rpcmod, who will then pass it upstream.
32900Sstevel@tonic-gate */
32910Sstevel@tonic-gate bool_t
32920Sstevel@tonic-gate clnt_dispatch_notifyconn(queue_t *q, mblk_t *mp)
32930Sstevel@tonic-gate {
32940Sstevel@tonic-gate calllist_t *e;
32950Sstevel@tonic-gate int type;
32960Sstevel@tonic-gate
32970Sstevel@tonic-gate ASSERT((q->q_flag & QREADR) == 0);
32980Sstevel@tonic-gate
32990Sstevel@tonic-gate type = ((union T_primitives *)mp->b_rptr)->type;
33000Sstevel@tonic-gate RPCLOG(8, "clnt_dispatch_notifyconn: prim type: [%s]\n",
33010Sstevel@tonic-gate rpc_tpiprim2name(type));
33020Sstevel@tonic-gate mutex_enter(&clnt_pending_lock);
33030Sstevel@tonic-gate for (e = clnt_pending; /* NO CONDITION */; e = e->call_next) {
33040Sstevel@tonic-gate if (e == NULL) {
33050Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
33060Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyconn: no one waiting "
33070Sstevel@tonic-gate "for connection on queue 0x%p\n", (void *)q);
33080Sstevel@tonic-gate return (FALSE);
33090Sstevel@tonic-gate }
33100Sstevel@tonic-gate if (e->call_wq == q)
33110Sstevel@tonic-gate break;
33120Sstevel@tonic-gate }
33130Sstevel@tonic-gate
33140Sstevel@tonic-gate switch (type) {
33150Sstevel@tonic-gate case T_CONN_CON:
33160Sstevel@tonic-gate /*
33170Sstevel@tonic-gate * The transport is now connected, send a T_INFO_REQ to get
33180Sstevel@tonic-gate * the tidu size.
33190Sstevel@tonic-gate */
33200Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
33210Sstevel@tonic-gate ASSERT(mp->b_datap->db_lim - mp->b_datap->db_base >=
33226403Sgt29601 sizeof (struct T_info_req));
33230Sstevel@tonic-gate mp->b_rptr = mp->b_datap->db_base;
33240Sstevel@tonic-gate ((union T_primitives *)mp->b_rptr)->type = T_INFO_REQ;
33250Sstevel@tonic-gate mp->b_wptr = mp->b_rptr + sizeof (struct T_info_req);
33260Sstevel@tonic-gate mp->b_datap->db_type = M_PCPROTO;
33270Sstevel@tonic-gate put(q, mp);
33280Sstevel@tonic-gate return (TRUE);
33290Sstevel@tonic-gate case T_INFO_ACK:
33300Sstevel@tonic-gate case T_OPTMGMT_ACK:
33310Sstevel@tonic-gate e->call_status = RPC_SUCCESS;
33320Sstevel@tonic-gate e->call_reply = mp;
33330Sstevel@tonic-gate e->call_notified = TRUE;
33340Sstevel@tonic-gate cv_signal(&e->call_cv);
33350Sstevel@tonic-gate break;
33360Sstevel@tonic-gate case T_ERROR_ACK:
33370Sstevel@tonic-gate e->call_status = RPC_CANTCONNECT;
33380Sstevel@tonic-gate e->call_reply = mp;
33390Sstevel@tonic-gate e->call_notified = TRUE;
33400Sstevel@tonic-gate cv_signal(&e->call_cv);
33410Sstevel@tonic-gate break;
33420Sstevel@tonic-gate case T_OK_ACK:
33430Sstevel@tonic-gate /*
33440Sstevel@tonic-gate * Great, but we are really waiting for a T_CONN_CON
33450Sstevel@tonic-gate */
33460Sstevel@tonic-gate freemsg(mp);
33470Sstevel@tonic-gate break;
33480Sstevel@tonic-gate default:
33490Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
33500Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyconn: bad type %d\n", type);
33510Sstevel@tonic-gate return (FALSE);
33520Sstevel@tonic-gate }
33530Sstevel@tonic-gate
33540Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
33550Sstevel@tonic-gate return (TRUE);
33560Sstevel@tonic-gate }
33570Sstevel@tonic-gate
33580Sstevel@tonic-gate /*
33590Sstevel@tonic-gate * Called by rpcmod when the transport is (or should be) going away. Informs
33600Sstevel@tonic-gate * all callers waiting for replies and marks the entry in the connection
33610Sstevel@tonic-gate * manager's list as unconnected, and either closing (close handshake in
33620Sstevel@tonic-gate * progress) or dead.
33630Sstevel@tonic-gate */
33640Sstevel@tonic-gate void
33650Sstevel@tonic-gate clnt_dispatch_notifyall(queue_t *q, int32_t msg_type, int32_t reason)
33660Sstevel@tonic-gate {
33670Sstevel@tonic-gate calllist_t *e;
33680Sstevel@tonic-gate call_table_t *ctp;
33690Sstevel@tonic-gate struct cm_xprt *cm_entry;
33700Sstevel@tonic-gate int have_connmgr_lock;
33710Sstevel@tonic-gate int i;
33720Sstevel@tonic-gate
33730Sstevel@tonic-gate ASSERT((q->q_flag & QREADR) == 0);
33740Sstevel@tonic-gate
33750Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyall on queue %p", (void *)q);
33760Sstevel@tonic-gate RPCLOG(1, " received a notification prim type [%s]",
33770Sstevel@tonic-gate rpc_tpiprim2name(msg_type));
33780Sstevel@tonic-gate RPCLOG(1, " and reason %d\n", reason);
33790Sstevel@tonic-gate
33800Sstevel@tonic-gate /*
33810Sstevel@tonic-gate * Find the transport entry in the connection manager's list, close
33820Sstevel@tonic-gate * the transport and delete the entry. In the case where rpcmod's
33830Sstevel@tonic-gate * idle timer goes off, it sends us a T_ORDREL_REQ, indicating we
33840Sstevel@tonic-gate * should gracefully close the connection.
33850Sstevel@tonic-gate */
33860Sstevel@tonic-gate have_connmgr_lock = 1;
33870Sstevel@tonic-gate mutex_enter(&connmgr_lock);
33880Sstevel@tonic-gate for (cm_entry = cm_hd; cm_entry; cm_entry = cm_entry->x_next) {
33890Sstevel@tonic-gate ASSERT(cm_entry != cm_entry->x_next);
33900Sstevel@tonic-gate if (cm_entry->x_wq == q) {
33910Sstevel@tonic-gate ASSERT(MUTEX_HELD(&connmgr_lock));
33920Sstevel@tonic-gate ASSERT(have_connmgr_lock == 1);
33930Sstevel@tonic-gate switch (msg_type) {
33940Sstevel@tonic-gate case T_ORDREL_REQ:
33950Sstevel@tonic-gate
33960Sstevel@tonic-gate if (cm_entry->x_dead) {
33970Sstevel@tonic-gate RPCLOG(1, "idle timeout on dead "
33980Sstevel@tonic-gate "connection: %p\n",
33990Sstevel@tonic-gate (void *)cm_entry);
34000Sstevel@tonic-gate if (clnt_stop_idle != NULL)
34010Sstevel@tonic-gate (*clnt_stop_idle)(q);
34020Sstevel@tonic-gate break;
34030Sstevel@tonic-gate }
34040Sstevel@tonic-gate
34050Sstevel@tonic-gate /*
34060Sstevel@tonic-gate * Only mark the connection as dead if it is
34070Sstevel@tonic-gate * connected and idle.
34080Sstevel@tonic-gate * An unconnected connection has probably
34090Sstevel@tonic-gate * gone idle because the server is down,
34100Sstevel@tonic-gate * and when it comes back up there will be
34110Sstevel@tonic-gate * retries that need to use that connection.
34120Sstevel@tonic-gate */
34130Sstevel@tonic-gate if (cm_entry->x_connected ||
34140Sstevel@tonic-gate cm_entry->x_doomed) {
34156403Sgt29601 if (cm_entry->x_ordrel) {
34166403Sgt29601 if (cm_entry->x_closing ==
34176403Sgt29601 TRUE) {
34186403Sgt29601 /*
34196403Sgt29601 * The connection is
34206403Sgt29601 * obviously wedged due
34216403Sgt29601 * to a bug or problem
34226403Sgt29601 * with the transport.
34236403Sgt29601 * Mark it as dead.
34246403Sgt29601 * Otherwise we can
34256403Sgt29601 * leak connections.
34266403Sgt29601 */
34276403Sgt29601 cm_entry->x_dead = TRUE;
34286403Sgt29601 mutex_exit(
34296403Sgt29601 &connmgr_lock);
34306403Sgt29601 have_connmgr_lock = 0;
34316403Sgt29601 if (clnt_stop_idle !=
34326403Sgt29601 NULL)
34336403Sgt29601 (*clnt_stop_idle)(q);
34346403Sgt29601 break;
34356403Sgt29601 }
34366403Sgt29601 cm_entry->x_closing = TRUE;
34376403Sgt29601 connmgr_sndrel(cm_entry);
34386403Sgt29601 have_connmgr_lock = 0;
34396403Sgt29601 } else {
34406403Sgt29601 cm_entry->x_dead = TRUE;
34416403Sgt29601 mutex_exit(&connmgr_lock);
34426403Sgt29601 have_connmgr_lock = 0;
34436403Sgt29601 if (clnt_stop_idle != NULL)
34446403Sgt29601 (*clnt_stop_idle)(q);
34450Sstevel@tonic-gate }
34460Sstevel@tonic-gate } else {
34470Sstevel@tonic-gate /*
34480Sstevel@tonic-gate * We don't mark the connection
34490Sstevel@tonic-gate * as dead, but we turn off the
34500Sstevel@tonic-gate * idle timer.
34510Sstevel@tonic-gate */
34520Sstevel@tonic-gate mutex_exit(&connmgr_lock);
34530Sstevel@tonic-gate have_connmgr_lock = 0;
34540Sstevel@tonic-gate if (clnt_stop_idle != NULL)
34550Sstevel@tonic-gate (*clnt_stop_idle)(q);
34560Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyall:"
34570Sstevel@tonic-gate " ignoring timeout from rpcmod"
34580Sstevel@tonic-gate " (q %p) because we are not"
34590Sstevel@tonic-gate " connected\n", (void *)q);
34600Sstevel@tonic-gate }
34610Sstevel@tonic-gate break;
34620Sstevel@tonic-gate case T_ORDREL_IND:
34630Sstevel@tonic-gate /*
34640Sstevel@tonic-gate * If this entry is marked closing, then we are
34650Sstevel@tonic-gate * completing a close handshake, and the
34660Sstevel@tonic-gate * connection is dead. Otherwise, the server is
34670Sstevel@tonic-gate * trying to close. Since the server will not
34680Sstevel@tonic-gate * be sending any more RPC replies, we abort
34690Sstevel@tonic-gate * the connection, including flushing
34700Sstevel@tonic-gate * any RPC requests that are in-transit.
34718956Sdai.ngo@sun.com * In either case, mark the entry as dead so
34728956Sdai.ngo@sun.com * that it can be closed by the connection
34738956Sdai.ngo@sun.com * manager's garbage collector.
34740Sstevel@tonic-gate */
34758956Sdai.ngo@sun.com cm_entry->x_dead = TRUE;
34760Sstevel@tonic-gate if (cm_entry->x_closing) {
34770Sstevel@tonic-gate mutex_exit(&connmgr_lock);
34780Sstevel@tonic-gate have_connmgr_lock = 0;
34790Sstevel@tonic-gate if (clnt_stop_idle != NULL)
34800Sstevel@tonic-gate (*clnt_stop_idle)(q);
34810Sstevel@tonic-gate } else {
34820Sstevel@tonic-gate /*
34830Sstevel@tonic-gate * if we're getting a disconnect
34840Sstevel@tonic-gate * before we've finished our
34850Sstevel@tonic-gate * connect attempt, mark it for
34860Sstevel@tonic-gate * later processing
34870Sstevel@tonic-gate */
34880Sstevel@tonic-gate if (cm_entry->x_thread)
34890Sstevel@tonic-gate cm_entry->x_early_disc = TRUE;
34900Sstevel@tonic-gate else
34910Sstevel@tonic-gate cm_entry->x_connected = FALSE;
34920Sstevel@tonic-gate cm_entry->x_waitdis = TRUE;
34930Sstevel@tonic-gate connmgr_snddis(cm_entry);
34940Sstevel@tonic-gate have_connmgr_lock = 0;
34950Sstevel@tonic-gate }
34960Sstevel@tonic-gate break;
34970Sstevel@tonic-gate
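			/*
			 * Acknowledgement of a disconnect or orderly-release
			 * request we sent earlier: clear x_waitdis and wake
			 * any thread waiting on x_dis_cv for the request to
			 * be acknowledged.
			 */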
34980Sstevel@tonic-gate case T_ERROR_ACK:
34990Sstevel@tonic-gate case T_OK_ACK:
35000Sstevel@tonic-gate cm_entry->x_waitdis = FALSE;
35010Sstevel@tonic-gate cv_signal(&cm_entry->x_dis_cv);
35020Sstevel@tonic-gate mutex_exit(&connmgr_lock);
35030Sstevel@tonic-gate return;
35040Sstevel@tonic-gate
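			/*
			 * A disconnect has been requested on this transport.
			 * If a connect attempt is still in flight, flag it
			 * for later processing (x_early_disc); otherwise mark
			 * the entry unconnected.  Then issue the T_DISCON_REQ
			 * via connmgr_snddis().
			 */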
35050Sstevel@tonic-gate case T_DISCON_REQ:
35060Sstevel@tonic-gate if (cm_entry->x_thread)
35070Sstevel@tonic-gate cm_entry->x_early_disc = TRUE;
35080Sstevel@tonic-gate else
35090Sstevel@tonic-gate cm_entry->x_connected = FALSE;
35100Sstevel@tonic-gate cm_entry->x_waitdis = TRUE;
35110Sstevel@tonic-gate
35120Sstevel@tonic-gate connmgr_snddis(cm_entry);
35130Sstevel@tonic-gate have_connmgr_lock = 0;
35140Sstevel@tonic-gate break;
35150Sstevel@tonic-gate
35160Sstevel@tonic-gate case T_DISCON_IND:
35170Sstevel@tonic-gate default:
35180Sstevel@tonic-gate /*
35190Sstevel@tonic-gate * If a close handshake was in progress, the disconnect
35200Sstevel@tonic-gate * completes it and the entry is dead; otherwise flag an
35210Sstevel@tonic-gate * in-flight connect attempt for later processing.
35220Sstevel@tonic-gate */
35230Sstevel@tonic-gate if (cm_entry->x_closing) {
35240Sstevel@tonic-gate cm_entry->x_dead = TRUE;
35250Sstevel@tonic-gate mutex_exit(&connmgr_lock);
35260Sstevel@tonic-gate have_connmgr_lock = 0;
35270Sstevel@tonic-gate if (clnt_stop_idle != NULL)
35280Sstevel@tonic-gate (*clnt_stop_idle)(q);
35290Sstevel@tonic-gate } else {
35300Sstevel@tonic-gate if (cm_entry->x_thread) {
35310Sstevel@tonic-gate cm_entry->x_early_disc = TRUE;
35320Sstevel@tonic-gate } else {
35330Sstevel@tonic-gate cm_entry->x_dead = TRUE;
35340Sstevel@tonic-gate cm_entry->x_connected = FALSE;
35350Sstevel@tonic-gate }
35360Sstevel@tonic-gate }
35370Sstevel@tonic-gate break;
35380Sstevel@tonic-gate }
35390Sstevel@tonic-gate break;
35400Sstevel@tonic-gate }
35410Sstevel@tonic-gate }
35420Sstevel@tonic-gate
35430Sstevel@tonic-gate if (have_connmgr_lock)
35440Sstevel@tonic-gate mutex_exit(&connmgr_lock);
35450Sstevel@tonic-gate
35460Sstevel@tonic-gate if (msg_type == T_ERROR_ACK || msg_type == T_OK_ACK) {
35470Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyall: (wq %p) could not find "
35480Sstevel@tonic-gate "connmgr entry for discon ack\n", (void *)q);
35490Sstevel@tonic-gate return;
35500Sstevel@tonic-gate }
35510Sstevel@tonic-gate
35520Sstevel@tonic-gate /*
35530Sstevel@tonic-gate * Then kick all the clnt_pending calls out of their wait. There
35540Sstevel@tonic-gate * should be no clnt_pending calls in the case of rpcmod's idle
35550Sstevel@tonic-gate * timer firing.
35560Sstevel@tonic-gate */
35570Sstevel@tonic-gate for (i = 0; i < clnt_cots_hash_size; i++) {
35580Sstevel@tonic-gate ctp = &cots_call_ht[i];
35590Sstevel@tonic-gate mutex_enter(&ctp->ct_lock);
35600Sstevel@tonic-gate for (e = ctp->ct_call_next;
35616403Sgt29601 e != (calllist_t *)ctp;
35626403Sgt29601 e = e->call_next) {
35630Sstevel@tonic-gate if (e->call_wq == q && e->call_notified == FALSE) {
35640Sstevel@tonic-gate RPCLOG(1,
35656403Sgt29601 "clnt_dispatch_notifyall for queue %p ",
35666403Sgt29601 (void *)q);
35670Sstevel@tonic-gate RPCLOG(1, "aborting clnt_pending call %p\n",
35686403Sgt29601 (void *)e);
35690Sstevel@tonic-gate
35700Sstevel@tonic-gate if (msg_type == T_DISCON_IND)
35710Sstevel@tonic-gate e->call_reason = reason;
35720Sstevel@tonic-gate e->call_notified = TRUE;
35730Sstevel@tonic-gate e->call_status = RPC_XPRTFAILED;
35740Sstevel@tonic-gate cv_signal(&e->call_cv);
35750Sstevel@tonic-gate }
35760Sstevel@tonic-gate }
35770Sstevel@tonic-gate mutex_exit(&ctp->ct_lock);
35780Sstevel@tonic-gate }
35790Sstevel@tonic-gate
35800Sstevel@tonic-gate mutex_enter(&clnt_pending_lock);
35810Sstevel@tonic-gate for (e = clnt_pending; e; e = e->call_next) {
35820Sstevel@tonic-gate /*
35830Sstevel@tonic-gate * Only signal those RPC handles that haven't been
35840Sstevel@tonic-gate * signalled yet. Otherwise we can get a bogus call_reason.
35850Sstevel@tonic-gate * This can happen if thread A is making a call over a
35860Sstevel@tonic-gate * connection. If the server is killed, it will cause a
35870Sstevel@tonic-gate * reset, and reason will default to EIO as a result of
35880Sstevel@tonic-gate * a T_ORDREL_IND. Thread B then attempts to recreate
35890Sstevel@tonic-gate * the connection but gets a T_DISCON_IND. If we set the
35900Sstevel@tonic-gate * call_reason code for all threads, then if thread A
35910Sstevel@tonic-gate * hasn't been dispatched yet, it will get the wrong
35920Sstevel@tonic-gate * reason. The bogus call_reason can make it harder to
35930Sstevel@tonic-gate * discriminate between calls that fail because the
35940Sstevel@tonic-gate * connection attempt failed versus those where the call
35950Sstevel@tonic-gate * may have been executed on the server.
35960Sstevel@tonic-gate */
35970Sstevel@tonic-gate if (e->call_wq == q && e->call_notified == FALSE) {
35980Sstevel@tonic-gate RPCLOG(1, "clnt_dispatch_notifyall for queue %p ",
35990Sstevel@tonic-gate (void *)q);
36000Sstevel@tonic-gate "aborting clnt_pending call %p\n",
36010Sstevel@tonic-gate (void *)e);
36020Sstevel@tonic-gate
36030Sstevel@tonic-gate if (msg_type == T_DISCON_IND)
36040Sstevel@tonic-gate e->call_reason = reason;
36050Sstevel@tonic-gate e->call_notified = TRUE;
36060Sstevel@tonic-gate /*
36070Sstevel@tonic-gate * Let the caller time out, else it will retry
36080Sstevel@tonic-gate * immediately.
36090Sstevel@tonic-gate */
36100Sstevel@tonic-gate e->call_status = RPC_XPRTFAILED;
36110Sstevel@tonic-gate
36120Sstevel@tonic-gate /*
36130Sstevel@tonic-gate * We used to just signal those threads
36140Sstevel@tonic-gate * waiting for a connection, (call_xid = 0).
36150Sstevel@tonic-gate * That meant that threads waiting for a response
36160Sstevel@tonic-gate * waited till their timeout expired. This
36170Sstevel@tonic-gate * could be a long time if they've specified a
36180Sstevel@tonic-gate * maximum timeout (2^31 - 1). So we
36190Sstevel@tonic-gate * signal all threads now.
36200Sstevel@tonic-gate */
36210Sstevel@tonic-gate cv_signal(&e->call_cv);
36220Sstevel@tonic-gate }
36230Sstevel@tonic-gate }
36240Sstevel@tonic-gate mutex_exit(&clnt_pending_lock);
36250Sstevel@tonic-gate }
36260Sstevel@tonic-gate
36270Sstevel@tonic-gate
36280Sstevel@tonic-gate /*ARGSUSED*/
36290Sstevel@tonic-gate /*
36300Sstevel@tonic-gate * after resuming a system that's been suspended for longer than the
36310Sstevel@tonic-gate * NFS server's idle timeout (svc_idle_timeout for Solaris 2), rfscall()
36320Sstevel@tonic-gate * generates "NFS server X not responding" and "NFS server X ok" messages;
36330Sstevel@tonic-gate * here we reset inet connections to cause a re-connect and avoid those
36340Sstevel@tonic-gate * NFS messages. see 4045054
36350Sstevel@tonic-gate */
36360Sstevel@tonic-gate boolean_t
36370Sstevel@tonic-gate connmgr_cpr_reset(void *arg, int code)
36380Sstevel@tonic-gate {
36390Sstevel@tonic-gate struct cm_xprt *cxp;
36400Sstevel@tonic-gate
36410Sstevel@tonic-gate if (code == CB_CODE_CPR_CHKPT)
36420Sstevel@tonic-gate return (B_TRUE);
36430Sstevel@tonic-gate
36440Sstevel@tonic-gate if (mutex_tryenter(&connmgr_lock) == 0)
36450Sstevel@tonic-gate return (B_FALSE);
36460Sstevel@tonic-gate for (cxp = cm_hd; cxp; cxp = cxp->x_next) {
36470Sstevel@tonic-gate if ((cxp->x_family == AF_INET || cxp->x_family == AF_INET6) &&
36486403Sgt29601 cxp->x_connected == TRUE) {
36490Sstevel@tonic-gate if (cxp->x_thread)
36500Sstevel@tonic-gate cxp->x_early_disc = TRUE;
36510Sstevel@tonic-gate else
36520Sstevel@tonic-gate cxp->x_connected = FALSE;
36530Sstevel@tonic-gate cxp->x_needdis = TRUE;
36540Sstevel@tonic-gate }
36550Sstevel@tonic-gate }
36560Sstevel@tonic-gate mutex_exit(&connmgr_lock);
36570Sstevel@tonic-gate return (B_TRUE);
36580Sstevel@tonic-gate }
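/*
 * Illustrative sketch (not part of the original source): connmgr_cpr_reset()
 * is a checkpoint/resume (CPR) callback, so it is presumably registered with
 * the callb framework along these lines; the exact call site and the
 * "rpc_cots" label are assumptions.
 */
#if 0
	(void) callb_add(connmgr_cpr_reset, NULL, CB_CL_CPR_RPC, "rpc_cots");
#endif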
36590Sstevel@tonic-gate
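/*
 * Create (and, below, destroy) the per-zone "rpc_cots_client" kstat block
 * that exports the client-side COTS RPC statistics counters.
 */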
36600Sstevel@tonic-gate void
36610Sstevel@tonic-gate clnt_cots_stats_init(zoneid_t zoneid, struct rpc_cots_client **statsp)
36620Sstevel@tonic-gate {
36630Sstevel@tonic-gate
36640Sstevel@tonic-gate *statsp = (struct rpc_cots_client *)rpcstat_zone_init_common(zoneid,
36650Sstevel@tonic-gate "unix", "rpc_cots_client", (const kstat_named_t *)&cots_rcstat_tmpl,
36660Sstevel@tonic-gate sizeof (cots_rcstat_tmpl));
36670Sstevel@tonic-gate }
36680Sstevel@tonic-gate
36690Sstevel@tonic-gate void
36700Sstevel@tonic-gate clnt_cots_stats_fini(zoneid_t zoneid, struct rpc_cots_client **statsp)
36710Sstevel@tonic-gate {
36720Sstevel@tonic-gate rpcstat_zone_fini_common(zoneid, "unix", "rpc_cots_client");
36730Sstevel@tonic-gate kmem_free(*statsp, sizeof (cots_rcstat_tmpl));
36740Sstevel@tonic-gate }
36750Sstevel@tonic-gate
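/*
 * Module initialization: set up the connection-manager and pending-call
 * locks, size and allocate the call hash table, and register the per-zone
 * destructor for the COTS client state.
 */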
36760Sstevel@tonic-gate void
36770Sstevel@tonic-gate clnt_cots_init(void)
36780Sstevel@tonic-gate {
36790Sstevel@tonic-gate mutex_init(&connmgr_lock, NULL, MUTEX_DEFAULT, NULL);
36800Sstevel@tonic-gate mutex_init(&clnt_pending_lock, NULL, MUTEX_DEFAULT, NULL);
36810Sstevel@tonic-gate
36820Sstevel@tonic-gate if (clnt_cots_hash_size < DEFAULT_MIN_HASH_SIZE)
36830Sstevel@tonic-gate clnt_cots_hash_size = DEFAULT_MIN_HASH_SIZE;
36840Sstevel@tonic-gate
36850Sstevel@tonic-gate cots_call_ht = call_table_init(clnt_cots_hash_size);
36860Sstevel@tonic-gate zone_key_create(&zone_cots_key, NULL, NULL, clnt_zone_destroy);
36870Sstevel@tonic-gate }
36880Sstevel@tonic-gate
36890Sstevel@tonic-gate void
36900Sstevel@tonic-gate clnt_cots_fini(void)
36910Sstevel@tonic-gate {
36920Sstevel@tonic-gate (void) zone_key_delete(zone_cots_key);
36930Sstevel@tonic-gate }
36940Sstevel@tonic-gate
36950Sstevel@tonic-gate /*
36960Sstevel@tonic-gate * Wait for TPI ack, returns success only if expected ack is received
36970Sstevel@tonic-gate * within timeout period.
36980Sstevel@tonic-gate */
36990Sstevel@tonic-gate
37000Sstevel@tonic-gate static int
37010Sstevel@tonic-gate waitforack(calllist_t *e, t_scalar_t ack_prim, const struct timeval *waitp,
37020Sstevel@tonic-gate bool_t nosignal)
37030Sstevel@tonic-gate {
37040Sstevel@tonic-gate union T_primitives *tpr;
37050Sstevel@tonic-gate clock_t timout;
37060Sstevel@tonic-gate int cv_stat = 1;
37070Sstevel@tonic-gate
37080Sstevel@tonic-gate ASSERT(MUTEX_HELD(&clnt_pending_lock));
37090Sstevel@tonic-gate while (e->call_reply == NULL) {
37100Sstevel@tonic-gate if (waitp != NULL) {
37110Sstevel@tonic-gate timout = waitp->tv_sec * drv_usectohz(MICROSEC) +
371211066Srafael.vanoni@sun.com drv_usectohz(waitp->tv_usec);
37130Sstevel@tonic-gate if (nosignal)
371411066Srafael.vanoni@sun.com cv_stat = cv_reltimedwait(&e->call_cv,
371511066Srafael.vanoni@sun.com &clnt_pending_lock, timout, TR_CLOCK_TICK);
37160Sstevel@tonic-gate else
371711066Srafael.vanoni@sun.com cv_stat = cv_reltimedwait_sig(&e->call_cv,
371811066Srafael.vanoni@sun.com &clnt_pending_lock, timout, TR_CLOCK_TICK);
37190Sstevel@tonic-gate } else {
37200Sstevel@tonic-gate if (nosignal)
37210Sstevel@tonic-gate cv_wait(&e->call_cv, &clnt_pending_lock);
37220Sstevel@tonic-gate else
37230Sstevel@tonic-gate cv_stat = cv_wait_sig(&e->call_cv,
37240Sstevel@tonic-gate &clnt_pending_lock);
37250Sstevel@tonic-gate }
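		/*
		 * cv_reltimedwait(_sig) returns -1 once the timeout expires,
		 * and the _sig variants return 0 when interrupted by a
		 * signal; map these to ETIME and EINTR respectively.
		 */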
37260Sstevel@tonic-gate if (cv_stat == -1)
37270Sstevel@tonic-gate return (ETIME);
37280Sstevel@tonic-gate if (cv_stat == 0)
37290Sstevel@tonic-gate return (EINTR);
37306632Sgt29601 /*
37316632Sgt29601 * if we received an error from the server and we know a reply
37326632Sgt29601 * is not going to be sent, do not wait for the full timeout,
37336632Sgt29601 * return now.
37346632Sgt29601 */
37356632Sgt29601 if (e->call_status == RPC_XPRTFAILED)
37366632Sgt29601 return (e->call_reason);
37370Sstevel@tonic-gate }
37380Sstevel@tonic-gate tpr = (union T_primitives *)e->call_reply->b_rptr;
37390Sstevel@tonic-gate if (tpr->type == ack_prim)
37400Sstevel@tonic-gate return (0); /* Success */
37410Sstevel@tonic-gate
37420Sstevel@tonic-gate if (tpr->type == T_ERROR_ACK) {
37430Sstevel@tonic-gate if (tpr->error_ack.TLI_error == TSYSERR)
37440Sstevel@tonic-gate return (tpr->error_ack.UNIX_error);
37450Sstevel@tonic-gate else
37460Sstevel@tonic-gate return (t_tlitosyserr(tpr->error_ack.TLI_error));
37470Sstevel@tonic-gate }
37480Sstevel@tonic-gate
37490Sstevel@tonic-gate return (EPROTO); /* unknown or unexpected primitive */
37500Sstevel@tonic-gate }