/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
 * All Rights Reserved
 */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

/*
 * Implements a kernel based, client side RPC.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/tiuser.h>
#include <sys/tihdr.h>
#include <sys/t_kuser.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/kstat.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/disp.h>
#include <sys/taskq.h>
#include <sys/list.h>
#include <sys/atomic.h>
#include <sys/zone.h>
#include <netinet/in.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>

static enum clnt_stat clnt_clts_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void clnt_clts_kabort(CLIENT *);
static void clnt_clts_kerror(CLIENT *, struct rpc_err *);
static bool_t clnt_clts_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static bool_t clnt_clts_kcontrol(CLIENT *, int, char *);
static void clnt_clts_kdestroy(CLIENT *);
static int clnt_clts_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void (*)(), caddr_t, uint32_t);

/*
 * Operations vector for CLTS based RPC
 */
static struct clnt_ops clts_ops = {
        clnt_clts_kcallit,      /* do rpc call */
        clnt_clts_kabort,       /* abort call */
        clnt_clts_kerror,       /* return error status */
        clnt_clts_kfreeres,     /* free results */
        clnt_clts_kdestroy,     /* destroy rpc handle */
        clnt_clts_kcontrol,     /* the ioctl() of rpc */
        clnt_clts_ksettimers    /* set retry timers */
};
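/*
 * Illustrative note (added commentary, not from the original source):
 * consumers reach this vector through the generic CLIENT macros in
 * <rpc/clnt.h>; e.g. CLNT_CALL(h, proc, xargs, argsp, xres, resp, timeo)
 * expands to (*(h)->cl_ops->cl_call)(...), which lands in
 * clnt_clts_kcallit() for handles created by clnt_clts_kcreate() below.
 */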
/*
 * Endpoint for CLTS (INET, INET6, loopback, etc.)
 */
typedef struct endpnt_type {
        struct endpnt_type *e_next;     /* pointer to next endpoint type */
        list_t          e_pool;         /* list of available endpoints */
        list_t          e_ilist;        /* list of idle endpoints */
        struct endpnt   *e_pcurr;       /* pointer to current endpoint */
        char            e_protofmly[KNC_STRSIZE];       /* protocol family */
        dev_t           e_rdev;         /* device */
        kmutex_t        e_plock;        /* pool lock */
        kmutex_t        e_ilock;        /* idle list lock */
        timeout_id_t    e_itimer;       /* timer to dispatch the taskq */
        uint_t          e_cnt;          /* number of endpoints in the pool */
        zoneid_t        e_zoneid;       /* zoneid of endpoint type */
        kcondvar_t      e_async_cv;     /* cv for asynchronous reap threads */
        uint_t          e_async_count;  /* count of asynchronous reap threads */
} endpnt_type_t;

typedef struct endpnt {
        list_node_t     e_node;         /* link to the pool */
        list_node_t     e_idle;         /* link to the idle list */
        endpnt_type_t   *e_type;        /* back pointer to endpoint type */
        TIUSER          *e_tiptr;       /* pointer to transport endpoint */
        queue_t         *e_wq;          /* write queue */
        uint_t          e_flags;        /* endpoint flags */
        uint_t          e_ref;          /* ref count on endpoint */
        kcondvar_t      e_cv;           /* condition variable */
        kmutex_t        e_lock;         /* protects cv and flags */
        time_t          e_itime;        /* time when rele'd */
} endpnt_t;

#define ENDPNT_ESTABLISHED      0x1     /* endpoint is established */
#define ENDPNT_WAITING          0x2     /* thread waiting for endpoint */
#define ENDPNT_BOUND            0x4     /* endpoint is bound */
#define ENDPNT_STALE            0x8     /* endpoint is dead */
#define ENDPNT_ONIDLE           0x10    /* endpoint is on the idle list */

static krwlock_t        endpnt_type_lock;       /* protects endpnt_type_list */
static endpnt_type_t    *endpnt_type_list = NULL; /* list of CLTS endpoints */
static struct kmem_cache *endpnt_cache;         /* cache of endpnt_t's */
static taskq_t          *endpnt_taskq;          /* endpnt_t reaper thread */
static bool_t           taskq_created;          /* flag for endpnt_taskq */
static kmutex_t         endpnt_taskq_lock;      /* taskq lock */
static zone_key_t       endpnt_destructor_key;

#define DEFAULT_ENDPOINT_REAP_INTERVAL  60      /* 1 minute */
#define DEFAULT_INTERVAL_SHIFT          30      /* 30 seconds */

/*
 * Endpoint tunables
 */
static int      clnt_clts_max_endpoints = -1;
static int      clnt_clts_hash_size = DEFAULT_HASH_SIZE;
static time_t   clnt_clts_endpoint_reap_interval = -1;
static clock_t  clnt_clts_taskq_dispatch_interval;
/*
 * Response completion hash queue
 */
static call_table_t *clts_call_ht;

/*
 * Routines for the endpoint manager
 */
static struct endpnt_type *endpnt_type_create(struct knetconfig *);
static void endpnt_type_free(struct endpnt_type *);
static int check_endpnt(struct endpnt *, struct endpnt **);
static struct endpnt *endpnt_get(struct knetconfig *);
static void endpnt_rele(struct endpnt *);
static void endpnt_reap_settimer(endpnt_type_t *);
static void endpnt_reap(endpnt_type_t *);
static void endpnt_reap_dispatch(void *);
static void endpnt_reclaim(zoneid_t);


/*
 * Request dispatching function.
 */
static int clnt_clts_dispatch_send(queue_t *q, mblk_t *, struct netbuf *addr,
    calllist_t *, uint_t);

/*
 * The size of the preserialized RPC header information.
 */
#define CKU_HDRSIZE     20
/*
 * The initial allocation size.  It is small to reduce space requirements.
 */
#define CKU_INITSIZE    2048
/*
 * The size of additional allocations, if required.  It is larger to
 * reduce the number of actual allocations.
 */
#define CKU_ALLOCSIZE   8192
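/*
 * Note (inferred, not stated in the original): xdr_callhdr() pre-serializes
 * five 32-bit XDR words (xid, direction, rpcvers, prog, vers), which is
 * where CKU_HDRSIZE = 5 * BYTES_PER_XDR_UNIT = 20 comes from.  The extra
 * four bytes in cku_rpchdr[CKU_HDRSIZE + 4] hold the procedure number in
 * the RPCSEC_GSS path of clnt_clts_kcallit_addr() below.
 */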
/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_clts_kcreate, and freed by clnt_clts_kdestroy.
 */
struct cku_private {
        CLIENT                  cku_client;     /* client handle */
        int                     cku_retrys;     /* request retries */
        calllist_t              cku_call;
        struct endpnt           *cku_endpnt;    /* open end point */
        struct knetconfig       cku_config;
        struct netbuf           cku_addr;       /* remote address */
        struct rpc_err          cku_err;        /* error status */
        XDR                     cku_outxdr;     /* xdr stream for output */
        XDR                     cku_inxdr;      /* xdr stream for input */
        char                    cku_rpchdr[CKU_HDRSIZE + 4];    /* rpc header */
        struct cred             *cku_cred;      /* credentials */
        struct rpc_timers       *cku_timers;    /* for estimating RTT */
        struct rpc_timers       *cku_timeall;   /* for estimating RTT */
        void                    (*cku_feedback)(int, int, caddr_t);
                                                /* ptr to feedback rtn */
        caddr_t                 cku_feedarg;    /* argument for feedback func */
        uint32_t                cku_xid;        /* current XID */
        bool_t                  cku_bcast;      /* RPC broadcast hint */
        struct rpc_clts_client  *cku_stats;     /* counters for the zone */
};

static const struct rpc_clts_client {
        kstat_named_t   rccalls;
        kstat_named_t   rcbadcalls;
        kstat_named_t   rcretrans;
        kstat_named_t   rcbadxids;
        kstat_named_t   rctimeouts;
        kstat_named_t   rcnewcreds;
        kstat_named_t   rcbadverfs;
        kstat_named_t   rctimers;
        kstat_named_t   rcnomem;
        kstat_named_t   rccantsend;
} clts_rcstat_tmpl = {
        { "calls",      KSTAT_DATA_UINT64 },
        { "badcalls",   KSTAT_DATA_UINT64 },
        { "retrans",    KSTAT_DATA_UINT64 },
        { "badxids",    KSTAT_DATA_UINT64 },
        { "timeouts",   KSTAT_DATA_UINT64 },
        { "newcreds",   KSTAT_DATA_UINT64 },
        { "badverfs",   KSTAT_DATA_UINT64 },
        { "timers",     KSTAT_DATA_UINT64 },
        { "nomem",      KSTAT_DATA_UINT64 },
        { "cantsend",   KSTAT_DATA_UINT64 },
};

static uint_t clts_rcstat_ndata =
        sizeof (clts_rcstat_tmpl) / sizeof (kstat_named_t);

#define RCSTAT_INCR(s, x)       \
        atomic_add_64(&(s)->x.value.ui64, 1)

#define ptoh(p)         (&((p)->cku_client))
#define htop(h)         ((struct cku_private *)((h)->cl_private))

/*
 * Times to retry
 */
#define SNDTRIES        4
#define REFRESHES       2       /* authentication refreshes */

static int clnt_clts_do_bindresvport = 1;       /* bind to reserved port */
#define BINDRESVPORT_RETRIES    5
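/*
 * Observability sketch (illustrative): the counters bumped with
 * RCSTAT_INCR() surface as the per-zone named kstat
 * "unix:0:rpc_clts_client" (plus the backward-compatible "rpc_client"
 * instance created below), so they can be inspected from a shell with,
 * for example:
 *
 *      kstat -m unix -n rpc_clts_client
 */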
void
clnt_clts_stats_init(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
        kstat_t *ksp;
        kstat_named_t *knp;

        knp = rpcstat_zone_init_common(zoneid, "unix", "rpc_clts_client",
            (const kstat_named_t *)&clts_rcstat_tmpl,
            sizeof (clts_rcstat_tmpl));
        /*
         * Backwards compatibility for old kstat clients
         */
        ksp = kstat_create_zone("unix", 0, "rpc_client", "rpc",
            KSTAT_TYPE_NAMED, clts_rcstat_ndata,
            KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE, zoneid);
        if (ksp) {
                ksp->ks_data = knp;
                kstat_install(ksp);
        }
        *statsp = (struct rpc_clts_client *)knp;
}

void
clnt_clts_stats_fini(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
        rpcstat_zone_fini_common(zoneid, "unix", "rpc_clts_client");
        kstat_delete_byname_zone("unix", 0, "rpc_client", zoneid);
        kmem_free(*statsp, sizeof (clts_rcstat_tmpl));
}
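/*
 * Usage sketch (illustrative only; the knetconfig and netbuf setup for
 * "knconf" and "srvaddr" is elided).  A kernel consumer would typically do:
 *
 *      CLIENT *cl;
 *      int error = clnt_clts_kcreate(&knconf, &srvaddr, NFS_PROGRAM,
 *          NFS_VERSION, 4, CRED(), &cl);
 *      if (error == 0) {
 *              ... CLNT_CALL(cl, proc, xargs, argsp, xres, resp, timeo) ...
 *              CLNT_DESTROY(cl);
 *      }
 */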
/*
 * Create an rpc handle for a clts rpc connection.
 * Allocates space for the handle structure and the private data.
 */
/* ARGSUSED */
int
clnt_clts_kcreate(struct knetconfig *config, struct netbuf *addr,
    rpcprog_t pgm, rpcvers_t vers, int retrys, struct cred *cred,
    CLIENT **cl)
{
        CLIENT *h;
        struct cku_private *p;
        struct rpc_msg call_msg;
        int error;
        int plen;

        if (cl == NULL)
                return (EINVAL);

        *cl = NULL;
        error = 0;

        p = kmem_zalloc(sizeof (*p), KM_SLEEP);

        h = ptoh(p);

        /* handle */
        h->cl_ops = &clts_ops;
        h->cl_private = (caddr_t)p;
        h->cl_auth = authkern_create();

        /* call message, just used to pre-serialize below */
        call_msg.rm_xid = 0;
        call_msg.rm_direction = CALL;
        call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
        call_msg.rm_call.cb_prog = pgm;
        call_msg.rm_call.cb_vers = vers;

        /* private */
        clnt_clts_kinit(h, addr, retrys, cred);

        xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, CKU_HDRSIZE, XDR_ENCODE);

        /* pre-serialize call message header */
        if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
                error = EINVAL;         /* XXX */
                goto bad;
        }

        p->cku_config.knc_rdev = config->knc_rdev;
        p->cku_config.knc_semantics = config->knc_semantics;
        plen = strlen(config->knc_protofmly) + 1;
        p->cku_config.knc_protofmly = kmem_alloc(plen, KM_SLEEP);
        bcopy(config->knc_protofmly, p->cku_config.knc_protofmly, plen);

        cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);

        *cl = h;
        return (0);

bad:
        auth_destroy(h->cl_auth);
        kmem_free(p->cku_addr.buf, addr->maxlen);
        kmem_free(p, sizeof (struct cku_private));

        return (error);
}

void
clnt_clts_kinit(CLIENT *h, struct netbuf *addr, int retrys, cred_t *cred)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);
        struct rpcstat *rsp;

        rsp = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
        ASSERT(rsp != NULL);

        p->cku_retrys = retrys;

        if (p->cku_addr.maxlen < addr->len) {
                if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
                        kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);

                p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
                p->cku_addr.maxlen = addr->maxlen;
        }

        p->cku_addr.len = addr->len;
        bcopy(addr->buf, p->cku_addr.buf, addr->len);

        p->cku_cred = cred;
        p->cku_xid = 0;
        p->cku_timers = NULL;
        p->cku_timeall = NULL;
        p->cku_feedback = NULL;
        p->cku_bcast = FALSE;
        p->cku_call.call_xid = 0;
        p->cku_call.call_hash = 0;
        p->cku_call.call_notified = FALSE;
        p->cku_call.call_next = NULL;
        p->cku_call.call_prev = NULL;
        p->cku_call.call_reply = NULL;
        p->cku_call.call_wq = NULL;
        p->cku_stats = rsp->rpc_clts_client;
}

/*
 * Set the timers.  Returns the current retransmission timeout.
 */
static int
clnt_clts_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
    int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg,
    uint32_t xid)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);
        int value;

        p->cku_feedback = feedback;
        p->cku_feedarg = arg;
        p->cku_timers = t;
        p->cku_timeall = all;
        if (xid)
                p->cku_xid = xid;
        value = all->rt_rtxcur;
        value += t->rt_rtxcur;
        if (value < minimum)
                return (minimum);
        RCSTAT_INCR(p->cku_stats, rctimers);
        return (value);
}

/*
 * Timeout backoff function.  tim is in clock ticks (hz units).
 */
#define MAXTIMO (20 * hz)
#define backoff(tim)    (((tim) < MAXTIMO) ? dobackoff(tim) : (tim))
#define dobackoff(tim)  ((((tim) << 1) > MAXTIMO) ? MAXTIMO : ((tim) << 1))

#define RETRY_POLL_TIMO 30
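/*
 * Worked example (illustrative, assuming hz = 100): an initial timeout of
 * 110 ticks (1.1s) backs off through 220, 440, 880 and 1760 ticks, then
 * clamps at MAXTIMO = 2000 ticks (20s).  dobackoff() never returns more
 * than MAXTIMO, and backoff() stops doubling once tim reaches it.
 */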
/*
 * Call remote procedure.
 * Most of the work of rpc is done here.  We serialize what is left
 * of the header (some was pre-serialized in the handle), serialize
 * the arguments, and send it off.  We wait for a reply or a time out.
 * Timeout causes an immediate return; other packet problems may cause
 * a retry on the receive.  When a good packet is received we deserialize
 * it, and check verification.  A bad reply code will cause one retry
 * with full (longhand) credentials.
 */
enum clnt_stat
clnt_clts_kcallit_addr(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
    caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
    struct timeval wait, struct netbuf *sin)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);
        XDR *xdrs;
        int stries = p->cku_retrys;
        int refreshes = REFRESHES;      /* number of times to refresh cred */
        int round_trip;                 /* time the RPC */
        int error;
        int hdrsz;
        mblk_t *mp;
        mblk_t *mpdup;
        mblk_t *resp = NULL;
        mblk_t *tmp;
        calllist_t *call = &p->cku_call;
        clock_t timout = 0;
        bool_t interrupted;
        enum clnt_stat status;
        struct rpc_msg reply_msg;
        enum clnt_stat re_status;

        RCSTAT_INCR(p->cku_stats, rccalls);

        RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_sec: %ld\n", wait.tv_sec);
        RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_usec: %ld\n", wait.tv_usec);

        timout = TIMEVAL_TO_TICK(&wait);

        if (p->cku_xid == 0) {
                p->cku_xid = alloc_xid();
                if (p->cku_endpnt != NULL)
                        endpnt_rele(p->cku_endpnt);
                p->cku_endpnt = NULL;
        }

        mpdup = NULL;
call_again:

        if (mpdup == NULL) {

                while ((mp = allocb(CKU_INITSIZE, BPRI_LO)) == NULL) {
                        if (strwaitbuf(CKU_INITSIZE, BPRI_LO)) {
                                p->cku_err.re_status = RPC_SYSTEMERROR;
                                p->cku_err.re_errno = ENOSR;
                                goto done;
                        }
                }

                xdrs = &p->cku_outxdr;
                xdrmblk_init(xdrs, mp, XDR_ENCODE, CKU_ALLOCSIZE);

                if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
                        /*
                         * Copy in the preserialized RPC header
                         * information.
                         */
                        bcopy(p->cku_rpchdr, mp->b_rptr, CKU_HDRSIZE);

                        /*
                         * transaction id is the 1st thing in the output
                         * buffer.
                         */
                        /* LINTED pointer alignment */
                        (*(uint32_t *)(mp->b_rptr)) = p->cku_xid;

                        /* Skip the preserialized stuff. */
                        XDR_SETPOS(xdrs, CKU_HDRSIZE);

                        /* Serialize dynamic stuff into the output buffer. */
                        if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
                            (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
                            (!(*xdr_args)(xdrs, argsp))) {
                                freemsg(mp);
                                p->cku_err.re_status = RPC_CANTENCODEARGS;
                                p->cku_err.re_errno = EIO;
                                goto done;
                        }
                } else {
                        uint32_t *uproc = (uint32_t *)
                            &p->cku_rpchdr[CKU_HDRSIZE];
                        IXDR_PUT_U_INT32(uproc, procnum);

                        (*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;
                        XDR_SETPOS(xdrs, 0);

                        /* Serialize the procedure number and the arguments. */
                        if (!AUTH_WRAP(h->cl_auth, (caddr_t)p->cku_rpchdr,
                            CKU_HDRSIZE + 4, xdrs, xdr_args, argsp)) {
                                freemsg(mp);
                                p->cku_err.re_status = RPC_CANTENCODEARGS;
                                p->cku_err.re_errno = EIO;
                                goto done;
                        }
                }
        } else
                mp = mpdup;

        mpdup = dupmsg(mp);
        if (mpdup == NULL) {
                freemsg(mp);
                p->cku_err.re_status = RPC_SYSTEMERROR;
                p->cku_err.re_errno = ENOSR;
                goto done;
        }

        /*
         * Grab an endpnt only if the endpoint is NULL.  We could be retrying
         * the request and in this case we want to go through the same
         * source port, so that the duplicate request cache may detect a
         * retry.
         */
        if (p->cku_endpnt == NULL)
                p->cku_endpnt = endpnt_get(&p->cku_config);

        if (p->cku_endpnt == NULL) {
                freemsg(mp);
                p->cku_err.re_status = RPC_SYSTEMERROR;
                p->cku_err.re_errno = ENOSR;
                goto done;
        }

        round_trip = lbolt;

        error = clnt_clts_dispatch_send(p->cku_endpnt->e_wq, mp,
            &p->cku_addr, call, p->cku_xid);

        if (error != 0) {
                freemsg(mp);
                p->cku_err.re_status = RPC_CANTSEND;
                p->cku_err.re_errno = error;
                RCSTAT_INCR(p->cku_stats, rccantsend);
                goto done1;
        }

        RPCLOG(64, "clnt_clts_kcallit_addr: sent call for xid 0x%x\n",
            p->cku_xid);

        /*
         * There are two reasons for which we go back to tryread.
         *
         * a) In case the status is RPC_PROCUNAVAIL and we sent out a
         *    broadcast, we should not get any invalid messages with the
         *    RPC_PROCUNAVAIL error back.  Some broken RPC implementations
         *    send them anyway, so we have to ignore them (as if we had
         *    never received them) and look for another message which
         *    might contain the valid response, because we don't know
         *    how many broken implementations are out in the network.
         *    So we are going to loop until
         *    - we receive a valid response, or
         *    - we have processed all invalid responses and get a
         *      time out when we try to receive the next message.
         *
         * b) We will also jump back to tryread in case we failed
         *    within AUTH_VALIDATE.  In this case we should move on
         *    and loop until we receive a valid response or we have
         *    processed all responses with broken authentication and
         *    get a time out when we try to receive a message.
         */
tryread:
        mutex_enter(&call->call_lock);
        interrupted = FALSE;
        if (call->call_notified == FALSE) {
                klwp_t *lwp = ttolwp(curthread);
                clock_t cv_wait_ret = 1; /* init to > 0 */
                clock_t cv_timout = timout;

                if (lwp != NULL)
                        lwp->lwp_nostop++;

                cv_timout += lbolt;

                if (h->cl_nosignal)
                        while ((cv_wait_ret =
                            cv_timedwait(&call->call_cv,
                            &call->call_lock, cv_timout)) > 0 &&
                            call->call_notified == FALSE)
                                ;
                else
                        while ((cv_wait_ret =
                            cv_timedwait_sig(&call->call_cv,
                            &call->call_lock, cv_timout)) > 0 &&
                            call->call_notified == FALSE)
                                ;

                if (cv_wait_ret == 0)
                        interrupted = TRUE;

                if (lwp != NULL)
                        lwp->lwp_nostop--;
        }
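        /*
         * Note (added commentary): cv_timedwait(9F) and cv_timedwait_sig(9F)
         * take an absolute wakeup time in ticks, which is why "timout" was
         * biased by lbolt above.  A return value of -1 means the deadline
         * passed, while 0 from cv_timedwait_sig() means a signal was taken,
         * hence the "interrupted" flag.
         */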
        resp = call->call_reply;
        call->call_reply = NULL;
        status = call->call_status;
        /*
         * We have to reset call_notified here.  In case we have to do a
         * retry (e.g. in case we got an RPC_PROCUNAVAIL error) we need to
         * set it to FALSE to ensure that we will wait for the next
         * message.  When the next message arrives, clnt_clts_dispatch_notify
         * will set it to TRUE again.
         */
        call->call_notified = FALSE;
        mutex_exit(&call->call_lock);

        if (status == RPC_TIMEDOUT) {
                if (interrupted) {
                        /*
                         * We got interrupted, bail out
                         */
                        p->cku_err.re_status = RPC_INTR;
                        p->cku_err.re_errno = EINTR;
                        goto done1;
                } else {
                        /*
                         * It's possible that our response arrived
                         * right after we timed out.  Check to see
                         * if it has arrived before we remove the
                         * calllist from the dispatch queue.
                         */
                        mutex_enter(&call->call_lock);
                        if (call->call_notified == TRUE) {
                                resp = call->call_reply;
                                call->call_reply = NULL;
                                mutex_exit(&call->call_lock);
                                RPCLOG(8, "clnt_clts_kcallit_addr: "
                                    "response received for request "
                                    "w/xid 0x%x after timeout\n",
                                    p->cku_xid);
                                goto getresponse;
                        }
                        mutex_exit(&call->call_lock);

                        RPCLOG(8, "clnt_clts_kcallit_addr: "
                            "request w/xid 0x%x timedout "
                            "waiting for reply\n", p->cku_xid);
#if 0 /* XXX not yet */
                        /*
                         * Timeout may be due to a dead gateway.  Send
                         * an ioctl downstream advising deletion of
                         * route when we reach the half-way point to
                         * timing out.
                         */
                        if (stries == p->cku_retrys/2) {
                                t_kadvise(p->cku_endpnt->e_tiptr,
                                    (uchar_t *)p->cku_addr.buf,
                                    p->cku_addr.len);
                        }
#endif /* not yet */
                        p->cku_err.re_status = RPC_TIMEDOUT;
                        p->cku_err.re_errno = ETIMEDOUT;
                        RCSTAT_INCR(p->cku_stats, rctimeouts);
                        goto done1;
                }
        }

getresponse:
        /*
         * Check to see if a response arrived.  If one is present, then
         * proceed to process the response.  Otherwise fall through to
         * retry or retransmit the request.  This is probably not the
         * optimal thing to do, but since we are most likely dealing with
         * an unreliable transport it is the safe thing to do.
         */
        if (resp == NULL) {
                p->cku_err.re_status = RPC_CANTRECV;
                p->cku_err.re_errno = EIO;
                goto done1;
        }

        /*
         * Prepare the message for further processing.  We need to remove
         * the datagram header and copy the source address if necessary.
         * No need to verify the header since rpcmod took care of that.
         */
        /*
         * Copy the source address if the caller has supplied a netbuf.
         */
        if (sin != NULL) {
                union T_primitives *pptr;

                pptr = (union T_primitives *)resp->b_rptr;
                bcopy(resp->b_rptr + pptr->unitdata_ind.SRC_offset, sin->buf,
                    pptr->unitdata_ind.SRC_length);
                sin->len = pptr->unitdata_ind.SRC_length;
        }

        /*
         * Pop off the datagram header.
         */
        hdrsz = resp->b_wptr - resp->b_rptr;
        if ((resp->b_wptr - (resp->b_rptr + hdrsz)) == 0) {
                tmp = resp;
                resp = resp->b_cont;
                tmp->b_cont = NULL;
                freeb(tmp);
        } else {
                unsigned char *ud_off = resp->b_rptr;
                resp->b_rptr += hdrsz;
                tmp = dupb(resp);
                if (tmp == NULL) {
                        p->cku_err.re_status = RPC_SYSTEMERROR;
                        p->cku_err.re_errno = ENOSR;
                        freemsg(resp);
                        goto done1;
                }
                tmp->b_cont = resp->b_cont;
                resp->b_rptr = ud_off;
                freeb(resp);
                resp = tmp;
        }

        round_trip = lbolt - round_trip;
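        /*
         * Note on the fixed-point arithmetic below (the scaling is assumed
         * from the standard Van Jacobson scheme; the original comment does
         * not spell it out): rt_srtt holds the smoothed RTT scaled by 8 and
         * rt_deviate the mean deviation scaled by 4, so rt_rtxcur works out
         * to roughly srtt + 2 * deviation, in clock ticks.
         */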
        /*
         * Van Jacobson timer algorithm here, only if NOT a retransmission.
         */
        if (p->cku_timers != NULL && stries == p->cku_retrys) {
                int rt;

                rt = round_trip;
                rt -= (p->cku_timers->rt_srtt >> 3);
                p->cku_timers->rt_srtt += rt;
                if (rt < 0)
                        rt = -rt;
                rt -= (p->cku_timers->rt_deviate >> 2);
                p->cku_timers->rt_deviate += rt;
                p->cku_timers->rt_rtxcur =
                    (clock_t)((p->cku_timers->rt_srtt >> 2) +
                    p->cku_timers->rt_deviate) >> 1;

                rt = round_trip;
                rt -= (p->cku_timeall->rt_srtt >> 3);
                p->cku_timeall->rt_srtt += rt;
                if (rt < 0)
                        rt = -rt;
                rt -= (p->cku_timeall->rt_deviate >> 2);
                p->cku_timeall->rt_deviate += rt;
                p->cku_timeall->rt_rtxcur =
                    (clock_t)((p->cku_timeall->rt_srtt >> 2) +
                    p->cku_timeall->rt_deviate) >> 1;
                if (p->cku_feedback != NULL) {
                        (*p->cku_feedback)(FEEDBACK_OK, procnum,
                            p->cku_feedarg);
                }
        }

        /*
         * Process reply
         */
        xdrs = &(p->cku_inxdr);
        xdrmblk_init(xdrs, resp, XDR_DECODE, 0);

        reply_msg.rm_direction = REPLY;
        reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
        reply_msg.acpted_rply.ar_stat = SUCCESS;
        reply_msg.acpted_rply.ar_verf = _null_auth;
        /*
         * xdr_results will be done in AUTH_UNWRAP.
         */
        reply_msg.acpted_rply.ar_results.where = NULL;
        reply_msg.acpted_rply.ar_results.proc = xdr_void;

        /*
         * Decode and validate the response.
         */
        if (!xdr_replymsg(xdrs, &reply_msg)) {
                p->cku_err.re_status = RPC_CANTDECODERES;
                p->cku_err.re_errno = EIO;
                (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
                goto done1;
        }

        _seterr_reply(&reply_msg, &(p->cku_err));

        re_status = p->cku_err.re_status;
        if (re_status == RPC_SUCCESS) {
                /*
                 * Reply is good, check auth.
                 */
                if (!AUTH_VALIDATE(h->cl_auth,
                    &reply_msg.acpted_rply.ar_verf)) {
                        p->cku_err.re_status = RPC_AUTHERROR;
                        p->cku_err.re_why = AUTH_INVALIDRESP;
                        RCSTAT_INCR(p->cku_stats, rcbadverfs);
                        (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
                        goto tryread;
                }
                if (!AUTH_UNWRAP(h->cl_auth, xdrs, xdr_results, resultsp)) {
                        p->cku_err.re_status = RPC_CANTDECODERES;
                        p->cku_err.re_errno = EIO;
                }
                (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
                goto done1;
        }
        /* set errno in case we can't recover */
        if (re_status != RPC_VERSMISMATCH &&
            re_status != RPC_AUTHERROR &&
            re_status != RPC_PROGVERSMISMATCH)
                p->cku_err.re_errno = EIO;
        /*
         * Determine whether or not we're doing an RPC
         * broadcast.  Some server implementations don't
         * follow RFC 1050, section 7.4.2 in that they
         * don't remain silent when they see a proc
         * they don't support.  Therefore we keep trying
         * to receive on RPC_PROCUNAVAIL, hoping to get
         * a valid response from a compliant server.
         */
        if (re_status == RPC_PROCUNAVAIL && p->cku_bcast) {
                (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
                goto tryread;
        }
        if (re_status == RPC_AUTHERROR) {
                /*
                 * Maybe our credentials need to be refreshed
                 */
                if (refreshes > 0 &&
                    AUTH_REFRESH(h->cl_auth, &reply_msg, p->cku_cred)) {
                        /*
                         * The credential is refreshed.  Try the request
                         * again.  Even if stries == 0, we still retry as
                         * long as refreshes > 0.  This prevents a soft
                         * authentication error turning into a hard one at
                         * an upper level.
                         */
                        refreshes--;
                        RCSTAT_INCR(p->cku_stats, rcbadcalls);
                        RCSTAT_INCR(p->cku_stats, rcnewcreds);

                        (void) xdr_rpc_free_verifier(xdrs, &reply_msg);
                        freemsg(mpdup);
                        call_table_remove(call);
                        mutex_enter(&call->call_lock);
                        if (call->call_reply != NULL) {
                                freemsg(call->call_reply);
                                call->call_reply = NULL;
                        }
                        mutex_exit(&call->call_lock);

                        freemsg(resp);
                        mpdup = NULL;
                        goto call_again;
                }
                /*
                 * We have used the client handle to do an AUTH_REFRESH
                 * and the RPC status may be set to RPC_SUCCESS;
                 * make sure it is set back to RPC_AUTHERROR.
                 */
                p->cku_err.re_status = RPC_AUTHERROR;

                /*
                 * Map recoverable and unrecoverable
                 * authentication errors to appropriate errno
                 */
                switch (p->cku_err.re_why) {
                case AUTH_BADCRED:
                case AUTH_BADVERF:
                case AUTH_INVALIDRESP:
                case AUTH_TOOWEAK:
                case AUTH_FAILED:
                case RPCSEC_GSS_NOCRED:
                case RPCSEC_GSS_FAILED:
                        p->cku_err.re_errno = EACCES;
                        break;
                case AUTH_REJECTEDCRED:
                case AUTH_REJECTEDVERF:
                default:
                        p->cku_err.re_errno = EIO;
                        break;
                }
                RPCLOG(1, "clnt_clts_kcallit : authentication failed "
                    "with RPC_AUTHERROR of type %d\n",
                    p->cku_err.re_why);
        }

        (void) xdr_rpc_free_verifier(xdrs, &reply_msg);

done1:
        call_table_remove(call);
        mutex_enter(&call->call_lock);
        if (call->call_reply != NULL) {
                freemsg(call->call_reply);
                call->call_reply = NULL;
        }
        mutex_exit(&call->call_lock);
        RPCLOG(64, "clnt_clts_kcallit_addr: xid 0x%x taken off dispatch list",
            p->cku_xid);

done:
        if (resp != NULL) {
                freemsg(resp);
                resp = NULL;
        }

        if ((p->cku_err.re_status != RPC_SUCCESS) &&
            (p->cku_err.re_status != RPC_INTR) &&
            (p->cku_err.re_status != RPC_UDERROR) &&
            !IS_UNRECOVERABLE_RPC(p->cku_err.re_status)) {
                if (p->cku_feedback != NULL && stries == p->cku_retrys) {
                        (*p->cku_feedback)(FEEDBACK_REXMIT1, procnum,
                            p->cku_feedarg);
                }

                timout = backoff(timout);
                if (p->cku_timeall != (struct rpc_timers *)0)
                        p->cku_timeall->rt_rtxcur = timout;

                if (p->cku_err.re_status == RPC_SYSTEMERROR ||
                    p->cku_err.re_status == RPC_CANTSEND) {
                        /*
                         * Errors due to lack of resources, wait a bit
                         * and try again.
                         */
                        (void) delay(hz/10);
                        /* (void) sleep((caddr_t)&lbolt, PZERO-4); */
                }
                if (stries-- > 0) {
                        RCSTAT_INCR(p->cku_stats, rcretrans);
                        goto call_again;
                }
        }

        if (mpdup != NULL)
                freemsg(mpdup);

        if (p->cku_err.re_status != RPC_SUCCESS) {
                RCSTAT_INCR(p->cku_stats, rcbadcalls);
        }

        /*
         * Allow the endpoint to be held by the client handle in case this
         * RPC was not successful.  A retry may occur at a higher level and
         * in this case we may want to send the request over the same
         * source port.
         */
        if (p->cku_err.re_status == RPC_SUCCESS && p->cku_endpnt != NULL) {
                endpnt_rele(p->cku_endpnt);
                p->cku_endpnt = NULL;
        }

        return (p->cku_err.re_status);
}

static enum clnt_stat
clnt_clts_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
    caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
    struct timeval wait)
{
        return (clnt_clts_kcallit_addr(h, procnum, xdr_args, argsp,
            xdr_results, resultsp, wait, NULL));
}
/*
 * Return error info on this handle.
 */
static void
clnt_clts_kerror(CLIENT *h, struct rpc_err *err)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);

        *err = p->cku_err;
}

static bool_t
clnt_clts_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);
        XDR *xdrs;

        xdrs = &(p->cku_outxdr);
        xdrs->x_op = XDR_FREE;
        return ((*xdr_res)(xdrs, res_ptr));
}

/*ARGSUSED*/
static void
clnt_clts_kabort(CLIENT *h)
{
}

static bool_t
clnt_clts_kcontrol(CLIENT *h, int cmd, char *arg)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);

        switch (cmd) {
        case CLSET_XID:
                p->cku_xid = *((uint32_t *)arg);
                return (TRUE);

        case CLGET_XID:
                *((uint32_t *)arg) = p->cku_xid;
                return (TRUE);

        case CLSET_BCAST:
                p->cku_bcast = *((uint32_t *)arg);
                return (TRUE);

        case CLGET_BCAST:
                *((uint32_t *)arg) = p->cku_bcast;
                return (TRUE);

        default:
                return (FALSE);
        }
}
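/*
 * Usage sketch (illustrative): the broadcast hint consumed by
 * clnt_clts_kcallit_addr() is set through the generic control interface,
 * for example:
 *
 *      uint32_t on = TRUE;
 *      (void) CLNT_CONTROL(cl, CLSET_BCAST, (char *)&on);
 */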
/*
 * Destroy rpc handle.
 * Frees the space used for output buffer, private data, and handle
 * structure, and the file pointer/TLI data on last reference.
 */
static void
clnt_clts_kdestroy(CLIENT *h)
{
        /* LINTED pointer alignment */
        struct cku_private *p = htop(h);
        calllist_t *call = &p->cku_call;

        int plen;

        RPCLOG(8, "clnt_clts_kdestroy h: %p\n", (void *)h);
        RPCLOG(8, "clnt_clts_kdestroy h: xid=0x%x\n", p->cku_xid);

        if (p->cku_endpnt != NULL)
                endpnt_rele(p->cku_endpnt);

        cv_destroy(&call->call_cv);
        mutex_destroy(&call->call_lock);

        plen = strlen(p->cku_config.knc_protofmly) + 1;
        kmem_free(p->cku_config.knc_protofmly, plen);
        kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
        kmem_free(p, sizeof (*p));
}

/*
 * The connectionless (CLTS) kRPC endpoint management subsystem.
 *
 * Because endpoints are potentially shared among threads making RPC calls,
 * they are managed in a pool according to type (endpnt_type_t).  Each
 * endpnt_type_t points to a list of usable endpoints through the e_pool
 * field, which is of type list_t.  list_t is a doubly-linked list.
 * The number of endpoints in the pool is stored in the e_cnt field of
 * endpnt_type_t and the endpoints are reference counted using the e_ref
 * field in the endpnt_t structure.
 *
 * As an optimization, endpoints that have no references are also linked
 * to an idle list via e_ilist, which is also of type list_t.  When a thread
 * calls endpnt_get() to obtain a transport endpoint, the idle list is first
 * consulted and if such an endpoint exists, it is removed from the idle
 * list and returned to the caller.
 *
 * If the idle list is empty, then a check is made to see if more endpoints
 * can be created.  If so, we proceed and create a new endpoint which is
 * added to the pool and returned to the caller.  If we have reached the
 * limit and cannot make a new endpoint then one is returned to the caller
 * via round-robin policy.
 *
 * When an endpoint is placed on the idle list by a thread calling
 * endpnt_rele(), it is timestamped and then a reaper taskq is scheduled to
 * be dispatched if one hasn't already been.  When the timer fires, the
 * taskq traverses the idle list and checks to see which endpoints are
 * eligible to be closed.
 * It determines this by checking whether the timestamp from when the
 * endpoint was released has exceeded the threshold for how long it should
 * stay alive.
 *
 * endpnt_t structures remain persistent until the memory reclaim callback,
 * endpnt_reclaim(), is invoked.
 *
 * Here is an example of how the data structures would be laid out by the
 * subsystem:
 *
 *   endpnt_type_t
 *
 *        loopback                               inet
 *      _______________                     ______________
 *     | e_next        |------------------>| e_next       |---->>
 *     | e_pool        |<---+              | e_pool       |<----+
 *     | e_ilist       |<---+--+           | e_ilist      |<----+--+
 *  +->| e_pcurr       |----+--+--+     +->| e_pcurr      |-----+--+--+
 *  |  | ...           |    |  |  |     |  | ...          |     |  |  |
 *  |  | e_itimer (90) |    |  |  |     |  | e_itimer (0) |     |  |  |
 *  |  | e_cnt (1)     |    |  |  |     |  | e_cnt (3)    |     |  |  |
 *  |  +---------------+    |  |  |     |  +--------------+     |  |  |
 *  |                       |  |  |     |                       |  |  |
 *  |  endpnt_t             |  |  |     |                       |  |  |
 *  |   ____________        |  |  |     |   ____________        |  |  |
 *  |  | e_node     |<------+  |  |     |  | e_node     |<------+  |  |
 *  |  | e_idle     |<---------+  |     |  | e_idle     |       |  |  |
 *  +--| e_type     |<------------+     +--| e_type     |       |  |  |
 *     | e_tiptr    |                   |  | e_tiptr    |       |  |  |
 *     | ...        |                   |  | ...        |       |  |  |
 *     | e_lock     |                   |  | e_lock     |       |  |  |
 *     | ...        |                   |  | ...        |       |  |  |
 *     | e_ref (0)  |                   |  | e_ref (2)  |       |  |  |
 *     | e_itime    |                   |  | e_itime    |       |  |  |
 *     +------------+                   |  +------------+       |  |  |
 *                                      |                       |  |  |
 *                                      |                       |  |  |
 *                                      |   ____________        |  |  |
 *                                      |  | e_node     |<------+  |  |
 *                                      |  | e_idle     |<------+--+  |
 *                                      +--| e_type     |       |     |
 *                                      |  | e_tiptr    |       |     |
 *                                      |  | ...        |       |     |
 *                                      |  | e_lock     |       |     |
 *                                      |  | ...        |       |     |
 *                                      |  | e_ref (0)  |       |     |
 *                                      |  | e_itime    |       |     |
 *                                      |  +------------+       |     |
 *                                      |                       |     |
 *                                      |                       |     |
 *                                      |   ____________        |     |
 *                                      |  | e_node     |<------+     |
 *                                      |  | e_idle     |             |
 *                                      +--| e_type     |<------------+
 *                                         | e_tiptr    |
 *                                         | ...        |
 *                                         | e_lock     |
 *                                         | ...        |
 *                                         | e_ref (1)  |
 *                                         | e_itime    |
 *                                         +------------+
 *
 * Endpoint locking strategy:
 *
 * The following functions manipulate lists which hold the endpoint and the
 * endpoints themselves:
 *
 * endpnt_get()/check_endpnt()/endpnt_rele()/endpnt_reap()/do_endpnt_reclaim()
 *
 * Lock description follows:
 *
 * endpnt_type_lock: Global reader/writer lock which protects accesses to
 *      the endpnt_type_list.
 *
 * e_plock: Lock defined in the endpnt_type_t.  It is intended to protect
 *      accesses to the pool of endpoints (e_pool) for a given
 *      endpnt_type_t.
 *
 * e_ilock: Lock defined in endpnt_type_t.  It is intended to protect
 *      accesses to the idle list (e_ilist) of available endpoints for a
 *      given endpnt_type_t.  It also protects access to the e_itimer,
 *      e_async_cv, and e_async_count fields in endpnt_type_t.
 *
 * e_lock: Lock defined in the endpnt structure.  It is intended to protect
 *      flags, cv, and ref count.
 *
 * The order goes as follows so as not to induce deadlock.
 *
 * endpnt_type_lock -> e_plock -> e_ilock -> e_lock
 *
 * Interaction with Zones and shutting down:
 *
 * endpnt_type_ts are uniquely identified by the (e_zoneid, e_rdev,
 * e_protofmly) tuple, which means that a zone may not reuse another zone's
 * idle endpoints without first doing a t_kclose().
 *
 * A zone's endpnt_type_ts are destroyed when a zone is shut down; e_async_cv
 * and e_async_count are used to keep track of the threads in endpnt_taskq
 * trying to reap endpnt_ts in the endpnt_type_t.
 */
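/*
 * Illustrative sketch (hypothetical code, not part of this file): the
 * reap-eligibility test described above amounts to something like
 *
 *      if (endp->e_ref == 0 &&
 *          (gethrestime_sec() - endp->e_itime) >=
 *          clnt_clts_endpoint_reap_interval)
 *              ... close the transport and free the endpnt_t ...
 *
 * using e_itime, the timestamp recorded when endpnt_rele() put the
 * endpoint on the idle list.
 */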
/*
 * Allocate and initialize an endpnt_type_t
 */
static struct endpnt_type *
endpnt_type_create(struct knetconfig *config)
{
	struct endpnt_type *etype;

	/*
	 * Allocate a new endpoint type to hang a list of
	 * endpoints off of it.
	 */
	etype = kmem_alloc(sizeof (struct endpnt_type), KM_SLEEP);
	etype->e_next = NULL;
	etype->e_pcurr = NULL;
	etype->e_itimer = 0;
	etype->e_cnt = 0;

	(void) strncpy(etype->e_protofmly, config->knc_protofmly, KNC_STRSIZE);
	mutex_init(&etype->e_plock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&etype->e_ilock, NULL, MUTEX_DEFAULT, NULL);
	etype->e_rdev = config->knc_rdev;
	etype->e_zoneid = getzoneid();
	etype->e_async_count = 0;
	cv_init(&etype->e_async_cv, NULL, CV_DEFAULT, NULL);

	list_create(&etype->e_pool, sizeof (endpnt_t),
	    offsetof(endpnt_t, e_node));
	list_create(&etype->e_ilist, sizeof (endpnt_t),
	    offsetof(endpnt_t, e_idle));

	/*
	 * Check to see if we need to create a taskq for endpoint
	 * reaping
	 */
	mutex_enter(&endpnt_taskq_lock);
	if (taskq_created == FALSE) {
		taskq_created = TRUE;
		mutex_exit(&endpnt_taskq_lock);
		ASSERT(endpnt_taskq == NULL);
		endpnt_taskq = taskq_create("clts_endpnt_taskq", 1,
		    minclsyspri, 200, INT_MAX, 0);
	} else
		mutex_exit(&endpnt_taskq_lock);

	return (etype);
}
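
/*
 * The taskq creation above is a "create exactly once" pattern: the flag is
 * tested and set under endpnt_taskq_lock, but the potentially blocking
 * taskq_create() call runs after the lock is dropped.  A minimal
 * stand-alone sketch of the same idea, with hypothetical names:
 *
 *	static kmutex_t once_lock;
 *	static boolean_t once_done = B_FALSE;
 *	static taskq_t *once_tq;
 *
 *	mutex_enter(&once_lock);
 *	if (once_done == B_FALSE) {
 *		once_done = B_TRUE;		-- claim the work first
 *		mutex_exit(&once_lock);
 *		once_tq = taskq_create("once_tq", 1, minclsyspri,
 *		    200, INT_MAX, 0);		-- may block; lock not held
 *	} else {
 *		mutex_exit(&once_lock);
 *	}
 *
 * Losers of the race may briefly observe the taskq pointer as NULL;
 * callers such as endpnt_repossess() below tolerate that by checking it
 * before use.
 */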

/*
 * Free an endpnt_type_t
 */
static void
endpnt_type_free(struct endpnt_type *etype)
{
	mutex_destroy(&etype->e_plock);
	mutex_destroy(&etype->e_ilock);
	list_destroy(&etype->e_pool);
	list_destroy(&etype->e_ilist);
	kmem_free(etype, sizeof (endpnt_type_t));
}

/*
 * Check the endpoint to ensure that it is suitable for use.
 *
 * Possible return values:
 *
 * return (1) - Endpoint is established, but needs to be re-opened.
 * return (0) && *newp == NULL - Endpoint is established, but unusable.
 * return (0) && *newp != NULL - Endpoint is established and usable.
 */
static int
check_endpnt(struct endpnt *endp, struct endpnt **newp)
{
	*newp = endp;

	mutex_enter(&endp->e_lock);
	ASSERT(endp->e_ref >= 1);

	/*
	 * The first condition we check for is if the endpoint has been
	 * allocated, but is unusable either because it has been closed or
	 * has been marked stale.  Only *one* thread will be allowed to
	 * execute the then clause.  This is enforced because the first
	 * thread to check this condition will clear the flags, so that
	 * subsequent thread(s) checking this endpoint will move on.
	 */
	if ((endp->e_flags & ENDPNT_ESTABLISHED) &&
	    (!(endp->e_flags & ENDPNT_BOUND) ||
	    (endp->e_flags & ENDPNT_STALE))) {
		/*
		 * Clear the flags here since they will be
		 * set again by this thread.  They need to be
		 * individually cleared because we want to maintain
		 * the state for ENDPNT_ONIDLE.
		 */
		endp->e_flags &= ~(ENDPNT_ESTABLISHED |
		    ENDPNT_WAITING | ENDPNT_BOUND | ENDPNT_STALE);
		mutex_exit(&endp->e_lock);
		return (1);
	}

	/*
	 * The second condition is meant for any thread that is waiting for
	 * an endpoint to become established.  It will cv_wait() until
	 * the condition for the endpoint has been changed to ENDPNT_BOUND or
	 * ENDPNT_STALE.
	 */
	while (!(endp->e_flags & ENDPNT_BOUND) &&
	    !(endp->e_flags & ENDPNT_STALE)) {
		endp->e_flags |= ENDPNT_WAITING;
		cv_wait(&endp->e_cv, &endp->e_lock);
	}

	ASSERT(endp->e_flags & ENDPNT_ESTABLISHED);

	/*
	 * The last case we check for is if the endpoint has been marked
	 * stale.  If this is the case then set *newp to NULL and return,
	 * so that the caller is notified of the error and can take
	 * appropriate action.
	 */
	if (endp->e_flags & ENDPNT_STALE) {
		endp->e_ref--;
		*newp = NULL;
	}
	mutex_exit(&endp->e_lock);
	return (0);
}
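
/*
 * The cv_wait() loop above is one half of a flags-plus-condvar handshake;
 * the other half is in endpnt_get(), which sets ENDPNT_BOUND (or
 * ENDPNT_STALE on failure) and broadcasts.  Reduced to its essentials,
 * with hypothetical names:
 *
 *	-- waiter, with m held:
 *	while (!(flags & (DONE | FAILED))) {
 *		flags |= WAITING;
 *		cv_wait(&cv, &m);
 *	}
 *
 *	-- waker, with m held:
 *	flags |= DONE;			-- or FAILED
 *	if (flags & WAITING) {
 *		cv_broadcast(&cv);
 *		flags &= ~WAITING;
 *	}
 *
 * The WAITING bit lets the waker skip the broadcast when nobody is
 * sleeping, at the cost of making spurious wakeups possible; the while
 * loop re-checks the predicate for exactly that reason.
 */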

#ifdef DEBUG
/*
 * Provide a fault injection setting to test error conditions.
 */
static int endpnt_get_return_null = 0;
#endif
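
/*
 * On a DEBUG kernel the knob above can be poked from userland to force the
 * next N calls to endpnt_get() to fail.  For example, with mdb(1) (an
 * illustrative invocation, not something this file depends on):
 *
 *	# echo 'endpnt_get_return_null/W 2' | mdb -kw
 *
 * would make the next two calls return NULL, exercising the callers'
 * stale-endpoint error paths.
 */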

/*
 * Returns a handle (struct endpnt *) to an open and bound endpoint
 * specified by the knetconfig passed in.  Returns NULL if no valid endpoint
 * can be obtained.
 */
static struct endpnt *
endpnt_get(struct knetconfig *config)
{
	struct endpnt_type *n_etype = NULL;
	struct endpnt_type *np = NULL;
	struct endpnt *new = NULL;
	struct endpnt *endp = NULL;
	struct endpnt *next = NULL;
	TIUSER *tiptr = NULL;
	int rtries = BINDRESVPORT_RETRIES;
	int i = 0;
	int error;
	int retval;
	zoneid_t zoneid = getzoneid();

	RPCLOG(1, "endpnt_get: protofmly %s, ", config->knc_protofmly);
	RPCLOG(1, "rdev %ld\n", config->knc_rdev);

#ifdef DEBUG
	/*
	 * Inject fault if desired.  Pretend we have a stale endpoint
	 * and return NULL.
	 */
	if (endpnt_get_return_null > 0) {
		endpnt_get_return_null--;
		return (NULL);
	}
#endif
	rw_enter(&endpnt_type_lock, RW_READER);

top:
	for (np = endpnt_type_list; np != NULL; np = np->e_next)
		if ((np->e_zoneid == zoneid) &&
		    (np->e_rdev == config->knc_rdev) &&
		    (strcmp(np->e_protofmly,
		    config->knc_protofmly) == 0))
			break;

	if (np == NULL && n_etype != NULL) {
		ASSERT(rw_write_held(&endpnt_type_lock));

		/*
		 * Link the endpoint type onto the list
		 */
		n_etype->e_next = endpnt_type_list;
		endpnt_type_list = n_etype;
		np = n_etype;
		n_etype = NULL;
	}

	if (np == NULL) {
		/*
		 * The logic here is that we were unable to find an
		 * endpnt_type_t that matched our criteria, so we allocate a
		 * new one.  Because kmem_alloc() needs to be called with
		 * KM_SLEEP, we drop our locks so that we don't induce
		 * deadlock.  After allocating and initializing the
		 * endpnt_type_t, we reacquire the lock and go back to check
		 * if this entry needs to be added to the list.  Since we do
		 * some operations without any locking, other threads may
		 * have been looking for the same endpnt_type_t and gone
		 * through this code path.  We check for this case and allow
		 * one thread to link its endpnt_type_t to the list and the
		 * other threads will simply free theirs.
		 */
		rw_exit(&endpnt_type_lock);
		n_etype = endpnt_type_create(config);

		/*
		 * We need to reacquire the lock with RW_WRITER here so that
		 * we can safely link the new endpoint type onto the list.
		 */
		rw_enter(&endpnt_type_lock, RW_WRITER);
		goto top;
	}

	rw_exit(&endpnt_type_lock);
	/*
	 * If n_etype is not NULL, then another thread was able to
	 * insert an endpnt_type_t of this type onto the list before
	 * we did.  Go ahead and free ours.
	 */
	if (n_etype != NULL)
		endpnt_type_free(n_etype);

	mutex_enter(&np->e_ilock);
	/*
	 * The algorithm to hand out endpoints is to first
	 * give out those that are idle if such endpoints
	 * exist.  Otherwise, create a new one if we haven't
	 * reached the max threshold.  Finally, we give out
	 * endpoints in a pseudo LRU fashion (round-robin).
	 *
	 * Note:  The idle list is merely a hint of those endpoints
	 * that should be idle.  There exists a window after the
	 * endpoint is released and before it is linked back onto the
	 * idle list where a thread could get a reference to it and
	 * use it.  This is okay, since the reference counts will
	 * still be consistent.
	 */
	if ((endp = (endpnt_t *)list_head(&np->e_ilist)) != NULL) {
		timeout_id_t t_id = 0;

		mutex_enter(&endp->e_lock);
		endp->e_ref++;
		endp->e_itime = 0;
		endp->e_flags &= ~ENDPNT_ONIDLE;
		mutex_exit(&endp->e_lock);

		/*
		 * Pop the endpoint off the idle list and hand it off
		 */
		list_remove(&np->e_ilist, endp);

		if (np->e_itimer != 0) {
			t_id = np->e_itimer;
			np->e_itimer = 0;
		}
		mutex_exit(&np->e_ilock);
		/*
		 * Reset the idle timer if it has been set
		 */
		if (t_id != (timeout_id_t)0)
			(void) untimeout(t_id);

		if (check_endpnt(endp, &new) == 0)
			return (new);
	} else if (np->e_cnt >= clnt_clts_max_endpoints) {
		/*
		 * There are no idle endpoints currently and we have already
		 * reached the maximum number of endpoints for this type,
		 * so hand one out in round-robin.
		 */
		mutex_exit(&np->e_ilock);
		mutex_enter(&np->e_plock);
		endp = np->e_pcurr;
		mutex_enter(&endp->e_lock);
		endp->e_ref++;
		mutex_exit(&endp->e_lock);

		ASSERT(endp != NULL);
		/*
		 * Advance the pointer to the next eligible endpoint, if
		 * necessary.
		 */
		if (np->e_cnt > 1) {
			next = (endpnt_t *)list_next(&np->e_pool, np->e_pcurr);
			if (next == NULL)
				next = (endpnt_t *)list_head(&np->e_pool);
			np->e_pcurr = next;
		}

		mutex_exit(&np->e_plock);

		/*
		 * We need to check to see if this endpoint is bound or
		 * not.  If it is in progress then just wait until
		 * the set up is complete
		 */
		if (check_endpnt(endp, &new) == 0)
			return (new);
	} else {
		mutex_exit(&np->e_ilock);
		mutex_enter(&np->e_plock);

		/*
		 * Allocate a new endpoint to use.  If we can't allocate any
		 * more memory then use one that is already established if
		 * any such endpoints exist.
		 */
		new = kmem_cache_alloc(endpnt_cache, KM_NOSLEEP);
		if (new == NULL) {
			RPCLOG0(1, "endpnt_get: kmem_cache_alloc failed\n");
			/*
			 * Try to recover by using an existing endpoint.
			 */
			if (np->e_cnt <= 0) {
				mutex_exit(&np->e_plock);
				return (NULL);
			}
			endp = np->e_pcurr;
			if ((next = list_next(&np->e_pool, np->e_pcurr)) !=
			    NULL)
				np->e_pcurr = next;
			ASSERT(endp != NULL);
			mutex_enter(&endp->e_lock);
			endp->e_ref++;
			mutex_exit(&endp->e_lock);
			mutex_exit(&np->e_plock);

			if (check_endpnt(endp, &new) == 0)
				return (new);
		} else {
			/*
			 * Partially init an endpoint structure and put
			 * it on the list, so that other interested threads
			 * know that one is being created
			 */
			bzero(new, sizeof (struct endpnt));

			cv_init(&new->e_cv, NULL, CV_DEFAULT, NULL);
			mutex_init(&new->e_lock, NULL, MUTEX_DEFAULT, NULL);
			new->e_ref = 1;
			new->e_type = np;

			/*
			 * Link the endpoint into the pool.
			 */
			list_insert_head(&np->e_pool, new);
			np->e_cnt++;
			if (np->e_pcurr == NULL)
				np->e_pcurr = new;
			mutex_exit(&np->e_plock);
		}
	}

	/*
	 * The transport should be opened with sufficient privs
	 */
	error = t_kopen(NULL, config->knc_rdev, FREAD|FWRITE|FNDELAY, &tiptr,
	    kcred);
	if (error) {
		RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
		goto bad;
	}

	new->e_tiptr = tiptr;
	rpc_poptimod(tiptr->fp->f_vnode);

	/*
	 * Allow the kernel to push the module on behalf of the user.
	 */
	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
	    K_TO_K, kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: kstr_push on rpcmod failed %d\n", error);
		goto bad;
	}

	error = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
	    kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: strioctl failed %d\n", error);
		goto bad;
	}

	/*
	 * Connectionless data flow should bypass the stream head.
	 */
	new->e_wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;

	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
	    K_TO_K, kcred, &retval);
	if (error) {
		RPCLOG(1, "endpnt_get: kstr_push on timod failed %d\n", error);
		goto bad;
	}

	/*
	 * Attempt to bind the endpoint.  If we fail then propagate the
	 * error back to the calling subsystem, so that it can be handled
	 * appropriately.
	 */
	if (clnt_clts_do_bindresvport &&
	    (strcmp(config->knc_protofmly, NC_INET) == 0 ||
	    strcmp(config->knc_protofmly, NC_INET6) == 0)) {

		while ((error =
		    bindresvport(new->e_tiptr, NULL, NULL, FALSE)) != 0) {
			RPCLOG(1,
			    "endpnt_get: bindresvport error %d\n",
			    error);
			if (error != EPROTO) {
				if (rtries-- <= 0)
					goto bad;

				delay(hz << i++);
				continue;
			}

			(void) t_kclose(new->e_tiptr, 1);
			/*
			 * reopen with all privileges
			 */
			error = t_kopen(NULL, config->knc_rdev,
			    FREAD|FWRITE|FNDELAY,
			    &new->e_tiptr, kcred);
			if (error) {
				RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
				new->e_tiptr = NULL;
				goto bad;
			}
		}
	} else if ((error = t_kbind(new->e_tiptr, NULL, NULL)) != 0) {
		RPCLOG(1, "endpnt_get: t_kbind failed: %d\n", error);
		goto bad;
	}

	/*
	 * Set the flags and notify any waiters that we have an established
	 * endpoint.
	 */
	mutex_enter(&new->e_lock);
	new->e_flags |= ENDPNT_ESTABLISHED;
	new->e_flags |= ENDPNT_BOUND;
	if (new->e_flags & ENDPNT_WAITING) {
		cv_broadcast(&new->e_cv);
		new->e_flags &= ~ENDPNT_WAITING;
	}
	mutex_exit(&new->e_lock);

	return (new);

bad:
	ASSERT(new != NULL);
	/*
	 * mark this endpoint as stale and notify any threads waiting
	 * on this endpoint that it will be going away.
	 */
	mutex_enter(&new->e_lock);
	if (new->e_ref > 0) {
		new->e_flags |= ENDPNT_ESTABLISHED;
		new->e_flags |= ENDPNT_STALE;
		if (new->e_flags & ENDPNT_WAITING) {
			cv_broadcast(&new->e_cv);
			new->e_flags &= ~ENDPNT_WAITING;
		}
	}
	new->e_ref--;
	new->e_tiptr = NULL;
	mutex_exit(&new->e_lock);

	/*
	 * If there was a transport endpoint opened, then close it.
	 */
	if (tiptr != NULL)
		(void) t_kclose(tiptr, 1);

	return (NULL);
}
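
/*
 * Illustrative lifecycle (a sketch of how the send path in this file uses
 * the pool, not an additional API): every successful endpnt_get() must be
 * balanced by exactly one endpnt_rele(), which is what eventually lets the
 * idle reaper close the underlying transport.  The real caller caches the
 * endpoint in the client handle and releases it in clnt_clts_kdestroy():
 *
 *	struct endpnt *endpt;
 *
 *	if ((endpt = endpnt_get(config)) == NULL)
 *		return (RPC_SYSTEMERROR);	-- no usable endpoint
 *	... put() the RPC request on endpt->e_wq, wait for the reply ...
 *	endpnt_rele(endpt);			-- drops e_ref; may start
 *						   the idle timer below
 */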

/*
 * Release a reference to the endpoint
 */
static void
endpnt_rele(struct endpnt *sp)
{
	mutex_enter(&sp->e_lock);
	ASSERT(sp->e_ref > 0);
	sp->e_ref--;
	/*
	 * If the ref count is zero, then start the idle timer and link
	 * the endpoint onto the idle list.
	 */
	if (sp->e_ref == 0) {
		sp->e_itime = gethrestime_sec();

		/*
		 * Check to see if the endpoint is already linked to the idle
		 * list, so that we don't try to reinsert it.
		 */
		if (sp->e_flags & ENDPNT_ONIDLE) {
			mutex_exit(&sp->e_lock);
			mutex_enter(&sp->e_type->e_ilock);
			endpnt_reap_settimer(sp->e_type);
			mutex_exit(&sp->e_type->e_ilock);
			return;
		}

		sp->e_flags |= ENDPNT_ONIDLE;
		mutex_exit(&sp->e_lock);
		mutex_enter(&sp->e_type->e_ilock);
		list_insert_tail(&sp->e_type->e_ilist, sp);
		endpnt_reap_settimer(sp->e_type);
		mutex_exit(&sp->e_type->e_ilock);
	} else
		mutex_exit(&sp->e_lock);
}

static void
endpnt_reap_settimer(endpnt_type_t *etp)
{
	if (etp->e_itimer == (timeout_id_t)0)
		etp->e_itimer = timeout(endpnt_reap_dispatch, (void *)etp,
		    clnt_clts_taskq_dispatch_interval);
}

static void
endpnt_reap_dispatch(void *a)
{
	endpnt_type_t *etp = a;

	/*
	 * The idle timer has fired, so dispatch the taskq to close the
	 * endpoint.
	 */
	if (taskq_dispatch(endpnt_taskq, (task_func_t *)endpnt_reap, etp,
	    TQ_NOSLEEP) == NULL)
		return;
	mutex_enter(&etp->e_ilock);
	etp->e_async_count++;
	mutex_exit(&etp->e_ilock);
}
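
/*
 * The timeout()/taskq pair above is the usual two-stage deferral: the
 * timeout callback runs in a context where blocking operations such as
 * t_kclose() are not allowed, so all it does is hand the real work to a
 * taskq thread.  A minimal sketch of the pattern, with hypothetical names:
 *
 *	static void
 *	foo_timer(void *arg)
 *	{
 *		-- timeout context: no blocking allowed, so punt
 *		(void) taskq_dispatch(foo_tq, foo_work, arg, TQ_NOSLEEP);
 *	}
 *
 *	static void
 *	foo_work(void *arg)
 *	{
 *		-- taskq thread context: blocking calls are fine here
 *	}
 *
 *	(void) timeout(foo_timer, arg, interval_in_ticks);
 *
 * TQ_NOSLEEP is mandatory in the timer leg, which is why
 * endpnt_reap_dispatch() must be prepared for taskq_dispatch() to fail.
 */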

/*
 * Traverse the idle list and close those endpoints that have reached their
 * timeout interval.
 */
static void
endpnt_reap(endpnt_type_t *etp)
{
	struct endpnt *e;
	struct endpnt *next_node = NULL;

	mutex_enter(&etp->e_ilock);
	e = list_head(&etp->e_ilist);
	while (e != NULL) {
		next_node = list_next(&etp->e_ilist, e);

		mutex_enter(&e->e_lock);
		if (e->e_ref > 0) {
			mutex_exit(&e->e_lock);
			e = next_node;
			continue;
		}

		ASSERT(e->e_ref == 0);
		if (e->e_itime > 0 &&
		    (e->e_itime + clnt_clts_endpoint_reap_interval) <
		    gethrestime_sec()) {
			e->e_flags &= ~ENDPNT_BOUND;
			(void) t_kclose(e->e_tiptr, 1);
			e->e_tiptr = NULL;
			e->e_itime = 0;
		}
		mutex_exit(&e->e_lock);
		e = next_node;
	}
	etp->e_itimer = 0;
	if (--etp->e_async_count == 0)
		cv_signal(&etp->e_async_cv);
	mutex_exit(&etp->e_ilock);
}

static void
endpnt_reclaim(zoneid_t zoneid)
{
	struct endpnt_type *np;
	struct endpnt *e;
	struct endpnt *next_node = NULL;
	list_t free_list;
	int rcnt = 0;

	list_create(&free_list, sizeof (endpnt_t), offsetof(endpnt_t, e_node));

	RPCLOG0(1, "endpnt_reclaim: reclaim callback started\n");
	rw_enter(&endpnt_type_lock, RW_READER);
	for (np = endpnt_type_list; np != NULL; np = np->e_next) {
		if (zoneid != ALL_ZONES && zoneid != np->e_zoneid)
			continue;

		mutex_enter(&np->e_plock);
		RPCLOG(1, "endpnt_reclaim: protofmly %s, ",
		    np->e_protofmly);
		RPCLOG(1, "rdev %ld\n", np->e_rdev);
		RPCLOG(1, "endpnt_reclaim: found %d endpoint(s)\n",
		    np->e_cnt);

		if (np->e_cnt == 0) {
			mutex_exit(&np->e_plock);
			continue;
		}

		/*
		 * The nice thing about maintaining an idle list is that if
		 * there are any endpoints to reclaim, they are going to be
		 * on this list.  Just go through and reap the ones that
		 * have ref counts of zero.
		 */
		mutex_enter(&np->e_ilock);
		e = list_head(&np->e_ilist);
		while (e != NULL) {
			next_node = list_next(&np->e_ilist, e);
			mutex_enter(&e->e_lock);
			if (e->e_ref > 0) {
				mutex_exit(&e->e_lock);
				e = next_node;
				continue;
			}
			ASSERT(e->e_ref == 0);
			mutex_exit(&e->e_lock);

			list_remove(&np->e_ilist, e);
			list_remove(&np->e_pool, e);
			list_insert_head(&free_list, e);

			rcnt++;
			np->e_cnt--;
			e = next_node;
		}
		mutex_exit(&np->e_ilock);
		/*
		 * Reset the current pointer to be safe
		 */
		if ((e = (struct endpnt *)list_head(&np->e_pool)) != NULL)
			np->e_pcurr = e;
		else {
			ASSERT(np->e_cnt == 0);
			np->e_pcurr = NULL;
		}

		mutex_exit(&np->e_plock);
	}
	rw_exit(&endpnt_type_lock);

	while ((e = list_head(&free_list)) != NULL) {
		list_remove(&free_list, e);
		if (e->e_tiptr != NULL)
			(void) t_kclose(e->e_tiptr, 1);

		cv_destroy(&e->e_cv);
		mutex_destroy(&e->e_lock);
		kmem_cache_free(endpnt_cache, e);
	}
	list_destroy(&free_list);
	RPCLOG(1, "endpnt_reclaim: reclaimed %d endpoint(s)\n", rcnt);
}
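
/*
 * endpnt_reclaim() uses a pattern worth calling out: nodes are only
 * unlinked while the list locks are held, but the blocking teardown
 * (t_kclose(), cv_destroy(), freeing) happens after the locks are dropped,
 * by collecting victims on a private list first.  A minimal sketch, with
 * hypothetical names:
 *
 *	list_t victims;
 *	node_t *n;
 *
 *	list_create(&victims, sizeof (node_t), offsetof(node_t, n_link));
 *
 *	mutex_enter(&pool_lock);
 *	while ((n = find_idle_node(&pool)) != NULL) {
 *		list_remove(&pool, n);		-- cheap, lock held
 *		list_insert_head(&victims, n);
 *	}
 *	mutex_exit(&pool_lock);
 *
 *	while ((n = list_head(&victims)) != NULL) {
 *		list_remove(&victims, n);
 *		destroy_node(n);		-- may block, no locks held
 *	}
 *	list_destroy(&victims);
 */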

/*
 * Endpoint reclaim zones destructor callback routine.
 *
 * After reclaiming any cached entries, we basically go through the
 * endpnt_type list, canceling outstanding timeouts and freeing data
 * structures.
 */
/* ARGSUSED */
static void
endpnt_destructor(zoneid_t zoneid, void *a)
{
	struct endpnt_type **npp;
	struct endpnt_type *np;
	struct endpnt_type *free_list = NULL;
	timeout_id_t t_id = 0;
	extern void clcleanup_zone(zoneid_t);
	extern void clcleanup4_zone(zoneid_t);

	/* Make sure NFS client handles are released. */
	clcleanup_zone(zoneid);
	clcleanup4_zone(zoneid);

	endpnt_reclaim(zoneid);
	/*
	 * We don't need to be holding on to any locks across the call to
	 * endpnt_reclaim() and the code below; we know that no-one can
	 * be holding open connections for this zone (all processes and
	 * kernel threads are gone), so nothing could be adding anything
	 * to the list.
	 */
	rw_enter(&endpnt_type_lock, RW_WRITER);
	npp = &endpnt_type_list;
	while ((np = *npp) != NULL) {
		if (np->e_zoneid != zoneid) {
			npp = &np->e_next;
			continue;
		}
		mutex_enter(&np->e_plock);
		mutex_enter(&np->e_ilock);
		if (np->e_itimer != 0) {
			t_id = np->e_itimer;
			np->e_itimer = 0;
		}
		ASSERT(np->e_cnt == 0);
		ASSERT(list_head(&np->e_pool) == NULL);
		ASSERT(list_head(&np->e_ilist) == NULL);

		mutex_exit(&np->e_ilock);
		mutex_exit(&np->e_plock);

		/*
		 * untimeout() any outstanding timers that have not yet fired.
		 */
		if (t_id != (timeout_id_t)0)
			(void) untimeout(t_id);
		*npp = np->e_next;
		np->e_next = free_list;
		free_list = np;
	}
	rw_exit(&endpnt_type_lock);

	while (free_list != NULL) {
		np = free_list;
		free_list = free_list->e_next;
		/*
		 * Wait for threads in endpnt_taskq trying to reap endpnt_ts
		 * in the endpnt_type_t.
		 */
		mutex_enter(&np->e_ilock);
		while (np->e_async_count > 0)
			cv_wait(&np->e_async_cv, &np->e_ilock);
		cv_destroy(&np->e_async_cv);
		mutex_destroy(&np->e_plock);
		mutex_destroy(&np->e_ilock);
		list_destroy(&np->e_pool);
		list_destroy(&np->e_ilist);
		kmem_free(np, sizeof (endpnt_type_t));
	}
}

/*
 * Endpoint reclaim kmem callback routine.
 */
/* ARGSUSED */
static void
endpnt_repossess(void *a)
{
	/*
	 * Reclaim idle endpnt_ts from all zones.
	 */
	if (endpnt_taskq != NULL)
		(void) taskq_dispatch(endpnt_taskq,
		    (task_func_t *)endpnt_reclaim, (void *)ALL_ZONES,
		    TQ_NOSLEEP);
}

/*
 * RPC request dispatch routine.  Constructs a datagram message and wraps it
 * around the RPC request to pass downstream.
 */
static int
clnt_clts_dispatch_send(queue_t *q, mblk_t *mp, struct netbuf *addr,
	calllist_t *cp, uint_t xid)
{
	mblk_t *bp;
	int msgsz;
	struct T_unitdata_req *udreq;

	/*
	 * Set up the call record.
	 */
	cp->call_wq = q;
	cp->call_xid = xid;
	cp->call_status = RPC_TIMEDOUT;
	cp->call_notified = FALSE;
	RPCLOG(64,
	    "clnt_clts_dispatch_send: putting xid 0x%x on "
	    "dispatch list\n", xid);
	cp->call_hash = call_hash(xid, clnt_clts_hash_size);
	cp->call_bucket = &clts_call_ht[cp->call_hash];
	call_table_enter(cp);

	/*
	 * Construct the datagram
	 */
	msgsz = (int)TUNITDATAREQSZ;
	while (!(bp = allocb(msgsz + addr->len, BPRI_LO))) {
		if (strwaitbuf(msgsz + addr->len, BPRI_LO))
			return (ENOSR);
	}

	udreq = (struct T_unitdata_req *)bp->b_wptr;
	udreq->PRIM_type = T_UNITDATA_REQ;
	udreq->DEST_length = addr->len;

	if (addr->len) {
		bcopy(addr->buf, bp->b_wptr + msgsz, addr->len);
		udreq->DEST_offset = (t_scalar_t)msgsz;
		msgsz += addr->len;
	} else
		udreq->DEST_offset = 0;
	udreq->OPT_length = 0;
	udreq->OPT_offset = 0;

	bp->b_datap->db_type = M_PROTO;
	bp->b_wptr += msgsz;

	/*
	 * Link the datagram header with the actual data
	 */
	linkb(bp, mp);

	/*
	 * Send downstream.
	 */
	put(cp->call_wq, bp);

	return (0);
}
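
/*
 * The M_PROTO block built above has the following layout (a sketch;
 * offsets are in bytes from b_rptr, and TUNITDATAREQSZ is the size of
 * struct T_unitdata_req):
 *
 *	0                      TUNITDATAREQSZ     TUNITDATAREQSZ + addr->len
 *	+----------------------+------------------+
 *	| struct T_unitdata_req| destination addr |  --> linkb() --> RPC
 *	|   PRIM_type          |  (addr->buf,     |      request mblk(s)
 *	|   DEST_length/offset |   addr->len)     |
 *	|   OPT_length/offset  |                  |
 *	+----------------------+------------------+
 *
 * DEST_offset is therefore simply TUNITDATAREQSZ, which is why msgsz is
 * used both as the copy destination offset and, once the address has been
 * appended, as the final b_wptr advance.
 */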

/*
 * RPC response delivery routine.  Deliver the response to the waiting
 * thread by matching the xid.
 */
void
clnt_clts_dispatch_notify(mblk_t *mp, int resp_off, zoneid_t zoneid)
{
	calllist_t *e = NULL;
	call_table_t *chtp;
	uint32_t xid;
	uint_t hash;
	unsigned char *hdr_offset;
	mblk_t *resp;

	/*
	 * If the RPC response is not contained in the same mblk as the
	 * datagram header, then move to the next mblk.
	 */
	hdr_offset = mp->b_rptr;
	resp = mp;
	if ((mp->b_wptr - (mp->b_rptr + resp_off)) == 0)
		resp = mp->b_cont;
	else
		resp->b_rptr += resp_off;

	ASSERT(resp != NULL);

	if ((IS_P2ALIGNED(resp->b_rptr, sizeof (uint32_t))) &&
	    (resp->b_wptr - resp->b_rptr) >= sizeof (xid))
		xid = *((uint32_t *)resp->b_rptr);
	else {
		int i = 0;
		unsigned char *p = (unsigned char *)&xid;
		unsigned char *rptr;
		mblk_t *tmp = resp;

		/*
		 * Copy the xid, byte-by-byte into xid.
		 */
		while (tmp) {
			rptr = tmp->b_rptr;
			while (rptr < tmp->b_wptr) {
				*p++ = *rptr++;
				if (++i >= sizeof (xid))
					goto done_xid_copy;
			}
			tmp = tmp->b_cont;
		}

		/*
		 * If we got here, we ran out of mblk space before the
		 * xid could be copied.
		 */
		ASSERT(tmp == NULL && i < sizeof (xid));

		RPCLOG0(1,
		    "clnt_dispatch_notify(clts): message less than "
		    "size of xid\n");

		freemsg(mp);
		return;
	}

done_xid_copy:

	/*
	 * Reset the read pointer back to the beginning of the protocol
	 * header if we moved it.
	 */
	if (mp->b_rptr != hdr_offset)
		mp->b_rptr = hdr_offset;

	hash = call_hash(xid, clnt_clts_hash_size);
	chtp = &clts_call_ht[hash];
	/* call_table_find returns with the hash bucket locked */
	call_table_find(chtp, xid, e);

	if (e != NULL) {
		mutex_enter(&e->call_lock);
		/*
		 * found thread waiting for this reply.
		 */
		if (e->call_reply) {
			RPCLOG(8,
			    "clnt_dispatch_notify (clts): discarding old "
			    "reply for xid 0x%x\n",
			    xid);
			freemsg(e->call_reply);
		}
		e->call_notified = TRUE;
		e->call_reply = mp;
		e->call_status = RPC_SUCCESS;
		cv_signal(&e->call_cv);
		mutex_exit(&e->call_lock);
		mutex_exit(&chtp->ct_lock);
	} else {
		zone_t *zone;
		struct rpcstat *rpcstat;

		mutex_exit(&chtp->ct_lock);
		RPCLOG(8, "clnt_dispatch_notify (clts): no caller for reply "
		    "0x%x\n", xid);
		freemsg(mp);
		/*
		 * This is unfortunate, but we need to lookup the zone so we
		 * can increment its "rcbadxids" counter.
		 */
		zone = zone_find_by_id(zoneid);
		if (zone == NULL) {
			/*
			 * The zone went away...
			 */
			return;
		}
		rpcstat = zone_getspecific(rpcstat_zone_key, zone);
		if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
			/*
			 * Not interested
			 */
			zone_rele(zone);
			return;
		}
		RCSTAT_INCR(rpcstat->rpc_clts_client, rcbadxids);
		zone_rele(zone);
	}
}
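
/*
 * Illustrative sketch of the waiting half of this rendezvous (the real
 * code lives in clnt_clts_kcallit(), earlier in this file): the caller
 * enters the call record into clts_call_ht via clnt_clts_dispatch_send()
 * and then sleeps until the notify routine above fills it in.  Roughly:
 *
 *	mutex_enter(&cp->call_lock);
 *	while (cp->call_notified == FALSE) {
 *		if (cv_timedwait(&cp->call_cv, &cp->call_lock,
 *		    deadline_ticks) == -1)
 *			break;			-- timed out; retransmit
 *	}
 *	... on success, cp->call_reply holds the response mblk ...
 *	mutex_exit(&cp->call_lock);
 *
 * call_notified guards against both spurious wakeups and a reply that
 * arrives between the send and the first cv_timedwait().
 */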

/*
 * Init routine.  Called when rpcmod is loaded.
 */
void
clnt_clts_init(void)
{
	endpnt_cache = kmem_cache_create("clnt_clts_endpnt_cache",
	    sizeof (struct endpnt), 0, NULL, NULL, endpnt_repossess, NULL,
	    NULL, 0);

	rw_init(&endpnt_type_lock, NULL, RW_DEFAULT, NULL);

	/*
	 * Perform simple bounds checking to make sure that the setting is
	 * reasonable
	 */
	if (clnt_clts_max_endpoints <= 0) {
		if (clnt_clts_do_bindresvport)
			clnt_clts_max_endpoints = RESERVED_PORTSPACE;
		else
			clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;
	}

	if (clnt_clts_do_bindresvport &&
	    clnt_clts_max_endpoints > RESERVED_PORTSPACE)
		clnt_clts_max_endpoints = RESERVED_PORTSPACE;
	else if (clnt_clts_max_endpoints > NONRESERVED_PORTSPACE)
		clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;

	if (clnt_clts_hash_size < DEFAULT_MIN_HASH_SIZE)
		clnt_clts_hash_size = DEFAULT_MIN_HASH_SIZE;

	/*
	 * Defer creating the taskq until rpcmod gets pushed.  If we are
	 * in diskless boot mode, rpcmod will get loaded early even before
	 * thread_create() is available.
	 */
	endpnt_taskq = NULL;
	taskq_created = FALSE;
	mutex_init(&endpnt_taskq_lock, NULL, MUTEX_DEFAULT, NULL);

	if (clnt_clts_endpoint_reap_interval < DEFAULT_ENDPOINT_REAP_INTERVAL)
		clnt_clts_endpoint_reap_interval =
		    DEFAULT_ENDPOINT_REAP_INTERVAL;

	/*
	 * Dispatch the taskq at an interval which is offset from the
	 * interval that the endpoints should be reaped.
	 */
	clnt_clts_taskq_dispatch_interval =
	    (clnt_clts_endpoint_reap_interval + DEFAULT_INTERVAL_SHIFT) * hz;

	/*
	 * Initialize the completion queue
	 */
	clts_call_ht = call_table_init(clnt_clts_hash_size);
	/*
	 * Initialize the zone destructor callback.
	 */
	zone_key_create(&endpnt_destructor_key, NULL, NULL, endpnt_destructor);
}

void
clnt_clts_fini(void)
{
	(void) zone_key_delete(endpnt_destructor_key);
}
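
/*
 * A worked example of the interval arithmetic in clnt_clts_init() (values
 * symbolic; the actual defaults come from the tunables above): an endpoint
 * released at time T becomes eligible for reaping once
 *
 *	T + clnt_clts_endpoint_reap_interval < gethrestime_sec()
 *
 * in endpnt_reap(), while the reaper timer set by endpnt_reap_settimer()
 * fires after
 *
 *	(clnt_clts_endpoint_reap_interval + DEFAULT_INTERVAL_SHIFT) * hz
 *
 * ticks.  Because the dispatch interval exceeds the reap interval by
 * DEFAULT_INTERVAL_SHIFT seconds, an endpoint idled just before the timer
 * was armed is already old enough to be reaped when the taskq runs.
 */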