/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T
 * All Rights Reserved
 */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */


/*
 * Implements a kernel based, client side RPC.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/ddi.h>
#include <sys/tiuser.h>
#include <sys/tihdr.h>
#include <sys/t_kuser.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/kstat.h>
#include <sys/t_lock.h>
#include <sys/cmn_err.h>
#include <sys/conf.h>
#include <sys/disp.h>
#include <sys/taskq.h>
#include <sys/list.h>
#include <sys/atomic.h>
#include <sys/zone.h>
#include <netinet/in.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>

#include <sys/sdt.h>

static enum clnt_stat clnt_clts_kcallit(CLIENT *, rpcproc_t, xdrproc_t,
    caddr_t, xdrproc_t, caddr_t, struct timeval);
static void	clnt_clts_kabort(CLIENT *);
static void	clnt_clts_kerror(CLIENT *, struct rpc_err *);
static bool_t	clnt_clts_kfreeres(CLIENT *, xdrproc_t, caddr_t);
static bool_t	clnt_clts_kcontrol(CLIENT *, int, char *);
static void	clnt_clts_kdestroy(CLIENT *);
static int	clnt_clts_ksettimers(CLIENT *, struct rpc_timers *,
    struct rpc_timers *, int, void (*)(), caddr_t, uint32_t);

/*
 * Operations vector for CLTS based RPC
 */
static struct clnt_ops clts_ops = {
	clnt_clts_kcallit,	/* do rpc call */
	clnt_clts_kabort,	/* abort call */
	clnt_clts_kerror,	/* return error status */
	clnt_clts_kfreeres,	/* free results */
	clnt_clts_kdestroy,	/* destroy rpc handle */
	clnt_clts_kcontrol,	/* the ioctl() of rpc */
	clnt_clts_ksettimers	/* set retry timers */
};
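
/*
 * Consumers never call these routines directly; they reach them through
 * the generic CLNT_* macros, which dispatch through cl_ops.  A sketch of
 * the expansion (see <rpc/clnt.h> for the authoritative definitions):
 *
 *	CLNT_CALL(h, proc, xargs, argsp, xres, resp, tmo)
 *		==> (*(h)->cl_ops->cl_call)(h, proc, xargs, argsp,
 *			xres, resp, tmo)
 *
 * which, for a handle created in this file, resolves to the cl_call slot
 * of the vector above, i.e. clnt_clts_kcallit().
 */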

/*
 * Endpoint for CLTS (INET, INET6, loopback, etc.)
 */
typedef struct endpnt_type {
	struct endpnt_type *e_next;	/* pointer to next endpoint type */
	list_t		e_pool;		/* list of available endpoints */
	list_t		e_ilist;	/* list of idle endpoints */
	struct endpnt	*e_pcurr;	/* pointer to current endpoint */
	char		e_protofmly[KNC_STRSIZE];	/* protocol family */
	dev_t		e_rdev;		/* device */
	kmutex_t	e_plock;	/* pool lock */
	kmutex_t	e_ilock;	/* idle list lock */
	timeout_id_t	e_itimer;	/* timer to dispatch the taskq */
	uint_t		e_cnt;		/* number of endpoints in the pool */
	zoneid_t	e_zoneid;	/* zoneid of endpoint type */
	kcondvar_t	e_async_cv;	/* cv for asynchronous reap threads */
	uint_t		e_async_count;	/* count of asynchronous reap threads */
} endpnt_type_t;

typedef struct endpnt {
	list_node_t	e_node;		/* link to the pool */
	list_node_t	e_idle;		/* link to the idle list */
	endpnt_type_t	*e_type;	/* back pointer to endpoint type */
	TIUSER		*e_tiptr;	/* pointer to transport endpoint */
	queue_t		*e_wq;		/* write queue */
	uint_t		e_flags;	/* endpoint flags */
	uint_t		e_ref;		/* ref count on endpoint */
	kcondvar_t	e_cv;		/* condition variable */
	kmutex_t	e_lock;		/* protects cv and flags */
	time_t		e_itime;	/* time when rele'd */
} endpnt_t;

#define	ENDPNT_ESTABLISHED	0x1	/* endpoint is established */
#define	ENDPNT_WAITING		0x2	/* thread waiting for endpoint */
#define	ENDPNT_BOUND		0x4	/* endpoint is bound */
#define	ENDPNT_STALE		0x8	/* endpoint is dead */
#define	ENDPNT_ONIDLE		0x10	/* endpoint is on the idle list */

static krwlock_t	endpnt_type_lock;	/* protects endpnt_type_list */
static endpnt_type_t	*endpnt_type_list = NULL; /* list of CLTS endpoints */
static struct kmem_cache *endpnt_cache;	/* cache of endpnt_t's */
static taskq_t		*endpnt_taskq;	/* endpnt_t reaper thread */
static bool_t		taskq_created;	/* flag for endpnt_taskq */
static kmutex_t		endpnt_taskq_lock;	/* taskq lock */
static zone_key_t	endpnt_destructor_key;

#define	DEFAULT_ENDPOINT_REAP_INTERVAL	60	/* 1 minute */
#define	DEFAULT_INTERVAL_SHIFT		30	/* 30 seconds */

/*
 * Endpoint tunables
 */
static int	clnt_clts_max_endpoints = -1;
static int	clnt_clts_hash_size = DEFAULT_HASH_SIZE;
static time_t	clnt_clts_endpoint_reap_interval = -1;
static clock_t	clnt_clts_taskq_dispatch_interval;

/*
 * Response completion hash queue
 */
static call_table_t *clts_call_ht;

/*
 * Routines for the endpoint manager
 */
static struct endpnt_type *endpnt_type_create(struct knetconfig *);
static void endpnt_type_free(struct endpnt_type *);
static int check_endpnt(struct endpnt *, struct endpnt **);
static struct endpnt *endpnt_get(struct knetconfig *, int);
static void endpnt_rele(struct endpnt *);
static void endpnt_reap_settimer(endpnt_type_t *);
static void endpnt_reap(endpnt_type_t *);
static void endpnt_reap_dispatch(void *);
static void endpnt_reclaim(zoneid_t);


/*
 * Request dispatching function.
 */
static int clnt_clts_dispatch_send(queue_t *q, mblk_t *, struct netbuf *addr,
    calllist_t *, uint_t);

/*
 * The size of the preserialized RPC header information.
 */
#define	CKU_HDRSIZE	20
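
/*
 * The 20 bytes cover the five 32-bit XDR words that xdr_callhdr()
 * pre-serializes in clnt_clts_kcreate(): xid, direction, rpcvers,
 * prog, and vers.
 */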
/*
 * The initial allocation size.  It is small to reduce space requirements.
 */
#define	CKU_INITSIZE	2048
/*
 * The size of additional allocations, if required.  It is larger to
 * reduce the number of actual allocations.
 */
#define	CKU_ALLOCSIZE	8192

/*
 * Private data per rpc handle.  This structure is allocated by
 * clnt_clts_kcreate, and freed by clnt_clts_kdestroy.
 */
struct cku_private {
	CLIENT			 cku_client;	/* client handle */
	int			 cku_retrys;	/* request retries */
	calllist_t		 cku_call;
	struct endpnt		*cku_endpnt;	/* open end point */
	struct knetconfig	 cku_config;
	struct netbuf		 cku_addr;	/* remote address */
	struct rpc_err		 cku_err;	/* error status */
	XDR			 cku_outxdr;	/* xdr stream for output */
	XDR			 cku_inxdr;	/* xdr stream for input */
	char			 cku_rpchdr[CKU_HDRSIZE + 4]; /* rpc header */
	struct cred		*cku_cred;	/* credentials */
	struct rpc_timers	*cku_timers;	/* for estimating RTT */
	struct rpc_timers	*cku_timeall;	/* for estimating RTT */
	void			(*cku_feedback)(int, int, caddr_t);
						/* ptr to feedback rtn */
	caddr_t			 cku_feedarg;	/* argument for feedback func */
	uint32_t		 cku_xid;	/* current XID */
	bool_t			 cku_bcast;	/* RPC broadcast hint */
	int			 cku_useresvport; /* Use reserved port */
	struct rpc_clts_client	*cku_stats;	/* counters for the zone */
};

static const struct rpc_clts_client {
	kstat_named_t	rccalls;
	kstat_named_t	rcbadcalls;
	kstat_named_t	rcretrans;
	kstat_named_t	rcbadxids;
	kstat_named_t	rctimeouts;
	kstat_named_t	rcnewcreds;
	kstat_named_t	rcbadverfs;
	kstat_named_t	rctimers;
	kstat_named_t	rcnomem;
	kstat_named_t	rccantsend;
} clts_rcstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "retrans",	KSTAT_DATA_UINT64 },
	{ "badxids",	KSTAT_DATA_UINT64 },
	{ "timeouts",	KSTAT_DATA_UINT64 },
	{ "newcreds",	KSTAT_DATA_UINT64 },
	{ "badverfs",	KSTAT_DATA_UINT64 },
	{ "timers",	KSTAT_DATA_UINT64 },
	{ "nomem",	KSTAT_DATA_UINT64 },
	{ "cantsend",	KSTAT_DATA_UINT64 },
};

static uint_t clts_rcstat_ndata =
	sizeof (clts_rcstat_tmpl) / sizeof (kstat_named_t);

#define	RCSTAT_INCR(s, x)	\
	atomic_add_64(&(s)->x.value.ui64, 1)

#define	ptoh(p)		(&((p)->cku_client))
#define	htop(h)		((struct cku_private *)((h)->cl_private))

/*
 * Times to retry
 */
#define	SNDTRIES	4
#define	REFRESHES	2	/* authentication refreshes */

/*
 * The following is used to determine the global default behavior for
 * CLTS when binding to a local port.
 *
 * If the value is set to 1 the default will be to select a reserved
 * (aka privileged) port, if the value is zero the default will be to
 * use non-reserved ports.  Users of kRPC may override this by using
 * CLNT_CONTROL() and CLSET_BINDRESVPORT.
 */
static int clnt_clts_do_bindresvport = 1;

#define	BINDRESVPORT_RETRIES 5
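
/*
 * For example, a kRPC consumer that must avoid privileged source ports
 * could override the default on a per-handle basis (illustrative
 * sketch only):
 *
 *	int off = 0;
 *	(void) CLNT_CONTROL(client, CLSET_BINDRESVPORT, (char *)&off);
 */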

void
clnt_clts_stats_init(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
	kstat_t *ksp;
	kstat_named_t *knp;

	knp = rpcstat_zone_init_common(zoneid, "unix", "rpc_clts_client",
	    (const kstat_named_t *)&clts_rcstat_tmpl,
	    sizeof (clts_rcstat_tmpl));
	/*
	 * Backwards compatibility for old kstat clients
	 */
	ksp = kstat_create_zone("unix", 0, "rpc_client", "rpc",
	    KSTAT_TYPE_NAMED, clts_rcstat_ndata,
	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE, zoneid);
	if (ksp) {
		ksp->ks_data = knp;
		kstat_install(ksp);
	}
	*statsp = (struct rpc_clts_client *)knp;
}

void
clnt_clts_stats_fini(zoneid_t zoneid, struct rpc_clts_client **statsp)
{
	rpcstat_zone_fini_common(zoneid, "unix", "rpc_clts_client");
	kstat_delete_byname_zone("unix", 0, "rpc_client", zoneid);
	kmem_free(*statsp, sizeof (clts_rcstat_tmpl));
}
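
/*
 * From userland, the counters installed above should be visible with
 * kstat(1M), e.g. "kstat -m unix -n rpc_clts_client" (or "rpc_client"
 * for the backwards-compatible view).
 */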

/*
 * Create an rpc handle for a clts rpc connection.
 * Allocates space for the handle structure and the private data.
 */
/* ARGSUSED */
int
clnt_clts_kcreate(struct knetconfig *config, struct netbuf *addr,
	rpcprog_t pgm, rpcvers_t vers, int retrys, struct cred *cred,
	CLIENT **cl)
{
	CLIENT *h;
	struct cku_private *p;
	struct rpc_msg call_msg;
	int error;
	int plen;

	if (cl == NULL)
		return (EINVAL);

	*cl = NULL;
	error = 0;

	p = kmem_zalloc(sizeof (*p), KM_SLEEP);

	h = ptoh(p);

	/* handle */
	h->cl_ops = &clts_ops;
	h->cl_private = (caddr_t)p;
	h->cl_auth = authkern_create();

	/* call message, just used to pre-serialize below */
	call_msg.rm_xid = 0;
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = pgm;
	call_msg.rm_call.cb_vers = vers;

	/* private */
	clnt_clts_kinit(h, addr, retrys, cred);

	xdrmem_create(&p->cku_outxdr, p->cku_rpchdr, CKU_HDRSIZE, XDR_ENCODE);

	/* pre-serialize call message header */
	if (!xdr_callhdr(&p->cku_outxdr, &call_msg)) {
		error = EINVAL;		/* XXX */
		goto bad;
	}

	p->cku_config.knc_rdev = config->knc_rdev;
	p->cku_config.knc_semantics = config->knc_semantics;
	plen = strlen(config->knc_protofmly) + 1;
	p->cku_config.knc_protofmly = kmem_alloc(plen, KM_SLEEP);
	bcopy(config->knc_protofmly, p->cku_config.knc_protofmly, plen);
	p->cku_useresvport = -1; /* value has not been set */

	cv_init(&p->cku_call.call_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&p->cku_call.call_lock, NULL, MUTEX_DEFAULT, NULL);

	*cl = h;
	return (0);

bad:
	auth_destroy(h->cl_auth);
	kmem_free(p->cku_addr.buf, addr->maxlen);
	kmem_free(p, sizeof (struct cku_private));

	return (error);
}
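
/*
 * A typical caller looks roughly like this (sketch only; assumes a
 * filled-in knetconfig and server netbuf supplied by the consumer,
 * e.g. from its mount or bind logic):
 *
 *	CLIENT *client;
 *	enum clnt_stat stat;
 *
 *	if (clnt_clts_kcreate(&knconf, &srvaddr, prog, vers,
 *	    retries, CRED(), &client) == 0) {
 *		stat = CLNT_CALL(client, proc, xdr_args, (caddr_t)&args,
 *		    xdr_res, (caddr_t)&res, tmo);
 *		CLNT_DESTROY(client);
 *	}
 */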

void
clnt_clts_kinit(CLIENT *h, struct netbuf *addr, int retrys, cred_t *cred)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	struct rpcstat *rsp;

	rsp = zone_getspecific(rpcstat_zone_key, rpc_zone());
	ASSERT(rsp != NULL);

	p->cku_retrys = retrys;

	if (p->cku_addr.maxlen < addr->len) {
		if (p->cku_addr.maxlen != 0 && p->cku_addr.buf != NULL)
			kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);

		p->cku_addr.buf = kmem_zalloc(addr->maxlen, KM_SLEEP);
		p->cku_addr.maxlen = addr->maxlen;
	}

	p->cku_addr.len = addr->len;
	bcopy(addr->buf, p->cku_addr.buf, addr->len);

	p->cku_cred = cred;
	p->cku_xid = 0;
	p->cku_timers = NULL;
	p->cku_timeall = NULL;
	p->cku_feedback = NULL;
	p->cku_bcast = FALSE;
	p->cku_call.call_xid = 0;
	p->cku_call.call_hash = 0;
	p->cku_call.call_notified = FALSE;
	p->cku_call.call_next = NULL;
	p->cku_call.call_prev = NULL;
	p->cku_call.call_reply = NULL;
	p->cku_call.call_wq = NULL;
	p->cku_stats = rsp->rpc_clts_client;
}

/*
 * set the timers.  Return current retransmission timeout.
 */
static int
clnt_clts_ksettimers(CLIENT *h, struct rpc_timers *t, struct rpc_timers *all,
	int minimum, void (*feedback)(int, int, caddr_t), caddr_t arg,
	uint32_t xid)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	int value;

	p->cku_feedback = feedback;
	p->cku_feedarg = arg;
	p->cku_timers = t;
	p->cku_timeall = all;
	if (xid)
		p->cku_xid = xid;
	value = all->rt_rtxcur;
	value += t->rt_rtxcur;
	if (value < minimum)
		return (minimum);
	RCSTAT_INCR(p->cku_stats, rctimers);
	return (value);
}

/*
 * Time out back off function. tim is in HZ
 */
#define	MAXTIMO	(20 * hz)
#define	backoff(tim)	(((tim) < MAXTIMO) ? dobackoff(tim) : (tim))
#define	dobackoff(tim)	((((tim) << 1) > MAXTIMO) ? MAXTIMO : ((tim) << 1))

#define	RETRY_POLL_TIMO	30
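
/*
 * backoff() doubles the timeout on each pass and clamps it at MAXTIMO.
 * For example, with hz = 100 and an initial timeout of 300 ticks (3s),
 * successive retransmissions wait 600, 1200, then 2000 ticks (the
 * MAXTIMO cap, i.e. 20 seconds), and stay there.
 */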

/*
 * Call remote procedure.
 * Most of the work of rpc is done here.  We serialize what is left
 * of the header (some was pre-serialized in the handle), serialize
 * the arguments, and send it off.  We wait for a reply or a time out.
 * Timeout causes an immediate return, other packet problems may cause
 * a retry on the receive.  When a good packet is received we deserialize
 * it, and check verification.  A bad reply code will cause one retry
 * with full (longhand) credentials.
 */
enum clnt_stat
clnt_clts_kcallit_addr(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
	caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
	struct timeval wait, struct netbuf *sin)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	XDR *xdrs;
	int stries = p->cku_retrys;
	int refreshes = REFRESHES;	/* number of times to refresh cred */
	int round_trip;			/* time the RPC */
	int error;
	int hdrsz;
	mblk_t *mp;
	mblk_t *mpdup;
	mblk_t *resp = NULL;
	mblk_t *tmp;
	calllist_t *call = &p->cku_call;
	clock_t ori_timout, timout;
	bool_t interrupted;
	enum clnt_stat status;
	struct rpc_msg reply_msg;
	enum clnt_stat re_status;
	endpnt_t *endpt;

	RCSTAT_INCR(p->cku_stats, rccalls);

	RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_sec: %ld\n", wait.tv_sec);
	RPCLOG(2, "clnt_clts_kcallit_addr: wait.tv_usec: %ld\n", wait.tv_usec);

	timout = TIMEVAL_TO_TICK(&wait);
	ori_timout = timout;

	if (p->cku_xid == 0) {
		p->cku_xid = alloc_xid();
		if (p->cku_endpnt != NULL)
			endpnt_rele(p->cku_endpnt);
		p->cku_endpnt = NULL;
	}
	call->call_zoneid = rpc_zoneid();

	mpdup = NULL;
call_again:

	if (mpdup == NULL) {

		while ((mp = allocb(CKU_INITSIZE, BPRI_LO)) == NULL) {
			if (strwaitbuf(CKU_INITSIZE, BPRI_LO)) {
				p->cku_err.re_status = RPC_SYSTEMERROR;
				p->cku_err.re_errno = ENOSR;
				goto done;
			}
		}

		xdrs = &p->cku_outxdr;
		xdrmblk_init(xdrs, mp, XDR_ENCODE, CKU_ALLOCSIZE);

		if (h->cl_auth->ah_cred.oa_flavor != RPCSEC_GSS) {
			/*
			 * Copy in the preserialized RPC header
			 * information.
			 */
			bcopy(p->cku_rpchdr, mp->b_rptr, CKU_HDRSIZE);

			/*
			 * transaction id is the 1st thing in the output
			 * buffer.
			 */
			/* LINTED pointer alignment */
			(*(uint32_t *)(mp->b_rptr)) = p->cku_xid;

			/* Skip the preserialized stuff. */
			XDR_SETPOS(xdrs, CKU_HDRSIZE);

			/* Serialize dynamic stuff into the output buffer. */
			if ((!XDR_PUTINT32(xdrs, (int32_t *)&procnum)) ||
			    (!AUTH_MARSHALL(h->cl_auth, xdrs, p->cku_cred)) ||
			    (!(*xdr_args)(xdrs, argsp))) {
				freemsg(mp);
				p->cku_err.re_status = RPC_CANTENCODEARGS;
				p->cku_err.re_errno = EIO;
				goto done;
			}
		} else {
			uint32_t *uproc = (uint32_t *)
			    &p->cku_rpchdr[CKU_HDRSIZE];
			IXDR_PUT_U_INT32(uproc, procnum);

			(*(uint32_t *)(&p->cku_rpchdr[0])) = p->cku_xid;
			XDR_SETPOS(xdrs, 0);

			/* Serialize the procedure number and the arguments. */
			if (!AUTH_WRAP(h->cl_auth, (caddr_t)p->cku_rpchdr,
			    CKU_HDRSIZE+4, xdrs, xdr_args, argsp)) {
				freemsg(mp);
				p->cku_err.re_status = RPC_CANTENCODEARGS;
				p->cku_err.re_errno = EIO;
				goto done;
			}
		}
	} else
		mp = mpdup;

	mpdup = dupmsg(mp);
	if (mpdup == NULL) {
		freemsg(mp);
		p->cku_err.re_status = RPC_SYSTEMERROR;
		p->cku_err.re_errno = ENOSR;
		goto done;
	}

	/*
	 * Grab an endpnt only if the endpoint is NULL.  We could be retrying
	 * the request and in this case we want to go through the same
	 * source port, so that the duplicate request cache may detect a
	 * retry.
	 */

	if (p->cku_endpnt == NULL)
		p->cku_endpnt = endpnt_get(&p->cku_config, p->cku_useresvport);

	if (p->cku_endpnt == NULL) {
		freemsg(mp);
		p->cku_err.re_status = RPC_SYSTEMERROR;
		p->cku_err.re_errno = ENOSR;
		goto done;
	}

	round_trip = lbolt;

	error = clnt_clts_dispatch_send(p->cku_endpnt->e_wq, mp,
	    &p->cku_addr, call, p->cku_xid);

	if (error != 0) {
		freemsg(mp);
		p->cku_err.re_status = RPC_CANTSEND;
		p->cku_err.re_errno = error;
		RCSTAT_INCR(p->cku_stats, rccantsend);
		goto done1;
	}

	RPCLOG(64, "clnt_clts_kcallit_addr: sent call for xid 0x%x\n",
	    p->cku_xid);

	/*
	 * There are two reasons for which we go back to tryread.
	 *
	 * a) In case the status is RPC_PROCUNAVAIL and we sent out a
	 *    broadcast we should not get any invalid messages with the
	 *    RPC_PROCUNAVAIL error back. Some broken RPC implementations
	 *    send them and for this we have to ignore them ( as we would
	 *    have never received them ) and look for another message
	 *    which might contain the valid response because we don't know
	 *    how many broken implementations are in the network. So we are
	 *    going to loop until
	 *    - we received a valid response
	 *    - we have processed all invalid responses and
	 *      got a time out when we try to receive again a
	 *      message.
	 *
	 * b) We will jump back to tryread also in case we failed
	 *    within the AUTH_VALIDATE. In this case we should move
	 *    on and loop until we received a valid response or we
	 *    have processed all responses with broken authentication
	 *    and we got a time out when we try to receive a message.
	 */
tryread:
	mutex_enter(&call->call_lock);
	interrupted = FALSE;
	if (call->call_notified == FALSE) {
		klwp_t *lwp = ttolwp(curthread);
		clock_t cv_wait_ret = 1; /* init to > 0 */
		clock_t cv_timout = timout;

		if (lwp != NULL)
			lwp->lwp_nostop++;

		cv_timout += lbolt;

		if (h->cl_nosignal)
			while ((cv_wait_ret =
			    cv_timedwait(&call->call_cv,
			    &call->call_lock, cv_timout)) > 0 &&
			    call->call_notified == FALSE)
				;
		else
			while ((cv_wait_ret =
			    cv_timedwait_sig(&call->call_cv,
			    &call->call_lock, cv_timout)) > 0 &&
			    call->call_notified == FALSE)
				;

		if (cv_wait_ret == 0)
			interrupted = TRUE;

		if (lwp != NULL)
			lwp->lwp_nostop--;
	}
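
	/*
	 * Note: cv_timedwait_sig() returns 0 only when the wait was
	 * interrupted (e.g. by a signal); a timeout is reported as -1,
	 * which also terminates the loops above.  cv_timedwait() cannot
	 * be interrupted, so in the cl_nosignal case interrupted stays
	 * FALSE.
	 */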
	resp = call->call_reply;
	call->call_reply = NULL;
	status = call->call_status;
	/*
	 * We have to reset the call_notified here. In case we have
	 * to do a retry ( e.g. in case we got a RPC_PROCUNAVAIL
	 * error ) we need to set this to false to ensure that
	 * we will wait for the next message. When the next message
	 * is going to arrive the function clnt_clts_dispatch_notify
	 * will set this to true again.
	 */
	call->call_notified = FALSE;
	mutex_exit(&call->call_lock);

	if (status == RPC_TIMEDOUT) {
		if (interrupted) {
			/*
			 * We got interrupted, bail out
			 */
			p->cku_err.re_status = RPC_INTR;
			p->cku_err.re_errno = EINTR;
			goto done1;
		} else {
			/*
			 * It's possible that our response arrived
			 * right after we timed out.  Check to see
			 * if it has arrived before we remove the
			 * calllist from the dispatch queue.
			 */
			mutex_enter(&call->call_lock);
			if (call->call_notified == TRUE) {
				resp = call->call_reply;
				call->call_reply = NULL;
				mutex_exit(&call->call_lock);
				RPCLOG(8, "clnt_clts_kcallit_addr: "
				    "response received for request "
				    "w/xid 0x%x after timeout\n",
				    p->cku_xid);
				goto getresponse;
			}
			mutex_exit(&call->call_lock);

			RPCLOG(8, "clnt_clts_kcallit_addr: "
			    "request w/xid 0x%x timedout "
			    "waiting for reply\n", p->cku_xid);
#if 0 /* XXX not yet */
			/*
			 * Timeout may be due to a dead gateway. Send
			 * an ioctl downstream advising deletion of
			 * route when we reach the half-way point to
			 * timing out.
			 */
			if (stries == p->cku_retrys/2) {
				t_kadvise(p->cku_endpnt->e_tiptr,
				    (uchar_t *)p->cku_addr.buf,
				    p->cku_addr.len);
			}
#endif /* not yet */
			p->cku_err.re_status = RPC_TIMEDOUT;
			p->cku_err.re_errno = ETIMEDOUT;
			RCSTAT_INCR(p->cku_stats, rctimeouts);
			goto done1;
		}
	}

getresponse:
	/*
	 * Check to see if a response arrived.  If one is present then
	 * proceed to process the response.  Otherwise fall through to
	 * retry or retransmit the request.  This is probably not the
	 * optimal thing to do, but since we are most likely dealing
	 * with an unreliable transport it is the safe thing to do.
	 */
	if (resp == NULL) {
		p->cku_err.re_status = RPC_CANTRECV;
		p->cku_err.re_errno = EIO;
		goto done1;
	}

	/*
	 * Prepare the message for further processing.  We need to remove
	 * the datagram header and copy the source address if necessary.  No
	 * need to verify the header since rpcmod took care of that.
	 */
	/*
	 * Copy the source address if the caller has supplied a netbuf.
	 */
	if (sin != NULL) {
		union T_primitives *pptr;

		pptr = (union T_primitives *)resp->b_rptr;
		bcopy(resp->b_rptr + pptr->unitdata_ind.SRC_offset, sin->buf,
		    pptr->unitdata_ind.SRC_length);
		sin->len = pptr->unitdata_ind.SRC_length;
	}

	/*
	 * Pop off the datagram header.
	 */
	hdrsz = resp->b_wptr - resp->b_rptr;
	if ((resp->b_wptr - (resp->b_rptr + hdrsz)) == 0) {
		tmp = resp;
		resp = resp->b_cont;
		tmp->b_cont = NULL;
		freeb(tmp);
	} else {
		unsigned char *ud_off = resp->b_rptr;
		resp->b_rptr += hdrsz;
		tmp = dupb(resp);
		if (tmp == NULL) {
			p->cku_err.re_status = RPC_SYSTEMERROR;
			p->cku_err.re_errno = ENOSR;
			freemsg(resp);
			goto done1;
		}
		tmp->b_cont = resp->b_cont;
		resp->b_rptr = ud_off;
		freeb(resp);
		resp = tmp;
	}

	round_trip = lbolt - round_trip;
	/*
	 * Van Jacobson timer algorithm here, only if NOT a retransmission.
	 */
	if (p->cku_timers != NULL && stries == p->cku_retrys) {
		int rt;

		rt = round_trip;
		rt -= (p->cku_timers->rt_srtt >> 3);
		p->cku_timers->rt_srtt += rt;
		if (rt < 0)
			rt = - rt;
		rt -= (p->cku_timers->rt_deviate >> 2);
		p->cku_timers->rt_deviate += rt;
		p->cku_timers->rt_rtxcur =
		    (clock_t)((p->cku_timers->rt_srtt >> 2) +
		    p->cku_timers->rt_deviate) >> 1;

		rt = round_trip;
		rt -= (p->cku_timeall->rt_srtt >> 3);
		p->cku_timeall->rt_srtt += rt;
		if (rt < 0)
			rt = - rt;
		rt -= (p->cku_timeall->rt_deviate >> 2);
		p->cku_timeall->rt_deviate += rt;
		p->cku_timeall->rt_rtxcur =
		    (clock_t)((p->cku_timeall->rt_srtt >> 2) +
		    p->cku_timeall->rt_deviate) >> 1;
		if (p->cku_feedback != NULL) {
			(*p->cku_feedback)(FEEDBACK_OK, procnum,
			    p->cku_feedarg);
		}
	}
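
	/*
	 * In the update above rt_srtt holds 8x the smoothed RTT and
	 * rt_deviate holds 4x the mean deviation, so the new
	 * retransmission timeout works out to roughly srtt + 2 * mdev
	 * ticks, in the style of Van Jacobson's 1988 congestion
	 * avoidance paper.
	 */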

	/*
	 * Process reply
	 */
	xdrs = &(p->cku_inxdr);
	xdrmblk_init(xdrs, resp, XDR_DECODE, 0);

	reply_msg.rm_direction = REPLY;
	reply_msg.rm_reply.rp_stat = MSG_ACCEPTED;
	reply_msg.acpted_rply.ar_stat = SUCCESS;
	reply_msg.acpted_rply.ar_verf = _null_auth;
	/*
	 * xdr_results will be done in AUTH_UNWRAP.
	 */
	reply_msg.acpted_rply.ar_results.where = NULL;
	reply_msg.acpted_rply.ar_results.proc = xdr_void;

	/*
	 * Decode and validate the response.
	 */
	if (!xdr_replymsg(xdrs, &reply_msg)) {
		p->cku_err.re_status = RPC_CANTDECODERES;
		p->cku_err.re_errno = EIO;
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto done1;
	}

	_seterr_reply(&reply_msg, &(p->cku_err));

	re_status = p->cku_err.re_status;
	if (re_status == RPC_SUCCESS) {
		/*
		 * Reply is good, check auth.
		 */
		if (!AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			p->cku_err.re_status = RPC_AUTHERROR;
			p->cku_err.re_why = AUTH_INVALIDRESP;
			RCSTAT_INCR(p->cku_stats, rcbadverfs);
			(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
			goto tryread;
		}
		if (!AUTH_UNWRAP(h->cl_auth, xdrs, xdr_results, resultsp)) {
			p->cku_err.re_status = RPC_CANTDECODERES;
			p->cku_err.re_errno = EIO;
		}
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto done1;
	}
	/* set errno in case we can't recover */
	if (re_status != RPC_VERSMISMATCH &&
	    re_status != RPC_AUTHERROR && re_status != RPC_PROGVERSMISMATCH)
		p->cku_err.re_errno = EIO;
	/*
	 * Determine whether or not we're doing an RPC
	 * broadcast. Some server implementations don't
	 * follow RFC 1050, section 7.4.2 in that they
	 * don't remain silent when they see a proc
	 * they don't support. Therefore we keep trying
	 * to receive on RPC_PROCUNAVAIL, hoping to get
	 * a valid response from a compliant server.
	 */
	if (re_status == RPC_PROCUNAVAIL && p->cku_bcast) {
		(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
		goto tryread;
	}
	if (re_status == RPC_AUTHERROR) {
		/*
		 * Maybe our credentials need to be refreshed
		 */
		if (refreshes > 0 &&
		    AUTH_REFRESH(h->cl_auth, &reply_msg, p->cku_cred)) {
			/*
			 * The credential is refreshed. Try the request again.
			 * Even if stries == 0, we still retry as long as
			 * refreshes > 0. This prevents a soft authentication
			 * error turning into a hard one at an upper level.
			 */
			refreshes--;
			RCSTAT_INCR(p->cku_stats, rcbadcalls);
			RCSTAT_INCR(p->cku_stats, rcnewcreds);

			(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
			freemsg(mpdup);
			call_table_remove(call);
			mutex_enter(&call->call_lock);
			if (call->call_reply != NULL) {
				freemsg(call->call_reply);
				call->call_reply = NULL;
			}
			mutex_exit(&call->call_lock);

			freemsg(resp);
			mpdup = NULL;
			goto call_again;
		}
		/*
		 * We have used the client handle to do an AUTH_REFRESH
		 * and the RPC status may be set to RPC_SUCCESS;
		 * Let's make sure to set it to RPC_AUTHERROR.
		 */
		p->cku_err.re_status = RPC_CANTDECODERES;

		/*
		 * Map recoverable and unrecoverable
		 * authentication errors to appropriate errno
		 */
		switch (p->cku_err.re_why) {
		case AUTH_TOOWEAK:
			/*
			 * Could be an nfsportmon failure, set
			 * useresvport and try again.
			 */
			if (p->cku_useresvport != 1) {
				p->cku_useresvport = 1;
				(void) xdr_rpc_free_verifier(xdrs, &reply_msg);
				freemsg(mpdup);

				call_table_remove(call);
				mutex_enter(&call->call_lock);
				if (call->call_reply != NULL) {
					freemsg(call->call_reply);
					call->call_reply = NULL;
				}
				mutex_exit(&call->call_lock);

				freemsg(resp);
				mpdup = NULL;
				endpt = p->cku_endpnt;
				if (endpt->e_tiptr != NULL) {
					mutex_enter(&endpt->e_lock);
					endpt->e_flags &= ~ENDPNT_BOUND;
					(void) t_kclose(endpt->e_tiptr, 1);
					endpt->e_tiptr = NULL;
					mutex_exit(&endpt->e_lock);
				}

				p->cku_xid = alloc_xid();
				endpnt_rele(p->cku_endpnt);
				p->cku_endpnt = NULL;
				goto call_again;
			}
			/* FALLTHRU */
		case AUTH_BADCRED:
		case AUTH_BADVERF:
		case AUTH_INVALIDRESP:
		case AUTH_FAILED:
		case RPCSEC_GSS_NOCRED:
		case RPCSEC_GSS_FAILED:
			p->cku_err.re_errno = EACCES;
			break;
		case AUTH_REJECTEDCRED:
		case AUTH_REJECTEDVERF:
		default:
			p->cku_err.re_errno = EIO;
			break;
		}
		RPCLOG(1, "clnt_clts_kcallit : authentication failed "
		    "with RPC_AUTHERROR of type %d\n",
		    p->cku_err.re_why);
	}

	(void) xdr_rpc_free_verifier(xdrs, &reply_msg);

done1:
	call_table_remove(call);
	mutex_enter(&call->call_lock);
	if (call->call_reply != NULL) {
		freemsg(call->call_reply);
		call->call_reply = NULL;
	}
	mutex_exit(&call->call_lock);
	RPCLOG(64, "clnt_clts_kcallit_addr: xid 0x%x taken off dispatch list",
	    p->cku_xid);

done:
	if (resp != NULL) {
		freemsg(resp);
		resp = NULL;
	}

	if ((p->cku_err.re_status != RPC_SUCCESS) &&
	    (p->cku_err.re_status != RPC_INTR) &&
	    (p->cku_err.re_status != RPC_UDERROR) &&
	    !IS_UNRECOVERABLE_RPC(p->cku_err.re_status)) {
		if (p->cku_feedback != NULL && stries == p->cku_retrys) {
			(*p->cku_feedback)(FEEDBACK_REXMIT1, procnum,
			    p->cku_feedarg);
		}

		timout = backoff(timout);
		if (p->cku_timeall != (struct rpc_timers *)0)
			p->cku_timeall->rt_rtxcur = timout;

		if (p->cku_err.re_status == RPC_SYSTEMERROR ||
		    p->cku_err.re_status == RPC_CANTSEND) {
			/*
			 * Errors due to lack of resources, wait a bit
			 * and try again.
			 */
			(void) delay(hz/10);
			/* (void) sleep((caddr_t)&lbolt, PZERO-4); */
		}
		if (stries-- > 0) {
			RCSTAT_INCR(p->cku_stats, rcretrans);
			goto call_again;
		}
	}

	if (mpdup != NULL)
		freemsg(mpdup);

	if (p->cku_err.re_status != RPC_SUCCESS) {
		RCSTAT_INCR(p->cku_stats, rcbadcalls);
	}

	/*
	 * Allow the endpoint to be held by the client handle in case this
	 * RPC was not successful.  A retry may occur at a higher level and
	 * in this case we may want to send the request over the same
	 * source port.
	 * Endpoint is also released for one-way RPC: no reply, nor retransmit
	 * is expected.
	 */
	if ((p->cku_err.re_status == RPC_SUCCESS ||
	    (p->cku_err.re_status == RPC_TIMEDOUT && ori_timout == 0)) &&
	    p->cku_endpnt != NULL) {
		endpnt_rele(p->cku_endpnt);
		p->cku_endpnt = NULL;
	} else {
		DTRACE_PROBE2(clnt_clts_kcallit_done, int,
		    p->cku_err.re_status, struct endpnt *, p->cku_endpnt);
	}

	return (p->cku_err.re_status);
}

static enum clnt_stat
clnt_clts_kcallit(CLIENT *h, rpcproc_t procnum, xdrproc_t xdr_args,
	caddr_t argsp, xdrproc_t xdr_results, caddr_t resultsp,
	struct timeval wait)
{
	return (clnt_clts_kcallit_addr(h, procnum, xdr_args, argsp,
	    xdr_results, resultsp, wait, NULL));
}

/*
 * Return error info on this handle.
 */
static void
clnt_clts_kerror(CLIENT *h, struct rpc_err *err)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);

	*err = p->cku_err;
}

static bool_t
clnt_clts_kfreeres(CLIENT *h, xdrproc_t xdr_res, caddr_t res_ptr)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	XDR *xdrs;

	xdrs = &(p->cku_outxdr);
	xdrs->x_op = XDR_FREE;
	return ((*xdr_res)(xdrs, res_ptr));
}

/*ARGSUSED*/
static void
clnt_clts_kabort(CLIENT *h)
{
}

static bool_t
clnt_clts_kcontrol(CLIENT *h, int cmd, char *arg)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);

	switch (cmd) {
	case CLSET_XID:
		p->cku_xid = *((uint32_t *)arg);
		return (TRUE);

	case CLGET_XID:
		*((uint32_t *)arg) = p->cku_xid;
		return (TRUE);

	case CLSET_BCAST:
		p->cku_bcast = *((uint32_t *)arg);
		return (TRUE);

	case CLGET_BCAST:
		*((uint32_t *)arg) = p->cku_bcast;
		return (TRUE);

	case CLSET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		if (*(int *)arg != 1 && *(int *)arg != 0)
			return (FALSE);

		p->cku_useresvport = *(int *)arg;

		return (TRUE);

	case CLGET_BINDRESVPORT:
		if (arg == NULL)
			return (FALSE);

		*(int *)arg = p->cku_useresvport;

		return (TRUE);

	default:
		return (FALSE);
	}
}
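
/*
 * All of these knobs are reached via CLNT_CONTROL().  For instance, a
 * caller that wants RPC-broadcast semantics on the handle might do
 * (illustrative sketch):
 *
 *	uint32_t bcast = TRUE;
 *	(void) CLNT_CONTROL(client, CLSET_BCAST, (char *)&bcast);
 */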

/*
 * Destroy rpc handle.
 * Frees the space used for output buffer, private data, and handle
 * structure, and the file pointer/TLI data on last reference.
 */
static void
clnt_clts_kdestroy(CLIENT *h)
{
	/* LINTED pointer alignment */
	struct cku_private *p = htop(h);
	calllist_t *call = &p->cku_call;

	int plen;

	RPCLOG(8, "clnt_clts_kdestroy h: %p\n", (void *)h);
	RPCLOG(8, "clnt_clts_kdestroy h: xid=0x%x\n", p->cku_xid);

	if (p->cku_endpnt != NULL)
		endpnt_rele(p->cku_endpnt);

	cv_destroy(&call->call_cv);
	mutex_destroy(&call->call_lock);

	plen = strlen(p->cku_config.knc_protofmly) + 1;
	kmem_free(p->cku_config.knc_protofmly, plen);
	kmem_free(p->cku_addr.buf, p->cku_addr.maxlen);
	kmem_free(p, sizeof (*p));
}

/*
 * The connectionless (CLTS) kRPC endpoint management subsystem.
 *
 * Because endpoints are potentially shared among threads making RPC calls,
 * they are managed in a pool according to type (endpnt_type_t).  Each
 * endpnt_type_t points to a list of usable endpoints through the e_pool
 * field, which is of type list_t.  list_t is a doubly-linked list.
 * The number of endpoints in the pool is stored in the e_cnt field of
 * endpnt_type_t and the endpoints are reference counted using the e_ref field
 * in the endpnt_t structure.
 *
 * As an optimization, endpoints that have no references are also linked
 * to an idle list via e_ilist which is also of type list_t.  When a thread
 * calls endpnt_get() to obtain a transport endpoint, the idle list is first
 * consulted and if such an endpoint exists, it is removed from the idle list
 * and returned to the caller.
 *
 * If the idle list is empty, then a check is made to see if more endpoints
 * can be created.  If so, we proceed and create a new endpoint which is added
 * to the pool and returned to the caller.  If we have reached the limit and
 * cannot make a new endpoint then one is returned to the caller via round-
 * robin policy.
 *
 * When an endpoint is placed on the idle list by a thread calling
 * endpnt_rele(), it is timestamped and then a reaper taskq is scheduled to
 * be dispatched if one hasn't already been.  When the timer fires, the
 * taskq traverses the idle list and checks to see which endpoints are
 * eligible to be closed.  It determines this by checking if the timestamp
 * when the endpoint was released has exceeded the threshold for how long
 * it should stay alive.
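 *
 * In sketch form, the pattern a caller in this file follows is:
 *
 *	endpnt_t *endp = endpnt_get(&config, useresvport);
 *	if (endp != NULL) {
 *		... transmit the datagram via endp->e_wq ...
 *		endpnt_rele(endp);	(may place endp on the idle list)
 *	}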
11890Sstevel@tonic-gate * 11900Sstevel@tonic-gate * endpnt_t structures remain persistent until the memory reclaim callback, 11910Sstevel@tonic-gate * endpnt_reclaim(), is invoked. 11920Sstevel@tonic-gate * 11930Sstevel@tonic-gate * Here is an example of how the data structures would be laid out by the 11940Sstevel@tonic-gate * subsystem: 11950Sstevel@tonic-gate * 11960Sstevel@tonic-gate * endpnt_type_t 11970Sstevel@tonic-gate * 11980Sstevel@tonic-gate * loopback inet 11990Sstevel@tonic-gate * _______________ ______________ 12000Sstevel@tonic-gate * | e_next |----------------------->| e_next |---->> 12010Sstevel@tonic-gate * | e_pool |<---+ | e_pool |<----+ 12020Sstevel@tonic-gate * | e_ilist |<---+--+ | e_ilist |<----+--+ 12030Sstevel@tonic-gate * +->| e_pcurr |----+--+--+ +->| e_pcurr |-----+--+--+ 12040Sstevel@tonic-gate * | | ... | | | | | | ... | | | | 12050Sstevel@tonic-gate * | | e_itimer (90) | | | | | | e_itimer (0) | | | | 12060Sstevel@tonic-gate * | | e_cnt (1) | | | | | | e_cnt (3) | | | | 12070Sstevel@tonic-gate * | +---------------+ | | | | +--------------+ | | | 12080Sstevel@tonic-gate * | | | | | | | | 12090Sstevel@tonic-gate * | endpnt_t | | | | | | | 12100Sstevel@tonic-gate * | ____________ | | | | ____________ | | | 12110Sstevel@tonic-gate * | | e_node |<------+ | | | | e_node |<------+ | | 12120Sstevel@tonic-gate * | | e_idle |<---------+ | | | e_idle | | | | 12130Sstevel@tonic-gate * +--| e_type |<------------+ +--| e_type | | | | 12140Sstevel@tonic-gate * | e_tiptr | | | e_tiptr | | | | 12150Sstevel@tonic-gate * | ... | | | ... | | | | 12160Sstevel@tonic-gate * | e_lock | | | e_lock | | | | 12170Sstevel@tonic-gate * | ... | | | ... | | | | 12180Sstevel@tonic-gate * | e_ref (0) | | | e_ref (2) | | | | 12190Sstevel@tonic-gate * | e_itime | | | e_itime | | | | 12200Sstevel@tonic-gate * +------------+ | +------------+ | | | 12210Sstevel@tonic-gate * | | | | 12220Sstevel@tonic-gate * | | | | 12230Sstevel@tonic-gate * | ____________ | | | 12240Sstevel@tonic-gate * | | e_node |<------+ | | 12250Sstevel@tonic-gate * | | e_idle |<------+--+ | 12260Sstevel@tonic-gate * +--| e_type | | | 12270Sstevel@tonic-gate * | | e_tiptr | | | 12280Sstevel@tonic-gate * | | ... | | | 12290Sstevel@tonic-gate * | | e_lock | | | 12300Sstevel@tonic-gate * | | ... | | | 12310Sstevel@tonic-gate * | | e_ref (0) | | | 12320Sstevel@tonic-gate * | | e_itime | | | 12330Sstevel@tonic-gate * | +------------+ | | 12340Sstevel@tonic-gate * | | | 12350Sstevel@tonic-gate * | | | 12360Sstevel@tonic-gate * | ____________ | | 12370Sstevel@tonic-gate * | | e_node |<------+ | 12380Sstevel@tonic-gate * | | e_idle | | 12390Sstevel@tonic-gate * +--| e_type |<------------+ 12400Sstevel@tonic-gate * | e_tiptr | 12410Sstevel@tonic-gate * | ... | 12420Sstevel@tonic-gate * | e_lock | 12430Sstevel@tonic-gate * | ... 
|
12440Sstevel@tonic-gate  *		| e_ref (1)  |
12450Sstevel@tonic-gate  *		| e_itime    |
12460Sstevel@tonic-gate  *		+------------+
12470Sstevel@tonic-gate  *
12480Sstevel@tonic-gate  * Endpoint locking strategy:
12490Sstevel@tonic-gate  *
12500Sstevel@tonic-gate  * The following functions manipulate the lists which hold the endpoints, as
12510Sstevel@tonic-gate  * well as the endpoints themselves:
12520Sstevel@tonic-gate  *
12530Sstevel@tonic-gate  * endpnt_get()/check_endpnt()/endpnt_rele()/endpnt_reap()/do_endpnt_reclaim()
12540Sstevel@tonic-gate  *
12550Sstevel@tonic-gate  * Lock description follows:
12560Sstevel@tonic-gate  *
12570Sstevel@tonic-gate  * endpnt_type_lock: Global reader/writer lock which protects accesses to the
12580Sstevel@tonic-gate  *	endpnt_type_list.
12590Sstevel@tonic-gate  *
12600Sstevel@tonic-gate  * e_plock: Lock defined in the endpnt_type_t.  It is intended to
12610Sstevel@tonic-gate  *	protect accesses to the pool of endpoints (e_pool) for a given
12620Sstevel@tonic-gate  *	endpnt_type_t.
12630Sstevel@tonic-gate  *
12640Sstevel@tonic-gate  * e_ilock: Lock defined in endpnt_type_t.  It is intended to protect accesses
12650Sstevel@tonic-gate  *	to the idle list (e_ilist) of available endpoints for a given
12660Sstevel@tonic-gate  *	endpnt_type_t.  It also protects access to the e_itimer, e_async_cv,
12670Sstevel@tonic-gate  *	and e_async_count fields in endpnt_type_t.
12680Sstevel@tonic-gate  *
12690Sstevel@tonic-gate  * e_lock: Lock defined in the endpnt structure.  It is intended to protect
12700Sstevel@tonic-gate  *	flags, cv, and ref count.
12710Sstevel@tonic-gate  *
12720Sstevel@tonic-gate  * The locks must be acquired in the following order so as not to induce
12730Sstevel@tonic-gate  * deadlock:
12740Sstevel@tonic-gate  *	endpnt_type_lock -> e_plock -> e_ilock -> e_lock
12750Sstevel@tonic-gate  *
12760Sstevel@tonic-gate  * Interaction with Zones and shutting down:
12770Sstevel@tonic-gate  *
12780Sstevel@tonic-gate  * endpnt_type_ts are uniquely identified by the (e_zoneid, e_rdev, e_protofmly)
12790Sstevel@tonic-gate  * tuple, which means that a zone may not reuse another zone's idle endpoints
12800Sstevel@tonic-gate  * without first doing a t_kclose().
12810Sstevel@tonic-gate  *
12820Sstevel@tonic-gate  * A zone's endpnt_type_ts are destroyed when a zone is shut down; e_async_cv
12830Sstevel@tonic-gate  * and e_async_count are used to keep track of the threads in endpnt_taskq
12840Sstevel@tonic-gate  * trying to reap endpnt_ts in the endpnt_type_t.
12850Sstevel@tonic-gate  */
12860Sstevel@tonic-gate 
12870Sstevel@tonic-gate /*
12880Sstevel@tonic-gate  * Allocate and initialize an endpnt_type_t
12890Sstevel@tonic-gate  */
12900Sstevel@tonic-gate static struct endpnt_type *
12910Sstevel@tonic-gate endpnt_type_create(struct knetconfig *config)
12920Sstevel@tonic-gate {
12930Sstevel@tonic-gate 	struct endpnt_type	*etype;
12940Sstevel@tonic-gate 
12950Sstevel@tonic-gate 	/*
12960Sstevel@tonic-gate 	 * Allocate a new endpoint type to hang a list of
12970Sstevel@tonic-gate 	 * endpoints off of it.
12980Sstevel@tonic-gate */ 12990Sstevel@tonic-gate etype = kmem_alloc(sizeof (struct endpnt_type), KM_SLEEP); 13000Sstevel@tonic-gate etype->e_next = NULL; 13010Sstevel@tonic-gate etype->e_pcurr = NULL; 13020Sstevel@tonic-gate etype->e_itimer = 0; 13030Sstevel@tonic-gate etype->e_cnt = 0; 13040Sstevel@tonic-gate 13050Sstevel@tonic-gate (void) strncpy(etype->e_protofmly, config->knc_protofmly, KNC_STRSIZE); 13060Sstevel@tonic-gate mutex_init(&etype->e_plock, NULL, MUTEX_DEFAULT, NULL); 13070Sstevel@tonic-gate mutex_init(&etype->e_ilock, NULL, MUTEX_DEFAULT, NULL); 13080Sstevel@tonic-gate etype->e_rdev = config->knc_rdev; 1309766Scarlsonj etype->e_zoneid = rpc_zoneid(); 13100Sstevel@tonic-gate etype->e_async_count = 0; 13110Sstevel@tonic-gate cv_init(&etype->e_async_cv, NULL, CV_DEFAULT, NULL); 13120Sstevel@tonic-gate 13130Sstevel@tonic-gate list_create(&etype->e_pool, sizeof (endpnt_t), 13146403Sgt29601 offsetof(endpnt_t, e_node)); 13150Sstevel@tonic-gate list_create(&etype->e_ilist, sizeof (endpnt_t), 13166403Sgt29601 offsetof(endpnt_t, e_idle)); 13170Sstevel@tonic-gate 13180Sstevel@tonic-gate /* 13190Sstevel@tonic-gate * Check to see if we need to create a taskq for endpoint 13200Sstevel@tonic-gate * reaping 13210Sstevel@tonic-gate */ 13220Sstevel@tonic-gate mutex_enter(&endpnt_taskq_lock); 13230Sstevel@tonic-gate if (taskq_created == FALSE) { 13240Sstevel@tonic-gate taskq_created = TRUE; 13250Sstevel@tonic-gate mutex_exit(&endpnt_taskq_lock); 13260Sstevel@tonic-gate ASSERT(endpnt_taskq == NULL); 13270Sstevel@tonic-gate endpnt_taskq = taskq_create("clts_endpnt_taskq", 1, 13286403Sgt29601 minclsyspri, 200, INT_MAX, 0); 13290Sstevel@tonic-gate } else 13300Sstevel@tonic-gate mutex_exit(&endpnt_taskq_lock); 13310Sstevel@tonic-gate 13320Sstevel@tonic-gate return (etype); 13330Sstevel@tonic-gate } 13340Sstevel@tonic-gate 13350Sstevel@tonic-gate /* 13360Sstevel@tonic-gate * Free an endpnt_type_t 13370Sstevel@tonic-gate */ 13380Sstevel@tonic-gate static void 13390Sstevel@tonic-gate endpnt_type_free(struct endpnt_type *etype) 13400Sstevel@tonic-gate { 13410Sstevel@tonic-gate mutex_destroy(&etype->e_plock); 13420Sstevel@tonic-gate mutex_destroy(&etype->e_ilock); 13430Sstevel@tonic-gate list_destroy(&etype->e_pool); 13440Sstevel@tonic-gate list_destroy(&etype->e_ilist); 13450Sstevel@tonic-gate kmem_free(etype, sizeof (endpnt_type_t)); 13460Sstevel@tonic-gate } 13470Sstevel@tonic-gate 13480Sstevel@tonic-gate /* 13490Sstevel@tonic-gate * Check the endpoint to ensure that it is suitable for use. 13500Sstevel@tonic-gate * 13510Sstevel@tonic-gate * Possible return values: 13520Sstevel@tonic-gate * 13530Sstevel@tonic-gate * return (1) - Endpoint is established, but needs to be re-opened. 13540Sstevel@tonic-gate * return (0) && *newp == NULL - Endpoint is established, but unusable. 13550Sstevel@tonic-gate * return (0) && *newp != NULL - Endpoint is established and usable. 13560Sstevel@tonic-gate */ 13570Sstevel@tonic-gate static int 13580Sstevel@tonic-gate check_endpnt(struct endpnt *endp, struct endpnt **newp) 13590Sstevel@tonic-gate { 13600Sstevel@tonic-gate *newp = endp; 13610Sstevel@tonic-gate 13620Sstevel@tonic-gate mutex_enter(&endp->e_lock); 13630Sstevel@tonic-gate ASSERT(endp->e_ref >= 1); 13640Sstevel@tonic-gate 13650Sstevel@tonic-gate /* 13660Sstevel@tonic-gate * The first condition we check for is if the endpoint has been 13670Sstevel@tonic-gate * allocated, but is unusable either because it has been closed or 13680Sstevel@tonic-gate * has been marked stale. 
Only *one* thread will be allowed to 1369*8766Sdai.ngo@sun.com * execute the then clause. This is enforced because the first thread 13700Sstevel@tonic-gate * to check this condition will clear the flags, so that subsequent 13710Sstevel@tonic-gate * thread(s) checking this endpoint will move on. 13720Sstevel@tonic-gate */ 13730Sstevel@tonic-gate if ((endp->e_flags & ENDPNT_ESTABLISHED) && 13746403Sgt29601 (!(endp->e_flags & ENDPNT_BOUND) || 13756403Sgt29601 (endp->e_flags & ENDPNT_STALE))) { 13760Sstevel@tonic-gate /* 13770Sstevel@tonic-gate * Clear the flags here since they will be 13780Sstevel@tonic-gate * set again by this thread. They need to be 13790Sstevel@tonic-gate * individually cleared because we want to maintain 13800Sstevel@tonic-gate * the state for ENDPNT_ONIDLE. 13810Sstevel@tonic-gate */ 13820Sstevel@tonic-gate endp->e_flags &= ~(ENDPNT_ESTABLISHED | 13836403Sgt29601 ENDPNT_WAITING | ENDPNT_BOUND | ENDPNT_STALE); 13840Sstevel@tonic-gate mutex_exit(&endp->e_lock); 13850Sstevel@tonic-gate return (1); 13860Sstevel@tonic-gate } 13870Sstevel@tonic-gate 13880Sstevel@tonic-gate /* 13890Sstevel@tonic-gate * The second condition is meant for any thread that is waiting for 13900Sstevel@tonic-gate * an endpoint to become established. It will cv_wait() until 13910Sstevel@tonic-gate * the condition for the endpoint has been changed to ENDPNT_BOUND or 13920Sstevel@tonic-gate * ENDPNT_STALE. 13930Sstevel@tonic-gate */ 13940Sstevel@tonic-gate while (!(endp->e_flags & ENDPNT_BOUND) && 13956403Sgt29601 !(endp->e_flags & ENDPNT_STALE)) { 13960Sstevel@tonic-gate endp->e_flags |= ENDPNT_WAITING; 13970Sstevel@tonic-gate cv_wait(&endp->e_cv, &endp->e_lock); 13980Sstevel@tonic-gate } 13990Sstevel@tonic-gate 14000Sstevel@tonic-gate ASSERT(endp->e_flags & ENDPNT_ESTABLISHED); 14010Sstevel@tonic-gate 14020Sstevel@tonic-gate /* 14030Sstevel@tonic-gate * The last case we check for is if the endpoint has been marked stale. 14040Sstevel@tonic-gate * If this is the case then set *newp to NULL and return, so that the 14050Sstevel@tonic-gate * caller is notified of the error and can take appropriate action. 14060Sstevel@tonic-gate */ 14070Sstevel@tonic-gate if (endp->e_flags & ENDPNT_STALE) { 14080Sstevel@tonic-gate endp->e_ref--; 14090Sstevel@tonic-gate *newp = NULL; 14100Sstevel@tonic-gate } 14110Sstevel@tonic-gate mutex_exit(&endp->e_lock); 14120Sstevel@tonic-gate return (0); 14130Sstevel@tonic-gate } 14140Sstevel@tonic-gate 14150Sstevel@tonic-gate #ifdef DEBUG 14160Sstevel@tonic-gate /* 14170Sstevel@tonic-gate * Provide a fault injection setting to test error conditions. 14180Sstevel@tonic-gate */ 14190Sstevel@tonic-gate static int endpnt_get_return_null = 0; 14200Sstevel@tonic-gate #endif 14210Sstevel@tonic-gate 14220Sstevel@tonic-gate /* 14230Sstevel@tonic-gate * Returns a handle (struct endpnt *) to an open and bound endpoint 14240Sstevel@tonic-gate * specified by the knetconfig passed in. Returns NULL if no valid endpoint 14250Sstevel@tonic-gate * can be obtained. 
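 *
 * A hedged usage sketch follows (the RPC_CANTSEND disposition and the
 * transmit step are assumptions for the example; callers may map a NULL
 * return differently):
 *
 *	struct endpnt *endp;
 *
 *	endp = endpnt_get(config, useresvport);
 *	if (endp == NULL)
 *		return (RPC_CANTSEND);
 *	... transmit the request via endp->e_wq ...
 *	endpnt_rele(endp);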
14260Sstevel@tonic-gate  */
14270Sstevel@tonic-gate static struct endpnt *
1428342Snd150628 endpnt_get(struct knetconfig *config, int useresvport)
14290Sstevel@tonic-gate {
14300Sstevel@tonic-gate 	struct endpnt_type	*n_etype = NULL;
14310Sstevel@tonic-gate 	struct endpnt_type	*np = NULL;
14320Sstevel@tonic-gate 	struct endpnt		*new = NULL;
14330Sstevel@tonic-gate 	struct endpnt		*endp = NULL;
14340Sstevel@tonic-gate 	struct endpnt		*next = NULL;
14350Sstevel@tonic-gate 	TIUSER			*tiptr = NULL;
14360Sstevel@tonic-gate 	int			rtries = BINDRESVPORT_RETRIES;
14370Sstevel@tonic-gate 	int			i = 0;
14380Sstevel@tonic-gate 	int			error;
14390Sstevel@tonic-gate 	int			retval;
1440766Scarlsonj 	zoneid_t		zoneid = rpc_zoneid();
14411676Sjpk 	cred_t			*cr;
14420Sstevel@tonic-gate 
14430Sstevel@tonic-gate 	RPCLOG(1, "endpnt_get: protofmly %s, ", config->knc_protofmly);
14440Sstevel@tonic-gate 	RPCLOG(1, "rdev %ld\n", config->knc_rdev);
14450Sstevel@tonic-gate 
14460Sstevel@tonic-gate #ifdef DEBUG
14470Sstevel@tonic-gate 	/*
14480Sstevel@tonic-gate 	 * Inject fault if desired.  Pretend we have a stale endpoint
14490Sstevel@tonic-gate 	 * and return NULL.
14500Sstevel@tonic-gate 	 */
14510Sstevel@tonic-gate 	if (endpnt_get_return_null > 0) {
14520Sstevel@tonic-gate 		endpnt_get_return_null--;
14530Sstevel@tonic-gate 		return (NULL);
14540Sstevel@tonic-gate 	}
14550Sstevel@tonic-gate #endif
14560Sstevel@tonic-gate 	rw_enter(&endpnt_type_lock, RW_READER);
14570Sstevel@tonic-gate 
14580Sstevel@tonic-gate top:
14590Sstevel@tonic-gate 	for (np = endpnt_type_list; np != NULL; np = np->e_next)
14600Sstevel@tonic-gate 		if ((np->e_zoneid == zoneid) &&
14610Sstevel@tonic-gate 		    (np->e_rdev == config->knc_rdev) &&
14620Sstevel@tonic-gate 		    (strcmp(np->e_protofmly,
14636403Sgt29601 		    config->knc_protofmly) == 0))
14640Sstevel@tonic-gate 			break;
14650Sstevel@tonic-gate 
14660Sstevel@tonic-gate 	if (np == NULL && n_etype != NULL) {
14670Sstevel@tonic-gate 		ASSERT(rw_write_held(&endpnt_type_lock));
14680Sstevel@tonic-gate 
14690Sstevel@tonic-gate 		/*
14700Sstevel@tonic-gate 		 * Link the endpoint type onto the list
14710Sstevel@tonic-gate 		 */
14720Sstevel@tonic-gate 		n_etype->e_next = endpnt_type_list;
14730Sstevel@tonic-gate 		endpnt_type_list = n_etype;
14740Sstevel@tonic-gate 		np = n_etype;
14750Sstevel@tonic-gate 		n_etype = NULL;
14760Sstevel@tonic-gate 	}
14770Sstevel@tonic-gate 
14780Sstevel@tonic-gate 	if (np == NULL) {
14790Sstevel@tonic-gate 		/*
14800Sstevel@tonic-gate 		 * The logic here is that we were unable to find an
14810Sstevel@tonic-gate 		 * endpnt_type_t that matched our criteria, so we allocate a
14820Sstevel@tonic-gate 		 * new one.  Because kmem_alloc() needs to be called with
14830Sstevel@tonic-gate 		 * KM_SLEEP, we drop our locks so that we don't induce
14840Sstevel@tonic-gate 		 * deadlock.  After allocating and initializing the
14850Sstevel@tonic-gate 		 * endpnt_type_t, we reacquire the lock and go back to check
14860Sstevel@tonic-gate 		 * if this entry needs to be added to the list.  Since we do
14870Sstevel@tonic-gate 		 * some operations without any locking, other threads may
14880Sstevel@tonic-gate 		 * have been looking for the same endpnt_type_t and gone
14890Sstevel@tonic-gate 		 * through this code path.  We check for this case and allow
14900Sstevel@tonic-gate 		 * one thread to link its endpnt_type_t to the list and the
14910Sstevel@tonic-gate 		 * other threads will simply free theirs.
14920Sstevel@tonic-gate 		 */
14930Sstevel@tonic-gate 		rw_exit(&endpnt_type_lock);
14940Sstevel@tonic-gate 		n_etype = endpnt_type_create(config);
14950Sstevel@tonic-gate 
14960Sstevel@tonic-gate 		/*
14970Sstevel@tonic-gate 		 * We need to reacquire the lock with RW_WRITER here so that
14980Sstevel@tonic-gate 		 * we can safely link the new endpoint type onto the list.
14990Sstevel@tonic-gate 		 */
15000Sstevel@tonic-gate 		rw_enter(&endpnt_type_lock, RW_WRITER);
15010Sstevel@tonic-gate 		goto top;
15020Sstevel@tonic-gate 	}
15030Sstevel@tonic-gate 
15040Sstevel@tonic-gate 	rw_exit(&endpnt_type_lock);
15050Sstevel@tonic-gate 	/*
15060Sstevel@tonic-gate 	 * If n_etype is not NULL, then another thread was able to
15070Sstevel@tonic-gate 	 * insert an endpnt_type_t of this type onto the list before
15080Sstevel@tonic-gate 	 * we did.  Go ahead and free ours.
15090Sstevel@tonic-gate 	 */
15100Sstevel@tonic-gate 	if (n_etype != NULL)
15110Sstevel@tonic-gate 		endpnt_type_free(n_etype);
15120Sstevel@tonic-gate 
15130Sstevel@tonic-gate 	mutex_enter(&np->e_ilock);
15140Sstevel@tonic-gate 	/*
15150Sstevel@tonic-gate 	 * The algorithm to hand out endpoints is to first
15160Sstevel@tonic-gate 	 * give out those that are idle if such endpoints
15170Sstevel@tonic-gate 	 * exist.  Otherwise, create a new one if we haven't
15180Sstevel@tonic-gate 	 * reached the max threshold.  Finally, we give out
15190Sstevel@tonic-gate 	 * endpoints in a pseudo LRU fashion (round-robin).
15200Sstevel@tonic-gate 	 *
15210Sstevel@tonic-gate 	 * Note: The idle list is merely a hint of those endpoints
15220Sstevel@tonic-gate 	 * that should be idle.  There exists a window after the
15230Sstevel@tonic-gate 	 * endpoint is released and before it is linked back onto the
15240Sstevel@tonic-gate 	 * idle list where a thread could get a reference to it and
15250Sstevel@tonic-gate 	 * use it.  This is okay, since the reference counts will
15260Sstevel@tonic-gate 	 * still be consistent.
15270Sstevel@tonic-gate 	 */
15280Sstevel@tonic-gate 	if ((endp = (endpnt_t *)list_head(&np->e_ilist)) != NULL) {
15290Sstevel@tonic-gate 		timeout_id_t t_id = 0;
15300Sstevel@tonic-gate 
15310Sstevel@tonic-gate 		mutex_enter(&endp->e_lock);
15320Sstevel@tonic-gate 		endp->e_ref++;
15330Sstevel@tonic-gate 		endp->e_itime = 0;
15340Sstevel@tonic-gate 		endp->e_flags &= ~ENDPNT_ONIDLE;
15350Sstevel@tonic-gate 		mutex_exit(&endp->e_lock);
15360Sstevel@tonic-gate 
15370Sstevel@tonic-gate 		/*
15380Sstevel@tonic-gate 		 * Pop the endpoint off the idle list and hand it off
15390Sstevel@tonic-gate 		 */
15400Sstevel@tonic-gate 		list_remove(&np->e_ilist, endp);
15410Sstevel@tonic-gate 
15420Sstevel@tonic-gate 		if (np->e_itimer != 0) {
15430Sstevel@tonic-gate 			t_id = np->e_itimer;
15440Sstevel@tonic-gate 			np->e_itimer = 0;
15450Sstevel@tonic-gate 		}
15460Sstevel@tonic-gate 		mutex_exit(&np->e_ilock);
15470Sstevel@tonic-gate 		/*
15480Sstevel@tonic-gate 		 * Cancel the idle timer if it has been set
15490Sstevel@tonic-gate 		 */
15500Sstevel@tonic-gate 		if (t_id != (timeout_id_t)0)
15510Sstevel@tonic-gate 			(void) untimeout(t_id);
15520Sstevel@tonic-gate 
15530Sstevel@tonic-gate 		if (check_endpnt(endp, &new) == 0)
15540Sstevel@tonic-gate 			return (new);
15550Sstevel@tonic-gate 	} else if (np->e_cnt >= clnt_clts_max_endpoints) {
15560Sstevel@tonic-gate 		/*
15570Sstevel@tonic-gate 		 * There are no idle endpoints currently and the pool has
15580Sstevel@tonic-gate 		 * already reached clnt_clts_max_endpoints, so hand out an
15590Sstevel@tonic-gate 		 * existing endpoint in round-robin fashion.
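		 *
		 * Advancing e_pcurr after each hand-out keeps the load
		 * spread roughly evenly across the pool, which is why the
		 * comment above describes this as a pseudo LRU policy.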
15600Sstevel@tonic-gate 		 */
15610Sstevel@tonic-gate 		mutex_exit(&np->e_ilock);
15620Sstevel@tonic-gate 		mutex_enter(&np->e_plock);
15630Sstevel@tonic-gate 		endp = np->e_pcurr;
15640Sstevel@tonic-gate 		mutex_enter(&endp->e_lock);
15650Sstevel@tonic-gate 		endp->e_ref++;
15660Sstevel@tonic-gate 		mutex_exit(&endp->e_lock);
15670Sstevel@tonic-gate 
15680Sstevel@tonic-gate 		ASSERT(endp != NULL);
15690Sstevel@tonic-gate 		/*
15700Sstevel@tonic-gate 		 * Advance the pointer to the next eligible endpoint, if
15710Sstevel@tonic-gate 		 * necessary.
15720Sstevel@tonic-gate 		 */
15730Sstevel@tonic-gate 		if (np->e_cnt > 1) {
15740Sstevel@tonic-gate 			next = (endpnt_t *)list_next(&np->e_pool, np->e_pcurr);
15750Sstevel@tonic-gate 			if (next == NULL)
15760Sstevel@tonic-gate 				next = (endpnt_t *)list_head(&np->e_pool);
15770Sstevel@tonic-gate 			np->e_pcurr = next;
15780Sstevel@tonic-gate 		}
15790Sstevel@tonic-gate 
15800Sstevel@tonic-gate 		mutex_exit(&np->e_plock);
15810Sstevel@tonic-gate 
15820Sstevel@tonic-gate 		/*
15830Sstevel@tonic-gate 		 * Check whether this endpoint is bound or still being set
15840Sstevel@tonic-gate 		 * up.  If the bind is in progress, check_endpnt() waits
15850Sstevel@tonic-gate 		 * until the setup is complete.
15860Sstevel@tonic-gate 		 */
15870Sstevel@tonic-gate 		if (check_endpnt(endp, &new) == 0)
15880Sstevel@tonic-gate 			return (new);
15890Sstevel@tonic-gate 	} else {
15900Sstevel@tonic-gate 		mutex_exit(&np->e_ilock);
15910Sstevel@tonic-gate 		mutex_enter(&np->e_plock);
15920Sstevel@tonic-gate 
15930Sstevel@tonic-gate 		/*
15940Sstevel@tonic-gate 		 * Allocate a new endpoint to use.  If we can't allocate any
15950Sstevel@tonic-gate 		 * more memory then use one that is already established if any
15960Sstevel@tonic-gate 		 * such endpoints exist.
15970Sstevel@tonic-gate 		 */
15980Sstevel@tonic-gate 		new = kmem_cache_alloc(endpnt_cache, KM_NOSLEEP);
15990Sstevel@tonic-gate 		if (new == NULL) {
16000Sstevel@tonic-gate 			RPCLOG0(1, "endpnt_get: kmem_cache_alloc failed\n");
16010Sstevel@tonic-gate 			/*
16020Sstevel@tonic-gate 			 * Try to recover by using an existing endpoint.
16030Sstevel@tonic-gate 			 */
16040Sstevel@tonic-gate 			if (np->e_cnt <= 0) {
16050Sstevel@tonic-gate 				mutex_exit(&np->e_plock);
16060Sstevel@tonic-gate 				return (NULL);
16070Sstevel@tonic-gate 			}
16080Sstevel@tonic-gate 			endp = np->e_pcurr;
16090Sstevel@tonic-gate 			if ((next = list_next(&np->e_pool, np->e_pcurr)) !=
16106403Sgt29601 			    NULL)
16110Sstevel@tonic-gate 				np->e_pcurr = next;
16120Sstevel@tonic-gate 			ASSERT(endp != NULL);
16130Sstevel@tonic-gate 			mutex_enter(&endp->e_lock);
16140Sstevel@tonic-gate 			endp->e_ref++;
16150Sstevel@tonic-gate 			mutex_exit(&endp->e_lock);
16160Sstevel@tonic-gate 			mutex_exit(&np->e_plock);
16170Sstevel@tonic-gate 
16180Sstevel@tonic-gate 			if (check_endpnt(endp, &new) == 0)
16190Sstevel@tonic-gate 				return (new);
16200Sstevel@tonic-gate 		} else {
16210Sstevel@tonic-gate 			/*
16220Sstevel@tonic-gate 			 * Partially init an endpoint structure and put
16230Sstevel@tonic-gate 			 * it on the list, so that other interested threads
16240Sstevel@tonic-gate 			 * know that one is being created.
16250Sstevel@tonic-gate 			 */
16260Sstevel@tonic-gate 			bzero(new, sizeof (struct endpnt));
16270Sstevel@tonic-gate 
16280Sstevel@tonic-gate 			cv_init(&new->e_cv, NULL, CV_DEFAULT, NULL);
16290Sstevel@tonic-gate 			mutex_init(&new->e_lock, NULL, MUTEX_DEFAULT, NULL);
16300Sstevel@tonic-gate 			new->e_ref = 1;
16310Sstevel@tonic-gate 			new->e_type = np;
16320Sstevel@tonic-gate 
16330Sstevel@tonic-gate 			/*
16340Sstevel@tonic-gate 			 * Link the endpoint into the pool.
16350Sstevel@tonic-gate 			 */
16360Sstevel@tonic-gate 			list_insert_head(&np->e_pool, new);
16370Sstevel@tonic-gate 			np->e_cnt++;
16380Sstevel@tonic-gate 			if (np->e_pcurr == NULL)
16390Sstevel@tonic-gate 				np->e_pcurr = new;
16400Sstevel@tonic-gate 			mutex_exit(&np->e_plock);
16410Sstevel@tonic-gate 		}
16420Sstevel@tonic-gate 	}
16430Sstevel@tonic-gate 
16440Sstevel@tonic-gate 	/*
16450Sstevel@tonic-gate 	 * The transport should be opened with sufficient privs
16460Sstevel@tonic-gate 	 */
16471676Sjpk 	cr = zone_kcred();
16480Sstevel@tonic-gate 	error = t_kopen(NULL, config->knc_rdev, FREAD|FWRITE|FNDELAY, &tiptr,
16491676Sjpk 	    cr);
16500Sstevel@tonic-gate 	if (error) {
16510Sstevel@tonic-gate 		RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
16520Sstevel@tonic-gate 		goto bad;
16530Sstevel@tonic-gate 	}
16540Sstevel@tonic-gate 
16550Sstevel@tonic-gate 	new->e_tiptr = tiptr;
16560Sstevel@tonic-gate 	rpc_poptimod(tiptr->fp->f_vnode);
16570Sstevel@tonic-gate 
16580Sstevel@tonic-gate 	/*
16590Sstevel@tonic-gate 	 * Allow the kernel to push the module on behalf of the user.
16600Sstevel@tonic-gate 	 */
16610Sstevel@tonic-gate 	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"rpcmod", 0,
16621676Sjpk 	    K_TO_K, cr, &retval);
16630Sstevel@tonic-gate 	if (error) {
16640Sstevel@tonic-gate 		RPCLOG(1, "endpnt_get: kstr_push on rpcmod failed %d\n", error);
16650Sstevel@tonic-gate 		goto bad;
16660Sstevel@tonic-gate 	}
16670Sstevel@tonic-gate 
16680Sstevel@tonic-gate 	error = strioctl(tiptr->fp->f_vnode, RPC_CLIENT, 0, 0, K_TO_K,
16691676Sjpk 	    cr, &retval);
16700Sstevel@tonic-gate 	if (error) {
16710Sstevel@tonic-gate 		RPCLOG(1, "endpnt_get: strioctl failed %d\n", error);
16720Sstevel@tonic-gate 		goto bad;
16730Sstevel@tonic-gate 	}
16740Sstevel@tonic-gate 
16750Sstevel@tonic-gate 	/*
16760Sstevel@tonic-gate 	 * Connectionless data flow should bypass the stream head.
16770Sstevel@tonic-gate 	 */
16780Sstevel@tonic-gate 	new->e_wq = tiptr->fp->f_vnode->v_stream->sd_wrq->q_next;
16790Sstevel@tonic-gate 
16800Sstevel@tonic-gate 	error = strioctl(tiptr->fp->f_vnode, I_PUSH, (intptr_t)"timod", 0,
16811676Sjpk 	    K_TO_K, cr, &retval);
16820Sstevel@tonic-gate 	if (error) {
16830Sstevel@tonic-gate 		RPCLOG(1, "endpnt_get: kstr_push on timod failed %d\n", error);
16840Sstevel@tonic-gate 		goto bad;
16850Sstevel@tonic-gate 	}
16860Sstevel@tonic-gate 
16870Sstevel@tonic-gate 	/*
16880Sstevel@tonic-gate 	 * Attempt to bind the endpoint.  If we fail then propagate the
16890Sstevel@tonic-gate 	 * error back to the calling subsystem, so that it can be handled
16900Sstevel@tonic-gate 	 * appropriately.
1691342Snd150628 	 * If the caller has not specified reserved port usage then
1692342Snd150628 	 * take the system default.
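	 *
	 * As a hedged reading of the retry loop below: for errors other
	 * than EPROTO, bindresvport() is retried up to BINDRESVPORT_RETRIES
	 * times, sleeping delay(hz << i++) ticks between attempts, i.e.
	 * roughly 1, 2, 4, ... seconds of exponential backoff (assuming hz
	 * ticks per second).  On EPROTO the endpoint is closed and reopened
	 * instead.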
16930Sstevel@tonic-gate 	 */
1694342Snd150628 	if (useresvport == -1)
1695342Snd150628 		useresvport = clnt_clts_do_bindresvport;
1696342Snd150628 
1697342Snd150628 	if (useresvport &&
16980Sstevel@tonic-gate 	    (strcmp(config->knc_protofmly, NC_INET) == 0 ||
16996403Sgt29601 	    strcmp(config->knc_protofmly, NC_INET6) == 0)) {
17000Sstevel@tonic-gate 
17010Sstevel@tonic-gate 		while ((error =
17026403Sgt29601 		    bindresvport(new->e_tiptr, NULL, NULL, FALSE)) != 0) {
17030Sstevel@tonic-gate 			RPCLOG(1,
17046403Sgt29601 			    "endpnt_get: bindresvport error %d\n", error);
17050Sstevel@tonic-gate 			if (error != EPROTO) {
17060Sstevel@tonic-gate 				if (rtries-- <= 0)
17070Sstevel@tonic-gate 					goto bad;
17080Sstevel@tonic-gate 
17090Sstevel@tonic-gate 				delay(hz << i++);
17100Sstevel@tonic-gate 				continue;
17110Sstevel@tonic-gate 			}
17120Sstevel@tonic-gate 
17130Sstevel@tonic-gate 			(void) t_kclose(new->e_tiptr, 1);
17140Sstevel@tonic-gate 			/*
17150Sstevel@tonic-gate 			 * reopen with all privileges
17160Sstevel@tonic-gate 			 */
17170Sstevel@tonic-gate 			error = t_kopen(NULL, config->knc_rdev,
17181676Sjpk 			    FREAD|FWRITE|FNDELAY,
17191676Sjpk 			    &new->e_tiptr, cr);
17200Sstevel@tonic-gate 			if (error) {
17210Sstevel@tonic-gate 				RPCLOG(1, "endpnt_get: t_kopen: %d\n", error);
17220Sstevel@tonic-gate 				new->e_tiptr = NULL;
17230Sstevel@tonic-gate 				goto bad;
17240Sstevel@tonic-gate 			}
17250Sstevel@tonic-gate 		}
17260Sstevel@tonic-gate 	} else if ((error = t_kbind(new->e_tiptr, NULL, NULL)) != 0) {
17270Sstevel@tonic-gate 		RPCLOG(1, "endpnt_get: t_kbind failed: %d\n", error);
17280Sstevel@tonic-gate 		goto bad;
17290Sstevel@tonic-gate 	}
17300Sstevel@tonic-gate 
17310Sstevel@tonic-gate 	/*
17320Sstevel@tonic-gate 	 * Set the flags and notify any waiters that we have an established
17330Sstevel@tonic-gate 	 * endpoint.
17340Sstevel@tonic-gate 	 */
17350Sstevel@tonic-gate 	mutex_enter(&new->e_lock);
17360Sstevel@tonic-gate 	new->e_flags |= ENDPNT_ESTABLISHED;
17370Sstevel@tonic-gate 	new->e_flags |= ENDPNT_BOUND;
17380Sstevel@tonic-gate 	if (new->e_flags & ENDPNT_WAITING) {
17390Sstevel@tonic-gate 		cv_broadcast(&new->e_cv);
17400Sstevel@tonic-gate 		new->e_flags &= ~ENDPNT_WAITING;
17410Sstevel@tonic-gate 	}
17420Sstevel@tonic-gate 	mutex_exit(&new->e_lock);
17430Sstevel@tonic-gate 
17440Sstevel@tonic-gate 	return (new);
17450Sstevel@tonic-gate 
17460Sstevel@tonic-gate bad:
17470Sstevel@tonic-gate 	ASSERT(new != NULL);
17480Sstevel@tonic-gate 	/*
17490Sstevel@tonic-gate 	 * Mark this endpoint as stale and notify any threads waiting
17500Sstevel@tonic-gate 	 * on this endpoint that it will be going away.
17510Sstevel@tonic-gate 	 */
17520Sstevel@tonic-gate 	mutex_enter(&new->e_lock);
17530Sstevel@tonic-gate 	if (new->e_ref > 0) {
17540Sstevel@tonic-gate 		new->e_flags |= ENDPNT_ESTABLISHED;
17550Sstevel@tonic-gate 		new->e_flags |= ENDPNT_STALE;
17560Sstevel@tonic-gate 		if (new->e_flags & ENDPNT_WAITING) {
17570Sstevel@tonic-gate 			cv_broadcast(&new->e_cv);
17580Sstevel@tonic-gate 			new->e_flags &= ~ENDPNT_WAITING;
17590Sstevel@tonic-gate 		}
17600Sstevel@tonic-gate 	}
17610Sstevel@tonic-gate 	new->e_ref--;
17620Sstevel@tonic-gate 	new->e_tiptr = NULL;
17630Sstevel@tonic-gate 	mutex_exit(&new->e_lock);
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate 	/*
17660Sstevel@tonic-gate 	 * If there was a transport endpoint opened, then close it.
17670Sstevel@tonic-gate 	 */
17680Sstevel@tonic-gate 	if (tiptr != NULL)
17690Sstevel@tonic-gate 		(void) t_kclose(tiptr, 1);
17700Sstevel@tonic-gate 
17710Sstevel@tonic-gate 	return (NULL);
17720Sstevel@tonic-gate }
17730Sstevel@tonic-gate 
17740Sstevel@tonic-gate /*
17750Sstevel@tonic-gate  * Release a reference to the endpoint
17760Sstevel@tonic-gate  */
17770Sstevel@tonic-gate static void
17780Sstevel@tonic-gate endpnt_rele(struct endpnt *sp)
17790Sstevel@tonic-gate {
17800Sstevel@tonic-gate 	mutex_enter(&sp->e_lock);
17810Sstevel@tonic-gate 	ASSERT(sp->e_ref > 0);
17820Sstevel@tonic-gate 	sp->e_ref--;
17830Sstevel@tonic-gate 	/*
17840Sstevel@tonic-gate 	 * If the ref count is zero, then start the idle timer and link
17850Sstevel@tonic-gate 	 * the endpoint onto the idle list.
17860Sstevel@tonic-gate 	 */
17870Sstevel@tonic-gate 	if (sp->e_ref == 0) {
17880Sstevel@tonic-gate 		sp->e_itime = gethrestime_sec();
17890Sstevel@tonic-gate 
17900Sstevel@tonic-gate 		/*
17910Sstevel@tonic-gate 		 * Check to see if the endpoint is already linked to the idle
17920Sstevel@tonic-gate 		 * list, so that we don't try to reinsert it.
17930Sstevel@tonic-gate 		 */
17940Sstevel@tonic-gate 		if (sp->e_flags & ENDPNT_ONIDLE) {
17950Sstevel@tonic-gate 			mutex_exit(&sp->e_lock);
17960Sstevel@tonic-gate 			mutex_enter(&sp->e_type->e_ilock);
17970Sstevel@tonic-gate 			endpnt_reap_settimer(sp->e_type);
17980Sstevel@tonic-gate 			mutex_exit(&sp->e_type->e_ilock);
17990Sstevel@tonic-gate 			return;
18000Sstevel@tonic-gate 		}
18010Sstevel@tonic-gate 
18020Sstevel@tonic-gate 		sp->e_flags |= ENDPNT_ONIDLE;
18030Sstevel@tonic-gate 		mutex_exit(&sp->e_lock);
18040Sstevel@tonic-gate 		mutex_enter(&sp->e_type->e_ilock);
18050Sstevel@tonic-gate 		list_insert_tail(&sp->e_type->e_ilist, sp);
18060Sstevel@tonic-gate 		endpnt_reap_settimer(sp->e_type);
18070Sstevel@tonic-gate 		mutex_exit(&sp->e_type->e_ilock);
18080Sstevel@tonic-gate 	} else
18090Sstevel@tonic-gate 		mutex_exit(&sp->e_lock);
18100Sstevel@tonic-gate }
18110Sstevel@tonic-gate 
18120Sstevel@tonic-gate static void
18130Sstevel@tonic-gate endpnt_reap_settimer(endpnt_type_t *etp)
18140Sstevel@tonic-gate {
18150Sstevel@tonic-gate 	if (etp->e_itimer == (timeout_id_t)0)
18160Sstevel@tonic-gate 		etp->e_itimer = timeout(endpnt_reap_dispatch, (void *)etp,
18176403Sgt29601 		    clnt_clts_taskq_dispatch_interval);
18180Sstevel@tonic-gate }
18190Sstevel@tonic-gate 
18200Sstevel@tonic-gate static void
18210Sstevel@tonic-gate endpnt_reap_dispatch(void *a)
18220Sstevel@tonic-gate {
18230Sstevel@tonic-gate 	endpnt_type_t *etp = a;
18240Sstevel@tonic-gate 
18250Sstevel@tonic-gate 	/*
18260Sstevel@tonic-gate 	 * The idle timer has fired, so dispatch the taskq to close the
18270Sstevel@tonic-gate 	 * endpoint.
18280Sstevel@tonic-gate 	 */
18290Sstevel@tonic-gate 	if (taskq_dispatch(endpnt_taskq, (task_func_t *)endpnt_reap, etp,
18300Sstevel@tonic-gate 	    TQ_NOSLEEP) == NULL)
18310Sstevel@tonic-gate 		return;
18320Sstevel@tonic-gate 	mutex_enter(&etp->e_ilock);
18330Sstevel@tonic-gate 	etp->e_async_count++;
18340Sstevel@tonic-gate 	mutex_exit(&etp->e_ilock);
18350Sstevel@tonic-gate }
18360Sstevel@tonic-gate 
18370Sstevel@tonic-gate /*
18380Sstevel@tonic-gate  * Traverse the idle list and close those endpoints that have reached their
18390Sstevel@tonic-gate  * timeout interval.
18400Sstevel@tonic-gate  */
18410Sstevel@tonic-gate static void
18420Sstevel@tonic-gate endpnt_reap(endpnt_type_t *etp)
18430Sstevel@tonic-gate {
18440Sstevel@tonic-gate 	struct endpnt *e;
18450Sstevel@tonic-gate 	struct endpnt *next_node = NULL;
18460Sstevel@tonic-gate 
18470Sstevel@tonic-gate 	mutex_enter(&etp->e_ilock);
18480Sstevel@tonic-gate 	e = list_head(&etp->e_ilist);
18490Sstevel@tonic-gate 	while (e != NULL) {
18500Sstevel@tonic-gate 		next_node = list_next(&etp->e_ilist, e);
18510Sstevel@tonic-gate 
18520Sstevel@tonic-gate 		mutex_enter(&e->e_lock);
18530Sstevel@tonic-gate 		if (e->e_ref > 0) {
18540Sstevel@tonic-gate 			mutex_exit(&e->e_lock);
18550Sstevel@tonic-gate 			e = next_node;
18560Sstevel@tonic-gate 			continue;
18570Sstevel@tonic-gate 		}
18580Sstevel@tonic-gate 
18590Sstevel@tonic-gate 		ASSERT(e->e_ref == 0);
18600Sstevel@tonic-gate 		if (e->e_itime > 0 &&
18610Sstevel@tonic-gate 		    (e->e_itime + clnt_clts_endpoint_reap_interval) <
18620Sstevel@tonic-gate 		    gethrestime_sec()) {
18630Sstevel@tonic-gate 			e->e_flags &= ~ENDPNT_BOUND;
18640Sstevel@tonic-gate 			(void) t_kclose(e->e_tiptr, 1);
18650Sstevel@tonic-gate 			e->e_tiptr = NULL;
18660Sstevel@tonic-gate 			e->e_itime = 0;
18670Sstevel@tonic-gate 		}
18680Sstevel@tonic-gate 		mutex_exit(&e->e_lock);
18690Sstevel@tonic-gate 		e = next_node;
18700Sstevel@tonic-gate 	}
18710Sstevel@tonic-gate 	etp->e_itimer = 0;
18720Sstevel@tonic-gate 	if (--etp->e_async_count == 0)
18730Sstevel@tonic-gate 		cv_signal(&etp->e_async_cv);
18740Sstevel@tonic-gate 	mutex_exit(&etp->e_ilock);
18750Sstevel@tonic-gate }
18760Sstevel@tonic-gate 
18770Sstevel@tonic-gate static void
18780Sstevel@tonic-gate endpnt_reclaim(zoneid_t zoneid)
18790Sstevel@tonic-gate {
18800Sstevel@tonic-gate 	struct endpnt_type *np;
18810Sstevel@tonic-gate 	struct endpnt *e;
18820Sstevel@tonic-gate 	struct endpnt *next_node = NULL;
18830Sstevel@tonic-gate 	list_t free_list;
18840Sstevel@tonic-gate 	int rcnt = 0;
18850Sstevel@tonic-gate 
18860Sstevel@tonic-gate 	list_create(&free_list, sizeof (endpnt_t), offsetof(endpnt_t, e_node));
18870Sstevel@tonic-gate 
18880Sstevel@tonic-gate 	RPCLOG0(1, "endpnt_reclaim: reclaim callback started\n");
18890Sstevel@tonic-gate 	rw_enter(&endpnt_type_lock, RW_READER);
18900Sstevel@tonic-gate 	for (np = endpnt_type_list; np != NULL; np = np->e_next) {
18910Sstevel@tonic-gate 		if (zoneid != ALL_ZONES && zoneid != np->e_zoneid)
18920Sstevel@tonic-gate 			continue;
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate 		mutex_enter(&np->e_plock);
18950Sstevel@tonic-gate 		RPCLOG(1, "endpnt_reclaim: protofmly %s, ",
18966403Sgt29601 		    np->e_protofmly);
18970Sstevel@tonic-gate 		RPCLOG(1, "rdev %ld\n", np->e_rdev);
18980Sstevel@tonic-gate 		RPCLOG(1, "endpnt_reclaim: found %d endpoint(s)\n",
18996403Sgt29601 		    np->e_cnt);
19000Sstevel@tonic-gate 
19010Sstevel@tonic-gate 		if (np->e_cnt == 0) {
19020Sstevel@tonic-gate 			mutex_exit(&np->e_plock);
19030Sstevel@tonic-gate 			continue;
19040Sstevel@tonic-gate 		}
19050Sstevel@tonic-gate 
19060Sstevel@tonic-gate 		/*
19070Sstevel@tonic-gate 		 * The nice thing about maintaining an idle list is that if
19080Sstevel@tonic-gate 		 * there are any endpoints to reclaim, they are going to be
19090Sstevel@tonic-gate 		 * on this list.  Just go through and reap the ones that
19100Sstevel@tonic-gate 		 * have ref counts of zero.
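		 *
		 * Note the collect-then-free pattern used below: endpoints
		 * are unlinked onto a private free_list while the locks are
		 * held, and the potentially blocking t_kclose()/destroy
		 * work is done only after all locks have been dropped.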
19110Sstevel@tonic-gate 		 */
19120Sstevel@tonic-gate 		mutex_enter(&np->e_ilock);
19130Sstevel@tonic-gate 		e = list_head(&np->e_ilist);
19140Sstevel@tonic-gate 		while (e != NULL) {
19150Sstevel@tonic-gate 			next_node = list_next(&np->e_ilist, e);
19160Sstevel@tonic-gate 			mutex_enter(&e->e_lock);
19170Sstevel@tonic-gate 			if (e->e_ref > 0) {
19180Sstevel@tonic-gate 				mutex_exit(&e->e_lock);
19190Sstevel@tonic-gate 				e = next_node;
19200Sstevel@tonic-gate 				continue;
19210Sstevel@tonic-gate 			}
19220Sstevel@tonic-gate 			ASSERT(e->e_ref == 0);
19230Sstevel@tonic-gate 			mutex_exit(&e->e_lock);
19240Sstevel@tonic-gate 
19250Sstevel@tonic-gate 			list_remove(&np->e_ilist, e);
19260Sstevel@tonic-gate 			list_remove(&np->e_pool, e);
19270Sstevel@tonic-gate 			list_insert_head(&free_list, e);
19280Sstevel@tonic-gate 
19290Sstevel@tonic-gate 			rcnt++;
19300Sstevel@tonic-gate 			np->e_cnt--;
19310Sstevel@tonic-gate 			e = next_node;
19320Sstevel@tonic-gate 		}
19330Sstevel@tonic-gate 		mutex_exit(&np->e_ilock);
19340Sstevel@tonic-gate 		/*
19350Sstevel@tonic-gate 		 * Reset the current pointer to be safe
19360Sstevel@tonic-gate 		 */
19370Sstevel@tonic-gate 		if ((e = (struct endpnt *)list_head(&np->e_pool)) != NULL)
19380Sstevel@tonic-gate 			np->e_pcurr = e;
19390Sstevel@tonic-gate 		else {
19400Sstevel@tonic-gate 			ASSERT(np->e_cnt == 0);
19410Sstevel@tonic-gate 			np->e_pcurr = NULL;
19420Sstevel@tonic-gate 		}
19430Sstevel@tonic-gate 
19440Sstevel@tonic-gate 		mutex_exit(&np->e_plock);
19450Sstevel@tonic-gate 	}
19460Sstevel@tonic-gate 	rw_exit(&endpnt_type_lock);
19470Sstevel@tonic-gate 
19480Sstevel@tonic-gate 	while ((e = list_head(&free_list)) != NULL) {
19490Sstevel@tonic-gate 		list_remove(&free_list, e);
19500Sstevel@tonic-gate 		if (e->e_tiptr != NULL)
19510Sstevel@tonic-gate 			(void) t_kclose(e->e_tiptr, 1);
19520Sstevel@tonic-gate 
19530Sstevel@tonic-gate 		cv_destroy(&e->e_cv);
19540Sstevel@tonic-gate 		mutex_destroy(&e->e_lock);
19550Sstevel@tonic-gate 		kmem_cache_free(endpnt_cache, e);
19560Sstevel@tonic-gate 	}
19570Sstevel@tonic-gate 	list_destroy(&free_list);
19580Sstevel@tonic-gate 	RPCLOG(1, "endpnt_reclaim: reclaimed %d endpoint(s)\n", rcnt);
19590Sstevel@tonic-gate }
19600Sstevel@tonic-gate 
19610Sstevel@tonic-gate /*
19620Sstevel@tonic-gate  * Endpoint reclaim zone destructor callback routine.
19630Sstevel@tonic-gate  *
19640Sstevel@tonic-gate  * After reclaiming any cached entries, we go through the endpnt_type
19650Sstevel@tonic-gate  * list, canceling outstanding timeouts and freeing data structures.
19660Sstevel@tonic-gate  */
19670Sstevel@tonic-gate /* ARGSUSED */
19680Sstevel@tonic-gate static void
19690Sstevel@tonic-gate endpnt_destructor(zoneid_t zoneid, void *a)
19700Sstevel@tonic-gate {
19710Sstevel@tonic-gate 	struct endpnt_type **npp;
19720Sstevel@tonic-gate 	struct endpnt_type *np;
19730Sstevel@tonic-gate 	struct endpnt_type *free_list = NULL;
19740Sstevel@tonic-gate 	timeout_id_t t_id = 0;
19750Sstevel@tonic-gate 	extern void clcleanup_zone(zoneid_t);
19760Sstevel@tonic-gate 	extern void clcleanup4_zone(zoneid_t);
19770Sstevel@tonic-gate 
19780Sstevel@tonic-gate 	/* Make sure NFS client handles are released.
 */
19790Sstevel@tonic-gate 	clcleanup_zone(zoneid);
19800Sstevel@tonic-gate 	clcleanup4_zone(zoneid);
19810Sstevel@tonic-gate 
19820Sstevel@tonic-gate 	endpnt_reclaim(zoneid);
19830Sstevel@tonic-gate 	/*
19840Sstevel@tonic-gate 	 * We don't need to be holding on to any locks across the call to
19850Sstevel@tonic-gate 	 * endpnt_reclaim() and the code below; we know that no one can
19860Sstevel@tonic-gate 	 * be holding open connections for this zone (all processes and kernel
19870Sstevel@tonic-gate 	 * threads are gone), so nothing could be adding anything to the list.
19880Sstevel@tonic-gate 	 */
19890Sstevel@tonic-gate 	rw_enter(&endpnt_type_lock, RW_WRITER);
19900Sstevel@tonic-gate 	npp = &endpnt_type_list;
19910Sstevel@tonic-gate 	while ((np = *npp) != NULL) {
19920Sstevel@tonic-gate 		if (np->e_zoneid != zoneid) {
19930Sstevel@tonic-gate 			npp = &np->e_next;
19940Sstevel@tonic-gate 			continue;
19950Sstevel@tonic-gate 		}
19960Sstevel@tonic-gate 		mutex_enter(&np->e_plock);
19970Sstevel@tonic-gate 		mutex_enter(&np->e_ilock);
19980Sstevel@tonic-gate 		if (np->e_itimer != 0) {
19990Sstevel@tonic-gate 			t_id = np->e_itimer;
20000Sstevel@tonic-gate 			np->e_itimer = 0;
20010Sstevel@tonic-gate 		}
20020Sstevel@tonic-gate 		ASSERT(np->e_cnt == 0);
20030Sstevel@tonic-gate 		ASSERT(list_head(&np->e_pool) == NULL);
20040Sstevel@tonic-gate 		ASSERT(list_head(&np->e_ilist) == NULL);
20050Sstevel@tonic-gate 
20060Sstevel@tonic-gate 		mutex_exit(&np->e_ilock);
20070Sstevel@tonic-gate 		mutex_exit(&np->e_plock);
20080Sstevel@tonic-gate 
20090Sstevel@tonic-gate 		/*
20100Sstevel@tonic-gate 		 * untimeout() any outstanding timers that have not yet fired.
20110Sstevel@tonic-gate 		 */
20120Sstevel@tonic-gate 		if (t_id != (timeout_id_t)0)
20130Sstevel@tonic-gate 			(void) untimeout(t_id);
20140Sstevel@tonic-gate 		*npp = np->e_next;
20150Sstevel@tonic-gate 		np->e_next = free_list;
20160Sstevel@tonic-gate 		free_list = np;
20170Sstevel@tonic-gate 	}
20180Sstevel@tonic-gate 	rw_exit(&endpnt_type_lock);
20190Sstevel@tonic-gate 
20200Sstevel@tonic-gate 	while (free_list != NULL) {
20210Sstevel@tonic-gate 		np = free_list;
20220Sstevel@tonic-gate 		free_list = free_list->e_next;
20230Sstevel@tonic-gate 		/*
20240Sstevel@tonic-gate 		 * Wait for threads in endpnt_taskq trying to reap endpnt_ts in
20250Sstevel@tonic-gate 		 * the endpnt_type_t.
20260Sstevel@tonic-gate 		 */
20270Sstevel@tonic-gate 		mutex_enter(&np->e_ilock);
20280Sstevel@tonic-gate 		while (np->e_async_count > 0)
20290Sstevel@tonic-gate 			cv_wait(&np->e_async_cv, &np->e_ilock);
20300Sstevel@tonic-gate 		cv_destroy(&np->e_async_cv);
20310Sstevel@tonic-gate 		mutex_destroy(&np->e_plock);
20320Sstevel@tonic-gate 		mutex_destroy(&np->e_ilock);
20330Sstevel@tonic-gate 		list_destroy(&np->e_pool);
20340Sstevel@tonic-gate 		list_destroy(&np->e_ilist);
20350Sstevel@tonic-gate 		kmem_free(np, sizeof (endpnt_type_t));
20360Sstevel@tonic-gate 	}
20370Sstevel@tonic-gate }
20380Sstevel@tonic-gate 
20390Sstevel@tonic-gate /*
20400Sstevel@tonic-gate  * Endpoint reclaim kmem callback routine.
20410Sstevel@tonic-gate  */
20420Sstevel@tonic-gate /* ARGSUSED */
20430Sstevel@tonic-gate static void
20440Sstevel@tonic-gate endpnt_repossess(void *a)
20450Sstevel@tonic-gate {
20460Sstevel@tonic-gate 	/*
20470Sstevel@tonic-gate 	 * Reclaim idle endpnts from all zones.
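	 *
	 * This runs in kmem reclaim context, so the dispatch must not
	 * block; hence TQ_NOSLEEP, and the reclaim is simply skipped if
	 * the taskq has not been created yet.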
20480Sstevel@tonic-gate */ 20490Sstevel@tonic-gate if (endpnt_taskq != NULL) 20500Sstevel@tonic-gate (void) taskq_dispatch(endpnt_taskq, 20510Sstevel@tonic-gate (task_func_t *)endpnt_reclaim, (void *)ALL_ZONES, 20520Sstevel@tonic-gate TQ_NOSLEEP); 20530Sstevel@tonic-gate } 20540Sstevel@tonic-gate 20550Sstevel@tonic-gate /* 20560Sstevel@tonic-gate * RPC request dispatch routine. Constructs a datagram message and wraps it 20570Sstevel@tonic-gate * around the RPC request to pass downstream. 20580Sstevel@tonic-gate */ 20590Sstevel@tonic-gate static int 20600Sstevel@tonic-gate clnt_clts_dispatch_send(queue_t *q, mblk_t *mp, struct netbuf *addr, 20610Sstevel@tonic-gate calllist_t *cp, uint_t xid) 20620Sstevel@tonic-gate { 20630Sstevel@tonic-gate mblk_t *bp; 20640Sstevel@tonic-gate int msgsz; 20650Sstevel@tonic-gate struct T_unitdata_req *udreq; 20660Sstevel@tonic-gate 20670Sstevel@tonic-gate /* 20680Sstevel@tonic-gate * Set up the call record. 20690Sstevel@tonic-gate */ 20700Sstevel@tonic-gate cp->call_wq = q; 20710Sstevel@tonic-gate cp->call_xid = xid; 20720Sstevel@tonic-gate cp->call_status = RPC_TIMEDOUT; 20730Sstevel@tonic-gate cp->call_notified = FALSE; 20740Sstevel@tonic-gate RPCLOG(64, 20756403Sgt29601 "clnt_clts_dispatch_send: putting xid 0x%x on " 20766403Sgt29601 "dispatch list\n", xid); 20770Sstevel@tonic-gate cp->call_hash = call_hash(xid, clnt_clts_hash_size); 20780Sstevel@tonic-gate cp->call_bucket = &clts_call_ht[cp->call_hash]; 20790Sstevel@tonic-gate call_table_enter(cp); 20800Sstevel@tonic-gate 20810Sstevel@tonic-gate /* 20820Sstevel@tonic-gate * Construct the datagram 20830Sstevel@tonic-gate */ 20840Sstevel@tonic-gate msgsz = (int)TUNITDATAREQSZ; 20850Sstevel@tonic-gate while (!(bp = allocb(msgsz + addr->len, BPRI_LO))) { 20860Sstevel@tonic-gate if (strwaitbuf(msgsz + addr->len, BPRI_LO)) 20870Sstevel@tonic-gate return (ENOSR); 20880Sstevel@tonic-gate } 20890Sstevel@tonic-gate 20900Sstevel@tonic-gate udreq = (struct T_unitdata_req *)bp->b_wptr; 20910Sstevel@tonic-gate udreq->PRIM_type = T_UNITDATA_REQ; 20920Sstevel@tonic-gate udreq->DEST_length = addr->len; 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate if (addr->len) { 20950Sstevel@tonic-gate bcopy(addr->buf, bp->b_wptr + msgsz, addr->len); 20960Sstevel@tonic-gate udreq->DEST_offset = (t_scalar_t)msgsz; 20970Sstevel@tonic-gate msgsz += addr->len; 20980Sstevel@tonic-gate } else 20990Sstevel@tonic-gate udreq->DEST_offset = 0; 21000Sstevel@tonic-gate udreq->OPT_length = 0; 21010Sstevel@tonic-gate udreq->OPT_offset = 0; 21020Sstevel@tonic-gate 21030Sstevel@tonic-gate bp->b_datap->db_type = M_PROTO; 21040Sstevel@tonic-gate bp->b_wptr += msgsz; 21050Sstevel@tonic-gate 21060Sstevel@tonic-gate /* 21070Sstevel@tonic-gate * Link the datagram header with the actual data 21080Sstevel@tonic-gate */ 21090Sstevel@tonic-gate linkb(bp, mp); 21100Sstevel@tonic-gate 21110Sstevel@tonic-gate /* 21120Sstevel@tonic-gate * Send downstream. 21130Sstevel@tonic-gate */ 21148205SSiddheshwar.Mahesh@Sun.COM if (canput(cp->call_wq)) { 21158205SSiddheshwar.Mahesh@Sun.COM put(cp->call_wq, bp); 21168205SSiddheshwar.Mahesh@Sun.COM return (0); 21178205SSiddheshwar.Mahesh@Sun.COM } 21180Sstevel@tonic-gate 21198205SSiddheshwar.Mahesh@Sun.COM return (EIO); 21200Sstevel@tonic-gate } 21210Sstevel@tonic-gate 21220Sstevel@tonic-gate /* 21230Sstevel@tonic-gate * RPC response delivery routine. Deliver the response to the waiting 21240Sstevel@tonic-gate * thread by matching the xid. 
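 *
 * As a hedged sketch of the lookup performed below, the waiter is found
 * by hashing the xid into the clts_call_ht completion table:
 *
 *	hash = call_hash(xid, clnt_clts_hash_size);
 *	chtp = &clts_call_ht[hash];
 *	call_table_find(chtp, xid, e);	(returns with the bucket locked)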
21250Sstevel@tonic-gate  */
21260Sstevel@tonic-gate void
21270Sstevel@tonic-gate clnt_clts_dispatch_notify(mblk_t *mp, int resp_off, zoneid_t zoneid)
21280Sstevel@tonic-gate {
21290Sstevel@tonic-gate 	calllist_t *e = NULL;
21300Sstevel@tonic-gate 	call_table_t *chtp;
21310Sstevel@tonic-gate 	uint32_t xid;
21320Sstevel@tonic-gate 	uint_t hash;
21330Sstevel@tonic-gate 	unsigned char *hdr_offset;
21340Sstevel@tonic-gate 	mblk_t *resp;
21350Sstevel@tonic-gate 
21360Sstevel@tonic-gate 	/*
21370Sstevel@tonic-gate 	 * If the RPC response is not contained in the same mblk as the
21380Sstevel@tonic-gate 	 * datagram header, then move to the next mblk.
21390Sstevel@tonic-gate 	 */
21400Sstevel@tonic-gate 	hdr_offset = mp->b_rptr;
21410Sstevel@tonic-gate 	resp = mp;
21420Sstevel@tonic-gate 	if ((mp->b_wptr - (mp->b_rptr + resp_off)) == 0)
21430Sstevel@tonic-gate 		resp = mp->b_cont;
21440Sstevel@tonic-gate 	else
21450Sstevel@tonic-gate 		resp->b_rptr += resp_off;
21460Sstevel@tonic-gate 
21470Sstevel@tonic-gate 	ASSERT(resp != NULL);
21480Sstevel@tonic-gate 
21490Sstevel@tonic-gate 	if ((IS_P2ALIGNED(resp->b_rptr, sizeof (uint32_t))) &&
21500Sstevel@tonic-gate 	    (resp->b_wptr - resp->b_rptr) >= sizeof (xid))
21510Sstevel@tonic-gate 		xid = *((uint32_t *)resp->b_rptr);
21520Sstevel@tonic-gate 	else {
21530Sstevel@tonic-gate 		int i = 0;
21540Sstevel@tonic-gate 		unsigned char *p = (unsigned char *)&xid;
21550Sstevel@tonic-gate 		unsigned char *rptr;
21560Sstevel@tonic-gate 		mblk_t *tmp = resp;
21570Sstevel@tonic-gate 
21580Sstevel@tonic-gate 		/*
21590Sstevel@tonic-gate 		 * Copy the xid, byte by byte.
21600Sstevel@tonic-gate 		 */
21610Sstevel@tonic-gate 		while (tmp) {
21620Sstevel@tonic-gate 			rptr = tmp->b_rptr;
21630Sstevel@tonic-gate 			while (rptr < tmp->b_wptr) {
21640Sstevel@tonic-gate 				*p++ = *rptr++;
21650Sstevel@tonic-gate 				if (++i >= sizeof (xid))
21660Sstevel@tonic-gate 					goto done_xid_copy;
21670Sstevel@tonic-gate 			}
21680Sstevel@tonic-gate 			tmp = tmp->b_cont;
21690Sstevel@tonic-gate 		}
21700Sstevel@tonic-gate 
21710Sstevel@tonic-gate 		/*
21720Sstevel@tonic-gate 		 * If we got here, we ran out of mblk space before the
21730Sstevel@tonic-gate 		 * xid could be copied.
21740Sstevel@tonic-gate 		 */
21750Sstevel@tonic-gate 		ASSERT(tmp == NULL && i < sizeof (xid));
21760Sstevel@tonic-gate 
21770Sstevel@tonic-gate 		RPCLOG0(1,
21786403Sgt29601 		    "clnt_dispatch_notify(clts): message less than "
21796403Sgt29601 		    "size of xid\n");
21800Sstevel@tonic-gate 
21810Sstevel@tonic-gate 		freemsg(mp);
21820Sstevel@tonic-gate 		return;
21830Sstevel@tonic-gate 	}
21840Sstevel@tonic-gate 
21850Sstevel@tonic-gate done_xid_copy:
21860Sstevel@tonic-gate 
21870Sstevel@tonic-gate 	/*
21880Sstevel@tonic-gate 	 * Reset the read pointer back to the beginning of the protocol
21890Sstevel@tonic-gate 	 * header if we moved it.
21900Sstevel@tonic-gate 	 */
21910Sstevel@tonic-gate 	if (mp->b_rptr != hdr_offset)
21920Sstevel@tonic-gate 		mp->b_rptr = hdr_offset;
21930Sstevel@tonic-gate 
21940Sstevel@tonic-gate 	hash = call_hash(xid, clnt_clts_hash_size);
21950Sstevel@tonic-gate 	chtp = &clts_call_ht[hash];
21960Sstevel@tonic-gate 	/* call_table_find returns with the hash bucket locked */
21970Sstevel@tonic-gate 	call_table_find(chtp, xid, e);
21980Sstevel@tonic-gate 
21990Sstevel@tonic-gate 	if (e != NULL) {
22000Sstevel@tonic-gate 		mutex_enter(&e->call_lock);
22016403Sgt29601 
22026403Sgt29601 		/*
22036403Sgt29601 		 * Verify that the reply arrived in the same zone from
22046403Sgt29601 		 * which the request was sent.
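		 * This guards against a reply being delivered to a waiter
		 * in a different zone that happens to be using a colliding
		 * xid (a hedged rationale; the check itself simply compares
		 * call_zoneid against the zone the reply arrived on).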
22056403Sgt29601 		 */
22066403Sgt29601 		if (e->call_zoneid != zoneid) {
22076403Sgt29601 			mutex_exit(&e->call_lock);
22086403Sgt29601 			mutex_exit(&chtp->ct_lock);
22096403Sgt29601 			freemsg(mp);
22106403Sgt29601 			return;
22116403Sgt29601 		}
22126403Sgt29601 
22130Sstevel@tonic-gate 		/*
22140Sstevel@tonic-gate 		 * Found a thread waiting for this reply.
22150Sstevel@tonic-gate 		 */
22160Sstevel@tonic-gate 		if (e->call_reply) {
22170Sstevel@tonic-gate 			RPCLOG(8,
22186403Sgt29601 			    "clnt_dispatch_notify (clts): discarding old "
22196403Sgt29601 			    "reply for xid 0x%x\n",
22206403Sgt29601 			    xid);
22210Sstevel@tonic-gate 			freemsg(e->call_reply);
22220Sstevel@tonic-gate 		}
22230Sstevel@tonic-gate 		e->call_notified = TRUE;
22240Sstevel@tonic-gate 		e->call_reply = mp;
22250Sstevel@tonic-gate 		e->call_status = RPC_SUCCESS;
22260Sstevel@tonic-gate 		cv_signal(&e->call_cv);
22270Sstevel@tonic-gate 		mutex_exit(&e->call_lock);
22280Sstevel@tonic-gate 		mutex_exit(&chtp->ct_lock);
22290Sstevel@tonic-gate 	} else {
22300Sstevel@tonic-gate 		zone_t *zone;
22310Sstevel@tonic-gate 		struct rpcstat *rpcstat;
22320Sstevel@tonic-gate 
22330Sstevel@tonic-gate 		mutex_exit(&chtp->ct_lock);
22340Sstevel@tonic-gate 		RPCLOG(8, "clnt_dispatch_notify (clts): no caller for reply "
22356403Sgt29601 		    "0x%x\n", xid);
22360Sstevel@tonic-gate 		freemsg(mp);
22370Sstevel@tonic-gate 		/*
22380Sstevel@tonic-gate 		 * This is unfortunate, but we need to look up the zone so we
22390Sstevel@tonic-gate 		 * can increment its "rcbadxids" counter.
22400Sstevel@tonic-gate 		 */
22410Sstevel@tonic-gate 		zone = zone_find_by_id(zoneid);
22420Sstevel@tonic-gate 		if (zone == NULL) {
22430Sstevel@tonic-gate 			/*
22440Sstevel@tonic-gate 			 * The zone went away...
22450Sstevel@tonic-gate 			 */
22460Sstevel@tonic-gate 			return;
22470Sstevel@tonic-gate 		}
22480Sstevel@tonic-gate 		rpcstat = zone_getspecific(rpcstat_zone_key, zone);
22490Sstevel@tonic-gate 		if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
22500Sstevel@tonic-gate 			/*
22510Sstevel@tonic-gate 			 * Not interested
22520Sstevel@tonic-gate 			 */
22530Sstevel@tonic-gate 			zone_rele(zone);
22540Sstevel@tonic-gate 			return;
22550Sstevel@tonic-gate 		}
22560Sstevel@tonic-gate 		RCSTAT_INCR(rpcstat->rpc_clts_client, rcbadxids);
22570Sstevel@tonic-gate 		zone_rele(zone);
22580Sstevel@tonic-gate 	}
22590Sstevel@tonic-gate }
22600Sstevel@tonic-gate 
22610Sstevel@tonic-gate /*
22620Sstevel@tonic-gate  * Init routine.  Called when rpcmod is loaded.
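 *
 * The clnt_clts_* knobs checked below are ordinary kernel global
 * variables; as a hedged example (assuming the usual /etc/system syntax
 * for a variable living in this module), one could be tuned with a line
 * such as:
 *
 *	set rpcmod:clnt_clts_max_endpoints = 32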
22630Sstevel@tonic-gate  */
22640Sstevel@tonic-gate void
22650Sstevel@tonic-gate clnt_clts_init(void)
22660Sstevel@tonic-gate {
22670Sstevel@tonic-gate 	endpnt_cache = kmem_cache_create("clnt_clts_endpnt_cache",
22680Sstevel@tonic-gate 	    sizeof (struct endpnt), 0, NULL, NULL, endpnt_repossess, NULL,
22690Sstevel@tonic-gate 	    NULL, 0);
22700Sstevel@tonic-gate 
22710Sstevel@tonic-gate 	rw_init(&endpnt_type_lock, NULL, RW_DEFAULT, NULL);
22720Sstevel@tonic-gate 
22730Sstevel@tonic-gate 	/*
22740Sstevel@tonic-gate 	 * Perform simple bounds checking to make sure that the setting is
22750Sstevel@tonic-gate 	 * reasonable.
22760Sstevel@tonic-gate 	 */
22770Sstevel@tonic-gate 	if (clnt_clts_max_endpoints <= 0) {
22780Sstevel@tonic-gate 		if (clnt_clts_do_bindresvport)
22790Sstevel@tonic-gate 			clnt_clts_max_endpoints = RESERVED_PORTSPACE;
22800Sstevel@tonic-gate 		else
22810Sstevel@tonic-gate 			clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;
22820Sstevel@tonic-gate 	}
22830Sstevel@tonic-gate 
22840Sstevel@tonic-gate 	if (clnt_clts_do_bindresvport &&
22850Sstevel@tonic-gate 	    clnt_clts_max_endpoints > RESERVED_PORTSPACE)
22860Sstevel@tonic-gate 		clnt_clts_max_endpoints = RESERVED_PORTSPACE;
22870Sstevel@tonic-gate 	else if (clnt_clts_max_endpoints > NONRESERVED_PORTSPACE)
22880Sstevel@tonic-gate 		clnt_clts_max_endpoints = NONRESERVED_PORTSPACE;
22890Sstevel@tonic-gate 
22900Sstevel@tonic-gate 	if (clnt_clts_hash_size < DEFAULT_MIN_HASH_SIZE)
22910Sstevel@tonic-gate 		clnt_clts_hash_size = DEFAULT_MIN_HASH_SIZE;
22920Sstevel@tonic-gate 
22930Sstevel@tonic-gate 	/*
22940Sstevel@tonic-gate 	 * Defer creating the taskq until rpcmod gets pushed.  If we are
22950Sstevel@tonic-gate 	 * in diskless boot mode, rpcmod will get loaded early even before
22960Sstevel@tonic-gate 	 * thread_create() is available.
22970Sstevel@tonic-gate 	 */
22980Sstevel@tonic-gate 	endpnt_taskq = NULL;
22990Sstevel@tonic-gate 	taskq_created = FALSE;
23000Sstevel@tonic-gate 	mutex_init(&endpnt_taskq_lock, NULL, MUTEX_DEFAULT, NULL);
23010Sstevel@tonic-gate 
23020Sstevel@tonic-gate 	if (clnt_clts_endpoint_reap_interval < DEFAULT_ENDPOINT_REAP_INTERVAL)
23030Sstevel@tonic-gate 		clnt_clts_endpoint_reap_interval =
23046403Sgt29601 		    DEFAULT_ENDPOINT_REAP_INTERVAL;
23050Sstevel@tonic-gate 
23060Sstevel@tonic-gate 	/*
23070Sstevel@tonic-gate 	 * Dispatch the taskq at an interval which is offset from the
23080Sstevel@tonic-gate 	 * interval that the endpoints should be reaped.
23090Sstevel@tonic-gate 	 */
23100Sstevel@tonic-gate 	clnt_clts_taskq_dispatch_interval =
23116403Sgt29601 	    (clnt_clts_endpoint_reap_interval + DEFAULT_INTERVAL_SHIFT) * hz;
23120Sstevel@tonic-gate 
23130Sstevel@tonic-gate 	/*
23140Sstevel@tonic-gate 	 * Initialize the completion queue
23150Sstevel@tonic-gate 	 */
23160Sstevel@tonic-gate 	clts_call_ht = call_table_init(clnt_clts_hash_size);
23170Sstevel@tonic-gate 	/*
23180Sstevel@tonic-gate 	 * Initialize the zone destructor callback.
23190Sstevel@tonic-gate 	 */
23200Sstevel@tonic-gate 	zone_key_create(&endpnt_destructor_key, NULL, NULL, endpnt_destructor);
23210Sstevel@tonic-gate }
23220Sstevel@tonic-gate 
23230Sstevel@tonic-gate void
23240Sstevel@tonic-gate clnt_clts_fini(void)
23250Sstevel@tonic-gate {
23260Sstevel@tonic-gate 	(void) zone_key_delete(endpnt_destructor_key);
23270Sstevel@tonic-gate }