/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * svc_cots.c
 * Server side for connection-oriented RPC in the kernel.
 *
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/file.h>
#include <sys/stream.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <sys/stropts.h>
#include <sys/tiuser.h>
#include <sys/timod.h>
#include <sys/tihdr.h>
#include <sys/fcntl.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/rpc_msg.h>
#include <rpc/svc.h>
#include <inet/ip.h>

#define	COTS_MAX_ALLOCSIZE	2048
#define	MSG_OFFSET		128	/* offset of call into the mblk */
#define	RM_HDR_SIZE		4	/* record mark header size */
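
/*
 * Illustrative note (not part of the original source): on a
 * connection-oriented transport every RPC message fragment is preceded
 * by a 4-byte record mark, which is why RM_HDR_SIZE bytes are reserved
 * in front of each reply below.  Per the standard RPC record-marking
 * scheme the mark is a big-endian 32-bit word whose high bit is the
 * "last fragment" flag and whose low 31 bits carry the fragment length,
 * i.e. roughly:
 *
 *	uint32_t rm = htonl(0x80000000u | (uint32_t)frag_len);
 *
 * (frag_len is a hypothetical variable used only for illustration.)
 * The mark itself is filled in downstream of this module; this file
 * only reserves room for it.
 */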

/*
 * Routines exported through ops vector.
 */
static bool_t		svc_cots_krecv(SVCXPRT *, mblk_t *, struct rpc_msg *);
static bool_t		svc_cots_ksend(SVCXPRT *, struct rpc_msg *);
static bool_t		svc_cots_kgetargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t		svc_cots_kfreeargs(SVCXPRT *, xdrproc_t, caddr_t);
static void		svc_cots_kdestroy(SVCMASTERXPRT *);
static int		svc_cots_kdup(struct svc_req *, caddr_t, int,
				struct dupreq **, bool_t *);
static void		svc_cots_kdupdone(struct dupreq *, caddr_t,
				void (*)(), int, int);
static int32_t		*svc_cots_kgetres(SVCXPRT *, int);
static void		svc_cots_kfreeres(SVCXPRT *);
static void		svc_cots_kclone_destroy(SVCXPRT *);
static void		svc_cots_kstart(SVCMASTERXPRT *);
static void		svc_cots_ktattrs(SVCXPRT *, int, void **);

/*
 * Server transport operations vector.
 */
struct svc_ops svc_cots_op = {
	svc_cots_krecv,		/* Get requests */
	svc_cots_kgetargs,	/* Deserialize arguments */
	svc_cots_ksend,		/* Send reply */
	svc_cots_kfreeargs,	/* Free argument data space */
	svc_cots_kdestroy,	/* Destroy transport handle */
	svc_cots_kdup,		/* Check entry in dup req cache */
	svc_cots_kdupdone,	/* Mark entry in dup req cache as done */
	svc_cots_kgetres,	/* Get pointer to response buffer */
	svc_cots_kfreeres,	/* Destroy pre-serialized response header */
	svc_cots_kclone_destroy, /* Destroy a clone xprt */
	svc_cots_kstart,	/* Tell `ready-to-receive' to rpcmod */
	NULL,			/* Transport specific clone xprt */
	svc_cots_ktattrs	/* Transport Attributes */
};

/*
 * Master transport private data.
 * Kept in xprt->xp_p2.
 */
struct cots_master_data {
	char	*cmd_src_addr;		/* client's address */
	int	cmd_xprt_started;	/* flag for clone routine to call */
					/* rpcmod's start routine. */
	struct rpc_cots_server *cmd_stats;	/* stats for zone */
};

/*
 * Transport private data.
 * Kept in clone_xprt->xp_p2buf.
 */
typedef struct cots_data {
	mblk_t	*cd_mp;		/* pre-allocated reply message */
	mblk_t	*cd_req_mp;	/* request message */
} cots_data_t;

/*
 * Server statistics
 * NOTE: This structure type is duplicated in the NFS fast path.
 */
static const struct rpc_cots_server {
	kstat_named_t	rscalls;
	kstat_named_t	rsbadcalls;
	kstat_named_t	rsnullrecv;
	kstat_named_t	rsbadlen;
	kstat_named_t	rsxdrcall;
	kstat_named_t	rsdupchecks;
	kstat_named_t	rsdupreqs;
} cots_rsstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "nullrecv",	KSTAT_DATA_UINT64 },
	{ "badlen",	KSTAT_DATA_UINT64 },
	{ "xdrcall",	KSTAT_DATA_UINT64 },
	{ "dupchecks",	KSTAT_DATA_UINT64 },
	{ "dupreqs",	KSTAT_DATA_UINT64 }
};

#define	CLONE2STATS(clone_xprt)	\
	((struct cots_master_data *)(clone_xprt)->xp_master->xp_p2)->cmd_stats
#define	RSSTAT_INCR(s, x)	\
	atomic_add_64(&(s)->x.value.ui64, 1)
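
/*
 * Usage sketch (informational, derived from the code below): each clone
 * transport resolves its per-zone counter block through CLONE2STATS()
 * and bumps individual counters atomically with RSSTAT_INCR(), e.g.
 *
 *	struct rpc_cots_server *stats = CLONE2STATS(clone_xprt);
 *	RSSTAT_INCR(stats, rscalls);
 *
 * The counters are registered as the "rpc_cots_server" kstat in the
 * "unix" module (see svc_cots_stats_init() at the end of this file),
 * so they can be inspected with, e.g., `kstat -m unix -n rpc_cots_server'.
 */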

/*
 * Pointer to a transport specific `ready to receive' function in rpcmod
 * (set from rpcmod).
 */
void	(*mir_start)(queue_t *);
uint_t	*svc_max_msg_sizep;

/*
 * The address size of the underlying transport can sometimes be
 * unknown (tinfo->ADDR_size == -1).  In that case it is
 * necessary to figure out what the size is so the correct amount
 * of data is allocated.  This is an iterative process:
 *	1. take a good guess (use T_MINADDRSIZE)
 *	2. try it.
 *	3. if it works then everything is OK
 *	4. if the error is ENAMETOOLONG, double the guess
 *	5. go back to step 2.
 */
#define	T_UNKNOWNADDRSIZE	(-1)
#define	T_MINADDRSIZE		32
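
/*
 * Illustrative sketch only (not used by this file): the doubling probe
 * described above would look roughly like the following, where
 * probe_addr_size() stands in for whatever operation (e.g. a T_ADDR_REQ
 * exchange) actually consumes the buffer and reports ENAMETOOLONG when
 * it is too small:
 *
 *	size_t guess = T_MINADDRSIZE;
 *	int error;
 *
 *	for (;;) {
 *		char *buf = kmem_alloc(guess, KM_SLEEP);
 *
 *		error = probe_addr_size(buf, guess);
 *		kmem_free(buf, guess);
 *		if (error != ENAMETOOLONG)
 *			break;			-- success or hard failure
 *		guess *= 2;			-- buffer too small: retry
 *	}
 *
 * On success `guess' holds an address size large enough for the
 * transport.
 */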

/*
 * Create a transport record.
 * The transport record, output buffer, and private data structure
 * are allocated. The output buffer is serialized into using xdrmem.
 * There is one transport record per user process which implements a
 * set of services.
 */
static kmutex_t cots_kcreate_lock;

int
svc_cots_kcreate(file_t *fp, uint_t max_msgsize, struct T_info_ack *tinfo,
    SVCMASTERXPRT **nxprt)
{
	struct cots_master_data *cmd;
	int err, retval;
	SVCMASTERXPRT *xprt;
	struct rpcstat *rpcstat;
	struct T_addr_ack *ack_p;
	struct strioctl getaddr;

	if (nxprt == NULL)
		return (EINVAL);

	rpcstat = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
	ASSERT(rpcstat != NULL);

	xprt = kmem_zalloc(sizeof (SVCMASTERXPRT), KM_SLEEP);

	cmd = kmem_zalloc(sizeof (*cmd) + sizeof (*ack_p)
	    + (2 * sizeof (sin6_t)), KM_SLEEP);

	ack_p = (struct T_addr_ack *)&cmd[1];

	if ((tinfo->TIDU_size > COTS_MAX_ALLOCSIZE) ||
	    (tinfo->TIDU_size <= 0))
		xprt->xp_msg_size = COTS_MAX_ALLOCSIZE;
	else {
		xprt->xp_msg_size = tinfo->TIDU_size -
		    (tinfo->TIDU_size % BYTES_PER_XDR_UNIT);
	}

	xprt->xp_ops = &svc_cots_op;
	xprt->xp_p2 = (caddr_t)cmd;
	cmd->cmd_xprt_started = 0;
	cmd->cmd_stats = rpcstat->rpc_cots_server;

	getaddr.ic_cmd = TI_GETINFO;
	getaddr.ic_timout = -1;
	getaddr.ic_len = sizeof (*ack_p) + (2 * sizeof (sin6_t));
	getaddr.ic_dp = (char *)ack_p;
	ack_p->PRIM_type = T_ADDR_REQ;

	err = strioctl(fp->f_vnode, I_STR, (intptr_t)&getaddr,
	    0, K_TO_K, CRED(), &retval);
	if (err) {
		kmem_free(cmd, sizeof (*cmd) + sizeof (*ack_p) +
		    (2 * sizeof (sin6_t)));
		kmem_free(xprt, sizeof (SVCMASTERXPRT));
		return (err);
	}

	xprt->xp_rtaddr.maxlen = ack_p->REMADDR_length;
	xprt->xp_rtaddr.len = ack_p->REMADDR_length;
	cmd->cmd_src_addr = xprt->xp_rtaddr.buf =
	    (char *)ack_p + ack_p->REMADDR_offset;

	xprt->xp_lcladdr.maxlen = ack_p->LOCADDR_length;
	xprt->xp_lcladdr.len = ack_p->LOCADDR_length;
	xprt->xp_lcladdr.buf = (char *)ack_p + ack_p->LOCADDR_offset;

	/*
	 * If the current sanity check size in rpcmod is smaller
	 * than the size needed for this xprt, then increase
	 * the sanity check.
	 */
	if (max_msgsize != 0 && svc_max_msg_sizep &&
	    max_msgsize > *svc_max_msg_sizep) {

		/* This check needs a lock */
		mutex_enter(&cots_kcreate_lock);
		if (svc_max_msg_sizep && max_msgsize > *svc_max_msg_sizep)
			*svc_max_msg_sizep = max_msgsize;
		mutex_exit(&cots_kcreate_lock);
	}

	*nxprt = xprt;

	return (0);
}

/*
 * Destroy a master transport record.
 * Frees the space allocated for a transport record.
 */
static void
svc_cots_kdestroy(SVCMASTERXPRT *xprt)
{
	struct cots_master_data *cmd = (struct cots_master_data *)xprt->xp_p2;

	ASSERT(cmd);

	if (xprt->xp_netid)
		kmem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	if (xprt->xp_addrmask.maxlen)
		kmem_free(xprt->xp_addrmask.buf, xprt->xp_addrmask.maxlen);

	mutex_destroy(&xprt->xp_req_lock);
	mutex_destroy(&xprt->xp_thread_lock);

	kmem_free(cmd, sizeof (*cmd) + sizeof (struct T_addr_ack) +
	    (2 * sizeof (sin6_t)));

	kmem_free(xprt, sizeof (SVCMASTERXPRT));
}

/*
 * svc_tli_kcreate() calls this function at the end to tell
 * rpcmod that the transport is ready to receive requests.
 */
static void
svc_cots_kstart(SVCMASTERXPRT *xprt)
{
	struct cots_master_data *cmd = (struct cots_master_data *)xprt->xp_p2;

	if (cmd->cmd_xprt_started == 0) {
		/*
		 * Acquire the xp_req_lock in order to use xp_wq
		 * safely (we don't want to qenable a queue that has
		 * already been closed).
		 */
		mutex_enter(&xprt->xp_req_lock);
		if (cmd->cmd_xprt_started == 0 &&
		    xprt->xp_wq != NULL) {
			(*mir_start)(xprt->xp_wq);
			cmd->cmd_xprt_started = 1;
		}
		mutex_exit(&xprt->xp_req_lock);
	}
}

/*
 * Transport-type specific part of svc_xprt_cleanup().
 */
static void
svc_cots_kclone_destroy(SVCXPRT *clone_xprt)
{
	cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;

	if (cd->cd_req_mp) {
		freemsg(cd->cd_req_mp);
		cd->cd_req_mp = (mblk_t *)0;
	}
	ASSERT(cd->cd_mp == NULL);
}

/*
 * Transport Attributes.
 */
static void
svc_cots_ktattrs(SVCXPRT *clone_xprt, int attrflag, void **tattr)
{
	*tattr = NULL;

	switch (attrflag) {
	case SVC_TATTR_ADDRMASK:
		*tattr = (void *)&clone_xprt->xp_master->xp_addrmask;
	}
}
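
/*
 * Usage sketch (informational): a caller holding a clone transport can
 * ask for the address mask of the underlying master transport through
 * the attributes entry of the ops vector, e.g.
 *
 *	void *mask = NULL;
 *
 *	svc_cots_ktattrs(clone_xprt, SVC_TATTR_ADDRMASK, &mask);
 *	if (mask != NULL) {
 *		-- mask points at the master xprt's xp_addrmask netbuf
 *	}
 *
 * In practice this routine is reached through the generic SVC_*
 * dispatch macros of the RPC framework rather than called directly.
 */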
343*12553SKaren.Rochford@Sun.COM
344*12553SKaren.Rochford@Sun.COM /*
3450Sstevel@tonic-gate * Receive rpc requests.
3460Sstevel@tonic-gate * Checks if the message is intact, and deserializes the call packet.
3470Sstevel@tonic-gate */
3480Sstevel@tonic-gate static bool_t
svc_cots_krecv(SVCXPRT * clone_xprt,mblk_t * mp,struct rpc_msg * msg)3490Sstevel@tonic-gate svc_cots_krecv(SVCXPRT *clone_xprt, mblk_t *mp, struct rpc_msg *msg)
3500Sstevel@tonic-gate {
3510Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;
3520Sstevel@tonic-gate XDR *xdrs = &clone_xprt->xp_xdrin;
3530Sstevel@tonic-gate struct rpc_cots_server *stats = CLONE2STATS(clone_xprt);
3540Sstevel@tonic-gate
3550Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KRECV_START,
3560Sstevel@tonic-gate "svc_cots_krecv_start:");
3570Sstevel@tonic-gate RPCLOG(4, "svc_cots_krecv_start clone_xprt = %p:\n",
3580Sstevel@tonic-gate (void *)clone_xprt);
3590Sstevel@tonic-gate
3600Sstevel@tonic-gate RSSTAT_INCR(stats, rscalls);
3610Sstevel@tonic-gate
3620Sstevel@tonic-gate if (mp->b_datap->db_type != M_DATA) {
3630Sstevel@tonic-gate RPCLOG(16, "svc_cots_krecv bad db_type %d\n",
3640Sstevel@tonic-gate mp->b_datap->db_type);
3650Sstevel@tonic-gate goto bad;
3660Sstevel@tonic-gate }
3670Sstevel@tonic-gate
3680Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_DECODE, 0);
3690Sstevel@tonic-gate
3700Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_XDR_CALLMSG_START,
3710Sstevel@tonic-gate "xdr_callmsg_start:");
3720Sstevel@tonic-gate RPCLOG0(4, "xdr_callmsg_start:\n");
3730Sstevel@tonic-gate if (!xdr_callmsg(xdrs, msg)) {
3740Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END,
3750Sstevel@tonic-gate "xdr_callmsg_end:(%S)", "bad");
3760Sstevel@tonic-gate RPCLOG0(1, "svc_cots_krecv xdr_callmsg failure\n");
3770Sstevel@tonic-gate RSSTAT_INCR(stats, rsxdrcall);
3780Sstevel@tonic-gate goto bad;
3790Sstevel@tonic-gate }
3800Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END,
3810Sstevel@tonic-gate "xdr_callmsg_end:(%S)", "good");
3820Sstevel@tonic-gate
3830Sstevel@tonic-gate clone_xprt->xp_xid = msg->rm_xid;
3840Sstevel@tonic-gate cd->cd_req_mp = mp;
3850Sstevel@tonic-gate
3860Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KRECV_END,
3870Sstevel@tonic-gate "svc_cots_krecv_end:(%S)", "good");
3880Sstevel@tonic-gate RPCLOG0(4, "svc_cots_krecv_end:good\n");
3890Sstevel@tonic-gate return (TRUE);
3900Sstevel@tonic-gate
3910Sstevel@tonic-gate bad:
3920Sstevel@tonic-gate if (mp)
3930Sstevel@tonic-gate freemsg(mp);
3940Sstevel@tonic-gate
3950Sstevel@tonic-gate RSSTAT_INCR(stats, rsbadcalls);
3960Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KRECV_END,
3970Sstevel@tonic-gate "svc_cots_krecv_end:(%S)", "bad");
3980Sstevel@tonic-gate return (FALSE);
3990Sstevel@tonic-gate }
4000Sstevel@tonic-gate
4010Sstevel@tonic-gate /*
4020Sstevel@tonic-gate * Send rpc reply.
4030Sstevel@tonic-gate */
4040Sstevel@tonic-gate static bool_t
svc_cots_ksend(SVCXPRT * clone_xprt,struct rpc_msg * msg)4050Sstevel@tonic-gate svc_cots_ksend(SVCXPRT *clone_xprt, struct rpc_msg *msg)
4060Sstevel@tonic-gate {
4070Sstevel@tonic-gate /* LINTED pointer alignment */
4080Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;
4090Sstevel@tonic-gate XDR *xdrs = &(clone_xprt->xp_xdrout);
4100Sstevel@tonic-gate int retval = FALSE;
4110Sstevel@tonic-gate mblk_t *mp;
4120Sstevel@tonic-gate xdrproc_t xdr_results;
4130Sstevel@tonic-gate caddr_t xdr_location;
4140Sstevel@tonic-gate bool_t has_args;
4150Sstevel@tonic-gate
4160Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KSEND_START,
4170Sstevel@tonic-gate "svc_cots_ksend_start:");
4180Sstevel@tonic-gate
4190Sstevel@tonic-gate /*
4200Sstevel@tonic-gate * If there is a result procedure specified in the reply message,
4210Sstevel@tonic-gate * it will be processed in the xdr_replymsg and SVCAUTH_WRAP.
4220Sstevel@tonic-gate * We need to make sure it won't be processed twice, so we null
4230Sstevel@tonic-gate * it for xdr_replymsg here.
4240Sstevel@tonic-gate */
4250Sstevel@tonic-gate has_args = FALSE;
4260Sstevel@tonic-gate if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
4270Sstevel@tonic-gate msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
4280Sstevel@tonic-gate if ((xdr_results = msg->acpted_rply.ar_results.proc) != NULL) {
4290Sstevel@tonic-gate has_args = TRUE;
4300Sstevel@tonic-gate xdr_location = msg->acpted_rply.ar_results.where;
4310Sstevel@tonic-gate msg->acpted_rply.ar_results.proc = xdr_void;
4320Sstevel@tonic-gate msg->acpted_rply.ar_results.where = NULL;
4330Sstevel@tonic-gate }
4340Sstevel@tonic-gate }
4350Sstevel@tonic-gate
4360Sstevel@tonic-gate mp = cd->cd_mp;
4370Sstevel@tonic-gate if (mp) {
4380Sstevel@tonic-gate /*
4390Sstevel@tonic-gate * The program above pre-allocated an mblk and put
4400Sstevel@tonic-gate * the data in place.
4410Sstevel@tonic-gate */
4420Sstevel@tonic-gate cd->cd_mp = (mblk_t *)NULL;
4430Sstevel@tonic-gate if (!(xdr_replymsg_body(xdrs, msg) &&
4440Sstevel@tonic-gate (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs,
4450Sstevel@tonic-gate xdr_results, xdr_location)))) {
4460Sstevel@tonic-gate RPCLOG0(1, "svc_cots_ksend: "
4470Sstevel@tonic-gate "xdr_replymsg_body/SVCAUTH_WRAP failed\n");
4480Sstevel@tonic-gate freemsg(mp);
4490Sstevel@tonic-gate goto out;
4500Sstevel@tonic-gate }
4510Sstevel@tonic-gate } else {
4520Sstevel@tonic-gate int len;
4530Sstevel@tonic-gate int mpsize;
4540Sstevel@tonic-gate
4550Sstevel@tonic-gate /*
4560Sstevel@tonic-gate * Leave space for protocol headers.
4570Sstevel@tonic-gate */
4580Sstevel@tonic-gate len = MSG_OFFSET + clone_xprt->xp_msg_size;
4590Sstevel@tonic-gate
4600Sstevel@tonic-gate /*
4610Sstevel@tonic-gate * Allocate an initial mblk for the response data.
4620Sstevel@tonic-gate */
4630Sstevel@tonic-gate while (!(mp = allocb(len, BPRI_LO))) {
			RPCLOG0(16, "svc_cots_ksend: allocb failed\n");
			if (strwaitbuf(len, BPRI_LO)) {
				TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KSEND_END,
				    "svc_cots_ksend_end:(%S)", "strwaitbuf");
				RPCLOG0(1,
				    "svc_cots_ksend: strwaitbuf failed\n");
				goto out;
			}
		}

		/*
		 * Initialize the XDR encode stream.  Additional mblks
		 * will be allocated if necessary.  They will be TIDU
		 * sized.
		 */
		xdrmblk_init(xdrs, mp, XDR_ENCODE, clone_xprt->xp_msg_size);
		mpsize = MBLKSIZE(mp);
		ASSERT(mpsize >= len);
		ASSERT(mp->b_rptr == mp->b_datap->db_base);

		/*
		 * If the size of mblk is not appreciably larger than what we
		 * asked, then resize the mblk to exactly len bytes. Reason for
		 * this: suppose len is 1600 bytes, the tidu is 1460 bytes
		 * (from TCP over ethernet), and the arguments to RPC require
		 * 2800 bytes. Ideally we want the protocol to render two
		 * ~1400 byte segments over the wire. If allocb() gives us a 2k
		 * mblk, and we allocate a second mblk for the rest, the
		 * protocol module may generate 3 segments over the wire:
		 * 1460 bytes for the first, 448 (2048 - 1600) for the 2nd, and
		 * 892 for the 3rd. If we "waste" 448 bytes in the first mblk,
		 * the XDR encoding will generate two ~1400 byte mblks, and the
		 * protocol module is more likely to produce properly sized
		 * segments.
		 */
		if ((mpsize >> 1) <= len) {
			mp->b_rptr += (mpsize - len);
		}

		/*
		 * Adjust b_rptr to reserve space for the non-data protocol
		 * headers that any downstream modules might like to add, and
		 * for the record marking header.
		 */
		mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE);

		XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base));
		ASSERT(mp->b_wptr == mp->b_rptr);

		msg->rm_xid = clone_xprt->xp_xid;

		TRACE_0(TR_FAC_KRPC, TR_XDR_REPLYMSG_START,
		    "xdr_replymsg_start:");
		if (!(xdr_replymsg(xdrs, msg) &&
		    (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs,
		    xdr_results, xdr_location)))) {
			TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END,
			    "xdr_replymsg_end:(%S)", "bad");
			freemsg(mp);
			RPCLOG0(1, "svc_cots_ksend: xdr_replymsg/SVCAUTH_WRAP "
			    "failed\n");
			goto out;
		}
		TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END,
		    "xdr_replymsg_end:(%S)", "good");
	}

	put(clone_xprt->xp_wq, mp);
	retval = TRUE;

out:
	/*
	 * This is completely disgusting.  If public is set it is
	 * a pointer to a structure whose first field is the address
	 * of the function to free that structure and any related
	 * stuff.  (see rrokfree in nfs_xdr.c).
	 */
	if (xdrs->x_public) {
		/* LINTED pointer alignment */
		(**((int (**)())xdrs->x_public))(xdrs->x_public);
	}

	TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KSEND_END,
	    "svc_cots_ksend_end:(%S)", "done");
	return (retval);
}

/*
 * Deserialize arguments.
 */
static bool_t
svc_cots_kgetargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args,
    caddr_t args_ptr)
{
	return (SVCAUTH_UNWRAP(&clone_xprt->xp_auth, &clone_xprt->xp_xdrin,
	    xdr_args, args_ptr));
}

static bool_t
svc_cots_kfreeargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args,
    caddr_t args_ptr)
{
	cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;
	mblk_t *mp;
	bool_t retval;

	/*
	 * It is important to call the XDR routine before
	 * freeing the request mblk.  Structures in the
	 * XDR data may point into the mblk and require that
	 * the memory be intact during the free routine.
	 */
	if (args_ptr) {
		/* LINTED pointer alignment */
		XDR	*xdrs = &clone_xprt->xp_xdrin;

		xdrs->x_op = XDR_FREE;
		retval = (*xdr_args)(xdrs, args_ptr);
	} else
		retval = TRUE;

	if ((mp = cd->cd_req_mp) != NULL) {
		cd->cd_req_mp = (mblk_t *)0;
		freemsg(mp);
	}

	return (retval);
}

static int32_t *
svc_cots_kgetres(SVCXPRT *clone_xprt, int size)
{
	/* LINTED pointer alignment */
	cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;
	XDR *xdrs = &clone_xprt->xp_xdrout;
	mblk_t *mp;
	int32_t *buf;
	struct rpc_msg rply;
	int len;
	int mpsize;

	/*
	 * Leave space for protocol headers.
	 */
	len = MSG_OFFSET + clone_xprt->xp_msg_size;

	/*
	 * Allocate an initial mblk for the response data.
	 */
	while ((mp = allocb(len, BPRI_LO)) == NULL) {
		if (strwaitbuf(len, BPRI_LO))
			return (NULL);
	}

	/*
	 * Initialize the XDR encode stream.  Additional mblks
	 * will be allocated if necessary.  They will be TIDU
	 * sized.
	 */
	xdrmblk_init(xdrs, mp, XDR_ENCODE, clone_xprt->xp_msg_size);
	mpsize = MBLKSIZE(mp);
	ASSERT(mpsize >= len);
	ASSERT(mp->b_rptr == mp->b_datap->db_base);

	/*
	 * If the size of mblk is not appreciably larger than what we
	 * asked, then resize the mblk to exactly len bytes. Reason for
	 * this: suppose len is 1600 bytes, the tidu is 1460 bytes
	 * (from TCP over ethernet), and the arguments to RPC require
	 * 2800 bytes. Ideally we want the protocol to render two
	 * ~1400 byte segments over the wire. If allocb() gives us a 2k
	 * mblk, and we allocate a second mblk for the rest, the
	 * protocol module may generate 3 segments over the wire:
	 * 1460 bytes for the first, 448 (2048 - 1600) for the 2nd, and
	 * 892 for the 3rd. If we "waste" 448 bytes in the first mblk,
	 * the XDR encoding will generate two ~1400 byte mblks, and the
	 * protocol module is more likely to produce properly sized
	 * segments.
	 */
	if ((mpsize >> 1) <= len) {
		mp->b_rptr += (mpsize - len);
	}

	/*
	 * Adjust b_rptr to reserve space for the non-data protocol
	 * headers that any downstream modules might like to add, and
	 * for the record marking header.
	 */
	mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE);

	XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base));
	ASSERT(mp->b_wptr == mp->b_rptr);

	/*
	 * Assume a successful RPC since most of them are.
	 */
	rply.rm_xid = clone_xprt->xp_xid;
	rply.rm_direction = REPLY;
	rply.rm_reply.rp_stat = MSG_ACCEPTED;
	rply.acpted_rply.ar_verf = clone_xprt->xp_verf;
	rply.acpted_rply.ar_stat = SUCCESS;

	if (!xdr_replymsg_hdr(xdrs, &rply)) {
		freeb(mp);
		return (NULL);
	}

	buf = XDR_INLINE(xdrs, size);
	if (buf == NULL) {
		ASSERT(cd->cd_mp == NULL);
		freemsg(mp);
	} else {
		cd->cd_mp = mp;
	}
	return (buf);
}

static void
svc_cots_kfreeres(SVCXPRT *clone_xprt)
{
	cots_data_t *cd;
	mblk_t *mp;

	cd = (cots_data_t *)clone_xprt->xp_p2buf;
	if ((mp = cd->cd_mp) != NULL) {
		cd->cd_mp = (mblk_t *)NULL;
		freemsg(mp);
	}
}

/*
 * The dup caching routines below provide a cache of non-failure
 * transaction IDs.  RPC service routines can use this to detect
 * retransmissions and re-send a non-failure response.
 */

/*
 * MAXDUPREQS is the number of cached items. It should be adjusted
 * to the service load so that there is likely to be a response entry
 * when the first retransmission comes in.
 */
#define	MAXDUPREQS	1024

/*
 * This should be appropriately scaled to MAXDUPREQS.
 */
#define	DRHASHSZ	257

#if ((DRHASHSZ & (DRHASHSZ - 1)) == 0)
#define	XIDHASH(xid)	((xid) & (DRHASHSZ - 1))
#else
#define	XIDHASH(xid)	((xid) % DRHASHSZ)
#endif
#define	DRHASH(dr)	XIDHASH((dr)->dr_xid)
#define	REQTOXID(req)	((req)->rq_xprt->xp_xid)
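
/*
 * Informational note: with the default DRHASHSZ of 257 (not a power of
 * two) the second form of XIDHASH() above is compiled in, so an entry
 * is simply bucketed by xid modulo the table size.  For example:
 *
 *	XIDHASH(0x12345678) == 305419896 % 257 == 68
 *
 * If DRHASHSZ were changed to a power of two, the cheaper mask form
 * would be selected automatically by the #if.
 */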

static int	cotsndupreqs = 0;
int	cotsmaxdupreqs = MAXDUPREQS;
static kmutex_t cotsdupreq_lock;
static struct dupreq *cotsdrhashtbl[DRHASHSZ];
static int	cotsdrhashstat[DRHASHSZ];

static void unhash(struct dupreq *);

/*
 * cotsdrmru points to the head of a circular linked list in lru order.
 * cotsdrmru->dr_next == drlru
 */
struct dupreq *cotsdrmru;

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_cots_kdup
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 *
 * svc_cots_kdup searches the request cache and returns 0 if the
 * request is not found in the cache. If it is found, then it
 * returns the state of the request (in progress or done) and
 * the status or attributes that were part of the original reply.
 *
 * If DUP_DONE (there is a duplicate) svc_cots_kdup copies over the
 * value of the response. In that case, also return in *dupcachedp
 * whether the response free routine is cached in the dupreq - in which case
 * the caller should not be freeing it, because it will be done later
 * in the svc_cots_kdup code when the dupreq is reused.
 */
static int
svc_cots_kdup(struct svc_req *req, caddr_t res, int size, struct dupreq **drpp,
    bool_t *dupcachedp)
{
	struct rpc_cots_server *stats = CLONE2STATS(req->rq_xprt);
	struct dupreq *dr;
	uint32_t xid;
	uint32_t drhash;
	int status;

	xid = REQTOXID(req);
	mutex_enter(&cotsdupreq_lock);
	RSSTAT_INCR(stats, rsdupchecks);
	/*
	 * Check to see whether an entry already exists in the cache.
	 */
	dr = cotsdrhashtbl[XIDHASH(xid)];
	while (dr != NULL) {
		if (dr->dr_xid == xid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    bcmp((caddr_t)dr->dr_addr.buf,
		    (caddr_t)req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) {
			status = dr->dr_status;
			if (status == DUP_DONE) {
				bcopy(dr->dr_resp.buf, res, size);
				if (dupcachedp != NULL)
					*dupcachedp = (dr->dr_resfree != NULL);
				TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KDUP_DONE,
				    "svc_cots_kdup: DUP_DONE");
			} else {
				dr->dr_status = DUP_INPROGRESS;
				*drpp = dr;
				TRACE_0(TR_FAC_KRPC,
				    TR_SVC_COTS_KDUP_INPROGRESS,
				    "svc_cots_kdup: DUP_INPROGRESS");
			}
			RSSTAT_INCR(stats, rsdupreqs);
			mutex_exit(&cotsdupreq_lock);
			return (status);
		}
		dr = dr->dr_chain;
	}

	/*
	 * There wasn't an entry, either allocate a new one or recycle
	 * an old one.
	 */
	if (cotsndupreqs < cotsmaxdupreqs) {
		dr = kmem_alloc(sizeof (*dr), KM_NOSLEEP);
		if (dr == NULL) {
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
		dr->dr_resp.buf = NULL;
		dr->dr_resp.maxlen = 0;
		dr->dr_addr.buf = NULL;
		dr->dr_addr.maxlen = 0;
		if (cotsdrmru) {
			dr->dr_next = cotsdrmru->dr_next;
			cotsdrmru->dr_next = dr;
		} else {
			dr->dr_next = dr;
		}
		cotsndupreqs++;
	} else {
		dr = cotsdrmru->dr_next;
		while (dr->dr_status == DUP_INPROGRESS) {
			dr = dr->dr_next;
			if (dr == cotsdrmru->dr_next) {
				cmn_err(CE_WARN, "svc_cots_kdup no slots free");
				mutex_exit(&cotsdupreq_lock);
				return (DUP_ERROR);
			}
		}
		unhash(dr);
		if (dr->dr_resfree) {
			(*dr->dr_resfree)(dr->dr_resp.buf);
		}
	}
	dr->dr_resfree = NULL;
	cotsdrmru = dr;

	dr->dr_xid = REQTOXID(req);
	dr->dr_prog = req->rq_prog;
	dr->dr_vers = req->rq_vers;
	dr->dr_proc = req->rq_proc;
	if (dr->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
		if (dr->dr_addr.buf != NULL)
			kmem_free(dr->dr_addr.buf, dr->dr_addr.maxlen);
		dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
		dr->dr_addr.buf = kmem_alloc(dr->dr_addr.maxlen, KM_NOSLEEP);
		if (dr->dr_addr.buf == NULL) {
			dr->dr_addr.maxlen = 0;
			dr->dr_status = DUP_DROP;
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
	}
	dr->dr_addr.len = req->rq_xprt->xp_rtaddr.len;
	bcopy(req->rq_xprt->xp_rtaddr.buf, dr->dr_addr.buf, dr->dr_addr.len);
	if (dr->dr_resp.maxlen < size) {
		if (dr->dr_resp.buf != NULL)
			kmem_free(dr->dr_resp.buf, dr->dr_resp.maxlen);
		dr->dr_resp.maxlen = (unsigned int)size;
		dr->dr_resp.buf = kmem_alloc(size, KM_NOSLEEP);
		if (dr->dr_resp.buf == NULL) {
			dr->dr_resp.maxlen = 0;
			dr->dr_status = DUP_DROP;
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
	}
	dr->dr_status = DUP_INPROGRESS;

	drhash = (uint32_t)DRHASH(dr);
	dr->dr_chain = cotsdrhashtbl[drhash];
	cotsdrhashtbl[drhash] = dr;
	cotsdrhashstat[drhash]++;
	mutex_exit(&cotsdupreq_lock);
	*drpp = dr;
	return (DUP_NEW);
}

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_cots_kdupdone
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 *
 * svc_cots_kdupdone marks the request done (DUP_DONE or DUP_DROP)
 * and stores the response.
 */
static void
svc_cots_kdupdone(struct dupreq *dr, caddr_t res, void (*dis_resfree)(),
    int size, int status)
{
	ASSERT(dr->dr_resfree == NULL);
	if (status == DUP_DONE) {
		bcopy(res, dr->dr_resp.buf, size);
		dr->dr_resfree = dis_resfree;
	}
	dr->dr_status = status;
}

/*
 * This routine expects that the mutex, cotsdupreq_lock, is already held.
 */
static void
unhash(struct dupreq *dr)
{
	struct dupreq *drt;
	struct dupreq *drtprev = NULL;
	uint32_t drhash;

	ASSERT(MUTEX_HELD(&cotsdupreq_lock));

	drhash = (uint32_t)DRHASH(dr);
	drt = cotsdrhashtbl[drhash];
	while (drt != NULL) {
		if (drt == dr) {
			cotsdrhashstat[drhash]--;
			if (drtprev == NULL) {
				cotsdrhashtbl[drhash] = drt->dr_chain;
			} else {
				drtprev->dr_chain = drt->dr_chain;
			}
			return;
		}
		drtprev = drt;
		drt = drt->dr_chain;
	}
}

void
svc_cots_stats_init(zoneid_t zoneid, struct rpc_cots_server **statsp)
{
	*statsp = (struct rpc_cots_server *)rpcstat_zone_init_common(zoneid,
	    "unix", "rpc_cots_server", (const kstat_named_t *)&cots_rsstat_tmpl,
	    sizeof (cots_rsstat_tmpl));
}

void
svc_cots_stats_fini(zoneid_t zoneid, struct rpc_cots_server **statsp)
{
	rpcstat_zone_fini_common(zoneid, "unix", "rpc_cots_server");
	kmem_free(*statsp, sizeof (cots_rsstat_tmpl));
}

void
svc_cots_init(void)
{
	/*
	 * Check to make sure that the cots private data will fit into
	 * the stack buffer allocated by svc_run. The ASSERT is a safety
	 * net if the cots_data_t structure ever changes.
	 */
	/*CONSTANTCONDITION*/
	ASSERT(sizeof (cots_data_t) <= SVC_P2LEN);

	mutex_init(&cots_kcreate_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cotsdupreq_lock, NULL, MUTEX_DEFAULT, NULL);
}