10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 57208Svv149972 * Common Development and Distribution License (the "License"). 67208Svv149972 * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 22*11967SKaren.Rochford@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */ 270Sstevel@tonic-gate /* All Rights Reserved */ 280Sstevel@tonic-gate 290Sstevel@tonic-gate /* 300Sstevel@tonic-gate * Portions of this source code were derived from Berkeley 4.3 BSD 310Sstevel@tonic-gate * under license from the Regents of the University of California. 320Sstevel@tonic-gate */ 330Sstevel@tonic-gate 340Sstevel@tonic-gate /* 350Sstevel@tonic-gate * svc_cots.c 360Sstevel@tonic-gate * Server side for connection-oriented RPC in the kernel. 
370Sstevel@tonic-gate * 380Sstevel@tonic-gate */ 390Sstevel@tonic-gate 400Sstevel@tonic-gate #include <sys/param.h> 410Sstevel@tonic-gate #include <sys/types.h> 420Sstevel@tonic-gate #include <sys/sysmacros.h> 430Sstevel@tonic-gate #include <sys/file.h> 440Sstevel@tonic-gate #include <sys/stream.h> 450Sstevel@tonic-gate #include <sys/strsubr.h> 460Sstevel@tonic-gate #include <sys/strsun.h> 470Sstevel@tonic-gate #include <sys/stropts.h> 480Sstevel@tonic-gate #include <sys/tiuser.h> 490Sstevel@tonic-gate #include <sys/timod.h> 500Sstevel@tonic-gate #include <sys/tihdr.h> 510Sstevel@tonic-gate #include <sys/fcntl.h> 520Sstevel@tonic-gate #include <sys/errno.h> 530Sstevel@tonic-gate #include <sys/kmem.h> 540Sstevel@tonic-gate #include <sys/systm.h> 550Sstevel@tonic-gate #include <sys/debug.h> 560Sstevel@tonic-gate #include <sys/cmn_err.h> 570Sstevel@tonic-gate #include <sys/kstat.h> 580Sstevel@tonic-gate #include <sys/vtrace.h> 590Sstevel@tonic-gate 600Sstevel@tonic-gate #include <rpc/types.h> 610Sstevel@tonic-gate #include <rpc/xdr.h> 620Sstevel@tonic-gate #include <rpc/auth.h> 630Sstevel@tonic-gate #include <rpc/rpc_msg.h> 640Sstevel@tonic-gate #include <rpc/svc.h> 657208Svv149972 #include <inet/ip.h> 660Sstevel@tonic-gate 670Sstevel@tonic-gate #define COTS_MAX_ALLOCSIZE 2048 680Sstevel@tonic-gate #define MSG_OFFSET 128 /* offset of call into the mblk */ 690Sstevel@tonic-gate #define RM_HDR_SIZE 4 /* record mark header size */ 700Sstevel@tonic-gate 710Sstevel@tonic-gate /* 720Sstevel@tonic-gate * Routines exported through ops vector. 
 */
static bool_t	svc_cots_krecv(SVCXPRT *, mblk_t *, struct rpc_msg *);
static bool_t	svc_cots_ksend(SVCXPRT *, struct rpc_msg *);
static bool_t	svc_cots_kgetargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t	svc_cots_kfreeargs(SVCXPRT *, xdrproc_t, caddr_t);
static void	svc_cots_kdestroy(SVCMASTERXPRT *);
static int	svc_cots_kdup(struct svc_req *, caddr_t, int,
			struct dupreq **, bool_t *);
static void	svc_cots_kdupdone(struct dupreq *, caddr_t,
			void (*)(), int, int);
static int32_t	*svc_cots_kgetres(SVCXPRT *, int);
static void	svc_cots_kfreeres(SVCXPRT *);
static void	svc_cots_kclone_destroy(SVCXPRT *);
static void	svc_cots_kstart(SVCMASTERXPRT *);

/*
 * Server transport operations vector.
 * The generic kernel RPC framework dispatches through this table;
 * the slot order is fixed by struct svc_ops.
 */
struct svc_ops svc_cots_op = {
	svc_cots_krecv,		/* Get requests */
	svc_cots_kgetargs,	/* Deserialize arguments */
	svc_cots_ksend,		/* Send reply */
	svc_cots_kfreeargs,	/* Free argument data space */
	svc_cots_kdestroy,	/* Destroy transport handle */
	svc_cots_kdup,		/* Check entry in dup req cache */
	svc_cots_kdupdone,	/* Mark entry in dup req cache as done */
	svc_cots_kgetres,	/* Get pointer to response buffer */
	svc_cots_kfreeres,	/* Destroy pre-serialized response header */
	svc_cots_kclone_destroy, /* Destroy a clone xprt */
	svc_cots_kstart,	/* Tell `ready-to-receive' to rpcmod */
	NULL			/* Transport specific clone xprt */
};

/*
 * Master transport private data.
 * Kept in xprt->xp_p2.
 */
struct cots_master_data {
	char	*cmd_src_addr;	/* client's address */
	int	cmd_xprt_started; /* flag for clone routine to call */
				/* rpcmod's start routine. */
	struct rpc_cots_server *cmd_stats;	/* stats for zone */
};

/*
 * Transport private data.
 * Kept in clone_xprt->xp_p2buf.
 */
typedef struct cots_data {
	mblk_t	*cd_mp;		/* pre-allocated reply message */
	mblk_t	*cd_req_mp;	/* request message */
} cots_data_t;

/*
 * Server statistics
 * NOTE: This structure type is duplicated in the NFS fast path.
 * Keep the two definitions in sync if fields are added.
 */
static const struct rpc_cots_server {
	kstat_named_t	rscalls;	/* total calls received */
	kstat_named_t	rsbadcalls;	/* calls that failed to decode */
	kstat_named_t	rsnullrecv;	/* receives that yielded nothing */
	kstat_named_t	rsbadlen;	/* requests with bad length */
	kstat_named_t	rsxdrcall;	/* xdr_callmsg decode failures */
	kstat_named_t	rsdupchecks;	/* dup-cache lookups */
	kstat_named_t	rsdupreqs;	/* dup-cache hits */
} cots_rsstat_tmpl = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "nullrecv",	KSTAT_DATA_UINT64 },
	{ "badlen",	KSTAT_DATA_UINT64 },
	{ "xdrcall",	KSTAT_DATA_UINT64 },
	{ "dupchecks",	KSTAT_DATA_UINT64 },
	{ "dupreqs",	KSTAT_DATA_UINT64 }
};

/* Per-zone stats live off the master xprt's private data. */
#define	CLONE2STATS(clone_xprt)	\
	((struct cots_master_data *)(clone_xprt)->xp_master->xp_p2)->cmd_stats
/* Lock-free counter bump; counters are 64-bit kstat values. */
#define	RSSTAT_INCR(s, x)	\
	atomic_add_64(&(s)->x.value.ui64, 1)

/*
 * Pointer to a transport specific `ready to receive' function in rpcmod
 * (set from rpcmod).
 */
void	(*mir_start)(queue_t *);	/* rpcmod's start routine */
uint_t	*svc_max_msg_sizep;		/* rpcmod's message sanity limit */

/*
 * The address size of the underlying transport can sometimes be
 * unknown (tinfo->ADDR_size == -1).  For this case, it is
 * necessary to figure out what the size is so the correct amount
 * of data is allocated.  This is an iterative process:
 *	1. take a good guess (use T_MINADDRSIZE)
 *	2. try it.
 *	3. if it works then everything is ok
 *	4. if the error is ENAMETOLONG, double the guess
 *	5. go back to step 2.
 *
 * NOTE(review): no code in this file appears to reference these two
 * constants any longer — confirm before removing them.
 */
#define	T_UNKNOWNADDRSIZE	(-1)
#define	T_MINADDRSIZE	32

/*
 * Create a transport record.
 * The transport record, output buffer, and private data structure
 * are allocated.  The output buffer is serialized into using xdrmem.
 * There is one transport record per user process which implements a
 * set of services.
 */
static kmutex_t cots_kcreate_lock;

/*
 * Allocate and initialize a COTS master transport handle for the
 * stream underlying `fp'.  On success *nxprt holds the new handle and
 * 0 is returned; on failure all allocations are undone and an errno
 * value is returned.
 *
 * The private data (cmd), the T_addr_ack and the space for the local
 * and remote addresses are carved out of a single kmem allocation;
 * xp_rtaddr.buf and xp_lcladdr.buf point into it, so the whole thing
 * is released by one kmem_free() in svc_cots_kdestroy().
 */
int
svc_cots_kcreate(file_t *fp, uint_t max_msgsize, struct T_info_ack *tinfo,
    SVCMASTERXPRT **nxprt)
{
	struct cots_master_data *cmd;
	int err, retval;
	SVCMASTERXPRT *xprt;
	struct rpcstat *rpcstat;
	struct T_addr_ack *ack_p;
	struct strioctl getaddr;

	if (nxprt == NULL)
		return (EINVAL);

	rpcstat = zone_getspecific(rpcstat_zone_key, curproc->p_zone);
	ASSERT(rpcstat != NULL);

	xprt = kmem_zalloc(sizeof (SVCMASTERXPRT), KM_SLEEP);

	/*
	 * One allocation: cmd, then the T_addr_ack, then room for two
	 * sockaddrs (sized for IPv6, the largest we expect).
	 */
	cmd = kmem_zalloc(sizeof (*cmd) + sizeof (*ack_p)
	    + (2 * sizeof (sin6_t)), KM_SLEEP);

	/* The T_addr_ack lives immediately after cmd in the allocation. */
	ack_p = (struct T_addr_ack *)&cmd[1];

	/*
	 * Clamp the per-message buffer size to the transport's TIDU,
	 * rounded down to an XDR unit; fall back to the fixed maximum
	 * when the TIDU is unknown or unreasonable.
	 */
	if ((tinfo->TIDU_size > COTS_MAX_ALLOCSIZE) ||
	    (tinfo->TIDU_size <= 0))
		xprt->xp_msg_size = COTS_MAX_ALLOCSIZE;
	else {
		xprt->xp_msg_size = tinfo->TIDU_size -
		    (tinfo->TIDU_size % BYTES_PER_XDR_UNIT);
	}

	xprt->xp_ops = &svc_cots_op;
	xprt->xp_p2 = (caddr_t)cmd;
	cmd->cmd_xprt_started = 0;
	cmd->cmd_stats = rpcstat->rpc_cots_server;

	/*
	 * Ask the transport for the local and remote addresses.  The
	 * ioctl payload carries a T_ADDR_REQ primitive; presumably
	 * timod forwards the embedded primitive and fills in the
	 * T_addr_ack — confirm against timod(7M) if changing this.
	 */
	getaddr.ic_cmd = TI_GETINFO;
	getaddr.ic_timout = -1;
	getaddr.ic_len = sizeof (*ack_p) + (2 * sizeof (sin6_t));
	getaddr.ic_dp = (char *)ack_p;
	ack_p->PRIM_type = T_ADDR_REQ;

	err = strioctl(fp->f_vnode, I_STR, (intptr_t)&getaddr,
	    0, K_TO_K, CRED(), &retval);
	if (err) {
		/* Undo both allocations; nothing else was published yet. */
		kmem_free(cmd, sizeof (*cmd) + sizeof (*ack_p) +
		    (2 * sizeof (sin6_t)));
		kmem_free(xprt, sizeof (SVCMASTERXPRT));
		return (err);
	}

	/*
	 * Both address buffers point into the ack within the cmd
	 * allocation — they must not be freed independently.
	 */
	xprt->xp_rtaddr.maxlen = ack_p->REMADDR_length;
	xprt->xp_rtaddr.len = ack_p->REMADDR_length;
	cmd->cmd_src_addr = xprt->xp_rtaddr.buf =
	    (char *)ack_p + ack_p->REMADDR_offset;

	xprt->xp_lcladdr.maxlen = ack_p->LOCADDR_length;
	xprt->xp_lcladdr.len = ack_p->LOCADDR_length;
	xprt->xp_lcladdr.buf = (char *)ack_p + ack_p->LOCADDR_offset;

	/*
	 * If the current sanity check size in rpcmod is smaller
	 * than the size needed for this xprt, then increase
	 * the sanity check.
	 */
	if (max_msgsize != 0 && svc_max_msg_sizep &&
	    max_msgsize > *svc_max_msg_sizep) {

		/* This check needs a lock */
		mutex_enter(&cots_kcreate_lock);
		if (svc_max_msg_sizep && max_msgsize > *svc_max_msg_sizep)
			*svc_max_msg_sizep = max_msgsize;
		mutex_exit(&cots_kcreate_lock);
	}

	*nxprt = xprt;

	return (0);
}

/*
 * Destroy a master transport record.
 * Frees the space allocated for a transport record.
 */
static void
svc_cots_kdestroy(SVCMASTERXPRT *xprt)
{
	struct cots_master_data *cmd = (struct cots_master_data *)xprt->xp_p2;

	ASSERT(cmd);

	if (xprt->xp_netid)
		kmem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	if (xprt->xp_addrmask.maxlen)
		kmem_free(xprt->xp_addrmask.buf, xprt->xp_addrmask.maxlen);

	mutex_destroy(&xprt->xp_req_lock);
	mutex_destroy(&xprt->xp_thread_lock);

	/*
	 * The cmd allocation also carries the T_addr_ack and the two
	 * address buffers set up by svc_cots_kcreate(), so this single
	 * free releases xp_rtaddr/xp_lcladdr storage as well.
	 */
	kmem_free(cmd, sizeof (*cmd) + sizeof (struct T_addr_ack) +
	    (2 * sizeof (sin6_t)));

	kmem_free(xprt, sizeof (SVCMASTERXPRT));
}

/*
 * svc_tli_kcreate() calls this function at the end to tell
 * rpcmod that the transport is ready to receive requests.
 */
static void
svc_cots_kstart(SVCMASTERXPRT *xprt)
{
	struct cots_master_data *cmd = (struct cots_master_data *)xprt->xp_p2;

	/* Cheap unlocked peek first; recheck under the lock below. */
	if (cmd->cmd_xprt_started == 0) {
		/*
		 * Acquire the xp_req_lock in order to use xp_wq
		 * safely (we don't want to qenable a queue that has
		 * already been closed).
		 */
		mutex_enter(&xprt->xp_req_lock);
		if (cmd->cmd_xprt_started == 0 &&
		    xprt->xp_wq != NULL) {
			(*mir_start)(xprt->xp_wq);
			cmd->cmd_xprt_started = 1;
		}
		mutex_exit(&xprt->xp_req_lock);
	}
}

/*
 * Transport-type specific part of svc_xprt_cleanup().
 * Releases a still-held request mblk, if any; the reply mblk
 * (cd_mp) must already have been consumed or freed by this point.
 */
static void
svc_cots_kclone_destroy(SVCXPRT *clone_xprt)
{
	cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf;

	if (cd->cd_req_mp) {
		freemsg(cd->cd_req_mp);
		cd->cd_req_mp = (mblk_t *)0;
	}
	ASSERT(cd->cd_mp == NULL);
}

/*
 * Receive rpc requests.
 * Checks if the message is intact, and deserializes the call packet.
3320Sstevel@tonic-gate */ 3330Sstevel@tonic-gate static bool_t 3340Sstevel@tonic-gate svc_cots_krecv(SVCXPRT *clone_xprt, mblk_t *mp, struct rpc_msg *msg) 3350Sstevel@tonic-gate { 3360Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf; 3370Sstevel@tonic-gate XDR *xdrs = &clone_xprt->xp_xdrin; 3380Sstevel@tonic-gate struct rpc_cots_server *stats = CLONE2STATS(clone_xprt); 3390Sstevel@tonic-gate 3400Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KRECV_START, 3410Sstevel@tonic-gate "svc_cots_krecv_start:"); 3420Sstevel@tonic-gate RPCLOG(4, "svc_cots_krecv_start clone_xprt = %p:\n", 3430Sstevel@tonic-gate (void *)clone_xprt); 3440Sstevel@tonic-gate 3450Sstevel@tonic-gate RSSTAT_INCR(stats, rscalls); 3460Sstevel@tonic-gate 3470Sstevel@tonic-gate if (mp->b_datap->db_type != M_DATA) { 3480Sstevel@tonic-gate RPCLOG(16, "svc_cots_krecv bad db_type %d\n", 3490Sstevel@tonic-gate mp->b_datap->db_type); 3500Sstevel@tonic-gate goto bad; 3510Sstevel@tonic-gate } 3520Sstevel@tonic-gate 3530Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_DECODE, 0); 3540Sstevel@tonic-gate 3550Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_XDR_CALLMSG_START, 3560Sstevel@tonic-gate "xdr_callmsg_start:"); 3570Sstevel@tonic-gate RPCLOG0(4, "xdr_callmsg_start:\n"); 3580Sstevel@tonic-gate if (!xdr_callmsg(xdrs, msg)) { 3590Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END, 3600Sstevel@tonic-gate "xdr_callmsg_end:(%S)", "bad"); 3610Sstevel@tonic-gate RPCLOG0(1, "svc_cots_krecv xdr_callmsg failure\n"); 3620Sstevel@tonic-gate RSSTAT_INCR(stats, rsxdrcall); 3630Sstevel@tonic-gate goto bad; 3640Sstevel@tonic-gate } 3650Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_CALLMSG_END, 3660Sstevel@tonic-gate "xdr_callmsg_end:(%S)", "good"); 3670Sstevel@tonic-gate 3680Sstevel@tonic-gate clone_xprt->xp_xid = msg->rm_xid; 3690Sstevel@tonic-gate cd->cd_req_mp = mp; 3700Sstevel@tonic-gate 3710Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KRECV_END, 3720Sstevel@tonic-gate 
"svc_cots_krecv_end:(%S)", "good"); 3730Sstevel@tonic-gate RPCLOG0(4, "svc_cots_krecv_end:good\n"); 3740Sstevel@tonic-gate return (TRUE); 3750Sstevel@tonic-gate 3760Sstevel@tonic-gate bad: 3770Sstevel@tonic-gate if (mp) 3780Sstevel@tonic-gate freemsg(mp); 3790Sstevel@tonic-gate 3800Sstevel@tonic-gate RSSTAT_INCR(stats, rsbadcalls); 3810Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KRECV_END, 3820Sstevel@tonic-gate "svc_cots_krecv_end:(%S)", "bad"); 3830Sstevel@tonic-gate return (FALSE); 3840Sstevel@tonic-gate } 3850Sstevel@tonic-gate 3860Sstevel@tonic-gate /* 3870Sstevel@tonic-gate * Send rpc reply. 3880Sstevel@tonic-gate */ 3890Sstevel@tonic-gate static bool_t 3900Sstevel@tonic-gate svc_cots_ksend(SVCXPRT *clone_xprt, struct rpc_msg *msg) 3910Sstevel@tonic-gate { 3920Sstevel@tonic-gate /* LINTED pointer alignment */ 3930Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf; 3940Sstevel@tonic-gate XDR *xdrs = &(clone_xprt->xp_xdrout); 3950Sstevel@tonic-gate int retval = FALSE; 3960Sstevel@tonic-gate mblk_t *mp; 3970Sstevel@tonic-gate xdrproc_t xdr_results; 3980Sstevel@tonic-gate caddr_t xdr_location; 3990Sstevel@tonic-gate bool_t has_args; 4000Sstevel@tonic-gate 4010Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KSEND_START, 4020Sstevel@tonic-gate "svc_cots_ksend_start:"); 4030Sstevel@tonic-gate 4040Sstevel@tonic-gate /* 4050Sstevel@tonic-gate * If there is a result procedure specified in the reply message, 4060Sstevel@tonic-gate * it will be processed in the xdr_replymsg and SVCAUTH_WRAP. 4070Sstevel@tonic-gate * We need to make sure it won't be processed twice, so we null 4080Sstevel@tonic-gate * it for xdr_replymsg here. 
4090Sstevel@tonic-gate */ 4100Sstevel@tonic-gate has_args = FALSE; 4110Sstevel@tonic-gate if (msg->rm_reply.rp_stat == MSG_ACCEPTED && 4120Sstevel@tonic-gate msg->rm_reply.rp_acpt.ar_stat == SUCCESS) { 4130Sstevel@tonic-gate if ((xdr_results = msg->acpted_rply.ar_results.proc) != NULL) { 4140Sstevel@tonic-gate has_args = TRUE; 4150Sstevel@tonic-gate xdr_location = msg->acpted_rply.ar_results.where; 4160Sstevel@tonic-gate msg->acpted_rply.ar_results.proc = xdr_void; 4170Sstevel@tonic-gate msg->acpted_rply.ar_results.where = NULL; 4180Sstevel@tonic-gate } 4190Sstevel@tonic-gate } 4200Sstevel@tonic-gate 4210Sstevel@tonic-gate mp = cd->cd_mp; 4220Sstevel@tonic-gate if (mp) { 4230Sstevel@tonic-gate /* 4240Sstevel@tonic-gate * The program above pre-allocated an mblk and put 4250Sstevel@tonic-gate * the data in place. 4260Sstevel@tonic-gate */ 4270Sstevel@tonic-gate cd->cd_mp = (mblk_t *)NULL; 4280Sstevel@tonic-gate if (!(xdr_replymsg_body(xdrs, msg) && 4290Sstevel@tonic-gate (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs, 4300Sstevel@tonic-gate xdr_results, xdr_location)))) { 4310Sstevel@tonic-gate RPCLOG0(1, "svc_cots_ksend: " 4320Sstevel@tonic-gate "xdr_replymsg_body/SVCAUTH_WRAP failed\n"); 4330Sstevel@tonic-gate freemsg(mp); 4340Sstevel@tonic-gate goto out; 4350Sstevel@tonic-gate } 4360Sstevel@tonic-gate } else { 4370Sstevel@tonic-gate int len; 4380Sstevel@tonic-gate int mpsize; 4390Sstevel@tonic-gate 4400Sstevel@tonic-gate /* 4410Sstevel@tonic-gate * Leave space for protocol headers. 4420Sstevel@tonic-gate */ 4430Sstevel@tonic-gate len = MSG_OFFSET + clone_xprt->xp_msg_size; 4440Sstevel@tonic-gate 4450Sstevel@tonic-gate /* 4460Sstevel@tonic-gate * Allocate an initial mblk for the response data. 
4470Sstevel@tonic-gate */ 4480Sstevel@tonic-gate while (!(mp = allocb(len, BPRI_LO))) { 4490Sstevel@tonic-gate RPCLOG0(16, "svc_cots_ksend: allocb failed failed\n"); 4500Sstevel@tonic-gate if (strwaitbuf(len, BPRI_LO)) { 4510Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KSEND_END, 4520Sstevel@tonic-gate "svc_cots_ksend_end:(%S)", "strwaitbuf"); 4530Sstevel@tonic-gate RPCLOG0(1, 4540Sstevel@tonic-gate "svc_cots_ksend: strwaitbuf failed\n"); 4550Sstevel@tonic-gate goto out; 4560Sstevel@tonic-gate } 4570Sstevel@tonic-gate } 4580Sstevel@tonic-gate 4590Sstevel@tonic-gate /* 4600Sstevel@tonic-gate * Initialize the XDR decode stream. Additional mblks 4610Sstevel@tonic-gate * will be allocated if necessary. They will be TIDU 4620Sstevel@tonic-gate * sized. 4630Sstevel@tonic-gate */ 4640Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_ENCODE, clone_xprt->xp_msg_size); 4650Sstevel@tonic-gate mpsize = MBLKSIZE(mp); 4660Sstevel@tonic-gate ASSERT(mpsize >= len); 4670Sstevel@tonic-gate ASSERT(mp->b_rptr == mp->b_datap->db_base); 4680Sstevel@tonic-gate 4690Sstevel@tonic-gate /* 4700Sstevel@tonic-gate * If the size of mblk is not appreciably larger than what we 4710Sstevel@tonic-gate * asked, then resize the mblk to exactly len bytes. Reason for 4720Sstevel@tonic-gate * this: suppose len is 1600 bytes, the tidu is 1460 bytes 4730Sstevel@tonic-gate * (from TCP over ethernet), and the arguments to RPC require 4740Sstevel@tonic-gate * 2800 bytes. Ideally we want the protocol to render two 4750Sstevel@tonic-gate * ~1400 byte segments over the wire. If allocb() gives us a 2k 4760Sstevel@tonic-gate * mblk, and we allocate a second mblk for the rest, the 4770Sstevel@tonic-gate * protocol module may generate 3 segments over the wire: 4780Sstevel@tonic-gate * 1460 bytes for the first, 448 (2048 - 1600) for the 2nd, and 4790Sstevel@tonic-gate * 892 for the 3rd. 
If we "waste" 448 bytes in the first mblk, 4800Sstevel@tonic-gate * the XDR encoding will generate two ~1400 byte mblks, and the 4810Sstevel@tonic-gate * protocol module is more likely to produce properly sized 4820Sstevel@tonic-gate * segments. 4830Sstevel@tonic-gate */ 4840Sstevel@tonic-gate if ((mpsize >> 1) <= len) { 4850Sstevel@tonic-gate mp->b_rptr += (mpsize - len); 4860Sstevel@tonic-gate } 4870Sstevel@tonic-gate 4880Sstevel@tonic-gate /* 4890Sstevel@tonic-gate * Adjust b_rptr to reserve space for the non-data protocol 4900Sstevel@tonic-gate * headers that any downstream modules might like to add, and 4910Sstevel@tonic-gate * for the record marking header. 4920Sstevel@tonic-gate */ 4930Sstevel@tonic-gate mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE); 4940Sstevel@tonic-gate 4950Sstevel@tonic-gate XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base)); 4960Sstevel@tonic-gate ASSERT(mp->b_wptr == mp->b_rptr); 4970Sstevel@tonic-gate 4980Sstevel@tonic-gate msg->rm_xid = clone_xprt->xp_xid; 4990Sstevel@tonic-gate 5000Sstevel@tonic-gate TRACE_0(TR_FAC_KRPC, TR_XDR_REPLYMSG_START, 5010Sstevel@tonic-gate "xdr_replymsg_start:"); 5020Sstevel@tonic-gate if (!(xdr_replymsg(xdrs, msg) && 5030Sstevel@tonic-gate (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, xdrs, 5040Sstevel@tonic-gate xdr_results, xdr_location)))) { 5050Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END, 5060Sstevel@tonic-gate "xdr_replymsg_end:(%S)", "bad"); 5070Sstevel@tonic-gate freemsg(mp); 5080Sstevel@tonic-gate RPCLOG0(1, "svc_cots_ksend: xdr_replymsg/SVCAUTH_WRAP " 5097208Svv149972 "failed\n"); 5100Sstevel@tonic-gate goto out; 5110Sstevel@tonic-gate } 5120Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_XDR_REPLYMSG_END, 5130Sstevel@tonic-gate "xdr_replymsg_end:(%S)", "good"); 5140Sstevel@tonic-gate } 5150Sstevel@tonic-gate 5160Sstevel@tonic-gate put(clone_xprt->xp_wq, mp); 5170Sstevel@tonic-gate retval = TRUE; 5180Sstevel@tonic-gate 5190Sstevel@tonic-gate out: 5200Sstevel@tonic-gate /* 
5210Sstevel@tonic-gate * This is completely disgusting. If public is set it is 5220Sstevel@tonic-gate * a pointer to a structure whose first field is the address 5230Sstevel@tonic-gate * of the function to free that structure and any related 5240Sstevel@tonic-gate * stuff. (see rrokfree in nfs_xdr.c). 5250Sstevel@tonic-gate */ 5260Sstevel@tonic-gate if (xdrs->x_public) { 5270Sstevel@tonic-gate /* LINTED pointer alignment */ 5280Sstevel@tonic-gate (**((int (**)())xdrs->x_public))(xdrs->x_public); 5290Sstevel@tonic-gate } 5300Sstevel@tonic-gate 5310Sstevel@tonic-gate TRACE_1(TR_FAC_KRPC, TR_SVC_COTS_KSEND_END, 5320Sstevel@tonic-gate "svc_cots_ksend_end:(%S)", "done"); 5330Sstevel@tonic-gate return (retval); 5340Sstevel@tonic-gate } 5350Sstevel@tonic-gate 5360Sstevel@tonic-gate /* 5370Sstevel@tonic-gate * Deserialize arguments. 5380Sstevel@tonic-gate */ 5390Sstevel@tonic-gate static bool_t 5400Sstevel@tonic-gate svc_cots_kgetargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args, 5410Sstevel@tonic-gate caddr_t args_ptr) 5420Sstevel@tonic-gate { 5430Sstevel@tonic-gate return (SVCAUTH_UNWRAP(&clone_xprt->xp_auth, &clone_xprt->xp_xdrin, 5440Sstevel@tonic-gate xdr_args, args_ptr)); 5450Sstevel@tonic-gate } 5460Sstevel@tonic-gate 5470Sstevel@tonic-gate static bool_t 5480Sstevel@tonic-gate svc_cots_kfreeargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args, 5490Sstevel@tonic-gate caddr_t args_ptr) 5500Sstevel@tonic-gate { 5510Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf; 5520Sstevel@tonic-gate mblk_t *mp; 5530Sstevel@tonic-gate bool_t retval; 5540Sstevel@tonic-gate 5550Sstevel@tonic-gate /* 5560Sstevel@tonic-gate * It is important to call the XDR routine before 5570Sstevel@tonic-gate * freeing the request mblk. Structures in the 5580Sstevel@tonic-gate * XDR data may point into the mblk and require that 5590Sstevel@tonic-gate * the memory be intact during the free routine. 
5600Sstevel@tonic-gate */ 5610Sstevel@tonic-gate if (args_ptr) { 5620Sstevel@tonic-gate /* LINTED pointer alignment */ 5630Sstevel@tonic-gate XDR *xdrs = &clone_xprt->xp_xdrin; 5640Sstevel@tonic-gate 5650Sstevel@tonic-gate xdrs->x_op = XDR_FREE; 5660Sstevel@tonic-gate retval = (*xdr_args)(xdrs, args_ptr); 5670Sstevel@tonic-gate } else 5680Sstevel@tonic-gate retval = TRUE; 5690Sstevel@tonic-gate 5700Sstevel@tonic-gate if ((mp = cd->cd_req_mp) != NULL) { 5710Sstevel@tonic-gate cd->cd_req_mp = (mblk_t *)0; 5720Sstevel@tonic-gate freemsg(mp); 5730Sstevel@tonic-gate } 5740Sstevel@tonic-gate 5750Sstevel@tonic-gate return (retval); 5760Sstevel@tonic-gate } 5770Sstevel@tonic-gate 5780Sstevel@tonic-gate static int32_t * 5790Sstevel@tonic-gate svc_cots_kgetres(SVCXPRT *clone_xprt, int size) 5800Sstevel@tonic-gate { 5810Sstevel@tonic-gate /* LINTED pointer alignment */ 5820Sstevel@tonic-gate cots_data_t *cd = (cots_data_t *)clone_xprt->xp_p2buf; 5830Sstevel@tonic-gate XDR *xdrs = &clone_xprt->xp_xdrout; 5840Sstevel@tonic-gate mblk_t *mp; 5850Sstevel@tonic-gate int32_t *buf; 5860Sstevel@tonic-gate struct rpc_msg rply; 5870Sstevel@tonic-gate int len; 5880Sstevel@tonic-gate int mpsize; 5890Sstevel@tonic-gate 5900Sstevel@tonic-gate /* 5910Sstevel@tonic-gate * Leave space for protocol headers. 5920Sstevel@tonic-gate */ 5930Sstevel@tonic-gate len = MSG_OFFSET + clone_xprt->xp_msg_size; 5940Sstevel@tonic-gate 5950Sstevel@tonic-gate /* 5960Sstevel@tonic-gate * Allocate an initial mblk for the response data. 5970Sstevel@tonic-gate */ 5980Sstevel@tonic-gate while ((mp = allocb(len, BPRI_LO)) == NULL) { 5990Sstevel@tonic-gate if (strwaitbuf(len, BPRI_LO)) 6000Sstevel@tonic-gate return (FALSE); 6010Sstevel@tonic-gate } 6020Sstevel@tonic-gate 6030Sstevel@tonic-gate /* 6040Sstevel@tonic-gate * Initialize the XDR decode stream. Additional mblks 6050Sstevel@tonic-gate * will be allocated if necessary. They will be TIDU 6060Sstevel@tonic-gate * sized. 
6070Sstevel@tonic-gate */ 6080Sstevel@tonic-gate xdrmblk_init(xdrs, mp, XDR_ENCODE, clone_xprt->xp_msg_size); 6090Sstevel@tonic-gate mpsize = MBLKSIZE(mp); 6100Sstevel@tonic-gate ASSERT(mpsize >= len); 6110Sstevel@tonic-gate ASSERT(mp->b_rptr == mp->b_datap->db_base); 6120Sstevel@tonic-gate 6130Sstevel@tonic-gate /* 6140Sstevel@tonic-gate * If the size of mblk is not appreciably larger than what we 6150Sstevel@tonic-gate * asked, then resize the mblk to exactly len bytes. Reason for 6160Sstevel@tonic-gate * this: suppose len is 1600 bytes, the tidu is 1460 bytes 6170Sstevel@tonic-gate * (from TCP over ethernet), and the arguments to RPC require 6180Sstevel@tonic-gate * 2800 bytes. Ideally we want the protocol to render two 6190Sstevel@tonic-gate * ~1400 byte segments over the wire. If allocb() gives us a 2k 6200Sstevel@tonic-gate * mblk, and we allocate a second mblk for the rest, the 6210Sstevel@tonic-gate * protocol module may generate 3 segments over the wire: 6220Sstevel@tonic-gate * 1460 bytes for the first, 448 (2048 - 1600) for the 2nd, and 6230Sstevel@tonic-gate * 892 for the 3rd. If we "waste" 448 bytes in the first mblk, 6240Sstevel@tonic-gate * the XDR encoding will generate two ~1400 byte mblks, and the 6250Sstevel@tonic-gate * protocol module is more likely to produce properly sized 6260Sstevel@tonic-gate * segments. 6270Sstevel@tonic-gate */ 6280Sstevel@tonic-gate if ((mpsize >> 1) <= len) { 6290Sstevel@tonic-gate mp->b_rptr += (mpsize - len); 6300Sstevel@tonic-gate } 6310Sstevel@tonic-gate 6320Sstevel@tonic-gate /* 6330Sstevel@tonic-gate * Adjust b_rptr to reserve space for the non-data protocol 6340Sstevel@tonic-gate * headers that any downstream modules might like to add, and 6350Sstevel@tonic-gate * for the record marking header. 
6360Sstevel@tonic-gate */ 6370Sstevel@tonic-gate mp->b_rptr += (MSG_OFFSET + RM_HDR_SIZE); 6380Sstevel@tonic-gate 6390Sstevel@tonic-gate XDR_SETPOS(xdrs, (uint_t)(mp->b_rptr - mp->b_datap->db_base)); 6400Sstevel@tonic-gate ASSERT(mp->b_wptr == mp->b_rptr); 6410Sstevel@tonic-gate 6420Sstevel@tonic-gate /* 6430Sstevel@tonic-gate * Assume a successful RPC since most of them are. 6440Sstevel@tonic-gate */ 6450Sstevel@tonic-gate rply.rm_xid = clone_xprt->xp_xid; 6460Sstevel@tonic-gate rply.rm_direction = REPLY; 6470Sstevel@tonic-gate rply.rm_reply.rp_stat = MSG_ACCEPTED; 6480Sstevel@tonic-gate rply.acpted_rply.ar_verf = clone_xprt->xp_verf; 6490Sstevel@tonic-gate rply.acpted_rply.ar_stat = SUCCESS; 6500Sstevel@tonic-gate 6510Sstevel@tonic-gate if (!xdr_replymsg_hdr(xdrs, &rply)) { 6520Sstevel@tonic-gate freeb(mp); 6530Sstevel@tonic-gate return (NULL); 6540Sstevel@tonic-gate } 6550Sstevel@tonic-gate 6560Sstevel@tonic-gate 6570Sstevel@tonic-gate buf = XDR_INLINE(xdrs, size); 6580Sstevel@tonic-gate if (buf == NULL) { 6590Sstevel@tonic-gate ASSERT(cd->cd_mp == NULL); 6600Sstevel@tonic-gate freemsg(mp); 6610Sstevel@tonic-gate } else { 6620Sstevel@tonic-gate cd->cd_mp = mp; 6630Sstevel@tonic-gate } 6640Sstevel@tonic-gate return (buf); 6650Sstevel@tonic-gate } 6660Sstevel@tonic-gate 6670Sstevel@tonic-gate static void 6680Sstevel@tonic-gate svc_cots_kfreeres(SVCXPRT *clone_xprt) 6690Sstevel@tonic-gate { 6700Sstevel@tonic-gate cots_data_t *cd; 6710Sstevel@tonic-gate mblk_t *mp; 6720Sstevel@tonic-gate 6730Sstevel@tonic-gate cd = (cots_data_t *)clone_xprt->xp_p2buf; 6740Sstevel@tonic-gate if ((mp = cd->cd_mp) != NULL) { 6750Sstevel@tonic-gate cd->cd_mp = (mblk_t *)NULL; 6760Sstevel@tonic-gate freemsg(mp); 6770Sstevel@tonic-gate } 6780Sstevel@tonic-gate } 6790Sstevel@tonic-gate 6800Sstevel@tonic-gate /* 6810Sstevel@tonic-gate * the dup cacheing routines below provide a cache of non-failure 6820Sstevel@tonic-gate * transaction id's. 
 * rpc service routines can use this to detect
 * retransmissions and re-send a non-failure response.
 */

/*
 * MAXDUPREQS is the number of cached items.  It should be adjusted
 * to the service load so that there is likely to be a response entry
 * when the first retransmission comes in.
 */
#define	MAXDUPREQS	1024

/*
 * This should be appropriately scaled to MAXDUPREQS.
 * (257 is prime, so the modulo hash below spreads XIDs evenly.)
 */
#define	DRHASHSZ	257

/*
 * Pick the cheap masking hash when DRHASHSZ is a power of two,
 * otherwise fall back to modulo.
 */
#if ((DRHASHSZ & (DRHASHSZ - 1)) == 0)
#define	XIDHASH(xid)	((xid) & (DRHASHSZ - 1))
#else
#define	XIDHASH(xid)	((xid) % DRHASHSZ)
#endif
#define	DRHASH(dr)	XIDHASH((dr)->dr_xid)
#define	REQTOXID(req)	((req)->rq_xprt->xp_xid)

/* Current number of allocated cache entries (grows up to cotsmaxdupreqs). */
static int	cotsndupreqs = 0;
/* Tunable cap on cache entries; non-static so it can be patched/mdb-set. */
int	cotsmaxdupreqs = MAXDUPREQS;
/* Protects all of the dup-cache state below. */
static kmutex_t cotsdupreq_lock;
static struct dupreq *cotsdrhashtbl[DRHASHSZ];
/* Per-bucket chain lengths, kept for observability only. */
static int	cotsdrhashstat[DRHASHSZ];

static void unhash(struct dupreq *);

/*
 * cotsdrmru points to the head of a circular linked list in lru order.
 * cotsdrmru->dr_next is the least-recently-used entry on that list.
 */
struct dupreq *cotsdrmru;

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_cots_kdup
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 *
 * svc_cots_kdup searches the request cache and returns 0 if the
 * request is not found in the cache.  If it is found, then it
 * returns the state of the request (in progress or done) and
 * the status or attributes that were part of the original reply.
 *
 * If DUP_DONE (there is a duplicate) svc_cots_kdup copies over the
 * value of the response.  In that case, also return in *dupcachedp
 * whether the response free routine is cached in the dupreq - in which case
 * the caller should not be freeing it, because it will be done later
 * in the svc_cots_kdup code when the dupreq is reused.
 */
static int
svc_cots_kdup(struct svc_req *req, caddr_t res, int size, struct dupreq **drpp,
	bool_t *dupcachedp)
{
	struct rpc_cots_server *stats = CLONE2STATS(req->rq_xprt);
	struct dupreq *dr;
	uint32_t xid;
	uint32_t drhash;
	int status;

	xid = REQTOXID(req);
	mutex_enter(&cotsdupreq_lock);
	RSSTAT_INCR(stats, rsdupchecks);
	/*
	 * Check to see whether an entry already exists in the cache.
	 * A hit requires xid, proc, prog, vers AND the full transport
	 * address to match, since XIDs are only unique per client.
	 */
	dr = cotsdrhashtbl[XIDHASH(xid)];
	while (dr != NULL) {
		if (dr->dr_xid == xid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    bcmp((caddr_t)dr->dr_addr.buf,
		    (caddr_t)req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) {
			status = dr->dr_status;
			if (status == DUP_DONE) {
				/* Replay the cached response into res. */
				bcopy(dr->dr_resp.buf, res, size);
				if (dupcachedp != NULL)
					*dupcachedp = (dr->dr_resfree != NULL);
				TRACE_0(TR_FAC_KRPC, TR_SVC_COTS_KDUP_DONE,
				    "svc_cots_kdup: DUP_DONE");
			} else {
				/*
				 * Entry exists but the original call is
				 * still being serviced; mark it in
				 * progress and hand it back.
				 */
				dr->dr_status = DUP_INPROGRESS;
				*drpp = dr;
				TRACE_0(TR_FAC_KRPC,
				    TR_SVC_COTS_KDUP_INPROGRESS,
				    "svc_cots_kdup: DUP_INPROGRESS");
			}
			RSSTAT_INCR(stats, rsdupreqs);
			mutex_exit(&cotsdupreq_lock);
			return (status);
		}
		dr = dr->dr_chain;
	}

	/*
	 * There wasn't an entry, either allocate a new one or recycle
	 * an old one.
	 */
	if (cotsndupreqs < cotsmaxdupreqs) {
		/* KM_NOSLEEP: we hold cotsdupreq_lock, must not block. */
		dr = kmem_alloc(sizeof (*dr), KM_NOSLEEP);
		if (dr == NULL) {
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
		dr->dr_resp.buf = NULL;
		dr->dr_resp.maxlen = 0;
		dr->dr_addr.buf = NULL;
		dr->dr_addr.maxlen = 0;
		/* Link the new entry into the circular LRU list. */
		if (cotsdrmru) {
			dr->dr_next = cotsdrmru->dr_next;
			cotsdrmru->dr_next = dr;
		} else {
			dr->dr_next = dr;
		}
		cotsndupreqs++;
	} else {
		/*
		 * Recycle the least-recently-used entry that is not
		 * DUP_INPROGRESS; one full lap with no candidate means
		 * every slot is busy and we must drop the request.
		 */
		dr = cotsdrmru->dr_next;
		while (dr->dr_status == DUP_INPROGRESS) {
			dr = dr->dr_next;
			if (dr == cotsdrmru->dr_next) {
				cmn_err(CE_WARN, "svc_cots_kdup no slots free");
				mutex_exit(&cotsdupreq_lock);
				return (DUP_ERROR);
			}
		}
		/* Remove from the hash before freeing the old response. */
		unhash(dr);
		if (dr->dr_resfree) {
			(*dr->dr_resfree)(dr->dr_resp.buf);
		}
	}
	dr->dr_resfree = NULL;
	/* The (re)used entry becomes the most recently used. */
	cotsdrmru = dr;

	dr->dr_xid = REQTOXID(req);
	dr->dr_prog = req->rq_prog;
	dr->dr_vers = req->rq_vers;
	dr->dr_proc = req->rq_proc;
	/* Grow the cached address buffer only when too small. */
	if (dr->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
		if (dr->dr_addr.buf != NULL)
			kmem_free(dr->dr_addr.buf, dr->dr_addr.maxlen);
		dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
		dr->dr_addr.buf = kmem_alloc(dr->dr_addr.maxlen, KM_NOSLEEP);
		if (dr->dr_addr.buf == NULL) {
			/*
			 * NOTE(review): on this failure the entry stays on
			 * the LRU list, unhashed, marked DUP_DROP — it will
			 * be recycled by a later call.  Appears intentional;
			 * confirm against svc_clts_kdup.
			 */
			dr->dr_addr.maxlen = 0;
			dr->dr_status = DUP_DROP;
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
	}
	dr->dr_addr.len = req->rq_xprt->xp_rtaddr.len;
	bcopy(req->rq_xprt->xp_rtaddr.buf, dr->dr_addr.buf, dr->dr_addr.len);
	/* Same grow-on-demand treatment for the response buffer. */
	if (dr->dr_resp.maxlen < size) {
		if (dr->dr_resp.buf != NULL)
			kmem_free(dr->dr_resp.buf, dr->dr_resp.maxlen);
		dr->dr_resp.maxlen = (unsigned int)size;
		dr->dr_resp.buf = kmem_alloc(size, KM_NOSLEEP);
		if (dr->dr_resp.buf == NULL) {
			dr->dr_resp.maxlen = 0;
			dr->dr_status = DUP_DROP;
			mutex_exit(&cotsdupreq_lock);
			return (DUP_ERROR);
		}
	}
	dr->dr_status = DUP_INPROGRESS;

	/* Insert at the head of the hash chain for this XID. */
	drhash = (uint32_t)DRHASH(dr);
	dr->dr_chain = cotsdrhashtbl[drhash];
	cotsdrhashtbl[drhash] = dr;
	cotsdrhashstat[drhash]++;
	mutex_exit(&cotsdupreq_lock);
	*drpp = dr;
	return (DUP_NEW);
}

/*
 * PSARC 2003/523 Contract Private Interface
 * svc_cots_kdupdone
 * Changes must be reviewed by Solaris File Sharing
 * Changes must be communicated to contract-2003-523@sun.com
 *
 * svc_cots_kdupdone marks the request done (DUP_DONE or DUP_DROP)
 * and stores the response.
 */
static void
svc_cots_kdupdone(struct dupreq *dr, caddr_t res, void (*dis_resfree)(),
	int size, int status)
{
	/* An entry being completed must not already own a free routine. */
	ASSERT(dr->dr_resfree == NULL);
	if (status == DUP_DONE) {
		/* Cache the response for replay to retransmissions. */
		bcopy(res, dr->dr_resp.buf, size);
		dr->dr_resfree = dis_resfree;
	}
	dr->dr_status = status;
}

/*
 * This routine expects that the mutex, cotsdupreq_lock, is already held.
8870Sstevel@tonic-gate */ 8880Sstevel@tonic-gate static void 8890Sstevel@tonic-gate unhash(struct dupreq *dr) 8900Sstevel@tonic-gate { 8910Sstevel@tonic-gate struct dupreq *drt; 8920Sstevel@tonic-gate struct dupreq *drtprev = NULL; 8930Sstevel@tonic-gate uint32_t drhash; 8940Sstevel@tonic-gate 8950Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cotsdupreq_lock)); 8960Sstevel@tonic-gate 8970Sstevel@tonic-gate drhash = (uint32_t)DRHASH(dr); 8980Sstevel@tonic-gate drt = cotsdrhashtbl[drhash]; 8990Sstevel@tonic-gate while (drt != NULL) { 9000Sstevel@tonic-gate if (drt == dr) { 9010Sstevel@tonic-gate cotsdrhashstat[drhash]--; 9020Sstevel@tonic-gate if (drtprev == NULL) { 9030Sstevel@tonic-gate cotsdrhashtbl[drhash] = drt->dr_chain; 9040Sstevel@tonic-gate } else { 9050Sstevel@tonic-gate drtprev->dr_chain = drt->dr_chain; 9060Sstevel@tonic-gate } 9070Sstevel@tonic-gate return; 9080Sstevel@tonic-gate } 9090Sstevel@tonic-gate drtprev = drt; 9100Sstevel@tonic-gate drt = drt->dr_chain; 9110Sstevel@tonic-gate } 9120Sstevel@tonic-gate } 9130Sstevel@tonic-gate 9140Sstevel@tonic-gate void 9150Sstevel@tonic-gate svc_cots_stats_init(zoneid_t zoneid, struct rpc_cots_server **statsp) 9160Sstevel@tonic-gate { 9170Sstevel@tonic-gate *statsp = (struct rpc_cots_server *)rpcstat_zone_init_common(zoneid, 9180Sstevel@tonic-gate "unix", "rpc_cots_server", (const kstat_named_t *)&cots_rsstat_tmpl, 9190Sstevel@tonic-gate sizeof (cots_rsstat_tmpl)); 9200Sstevel@tonic-gate } 9210Sstevel@tonic-gate 9220Sstevel@tonic-gate void 9230Sstevel@tonic-gate svc_cots_stats_fini(zoneid_t zoneid, struct rpc_cots_server **statsp) 9240Sstevel@tonic-gate { 9250Sstevel@tonic-gate rpcstat_zone_fini_common(zoneid, "unix", "rpc_cots_server"); 9260Sstevel@tonic-gate kmem_free(*statsp, sizeof (cots_rsstat_tmpl)); 9270Sstevel@tonic-gate } 9280Sstevel@tonic-gate 9290Sstevel@tonic-gate void 9300Sstevel@tonic-gate svc_cots_init(void) 9310Sstevel@tonic-gate { 9320Sstevel@tonic-gate /* 9330Sstevel@tonic-gate * Check to make sure 
that the cots private data will fit into 9340Sstevel@tonic-gate * the stack buffer allocated by svc_run. The ASSERT is a safety 9350Sstevel@tonic-gate * net if the cots_data_t structure ever changes. 9360Sstevel@tonic-gate */ 9370Sstevel@tonic-gate /*CONSTANTCONDITION*/ 9380Sstevel@tonic-gate ASSERT(sizeof (cots_data_t) <= SVC_P2LEN); 9390Sstevel@tonic-gate 9400Sstevel@tonic-gate mutex_init(&cots_kcreate_lock, NULL, MUTEX_DEFAULT, NULL); 9410Sstevel@tonic-gate mutex_init(&cotsdupreq_lock, NULL, MUTEX_DEFAULT, NULL); 9420Sstevel@tonic-gate } 943