/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
 * Portions of this source code were derived from Berkeley
 * 4.3 BSD under license from the Regents of the University of
 * California.
 */

/*
 * Server side of RPC over RDMA in the kernel.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/debug.h>

#include <rpc/types.h>
#include <rpc/xdr.h>
#include <rpc/auth.h>
#include <rpc/clnt.h>
#include <rpc/rpc_msg.h>
#include <rpc/svc.h>
#include <rpc/rpc_rdma.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>

#include <nfs/nfs.h>
#include <sys/sdt.h>

#define	SVC_RDMA_SUCCESS	0
#define	SVC_RDMA_FAIL		-1

#define	SVC_CREDIT_FACTOR	(0.5)

#define	MSG_IS_RPCSEC_GSS(msg)	\
	((msg)->rm_reply.rp_acpt.ar_verf.oa_flavor == RPCSEC_GSS)


uint32_t rdma_bufs_granted = RDMA_BUFS_GRANT;

/*
 * RDMA transport specific data associated with SVCMASTERXPRT
 */
struct rdma_data {
	SVCMASTERXPRT	*rd_xprt;	/* back ptr to SVCMASTERXPRT */
	struct rdma_svc_data rd_data;	/* rdma data */
	rdma_mod_t	*r_mod;		/* RDMA module containing ops ptr */
};

/*
 * Plugin connection specific data stashed away in clone SVCXPRT
 */
struct clone_rdma_data {
	CONN		*conn;		/* RDMA connection */
	rdma_buf_t	rpcbuf;		/* RPC req/resp buffer */
	struct clist	*cl_reply;	/* reply chunk buffer info */
	struct clist	*cl_wlist;	/* write list clist */
};

#define	MAXADDRLEN	128	/* max length for address mask */

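/*
 * A clone_rdma_data is never allocated on its own; it lives in the clone
 * SVCXPRT's preallocated private area (xp_p2buf), which is why
 * svc_rdma_kcreate() asserts that the structure fits within SVC_P2LEN.
 * The access pattern used throughout this file is simply:
 *
 *	struct clone_rdma_data *crdp =
 *	    (struct clone_rdma_data *)clone_xprt->xp_p2buf;
 */
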
/*
 * Routines exported through ops vector.
 */
static bool_t		svc_rdma_krecv(SVCXPRT *, mblk_t *, struct rpc_msg *);
static bool_t		svc_rdma_ksend(SVCXPRT *, struct rpc_msg *);
static bool_t		svc_rdma_kgetargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t		svc_rdma_kfreeargs(SVCXPRT *, xdrproc_t, caddr_t);
void			svc_rdma_kdestroy(SVCMASTERXPRT *);
static int		svc_rdma_kdup(struct svc_req *, caddr_t, int,
				struct dupreq **, bool_t *);
static void		svc_rdma_kdupdone(struct dupreq *, caddr_t,
				void (*)(), int, int);
static int32_t		*svc_rdma_kgetres(SVCXPRT *, int);
static void		svc_rdma_kfreeres(SVCXPRT *);
static void		svc_rdma_kclone_destroy(SVCXPRT *);
static void		svc_rdma_kstart(SVCMASTERXPRT *);
void			svc_rdma_kstop(SVCMASTERXPRT *);

static int	svc_process_long_reply(SVCXPRT *, xdrproc_t,
			caddr_t, struct rpc_msg *, bool_t, int *,
			int *, int *, unsigned int *);

static int	svc_compose_rpcmsg(SVCXPRT *, CONN *, xdrproc_t,
			caddr_t, rdma_buf_t *, XDR **, struct rpc_msg *,
			bool_t, uint_t *);
static bool_t	rpcmsg_length(xdrproc_t,
			caddr_t,
			struct rpc_msg *, bool_t, int);

/*
 * Server transport operations vector.
 */
struct svc_ops rdma_svc_ops = {
	svc_rdma_krecv,		/* Get requests */
	svc_rdma_kgetargs,	/* Deserialize arguments */
	svc_rdma_ksend,		/* Send reply */
	svc_rdma_kfreeargs,	/* Free argument data space */
	svc_rdma_kdestroy,	/* Destroy transport handle */
	svc_rdma_kdup,		/* Check entry in dup req cache */
	svc_rdma_kdupdone,	/* Mark entry in dup req cache as done */
	svc_rdma_kgetres,	/* Get pointer to response buffer */
	svc_rdma_kfreeres,	/* Destroy pre-serialized response header */
	svc_rdma_kclone_destroy,	/* Destroy a clone xprt */
	svc_rdma_kstart		/* Tell `ready-to-receive' to rpcmod */
};

/*
 * Server statistics
 * NOTE: This structure type is duplicated in the NFS fast path.
 */
struct {
	kstat_named_t	rscalls;
	kstat_named_t	rsbadcalls;
	kstat_named_t	rsnullrecv;
	kstat_named_t	rsbadlen;
	kstat_named_t	rsxdrcall;
	kstat_named_t	rsdupchecks;
	kstat_named_t	rsdupreqs;
	kstat_named_t	rslongrpcs;
	kstat_named_t	rstotalreplies;
	kstat_named_t	rstotallongreplies;
	kstat_named_t	rstotalinlinereplies;
} rdmarsstat = {
	{ "calls",	KSTAT_DATA_UINT64 },
	{ "badcalls",	KSTAT_DATA_UINT64 },
	{ "nullrecv",	KSTAT_DATA_UINT64 },
	{ "badlen",	KSTAT_DATA_UINT64 },
	{ "xdrcall",	KSTAT_DATA_UINT64 },
	{ "dupchecks",	KSTAT_DATA_UINT64 },
	{ "dupreqs",	KSTAT_DATA_UINT64 },
	{ "longrpcs",	KSTAT_DATA_UINT64 },
	{ "totalreplies",	KSTAT_DATA_UINT64 },
	{ "totallongreplies",	KSTAT_DATA_UINT64 },
	{ "totalinlinereplies",	KSTAT_DATA_UINT64 },
};

kstat_named_t *rdmarsstat_ptr = (kstat_named_t *)&rdmarsstat;
uint_t rdmarsstat_ndata = sizeof (rdmarsstat) / sizeof (kstat_named_t);

#define	RSSTAT_INCR(x)	atomic_add_64(&rdmarsstat.x.value.ui64, 1)
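
/*
 * RSSTAT_INCR(rscalls), for example, atomically bumps the "calls" counter,
 * so the statistics can be updated from concurrent service threads without
 * taking a lock.  rdmarsstat_ptr and rdmarsstat_ndata are exported so the
 * kRPC kstat code elsewhere can publish these counters.
 */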

/*
 * Create a transport record.
 * The transport record, output buffer, and private data structure
 * are allocated.  The output buffer is serialized using xdrmem.
 * There is one transport record per user process which implements a
 * set of services.
 */
/* ARGSUSED */
int
svc_rdma_kcreate(char *netid, SVC_CALLOUT_TABLE *sct, int id,
    rdma_xprt_group_t *started_xprts)
{
	int error;
	SVCMASTERXPRT *xprt;
	struct rdma_data *rd;
	rdma_registry_t *rmod;
	rdma_xprt_record_t *xprt_rec;
	queue_t	*q;
	/*
	 * modload the RDMA plugins if not already done.
	 */
	if (!rdma_modloaded) {
		/*CONSTANTCONDITION*/
		ASSERT(sizeof (struct clone_rdma_data) <= SVC_P2LEN);

		mutex_enter(&rdma_modload_lock);
		if (!rdma_modloaded) {
			error = rdma_modload();
		}
		mutex_exit(&rdma_modload_lock);

		if (error)
			return (error);
	}

	/*
	 * master_xprt_count is the count of master transport handles
	 * that were successfully created and are ready to receive for
	 * RDMA based access.
	 */
	error = 0;
	xprt_rec = NULL;
	rw_enter(&rdma_lock, RW_READER);
	if (rdma_mod_head == NULL) {
		started_xprts->rtg_count = 0;
		rw_exit(&rdma_lock);
		if (rdma_dev_available)
			return (EPROTONOSUPPORT);
		else
			return (ENODEV);
	}

	/*
	 * If we have reached here, then at least one RDMA plugin has loaded.
	 * Create a master_xprt for each plugin and make it start listening
	 * on the device; if an error is generated, record it, since we may
	 * need to shut the master_xprt down.
	 * SVC_START() calls svc_rdma_kstart which calls plugin binding
	 * routines.
	 */
	for (rmod = rdma_mod_head; rmod != NULL; rmod = rmod->r_next) {

		/*
		 * One SVCMASTERXPRT per RDMA plugin.
		 */
		xprt = kmem_zalloc(sizeof (*xprt), KM_SLEEP);
		xprt->xp_ops = &rdma_svc_ops;
		xprt->xp_sct = sct;
		xprt->xp_type = T_RDMA;
		mutex_init(&xprt->xp_req_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&xprt->xp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
		xprt->xp_req_head = (mblk_t *)0;
		xprt->xp_req_tail = (mblk_t *)0;
		xprt->xp_threads = 0;
		xprt->xp_detached_threads = 0;

		rd = kmem_zalloc(sizeof (*rd), KM_SLEEP);
		xprt->xp_p2 = (caddr_t)rd;
		rd->rd_xprt = xprt;
		rd->r_mod = rmod->r_mod;

		q = &rd->rd_data.q;
		xprt->xp_wq = q;
		q->q_ptr = &rd->rd_xprt;
		xprt->xp_netid = NULL;

		if (netid != NULL) {
			xprt->xp_netid = kmem_alloc(strlen(netid) + 1,
			    KM_SLEEP);
			(void) strcpy(xprt->xp_netid, netid);
		}

		xprt->xp_addrmask.maxlen =
		    xprt->xp_addrmask.len = sizeof (struct sockaddr_in);
		xprt->xp_addrmask.buf =
		    kmem_zalloc(xprt->xp_addrmask.len, KM_SLEEP);
		((struct sockaddr_in *)xprt->xp_addrmask.buf)->sin_addr.s_addr =
		    (uint32_t)~0;
		((struct sockaddr_in *)xprt->xp_addrmask.buf)->sin_family =
		    (ushort_t)~0;

		/*
		 * Each of the plugins will have its own Service ID
		 * to listener specific mapping, like port number for VI
		 * and service name for IB.
		 */
		rd->rd_data.svcid = id;
		error = svc_xprt_register(xprt, id);
		if (error) {
			DTRACE_PROBE(krpc__e__svcrdma__xprt__reg);
			goto cleanup;
		}

		SVC_START(xprt);
		if (!rd->rd_data.active) {
			svc_xprt_unregister(xprt);
			error = rd->rd_data.err_code;
			goto cleanup;
		}

		/*
		 * This is set only when at least one transport was
		 * successfully created.  We insert the pointer to the
		 * created RDMA master xprt into a separately maintained
		 * list.  This way we can easily reference it later for
		 * cleanup, when the NFS kRPC service pool is going
		 * away/unregistered.
		 */
		started_xprts->rtg_count++;
		xprt_rec = kmem_alloc(sizeof (*xprt_rec), KM_SLEEP);
		xprt_rec->rtr_xprt_ptr = xprt;
		xprt_rec->rtr_next = started_xprts->rtg_listhead;
		started_xprts->rtg_listhead = xprt_rec;
		continue;
cleanup:
		SVC_DESTROY(xprt);
		if (error == RDMA_FAILED)
			error = EPROTONOSUPPORT;
	}

	rw_exit(&rdma_lock);

	/*
	 * Don't return an error as long as at least one plugin was
	 * started successfully.
	 */
	if (started_xprts->rtg_count == 0)
		return (error);
	return (0);
}

/*
 * Cleanup routine for freeing up memory allocated by
 * svc_rdma_kcreate()
 */
void
svc_rdma_kdestroy(SVCMASTERXPRT *xprt)
{
	struct rdma_data *rd = (struct rdma_data *)xprt->xp_p2;


	mutex_destroy(&xprt->xp_req_lock);
	mutex_destroy(&xprt->xp_thread_lock);
	kmem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	kmem_free(rd, sizeof (*rd));
	kmem_free(xprt->xp_addrmask.buf, xprt->xp_addrmask.maxlen);
	kmem_free(xprt, sizeof (*xprt));
}


static void
svc_rdma_kstart(SVCMASTERXPRT *xprt)
{
	struct rdma_svc_data *svcdata;
	rdma_mod_t *rmod;

	svcdata = &((struct rdma_data *)xprt->xp_p2)->rd_data;
	rmod = ((struct rdma_data *)xprt->xp_p2)->r_mod;

	/*
	 * Create a listener for the module at this port.
	 */

	(*rmod->rdma_ops->rdma_svc_listen)(svcdata);
}

void
svc_rdma_kstop(SVCMASTERXPRT *xprt)
{
	struct rdma_svc_data *svcdata;
	rdma_mod_t *rmod;

	svcdata = &((struct rdma_data *)xprt->xp_p2)->rd_data;
	rmod = ((struct rdma_data *)xprt->xp_p2)->r_mod;

	/*
	 * Call the stop listener routine for each plugin.
	 */
	(*rmod->rdma_ops->rdma_svc_stop)(svcdata);
	if (svcdata->active)
		DTRACE_PROBE(krpc__e__svcrdma__kstop);
}

/* ARGSUSED */
static void
svc_rdma_kclone_destroy(SVCXPRT *clone_xprt)
{
}

static bool_t
svc_rdma_krecv(SVCXPRT *clone_xprt, mblk_t *mp, struct rpc_msg *msg)
{
	XDR	*xdrs;
	CONN	*conn;

	rdma_recv_data_t	*rdp = (rdma_recv_data_t *)mp->b_rptr;
	struct clone_rdma_data *crdp;
	struct clist	*cl = NULL;
	struct clist	*wcl = NULL;
	struct clist	*cllong = NULL;

	rdma_stat	status;
	uint32_t vers, op, pos, xid;
	uint32_t rdma_credit;
	uint32_t wcl_total_length = 0;
	bool_t wwl = FALSE;

	crdp = (struct clone_rdma_data *)clone_xprt->xp_p2buf;
	RSSTAT_INCR(rscalls);
	conn = rdp->conn;

	status = rdma_svc_postrecv(conn);
	if (status != RDMA_SUCCESS) {
		DTRACE_PROBE(krpc__e__svcrdma__krecv__postrecv);
		goto badrpc_call;
	}

	xdrs = &clone_xprt->xp_xdrin;
	xdrmem_create(xdrs, rdp->rpcmsg.addr, rdp->rpcmsg.len, XDR_DECODE);
	xid = *(uint32_t *)rdp->rpcmsg.addr;
	XDR_SETPOS(xdrs, sizeof (uint32_t));

	if (! xdr_u_int(xdrs, &vers) ||
	    ! xdr_u_int(xdrs, &rdma_credit) ||
	    ! xdr_u_int(xdrs, &op)) {
		DTRACE_PROBE(krpc__e__svcrdma__krecv__uint);
		goto xdr_err;
	}

	/* Check that the status of the recv operation was normal */
	if (rdp->status != 0) {
		DTRACE_PROBE1(krpc__e__svcrdma__krecv__invalid__status,
		    int, rdp->status);
		goto badrpc_call;
	}

	if (! xdr_do_clist(xdrs, &cl)) {
		DTRACE_PROBE(krpc__e__svcrdma__krecv__do__clist);
		goto xdr_err;
	}

	if (!xdr_decode_wlist_svc(xdrs, &wcl, &wwl, &wcl_total_length, conn)) {
		DTRACE_PROBE(krpc__e__svcrdma__krecv__decode__wlist);
		if (cl)
			clist_free(cl);
		goto xdr_err;
	}
	crdp->cl_wlist = wcl;

	crdp->cl_reply = NULL;
	(void) xdr_decode_reply_wchunk(xdrs, &crdp->cl_reply);

	/*
	 * A chunk at 0 offset indicates that the RPC call message
	 * is in a chunk.  Get the RPC call message chunk.
	 */
	if (cl != NULL && op == RDMA_NOMSG) {

		/* Remove RPC call message chunk from chunklist */
		cllong = cl;
		cl = cl->c_next;
		cllong->c_next = NULL;


		/* Allocate and register memory for the RPC call msg chunk */
		cllong->rb_longbuf.type = RDMA_LONG_BUFFER;
		cllong->rb_longbuf.len = cllong->c_len > LONG_REPLY_LEN ?
		    cllong->c_len : LONG_REPLY_LEN;

		if (rdma_buf_alloc(conn, &cllong->rb_longbuf)) {
			clist_free(cllong);
			goto cll_malloc_err;
		}

		cllong->u.c_daddr3 = cllong->rb_longbuf.addr;

		if (cllong->u.c_daddr == NULL) {
			DTRACE_PROBE(krpc__e__svcrdma__krecv__nomem);
			rdma_buf_free(conn, &cllong->rb_longbuf);
			clist_free(cllong);
			goto cll_malloc_err;
		}

		status = clist_register(conn, cllong, CLIST_REG_DST);
		if (status) {
			DTRACE_PROBE(krpc__e__svcrdma__krecv__clist__reg);
			rdma_buf_free(conn, &cllong->rb_longbuf);
			clist_free(cllong);
			goto cll_malloc_err;
		}

		/*
		 * Now read the RPC call message in
		 */
		status = RDMA_READ(conn, cllong, WAIT);
		if (status) {
			DTRACE_PROBE(krpc__e__svcrdma__krecv__read);
			(void) clist_deregister(conn, cllong, CLIST_REG_DST);
			rdma_buf_free(conn, &cllong->rb_longbuf);
			clist_free(cllong);
			goto cll_malloc_err;
		}

		status = clist_syncmem(conn, cllong, CLIST_REG_DST);
		(void) clist_deregister(conn, cllong, CLIST_REG_DST);

		xdrrdma_create(xdrs, (caddr_t)(uintptr_t)cllong->u.c_daddr3,
		    cllong->c_len, 0, cl, XDR_DECODE, conn);

		crdp->rpcbuf = cllong->rb_longbuf;
		crdp->rpcbuf.len = cllong->c_len;
		clist_free(cllong);
		RDMA_BUF_FREE(conn, &rdp->rpcmsg);
	} else {
		pos = XDR_GETPOS(xdrs);
		xdrrdma_create(xdrs, rdp->rpcmsg.addr + pos,
		    rdp->rpcmsg.len - pos, 0, cl, XDR_DECODE, conn);
		crdp->rpcbuf = rdp->rpcmsg;

		/* Use xdrrdmablk_ops to indicate there is a read chunk list */
		if (cl != NULL) {
			int32_t flg = XDR_RDMA_RLIST_REG;

			XDR_CONTROL(xdrs, XDR_RDMA_SET_FLAGS, &flg);
			xdrs->x_ops = &xdrrdmablk_ops;
		}
	}

	if (crdp->cl_wlist) {
		int32_t flg = XDR_RDMA_WLIST_REG;

		XDR_CONTROL(xdrs, XDR_RDMA_SET_WLIST, crdp->cl_wlist);
		XDR_CONTROL(xdrs, XDR_RDMA_SET_FLAGS, &flg);
	}

	if (! xdr_callmsg(xdrs, msg)) {
		DTRACE_PROBE(krpc__e__svcrdma__krecv__callmsg);
		RSSTAT_INCR(rsxdrcall);
		goto callmsg_err;
	}

	/*
	 * Point the remote transport address in the service_transport
	 * handle at the address in the request.
	 */
	clone_xprt->xp_rtaddr.buf = conn->c_raddr.buf;
	clone_xprt->xp_rtaddr.len = conn->c_raddr.len;
	clone_xprt->xp_rtaddr.maxlen = conn->c_raddr.len;
	clone_xprt->xp_xid = xid;
	crdp->conn = conn;

	freeb(mp);

	return (TRUE);

callmsg_err:
	rdma_buf_free(conn, &crdp->rpcbuf);

cll_malloc_err:
	if (cl)
		clist_free(cl);
xdr_err:
	XDR_DESTROY(xdrs);

badrpc_call:
	RDMA_BUF_FREE(conn, &rdp->rpcmsg);
	RDMA_REL_CONN(conn);
	freeb(mp);
	RSSTAT_INCR(rsbadcalls);
	return (FALSE);
}

static int
svc_process_long_reply(SVCXPRT *clone_xprt,
    xdrproc_t xdr_results, caddr_t xdr_location,
    struct rpc_msg *msg, bool_t has_args, int *msglen,
    int *freelen, int *numchunks, unsigned int *final_len)
{
	int status;
	XDR xdrslong;
	struct clist *wcl = NULL;
	int count = 0;
	int alloc_len;
	char *memp;
	rdma_buf_t long_rpc = {0};
	struct clone_rdma_data *crdp;

	crdp = (struct clone_rdma_data *)clone_xprt->xp_p2buf;

	bzero(&xdrslong, sizeof (xdrslong));

	/* Choose a size for the long rpc response */
	if (MSG_IS_RPCSEC_GSS(msg)) {
		alloc_len = RNDUP(MAX_AUTH_BYTES + *msglen);
	} else {
		alloc_len = RNDUP(*msglen);
	}

	if (alloc_len <= 64 * 1024) {
		if (alloc_len > 32 * 1024) {
			alloc_len = 64 * 1024;
		} else {
			if (alloc_len > 16 * 1024) {
				alloc_len = 32 * 1024;
			} else {
				alloc_len = 16 * 1024;
			}
		}
	}

	long_rpc.type = RDMA_LONG_BUFFER;
	long_rpc.len = alloc_len;
	if (rdma_buf_alloc(crdp->conn, &long_rpc)) {
		return (SVC_RDMA_FAIL);
	}
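
	/*
	 * The entire reply is XDR encoded into this one long buffer, then
	 * carved across the segments of the client-supplied reply write
	 * chunk (crdp->cl_reply) and pushed to the client with RDMA_WRITE
	 * below.  svc_rdma_ksend() later advertises where the data landed
	 * by encoding the reply write chunk into the RDMA header.
	 */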

	memp = long_rpc.addr;
	xdrmem_create(&xdrslong, memp, alloc_len, XDR_ENCODE);

	msg->rm_xid = clone_xprt->xp_xid;

	if (!(xdr_replymsg(&xdrslong, msg) &&
	    (!has_args || SVCAUTH_WRAP(&clone_xprt->xp_auth, &xdrslong,
	    xdr_results, xdr_location)))) {
		rdma_buf_free(crdp->conn, &long_rpc);
		DTRACE_PROBE(krpc__e__svcrdma__longrep__authwrap);
		return (SVC_RDMA_FAIL);
	}

	*final_len = XDR_GETPOS(&xdrslong);

	*numchunks = 0;
	*freelen = 0;

	wcl = crdp->cl_reply;
	wcl->rb_longbuf = long_rpc;

	count = *final_len;
	while (wcl != NULL) {
		if (wcl->c_dmemhandle.mrc_rmr == 0)
			break;

		if (wcl->c_len > count) {
			wcl->c_len = count;
		}
		wcl->w.c_saddr3 = (caddr_t)memp;

		count -= wcl->c_len;
		*numchunks += 1;
		if (count == 0)
			break;
		memp += wcl->c_len;
		wcl = wcl->c_next;
	}

	wcl = crdp->cl_reply;

	/*
	 * MUST fail if there is still more data
	 */
	if (count > 0) {
		rdma_buf_free(crdp->conn, &long_rpc);
		DTRACE_PROBE(krpc__e__svcrdma__longrep__dlen__clist);
		return (SVC_RDMA_FAIL);
	}

	if (clist_register(crdp->conn, wcl, CLIST_REG_SOURCE) != RDMA_SUCCESS) {
		rdma_buf_free(crdp->conn, &long_rpc);
		DTRACE_PROBE(krpc__e__svcrdma__longrep__clistreg);
		return (SVC_RDMA_FAIL);
	}

	status = clist_syncmem(crdp->conn, wcl, CLIST_REG_SOURCE);

	if (status) {
		(void) clist_deregister(crdp->conn, wcl, CLIST_REG_SOURCE);
		rdma_buf_free(crdp->conn, &long_rpc);
		DTRACE_PROBE(krpc__e__svcrdma__longrep__syncmem);
		return (SVC_RDMA_FAIL);
	}

	status = RDMA_WRITE(crdp->conn, wcl, WAIT);

	(void) clist_deregister(crdp->conn, wcl, CLIST_REG_SOURCE);
	rdma_buf_free(crdp->conn, &wcl->rb_longbuf);

	if (status != RDMA_SUCCESS) {
		DTRACE_PROBE(krpc__e__svcrdma__longrep__write);
		return (SVC_RDMA_FAIL);
	}

	return (SVC_RDMA_SUCCESS);
}


static int
svc_compose_rpcmsg(SVCXPRT *clone_xprt, CONN *conn, xdrproc_t xdr_results,
    caddr_t xdr_location, rdma_buf_t *rpcreply, XDR **xdrs,
    struct rpc_msg *msg, bool_t has_args, uint_t *len)
{
	/*
	 * Get a pre-allocated buffer for rpc reply
	 */
	rpcreply->type = SEND_BUFFER;
	if (rdma_buf_alloc(conn, rpcreply)) {
		DTRACE_PROBE(krpc__e__svcrdma__rpcmsg__reply__nofreebufs);
		return (SVC_RDMA_FAIL);
	}

	xdrrdma_create(*xdrs, rpcreply->addr, rpcreply->len,
	    0, NULL, XDR_ENCODE, conn);

	msg->rm_xid = clone_xprt->xp_xid;

	if (has_args) {
		if (!(xdr_replymsg(*xdrs, msg) &&
		    (!has_args ||
		    SVCAUTH_WRAP(&clone_xprt->xp_auth, *xdrs,
		    xdr_results, xdr_location)))) {
			rdma_buf_free(conn, rpcreply);
			DTRACE_PROBE(
			    krpc__e__svcrdma__rpcmsg__reply__authwrap1);
			return (SVC_RDMA_FAIL);
		}
	} else {
		if (!xdr_replymsg(*xdrs, msg)) {
			rdma_buf_free(conn, rpcreply);
			DTRACE_PROBE(
			    krpc__e__svcrdma__rpcmsg__reply__authwrap2);
			return (SVC_RDMA_FAIL);
		}
	}

	*len = XDR_GETPOS(*xdrs);

	return (SVC_RDMA_SUCCESS);
}

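/*
 * svc_rdma_ksend() below picks one of two reply strategies:
 *
 *   RDMA_MSG   - the reply is expected to fit inline (msglen < RPC_MSG_SZ);
 *                it is encoded into a SEND buffer by svc_compose_rpcmsg()
 *                and delivered along with the RDMA header in one RDMA_SEND.
 *   RDMA_NOMSG - the reply is too large, or the inline encode failed; it is
 *                encoded into a long buffer and written into the client's
 *                reply write chunk by svc_process_long_reply(), and only
 *                the RDMA header is sent.
 */
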
/*
 * Send rpc reply.
 */
static bool_t
svc_rdma_ksend(SVCXPRT *clone_xprt, struct rpc_msg *msg)
{
	XDR *xdrs_rpc = &(clone_xprt->xp_xdrout);
	XDR xdrs_rhdr;
	CONN *conn = NULL;
	rdma_buf_t rbuf_resp = {0}, rbuf_rpc_resp = {0};

	struct clone_rdma_data *crdp;
	struct clist *cl_read = NULL;
	struct clist *cl_send = NULL;
	struct clist *cl_write = NULL;
	xdrproc_t xdr_results;		/* results XDR encoding function */
	caddr_t xdr_location;		/* response results pointer */

	int retval = FALSE;
	int status, msglen, num_wreply_segments = 0;
	uint32_t rdma_credit = 0;
	int freelen = 0;
	bool_t has_args;
	uint_t final_resp_len, rdma_response_op, vers;

	bzero(&xdrs_rhdr, sizeof (XDR));
	crdp = (struct clone_rdma_data *)clone_xprt->xp_p2buf;
	conn = crdp->conn;

	/*
	 * If there is a result procedure specified in the reply message,
	 * it will be processed in the xdr_replymsg and SVCAUTH_WRAP.
	 * We need to make sure it won't be processed twice, so we null
	 * it for xdr_replymsg here.
	 */
	has_args = FALSE;
	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if ((xdr_results = msg->acpted_rply.ar_results.proc) != NULL) {
			has_args = TRUE;
			xdr_location = msg->acpted_rply.ar_results.where;
			msg->acpted_rply.ar_results.proc = xdr_void;
			msg->acpted_rply.ar_results.where = NULL;
		}
	}

	/*
	 * Given the limit on the inline response size (RPC_MSG_SZ),
	 * there is a need to make a guess as to the overall size of
	 * the response.  If the resultant size is beyond the inline
	 * size, then the server needs to use the "reply chunk list"
	 * provided by the client (if the client provided one).  An
	 * example of this type of response is a READDIR response: a
	 * small directory read fits in RPC_MSG_SZ (and that is the
	 * preference), but a larger one may not fit.
	 *
	 * Combine the encoded size and the size of the true results
	 * and then make the decision about where to encode and send results.
	 *
	 * One important note: this calculation ignores the size
	 * of the encoding of the authentication overhead.  The reason
	 * for this is rooted in the complexities of access to the
	 * encoded size of RPCSEC_GSS related authentication,
	 * integrity, and privacy.
	 *
	 * If it turns out that the encoded authentication bumps the
	 * response over the RPC_MSG_SZ limit, then it may need to
	 * attempt to encode for the reply chunk list.
	 */

	/*
	 * Calculate the "sizeof" of the RPC response header and the
	 * encoded results.
	 */
	msglen = xdr_sizeof(xdr_replymsg, msg);

	if (msglen > 0) {
		RSSTAT_INCR(rstotalreplies);
	}
	if (has_args)
		msglen += xdrrdma_sizeof(xdr_results, xdr_location,
		    rdma_minchunk, NULL, NULL);

	DTRACE_PROBE1(krpc__i__svcrdma__ksend__msglen, int, msglen);

	status = SVC_RDMA_SUCCESS;

	if (msglen < RPC_MSG_SZ) {
		/*
		 * Looks like the response will fit in the inline
		 * response; let's try
		 */
		RSSTAT_INCR(rstotalinlinereplies);

		rdma_response_op = RDMA_MSG;

		status = svc_compose_rpcmsg(clone_xprt, conn, xdr_results,
		    xdr_location, &rbuf_rpc_resp, &xdrs_rpc, msg,
		    has_args, &final_resp_len);

		DTRACE_PROBE1(krpc__i__srdma__ksend__compose_status,
		    int, status);
		DTRACE_PROBE1(krpc__i__srdma__ksend__compose_len,
		    int, final_resp_len);

		if (status == SVC_RDMA_SUCCESS && crdp->cl_reply) {
			clist_free(crdp->cl_reply);
			crdp->cl_reply = NULL;
		}
	}

	/*
	 * If the encode failed (size?) or the message really is
	 * larger than what is allowed, try the response chunk list.
	 */
	if (status != SVC_RDMA_SUCCESS || msglen >= RPC_MSG_SZ) {
		/*
		 * Attempting to use a reply chunk list when there
		 * isn't one won't get very far...
		 */
		if (crdp->cl_reply == NULL) {
			DTRACE_PROBE(krpc__e__svcrdma__ksend__noreplycl);
			goto out;
		}

		RSSTAT_INCR(rstotallongreplies);

		msglen = xdr_sizeof(xdr_replymsg, msg);
		msglen += xdrrdma_sizeof(xdr_results, xdr_location, 0,
		    NULL, NULL);

		status = svc_process_long_reply(clone_xprt, xdr_results,
		    xdr_location, msg, has_args, &msglen, &freelen,
		    &num_wreply_segments, &final_resp_len);

		DTRACE_PROBE1(krpc__i__svcrdma__ksend__longreplen,
		    int, final_resp_len);

		if (status != SVC_RDMA_SUCCESS) {
			DTRACE_PROBE(krpc__e__svcrdma__ksend__compose__failed);
			goto out;
		}

		rdma_response_op = RDMA_NOMSG;
	}

	DTRACE_PROBE1(krpc__i__svcrdma__ksend__rdmamsg__len,
	    int, final_resp_len);

	rbuf_resp.type = SEND_BUFFER;
	if (rdma_buf_alloc(conn, &rbuf_resp)) {
		rdma_buf_free(conn, &rbuf_rpc_resp);
		DTRACE_PROBE(krpc__e__svcrdma__ksend__nofreebufs);
		goto out;
	}

	rdma_credit = rdma_bufs_granted;

	vers = RPCRDMA_VERS;
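
	/*
	 * Build the reply's transport header in rbuf_resp; it is sent as
	 * the first element of the send clist.  Its layout mirrors the call
	 * header decoded in svc_rdma_krecv():
	 *
	 *	xid | vers | credits | op (RDMA_MSG or RDMA_NOMSG) |
	 *	read chunk list | write chunk list | reply write chunk
	 *
	 * The xid is stored directly into the buffer and then skipped with
	 * XDR_SETPOS, just as on the receive side.
	 */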
	xdrmem_create(&xdrs_rhdr, rbuf_resp.addr, rbuf_resp.len, XDR_ENCODE);
	(*(uint32_t *)rbuf_resp.addr) = msg->rm_xid;
	/* Skip xid and set the xdr position accordingly. */
	XDR_SETPOS(&xdrs_rhdr, sizeof (uint32_t));
	if (!xdr_u_int(&xdrs_rhdr, &vers) ||
	    !xdr_u_int(&xdrs_rhdr, &rdma_credit) ||
	    !xdr_u_int(&xdrs_rhdr, &rdma_response_op)) {
		rdma_buf_free(conn, &rbuf_rpc_resp);
		rdma_buf_free(conn, &rbuf_resp);
		DTRACE_PROBE(krpc__e__svcrdma__ksend__uint);
		goto out;
	}

	/*
	 * Now XDR the read chunk list, actually always NULL
	 */
	(void) xdr_encode_rlist_svc(&xdrs_rhdr, cl_read);

	/*
	 * encode write list -- we already drove RDMA_WRITEs
	 */
	cl_write = crdp->cl_wlist;
	if (!xdr_encode_wlist(&xdrs_rhdr, cl_write)) {
		DTRACE_PROBE(krpc__e__svcrdma__ksend__enc__wlist);
		rdma_buf_free(conn, &rbuf_rpc_resp);
		rdma_buf_free(conn, &rbuf_resp);
		goto out;
	}

	/*
	 * XDR encode the RDMA_REPLY write chunk
	 */
	if (!xdr_encode_reply_wchunk(&xdrs_rhdr, crdp->cl_reply,
	    num_wreply_segments)) {
		rdma_buf_free(conn, &rbuf_rpc_resp);
		rdma_buf_free(conn, &rbuf_resp);
		goto out;
	}

	clist_add(&cl_send, 0, XDR_GETPOS(&xdrs_rhdr), &rbuf_resp.handle,
	    rbuf_resp.addr, NULL, NULL);

	if (rdma_response_op == RDMA_MSG) {
		clist_add(&cl_send, 0, final_resp_len, &rbuf_rpc_resp.handle,
		    rbuf_rpc_resp.addr, NULL, NULL);
	}

	status = RDMA_SEND(conn, cl_send, msg->rm_xid);

	if (status == RDMA_SUCCESS) {
		retval = TRUE;
	}

out:
	/*
	 * Free up sendlist chunks
	 */
	if (cl_send != NULL)
		clist_free(cl_send);

	/*
	 * Destroy private data for xdr rdma
	 */
	if (clone_xprt->xp_xdrout.x_ops != NULL) {
		XDR_DESTROY(&(clone_xprt->xp_xdrout));
	}

	if (crdp->cl_reply) {
		clist_free(crdp->cl_reply);
		crdp->cl_reply = NULL;
	}

	/*
	 * This is completely disgusting.  If public is set it is
	 * a pointer to a structure whose first field is the address
	 * of the function to free that structure and any related
	 * stuff.  (see rrokfree in nfs_xdr.c).
	 */
	if (xdrs_rpc->x_public) {
		/* LINTED pointer alignment */
		(**((int (**)()) xdrs_rpc->x_public)) (xdrs_rpc->x_public);
	}

	if (xdrs_rhdr.x_ops != NULL) {
		XDR_DESTROY(&xdrs_rhdr);
	}

	return (retval);
}

/*
 * Deserialize arguments.
 */
static bool_t
svc_rdma_kgetargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	if ((SVCAUTH_UNWRAP(&clone_xprt->xp_auth, &clone_xprt->xp_xdrin,
	    xdr_args, args_ptr)) != TRUE)
		return (FALSE);
	return (TRUE);
}

static bool_t
svc_rdma_kfreeargs(SVCXPRT *clone_xprt, xdrproc_t xdr_args,
    caddr_t args_ptr)
{
	struct clone_rdma_data *crdp;
	bool_t retval;

	crdp = (struct clone_rdma_data *)clone_xprt->xp_p2buf;

	/*
	 * Free the args if needed, then XDR_DESTROY.
	 */
	if (args_ptr) {
		XDR	*xdrs = &clone_xprt->xp_xdrin;

		xdrs->x_op = XDR_FREE;
		retval = (*xdr_args)(xdrs, args_ptr);
	}

	XDR_DESTROY(&(clone_xprt->xp_xdrin));
	rdma_buf_free(crdp->conn, &crdp->rpcbuf);
	if (crdp->cl_reply) {
		clist_free(crdp->cl_reply);
		crdp->cl_reply = NULL;
	}
	RDMA_REL_CONN(crdp->conn);

	return (retval);
}

/* ARGSUSED */
static int32_t *
svc_rdma_kgetres(SVCXPRT *clone_xprt, int size)
{
	return (NULL);
}

/* ARGSUSED */
static void
svc_rdma_kfreeres(SVCXPRT *clone_xprt)
{
}

/*
 * The dup caching routines below provide a cache of non-failure
 * transaction IDs.  RPC service routines can use this to detect
 * retransmissions and re-send a non-failure response.
 */

/*
 * MAXDUPREQS is the number of cached items.  It should be adjusted
 * to the service load so that there is likely to be a response entry
 * when the first retransmission comes in.
 */
#define	MAXDUPREQS	1024

/*
 * This should be appropriately scaled to MAXDUPREQS.
 */
#define	DRHASHSZ	257

#if ((DRHASHSZ & (DRHASHSZ - 1)) == 0)
#define	XIDHASH(xid)	((xid) & (DRHASHSZ - 1))
#else
#define	XIDHASH(xid)	((xid) % DRHASHSZ)
#endif
#define	DRHASH(dr)	XIDHASH((dr)->dr_xid)
#define	REQTOXID(req)	((req)->rq_xprt->xp_xid)

static int	rdmandupreqs = 0;
static int	rdmamaxdupreqs = MAXDUPREQS;
static kmutex_t rdmadupreq_lock;
static struct dupreq *rdmadrhashtbl[DRHASHSZ];
static int	rdmadrhashstat[DRHASHSZ];

static void unhash(struct dupreq *);

/*
 * rdmadrmru points to the head of a circular linked list in lru order.
 * rdmadrmru->dr_next == drlru
 */
struct dupreq *rdmadrmru;

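/*
 * The cache itself is a hash table of dupreq entries chained through
 * dr_chain and indexed by XIDHASH() (a mask when DRHASHSZ is a power of
 * two, a modulo otherwise), together with the rdmadrmru ring, which links
 * every entry in LRU order so that the oldest completed entry can be
 * recycled once rdmamaxdupreqs entries exist.
 */
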
/*
 * svc_rdma_kdup searches the request cache and returns 0 if the
 * request is not found in the cache.  If it is found, then it
 * returns the state of the request (in progress or done) and
 * the status or attributes that were part of the original reply.
 */
static int
svc_rdma_kdup(struct svc_req *req, caddr_t res, int size, struct dupreq **drpp,
	bool_t *dupcachedp)
{
	struct dupreq *dr;
	uint32_t xid;
	uint32_t drhash;
	int status;

	xid = REQTOXID(req);
	mutex_enter(&rdmadupreq_lock);
	RSSTAT_INCR(rsdupchecks);
	/*
	 * Check to see whether an entry already exists in the cache.
	 */
	dr = rdmadrhashtbl[XIDHASH(xid)];
	while (dr != NULL) {
		if (dr->dr_xid == xid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    bcmp((caddr_t)dr->dr_addr.buf,
		    (caddr_t)req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) {
			status = dr->dr_status;
			if (status == DUP_DONE) {
				bcopy(dr->dr_resp.buf, res, size);
				if (dupcachedp != NULL)
					*dupcachedp = (dr->dr_resfree != NULL);
			} else {
				dr->dr_status = DUP_INPROGRESS;
				*drpp = dr;
			}
			RSSTAT_INCR(rsdupreqs);
			mutex_exit(&rdmadupreq_lock);
			return (status);
		}
		dr = dr->dr_chain;
	}

	/*
	 * There wasn't an entry; either allocate a new one or recycle
	 * an old one.
	 */
	if (rdmandupreqs < rdmamaxdupreqs) {
		dr = kmem_alloc(sizeof (*dr), KM_NOSLEEP);
		if (dr == NULL) {
			mutex_exit(&rdmadupreq_lock);
			return (DUP_ERROR);
		}
		dr->dr_resp.buf = NULL;
		dr->dr_resp.maxlen = 0;
		dr->dr_addr.buf = NULL;
		dr->dr_addr.maxlen = 0;
		if (rdmadrmru) {
			dr->dr_next = rdmadrmru->dr_next;
			rdmadrmru->dr_next = dr;
		} else {
			dr->dr_next = dr;
		}
		rdmandupreqs++;
	} else {
		dr = rdmadrmru->dr_next;
		while (dr->dr_status == DUP_INPROGRESS) {
			dr = dr->dr_next;
			if (dr == rdmadrmru->dr_next) {
				mutex_exit(&rdmadupreq_lock);
				return (DUP_ERROR);
			}
		}
		unhash(dr);
		if (dr->dr_resfree) {
			(*dr->dr_resfree)(dr->dr_resp.buf);
		}
	}
	dr->dr_resfree = NULL;
	rdmadrmru = dr;

	dr->dr_xid = REQTOXID(req);
	dr->dr_prog = req->rq_prog;
	dr->dr_vers = req->rq_vers;
	dr->dr_proc = req->rq_proc;
	if (dr->dr_addr.maxlen < req->rq_xprt->xp_rtaddr.len) {
		if (dr->dr_addr.buf != NULL)
			kmem_free(dr->dr_addr.buf, dr->dr_addr.maxlen);
		dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
		dr->dr_addr.buf = kmem_alloc(dr->dr_addr.maxlen, KM_NOSLEEP);
		if (dr->dr_addr.buf == NULL) {

/*
 * svc_rdma_kdupdone marks the request done (DUP_DONE or DUP_DROP)
 * and stores the response.
 */
static void
svc_rdma_kdupdone(struct dupreq *dr, caddr_t res, void (*dis_resfree)(),
	int size, int status)
{
	ASSERT(dr->dr_resfree == NULL);
	if (status == DUP_DONE) {
		bcopy(res, dr->dr_resp.buf, size);
		dr->dr_resfree = dis_resfree;
	}
	dr->dr_status = status;
}
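
/*
 * Note that svc_rdma_kdupdone() updates the entry without taking
 * rdmadupreq_lock. This appears to rely on the entry having been
 * left in the DUP_INPROGRESS state by svc_rdma_kdup(): the recycling
 * loop there skips DUP_INPROGRESS entries, so the entry cannot be
 * reused out from under the caller before it is marked done.
 */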

/*
 * This routine expects that the mutex, rdmadupreq_lock, is already held.
 */
static void
unhash(struct dupreq *dr)
{
	struct dupreq *drt;
	struct dupreq *drtprev = NULL;
	uint32_t drhash;

	ASSERT(MUTEX_HELD(&rdmadupreq_lock));

	drhash = (uint32_t)DRHASH(dr);
	drt = rdmadrhashtbl[drhash];
	while (drt != NULL) {
		if (drt == dr) {
			rdmadrhashstat[drhash]--;
			if (drtprev == NULL) {
				rdmadrhashtbl[drhash] = drt->dr_chain;
			} else {
				drtprev->dr_chain = drt->dr_chain;
			}
			return;
		}
		drtprev = drt;
		drt = drt->dr_chain;
	}
}

bool_t
rdma_get_wchunk(struct svc_req *req, iovec_t *iov, struct clist *wlist)
{
	struct clist *clist;
	uint32_t tlen;

	if (req->rq_xprt->xp_type != T_RDMA) {
		return (FALSE);
	}

	tlen = 0;
	clist = wlist;
	while (clist) {
		tlen += clist->c_len;
		clist = clist->c_next;
	}

	/*
	 * Set the iovec to the address of the first segment of the
	 * first write chunk sent by the client and to the total length
	 * of the write list. krecv() already allocated a buffer large
	 * enough to back it, but registration is deferred until we
	 * write the buffer back to the (NFS) client using RDMA_WRITE.
	 */
	iov->iov_base = (caddr_t)(uintptr_t)wlist->w.c_saddr;
	iov->iov_len = tlen;

	return (TRUE);
}
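
/*
 * For illustration (hypothetical sizes): if the client's write list
 * carries two chunk segments of 4096 and 1024 bytes, the loop above
 * computes tlen == 5120, and the returned iovec describes the single
 * server-side buffer that backs the whole list: iov_base is the
 * address of the first segment and iov_len is the full 5120 bytes
 * that will later be pushed back to the client with RDMA_WRITE.
 */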