xref: /onnv-gate/usr/src/uts/common/io/ib/mgt/ibmf/ibmf_wqe.c (revision 7354:935b93de4e78)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
56819Stomee  * Common Development and Distribution License (the "License").
66819Stomee  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
226819Stomee  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate /*
270Sstevel@tonic-gate  * This file implements the Work Queue Entry (WQE) management in IBMF.
280Sstevel@tonic-gate  */
290Sstevel@tonic-gate 
300Sstevel@tonic-gate #include <sys/ib/mgt/ibmf/ibmf_impl.h>
310Sstevel@tonic-gate 
320Sstevel@tonic-gate extern int ibmf_trace_level;
330Sstevel@tonic-gate extern int ibmf_send_wqes_per_port, ibmf_recv_wqes_per_port;
340Sstevel@tonic-gate 
350Sstevel@tonic-gate #define	IBMF_INIT_SG_ELEMENT(sg, mem, lkey, size)	{ \
360Sstevel@tonic-gate 	(sg).ds_va = (ib_vaddr_t)(uintptr_t)(mem);	\
370Sstevel@tonic-gate 	(sg).ds_key = (lkey);				\
380Sstevel@tonic-gate 	(sg).ds_len = (size);				\
390Sstevel@tonic-gate }
400Sstevel@tonic-gate 
410Sstevel@tonic-gate #define	IBMF_ADDR_TO_SEND_WR_ID(ptr, id)		\
420Sstevel@tonic-gate 	(id) = (ibt_wrid_t)(uintptr_t)(ptr)
430Sstevel@tonic-gate 
440Sstevel@tonic-gate #define	IBMF_ADDR_TO_RECV_WR_ID(ptr, id)		 \
450Sstevel@tonic-gate 	(id) = ((ibt_wrid_t)(uintptr_t)(ptr) | IBMF_RCV_CQE)
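
/*
 * Note on the WR ID encoding above: the virtual address of the WQE context
 * itself is used as the 64-bit work request ID, and receive WR IDs are
 * additionally tagged with the IBMF_RCV_CQE bit so that a completion can be
 * classified with a single test.  A minimal sketch of the decode side,
 * assuming the completion is returned in an ibt_wc_t whose ID is in its
 * wc_id field (illustrative only; the real handling lives in the IBMF
 * completion handler code, not in this file):
 *
 *	if (wc.wc_id & IBMF_RCV_CQE) {
 *		ibmf_recv_wqe_t *rwqep = (ibmf_recv_wqe_t *)
 *		    (uintptr_t)(wc.wc_id & ~IBMF_RCV_CQE);
 *		receive completion; rwqep->recv_mem holds GRH + MAD
 *	} else {
 *		ibmf_send_wqe_t *swqep =
 *		    (ibmf_send_wqe_t *)(uintptr_t)wc.wc_id;
 *		send completion
 *	}
 */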
460Sstevel@tonic-gate 
470Sstevel@tonic-gate #define	IBMF_INIT_RMPP_HDR(hdrp, ver, type, respt, flg, status, seg, lennwl) { \
480Sstevel@tonic-gate 	(hdrp)->rmpp_version = (ver);			\
490Sstevel@tonic-gate 	(hdrp)->rmpp_type = (type);			\
500Sstevel@tonic-gate 	(hdrp)->rmpp_resp_time = (respt);		\
510Sstevel@tonic-gate 	(hdrp)->rmpp_flags = (flg);			\
520Sstevel@tonic-gate 	(hdrp)->rmpp_status = (status);			\
530Sstevel@tonic-gate 	(hdrp)->rmpp_segnum = (h2b32(seg));		\
540Sstevel@tonic-gate 	(hdrp)->rmpp_pyldlen_nwl = (h2b32(lennwl));	\
550Sstevel@tonic-gate }
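
/*
 * IBMF_INIT_RMPP_HDR() fills in the RMPP header that immediately follows the
 * MAD header in the send buffer.  The segment number and the payload-length/
 * new-window word are converted to big-endian wire order with h2b32(), while
 * the single-byte fields are stored as-is.  It is invoked from
 * ibmf_i_init_send_wqe() later in this file, essentially as:
 *
 *	IBMF_INIT_RMPP_HDR(rmpp_hdr, IBMF_RMPP_VERSION, rmpp_ctx->rmpp_type,
 *	    rmpp_ctx->rmpp_respt, rmpp_ctx->rmpp_flags,
 *	    rmpp_ctx->rmpp_status, rmpp_ctx->rmpp_word3, rmpp_ctx->rmpp_word4)
 */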
560Sstevel@tonic-gate 
570Sstevel@tonic-gate static int ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg,
580Sstevel@tonic-gate     int kmflags);
590Sstevel@tonic-gate static void ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg);
600Sstevel@tonic-gate static int ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg,
610Sstevel@tonic-gate     int kmflags);
620Sstevel@tonic-gate static void ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg);
630Sstevel@tonic-gate static int ibmf_i_extend_wqe_mem(ibmf_ci_t *cip,
640Sstevel@tonic-gate     ibmf_qp_handle_t ibmf_qp_handle, ibmf_wqe_mgt_t *wqe_mgt,
650Sstevel@tonic-gate     boolean_t block);
660Sstevel@tonic-gate 
670Sstevel@tonic-gate /*
680Sstevel@tonic-gate  * ibmf_send_wqe_cache_constructor():
690Sstevel@tonic-gate  *	Constructor for the kmem cache used for send WQEs for special QPs
700Sstevel@tonic-gate  */
710Sstevel@tonic-gate /* ARGSUSED */
720Sstevel@tonic-gate static int
730Sstevel@tonic-gate ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
740Sstevel@tonic-gate {
750Sstevel@tonic-gate 	ibmf_send_wqe_t		*send_wqe = (ibmf_send_wqe_t *)buf;
760Sstevel@tonic-gate 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
770Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*wqe_mgt;
780Sstevel@tonic-gate 
790Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
800Sstevel@tonic-gate 	    ibmf_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
810Sstevel@tonic-gate 	    "ibmf_send_wqe_cache_constructor() enter, buf = %p, cdrarg = %p\n",
820Sstevel@tonic-gate 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
830Sstevel@tonic-gate 
840Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
850Sstevel@tonic-gate 
860Sstevel@tonic-gate 	/* initialize send WQE context */
870Sstevel@tonic-gate 	send_wqe->send_sg_mem =
880Sstevel@tonic-gate 	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
890Sstevel@tonic-gate 	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
900Sstevel@tonic-gate 	if (send_wqe->send_sg_mem == NULL) {
910Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
920Sstevel@tonic-gate 		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
930Sstevel@tonic-gate 		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
940Sstevel@tonic-gate 		    "Failed vmem allocation in send WQE cache constructor");
950Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
960Sstevel@tonic-gate 		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
970Sstevel@tonic-gate 		    "ibmf_send_wqe_cache_constructor() exit\n");
980Sstevel@tonic-gate 		return (-1);
990Sstevel@tonic-gate 	}
1000Sstevel@tonic-gate 
1010Sstevel@tonic-gate 	mutex_enter(&cip->ci_wqe_mutex);
1020Sstevel@tonic-gate 	wqe_mgt = cip->ci_wqe_mgt_list;
1030Sstevel@tonic-gate 
1040Sstevel@tonic-gate 	/* Look for the WQE management struct that includes this address */
1050Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
1060Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
1070Sstevel@tonic-gate 		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
1080Sstevel@tonic-gate 		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
1090Sstevel@tonic-gate 		    wqe_mgt->wqes_kmem_sz))) {
1100Sstevel@tonic-gate 			mutex_exit(&wqe_mgt->wqes_mutex);
1110Sstevel@tonic-gate 			break;
1120Sstevel@tonic-gate 		}
1130Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
1140Sstevel@tonic-gate 		wqe_mgt = wqe_mgt->wqe_mgt_next;
1150Sstevel@tonic-gate 	}
1160Sstevel@tonic-gate 
1170Sstevel@tonic-gate 	if (wqe_mgt == NULL) {
1180Sstevel@tonic-gate 		mutex_exit(&cip->ci_wqe_mutex);
119*7354SGiri.Adari@Sun.COM 		vmem_free(cip->ci_wqe_ib_vmem,
120*7354SGiri.Adari@Sun.COM 		    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
1210Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1220Sstevel@tonic-gate 		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
1230Sstevel@tonic-gate 		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
1240Sstevel@tonic-gate 		    "Address not found in WQE mgt list");
1250Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1260Sstevel@tonic-gate 		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
1270Sstevel@tonic-gate 		    "ibmf_send_wqe_cache_constructor() exit\n");
1280Sstevel@tonic-gate 		return (-1);
1290Sstevel@tonic-gate 	}
1300Sstevel@tonic-gate 
1310Sstevel@tonic-gate 	mutex_enter(&wqe_mgt->wqes_mutex);
1320Sstevel@tonic-gate 
1330Sstevel@tonic-gate 	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
1340Sstevel@tonic-gate 	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
1350Sstevel@tonic-gate 	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
1360Sstevel@tonic-gate 	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
1370Sstevel@tonic-gate 	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
1380Sstevel@tonic-gate 	send_wqe->send_wqe_flags = 0;
1390Sstevel@tonic-gate 	send_wqe->send_wqe_next = NULL;
1400Sstevel@tonic-gate 
1410Sstevel@tonic-gate 	mutex_exit(&wqe_mgt->wqes_mutex);
1420Sstevel@tonic-gate 	mutex_exit(&cip->ci_wqe_mutex);
1430Sstevel@tonic-gate 
1440Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1450Sstevel@tonic-gate 	    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
1460Sstevel@tonic-gate 	    "ibmf_send_wqe_cache_constructor() exit\n");
1470Sstevel@tonic-gate 
1480Sstevel@tonic-gate 	return (0);
1490Sstevel@tonic-gate }
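
/*
 * The constructor above works with two parallel views of the same WQE
 * backing store: wqes_ib_mem is the registered I/O virtual address range
 * returned by ibt_register_mr() (the addresses the HCA uses), while
 * wqes_kmem is the kernel virtual address of the same buffer.  The vmem
 * arena hands out chunks in the registered (IB) address space, and the
 * matching kernel address is recovered by applying the chunk's offset to
 * the kmem base:
 *
 *	offset   = send_sg_mem - wqe_mgt->wqes_ib_mem;
 *	send_mem = wqe_mgt->wqes_kmem + offset;
 *
 * For example (hypothetical numbers, purely for illustration), if
 * wqes_ib_mem were 0x10000 and the arena returned 0x10000 +
 * 2 * IBMF_MEM_PER_WQE, the WQE's kernel buffer would start at
 * wqes_kmem + 2 * IBMF_MEM_PER_WQE.
 */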
1500Sstevel@tonic-gate 
1510Sstevel@tonic-gate /*
1520Sstevel@tonic-gate  * ibmf_send_wqe_cache_destructor():
1530Sstevel@tonic-gate  *	Destructor for send WQE kmem cache for special QPs
1540Sstevel@tonic-gate  */
1550Sstevel@tonic-gate /* ARGSUSED */
1560Sstevel@tonic-gate static void
1570Sstevel@tonic-gate ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg)
1580Sstevel@tonic-gate {
1590Sstevel@tonic-gate 	ibmf_send_wqe_t		*send_wqe = (ibmf_send_wqe_t *)buf;
1600Sstevel@tonic-gate 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
1610Sstevel@tonic-gate 
1620Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
1630Sstevel@tonic-gate 	    ibmf_i_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
1640Sstevel@tonic-gate 	    "ibmf_send_wqe_cache_destructor() enter, buf = %p, cdrarg = %p\n",
1650Sstevel@tonic-gate 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
1660Sstevel@tonic-gate 
1670Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
1680Sstevel@tonic-gate 
1690Sstevel@tonic-gate 	/* Free the vmem allocated for the WQE */
1700Sstevel@tonic-gate 	vmem_free(cip->ci_wqe_ib_vmem,
1710Sstevel@tonic-gate 	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
1720Sstevel@tonic-gate 	send_wqe->send_mem = NULL;
1730Sstevel@tonic-gate 
1740Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1750Sstevel@tonic-gate 	    ibmf_i_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
1760Sstevel@tonic-gate 	    "ibmf_send_wqe_cache_destructor() exit\n");
1770Sstevel@tonic-gate }
1780Sstevel@tonic-gate 
1790Sstevel@tonic-gate /*
1800Sstevel@tonic-gate  * ibmf_recv_wqe_cache_constructor():
1810Sstevel@tonic-gate  *	Constructor for receive WQE kmem cache for special QPs
1820Sstevel@tonic-gate  */
1830Sstevel@tonic-gate /* ARGSUSED */
1840Sstevel@tonic-gate static int
1850Sstevel@tonic-gate ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
1860Sstevel@tonic-gate {
1870Sstevel@tonic-gate 	ibmf_recv_wqe_t		*recv_wqe = (ibmf_recv_wqe_t *)buf;
1880Sstevel@tonic-gate 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
1890Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*wqe_mgt;
1900Sstevel@tonic-gate 
1910Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
1920Sstevel@tonic-gate 	    ibmf_i_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
1930Sstevel@tonic-gate 	    "ibmf_recv_wqe_cache_constructor() enter, buf = %p, cdrarg = %p\n",
1940Sstevel@tonic-gate 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
1950Sstevel@tonic-gate 
1960Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
1970Sstevel@tonic-gate 
1980Sstevel@tonic-gate 	/* initialize recv WQE context */
1990Sstevel@tonic-gate 	recv_wqe->recv_sg_mem =
2000Sstevel@tonic-gate 	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
2010Sstevel@tonic-gate 	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
2020Sstevel@tonic-gate 	if (recv_wqe->recv_sg_mem == NULL) {
2030Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
2040Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
2050Sstevel@tonic-gate 		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
2060Sstevel@tonic-gate 		    "Failed vmem allocation in receive WQE cache constructor");
2070Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
2080Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
2090Sstevel@tonic-gate 		    "ibmf_recv_wqe_cache_constructor() exit\n");
2100Sstevel@tonic-gate 		return (-1);
2110Sstevel@tonic-gate 	}
2120Sstevel@tonic-gate 
2130Sstevel@tonic-gate 	mutex_enter(&cip->ci_wqe_mutex);
2140Sstevel@tonic-gate 	wqe_mgt = cip->ci_wqe_mgt_list;
2150Sstevel@tonic-gate 
2160Sstevel@tonic-gate 	/* Look for the WQE management struct that includes this address */
2170Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
2180Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
2190Sstevel@tonic-gate 		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
2200Sstevel@tonic-gate 		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
2210Sstevel@tonic-gate 		    wqe_mgt->wqes_kmem_sz))) {
2220Sstevel@tonic-gate 			mutex_exit(&wqe_mgt->wqes_mutex);
2230Sstevel@tonic-gate 			break;
2240Sstevel@tonic-gate 		}
2250Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
2260Sstevel@tonic-gate 		wqe_mgt = wqe_mgt->wqe_mgt_next;
2270Sstevel@tonic-gate 	}
2280Sstevel@tonic-gate 
2290Sstevel@tonic-gate 	if (wqe_mgt == NULL) {
2300Sstevel@tonic-gate 		mutex_exit(&cip->ci_wqe_mutex);
231*7354SGiri.Adari@Sun.COM 		vmem_free(cip->ci_wqe_ib_vmem,
232*7354SGiri.Adari@Sun.COM 		    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
2330Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
2340Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
2350Sstevel@tonic-gate 		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
2360Sstevel@tonic-gate 		    "Address not found in WQE mgt list");
2370Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
2380Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
2390Sstevel@tonic-gate 		    "ibmf_recv_wqe_cache_constructor() exit\n");
2400Sstevel@tonic-gate 		return (-1);
2410Sstevel@tonic-gate 	}
2420Sstevel@tonic-gate 
2430Sstevel@tonic-gate 	mutex_enter(&wqe_mgt->wqes_mutex);
2440Sstevel@tonic-gate 
2450Sstevel@tonic-gate 	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
2460Sstevel@tonic-gate 	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
2470Sstevel@tonic-gate 	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
2480Sstevel@tonic-gate 	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
2490Sstevel@tonic-gate 	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
2500Sstevel@tonic-gate 	recv_wqe->recv_wqe_next = NULL;
2510Sstevel@tonic-gate 	recv_wqe->recv_msg = NULL;
2520Sstevel@tonic-gate 	recv_wqe->recv_wqe_flags = 0;
2530Sstevel@tonic-gate 
2540Sstevel@tonic-gate 	mutex_exit(&wqe_mgt->wqes_mutex);
2550Sstevel@tonic-gate 	mutex_exit(&cip->ci_wqe_mutex);
2560Sstevel@tonic-gate 
2570Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
2580Sstevel@tonic-gate 	    ibmf_i_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
2590Sstevel@tonic-gate 	    "ibmf_recv_wqe_cache_constructor() exit\n");
2600Sstevel@tonic-gate 
2610Sstevel@tonic-gate 	return (0);
2620Sstevel@tonic-gate }
2630Sstevel@tonic-gate 
2640Sstevel@tonic-gate /*
2650Sstevel@tonic-gate  * ibmf_recv_wqe_cache_destructor():
2660Sstevel@tonic-gate  *	Destructor for receive WQE kmem cache for special QPs
2670Sstevel@tonic-gate  */
2680Sstevel@tonic-gate /* ARGSUSED */
2690Sstevel@tonic-gate static void
2700Sstevel@tonic-gate ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg)
2710Sstevel@tonic-gate {
2720Sstevel@tonic-gate 	ibmf_recv_wqe_t		*recv_wqe = (ibmf_recv_wqe_t *)buf;
2730Sstevel@tonic-gate 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
2740Sstevel@tonic-gate 
2750Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
2760Sstevel@tonic-gate 	    ibmf_i_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
2770Sstevel@tonic-gate 	    "ibmf_recv_wqe_cache_destructor() enter, buf = %p, cdrarg = %p\n",
2780Sstevel@tonic-gate 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
2790Sstevel@tonic-gate 
2800Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
2810Sstevel@tonic-gate 
2820Sstevel@tonic-gate 	/* Free the vmem allocated for the WQE */
2830Sstevel@tonic-gate 	vmem_free(cip->ci_wqe_ib_vmem,
2840Sstevel@tonic-gate 	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
2850Sstevel@tonic-gate 	recv_wqe->recv_mem = NULL;
2860Sstevel@tonic-gate 
2870Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
2880Sstevel@tonic-gate 	    ibmf_i_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
2890Sstevel@tonic-gate 	    "ibmf_recv_wqe_cache_destructor() exit\n");
2900Sstevel@tonic-gate }
2910Sstevel@tonic-gate 
2920Sstevel@tonic-gate /*
2930Sstevel@tonic-gate  * ibmf_altqp_send_wqe_cache_constructor():
2940Sstevel@tonic-gate  *	Constructor for the kmem cache used for send WQEs for alternate QPs
2950Sstevel@tonic-gate  */
2960Sstevel@tonic-gate /* ARGSUSED */
2970Sstevel@tonic-gate int
2980Sstevel@tonic-gate ibmf_altqp_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
2990Sstevel@tonic-gate {
3000Sstevel@tonic-gate 	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
3010Sstevel@tonic-gate 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
3020Sstevel@tonic-gate 	ibmf_wqe_mgt_t	*wqe_mgt;
3030Sstevel@tonic-gate 
3040Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
3050Sstevel@tonic-gate 	    ibmf_altqp_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
3060Sstevel@tonic-gate 	    "ibmf_altqp_send_wqe_cache_constructor() enter, buf = %p, "
3070Sstevel@tonic-gate 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
3080Sstevel@tonic-gate 
3090Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
3100Sstevel@tonic-gate 
3110Sstevel@tonic-gate 	/* initialize send WQE context */
3120Sstevel@tonic-gate 	send_wqe->send_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
3130Sstevel@tonic-gate 	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
3140Sstevel@tonic-gate 	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
3150Sstevel@tonic-gate 	if (send_wqe->send_sg_mem == NULL) {
3160Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
3170Sstevel@tonic-gate 		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
3180Sstevel@tonic-gate 		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
3190Sstevel@tonic-gate 		    tnf_string, msg, "Failed vmem allocation in "
3200Sstevel@tonic-gate 		    "alternate QP send WQE cache constructor");
3210Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
3220Sstevel@tonic-gate 		    ibmf_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE,
3230Sstevel@tonic-gate 		    "", "ibmf_altqp_send_wqe_cache_constructor() exit\n");
3240Sstevel@tonic-gate 		return (-1);
3250Sstevel@tonic-gate 	}
3260Sstevel@tonic-gate 
3270Sstevel@tonic-gate 	mutex_enter(&qp_ctx->isq_wqe_mutex);
3280Sstevel@tonic-gate 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
3290Sstevel@tonic-gate 
3300Sstevel@tonic-gate 	/* Look for the WQE management struct that includes this address */
3310Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
3320Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
3330Sstevel@tonic-gate 		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
3340Sstevel@tonic-gate 		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
3350Sstevel@tonic-gate 		    wqe_mgt->wqes_kmem_sz))) {
3360Sstevel@tonic-gate 			mutex_exit(&wqe_mgt->wqes_mutex);
3370Sstevel@tonic-gate 			break;
3380Sstevel@tonic-gate 		}
3390Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
3400Sstevel@tonic-gate 		wqe_mgt = wqe_mgt->wqe_mgt_next;
3410Sstevel@tonic-gate 	}
3420Sstevel@tonic-gate 
3430Sstevel@tonic-gate 	if (wqe_mgt == NULL) {
3440Sstevel@tonic-gate 		mutex_exit(&qp_ctx->isq_wqe_mutex);
345*7354SGiri.Adari@Sun.COM 		vmem_free(qp_ctx->isq_wqe_ib_vmem,
346*7354SGiri.Adari@Sun.COM 		    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
3470Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
3480Sstevel@tonic-gate 		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
3490Sstevel@tonic-gate 		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
3500Sstevel@tonic-gate 		    tnf_string, msg, "Address not found in WQE mgt list");
3510Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
3520Sstevel@tonic-gate 		    ibmf_altqp_send_wqe_cache_constructor_end,
3530Sstevel@tonic-gate 		    IBMF_TNF_TRACE, "",
3540Sstevel@tonic-gate 		    "ibmf_altqp_send_wqe_cache_constructor() exit\n");
3550Sstevel@tonic-gate 		return (-1);
3560Sstevel@tonic-gate 	}
3570Sstevel@tonic-gate 
3580Sstevel@tonic-gate 	mutex_enter(&wqe_mgt->wqes_mutex);
3590Sstevel@tonic-gate 
3600Sstevel@tonic-gate 	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
3610Sstevel@tonic-gate 	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
3620Sstevel@tonic-gate 	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
3630Sstevel@tonic-gate 	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
3640Sstevel@tonic-gate 	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
3650Sstevel@tonic-gate 	send_wqe->send_wqe_flags = 0;
3660Sstevel@tonic-gate 
3670Sstevel@tonic-gate 	mutex_exit(&wqe_mgt->wqes_mutex);
3680Sstevel@tonic-gate 	mutex_exit(&qp_ctx->isq_wqe_mutex);
3690Sstevel@tonic-gate 
3700Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
3710Sstevel@tonic-gate 	    ibmf_i_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
3720Sstevel@tonic-gate 	    "ibmf_altqp_send_wqe_cache_constructor() exit\n");
3730Sstevel@tonic-gate 
3740Sstevel@tonic-gate 	return (0);
3750Sstevel@tonic-gate }
3760Sstevel@tonic-gate 
3770Sstevel@tonic-gate /*
3780Sstevel@tonic-gate  * ibmf_altqp_send_wqe_cache_destructor():
3790Sstevel@tonic-gate  *	Destructor for send WQE kmem cache for alternate QPs
3800Sstevel@tonic-gate  */
3810Sstevel@tonic-gate /* ARGSUSED */
3820Sstevel@tonic-gate void
3830Sstevel@tonic-gate ibmf_altqp_send_wqe_cache_destructor(void *buf, void *cdrarg)
3840Sstevel@tonic-gate {
3850Sstevel@tonic-gate 	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
3860Sstevel@tonic-gate 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
3870Sstevel@tonic-gate 
3880Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
3890Sstevel@tonic-gate 	    ibmf_i_altqp_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
3900Sstevel@tonic-gate 	    "ibmf_altqp_send_wqe_cache_destructor() enter, buf = %p, "
3910Sstevel@tonic-gate 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
3920Sstevel@tonic-gate 
3930Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
3940Sstevel@tonic-gate 
3950Sstevel@tonic-gate 	/* Free the vmem allocated for the WQE */
3960Sstevel@tonic-gate 	vmem_free(qp_ctx->isq_wqe_ib_vmem,
3970Sstevel@tonic-gate 	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
3980Sstevel@tonic-gate 	send_wqe->send_mem = NULL;
3990Sstevel@tonic-gate 
4000Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
4010Sstevel@tonic-gate 	    ibmf_i_altqp_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
4020Sstevel@tonic-gate 	    "ibmf_altqp_send_wqe_cache_destructor() exit\n");
4030Sstevel@tonic-gate }
4040Sstevel@tonic-gate 
4050Sstevel@tonic-gate /*
4060Sstevel@tonic-gate  * ibmf_altqp_recv_wqe_cache_constructor():
4070Sstevel@tonic-gate  *	Constructor for receive WQE kmem cache for alternate QPs
4080Sstevel@tonic-gate  */
4090Sstevel@tonic-gate /* ARGSUSED */
4100Sstevel@tonic-gate int
4110Sstevel@tonic-gate ibmf_altqp_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
4120Sstevel@tonic-gate {
4130Sstevel@tonic-gate 	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
4140Sstevel@tonic-gate 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
4150Sstevel@tonic-gate 	ibmf_wqe_mgt_t	*wqe_mgt;
4160Sstevel@tonic-gate 
4170Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
4180Sstevel@tonic-gate 	    ibmf_i_altqp_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
4190Sstevel@tonic-gate 	    "ibmf_altqp_recv_wqe_cache_constructor() enter, buf = %p, "
4200Sstevel@tonic-gate 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
4210Sstevel@tonic-gate 
4220Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
4230Sstevel@tonic-gate 
4240Sstevel@tonic-gate 	/* initialize recv WQE context */
4250Sstevel@tonic-gate 	recv_wqe->recv_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
4260Sstevel@tonic-gate 	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
4270Sstevel@tonic-gate 	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
4280Sstevel@tonic-gate 	if (recv_wqe->recv_sg_mem == NULL) {
4290Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
4300Sstevel@tonic-gate 		    ibmf_altqp_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR,
4310Sstevel@tonic-gate 		    "", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
4320Sstevel@tonic-gate 		    tnf_string, msg,
4330Sstevel@tonic-gate 		    "Failed vmem allocation in recv WQE cache constructor");
4340Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
4350Sstevel@tonic-gate 		    ibmf_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE,
4360Sstevel@tonic-gate 		    "", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
4370Sstevel@tonic-gate 		return (-1);
4380Sstevel@tonic-gate 	}
4390Sstevel@tonic-gate 
4400Sstevel@tonic-gate 	mutex_enter(&qp_ctx->isq_wqe_mutex);
4410Sstevel@tonic-gate 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
4420Sstevel@tonic-gate 
4430Sstevel@tonic-gate 	/* Look for the WQE management struct that includes this address */
4440Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
4450Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
4460Sstevel@tonic-gate 		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
4470Sstevel@tonic-gate 		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
4480Sstevel@tonic-gate 		    wqe_mgt->wqes_kmem_sz))) {
4490Sstevel@tonic-gate 			mutex_exit(&wqe_mgt->wqes_mutex);
4500Sstevel@tonic-gate 			break;
4510Sstevel@tonic-gate 		}
4520Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
4530Sstevel@tonic-gate 		wqe_mgt = wqe_mgt->wqe_mgt_next;
4540Sstevel@tonic-gate 	}
4550Sstevel@tonic-gate 
4560Sstevel@tonic-gate 	if (wqe_mgt == NULL) {
4570Sstevel@tonic-gate 		mutex_exit(&qp_ctx->isq_wqe_mutex);
458*7354SGiri.Adari@Sun.COM 		vmem_free(qp_ctx->isq_wqe_ib_vmem,
459*7354SGiri.Adari@Sun.COM 		    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
4600Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
4610Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
4620Sstevel@tonic-gate 		    "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
4630Sstevel@tonic-gate 		    tnf_string, msg, "Address not found in WQE mgt list");
4640Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
4650Sstevel@tonic-gate 		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
4660Sstevel@tonic-gate 		    "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
4670Sstevel@tonic-gate 		return (-1);
4680Sstevel@tonic-gate 	}
4690Sstevel@tonic-gate 
4700Sstevel@tonic-gate 	mutex_enter(&wqe_mgt->wqes_mutex);
4710Sstevel@tonic-gate 
4720Sstevel@tonic-gate 	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
4730Sstevel@tonic-gate 	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
4740Sstevel@tonic-gate 	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
4750Sstevel@tonic-gate 	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
4760Sstevel@tonic-gate 	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
4770Sstevel@tonic-gate 	recv_wqe->recv_wqe_flags = 0;
4780Sstevel@tonic-gate 
4790Sstevel@tonic-gate 	mutex_exit(&wqe_mgt->wqes_mutex);
4800Sstevel@tonic-gate 	mutex_exit(&qp_ctx->isq_wqe_mutex);
4810Sstevel@tonic-gate 
4820Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
4830Sstevel@tonic-gate 	    ibmf_i_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
4840Sstevel@tonic-gate 	    "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
4850Sstevel@tonic-gate 
4860Sstevel@tonic-gate 	return (0);
4870Sstevel@tonic-gate }
4880Sstevel@tonic-gate 
4890Sstevel@tonic-gate /*
4900Sstevel@tonic-gate  * ibmf_altqp_recv_wqe_cache_destructor():
4910Sstevel@tonic-gate  *	Destructor for receive WQE kmem cache for alternate QPs
4920Sstevel@tonic-gate  */
4930Sstevel@tonic-gate /* ARGSUSED */
4940Sstevel@tonic-gate void
4950Sstevel@tonic-gate ibmf_altqp_recv_wqe_cache_destructor(void *buf, void *cdrarg)
4960Sstevel@tonic-gate {
4970Sstevel@tonic-gate 	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
4980Sstevel@tonic-gate 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
4990Sstevel@tonic-gate 
5000Sstevel@tonic-gate 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
5010Sstevel@tonic-gate 	    ibmf_i_altqp_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
5020Sstevel@tonic-gate 	    "ibmf_altqp_recv_wqe_cache_destructor() enter, buf = %p, "
5030Sstevel@tonic-gate 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
5040Sstevel@tonic-gate 
5050Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
5060Sstevel@tonic-gate 
5070Sstevel@tonic-gate 	/* Free the vmem allocated for the WQE */
5080Sstevel@tonic-gate 	vmem_free(qp_ctx->isq_wqe_ib_vmem,
5090Sstevel@tonic-gate 	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
5100Sstevel@tonic-gate 	recv_wqe->recv_mem = NULL;
5110Sstevel@tonic-gate 
5120Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
5130Sstevel@tonic-gate 	    ibmf_i_altqp_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
5140Sstevel@tonic-gate 	    "ibmf_altqp_recv_wqe_cache_destructor() exit\n");
5150Sstevel@tonic-gate }
5160Sstevel@tonic-gate 
5170Sstevel@tonic-gate /*
5180Sstevel@tonic-gate  * ibmf_i_init_wqes():
5190Sstevel@tonic-gate  *	Create the kmem cache for send and receive WQEs
5200Sstevel@tonic-gate  */
5210Sstevel@tonic-gate int
5220Sstevel@tonic-gate ibmf_i_init_wqes(ibmf_ci_t *cip)
5230Sstevel@tonic-gate {
5240Sstevel@tonic-gate 	ibt_status_t		status;
5250Sstevel@tonic-gate 	ibt_mr_hdl_t		mem_hdl;
5260Sstevel@tonic-gate 	ibt_mr_desc_t		mem_desc;
5270Sstevel@tonic-gate 	ibt_mr_attr_t		mem_attr;
5280Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*wqe_mgtp;
5290Sstevel@tonic-gate 	char			string[128];
5300Sstevel@tonic-gate 
5310Sstevel@tonic-gate 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_start,
5320Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() enter, cip = %p\n",
5330Sstevel@tonic-gate 	    tnf_opaque, cip, cip);
5340Sstevel@tonic-gate 
5350Sstevel@tonic-gate 	/*
5360Sstevel@tonic-gate 	 * Allocate memory for the WQE management structure
5370Sstevel@tonic-gate 	 */
5380Sstevel@tonic-gate 	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
5390Sstevel@tonic-gate 	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
5400Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))
5410Sstevel@tonic-gate 
5420Sstevel@tonic-gate 	/*
5430Sstevel@tonic-gate 	 * Allocate memory for the WQEs to be used by the special QPs on this CI
5440Sstevel@tonic-gate 	 * There are two special QPs per CI port
5450Sstevel@tonic-gate 	 */
5460Sstevel@tonic-gate 	wqe_mgtp->wqes_kmem_sz = cip->ci_nports * 2 *
5470Sstevel@tonic-gate 	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
5480Sstevel@tonic-gate 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
5490Sstevel@tonic-gate 	wqe_mgtp->wqes_kmem =
5500Sstevel@tonic-gate 	    kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);
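
	/*
	 * Sizing sketch: each WQE gets IBMF_MEM_PER_WQE bytes of registered
	 * memory, and each of the two special QPs (QP0 and QP1) on every CI
	 * port gets its own set of send and receive WQEs.  Purely as an
	 * illustration, a 2-port HCA with N_s send and N_r receive WQEs per
	 * port needs 2 * 2 * (N_s + N_r) * IBMF_MEM_PER_WQE bytes here.
	 */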
5510Sstevel@tonic-gate 
5520Sstevel@tonic-gate 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
5530Sstevel@tonic-gate 	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
5540Sstevel@tonic-gate 	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
5550Sstevel@tonic-gate 	mem_attr.mr_as = NULL;
5560Sstevel@tonic-gate 
5570Sstevel@tonic-gate 	/* Register the allocated memory */
5580Sstevel@tonic-gate 	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd, &mem_attr,
5590Sstevel@tonic-gate 	    &mem_hdl, &mem_desc);
5600Sstevel@tonic-gate 	if (status != IBT_SUCCESS) {
5610Sstevel@tonic-gate 		kmem_free(wqe_mgtp->wqes_kmem,
5620Sstevel@tonic-gate 		    wqe_mgtp->wqes_kmem_sz);
5630Sstevel@tonic-gate 		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
5640Sstevel@tonic-gate 		    ibmf_i_init_wqes_err, IBMF_TNF_ERROR, "",
5650Sstevel@tonic-gate 		    "ibmf_i_init_wqes(): %s, status = %d\n", tnf_string, msg,
5660Sstevel@tonic-gate 		    "register of WQE mem failed", tnf_uint, status, status);
5670Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
5680Sstevel@tonic-gate 		    ibmf_i_init_wqes_end, IBMF_TNF_TRACE, "",
5690Sstevel@tonic-gate 		    "ibmf_i_init_wqes() exit\n");
5700Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
5710Sstevel@tonic-gate 	}
5720Sstevel@tonic-gate 
5730Sstevel@tonic-gate 	/* Store the memory registration information */
5740Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
5750Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
5760Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;
5770Sstevel@tonic-gate 
5780Sstevel@tonic-gate 	/* Create a vmem arena for the IB virtual address space */
5790Sstevel@tonic-gate 	bzero(string, 128);
5800Sstevel@tonic-gate 	(void) sprintf(string, "ibmf_%016" PRIx64 "_wqes", cip->ci_node_guid);
5810Sstevel@tonic-gate 	cip->ci_wqe_ib_vmem = vmem_create(string,
5820Sstevel@tonic-gate 	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
5830Sstevel@tonic-gate 	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);
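
	/*
	 * The arena created above spans exactly the registered IB virtual
	 * address range, with an 8-byte (sizeof (uint64_t)) quantum.  The
	 * kmem cache constructors and destructors in this file carve
	 * per-WQE chunks out of it and return them, always in
	 * IBMF_MEM_PER_WQE units, e.g.:
	 *
	 *	va = vmem_alloc(cip->ci_wqe_ib_vmem, IBMF_MEM_PER_WQE,
	 *	    VM_SLEEP);
	 *	...
	 *	vmem_free(cip->ci_wqe_ib_vmem, va, IBMF_MEM_PER_WQE);
	 */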
5840Sstevel@tonic-gate 
5850Sstevel@tonic-gate 	mutex_enter(&cip->ci_wqe_mutex);
5860Sstevel@tonic-gate 	cip->ci_wqe_mgt_list = wqe_mgtp;
5870Sstevel@tonic-gate 	mutex_exit(&cip->ci_wqe_mutex);
5880Sstevel@tonic-gate 
5896819Stomee 	bzero(string, 128);
5906819Stomee 	(void) sprintf(string, "ibmf_%016" PRIx64 "_swqe", cip->ci_node_guid);
5916819Stomee 	/* create a kmem cache for the send WQEs */
5926819Stomee 	cip->ci_send_wqes_cache = kmem_cache_create(string,
5936819Stomee 	    sizeof (ibmf_send_wqe_t), 0, ibmf_send_wqe_cache_constructor,
5946819Stomee 	    ibmf_send_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);
5956819Stomee 
5966819Stomee 	bzero(string, 128);
5976819Stomee 	(void) sprintf(string, "ibmf_%016" PRIx64 "_rwqe", cip->ci_node_guid);
5986819Stomee 	/* create a kmem cache for the receive WQEs */
5996819Stomee 	cip->ci_recv_wqes_cache = kmem_cache_create(string,
6006819Stomee 	    sizeof (ibmf_recv_wqe_t), 0, ibmf_recv_wqe_cache_constructor,
6016819Stomee 	    ibmf_recv_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);
6026819Stomee 
6030Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_end,
6040Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() exit\n");
6050Sstevel@tonic-gate 
6060Sstevel@tonic-gate 	return (IBMF_SUCCESS);
6070Sstevel@tonic-gate }
6080Sstevel@tonic-gate 
6090Sstevel@tonic-gate /*
6100Sstevel@tonic-gate  * ibmf_i_fini_wqes():
6110Sstevel@tonic-gate  *	Destroy the kmem cache for send and receive WQEs
6120Sstevel@tonic-gate  */
6130Sstevel@tonic-gate void
6140Sstevel@tonic-gate ibmf_i_fini_wqes(ibmf_ci_t *cip)
6150Sstevel@tonic-gate {
6160Sstevel@tonic-gate 	ibmf_wqe_mgt_t	*wqe_mgt;
6170Sstevel@tonic-gate 	ibt_mr_hdl_t	wqe_ib_mem_hdl;
6180Sstevel@tonic-gate 	void		*wqe_kmem;
6190Sstevel@tonic-gate 	uint64_t	wqe_kmem_sz;
6200Sstevel@tonic-gate 
6210Sstevel@tonic-gate 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_start,
6220Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() enter, cip = %p\n",
6230Sstevel@tonic-gate 	    tnf_opaque, cip, cip);
6240Sstevel@tonic-gate 
6250Sstevel@tonic-gate 	mutex_enter(&cip->ci_wqe_mutex);
6260Sstevel@tonic-gate 
6270Sstevel@tonic-gate 	wqe_mgt = cip->ci_wqe_mgt_list;
6280Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
6290Sstevel@tonic-gate 		/* Remove the WQE mgt struct from the list */
6300Sstevel@tonic-gate 		cip->ci_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
6310Sstevel@tonic-gate 		mutex_exit(&cip->ci_wqe_mutex);
6320Sstevel@tonic-gate 
6330Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
6340Sstevel@tonic-gate 		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
6350Sstevel@tonic-gate 		wqe_kmem = wqe_mgt->wqes_kmem;
6360Sstevel@tonic-gate 		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
6370Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
6380Sstevel@tonic-gate 
6390Sstevel@tonic-gate 		/* Deregister the memory allocated for the WQEs */
6400Sstevel@tonic-gate 		(void) ibt_deregister_mr(cip->ci_ci_handle, wqe_ib_mem_hdl);
6410Sstevel@tonic-gate 
6420Sstevel@tonic-gate 		/* Free the kmem allocated for the WQEs */
6430Sstevel@tonic-gate 		kmem_free(wqe_kmem, wqe_kmem_sz);
6440Sstevel@tonic-gate 
6450Sstevel@tonic-gate 		/* Destroy the mutex */
6460Sstevel@tonic-gate 		mutex_destroy(&wqe_mgt->wqes_mutex);
6470Sstevel@tonic-gate 
6480Sstevel@tonic-gate 		/* Free the WQE management structure */
6490Sstevel@tonic-gate 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
6500Sstevel@tonic-gate 
6510Sstevel@tonic-gate 		mutex_enter(&cip->ci_wqe_mutex);
6520Sstevel@tonic-gate 		wqe_mgt = cip->ci_wqe_mgt_list;
6530Sstevel@tonic-gate 	}
6540Sstevel@tonic-gate 
6550Sstevel@tonic-gate 	mutex_exit(&cip->ci_wqe_mutex);
6560Sstevel@tonic-gate 
6570Sstevel@tonic-gate 	/* Destroy the kmem_cache for the send WQE */
6580Sstevel@tonic-gate 	kmem_cache_destroy(cip->ci_send_wqes_cache);
6590Sstevel@tonic-gate 	/* Destroy the kmem_cache for the receive WQE */
6600Sstevel@tonic-gate 	kmem_cache_destroy(cip->ci_recv_wqes_cache);
6610Sstevel@tonic-gate 
6620Sstevel@tonic-gate 	/*
6630Sstevel@tonic-gate 	 * Destroy the vmem arena for the WQEs
6640Sstevel@tonic-gate 	 * This must be done after the kmem_cache_destroy() calls since
6650Sstevel@tonic-gate 	 * the cache destructors call vmem_free()
6660Sstevel@tonic-gate 	 */
6670Sstevel@tonic-gate 	vmem_destroy((void *)cip->ci_wqe_ib_vmem);
6680Sstevel@tonic-gate 
6690Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_end,
6700Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() exit\n");
6710Sstevel@tonic-gate }
6720Sstevel@tonic-gate 
6730Sstevel@tonic-gate /*
6740Sstevel@tonic-gate  * ibmf_i_init_altqp_wqes():
6750Sstevel@tonic-gate  *	Create the kmem cache for send and receive WQEs used by alternate QPs
6760Sstevel@tonic-gate  */
6770Sstevel@tonic-gate int
6780Sstevel@tonic-gate ibmf_i_init_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
6790Sstevel@tonic-gate {
6800Sstevel@tonic-gate 	ibt_status_t		status;
6810Sstevel@tonic-gate 	ibt_mr_hdl_t		mem_hdl;
6820Sstevel@tonic-gate 	ibt_mr_desc_t		mem_desc;
6830Sstevel@tonic-gate 	ibt_mr_attr_t		mem_attr;
6840Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*wqe_mgtp;
6850Sstevel@tonic-gate 	char			string[128];
6860Sstevel@tonic-gate 
6870Sstevel@tonic-gate 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_start,
6880Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() enter, qp_ctx = %p\n",
6890Sstevel@tonic-gate 	    tnf_opaque, qp, qp_ctx);
6900Sstevel@tonic-gate 
6910Sstevel@tonic-gate 	/*
6920Sstevel@tonic-gate 	 * Allocate memory for the WQE management structure
6930Sstevel@tonic-gate 	 */
6940Sstevel@tonic-gate 	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
6950Sstevel@tonic-gate 	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
6960Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))
6970Sstevel@tonic-gate 
6980Sstevel@tonic-gate 	/*
6990Sstevel@tonic-gate 	 * Allocate memory for all the WQEs to be used by this alternate QP
7000Sstevel@tonic-gate 	 */
7010Sstevel@tonic-gate 	wqe_mgtp->wqes_kmem_sz = (IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
7020Sstevel@tonic-gate 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port);
7030Sstevel@tonic-gate 	wqe_mgtp->wqes_kmem = kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);
7040Sstevel@tonic-gate 
7050Sstevel@tonic-gate 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
7060Sstevel@tonic-gate 	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
7070Sstevel@tonic-gate 	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
7080Sstevel@tonic-gate 	mem_attr.mr_as = NULL;
7090Sstevel@tonic-gate 
7100Sstevel@tonic-gate 	/* Register the allocated memory */
7110Sstevel@tonic-gate 	status = ibt_register_mr(qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
7120Sstevel@tonic-gate 	    qp_ctx->isq_client_hdl->ic_myci->ci_pd, &mem_attr, &mem_hdl,
7130Sstevel@tonic-gate 	    &mem_desc);
7140Sstevel@tonic-gate 	if (status != IBT_SUCCESS) {
7150Sstevel@tonic-gate 		kmem_free(wqe_mgtp->wqes_kmem, wqe_mgtp->wqes_kmem_sz);
7160Sstevel@tonic-gate 		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
7170Sstevel@tonic-gate 		    ibmf_i_init_altqp_wqes_err, IBMF_TNF_ERROR, "",
7180Sstevel@tonic-gate 		    "ibmf_i_init_altqp_wqes(): %s, status = %d\n",
7190Sstevel@tonic-gate 		    tnf_string, msg,
7200Sstevel@tonic-gate 		    "register of WQE mem failed", tnf_uint, status, status);
7210Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
7220Sstevel@tonic-gate 		    ibmf_i_init_altqp_wqes_end, IBMF_TNF_TRACE, "",
7230Sstevel@tonic-gate 		    "ibmf_i_init_altqp_wqes() exit\n");
7240Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
7250Sstevel@tonic-gate 	}
7260Sstevel@tonic-gate 
7270Sstevel@tonic-gate 	/* Store the memory registration information */
7280Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
7290Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
7300Sstevel@tonic-gate 	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;
7310Sstevel@tonic-gate 
7320Sstevel@tonic-gate 	/* Create a vmem arena for the IB virtual address space */
7330Sstevel@tonic-gate 	bzero(string, 128);
7340Sstevel@tonic-gate 	(void) sprintf(string, "ibmf_%016" PRIx64 "_%x_wqes",
7350Sstevel@tonic-gate 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
7360Sstevel@tonic-gate 	qp_ctx->isq_wqe_ib_vmem = vmem_create(string,
7370Sstevel@tonic-gate 	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
7380Sstevel@tonic-gate 	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);
7390Sstevel@tonic-gate 
7406862Sshepler 	bzero(string, 128);
7416862Sshepler 	/*
7426862Sshepler 	 * CAUTION: Do not exceed 32 characters for the kmem cache name, else,
7436862Sshepler 	 * mdb does not exit (bug 4878751). There is some connection between
7446862Sshepler 	 * mdb walkers and kmem_caches with the limitation likely to be in the
7456862Sshepler 	 * mdb code.
7466862Sshepler 	 */
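	/*
	 * The name built below stays within that limit: "ibmf" (4 chars) +
	 * 16 hex digits of the client GUID + "_" + the QP number in hex
	 * (at most 6 digits for a 24-bit QPN) + a trailing "s" or "r" comes
	 * to at most 28 characters plus the terminating NUL.
	 */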
7476862Sshepler 	(void) sprintf(string, "ibmf%016" PRIx64 "_%xs",
7486862Sshepler 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
7496862Sshepler 	/* create a kmem cache for the send WQEs */
7506862Sshepler 	qp_ctx->isq_send_wqes_cache = kmem_cache_create(string,
7516862Sshepler 	    sizeof (ibmf_send_wqe_t), 0, ibmf_altqp_send_wqe_cache_constructor,
7526862Sshepler 	    ibmf_altqp_send_wqe_cache_destructor, NULL, (void *)qp_ctx,
7536862Sshepler 	    NULL, 0);
7546862Sshepler 
7556862Sshepler 	bzero(string, 128);
7566862Sshepler 	(void) sprintf(string, "ibmf%016" PRIx64 "_%xr",
7576862Sshepler 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
7586862Sshepler 	/* create a kmem cache for the receive WQEs */
7596862Sshepler 	qp_ctx->isq_recv_wqes_cache = kmem_cache_create(string,
7606862Sshepler 	    sizeof (ibmf_recv_wqe_t), 0, ibmf_altqp_recv_wqe_cache_constructor,
7616862Sshepler 	    ibmf_altqp_recv_wqe_cache_destructor, NULL, (void *)qp_ctx,
7626862Sshepler 	    NULL, 0);
7636862Sshepler 
7640Sstevel@tonic-gate 	mutex_enter(&qp_ctx->isq_wqe_mutex);
7650Sstevel@tonic-gate 	qp_ctx->isq_wqe_mgt_list = wqe_mgtp;
7660Sstevel@tonic-gate 	mutex_exit(&qp_ctx->isq_wqe_mutex);
7670Sstevel@tonic-gate 
7680Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_end,
7690Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() exit\n");
7700Sstevel@tonic-gate 
7710Sstevel@tonic-gate 	return (IBMF_SUCCESS);
7720Sstevel@tonic-gate }
7730Sstevel@tonic-gate 
7740Sstevel@tonic-gate /*
7750Sstevel@tonic-gate  * ibmf_i_fini_altqp_wqes():
7760Sstevel@tonic-gate  *	Destroy the kmem cache for send and receive WQEs for alternate QPs
7770Sstevel@tonic-gate  */
7780Sstevel@tonic-gate void
7790Sstevel@tonic-gate ibmf_i_fini_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
7800Sstevel@tonic-gate {
7810Sstevel@tonic-gate 	ibmf_wqe_mgt_t	*wqe_mgt;
7820Sstevel@tonic-gate 	ibt_mr_hdl_t	wqe_ib_mem_hdl;
7830Sstevel@tonic-gate 	void		*wqe_kmem;
7840Sstevel@tonic-gate 	uint64_t	wqe_kmem_sz;
7850Sstevel@tonic-gate 
7860Sstevel@tonic-gate 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_start,
7870Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() enter, qp_ctx = %p\n",
7880Sstevel@tonic-gate 	    tnf_opaque, qp, qp_ctx);
7890Sstevel@tonic-gate 
7900Sstevel@tonic-gate 	mutex_enter(&qp_ctx->isq_wqe_mutex);
7910Sstevel@tonic-gate 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
7920Sstevel@tonic-gate 	while (wqe_mgt != NULL) {
7930Sstevel@tonic-gate 		/* Remove the WQE mgt struct from the list */
7940Sstevel@tonic-gate 		qp_ctx->isq_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
7950Sstevel@tonic-gate 		mutex_exit(&qp_ctx->isq_wqe_mutex);
7960Sstevel@tonic-gate 
7970Sstevel@tonic-gate 		mutex_enter(&wqe_mgt->wqes_mutex);
7980Sstevel@tonic-gate 		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
7990Sstevel@tonic-gate 		wqe_kmem = wqe_mgt->wqes_kmem;
8000Sstevel@tonic-gate 		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
8010Sstevel@tonic-gate 		mutex_exit(&wqe_mgt->wqes_mutex);
8020Sstevel@tonic-gate 
8030Sstevel@tonic-gate 		/* Deregister the memory allocated for the WQEs */
8040Sstevel@tonic-gate 		(void) ibt_deregister_mr(
8050Sstevel@tonic-gate 		    qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
8060Sstevel@tonic-gate 		    wqe_ib_mem_hdl);
8070Sstevel@tonic-gate 
8080Sstevel@tonic-gate 		/* Free the kmem allocated for the WQEs */
8090Sstevel@tonic-gate 		kmem_free(wqe_kmem, wqe_kmem_sz);
8100Sstevel@tonic-gate 
8110Sstevel@tonic-gate 		/* Destroy the WQE mgt struct mutex */
8120Sstevel@tonic-gate 		mutex_destroy(&wqe_mgt->wqes_mutex);
8130Sstevel@tonic-gate 
8140Sstevel@tonic-gate 		/* Free the WQE management structure */
8150Sstevel@tonic-gate 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
8160Sstevel@tonic-gate 
8170Sstevel@tonic-gate 		mutex_enter(&qp_ctx->isq_wqe_mutex);
8180Sstevel@tonic-gate 		wqe_mgt = qp_ctx->isq_wqe_mgt_list;
8190Sstevel@tonic-gate 	}
8200Sstevel@tonic-gate 
8210Sstevel@tonic-gate 	mutex_exit(&qp_ctx->isq_wqe_mutex);
8220Sstevel@tonic-gate 
8230Sstevel@tonic-gate 	/* Destroy the kmem_cache for the send WQE */
8240Sstevel@tonic-gate 	kmem_cache_destroy(qp_ctx->isq_send_wqes_cache);
8250Sstevel@tonic-gate 	/* Destroy the kmem_cache for the receive WQE */
8260Sstevel@tonic-gate 	kmem_cache_destroy(qp_ctx->isq_recv_wqes_cache);
8270Sstevel@tonic-gate 
8280Sstevel@tonic-gate 	/*
8290Sstevel@tonic-gate 	 * Destroy the vmem arena for the WQEs
8300Sstevel@tonic-gate 	 * This must be done after the kmem_cache_destroy() calls since
8310Sstevel@tonic-gate 	 * the cache destructors call vmem_free()
8320Sstevel@tonic-gate 	 */
8330Sstevel@tonic-gate 	vmem_destroy((void *)qp_ctx->isq_wqe_ib_vmem);
8340Sstevel@tonic-gate 
8350Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_end,
8360Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() exit\n");
8370Sstevel@tonic-gate }
8380Sstevel@tonic-gate 
8390Sstevel@tonic-gate /*
8400Sstevel@tonic-gate  * ibmf_i_init_send_wqe():
8410Sstevel@tonic-gate  *	Initialize a send WQE
8420Sstevel@tonic-gate  */
8430Sstevel@tonic-gate /* ARGSUSED */
8440Sstevel@tonic-gate void
8450Sstevel@tonic-gate ibmf_i_init_send_wqe(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
8460Sstevel@tonic-gate     ibt_wr_ds_t *sglp, ibmf_send_wqe_t *wqep, ibt_ud_dest_hdl_t ud_dest,
8470Sstevel@tonic-gate     ibt_qp_hdl_t ibt_qp_handle, ibmf_qp_handle_t ibmf_qp_handle)
8480Sstevel@tonic-gate {
8490Sstevel@tonic-gate 	ibmf_msg_bufs_t	*ipbufs = &msgimplp->im_msgbufs_send;
8500Sstevel@tonic-gate 	ibmf_msg_bufs_t	*hdr_ipbufs;
8510Sstevel@tonic-gate 	ib_mad_hdr_t	*ibmadhdrp;
8520Sstevel@tonic-gate 	ibmf_rmpp_ctx_t	*rmpp_ctx = &msgimplp->im_rmpp_ctx;
8530Sstevel@tonic-gate 	ibmf_rmpp_hdr_t	*rmpp_hdr;
8540Sstevel@tonic-gate 	ibt_send_wr_t	*swrp;
8550Sstevel@tonic-gate 	uchar_t		*buf;
8560Sstevel@tonic-gate 	size_t		data_sz, offset;
8570Sstevel@tonic-gate 	uint32_t	cl_hdr_sz, cl_hdr_off;
8580Sstevel@tonic-gate 
8590Sstevel@tonic-gate 	IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_start,
8600Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() enter, "
8610Sstevel@tonic-gate 	    "clientp = %p, msg = %p, sglp = %p , wqep = %p, qp_hdl = %p\n",
8620Sstevel@tonic-gate 	    tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp,
8630Sstevel@tonic-gate 	    tnf_opaque, sglp, sglp, tnf_opaque, wqep, wqep,
8640Sstevel@tonic-gate 	    tnf_opaque, qp_hdl, ibmf_qp_handle);
8650Sstevel@tonic-gate 
8660Sstevel@tonic-gate 	_NOTE(ASSUMING_PROTECTED(*wqep))
8670Sstevel@tonic-gate 	_NOTE(ASSUMING_PROTECTED(*sglp))
8680Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrp))
8690Sstevel@tonic-gate 
8700Sstevel@tonic-gate 	swrp = &wqep->send_wr;
8710Sstevel@tonic-gate 	/* use send wqe pointer as the WR ID */
8720Sstevel@tonic-gate 	IBMF_ADDR_TO_SEND_WR_ID(wqep, swrp->wr_id);
8730Sstevel@tonic-gate 	ASSERT(swrp->wr_id != NULL);
8740Sstevel@tonic-gate 	swrp->wr_flags = IBT_WR_NO_FLAGS;
8750Sstevel@tonic-gate 	swrp->wr_opcode = IBT_WRC_SEND;
8760Sstevel@tonic-gate 	swrp->wr_trans = IBT_UD_SRV;
8770Sstevel@tonic-gate 	wqep->send_client = clientp;
8780Sstevel@tonic-gate 	wqep->send_msg = msgimplp;
8790Sstevel@tonic-gate 
8800Sstevel@tonic-gate 	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->send_mem, wqep->send_sg_lkey,
8810Sstevel@tonic-gate 	    IBMF_MAD_SIZE);
8820Sstevel@tonic-gate 
8830Sstevel@tonic-gate 	bzero(wqep->send_mem, IBMF_MAD_SIZE);
8840Sstevel@tonic-gate 	if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
8850Sstevel@tonic-gate 		buf = (uchar_t *)ipbufs->im_bufs_cl_data +
8860Sstevel@tonic-gate 		    (rmpp_ctx->rmpp_ns - 1) * rmpp_ctx->rmpp_pkt_data_sz;
8870Sstevel@tonic-gate 		data_sz = (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts) ?
8880Sstevel@tonic-gate 		    rmpp_ctx->rmpp_last_pkt_sz : rmpp_ctx->rmpp_pkt_data_sz;
8890Sstevel@tonic-gate 	} else {
8900Sstevel@tonic-gate 		buf = ipbufs->im_bufs_cl_data;
8910Sstevel@tonic-gate 		data_sz = ipbufs->im_bufs_cl_data_len;
8920Sstevel@tonic-gate 	}
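
	/*
	 * RMPP segmentation sketch (hypothetical numbers, for illustration
	 * only): with a 1000-byte class payload and a per-segment data size
	 * rmpp_pkt_data_sz of 220 bytes, rmpp_num_pkts would be 5, segment
	 * rmpp_ns starts at byte offset (rmpp_ns - 1) * 220 of the client
	 * data, and the final segment carries the 120-byte remainder kept
	 * in rmpp_last_pkt_sz.
	 */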
8930Sstevel@tonic-gate 
8940Sstevel@tonic-gate 	/*
8950Sstevel@tonic-gate 	 * We pick the correct msgbuf based on the nature of the transaction.
8960Sstevel@tonic-gate 	 * Where the send msgbuf is available, we pick it to provide the
8970Sstevel@tonic-gate 	 * context of the outgoing MAD. Note that if this is a termination
8980Sstevel@tonic-gate 	 * context, then  the send buffer is invalid even if the sequenced
8990Sstevel@tonic-gate 	 * flags is set because the termination message only has a receive
9000Sstevel@tonic-gate 	 * buffer set up.
9010Sstevel@tonic-gate 	 */
9020Sstevel@tonic-gate 	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEQUENCED) &&
9030Sstevel@tonic-gate 	    ((msgimplp->im_flags & IBMF_MSG_FLAGS_TERMINATION) == 0)) {
9040Sstevel@tonic-gate 		hdr_ipbufs = &msgimplp->im_msgbufs_send;
9050Sstevel@tonic-gate 	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP) {
9060Sstevel@tonic-gate 		hdr_ipbufs = &msgimplp->im_msgbufs_recv;
9070Sstevel@tonic-gate 	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
9080Sstevel@tonic-gate 		hdr_ipbufs = &msgimplp->im_msgbufs_send;
9090Sstevel@tonic-gate 	} else {
9100Sstevel@tonic-gate 		if (msgimplp->im_unsolicited == B_TRUE) {
9110Sstevel@tonic-gate 			hdr_ipbufs = &msgimplp->im_msgbufs_recv;
9120Sstevel@tonic-gate 		} else {
9130Sstevel@tonic-gate 			hdr_ipbufs = &msgimplp->im_msgbufs_send;
9140Sstevel@tonic-gate 		}
9150Sstevel@tonic-gate 	}
9160Sstevel@tonic-gate 
9170Sstevel@tonic-gate 	bcopy((void *)hdr_ipbufs->im_bufs_mad_hdr,
9180Sstevel@tonic-gate 	    (void *)wqep->send_mem, sizeof (ib_mad_hdr_t));
9190Sstevel@tonic-gate 
9200Sstevel@tonic-gate 	/*
9210Sstevel@tonic-gate 	 * For unsolicited messages, we only have the sender's MAD at hand.
9220Sstevel@tonic-gate 	 * So, we must flip the response bit in the method for the outgoing MAD.
9230Sstevel@tonic-gate 	 */
9240Sstevel@tonic-gate 	ibmadhdrp = (ib_mad_hdr_t *)wqep->send_mem;
9250Sstevel@tonic-gate 	if (msgimplp->im_unsolicited == B_TRUE) {
9260Sstevel@tonic-gate 		ibmadhdrp->R_Method = IBMF_FLIP_RESP_BIT(ibmadhdrp->R_Method);
9270Sstevel@tonic-gate 	}
9280Sstevel@tonic-gate 
9290Sstevel@tonic-gate 	offset = sizeof (ib_mad_hdr_t);
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) ||
9320Sstevel@tonic-gate 	    (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP)) {
9330Sstevel@tonic-gate 
9340Sstevel@tonic-gate 		rmpp_hdr = (ibmf_rmpp_hdr_t *)
9350Sstevel@tonic-gate 		    ((uintptr_t)wqep->send_mem + offset);
9360Sstevel@tonic-gate 
9370Sstevel@tonic-gate 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rmpp_hdr));
9380Sstevel@tonic-gate 
9390Sstevel@tonic-gate 		IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
9400Sstevel@tonic-gate 		    IBMF_TNF_TRACE, "",
9410Sstevel@tonic-gate 		    "ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
9420Sstevel@tonic-gate 		    " next_seg = %d, num_pkts = %d\n",
9430Sstevel@tonic-gate 		    tnf_opaque, msgimplp, msgimplp,
9440Sstevel@tonic-gate 		    tnf_opaque, rmpp_type, rmpp_ctx->rmpp_type,
9450Sstevel@tonic-gate 		    tnf_opaque, next_seg, rmpp_ctx->rmpp_ns,
9460Sstevel@tonic-gate 		    tnf_opaque, num_pkts, rmpp_ctx->rmpp_num_pkts);
9470Sstevel@tonic-gate 
9480Sstevel@tonic-gate 		/*
9490Sstevel@tonic-gate 		 * Initialize the RMPP header
9500Sstevel@tonic-gate 		 */
9510Sstevel@tonic-gate 		rmpp_ctx->rmpp_flags = IBMF_RMPP_FLAGS_ACTIVE;
9520Sstevel@tonic-gate 
9530Sstevel@tonic-gate 		/* first, last packet flags set only for type DATA */
9540Sstevel@tonic-gate 		if (rmpp_ctx->rmpp_type == IBMF_RMPP_TYPE_DATA) {
9550Sstevel@tonic-gate 
9560Sstevel@tonic-gate 			if (rmpp_ctx->rmpp_ns == 1)
9570Sstevel@tonic-gate 				rmpp_ctx->rmpp_flags |=
9580Sstevel@tonic-gate 				    IBMF_RMPP_FLAGS_FIRST_PKT;
9590Sstevel@tonic-gate 			else
9600Sstevel@tonic-gate 				rmpp_ctx->rmpp_respt = IBMF_RMPP_DEFAULT_RRESPT;
9610Sstevel@tonic-gate 
9620Sstevel@tonic-gate 			if (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts)
9630Sstevel@tonic-gate 				rmpp_ctx->rmpp_flags |=
9640Sstevel@tonic-gate 				    IBMF_RMPP_FLAGS_LAST_PKT;
9650Sstevel@tonic-gate 		} else {
9660Sstevel@tonic-gate 			data_sz = 0;
9670Sstevel@tonic-gate 			rmpp_ctx->rmpp_respt = IBMF_RMPP_TERM_RRESPT;
9680Sstevel@tonic-gate 		}
9690Sstevel@tonic-gate 
9700Sstevel@tonic-gate 		IBMF_INIT_RMPP_HDR(rmpp_hdr,
9710Sstevel@tonic-gate 		    IBMF_RMPP_VERSION, rmpp_ctx->rmpp_type,
9720Sstevel@tonic-gate 		    rmpp_ctx->rmpp_respt, rmpp_ctx->rmpp_flags,
9730Sstevel@tonic-gate 		    rmpp_ctx->rmpp_status, rmpp_ctx->rmpp_word3,
9740Sstevel@tonic-gate 		    rmpp_ctx->rmpp_word4)
9750Sstevel@tonic-gate 
9760Sstevel@tonic-gate 		IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
9770Sstevel@tonic-gate 		    IBMF_TNF_TRACE, "",
9780Sstevel@tonic-gate 		    "ibmf_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
9790Sstevel@tonic-gate 		    " rmpp_flags = 0x%x, rmpp_segnum = %d, pyld_nwl = %d\n",
9800Sstevel@tonic-gate 		    tnf_opaque, msgimplp, msgimplp,
9810Sstevel@tonic-gate 		    tnf_opaque, rmpp_type, rmpp_hdr->rmpp_type,
9820Sstevel@tonic-gate 		    tnf_opaque, rmpp_flags, rmpp_hdr->rmpp_flags,
9830Sstevel@tonic-gate 		    tnf_opaque, rmpp_segnum, b2h32(rmpp_hdr->rmpp_segnum),
9840Sstevel@tonic-gate 		    tnf_opaque, pyld_nwl, b2h32(rmpp_hdr->rmpp_pyldlen_nwl));
9850Sstevel@tonic-gate 
9860Sstevel@tonic-gate 		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(rmpp_hdr));
9870Sstevel@tonic-gate 	}
9880Sstevel@tonic-gate 
9890Sstevel@tonic-gate 	/* determine offset to start class header */
9900Sstevel@tonic-gate 	ibmf_i_mgt_class_to_hdr_sz_off(
9910Sstevel@tonic-gate 	    hdr_ipbufs->im_bufs_mad_hdr->MgmtClass,
9920Sstevel@tonic-gate 	    &cl_hdr_sz, &cl_hdr_off);
9930Sstevel@tonic-gate 	offset += cl_hdr_off;
9940Sstevel@tonic-gate 	if (hdr_ipbufs->im_bufs_cl_hdr != NULL) {
9950Sstevel@tonic-gate 		bcopy((void *)hdr_ipbufs->im_bufs_cl_hdr,
9960Sstevel@tonic-gate 		    (void *)((uintptr_t)wqep->send_mem + offset),
9970Sstevel@tonic-gate 		    hdr_ipbufs->im_bufs_cl_hdr_len);
9980Sstevel@tonic-gate 		offset += hdr_ipbufs->im_bufs_cl_hdr_len;
9990Sstevel@tonic-gate 	}
10000Sstevel@tonic-gate 	bcopy((void *)buf, (void *)((uintptr_t)wqep->send_mem + offset),
10010Sstevel@tonic-gate 	    data_sz);
10020Sstevel@tonic-gate 	swrp->wr_sgl = sglp;
10030Sstevel@tonic-gate 	swrp->wr_nds = 1;
10040Sstevel@tonic-gate 	swrp->wr.ud.udwr_dest = ud_dest;
10050Sstevel@tonic-gate 	wqep->send_port_num = clientp->ic_client_info.port_num;
10060Sstevel@tonic-gate 	wqep->send_qp_handle = ibt_qp_handle;
10070Sstevel@tonic-gate 	wqep->send_ibmf_qp_handle = ibmf_qp_handle;
10080Sstevel@tonic-gate 
10090Sstevel@tonic-gate 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*swrp))
10100Sstevel@tonic-gate 
10110Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_end,
10120Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() exit\n");
10130Sstevel@tonic-gate }
10140Sstevel@tonic-gate 
10150Sstevel@tonic-gate /*
10160Sstevel@tonic-gate  * ibmf_i_init_recv_wqe():
10170Sstevel@tonic-gate  *	Initialize a receive WQE
10180Sstevel@tonic-gate  */
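/*
 * A receive WQE prepared here consists of a single scatter/gather element
 * large enough to hold the GRH followed by a full MAD (sizeof (ib_grh_t) +
 * IBMF_MAD_SIZE).  The work request ID is tagged with IBMF_RCV_CQE so that
 * completions can later be recognized as receive completions, and the port
 * number is taken from either the default QP context or the alternate QP
 * context, depending on the IBMF QP handle passed in.
 */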
10190Sstevel@tonic-gate void
10200Sstevel@tonic-gate ibmf_i_init_recv_wqe(ibmf_qp_t *qpp, ibt_wr_ds_t *sglp,
10210Sstevel@tonic-gate     ibmf_recv_wqe_t *wqep, ibt_qp_hdl_t ibt_qp_handle,
10220Sstevel@tonic-gate     ibmf_qp_handle_t ibmf_qp_handle)
10230Sstevel@tonic-gate {
10240Sstevel@tonic-gate 	ibt_recv_wr_t		*rwrp;
10250Sstevel@tonic-gate 
10260Sstevel@tonic-gate 	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_start,
10270Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() enter, "
10280Sstevel@tonic-gate 	    "qpp = %p, sglp = %p, wqep = %p, qp_hdl = %p\n",
10290Sstevel@tonic-gate 	    tnf_opaque, qpp, qpp, tnf_opaque, sglp, sglp, tnf_opaque,
10300Sstevel@tonic-gate 	    wqep, wqep, tnf_opaque, qp_hdl, ibmf_qp_handle);
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	_NOTE(ASSUMING_PROTECTED(*wqep))
10330Sstevel@tonic-gate 	_NOTE(ASSUMING_PROTECTED(*sglp))
10340Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rwrp))
10350Sstevel@tonic-gate 
10360Sstevel@tonic-gate 	rwrp = &wqep->recv_wr;
10370Sstevel@tonic-gate 
10380Sstevel@tonic-gate 	/*
10390Sstevel@tonic-gate 	 * we set a bit in the WR ID to be able to easily distinguish
10400Sstevel@tonic-gate 	 * between send completions and recv completions
10410Sstevel@tonic-gate 	 */
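	/*
	 * For example, a completion handler can test that bit and recover
	 * the WQE pointer roughly as follows (a sketch only; names such as
	 * "wc" are illustrative, and the real dispatch logic lives in the
	 * IBMF completion handling code):
	 *
	 *	if (wc->wc_id & IBMF_RCV_CQE)
	 *		recv_wqep = (ibmf_recv_wqe_t *)(uintptr_t)
	 *		    (wc->wc_id & ~IBMF_RCV_CQE);
	 */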
10420Sstevel@tonic-gate 	IBMF_ADDR_TO_RECV_WR_ID(wqep, rwrp->wr_id);
10430Sstevel@tonic-gate 
10440Sstevel@tonic-gate 	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->recv_mem, wqep->recv_sg_lkey,
10450Sstevel@tonic-gate 	    sizeof (ib_grh_t) + IBMF_MAD_SIZE);
10460Sstevel@tonic-gate 
10470Sstevel@tonic-gate 	rwrp->wr_sgl = sglp;
10480Sstevel@tonic-gate 	rwrp->wr_nds = IBMF_MAX_RQ_WR_SGL_ELEMENTS;
10490Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
10500Sstevel@tonic-gate 		wqep->recv_port_num = qpp->iq_port_num;
10510Sstevel@tonic-gate 	} else {
10520Sstevel@tonic-gate 		ibmf_alt_qp_t	*altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
10530Sstevel@tonic-gate 		wqep->recv_port_num = altqp->isq_port_num;
10540Sstevel@tonic-gate 	}
10550Sstevel@tonic-gate 	wqep->recv_qpp = qpp;
10560Sstevel@tonic-gate 	wqep->recv_qp_handle = ibt_qp_handle;
10570Sstevel@tonic-gate 	wqep->recv_ibmf_qp_handle = ibmf_qp_handle;
10580Sstevel@tonic-gate 
10590Sstevel@tonic-gate 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*rwrp))
10600Sstevel@tonic-gate 
10610Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_end,
10620Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() exit\n");
10630Sstevel@tonic-gate }
10640Sstevel@tonic-gate 
10650Sstevel@tonic-gate /*
10660Sstevel@tonic-gate  * ibmf_i_extend_wqe_cache():
10670Sstevel@tonic-gate  *	Extend the kmem WQE cache
10680Sstevel@tonic-gate  */
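/*
 * The extension is done in two steps: a new WQE management structure
 * (ibmf_wqe_mgt_t) is allocated and its mutex initialized here, and then
 * ibmf_i_extend_wqe_mem() is called to allocate and register the additional
 * WQE memory, add it to the appropriate vmem arena, and link the management
 * structure onto the CI's or alternate QP's WQE management list.  If the
 * memory extension fails, the management structure is freed and
 * IBMF_NO_RESOURCES is returned to the caller.
 */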
10690Sstevel@tonic-gate int
10700Sstevel@tonic-gate ibmf_i_extend_wqe_cache(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
10710Sstevel@tonic-gate     boolean_t block)
10720Sstevel@tonic-gate {
10730Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*wqe_mgt;
10740Sstevel@tonic-gate 
10750Sstevel@tonic-gate 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
10760Sstevel@tonic-gate 	    ibmf_i_extend_wqe_cache_start, IBMF_TNF_TRACE, "",
10770Sstevel@tonic-gate 	    "ibmf_i_extend_wqe_cache() enter, cip = %p, qp_hdl = %p, "
10780Sstevel@tonic-gate 	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, qp_hdl,
10790Sstevel@tonic-gate 	    ibmf_qp_handle, tnf_uint, block, block);
10800Sstevel@tonic-gate 
10810Sstevel@tonic-gate 	/*
10820Sstevel@tonic-gate 	 * Allocate memory for the WQE management structure
10830Sstevel@tonic-gate 	 */
10840Sstevel@tonic-gate 	wqe_mgt = kmem_zalloc(sizeof (ibmf_wqe_mgt_t),
10850Sstevel@tonic-gate 	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
10860Sstevel@tonic-gate 	if (wqe_mgt == NULL) {
10870Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
10880Sstevel@tonic-gate 		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
10890Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_cache(): %s\n",
10900Sstevel@tonic-gate 		    tnf_string, msg, "wqe mgt alloc failed");
10910Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
10920Sstevel@tonic-gate 		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
10930Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_cache() exit\n");
10940Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
10950Sstevel@tonic-gate 	}
10960Sstevel@tonic-gate 	mutex_init(&wqe_mgt->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
10970Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))
10980Sstevel@tonic-gate 
10990Sstevel@tonic-gate 	/* Allocate and register more WQE memory */
11000Sstevel@tonic-gate 	if (ibmf_i_extend_wqe_mem(cip, ibmf_qp_handle, wqe_mgt,
11010Sstevel@tonic-gate 	    block) != IBMF_SUCCESS) {
11020Sstevel@tonic-gate 		mutex_destroy(&wqe_mgt->wqes_mutex);
11030Sstevel@tonic-gate 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
11040Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
11050Sstevel@tonic-gate 		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
11060Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_cache(): %s\n",
11070Sstevel@tonic-gate 		    tnf_string, msg, "extension of WQE pool failed");
11080Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
11090Sstevel@tonic-gate 		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
11100Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_cache() exit\n");
11110Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
11120Sstevel@tonic-gate 	}
11130Sstevel@tonic-gate 
11140Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
11150Sstevel@tonic-gate 	    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
11160Sstevel@tonic-gate 	    "ibmf_i_extend_wqe_cache() exit\n");
11170Sstevel@tonic-gate 
11180Sstevel@tonic-gate 	return (IBMF_SUCCESS);
11190Sstevel@tonic-gate }
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate /*
11220Sstevel@tonic-gate  * ibmf_i_extend_wqe_mem():
11230Sstevel@tonic-gate  *	Allocate and register more WQE memory, and expand the VMEM arena
11240Sstevel@tonic-gate  */
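/*
 * The new chunk is sized to cover the send and receive WQE pools for every
 * port on the CI, allocated from kernel memory, registered with the HCA for
 * local write access, and then handed to the vmem arena from which individual
 * WQE buffers are carved.  Each failure path unwinds whatever was set up
 * before it: the memory registration is torn down and the kernel memory is
 * freed before IBMF_NO_RESOURCES is returned.
 */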
11250Sstevel@tonic-gate static int
11260Sstevel@tonic-gate ibmf_i_extend_wqe_mem(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
11270Sstevel@tonic-gate     ibmf_wqe_mgt_t *wqe_mgt, boolean_t block)
11280Sstevel@tonic-gate {
11290Sstevel@tonic-gate 	ibt_status_t		status;
11300Sstevel@tonic-gate 	ibt_mr_hdl_t		mem_hdl;
11310Sstevel@tonic-gate 	ibt_mr_desc_t		mem_desc;
11320Sstevel@tonic-gate 	ibt_mr_attr_t		mem_attr;
11330Sstevel@tonic-gate 	ibmf_alt_qp_t		*qp_ctx;
11340Sstevel@tonic-gate 	ibmf_wqe_mgt_t		*pwqe_mgt;
11350Sstevel@tonic-gate 	vmem_t			*wqe_vmem_arena;
11360Sstevel@tonic-gate 
11370Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))
11380Sstevel@tonic-gate 
11390Sstevel@tonic-gate 	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4,
11400Sstevel@tonic-gate 	    ibmf_i_extend_wqe_mem_start, IBMF_TNF_TRACE, "",
11410Sstevel@tonic-gate 	    "ibmf_i_extend_wqe_mem() enter, cip = %p, qp_hdl = %p, "
11420Sstevel@tonic-gate 	    "wqe_mgt = %p, block = %d\n",
11430Sstevel@tonic-gate 	    tnf_opaque, cip, cip, tnf_opaque, qp_hdl, ibmf_qp_handle,
11440Sstevel@tonic-gate 	    tnf_opaque, wqe_mgt, wqe_mgt, tnf_uint, block, block);
11450Sstevel@tonic-gate 
11460Sstevel@tonic-gate 	/*
11470Sstevel@tonic-gate 	 * Allocate more memory for the WQEs to be used by the
11480Sstevel@tonic-gate 	 * specified QP
11490Sstevel@tonic-gate 	 */
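	/*
	 * The chunk covers both the send and receive WQE pools for every
	 * port (ci_nports), using the ibmf_send_wqes_per_port and
	 * ibmf_recv_wqes_per_port tunables; the factor of two presumably
	 * accounts for the two management QPs served on each port.
	 */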
11500Sstevel@tonic-gate 	wqe_mgt->wqes_kmem_sz = cip->ci_nports * 2 *
11510Sstevel@tonic-gate 	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
11520Sstevel@tonic-gate 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
11530Sstevel@tonic-gate 	wqe_mgt->wqes_kmem = kmem_zalloc(wqe_mgt->wqes_kmem_sz,
11540Sstevel@tonic-gate 	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
11550Sstevel@tonic-gate 	if (wqe_mgt->wqes_kmem == NULL) {
11560Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
11570Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
11580Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem(): %s\n",
11590Sstevel@tonic-gate 		    tnf_string, msg, "extension of WQE pool failed");
11600Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
11610Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
11620Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem() exit\n");
11630Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
11640Sstevel@tonic-gate 	}
11650Sstevel@tonic-gate 
11660Sstevel@tonic-gate 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgt->wqes_kmem;
11670Sstevel@tonic-gate 	mem_attr.mr_len = wqe_mgt->wqes_kmem_sz;
11680Sstevel@tonic-gate 	mem_attr.mr_flags = (block == B_TRUE ? IBT_MR_SLEEP : IBT_MR_NOSLEEP)
11690Sstevel@tonic-gate 	    | IBT_MR_ENABLE_LOCAL_WRITE;
11700Sstevel@tonic-gate 	mem_attr.mr_as = NULL;
11710Sstevel@tonic-gate 
11720Sstevel@tonic-gate 	/* Register the allocated memory */
11730Sstevel@tonic-gate 	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd,
11740Sstevel@tonic-gate 	    &mem_attr, &mem_hdl, &mem_desc);
11750Sstevel@tonic-gate 	if (status != IBT_SUCCESS) {
11760Sstevel@tonic-gate 		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
11770Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
11780Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
11790Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem(): %s\n",
11800Sstevel@tonic-gate 		    tnf_string, msg, "wqe extension MR failed");
11810Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
11820Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
11830Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem() exit\n");
11840Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
11850Sstevel@tonic-gate 	}
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 	/* Store the memory registration information */
11880Sstevel@tonic-gate 	wqe_mgt->wqes_ib_mem = mem_desc.md_vaddr;
11890Sstevel@tonic-gate 	wqe_mgt->wqes_ib_lkey = mem_desc.md_lkey;
11900Sstevel@tonic-gate 	wqe_mgt->wqes_ib_mem_hdl = mem_hdl;
11910Sstevel@tonic-gate 
11920Sstevel@tonic-gate 	/* Get the VMEM arena based on the QP type */
11930Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
11940Sstevel@tonic-gate 		wqe_vmem_arena = cip->ci_wqe_ib_vmem;
11950Sstevel@tonic-gate 	} else {
11960Sstevel@tonic-gate 		qp_ctx = (ibmf_alt_qp_t *)ibmf_qp_handle;
11970Sstevel@tonic-gate 		wqe_vmem_arena = qp_ctx->isq_wqe_ib_vmem;
11980Sstevel@tonic-gate 	}
11990Sstevel@tonic-gate 
12000Sstevel@tonic-gate 	/* Add these addresses to the vmem arena */
12010Sstevel@tonic-gate 	if (vmem_add(wqe_vmem_arena, (void *)(uintptr_t)wqe_mgt->wqes_ib_mem,
12020Sstevel@tonic-gate 	    wqe_mgt->wqes_kmem_sz,
12030Sstevel@tonic-gate 	    (block == B_TRUE ? VM_SLEEP : VM_NOSLEEP)) == NULL) {
12040Sstevel@tonic-gate 		(void) ibt_deregister_mr(cip->ci_ci_handle,
12050Sstevel@tonic-gate 		    wqe_mgt->wqes_ib_mem_hdl);
12060Sstevel@tonic-gate 		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
12070Sstevel@tonic-gate 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
12080Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
12090Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem(): %s\n",
12100Sstevel@tonic-gate 		    tnf_string, msg, "wqe extension vmem_add failed");
12110Sstevel@tonic-gate 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
12120Sstevel@tonic-gate 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
12130Sstevel@tonic-gate 		    "ibmf_i_extend_wqe_mem() exit\n");
12140Sstevel@tonic-gate 		return (IBMF_NO_RESOURCES);
12150Sstevel@tonic-gate 	}
12160Sstevel@tonic-gate 
12170Sstevel@tonic-gate 	/* Get the WQE management pointers based on the QP type */
12180Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
12190Sstevel@tonic-gate 		mutex_enter(&cip->ci_wqe_mutex);
12200Sstevel@tonic-gate 		pwqe_mgt = cip->ci_wqe_mgt_list;
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate 		/* Add the new wqe management struct to the end of the list */
12230Sstevel@tonic-gate 		while (pwqe_mgt->wqe_mgt_next != NULL)
12240Sstevel@tonic-gate 			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
12250Sstevel@tonic-gate 		pwqe_mgt->wqe_mgt_next = wqe_mgt;
12260Sstevel@tonic-gate 
12270Sstevel@tonic-gate 		mutex_exit(&cip->ci_wqe_mutex);
12280Sstevel@tonic-gate 	} else {
12290Sstevel@tonic-gate 		mutex_enter(&qp_ctx->isq_wqe_mutex);
12300Sstevel@tonic-gate 		pwqe_mgt = qp_ctx->isq_wqe_mgt_list;
12310Sstevel@tonic-gate 
12320Sstevel@tonic-gate 		/* Add the new wqe management struct to the end of the list */
12330Sstevel@tonic-gate 		while (pwqe_mgt->wqe_mgt_next != NULL)
12340Sstevel@tonic-gate 			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
12350Sstevel@tonic-gate 		pwqe_mgt->wqe_mgt_next = wqe_mgt;
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 		mutex_exit(&qp_ctx->isq_wqe_mutex);
12380Sstevel@tonic-gate 	}
12390Sstevel@tonic-gate 
12400Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_extend_wqe_mem_end,
12410Sstevel@tonic-gate 	    IBMF_TNF_TRACE, "", "ibmf_i_extend_wqe_mem() exit\n");
12420Sstevel@tonic-gate 
12430Sstevel@tonic-gate 	return (IBMF_SUCCESS);
12440Sstevel@tonic-gate }
12450Sstevel@tonic-gate 
12460Sstevel@tonic-gate /*
12470Sstevel@tonic-gate  * ibmf_i_alloc_send_resources():
12480Sstevel@tonic-gate  *	Allocate send resources (the send WQE)
12490Sstevel@tonic-gate  */
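/*
 * The allocation is deliberately non-blocking because the caller holds the
 * message mutex.  If the kmem cache is exhausted, an attempt is made to grow
 * the WQE cache (again without blocking) and the allocation is retried once;
 * if that also fails, the swqe_allocs_failed kstat is bumped and
 * IBMF_NO_RESOURCES is returned.  On success the per-port send_wqes_alloced
 * kstat and the outstanding WQE count for the CI or alternate QP are
 * incremented.
 */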
12500Sstevel@tonic-gate int
12510Sstevel@tonic-gate ibmf_i_alloc_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
12520Sstevel@tonic-gate     boolean_t block, ibmf_send_wqe_t **swqepp)
12530Sstevel@tonic-gate {
12540Sstevel@tonic-gate 	ibmf_send_wqe_t		*send_wqep;
12550Sstevel@tonic-gate 	struct kmem_cache	*kmem_cachep;
12560Sstevel@tonic-gate 	ibmf_qp_handle_t	ibmf_qp_handle = msgimplp->im_qp_hdl;
12570Sstevel@tonic-gate 	ibmf_alt_qp_t		*altqp;
12580Sstevel@tonic-gate 
12590Sstevel@tonic-gate 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
12600Sstevel@tonic-gate 	    ibmf_i_alloc_send_resources_start, IBMF_TNF_TRACE, "",
12610Sstevel@tonic-gate 	    "ibmf_i_alloc_send_resources() enter, cip = %p, msg = %p, "
12620Sstevel@tonic-gate 	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, msg,
12630Sstevel@tonic-gate 	    msgimplp, tnf_uint, block, block);
12640Sstevel@tonic-gate 
12650Sstevel@tonic-gate 	/* Get the WQE kmem cache pointer based on the QP type */
12660Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
12670Sstevel@tonic-gate 		kmem_cachep = cip->ci_send_wqes_cache;
12680Sstevel@tonic-gate 	else {
12690Sstevel@tonic-gate 		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
12700Sstevel@tonic-gate 		kmem_cachep = altqp->isq_send_wqes_cache;
12710Sstevel@tonic-gate 	}
12720Sstevel@tonic-gate 
12730Sstevel@tonic-gate 	/*
12740Sstevel@tonic-gate 	 * Allocate a send WQE from the send WQE kmem cache
12750Sstevel@tonic-gate 	 * Do not block here as we are holding the msgimpl mutex.
12760Sstevel@tonic-gate 	 */
12770Sstevel@tonic-gate 	send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12780Sstevel@tonic-gate 	if (send_wqep == NULL) {
12790Sstevel@tonic-gate 		/*
12800Sstevel@tonic-gate 		 * Attempt to extend the cache and then retry the
12810Sstevel@tonic-gate 		 * kmem_cache_alloc()
12820Sstevel@tonic-gate 		 * The block argument (third) is set to B_FALSE.
12830Sstevel@tonic-gate 		 */
12840Sstevel@tonic-gate 		if (ibmf_i_extend_wqe_cache(cip, ibmf_qp_handle, B_FALSE) ==
12850Sstevel@tonic-gate 		    IBMF_NO_RESOURCES) {
12860Sstevel@tonic-gate 			mutex_enter(&cip->ci_mutex);
12870Sstevel@tonic-gate 			IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed, 1);
12880Sstevel@tonic-gate 			mutex_exit(&cip->ci_mutex);
12890Sstevel@tonic-gate 			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
12900Sstevel@tonic-gate 			    ibmf_i_alloc_send_resources_err, IBMF_TNF_ERROR, "",
12910Sstevel@tonic-gate 			    "ibmf_i_alloc_send_resources(): %s\n",
12920Sstevel@tonic-gate 			    tnf_string, msg, "alloc send_wqe failed");
12930Sstevel@tonic-gate 			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
12940Sstevel@tonic-gate 			    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
12950Sstevel@tonic-gate 			    "ibmf_i_alloc_send_resources() exit\n");
12960Sstevel@tonic-gate 			return (IBMF_NO_RESOURCES);
12970Sstevel@tonic-gate 		} else {
12980Sstevel@tonic-gate 			send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12990Sstevel@tonic-gate 			if (send_wqep == NULL) {
13000Sstevel@tonic-gate 				/* Allocation failed again. Give up here. */
13010Sstevel@tonic-gate 				mutex_enter(&cip->ci_mutex);
13020Sstevel@tonic-gate 				IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed,
13030Sstevel@tonic-gate 				    1);
13040Sstevel@tonic-gate 				mutex_exit(&cip->ci_mutex);
13050Sstevel@tonic-gate 				IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
13060Sstevel@tonic-gate 				    ibmf_i_alloc_send_resources_err,
13070Sstevel@tonic-gate 				    IBMF_TNF_ERROR, "",
13080Sstevel@tonic-gate 				    "ibmf_i_alloc_send_resources(): %s\n",
13090Sstevel@tonic-gate 				    tnf_string, msg, "alloc send_wqe failed");
13100Sstevel@tonic-gate 				IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
13110Sstevel@tonic-gate 				    ibmf_i_alloc_send_resources_end,
13120Sstevel@tonic-gate 				    IBMF_TNF_TRACE, "",
13130Sstevel@tonic-gate 				    "ibmf_i_alloc_send_resources() exit\n");
13140Sstevel@tonic-gate 				return (IBMF_NO_RESOURCES);
13150Sstevel@tonic-gate 			}
13160Sstevel@tonic-gate 		}
13170Sstevel@tonic-gate 	}
13180Sstevel@tonic-gate 
13190Sstevel@tonic-gate 	mutex_enter(&cip->ci_mutex);
13200Sstevel@tonic-gate 	IBMF_ADD32_PORT_KSTATS(cip, send_wqes_alloced, 1);
13210Sstevel@tonic-gate 	mutex_exit(&cip->ci_mutex);
13220Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
13230Sstevel@tonic-gate 		mutex_enter(&cip->ci_mutex);
13240Sstevel@tonic-gate 		cip->ci_wqes_alloced++;
13250Sstevel@tonic-gate 		mutex_exit(&cip->ci_mutex);
13260Sstevel@tonic-gate 	} else {
13270Sstevel@tonic-gate 		mutex_enter(&altqp->isq_mutex);
13280Sstevel@tonic-gate 		altqp->isq_wqes_alloced++;
13290Sstevel@tonic-gate 		mutex_exit(&altqp->isq_mutex);
13300Sstevel@tonic-gate 	}
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 	send_wqep->send_msg = msgimplp;
13330Sstevel@tonic-gate 	*swqepp = send_wqep;
13340Sstevel@tonic-gate 
13350Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
13360Sstevel@tonic-gate 	    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
13370Sstevel@tonic-gate 	    "ibmf_i_alloc_send_resources() exit\n");
13380Sstevel@tonic-gate 
13390Sstevel@tonic-gate 	return (IBMF_SUCCESS);
13400Sstevel@tonic-gate }
13410Sstevel@tonic-gate 
13420Sstevel@tonic-gate /*
13430Sstevel@tonic-gate  * ibmf_i_free_send_resources():
13440Sstevel@tonic-gate  *	Free send resources (just the send WQE)
13450Sstevel@tonic-gate  */
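/*
 * The WQE is returned to the kmem cache it was allocated from, the per-port
 * kstat and the outstanding WQE count are decremented, and the associated
 * condition variable is signaled when the count reaches zero, presumably so
 * that teardown code waiting for all WQEs to drain can make progress.
 */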
13460Sstevel@tonic-gate /* ARGSUSED */
13470Sstevel@tonic-gate void
13480Sstevel@tonic-gate ibmf_i_free_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
13490Sstevel@tonic-gate     ibmf_send_wqe_t *swqep)
13500Sstevel@tonic-gate {
13510Sstevel@tonic-gate 	struct kmem_cache	*kmem_cachep;
13520Sstevel@tonic-gate 	ibmf_qp_handle_t	ibmf_qp_handle = msgimplp->im_qp_hdl;
13530Sstevel@tonic-gate 	ibmf_alt_qp_t		*altqp;
13540Sstevel@tonic-gate 
13550Sstevel@tonic-gate 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
13560Sstevel@tonic-gate 	    ibmf_i_free_send_resources_start, IBMF_TNF_TRACE, "",
13570Sstevel@tonic-gate 	    "ibmf_i_free_send_resources() enter, cip = %p, msg = %p, "
13580Sstevel@tonic-gate 	    " swqep = %p\n", tnf_opaque, cip, cip, tnf_opaque, msg,
13590Sstevel@tonic-gate 	    msgimplp, tnf_opaque, swqep, swqep);
13600Sstevel@tonic-gate 
13610Sstevel@tonic-gate 	/* Get the WQE kmem cache pointer based on the QP type */
13620Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
13630Sstevel@tonic-gate 		kmem_cachep = cip->ci_send_wqes_cache;
13640Sstevel@tonic-gate 	else {
13650Sstevel@tonic-gate 		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
13660Sstevel@tonic-gate 		kmem_cachep = altqp->isq_send_wqes_cache;
13670Sstevel@tonic-gate 	}
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate 	/* return the send WQE to the kmem cache */
13700Sstevel@tonic-gate 	kmem_cache_free(kmem_cachep, swqep);
13710Sstevel@tonic-gate 
13720Sstevel@tonic-gate 	mutex_enter(&cip->ci_mutex);
13730Sstevel@tonic-gate 	IBMF_SUB32_PORT_KSTATS(cip, send_wqes_alloced, 1);
13740Sstevel@tonic-gate 	mutex_exit(&cip->ci_mutex);
13750Sstevel@tonic-gate 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
13760Sstevel@tonic-gate 		mutex_enter(&cip->ci_mutex);
13770Sstevel@tonic-gate 		cip->ci_wqes_alloced--;
13780Sstevel@tonic-gate 		if (cip->ci_wqes_alloced == 0)
13790Sstevel@tonic-gate 			cv_signal(&cip->ci_wqes_cv);
13800Sstevel@tonic-gate 		mutex_exit(&cip->ci_mutex);
13810Sstevel@tonic-gate 	} else {
13820Sstevel@tonic-gate 		mutex_enter(&altqp->isq_mutex);
13830Sstevel@tonic-gate 		altqp->isq_wqes_alloced--;
13840Sstevel@tonic-gate 		if (altqp->isq_wqes_alloced == 0)
13850Sstevel@tonic-gate 			cv_signal(&altqp->isq_wqes_cv);
13860Sstevel@tonic-gate 		mutex_exit(&altqp->isq_mutex);
13870Sstevel@tonic-gate 	}
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
13900Sstevel@tonic-gate 	    ibmf_i_free_send_resources_end, IBMF_TNF_TRACE, "",
13910Sstevel@tonic-gate 	    "ibmf_i_free_send_resources() exit\n");
13920Sstevel@tonic-gate }