xref: /onnv-gate/usr/src/uts/common/io/ib/ibtl/ibtl_handlers.c (revision 12965:b65a8427f8fe)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
52259Shiremath  * Common Development and Distribution License (the "License").
62259Shiremath  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*12965SWilliam.Taylor@Oracle.COM  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
230Sstevel@tonic-gate  */
240Sstevel@tonic-gate 
250Sstevel@tonic-gate #include <sys/ib/ibtl/impl/ibtl.h>
260Sstevel@tonic-gate #include <sys/ib/ibtl/impl/ibtl_cm.h>
270Sstevel@tonic-gate #include <sys/taskq.h>
280Sstevel@tonic-gate #include <sys/disp.h>
290Sstevel@tonic-gate #include <sys/callb.h>
300Sstevel@tonic-gate #include <sys/proc.h>
310Sstevel@tonic-gate 
320Sstevel@tonic-gate /*
330Sstevel@tonic-gate  * ibtl_handlers.c
340Sstevel@tonic-gate  */
350Sstevel@tonic-gate 
360Sstevel@tonic-gate /*
370Sstevel@tonic-gate  * What's in this file?
380Sstevel@tonic-gate  *
390Sstevel@tonic-gate  *   This file started as an implementation of Asynchronous Event/Error
400Sstevel@tonic-gate  *   handling and Completion Queue handling.  As the implementation
410Sstevel@tonic-gate  *   evolved, code has been added for other ibc_* interfaces (resume,
420Sstevel@tonic-gate  *   predetach, etc.) that use the same mechanisms as used for asyncs.
430Sstevel@tonic-gate  *
440Sstevel@tonic-gate  * Async and CQ handling at interrupt level.
450Sstevel@tonic-gate  *
460Sstevel@tonic-gate  *   CQ handling is normally done at interrupt level using the CQ callback
470Sstevel@tonic-gate  *   handler to call the appropriate IBT Client (owner of the CQ).  For
480Sstevel@tonic-gate  *   clients that would prefer a fully flexible non-interrupt context to
490Sstevel@tonic-gate  *   do their CQ handling, a CQ can be created so that its handler is
500Sstevel@tonic-gate  *   called from a non-interrupt thread.  CQ handling is done frequently
510Sstevel@tonic-gate  *   whereas Async handling is expected to occur very infrequently.
520Sstevel@tonic-gate  *
530Sstevel@tonic-gate  *   Async handling is done by marking (or'ing in of an async_code of) the
540Sstevel@tonic-gate  *   pertinent IBTL data structure, and then notifying the async_thread(s)
550Sstevel@tonic-gate  *   that the data structure has async work to be done.  The notification
560Sstevel@tonic-gate  *   occurs by linking the data structure through its async_link onto a
570Sstevel@tonic-gate  *   list of like data structures and waking up an async_thread.  This
580Sstevel@tonic-gate  *   list append is not done if there is already async work pending on
590Sstevel@tonic-gate  *   this data structure (IBTL_ASYNC_PENDING).
600Sstevel@tonic-gate  *
610Sstevel@tonic-gate  * Async Mutex and CQ Mutex
620Sstevel@tonic-gate  *
630Sstevel@tonic-gate  *   The global ibtl_async_mutex is "the" mutex used to control access
640Sstevel@tonic-gate  *   to all the data needed by ibc_async_handler.  All the threads that
650Sstevel@tonic-gate  *   use this mutex are written so that the mutex is held for very short
660Sstevel@tonic-gate  *   periods of time, and never held while making calls to functions
670Sstevel@tonic-gate  *   that may block.
680Sstevel@tonic-gate  *
690Sstevel@tonic-gate  *   The global ibtl_cq_mutex is used similarly by ibc_cq_handler and
700Sstevel@tonic-gate  *   the ibtl_cq_thread(s).
710Sstevel@tonic-gate  *
720Sstevel@tonic-gate  * Mutex hierarchy
730Sstevel@tonic-gate  *
740Sstevel@tonic-gate  *   The ibtl_clnt_list_mutex is above the ibtl_async_mutex.
750Sstevel@tonic-gate  *   ibtl_clnt_list_mutex protects all of the various lists.
760Sstevel@tonic-gate  *   The ibtl_async_mutex is below this in the hierarchy.
770Sstevel@tonic-gate  *
780Sstevel@tonic-gate  *   The ibtl_cq_mutex is independent of the above mutexes.
790Sstevel@tonic-gate  *
800Sstevel@tonic-gate  * Threads
810Sstevel@tonic-gate  *
820Sstevel@tonic-gate  *   There are "ibtl_cq_threads" number of threads created for handling
830Sstevel@tonic-gate  *   Completion Queues in threads.  If this feature really gets used,
840Sstevel@tonic-gate  *   then we will want to do some suitable tuning.  Similarly, we may
850Sstevel@tonic-gate  *   want to tune the number of "ibtl_async_thread_init".
860Sstevel@tonic-gate  *
870Sstevel@tonic-gate  *   The function ibtl_cq_thread is the main loop for handling a CQ in a
880Sstevel@tonic-gate  *   thread.  There can be multiple threads executing this same code.
890Sstevel@tonic-gate  *   The code sleeps when there is no work to be done (list is empty),
900Sstevel@tonic-gate  *   otherwise it pulls the first CQ structure off the list and performs
910Sstevel@tonic-gate  *   the CQ handler callback to the client.  After that returns, a check
920Sstevel@tonic-gate  *   is made, and if another ibc_cq_handler call was made for this CQ,
930Sstevel@tonic-gate  *   the client is called again.
940Sstevel@tonic-gate  *
950Sstevel@tonic-gate  *   The function ibtl_async_thread is the main loop for handling async
960Sstevel@tonic-gate  *   events/errors.  There can be multiple threads executing this same code.
970Sstevel@tonic-gate  *   The code sleeps when there is no work to be done (lists are empty),
980Sstevel@tonic-gate  *   otherwise it pulls the first structure off one of the lists and
990Sstevel@tonic-gate  *   performs the async callback(s) to the client(s).  Note that HCA
1000Sstevel@tonic-gate  *   async handling is done by calling each of the clients using the HCA.
1010Sstevel@tonic-gate  *   When the async handling completes, the data structure having the async
1020Sstevel@tonic-gate  *   event/error is checked for more work before it's considered "done".
1030Sstevel@tonic-gate  *
1040Sstevel@tonic-gate  * Taskq
1050Sstevel@tonic-gate  *
1060Sstevel@tonic-gate  *   The async_taskq is used here for allowing async handler callbacks to
1070Sstevel@tonic-gate  *   occur simultaneously to multiple clients of an HCA.  This taskq could
1080Sstevel@tonic-gate  *   be used for other purposes, e.g., if all the async_threads are in
1090Sstevel@tonic-gate  *   use, but this is deemed as overkill since asyncs should occur rarely.
1100Sstevel@tonic-gate  */
1110Sstevel@tonic-gate 
1120Sstevel@tonic-gate /* Globals */
1130Sstevel@tonic-gate static char ibtf_handlers[] = "ibtl_handlers";
1140Sstevel@tonic-gate 
1150Sstevel@tonic-gate /* priority for IBTL threads (async, cq, and taskq) */
1160Sstevel@tonic-gate static pri_t ibtl_pri = MAXCLSYSPRI - 1; /* maybe override in /etc/system */
1170Sstevel@tonic-gate 
1180Sstevel@tonic-gate /* taskq used for HCA asyncs */
1190Sstevel@tonic-gate #define	ibtl_async_taskq system_taskq
1200Sstevel@tonic-gate 
1210Sstevel@tonic-gate /* data for async handling by threads */
1220Sstevel@tonic-gate static kmutex_t ibtl_async_mutex;	/* protects most *_async_* data */
1230Sstevel@tonic-gate static kcondvar_t ibtl_async_cv;	/* async_threads wait on this */
1240Sstevel@tonic-gate static kcondvar_t ibtl_clnt_cv;		/* ibt_detach might wait on this */
1250Sstevel@tonic-gate static void ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp);
1260Sstevel@tonic-gate static void ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp);
1270Sstevel@tonic-gate 
1280Sstevel@tonic-gate static kt_did_t *ibtl_async_did;	/* for thread_join() */
1297354SGiri.Adari@Sun.COM int ibtl_async_thread_init = 4;	/* total # of async_threads to create */
1300Sstevel@tonic-gate static int ibtl_async_thread_exit = 0;	/* set if/when thread(s) should exit */
1310Sstevel@tonic-gate 
1320Sstevel@tonic-gate /* async lists for various structures */
1330Sstevel@tonic-gate static ibtl_hca_devinfo_t *ibtl_async_hca_list_start, *ibtl_async_hca_list_end;
1340Sstevel@tonic-gate static ibtl_eec_t *ibtl_async_eec_list_start, *ibtl_async_eec_list_end;
1350Sstevel@tonic-gate static ibtl_qp_t *ibtl_async_qp_list_start, *ibtl_async_qp_list_end;
1360Sstevel@tonic-gate static ibtl_cq_t *ibtl_async_cq_list_start, *ibtl_async_cq_list_end;
1370Sstevel@tonic-gate static ibtl_srq_t *ibtl_async_srq_list_start, *ibtl_async_srq_list_end;
1380Sstevel@tonic-gate 
1390Sstevel@tonic-gate /* data for CQ completion handling by threads */
1400Sstevel@tonic-gate static kmutex_t ibtl_cq_mutex;	/* protects the cv and the list below */
1410Sstevel@tonic-gate static kcondvar_t ibtl_cq_cv;
1420Sstevel@tonic-gate static ibtl_cq_t *ibtl_cq_list_start, *ibtl_cq_list_end;
1430Sstevel@tonic-gate 
1440Sstevel@tonic-gate static int ibtl_cq_threads = 0;		/* total # of cq threads */
1450Sstevel@tonic-gate static int ibtl_cqs_using_threads = 0;	/* total # of cqs using threads */
1460Sstevel@tonic-gate static int ibtl_cq_thread_exit = 0;	/* set if/when thread(s) should exit */
1470Sstevel@tonic-gate 
1480Sstevel@tonic-gate /* value used to tell IBTL threads to exit */
1490Sstevel@tonic-gate #define	IBTL_THREAD_EXIT 0x1b7fdead	/* IBTF DEAD */
1509891SRajkumar.Sivaprakasam@Sun.COM /* Cisco Topspin Vendor ID for Rereg hack */
1519891SRajkumar.Sivaprakasam@Sun.COM #define	IBT_VENDOR_CISCO 0x05ad
1520Sstevel@tonic-gate 
1530Sstevel@tonic-gate int ibtl_eec_not_supported = 1;
1540Sstevel@tonic-gate 
1550Sstevel@tonic-gate char *ibtl_last_client_name;	/* may help debugging */
1569891SRajkumar.Sivaprakasam@Sun.COM typedef ibt_status_t (*ibtl_node_info_cb_t)(ib_guid_t, uint8_t, ib_lid_t,
1579891SRajkumar.Sivaprakasam@Sun.COM     ibt_node_info_t *);
1589891SRajkumar.Sivaprakasam@Sun.COM 
1599891SRajkumar.Sivaprakasam@Sun.COM ibtl_node_info_cb_t ibtl_node_info_cb;
1600Sstevel@tonic-gate 
_NOTE(LOCK_ORDER(ibtl_clnt_list_mutex ibtl_async_mutex))
1620Sstevel@tonic-gate 
1639891SRajkumar.Sivaprakasam@Sun.COM void
1649891SRajkumar.Sivaprakasam@Sun.COM ibtl_cm_set_node_info_cb(ibt_status_t (*node_info_cb)(ib_guid_t, uint8_t,
1659891SRajkumar.Sivaprakasam@Sun.COM     ib_lid_t, ibt_node_info_t *))
1669891SRajkumar.Sivaprakasam@Sun.COM {
1679891SRajkumar.Sivaprakasam@Sun.COM 	mutex_enter(&ibtl_clnt_list_mutex);
1689891SRajkumar.Sivaprakasam@Sun.COM 	ibtl_node_info_cb = node_info_cb;
1699891SRajkumar.Sivaprakasam@Sun.COM 	mutex_exit(&ibtl_clnt_list_mutex);
1709891SRajkumar.Sivaprakasam@Sun.COM }
1719891SRajkumar.Sivaprakasam@Sun.COM 
1720Sstevel@tonic-gate /*
1730Sstevel@tonic-gate  * ibc_async_handler()
1740Sstevel@tonic-gate  *
1750Sstevel@tonic-gate  * Asynchronous Event/Error Handler.
1760Sstevel@tonic-gate  *
1770Sstevel@tonic-gate  *	This is the function called HCA drivers to post various async
1780Sstevel@tonic-gate  *	event and errors mention in the IB architecture spec.  See
1790Sstevel@tonic-gate  *	ibtl_types.h for additional details of this.
1800Sstevel@tonic-gate  *
1810Sstevel@tonic-gate  *	This function marks the pertinent IBTF object with the async_code,
1820Sstevel@tonic-gate  *	and queues the object for handling by an ibtl_async_thread.  If
1830Sstevel@tonic-gate  *	the object is NOT already marked for async processing, it is added
1840Sstevel@tonic-gate  *	to the associated list for that type of object, and an
1850Sstevel@tonic-gate  *	ibtl_async_thread is signaled to finish the async work.
1860Sstevel@tonic-gate  */
void
ibc_async_handler(ibc_clnt_hdl_t hca_devp, ibt_async_code_t code,
    ibc_async_event_t *event_p)
{
	ibtl_qp_t	*ibtl_qp;
	ibtl_cq_t	*ibtl_cq;
	ibtl_srq_t	*ibtl_srq;
	ibtl_eec_t	*ibtl_eec;
	uint8_t		port_minus1;	/* 0-based port index into hd_async_port[] */

	ibtl_async_port_event_t	*portp;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler(%p, 0x%x, %p)",
	    hca_devp, code, event_p);

	/*
	 * All async bookkeeping (async_codes, async_flags, and the
	 * per-type pending lists) is protected by this one global mutex.
	 * It is held only for the short marking/queueing below; the
	 * client callbacks happen later in an ibtl_async_thread.
	 */
	mutex_enter(&ibtl_async_mutex);

	switch (code) {
	/* QP-level events: mark the QP and queue it for async handling. */
	case IBT_EVENT_PATH_MIGRATED_QP:
	case IBT_EVENT_SQD:
	case IBT_ERROR_CATASTROPHIC_QP:
	case IBT_ERROR_PATH_MIGRATE_REQ_QP:
	case IBT_EVENT_COM_EST_QP:
	case IBT_ERROR_INVALID_REQUEST_QP:
	case IBT_ERROR_ACCESS_VIOLATION_QP:
	case IBT_EVENT_EMPTY_QP:
	case IBT_FEXCH_ERROR:
		ibtl_qp = event_p->ev_qp_hdl;
		if (ibtl_qp == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad qp handle");
			break;
		}
		/* Record the FMA error-numeric-association for error codes. */
		switch (code) {
		case IBT_ERROR_CATASTROPHIC_QP:
			ibtl_qp->qp_cat_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_PATH_MIGRATE_REQ_QP:
			ibtl_qp->qp_pth_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_INVALID_REQUEST_QP:
			ibtl_qp->qp_inv_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_ACCESS_VIOLATION_QP:
			ibtl_qp->qp_acc_fma_ena = event_p->ev_fma_ena; break;
		}

		/* Accumulate the code; append to the list only if not queued. */
		ibtl_qp->qp_async_codes |= code;
		if ((ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_qp->qp_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_qp->qp_async_link = NULL;
			if (ibtl_async_qp_list_end == NULL)
				ibtl_async_qp_list_start = ibtl_qp;
			else
				ibtl_async_qp_list_end->qp_async_link = ibtl_qp;
			ibtl_async_qp_list_end = ibtl_qp;
			cv_signal(&ibtl_async_cv);
		}
		break;

	/* CQ-level event: same mark-and-queue pattern as QPs above. */
	case IBT_ERROR_CQ:
		ibtl_cq = event_p->ev_cq_hdl;
		if (ibtl_cq == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad cq handle");
			break;
		}
		ibtl_cq->cq_async_codes |= code;
		ibtl_cq->cq_fma_ena = event_p->ev_fma_ena;
		if ((ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_cq->cq_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_cq->cq_async_link = NULL;
			if (ibtl_async_cq_list_end == NULL)
				ibtl_async_cq_list_start = ibtl_cq;
			else
				ibtl_async_cq_list_end->cq_async_link = ibtl_cq;
			ibtl_async_cq_list_end = ibtl_cq;
			cv_signal(&ibtl_async_cv);
		}
		break;

	/* SRQ-level events: same mark-and-queue pattern. */
	case IBT_ERROR_CATASTROPHIC_SRQ:
	case IBT_EVENT_LIMIT_REACHED_SRQ:
		ibtl_srq = event_p->ev_srq_hdl;
		if (ibtl_srq == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad srq handle");
			break;
		}
		ibtl_srq->srq_async_codes |= code;
		ibtl_srq->srq_fma_ena = event_p->ev_fma_ena;
		if ((ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_srq->srq_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_srq->srq_async_link = NULL;
			if (ibtl_async_srq_list_end == NULL)
				ibtl_async_srq_list_start = ibtl_srq;
			else
				ibtl_async_srq_list_end->srq_async_link =
				    ibtl_srq;
			ibtl_async_srq_list_end = ibtl_srq;
			cv_signal(&ibtl_async_cv);
		}
		break;

	/*
	 * EEC-level events: gated by ibtl_eec_not_supported (set to 1
	 * at file scope, so these are normally dropped with a message).
	 */
	case IBT_EVENT_PATH_MIGRATED_EEC:
	case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
	case IBT_ERROR_CATASTROPHIC_EEC:
	case IBT_EVENT_COM_EST_EEC:
		if (ibtl_eec_not_supported) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "EEC events are disabled.");
			break;
		}
		ibtl_eec = event_p->ev_eec_hdl;
		if (ibtl_eec == NULL) {
			IBTF_DPRINTF_L2(ibtf_handlers, "ibc_async_handler: "
			    "bad eec handle");
			break;
		}
		switch (code) {
		case IBT_ERROR_PATH_MIGRATE_REQ_EEC:
			ibtl_eec->eec_pth_fma_ena = event_p->ev_fma_ena; break;
		case IBT_ERROR_CATASTROPHIC_EEC:
			ibtl_eec->eec_cat_fma_ena = event_p->ev_fma_ena; break;
		}
		ibtl_eec->eec_async_codes |= code;
		if ((ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) == 0) {
			ibtl_eec->eec_async_flags |= IBTL_ASYNC_PENDING;
			ibtl_eec->eec_async_link = NULL;
			if (ibtl_async_eec_list_end == NULL)
				ibtl_async_eec_list_start = ibtl_eec;
			else
				ibtl_async_eec_list_end->eec_async_link =
				    ibtl_eec;
			ibtl_async_eec_list_end = ibtl_eec;
			cv_signal(&ibtl_async_cv);
		}
		break;

	case IBT_ERROR_LOCAL_CATASTROPHIC:
		hca_devp->hd_async_codes |= code;
		hca_devp->hd_fma_ena = event_p->ev_fma_ena;
		/* FALLTHROUGH */

	/*
	 * HCA/port-level events.  Per-port status is collapsed so only
	 * the latest meaningful state is delivered (see comments below),
	 * then the HCA devinfo itself is queued once.
	 */
	case IBT_EVENT_PORT_UP:
	case IBT_PORT_CHANGE_EVENT:
	case IBT_CLNT_REREG_EVENT:
	case IBT_ERROR_PORT_DOWN:
		if ((code & IBT_PORT_EVENTS) != 0) {
			/* Validate the 1-based port number from the HCA. */
			if ((port_minus1 = event_p->ev_port - 1) >=
			    hca_devp->hd_hca_attr->hca_nports) {
				IBTF_DPRINTF_L2(ibtf_handlers,
				    "ibc_async_handler: bad port #: %d",
				    event_p->ev_port);
				break;
			}
			portp = &hca_devp->hd_async_port[port_minus1];
			if (code == IBT_EVENT_PORT_UP) {
				/*
				 * The port is just coming UP we can't have any
				 * valid older events.
				 */
				portp->status = IBTL_HCA_PORT_UP;
			} else if (code == IBT_ERROR_PORT_DOWN) {
				/*
				 * The port is going DOWN older events don't
				 * count.
				 */
				portp->status = IBTL_HCA_PORT_DOWN;
			} else if (code == IBT_PORT_CHANGE_EVENT) {
				/*
				 * For port UP and DOWN events only the latest
				 * event counts. If we get a UP after DOWN it
				 * is sufficient to send just UP and vice versa.
				 * In the case of port CHANGE event it is valid
				 * only when the port is UP already but if we
				 * receive it after UP but before UP is
				 * delivered we still need to deliver CHANGE
				 * after we deliver UP event.
				 *
				 * We will not get a CHANGE event when the port
				 * is down or DOWN event is pending.
				 */
				portp->flags |= event_p->ev_port_flags;
				portp->status |= IBTL_HCA_PORT_CHG;
			} else if (code == IBT_CLNT_REREG_EVENT) {
				/*
				 * SM has requested a re-register of
				 * subscription to SM events notification.
				 */
				portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
			}

			hca_devp->hd_async_codes |= code;
		}

		/* Queue the HCA devinfo once, like the QP/CQ/SRQ lists. */
		if ((hca_devp->hd_async_flags & IBTL_ASYNC_PENDING) == 0) {
			hca_devp->hd_async_flags |= IBTL_ASYNC_PENDING;
			hca_devp->hd_async_link = NULL;
			if (ibtl_async_hca_list_end == NULL)
				ibtl_async_hca_list_start = hca_devp;
			else
				ibtl_async_hca_list_end->hd_async_link =
				    hca_devp;
			ibtl_async_hca_list_end = hca_devp;
			cv_signal(&ibtl_async_cv);
		}

		break;

	default:
		IBTF_DPRINTF_L1(ibtf_handlers, "ibc_async_handler: "
		    "invalid code (0x%x)", code);
	}

	mutex_exit(&ibtl_async_mutex);
}
4010Sstevel@tonic-gate 
4020Sstevel@tonic-gate 
4030Sstevel@tonic-gate /* Finally, make the async call to the client. */
4040Sstevel@tonic-gate 
/*
 * ibtl_async_client_call()
 *
 * Deliver the async event `code'/`event_p' to the client that owns
 * `ibt_hca'.  COM_EST events are redirected to the CM's handler
 * instead of the owning client's; in that case the HCA handle passed
 * to the handler is NULL.
 */
static void
ibtl_async_client_call(ibtl_hca_t *ibt_hca, ibt_async_code_t code,
    ibt_async_event_t *event_p)
{
	ibtl_clnt_t		*clntp;
	void			*client_private;
	ibt_async_handler_t	async_handler;
	char			*client_name;

	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call(%p, 0x%x, %p)",
	    ibt_hca, code, event_p);

	clntp = ibt_hca->ha_clnt_devp;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))
	/* Record who is being called (just a debugging aid) */
	ibtl_last_client_name = client_name = clntp->clnt_name;
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_last_client_name))

	client_private = clntp->clnt_private;
	async_handler = clntp->clnt_modinfop->mi_async_handler;

	if (code & (IBT_EVENT_COM_EST_QP | IBT_EVENT_COM_EST_EEC)) {
		/*
		 * COM_EST goes to the CM, not the client.  The CM's
		 * handler/private pointers are read under the client
		 * list mutex; the call itself is made after dropping it.
		 */
		mutex_enter(&ibtl_clnt_list_mutex);
		async_handler = ibtl_cm_async_handler;
		client_private = ibtl_cm_clnt_private;
		mutex_exit(&ibtl_clnt_list_mutex);
		ibt_hca = NULL;
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "calling CM for COM_EST");
	} else {
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "calling client '%s'", client_name);
	}
	/* A client with no handler simply does not receive asyncs. */
	if (async_handler != NULL)
		async_handler(client_private, ibt_hca, code, event_p);
	else
		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_async_client_call: "
		    "client '%s' has no async handler", client_name);
}
4450Sstevel@tonic-gate 
4460Sstevel@tonic-gate /*
4470Sstevel@tonic-gate  * Inform CM or DM about HCA events.
4480Sstevel@tonic-gate  *
4490Sstevel@tonic-gate  *	We use taskqs to allow simultaneous notification, with sleeping.
4500Sstevel@tonic-gate  *	Since taskqs only allow one argument, we define a structure
4510Sstevel@tonic-gate  *	because we need to pass in more than one argument.
4520Sstevel@tonic-gate  */
4530Sstevel@tonic-gate 
/*
 * Argument bundle for manager (CM/DM) async taskq dispatches, since
 * taskq functions take a single (void *) argument.  Allocated by the
 * dispatcher, freed by the taskq function.
 */
struct ibtl_mgr_s {
	ibtl_hca_devinfo_t	*mgr_hca_devp;	/* HCA the event is for */
	ibt_async_handler_t	*mgr_async_handler;	/* handler to invoke */
	void			*mgr_clnt_private;	/* handler's private arg */
};
4590Sstevel@tonic-gate 
4600Sstevel@tonic-gate /*
4610Sstevel@tonic-gate  * Asyncs of HCA level events for CM and DM.  Call CM or DM and tell them
4620Sstevel@tonic-gate  * about the HCA for the event recorded in the ibtl_hca_devinfo_t.
4630Sstevel@tonic-gate  */
4640Sstevel@tonic-gate static void
ibtl_do_mgr_async_task(void * arg)4650Sstevel@tonic-gate ibtl_do_mgr_async_task(void *arg)
4660Sstevel@tonic-gate {
4670Sstevel@tonic-gate 	struct ibtl_mgr_s	*mgrp = (struct ibtl_mgr_s *)arg;
4680Sstevel@tonic-gate 	ibtl_hca_devinfo_t	*hca_devp = mgrp->mgr_hca_devp;
4690Sstevel@tonic-gate 
4705752Shiremath 	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_mgr_async_task(0x%x)",
4710Sstevel@tonic-gate 	    hca_devp->hd_async_code);
4720Sstevel@tonic-gate 
4730Sstevel@tonic-gate 	mgrp->mgr_async_handler(mgrp->mgr_clnt_private, NULL,
4740Sstevel@tonic-gate 	    hca_devp->hd_async_code, &hca_devp->hd_async_event);
4750Sstevel@tonic-gate 	kmem_free(mgrp, sizeof (*mgrp));
4760Sstevel@tonic-gate 
4770Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
4780Sstevel@tonic-gate 	if (--hca_devp->hd_async_task_cnt == 0)
4790Sstevel@tonic-gate 		cv_signal(&hca_devp->hd_async_task_cv);
4800Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
4810Sstevel@tonic-gate }
4820Sstevel@tonic-gate 
/*
 * Taskq worker for the Cisco Topspin embedded-SM re-register hack:
 * query the node info of the port's SM; if the SM turns out to live on
 * a Cisco switch, force a CLNT_REREG event on that port so clients
 * re-subscribe to SM event notification.
 *
 * NOTE(review): mgr_async_handler actually carries an
 * ibtl_node_info_cb_t here (cast back below), not a normal
 * ibt_async_handler_t — see ibtl_cm_get_node_info() which dispatches
 * this task.
 */
static void
ibt_cisco_embedded_sm_rereg_fix(void *arg)
{
	struct ibtl_mgr_s *mgrp = arg;
	ibtl_hca_devinfo_t *hca_devp;
	ibt_node_info_t node_info;
	ibt_status_t ibt_status;
	ibtl_async_port_event_t *portp;
	ib_lid_t sm_lid;
	ib_guid_t hca_guid;
	ibt_async_event_t *event_p;
	ibt_hca_portinfo_t *pinfop;
	uint8_t	port;

	hca_devp = mgrp->mgr_hca_devp;

	/* Snapshot the port/SM identity under the client list mutex. */
	mutex_enter(&ibtl_clnt_list_mutex);
	event_p = &hca_devp->hd_async_event;
	port = event_p->ev_port;
	portp = &hca_devp->hd_async_port[port - 1];
	pinfop = &hca_devp->hd_portinfop[port - 1];
	sm_lid = pinfop->p_sm_lid;
	hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
	mutex_exit(&ibtl_clnt_list_mutex);

	/* May block; called with no IBTL mutexes held. */
	ibt_status = ((ibtl_node_info_cb_t)mgrp->mgr_async_handler)(hca_guid,
	    port, sm_lid, &node_info);
	if (ibt_status == IBT_SUCCESS) {
		if ((node_info.n_vendor_id == IBT_VENDOR_CISCO) &&
		    (node_info.n_node_type == IBT_NODE_TYPE_SWITCH)) {
			/* Mark the port for a client re-register async. */
			mutex_enter(&ibtl_async_mutex);
			portp->status |= IBTL_HCA_PORT_ASYNC_CLNT_REREG;
			hca_devp->hd_async_codes |= IBT_CLNT_REREG_EVENT;
			mutex_exit(&ibtl_async_mutex);
		}
	}
	kmem_free(mgrp, sizeof (*mgrp));

	/* Wake anyone waiting for all outstanding async tasks to drain. */
	mutex_enter(&ibtl_clnt_list_mutex);
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}
5269891SRajkumar.Sivaprakasam@Sun.COM 
/*
 * Dispatch the Cisco embedded-SM re-register check for `hca_devp' onto
 * the async taskq.  `async_handler' is smuggled through the
 * mgr_async_handler field and is actually the node-info callback
 * (cast back in ibt_cisco_embedded_sm_rereg_fix()).
 *
 * NOTE(review): hd_async_task_cnt is incremented without an explicit
 * mutex_enter here — presumably the caller holds ibtl_clnt_list_mutex;
 * confirm against call sites.
 */
static void
ibtl_cm_get_node_info(ibtl_hca_devinfo_t *hca_devp,
    ibt_async_handler_t async_handler)
{
	struct ibtl_mgr_s *mgrp;

	/* Nothing to do if no node-info callback has been registered. */
	if (async_handler == NULL)
		return;

	_NOTE(NO_COMPETING_THREADS_NOW)
	mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
	mgrp->mgr_hca_devp = hca_devp;
	mgrp->mgr_async_handler = async_handler;
	mgrp->mgr_clnt_private = NULL;
	/* Count the task so ibt_detach-style waiters can drain it. */
	hca_devp->hd_async_task_cnt++;

	(void) taskq_dispatch(ibtl_async_taskq,
	    ibt_cisco_embedded_sm_rereg_fix, mgrp, TQ_SLEEP);
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}
5499891SRajkumar.Sivaprakasam@Sun.COM 
5509891SRajkumar.Sivaprakasam@Sun.COM static void
ibtl_tell_mgr(ibtl_hca_devinfo_t * hca_devp,ibt_async_handler_t async_handler,void * clnt_private)5510Sstevel@tonic-gate ibtl_tell_mgr(ibtl_hca_devinfo_t *hca_devp, ibt_async_handler_t async_handler,
5520Sstevel@tonic-gate     void *clnt_private)
5530Sstevel@tonic-gate {
5540Sstevel@tonic-gate 	struct ibtl_mgr_s *mgrp;
5550Sstevel@tonic-gate 
5560Sstevel@tonic-gate 	if (async_handler == NULL)
5570Sstevel@tonic-gate 		return;
5580Sstevel@tonic-gate 
5590Sstevel@tonic-gate 	_NOTE(NO_COMPETING_THREADS_NOW)
5600Sstevel@tonic-gate 	mgrp = kmem_alloc(sizeof (*mgrp), KM_SLEEP);
5610Sstevel@tonic-gate 	mgrp->mgr_hca_devp = hca_devp;
5620Sstevel@tonic-gate 	mgrp->mgr_async_handler = async_handler;
5630Sstevel@tonic-gate 	mgrp->mgr_clnt_private = clnt_private;
5640Sstevel@tonic-gate 	hca_devp->hd_async_task_cnt++;
5650Sstevel@tonic-gate 
5660Sstevel@tonic-gate 	(void) taskq_dispatch(ibtl_async_taskq, ibtl_do_mgr_async_task, mgrp,
5670Sstevel@tonic-gate 	    TQ_SLEEP);
5682840Scarlsonj #ifndef lint
5690Sstevel@tonic-gate 	_NOTE(COMPETING_THREADS_NOW)
5702840Scarlsonj #endif
5710Sstevel@tonic-gate }
5720Sstevel@tonic-gate 
5730Sstevel@tonic-gate /*
5740Sstevel@tonic-gate  * Per client-device asyncs for HCA level events.  Call each client that is
5750Sstevel@tonic-gate  * using the HCA for the event recorded in the ibtl_hca_devinfo_t.
5760Sstevel@tonic-gate  */
5770Sstevel@tonic-gate static void
ibtl_hca_client_async_task(void * arg)5780Sstevel@tonic-gate ibtl_hca_client_async_task(void *arg)
5790Sstevel@tonic-gate {
5800Sstevel@tonic-gate 	ibtl_hca_t		*ibt_hca = (ibtl_hca_t *)arg;
5810Sstevel@tonic-gate 	ibtl_hca_devinfo_t	*hca_devp = ibt_hca->ha_hca_devp;
5820Sstevel@tonic-gate 	ibtl_clnt_t		*clntp = ibt_hca->ha_clnt_devp;
5830Sstevel@tonic-gate 	ibt_async_event_t	async_event;
5840Sstevel@tonic-gate 
5850Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_hca_client_async_task(%p, 0x%x)",
5860Sstevel@tonic-gate 	    ibt_hca, hca_devp->hd_async_code);
5870Sstevel@tonic-gate 
5880Sstevel@tonic-gate 	bcopy(&hca_devp->hd_async_event, &async_event, sizeof (async_event));
5890Sstevel@tonic-gate 	ibtl_async_client_call(ibt_hca, hca_devp->hd_async_code, &async_event);
5900Sstevel@tonic-gate 
5910Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
5920Sstevel@tonic-gate 	if (--ibt_hca->ha_async_cnt == 0 &&
5930Sstevel@tonic-gate 	    (ibt_hca->ha_async_flags & IBTL_ASYNC_FREE_OBJECT)) {
5940Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
5950Sstevel@tonic-gate 		kmem_free(ibt_hca, sizeof (ibtl_hca_t));
5960Sstevel@tonic-gate 	} else
5970Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
5980Sstevel@tonic-gate 
5990Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
6000Sstevel@tonic-gate 	if (--hca_devp->hd_async_task_cnt == 0)
6010Sstevel@tonic-gate 		cv_signal(&hca_devp->hd_async_task_cv);
6020Sstevel@tonic-gate 	if (--clntp->clnt_async_cnt == 0)
6030Sstevel@tonic-gate 		cv_broadcast(&ibtl_clnt_cv);
6040Sstevel@tonic-gate 
6050Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
6060Sstevel@tonic-gate }
6070Sstevel@tonic-gate 
6080Sstevel@tonic-gate /*
6090Sstevel@tonic-gate  * Asyncs for HCA level events.
6100Sstevel@tonic-gate  *
6110Sstevel@tonic-gate  * The function continues to run until there are no more async
6120Sstevel@tonic-gate  * events/errors for this HCA.  An event is chosen for dispatch
6130Sstevel@tonic-gate  * to all clients of this HCA.  This thread dispatches them via
6140Sstevel@tonic-gate  * the ibtl_async_taskq, then sleeps until all tasks are done.
6150Sstevel@tonic-gate  *
6160Sstevel@tonic-gate  * This thread records the async_code and async_event in the
6170Sstevel@tonic-gate  * ibtl_hca_devinfo_t for all client taskq threads to reference.
6180Sstevel@tonic-gate  *
6190Sstevel@tonic-gate  * This is called from an async or taskq thread with ibtl_async_mutex held.
6200Sstevel@tonic-gate  */
static void
ibtl_do_hca_asyncs(ibtl_hca_devinfo_t *hca_devp)
{
	ibtl_hca_t			*ibt_hca;
	ibt_async_event_t		*eventp;
	ibt_async_code_t		code;
	ibtl_async_port_status_t  	temp;
	uint8_t				nports;
	uint8_t				port_minus1;
	ibtl_async_port_event_t		*portp;

	mutex_exit(&ibtl_async_mutex);

	/*
	 * Serialize async processing for this HCA: only one thread at a
	 * time may own hd_async_busy.  All waiting below is done under
	 * ibtl_clnt_list_mutex.
	 */
	mutex_enter(&ibtl_clnt_list_mutex);
	while (hca_devp->hd_async_busy)
		cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
	hca_devp->hd_async_busy = 1;
	mutex_enter(&ibtl_async_mutex);

	bzero(&hca_devp->hd_async_event, sizeof (hca_devp->hd_async_event));
	for (;;) {

		hca_devp->hd_async_event.ev_fma_ena = 0;

		/*
		 * Decode hd_async_codes into the single highest-priority
		 * event to deliver on this pass; one event is fully
		 * dispatched per loop iteration.  For port-level events,
		 * "temp" records the matching per-port status bit.
		 */
		code = hca_devp->hd_async_codes;
		if (code & IBT_ERROR_LOCAL_CATASTROPHIC) {
			code = IBT_ERROR_LOCAL_CATASTROPHIC;
			hca_devp->hd_async_event.ev_fma_ena =
			    hca_devp->hd_fma_ena;
		} else if (code & IBT_ERROR_PORT_DOWN) {
			code = IBT_ERROR_PORT_DOWN;
			temp = IBTL_HCA_PORT_DOWN;
		} else if (code & IBT_EVENT_PORT_UP) {
			code = IBT_EVENT_PORT_UP;
			temp = IBTL_HCA_PORT_UP;
		} else if (code & IBT_PORT_CHANGE_EVENT) {
			code = IBT_PORT_CHANGE_EVENT;
			temp = IBTL_HCA_PORT_CHG;
		} else if (code & IBT_CLNT_REREG_EVENT) {
			code = IBT_CLNT_REREG_EVENT;
			temp = IBTL_HCA_PORT_ASYNC_CLNT_REREG;
		} else {
			/* nothing recognized is pending; clear and stop */
			hca_devp->hd_async_codes = 0;
			code = 0;
		}

		if (code == 0) {
			hca_devp->hd_async_flags &= ~IBTL_ASYNC_PENDING;
			break;
		}
		hca_devp->hd_async_codes &= ~code;

		/* PORT_UP, PORT_CHANGE, PORT_DOWN or ASYNC_REREG */
		if ((code & IBT_PORT_EVENTS) != 0) {
			/* scan for the first port with this event pending */
			portp = hca_devp->hd_async_port;
			nports = hca_devp->hd_hca_attr->hca_nports;
			for (port_minus1 = 0; port_minus1 < nports;
			    port_minus1++) {
				/*
				 * Matching event in this port, let's go handle
				 * it.
				 */
				if ((portp[port_minus1].status & temp) != 0)
					break;
			}
			if (port_minus1 >= nports) {
				/* we checked again, but found nothing */
				continue;
			}
			IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_do_hca_asyncs: "
			    "async: port# %x code %x", port_minus1 + 1, code);
			/* mark it to check for other ports after we're done */
			hca_devp->hd_async_codes |= code;

			/*
			 * Copy the event information into hca_devp and clear
			 * event information from the per port data.
			 */
			hca_devp->hd_async_event.ev_port = port_minus1 + 1;
			if (temp == IBTL_HCA_PORT_CHG) {
				hca_devp->hd_async_event.ev_port_flags =
				    hca_devp->hd_async_port[port_minus1].flags;
				hca_devp->hd_async_port[port_minus1].flags = 0;
			}
			hca_devp->hd_async_port[port_minus1].status &= ~temp;

			/*
			 * Refresh the cached portinfo; the async mutex must
			 * be dropped across that call.
			 */
			mutex_exit(&ibtl_async_mutex);
			ibtl_reinit_hca_portinfo(hca_devp, port_minus1 + 1);
			mutex_enter(&ibtl_async_mutex);
			eventp = &hca_devp->hd_async_event;
			eventp->ev_hca_guid =
			    hca_devp->hd_hca_attr->hca_node_guid;
		}

		/*
		 * Record the chosen code/event in hca_devp for the taskq
		 * threads (ibtl_hca_client_async_task) to read.
		 */
		hca_devp->hd_async_code = code;
		hca_devp->hd_async_event.ev_hca_guid =
		    hca_devp->hd_hca_attr->hca_node_guid;
		mutex_exit(&ibtl_async_mutex);

		/*
		 * Make sure to inform CM, DM, and IBMA if we know of them.
		 * Also, make sure not to inform them a second time, which
		 * would occur if they have the HCA open.
		 */

		if (ibtl_ibma_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_ibma_async_handler,
			    ibtl_ibma_clnt_private);
		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		/*
		 * Hack Alert:
		 * The ibmf handler would have updated the Master SM LID if it
		 * was SM LID change event. Now lets check if the new Master SM
		 * is a Embedded Cisco Topspin SM.
		 * (eventp is set whenever code is a port event, which
		 * IBT_PORT_CHANGE_EVENT always is.)
		 */
		if ((code == IBT_PORT_CHANGE_EVENT) &&
		    eventp->ev_port_flags & IBT_PORT_CHANGE_SM_LID)
			ibtl_cm_get_node_info(hca_devp,
			    (ibt_async_handler_t)ibtl_node_info_cb);
		/* wait for node info task to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		if (ibtl_dm_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_dm_async_handler,
			    ibtl_dm_clnt_private);
		if (ibtl_cm_async_handler)
			ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
			    ibtl_cm_clnt_private);
		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		/*
		 * Fan the event out to each ordinary client of this HCA,
		 * one taskq task per open handle.
		 */
		for (ibt_hca = hca_devp->hd_clnt_list;
		    ibt_hca != NULL;
		    ibt_hca = ibt_hca->ha_clnt_link) {

			/* Managers are handled above */
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_cm_async_handler)
				continue;
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_dm_async_handler)
				continue;
			if (IBTL_HCA2MODI_P(ibt_hca)->mi_async_handler ==
			    ibtl_ibma_async_handler)
				continue;
			++ibt_hca->ha_clnt_devp->clnt_async_cnt;

			mutex_enter(&ibtl_async_mutex);
			ibt_hca->ha_async_cnt++;
			mutex_exit(&ibtl_async_mutex);
			hca_devp->hd_async_task_cnt++;
			(void) taskq_dispatch(ibtl_async_taskq,
			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
		}

		/* wait for all tasks to complete */
		while (hca_devp->hd_async_task_cnt != 0)
			cv_wait(&hca_devp->hd_async_task_cv,
			    &ibtl_clnt_list_mutex);

		mutex_enter(&ibtl_async_mutex);
	}
	/* done: release the busy gate and wake any waiting thread */
	hca_devp->hd_async_code = 0;
	hca_devp->hd_async_busy = 0;
	cv_broadcast(&hca_devp->hd_async_busy_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}
7960Sstevel@tonic-gate 
7970Sstevel@tonic-gate /*
7980Sstevel@tonic-gate  * Asyncs for QP objects.
7990Sstevel@tonic-gate  *
8000Sstevel@tonic-gate  * The function continues to run until there are no more async
8010Sstevel@tonic-gate  * events/errors for this object.
8020Sstevel@tonic-gate  */
8030Sstevel@tonic-gate static void
ibtl_do_qp_asyncs(ibtl_qp_t * ibtl_qp)8040Sstevel@tonic-gate ibtl_do_qp_asyncs(ibtl_qp_t *ibtl_qp)
8050Sstevel@tonic-gate {
8060Sstevel@tonic-gate 	ibt_async_code_t	code;
8070Sstevel@tonic-gate 	ibt_async_event_t	async_event;
8080Sstevel@tonic-gate 
8090Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
8100Sstevel@tonic-gate 	bzero(&async_event, sizeof (async_event));
8110Sstevel@tonic-gate 	async_event.ev_chan_hdl = IBTL_QP2CHAN(ibtl_qp);
8120Sstevel@tonic-gate 
8130Sstevel@tonic-gate 	while ((code = ibtl_qp->qp_async_codes) != 0) {
8140Sstevel@tonic-gate 		async_event.ev_fma_ena = 0;
8150Sstevel@tonic-gate 		if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT)
8160Sstevel@tonic-gate 			code = 0;	/* fallthrough to "kmem_free" */
8170Sstevel@tonic-gate 		else if (code & IBT_ERROR_CATASTROPHIC_QP) {
8180Sstevel@tonic-gate 			code = IBT_ERROR_CATASTROPHIC_QP;
8190Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_qp->qp_cat_fma_ena;
8200Sstevel@tonic-gate 		} else if (code & IBT_ERROR_INVALID_REQUEST_QP) {
8210Sstevel@tonic-gate 			code = IBT_ERROR_INVALID_REQUEST_QP;
8220Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_qp->qp_inv_fma_ena;
8230Sstevel@tonic-gate 		} else if (code & IBT_ERROR_ACCESS_VIOLATION_QP) {
8240Sstevel@tonic-gate 			code = IBT_ERROR_ACCESS_VIOLATION_QP;
8250Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_qp->qp_acc_fma_ena;
8260Sstevel@tonic-gate 		} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_QP) {
8270Sstevel@tonic-gate 			code = IBT_ERROR_PATH_MIGRATE_REQ_QP;
8280Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_qp->qp_pth_fma_ena;
8290Sstevel@tonic-gate 		} else if (code & IBT_EVENT_PATH_MIGRATED_QP)
8300Sstevel@tonic-gate 			code = IBT_EVENT_PATH_MIGRATED_QP;
8310Sstevel@tonic-gate 		else if (code & IBT_EVENT_SQD)
8320Sstevel@tonic-gate 			code = IBT_EVENT_SQD;
8330Sstevel@tonic-gate 		else if (code & IBT_EVENT_COM_EST_QP)
8340Sstevel@tonic-gate 			code = IBT_EVENT_COM_EST_QP;
8350Sstevel@tonic-gate 		else if (code & IBT_EVENT_EMPTY_QP)
8360Sstevel@tonic-gate 			code = IBT_EVENT_EMPTY_QP;
8370Sstevel@tonic-gate 		else {
8380Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_qp_asyncs: "
8390Sstevel@tonic-gate 			    "async: unexpected QP async code 0x%x", code);
8400Sstevel@tonic-gate 			ibtl_qp->qp_async_codes = 0;
8410Sstevel@tonic-gate 			code = 0;
8420Sstevel@tonic-gate 		}
8430Sstevel@tonic-gate 		ibtl_qp->qp_async_codes &= ~code;
8440Sstevel@tonic-gate 
8450Sstevel@tonic-gate 		if (code) {
8460Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
8470Sstevel@tonic-gate 			ibtl_async_client_call(ibtl_qp->qp_hca,
8480Sstevel@tonic-gate 			    code, &async_event);
8490Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
8500Sstevel@tonic-gate 		}
8510Sstevel@tonic-gate 
8520Sstevel@tonic-gate 		if (ibtl_qp->qp_async_flags & IBTL_ASYNC_FREE_OBJECT) {
8530Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
8540Sstevel@tonic-gate 			cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
8550Sstevel@tonic-gate 			mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
8560Sstevel@tonic-gate 			kmem_free(IBTL_QP2CHAN(ibtl_qp),
8570Sstevel@tonic-gate 			    sizeof (ibtl_channel_t));
8580Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
8590Sstevel@tonic-gate 			return;
8600Sstevel@tonic-gate 		}
8610Sstevel@tonic-gate 	}
8620Sstevel@tonic-gate 	ibtl_qp->qp_async_flags &= ~IBTL_ASYNC_PENDING;
8630Sstevel@tonic-gate }
8640Sstevel@tonic-gate 
8650Sstevel@tonic-gate /*
8660Sstevel@tonic-gate  * Asyncs for SRQ objects.
8670Sstevel@tonic-gate  *
8680Sstevel@tonic-gate  * The function continues to run until there are no more async
8690Sstevel@tonic-gate  * events/errors for this object.
8700Sstevel@tonic-gate  */
8710Sstevel@tonic-gate static void
ibtl_do_srq_asyncs(ibtl_srq_t * ibtl_srq)8720Sstevel@tonic-gate ibtl_do_srq_asyncs(ibtl_srq_t *ibtl_srq)
8730Sstevel@tonic-gate {
8740Sstevel@tonic-gate 	ibt_async_code_t	code;
8750Sstevel@tonic-gate 	ibt_async_event_t	async_event;
8760Sstevel@tonic-gate 
8770Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
8780Sstevel@tonic-gate 	bzero(&async_event, sizeof (async_event));
8790Sstevel@tonic-gate 	async_event.ev_srq_hdl = ibtl_srq;
8800Sstevel@tonic-gate 	async_event.ev_fma_ena = ibtl_srq->srq_fma_ena;
8810Sstevel@tonic-gate 
8820Sstevel@tonic-gate 	while ((code = ibtl_srq->srq_async_codes) != 0) {
8830Sstevel@tonic-gate 		if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT)
8840Sstevel@tonic-gate 			code = 0;	/* fallthrough to "kmem_free" */
8850Sstevel@tonic-gate 		else if (code & IBT_ERROR_CATASTROPHIC_SRQ)
8860Sstevel@tonic-gate 			code = IBT_ERROR_CATASTROPHIC_SRQ;
8870Sstevel@tonic-gate 		else if (code & IBT_EVENT_LIMIT_REACHED_SRQ)
8880Sstevel@tonic-gate 			code = IBT_EVENT_LIMIT_REACHED_SRQ;
8890Sstevel@tonic-gate 		else {
8900Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_srq_asyncs: "
8910Sstevel@tonic-gate 			    "async: unexpected SRQ async code 0x%x", code);
8920Sstevel@tonic-gate 			ibtl_srq->srq_async_codes = 0;
8930Sstevel@tonic-gate 			code = 0;
8940Sstevel@tonic-gate 		}
8950Sstevel@tonic-gate 		ibtl_srq->srq_async_codes &= ~code;
8960Sstevel@tonic-gate 
8970Sstevel@tonic-gate 		if (code) {
8980Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
8990Sstevel@tonic-gate 			ibtl_async_client_call(ibtl_srq->srq_hca,
9000Sstevel@tonic-gate 			    code, &async_event);
9010Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
9020Sstevel@tonic-gate 		}
9030Sstevel@tonic-gate 
9040Sstevel@tonic-gate 		if (ibtl_srq->srq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
9050Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
9060Sstevel@tonic-gate 			kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
9070Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
9080Sstevel@tonic-gate 			return;
9090Sstevel@tonic-gate 		}
9100Sstevel@tonic-gate 	}
9110Sstevel@tonic-gate 	ibtl_srq->srq_async_flags &= ~IBTL_ASYNC_PENDING;
9120Sstevel@tonic-gate }
9130Sstevel@tonic-gate 
9140Sstevel@tonic-gate /*
9150Sstevel@tonic-gate  * Asyncs for CQ objects.
9160Sstevel@tonic-gate  *
9170Sstevel@tonic-gate  * The function continues to run until there are no more async
9180Sstevel@tonic-gate  * events/errors for this object.
9190Sstevel@tonic-gate  */
9200Sstevel@tonic-gate static void
ibtl_do_cq_asyncs(ibtl_cq_t * ibtl_cq)9210Sstevel@tonic-gate ibtl_do_cq_asyncs(ibtl_cq_t *ibtl_cq)
9220Sstevel@tonic-gate {
9230Sstevel@tonic-gate 	ibt_async_code_t	code;
9240Sstevel@tonic-gate 	ibt_async_event_t	async_event;
9250Sstevel@tonic-gate 
9260Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
9270Sstevel@tonic-gate 	bzero(&async_event, sizeof (async_event));
9280Sstevel@tonic-gate 	async_event.ev_cq_hdl = ibtl_cq;
9290Sstevel@tonic-gate 	async_event.ev_fma_ena = ibtl_cq->cq_fma_ena;
9300Sstevel@tonic-gate 
9310Sstevel@tonic-gate 	while ((code = ibtl_cq->cq_async_codes) != 0) {
9320Sstevel@tonic-gate 		if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT)
9330Sstevel@tonic-gate 			code = 0;	/* fallthrough to "kmem_free" */
9340Sstevel@tonic-gate 		else if (code & IBT_ERROR_CQ)
9350Sstevel@tonic-gate 			code = IBT_ERROR_CQ;
9360Sstevel@tonic-gate 		else {
9370Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_cq_asyncs: "
9380Sstevel@tonic-gate 			    "async: unexpected CQ async code 0x%x", code);
9390Sstevel@tonic-gate 			ibtl_cq->cq_async_codes = 0;
9400Sstevel@tonic-gate 			code = 0;
9410Sstevel@tonic-gate 		}
9420Sstevel@tonic-gate 		ibtl_cq->cq_async_codes &= ~code;
9430Sstevel@tonic-gate 
9440Sstevel@tonic-gate 		if (code) {
9450Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
9460Sstevel@tonic-gate 			ibtl_async_client_call(ibtl_cq->cq_hca,
9470Sstevel@tonic-gate 			    code, &async_event);
9480Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
9490Sstevel@tonic-gate 		}
9500Sstevel@tonic-gate 
9510Sstevel@tonic-gate 		if (ibtl_cq->cq_async_flags & IBTL_ASYNC_FREE_OBJECT) {
9520Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
9530Sstevel@tonic-gate 			mutex_destroy(&ibtl_cq->cq_mutex);
9540Sstevel@tonic-gate 			kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
9550Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
9560Sstevel@tonic-gate 			return;
9570Sstevel@tonic-gate 		}
9580Sstevel@tonic-gate 	}
9590Sstevel@tonic-gate 	ibtl_cq->cq_async_flags &= ~IBTL_ASYNC_PENDING;
9600Sstevel@tonic-gate }
9610Sstevel@tonic-gate 
9620Sstevel@tonic-gate /*
9630Sstevel@tonic-gate  * Asyncs for EEC objects.
9640Sstevel@tonic-gate  *
9650Sstevel@tonic-gate  * The function continues to run until there are no more async
9660Sstevel@tonic-gate  * events/errors for this object.
9670Sstevel@tonic-gate  */
9680Sstevel@tonic-gate static void
ibtl_do_eec_asyncs(ibtl_eec_t * ibtl_eec)9690Sstevel@tonic-gate ibtl_do_eec_asyncs(ibtl_eec_t *ibtl_eec)
9700Sstevel@tonic-gate {
9710Sstevel@tonic-gate 	ibt_async_code_t	code;
9720Sstevel@tonic-gate 	ibt_async_event_t	async_event;
9730Sstevel@tonic-gate 
9740Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_async_mutex));
9750Sstevel@tonic-gate 	bzero(&async_event, sizeof (async_event));
9760Sstevel@tonic-gate 	async_event.ev_chan_hdl = ibtl_eec->eec_channel;
9770Sstevel@tonic-gate 
9780Sstevel@tonic-gate 	while ((code = ibtl_eec->eec_async_codes) != 0) {
9790Sstevel@tonic-gate 		async_event.ev_fma_ena = 0;
9800Sstevel@tonic-gate 		if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT)
9810Sstevel@tonic-gate 			code = 0;	/* fallthrough to "kmem_free" */
9820Sstevel@tonic-gate 		else if (code & IBT_ERROR_CATASTROPHIC_EEC) {
9830Sstevel@tonic-gate 			code = IBT_ERROR_CATASTROPHIC_CHAN;
9840Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_eec->eec_cat_fma_ena;
9850Sstevel@tonic-gate 		} else if (code & IBT_ERROR_PATH_MIGRATE_REQ_EEC) {
9860Sstevel@tonic-gate 			code = IBT_ERROR_PATH_MIGRATE_REQ;
9870Sstevel@tonic-gate 			async_event.ev_fma_ena = ibtl_eec->eec_pth_fma_ena;
9880Sstevel@tonic-gate 		} else if (code & IBT_EVENT_PATH_MIGRATED_EEC)
9890Sstevel@tonic-gate 			code = IBT_EVENT_PATH_MIGRATED;
9900Sstevel@tonic-gate 		else if (code & IBT_EVENT_COM_EST_EEC)
9910Sstevel@tonic-gate 			code = IBT_EVENT_COM_EST;
9920Sstevel@tonic-gate 		else {
9930Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_do_eec_asyncs: "
9940Sstevel@tonic-gate 			    "async: unexpected code 0x%x", code);
9950Sstevel@tonic-gate 			ibtl_eec->eec_async_codes = 0;
9960Sstevel@tonic-gate 			code = 0;
9970Sstevel@tonic-gate 		}
9980Sstevel@tonic-gate 		ibtl_eec->eec_async_codes &= ~code;
9990Sstevel@tonic-gate 
10000Sstevel@tonic-gate 		if (code) {
10010Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
10020Sstevel@tonic-gate 			ibtl_async_client_call(ibtl_eec->eec_hca,
10030Sstevel@tonic-gate 			    code, &async_event);
10040Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
10050Sstevel@tonic-gate 		}
10060Sstevel@tonic-gate 
10070Sstevel@tonic-gate 		if (ibtl_eec->eec_async_flags & IBTL_ASYNC_FREE_OBJECT) {
10080Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
10090Sstevel@tonic-gate 			kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
10100Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
10110Sstevel@tonic-gate 			return;
10120Sstevel@tonic-gate 		}
10130Sstevel@tonic-gate 	}
10140Sstevel@tonic-gate 	ibtl_eec->eec_async_flags &= ~IBTL_ASYNC_PENDING;
10150Sstevel@tonic-gate }
10160Sstevel@tonic-gate 
10170Sstevel@tonic-gate #ifdef __lock_lint
10180Sstevel@tonic-gate kmutex_t cpr_mutex;
10190Sstevel@tonic-gate #endif
10200Sstevel@tonic-gate 
10210Sstevel@tonic-gate /*
10220Sstevel@tonic-gate  * Loop forever, calling async_handlers until all of the async lists
10230Sstevel@tonic-gate  * are empty.
10240Sstevel@tonic-gate  */
10250Sstevel@tonic-gate 
static void
ibtl_async_thread(void)
{
#ifndef __lock_lint
	kmutex_t cpr_mutex;
#endif
	callb_cpr_t	cprinfo;

	/* Register with CPR (suspend/resume) using a private mutex. */
	_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
	_NOTE(NO_COMPETING_THREADS_NOW)
	mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
	    "ibtl_async_thread");
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif

	mutex_enter(&ibtl_async_mutex);

	/*
	 * Drain the pending-async lists in fixed priority order:
	 * HCA, then QP, SRQ, EEC, and finally CQ.  Each iteration
	 * removes one object from the head of the first non-empty
	 * list and processes all of its pending asyncs.
	 */
	for (;;) {
		if (ibtl_async_hca_list_start) {
			ibtl_hca_devinfo_t *hca_devp;

			/* remove first entry from list */
			hca_devp = ibtl_async_hca_list_start;
			ibtl_async_hca_list_start = hca_devp->hd_async_link;
			hca_devp->hd_async_link = NULL;
			if (ibtl_async_hca_list_start == NULL)
				ibtl_async_hca_list_end = NULL;

			ibtl_do_hca_asyncs(hca_devp);

		} else if (ibtl_async_qp_list_start) {
			ibtl_qp_t *ibtl_qp;

			/* remove from list */
			ibtl_qp = ibtl_async_qp_list_start;
			ibtl_async_qp_list_start = ibtl_qp->qp_async_link;
			ibtl_qp->qp_async_link = NULL;
			if (ibtl_async_qp_list_start == NULL)
				ibtl_async_qp_list_end = NULL;

			ibtl_do_qp_asyncs(ibtl_qp);

		} else if (ibtl_async_srq_list_start) {
			ibtl_srq_t *ibtl_srq;

			/* remove from list */
			ibtl_srq = ibtl_async_srq_list_start;
			ibtl_async_srq_list_start = ibtl_srq->srq_async_link;
			ibtl_srq->srq_async_link = NULL;
			if (ibtl_async_srq_list_start == NULL)
				ibtl_async_srq_list_end = NULL;

			ibtl_do_srq_asyncs(ibtl_srq);

		} else if (ibtl_async_eec_list_start) {
			ibtl_eec_t *ibtl_eec;

			/* remove from list */
			ibtl_eec = ibtl_async_eec_list_start;
			ibtl_async_eec_list_start = ibtl_eec->eec_async_link;
			ibtl_eec->eec_async_link = NULL;
			if (ibtl_async_eec_list_start == NULL)
				ibtl_async_eec_list_end = NULL;

			ibtl_do_eec_asyncs(ibtl_eec);

		} else if (ibtl_async_cq_list_start) {
			ibtl_cq_t *ibtl_cq;

			/* remove from list */
			ibtl_cq = ibtl_async_cq_list_start;
			ibtl_async_cq_list_start = ibtl_cq->cq_async_link;
			ibtl_cq->cq_async_link = NULL;
			if (ibtl_async_cq_list_start == NULL)
				ibtl_async_cq_list_end = NULL;

			ibtl_do_cq_asyncs(ibtl_cq);

		} else {
			/* all lists empty: exit if asked, otherwise sleep */
			if (ibtl_async_thread_exit == IBTL_THREAD_EXIT)
				break;
			/* mark ourselves CPR-safe while we wait for work */
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			mutex_exit(&cpr_mutex);

			cv_wait(&ibtl_async_cv, &ibtl_async_mutex);

			mutex_exit(&ibtl_async_mutex);
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
			mutex_exit(&cpr_mutex);
			mutex_enter(&ibtl_async_mutex);
		}
	}

	mutex_exit(&ibtl_async_mutex);

	/* deregister from CPR; CALLB_CPR_EXIT releases cpr_mutex */
#ifndef __lock_lint
	mutex_enter(&cpr_mutex);
	CALLB_CPR_EXIT(&cprinfo);
#endif
	mutex_destroy(&cpr_mutex);
}
11310Sstevel@tonic-gate 
11320Sstevel@tonic-gate 
11330Sstevel@tonic-gate void
ibtl_free_qp_async_check(ibtl_qp_t * ibtl_qp)11340Sstevel@tonic-gate ibtl_free_qp_async_check(ibtl_qp_t *ibtl_qp)
11350Sstevel@tonic-gate {
11360Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_qp_async_check(%p)", ibtl_qp);
11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
11390Sstevel@tonic-gate 
11400Sstevel@tonic-gate 	/*
11410Sstevel@tonic-gate 	 * If there is an active async, mark this object to be freed
11420Sstevel@tonic-gate 	 * by the async_thread when it's done.
11430Sstevel@tonic-gate 	 */
11440Sstevel@tonic-gate 	if (ibtl_qp->qp_async_flags & IBTL_ASYNC_PENDING) {
11450Sstevel@tonic-gate 		ibtl_qp->qp_async_flags |= IBTL_ASYNC_FREE_OBJECT;
11460Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11470Sstevel@tonic-gate 	} else {	/* free the object now */
11480Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11490Sstevel@tonic-gate 		cv_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_cv);
11500Sstevel@tonic-gate 		mutex_destroy(&(IBTL_QP2CHAN(ibtl_qp))->ch_cm_mutex);
11510Sstevel@tonic-gate 		kmem_free(IBTL_QP2CHAN(ibtl_qp), sizeof (ibtl_channel_t));
11520Sstevel@tonic-gate 	}
11530Sstevel@tonic-gate }
11540Sstevel@tonic-gate 
11550Sstevel@tonic-gate void
ibtl_free_cq_async_check(ibtl_cq_t * ibtl_cq)11560Sstevel@tonic-gate ibtl_free_cq_async_check(ibtl_cq_t *ibtl_cq)
11570Sstevel@tonic-gate {
11580Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_cq_async_check(%p)", ibtl_cq);
11590Sstevel@tonic-gate 
11600Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
11610Sstevel@tonic-gate 
11620Sstevel@tonic-gate 	/* if there is an active async, mark this object to be freed */
11630Sstevel@tonic-gate 	if (ibtl_cq->cq_async_flags & IBTL_ASYNC_PENDING) {
11640Sstevel@tonic-gate 		ibtl_cq->cq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
11650Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11660Sstevel@tonic-gate 	} else {	/* free the object now */
11670Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11680Sstevel@tonic-gate 		mutex_destroy(&ibtl_cq->cq_mutex);
11690Sstevel@tonic-gate 		kmem_free(ibtl_cq, sizeof (struct ibtl_cq_s));
11700Sstevel@tonic-gate 	}
11710Sstevel@tonic-gate }
11720Sstevel@tonic-gate 
11730Sstevel@tonic-gate void
ibtl_free_srq_async_check(ibtl_srq_t * ibtl_srq)11740Sstevel@tonic-gate ibtl_free_srq_async_check(ibtl_srq_t *ibtl_srq)
11750Sstevel@tonic-gate {
11760Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_srq_async_check(%p)",
11770Sstevel@tonic-gate 	    ibtl_srq);
11780Sstevel@tonic-gate 
11790Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
11800Sstevel@tonic-gate 
11810Sstevel@tonic-gate 	/* if there is an active async, mark this object to be freed */
11820Sstevel@tonic-gate 	if (ibtl_srq->srq_async_flags & IBTL_ASYNC_PENDING) {
11830Sstevel@tonic-gate 		ibtl_srq->srq_async_flags |= IBTL_ASYNC_FREE_OBJECT;
11840Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11850Sstevel@tonic-gate 	} else {	/* free the object now */
11860Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
11870Sstevel@tonic-gate 		kmem_free(ibtl_srq, sizeof (struct ibtl_srq_s));
11880Sstevel@tonic-gate 	}
11890Sstevel@tonic-gate }
11900Sstevel@tonic-gate 
11910Sstevel@tonic-gate void
ibtl_free_eec_async_check(ibtl_eec_t * ibtl_eec)11920Sstevel@tonic-gate ibtl_free_eec_async_check(ibtl_eec_t *ibtl_eec)
11930Sstevel@tonic-gate {
11940Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_eec_async_check(%p)",
11950Sstevel@tonic-gate 	    ibtl_eec);
11960Sstevel@tonic-gate 
11970Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
11980Sstevel@tonic-gate 
11990Sstevel@tonic-gate 	/* if there is an active async, mark this object to be freed */
12000Sstevel@tonic-gate 	if (ibtl_eec->eec_async_flags & IBTL_ASYNC_PENDING) {
12010Sstevel@tonic-gate 		ibtl_eec->eec_async_flags |= IBTL_ASYNC_FREE_OBJECT;
12020Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
12030Sstevel@tonic-gate 	} else {	/* free the object now */
12040Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
12050Sstevel@tonic-gate 		kmem_free(ibtl_eec, sizeof (struct ibtl_eec_s));
12060Sstevel@tonic-gate 	}
12070Sstevel@tonic-gate }
12080Sstevel@tonic-gate 
12090Sstevel@tonic-gate /*
12100Sstevel@tonic-gate  * This function differs from above in that we assume this is called
12110Sstevel@tonic-gate  * from non-interrupt context, and never called from the async_thread.
12120Sstevel@tonic-gate  */
12130Sstevel@tonic-gate 
12140Sstevel@tonic-gate void
ibtl_free_hca_async_check(ibtl_hca_t * ibt_hca)12150Sstevel@tonic-gate ibtl_free_hca_async_check(ibtl_hca_t *ibt_hca)
12160Sstevel@tonic-gate {
12170Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_hca_async_check(%p)",
12180Sstevel@tonic-gate 	    ibt_hca);
12190Sstevel@tonic-gate 
12200Sstevel@tonic-gate 	mutex_enter(&ibtl_async_mutex);
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate 	/* if there is an active async, mark this object to be freed */
12230Sstevel@tonic-gate 	if (ibt_hca->ha_async_cnt > 0) {
12240Sstevel@tonic-gate 		ibt_hca->ha_async_flags |= IBTL_ASYNC_FREE_OBJECT;
12250Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
12260Sstevel@tonic-gate 	} else {	/* free the object now */
12270Sstevel@tonic-gate 		mutex_exit(&ibtl_async_mutex);
12280Sstevel@tonic-gate 		kmem_free(ibt_hca, sizeof (ibtl_hca_t));
12290Sstevel@tonic-gate 	}
12300Sstevel@tonic-gate }
12310Sstevel@tonic-gate 
12320Sstevel@tonic-gate /*
12330Sstevel@tonic-gate  * Completion Queue Handling.
12340Sstevel@tonic-gate  *
12350Sstevel@tonic-gate  *	A completion queue can be handled through a simple callback
12360Sstevel@tonic-gate  *	at interrupt level, or it may be queued for an ibtl_cq_thread
12370Sstevel@tonic-gate  *	to handle.  The latter is chosen during ibt_alloc_cq when the
 *	IBT_CQ_HANDLER_IN_THREAD is specified.
12390Sstevel@tonic-gate  */
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate static void
ibtl_cq_handler_call(ibtl_cq_t * ibtl_cq)12420Sstevel@tonic-gate ibtl_cq_handler_call(ibtl_cq_t *ibtl_cq)
12430Sstevel@tonic-gate {
12440Sstevel@tonic-gate 	ibt_cq_handler_t	cq_handler;
12450Sstevel@tonic-gate 	void			*arg;
12460Sstevel@tonic-gate 
12470Sstevel@tonic-gate 	IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_cq_handler_call(%p)", ibtl_cq);
12480Sstevel@tonic-gate 
12498580SBill.Taylor@Sun.COM 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
12500Sstevel@tonic-gate 	cq_handler = ibtl_cq->cq_comp_handler;
12510Sstevel@tonic-gate 	arg = ibtl_cq->cq_arg;
12520Sstevel@tonic-gate 	if (cq_handler != NULL)
12530Sstevel@tonic-gate 		cq_handler(ibtl_cq, arg);
12540Sstevel@tonic-gate 	else
12550Sstevel@tonic-gate 		IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_cq_handler_call: "
12560Sstevel@tonic-gate 		    "no cq_handler for cq %p", ibtl_cq);
12570Sstevel@tonic-gate }
12580Sstevel@tonic-gate 
12590Sstevel@tonic-gate /*
12600Sstevel@tonic-gate  * Before ibt_free_cq can continue, we need to ensure no more cq_handler
12610Sstevel@tonic-gate  * callbacks can occur.  When we get the mutex, we know there are no
12620Sstevel@tonic-gate  * outstanding cq_handler callbacks.  We set the cq_handler to NULL to
12630Sstevel@tonic-gate  * prohibit future callbacks.
12640Sstevel@tonic-gate  */
void
ibtl_free_cq_check(ibtl_cq_t *ibtl_cq)
{
	/* NULL the handler so no new completion callbacks are started */
	mutex_enter(&ibtl_cq->cq_mutex);
	ibtl_cq->cq_comp_handler = NULL;
	mutex_exit(&ibtl_cq->cq_mutex);
	if (ibtl_cq->cq_in_thread) {
		/*
		 * This CQ is serviced by ibtl_cq_thread; wait until that
		 * thread is done with the CQ before the caller frees it.
		 */
		mutex_enter(&ibtl_cq_mutex);
		--ibtl_cqs_using_threads;
		while (ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) {
			/* cancel any client call not yet started */
			ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
			/* ask ibtl_cq_thread to cv_broadcast when done */
			ibtl_cq->cq_impl_flags |= IBTL_CQ_FREE;
			cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);
		}
		mutex_exit(&ibtl_cq_mutex);
	}
}
12820Sstevel@tonic-gate 
12830Sstevel@tonic-gate /*
12840Sstevel@tonic-gate  * Loop forever, calling cq_handlers until the cq list
12850Sstevel@tonic-gate  * is empty.
12860Sstevel@tonic-gate  */
12870Sstevel@tonic-gate 
/*
 * Service thread for CQs allocated with their handler to be called in
 * a thread: repeatedly dequeue a CQ from the pending list, call its
 * client handler (without ibtl_cq_mutex held), and sleep when the list
 * is empty.  Exits when ibtl_cq_thread_exit is set and the list drains.
 */
static void
ibtl_cq_thread(void)
{
#ifndef __lock_lint
	kmutex_t cpr_mutex;
#endif
	callb_cpr_t	cprinfo;

	_NOTE(MUTEX_PROTECTS_DATA(cpr_mutex, cprinfo))
	_NOTE(NO_COMPETING_THREADS_NOW)
	mutex_init(&cpr_mutex, NULL, MUTEX_DRIVER, NULL);
	/* register with CPR (suspend/resume) using an on-stack mutex */
	CALLB_CPR_INIT(&cprinfo, &cpr_mutex, callb_generic_cpr,
	    "ibtl_cq_thread");
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif

	mutex_enter(&ibtl_cq_mutex);

	for (;;) {
		if (ibtl_cq_list_start) {
			ibtl_cq_t *ibtl_cq;

			/* dequeue the first CQ awaiting service */
			ibtl_cq = ibtl_cq_list_start;
			ibtl_cq_list_start = ibtl_cq->cq_link;
			ibtl_cq->cq_link = NULL;
			if (ibtl_cq == ibtl_cq_list_end)
				ibtl_cq_list_end = NULL;

			/*
			 * Call the client handler, and again if another
			 * notification arrived while we were calling it.
			 * The handler runs without ibtl_cq_mutex held.
			 */
			while (ibtl_cq->cq_impl_flags & IBTL_CQ_CALL_CLIENT) {
				ibtl_cq->cq_impl_flags &= ~IBTL_CQ_CALL_CLIENT;
				mutex_exit(&ibtl_cq_mutex);
				ibtl_cq_handler_call(ibtl_cq);
				mutex_enter(&ibtl_cq_mutex);
			}
			ibtl_cq->cq_impl_flags &= ~IBTL_CQ_PENDING;
			/* wake up ibtl_free_cq_check() if it is waiting */
			if (ibtl_cq->cq_impl_flags & IBTL_CQ_FREE)
				cv_broadcast(&ibtl_cq_cv);
		} else {
			if (ibtl_cq_thread_exit == IBTL_THREAD_EXIT)
				break;
			/* nothing to do; mark ourselves CPR-safe and wait */
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			mutex_exit(&cpr_mutex);

			cv_wait(&ibtl_cq_cv, &ibtl_cq_mutex);

			/* drop list lock before taking cpr_mutex (ordering) */
			mutex_exit(&ibtl_cq_mutex);
			mutex_enter(&cpr_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &cpr_mutex);
			mutex_exit(&cpr_mutex);
			mutex_enter(&ibtl_cq_mutex);
		}
	}

	mutex_exit(&ibtl_cq_mutex);
#ifndef __lock_lint
	/* CALLB_CPR_EXIT releases cpr_mutex; no explicit mutex_exit */
	mutex_enter(&cpr_mutex);
	CALLB_CPR_EXIT(&cprinfo);
#endif
	mutex_destroy(&cpr_mutex);
}
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 
13520Sstevel@tonic-gate /*
13530Sstevel@tonic-gate  * ibc_cq_handler()
13540Sstevel@tonic-gate  *
13550Sstevel@tonic-gate  *    Completion Queue Notification Handler.
13560Sstevel@tonic-gate  *
13570Sstevel@tonic-gate  */
13580Sstevel@tonic-gate /*ARGSUSED*/
13590Sstevel@tonic-gate void
ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl,ibt_cq_hdl_t ibtl_cq)13600Sstevel@tonic-gate ibc_cq_handler(ibc_clnt_hdl_t ibc_hdl, ibt_cq_hdl_t ibtl_cq)
13610Sstevel@tonic-gate {
13620Sstevel@tonic-gate 	IBTF_DPRINTF_L4(ibtf_handlers, "ibc_cq_handler(%p, %p)",
13630Sstevel@tonic-gate 	    ibc_hdl, ibtl_cq);
13640Sstevel@tonic-gate 
13650Sstevel@tonic-gate 	if (ibtl_cq->cq_in_thread) {
13660Sstevel@tonic-gate 		mutex_enter(&ibtl_cq_mutex);
13670Sstevel@tonic-gate 		ibtl_cq->cq_impl_flags |= IBTL_CQ_CALL_CLIENT;
13680Sstevel@tonic-gate 		if ((ibtl_cq->cq_impl_flags & IBTL_CQ_PENDING) == 0) {
13690Sstevel@tonic-gate 			ibtl_cq->cq_impl_flags |= IBTL_CQ_PENDING;
13700Sstevel@tonic-gate 			ibtl_cq->cq_link = NULL;
13710Sstevel@tonic-gate 			if (ibtl_cq_list_end == NULL)
13720Sstevel@tonic-gate 				ibtl_cq_list_start = ibtl_cq;
13730Sstevel@tonic-gate 			else
13740Sstevel@tonic-gate 				ibtl_cq_list_end->cq_link = ibtl_cq;
13750Sstevel@tonic-gate 			ibtl_cq_list_end = ibtl_cq;
13760Sstevel@tonic-gate 			cv_signal(&ibtl_cq_cv);
13770Sstevel@tonic-gate 		}
13780Sstevel@tonic-gate 		mutex_exit(&ibtl_cq_mutex);
13790Sstevel@tonic-gate 		return;
13800Sstevel@tonic-gate 	} else
13810Sstevel@tonic-gate 		ibtl_cq_handler_call(ibtl_cq);
13820Sstevel@tonic-gate }
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 
13850Sstevel@tonic-gate /*
13860Sstevel@tonic-gate  * ibt_enable_cq_notify()
13870Sstevel@tonic-gate  *      Enable Notification requests on the specified CQ.
13880Sstevel@tonic-gate  *
13890Sstevel@tonic-gate  *      ibt_cq          The CQ handle.
13900Sstevel@tonic-gate  *
13910Sstevel@tonic-gate  *      notify_type     Enable notifications for all (IBT_NEXT_COMPLETION)
13920Sstevel@tonic-gate  *                      completions, or the next Solicited completion
13930Sstevel@tonic-gate  *                      (IBT_NEXT_SOLICITED) only.
13940Sstevel@tonic-gate  *
13950Sstevel@tonic-gate  *	Completion notifications are disabled by setting the completion
13960Sstevel@tonic-gate  *	handler to NULL by calling ibt_set_cq_handler().
13970Sstevel@tonic-gate  */
13980Sstevel@tonic-gate ibt_status_t
ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq,ibt_cq_notify_flags_t notify_type)13990Sstevel@tonic-gate ibt_enable_cq_notify(ibt_cq_hdl_t ibtl_cq, ibt_cq_notify_flags_t notify_type)
14000Sstevel@tonic-gate {
14010Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibt_enable_cq_notify(%p, %d)",
14020Sstevel@tonic-gate 	    ibtl_cq, notify_type);
14030Sstevel@tonic-gate 
14040Sstevel@tonic-gate 	return (IBTL_CQ2CIHCAOPS_P(ibtl_cq)->ibc_notify_cq(
14050Sstevel@tonic-gate 	    IBTL_CQ2CIHCA(ibtl_cq), ibtl_cq->cq_ibc_cq_hdl, notify_type));
14060Sstevel@tonic-gate }
14070Sstevel@tonic-gate 
14080Sstevel@tonic-gate 
14090Sstevel@tonic-gate /*
14100Sstevel@tonic-gate  * ibt_set_cq_handler()
14110Sstevel@tonic-gate  *      Register a work request completion handler with the IBTF.
14120Sstevel@tonic-gate  *
14130Sstevel@tonic-gate  *      ibt_cq                  The CQ handle.
14140Sstevel@tonic-gate  *
14150Sstevel@tonic-gate  *      completion_handler      The completion handler.
14160Sstevel@tonic-gate  *
14170Sstevel@tonic-gate  *      arg                     The IBTF client private argument to be passed
14180Sstevel@tonic-gate  *                              back to the client when calling the CQ
14190Sstevel@tonic-gate  *                              completion handler.
14200Sstevel@tonic-gate  *
14210Sstevel@tonic-gate  *	Completion notifications are disabled by setting the completion
14220Sstevel@tonic-gate  *	handler to NULL.  When setting the handler to NULL, no additional
14230Sstevel@tonic-gate  *	calls to the previous CQ handler will be initiated, but there may
14240Sstevel@tonic-gate  *	be one in progress.
14250Sstevel@tonic-gate  *
14260Sstevel@tonic-gate  *      This function does not otherwise change the state of previous
14270Sstevel@tonic-gate  *      calls to ibt_enable_cq_notify().
14280Sstevel@tonic-gate  */
14290Sstevel@tonic-gate void
ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq,ibt_cq_handler_t completion_handler,void * arg)14300Sstevel@tonic-gate ibt_set_cq_handler(ibt_cq_hdl_t ibtl_cq, ibt_cq_handler_t completion_handler,
14310Sstevel@tonic-gate     void *arg)
14320Sstevel@tonic-gate {
14330Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibt_set_cq_handler(%p, %p, %p)",
14340Sstevel@tonic-gate 	    ibtl_cq, completion_handler, arg);
14350Sstevel@tonic-gate 
14368580SBill.Taylor@Sun.COM 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ibtl_cq))
14370Sstevel@tonic-gate 	ibtl_cq->cq_comp_handler = completion_handler;
14380Sstevel@tonic-gate 	ibtl_cq->cq_arg = arg;
14390Sstevel@tonic-gate }
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 
14420Sstevel@tonic-gate /*
14430Sstevel@tonic-gate  * Inform IBT clients about New HCAs.
14440Sstevel@tonic-gate  *
14450Sstevel@tonic-gate  *	We use taskqs to allow simultaneous notification, with sleeping.
14460Sstevel@tonic-gate  *	Since taskqs only allow one argument, we define a structure
14470Sstevel@tonic-gate  *	because we need to pass in two arguments.
14480Sstevel@tonic-gate  */
14490Sstevel@tonic-gate 
struct ibtl_new_hca_s {
	ibtl_clnt_t		*nh_clntp;	/* client being notified */
	ibtl_hca_devinfo_t	*nh_hca_devp;	/* HCA being announced */
	ibt_async_code_t	nh_code;	/* e.g. IBT_HCA_ATTACH_EVENT */
};
14550Sstevel@tonic-gate 
/*
 * Taskq callback (also called directly for some clients): deliver the
 * HCA async event in new_hcap->nh_code to one client, then decrement
 * the per-client and per-HCA async counts taken by the dispatcher,
 * signalling any waiters when a count drains to zero.  Frees new_hcap.
 */
static void
ibtl_tell_client_about_new_hca(void *arg)
{
	struct ibtl_new_hca_s	*new_hcap = (struct ibtl_new_hca_s *)arg;
	ibtl_clnt_t		*clntp = new_hcap->nh_clntp;
	ibt_async_event_t	async_event;
	ibtl_hca_devinfo_t	*hca_devp = new_hcap->nh_hca_devp;

	/* the only event field filled in is the HCA GUID */
	bzero(&async_event, sizeof (async_event));
	async_event.ev_hca_guid = hca_devp->hd_hca_attr->hca_node_guid;
	clntp->clnt_modinfop->mi_async_handler(
	    clntp->clnt_private, NULL, new_hcap->nh_code, &async_event);
	kmem_free(new_hcap, sizeof (*new_hcap));
#ifdef __lock_lint
	/* tell lock_lint the client may call ibt_open_hca() from here */
	{
		ibt_hca_hdl_t hca_hdl;
		(void) ibt_open_hca(clntp, 0ULL, &hca_hdl);
	}
#endif
	mutex_enter(&ibtl_clnt_list_mutex);
	/* wake whoever is waiting for these async counts to drain */
	if (--hca_devp->hd_async_task_cnt == 0)
		cv_signal(&hca_devp->hd_async_task_cv);
	if (--clntp->clnt_async_cnt == 0)
		cv_broadcast(&ibtl_clnt_cv);
	mutex_exit(&ibtl_clnt_list_mutex);
}
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate /*
14840Sstevel@tonic-gate  * ibtl_announce_new_hca:
14850Sstevel@tonic-gate  *
14860Sstevel@tonic-gate  *	o First attach these clients in the given order
 *		IBMA
 *		IBDM
 *		IBCM
14890Sstevel@tonic-gate  *
14900Sstevel@tonic-gate  *	o Next attach all other clients in parallel.
14910Sstevel@tonic-gate  *
14920Sstevel@tonic-gate  * NOTE: Use the taskq to simultaneously notify all clients of the new HCA.
14930Sstevel@tonic-gate  * Retval from clients is ignored.
14940Sstevel@tonic-gate  */
14950Sstevel@tonic-gate void
ibtl_announce_new_hca(ibtl_hca_devinfo_t * hca_devp)14960Sstevel@tonic-gate ibtl_announce_new_hca(ibtl_hca_devinfo_t *hca_devp)
14970Sstevel@tonic-gate {
14980Sstevel@tonic-gate 	ibtl_clnt_t		*clntp;
14990Sstevel@tonic-gate 	struct ibtl_new_hca_s	*new_hcap;
15000Sstevel@tonic-gate 
15015752Shiremath 	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_announce_new_hca(%p, %llX)",
15020Sstevel@tonic-gate 	    hca_devp, hca_devp->hd_hca_attr->hca_node_guid);
15030Sstevel@tonic-gate 
15040Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
15050Sstevel@tonic-gate 
15060Sstevel@tonic-gate 	clntp = ibtl_clnt_list;
15070Sstevel@tonic-gate 	while (clntp != NULL) {
15080Sstevel@tonic-gate 		if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
15090Sstevel@tonic-gate 			IBTF_DPRINTF_L4(ibtf_handlers,
15100Sstevel@tonic-gate 			    "ibtl_announce_new_hca: calling IBMF");
15110Sstevel@tonic-gate 			if (clntp->clnt_modinfop->mi_async_handler) {
15120Sstevel@tonic-gate 				_NOTE(NO_COMPETING_THREADS_NOW)
15130Sstevel@tonic-gate 				new_hcap = kmem_alloc(sizeof (*new_hcap),
15140Sstevel@tonic-gate 				    KM_SLEEP);
15150Sstevel@tonic-gate 				new_hcap->nh_clntp = clntp;
15160Sstevel@tonic-gate 				new_hcap->nh_hca_devp = hca_devp;
15170Sstevel@tonic-gate 				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
15182840Scarlsonj #ifndef lint
15190Sstevel@tonic-gate 				_NOTE(COMPETING_THREADS_NOW)
15202840Scarlsonj #endif
15210Sstevel@tonic-gate 				clntp->clnt_async_cnt++;
15220Sstevel@tonic-gate 				hca_devp->hd_async_task_cnt++;
15230Sstevel@tonic-gate 
15240Sstevel@tonic-gate 				(void) taskq_dispatch(ibtl_async_taskq,
15250Sstevel@tonic-gate 				    ibtl_tell_client_about_new_hca, new_hcap,
15260Sstevel@tonic-gate 				    TQ_SLEEP);
15270Sstevel@tonic-gate 			}
15280Sstevel@tonic-gate 			break;
15290Sstevel@tonic-gate 		}
15300Sstevel@tonic-gate 		clntp = clntp->clnt_list_link;
15310Sstevel@tonic-gate 	}
15320Sstevel@tonic-gate 	if (clntp != NULL)
15330Sstevel@tonic-gate 		while (clntp->clnt_async_cnt > 0)
15340Sstevel@tonic-gate 			cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
15350Sstevel@tonic-gate 	clntp = ibtl_clnt_list;
15360Sstevel@tonic-gate 	while (clntp != NULL) {
15377354SGiri.Adari@Sun.COM 		if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
15387354SGiri.Adari@Sun.COM 			IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
15397354SGiri.Adari@Sun.COM 			    "calling  %s", clntp->clnt_modinfop->mi_clnt_name);
15407354SGiri.Adari@Sun.COM 			if (clntp->clnt_modinfop->mi_async_handler) {
15417354SGiri.Adari@Sun.COM 				_NOTE(NO_COMPETING_THREADS_NOW)
15427354SGiri.Adari@Sun.COM 				new_hcap = kmem_alloc(sizeof (*new_hcap),
15437354SGiri.Adari@Sun.COM 				    KM_SLEEP);
15447354SGiri.Adari@Sun.COM 				new_hcap->nh_clntp = clntp;
15457354SGiri.Adari@Sun.COM 				new_hcap->nh_hca_devp = hca_devp;
15467354SGiri.Adari@Sun.COM 				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
15477354SGiri.Adari@Sun.COM #ifndef lint
15487354SGiri.Adari@Sun.COM 				_NOTE(COMPETING_THREADS_NOW)
15497354SGiri.Adari@Sun.COM #endif
15507354SGiri.Adari@Sun.COM 				clntp->clnt_async_cnt++;
15517354SGiri.Adari@Sun.COM 				hca_devp->hd_async_task_cnt++;
15527354SGiri.Adari@Sun.COM 
15538082SRamaswamy.Tummala@Sun.COM 				mutex_exit(&ibtl_clnt_list_mutex);
15548082SRamaswamy.Tummala@Sun.COM 				(void) ibtl_tell_client_about_new_hca(
15558082SRamaswamy.Tummala@Sun.COM 				    new_hcap);
15568082SRamaswamy.Tummala@Sun.COM 				mutex_enter(&ibtl_clnt_list_mutex);
15577354SGiri.Adari@Sun.COM 			}
15587354SGiri.Adari@Sun.COM 			break;
15597354SGiri.Adari@Sun.COM 		}
15607354SGiri.Adari@Sun.COM 		clntp = clntp->clnt_list_link;
15617354SGiri.Adari@Sun.COM 	}
15628082SRamaswamy.Tummala@Sun.COM 
15637354SGiri.Adari@Sun.COM 	clntp = ibtl_clnt_list;
15647354SGiri.Adari@Sun.COM 	while (clntp != NULL) {
15657354SGiri.Adari@Sun.COM 		if (clntp->clnt_modinfop->mi_clnt_class == IBT_CM) {
15665752Shiremath 			IBTF_DPRINTF_L4(ibtf_handlers, "ibtl_announce_new_hca: "
15675752Shiremath 			    "calling  %s", clntp->clnt_modinfop->mi_clnt_name);
15680Sstevel@tonic-gate 			if (clntp->clnt_modinfop->mi_async_handler) {
15690Sstevel@tonic-gate 				_NOTE(NO_COMPETING_THREADS_NOW)
15700Sstevel@tonic-gate 				new_hcap = kmem_alloc(sizeof (*new_hcap),
15710Sstevel@tonic-gate 				    KM_SLEEP);
15720Sstevel@tonic-gate 				new_hcap->nh_clntp = clntp;
15730Sstevel@tonic-gate 				new_hcap->nh_hca_devp = hca_devp;
15740Sstevel@tonic-gate 				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
15752840Scarlsonj #ifndef lint
15760Sstevel@tonic-gate 				_NOTE(COMPETING_THREADS_NOW)
15772840Scarlsonj #endif
15780Sstevel@tonic-gate 				clntp->clnt_async_cnt++;
15790Sstevel@tonic-gate 				hca_devp->hd_async_task_cnt++;
15800Sstevel@tonic-gate 
15810Sstevel@tonic-gate 				(void) taskq_dispatch(ibtl_async_taskq,
15820Sstevel@tonic-gate 				    ibtl_tell_client_about_new_hca, new_hcap,
15830Sstevel@tonic-gate 				    TQ_SLEEP);
15840Sstevel@tonic-gate 			}
15850Sstevel@tonic-gate 			break;
15860Sstevel@tonic-gate 		}
15870Sstevel@tonic-gate 		clntp = clntp->clnt_list_link;
15880Sstevel@tonic-gate 	}
15890Sstevel@tonic-gate 	if (clntp != NULL)
15900Sstevel@tonic-gate 		while (clntp->clnt_async_cnt > 0)
15910Sstevel@tonic-gate 			cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
15920Sstevel@tonic-gate 	clntp = ibtl_clnt_list;
15930Sstevel@tonic-gate 	while (clntp != NULL) {
15940Sstevel@tonic-gate 		if ((clntp->clnt_modinfop->mi_clnt_class != IBT_DM) &&
15955752Shiremath 		    (clntp->clnt_modinfop->mi_clnt_class != IBT_CM) &&
15960Sstevel@tonic-gate 		    (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA)) {
15970Sstevel@tonic-gate 			IBTF_DPRINTF_L4(ibtf_handlers,
15982259Shiremath 			    "ibtl_announce_new_hca: Calling %s ",
15992259Shiremath 			    clntp->clnt_modinfop->mi_clnt_name);
16000Sstevel@tonic-gate 			if (clntp->clnt_modinfop->mi_async_handler) {
16010Sstevel@tonic-gate 				_NOTE(NO_COMPETING_THREADS_NOW)
16020Sstevel@tonic-gate 				new_hcap = kmem_alloc(sizeof (*new_hcap),
16030Sstevel@tonic-gate 				    KM_SLEEP);
16040Sstevel@tonic-gate 				new_hcap->nh_clntp = clntp;
16050Sstevel@tonic-gate 				new_hcap->nh_hca_devp = hca_devp;
16060Sstevel@tonic-gate 				new_hcap->nh_code = IBT_HCA_ATTACH_EVENT;
16072840Scarlsonj #ifndef lint
16080Sstevel@tonic-gate 				_NOTE(COMPETING_THREADS_NOW)
16092840Scarlsonj #endif
16100Sstevel@tonic-gate 				clntp->clnt_async_cnt++;
16110Sstevel@tonic-gate 				hca_devp->hd_async_task_cnt++;
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate 				(void) taskq_dispatch(ibtl_async_taskq,
16140Sstevel@tonic-gate 				    ibtl_tell_client_about_new_hca, new_hcap,
16150Sstevel@tonic-gate 				    TQ_SLEEP);
16160Sstevel@tonic-gate 			}
16170Sstevel@tonic-gate 		}
16180Sstevel@tonic-gate 		clntp = clntp->clnt_list_link;
16190Sstevel@tonic-gate 	}
16200Sstevel@tonic-gate 
16210Sstevel@tonic-gate 	/* wait for all tasks to complete */
16220Sstevel@tonic-gate 	while (hca_devp->hd_async_task_cnt != 0)
16230Sstevel@tonic-gate 		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
16240Sstevel@tonic-gate 
16250Sstevel@tonic-gate 	/* wakeup thread that may be waiting to send an HCA async */
16260Sstevel@tonic-gate 	ASSERT(hca_devp->hd_async_busy == 1);
16270Sstevel@tonic-gate 	hca_devp->hd_async_busy = 0;
16280Sstevel@tonic-gate 	cv_broadcast(&hca_devp->hd_async_busy_cv);
16290Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
16300Sstevel@tonic-gate }
16310Sstevel@tonic-gate 
16320Sstevel@tonic-gate /*
16330Sstevel@tonic-gate  * ibtl_detach_all_clients:
16340Sstevel@tonic-gate  *
16350Sstevel@tonic-gate  *	Return value - 0 for Success, 1 for Failure
16360Sstevel@tonic-gate  *
16370Sstevel@tonic-gate  *	o First detach general clients.
16380Sstevel@tonic-gate  *
16390Sstevel@tonic-gate  *	o Next detach these clients
 *		IBDM
 *		IBCM
16420Sstevel@tonic-gate  *
16430Sstevel@tonic-gate  *	o Finally, detach this client
16440Sstevel@tonic-gate  *		IBMA
16450Sstevel@tonic-gate  */
16460Sstevel@tonic-gate int
ibtl_detach_all_clients(ibtl_hca_devinfo_t * hca_devp)16470Sstevel@tonic-gate ibtl_detach_all_clients(ibtl_hca_devinfo_t *hca_devp)
16480Sstevel@tonic-gate {
16490Sstevel@tonic-gate 	ib_guid_t		hcaguid = hca_devp->hd_hca_attr->hca_node_guid;
16500Sstevel@tonic-gate 	ibtl_hca_t		*ibt_hca;
16510Sstevel@tonic-gate 	ibtl_clnt_t		*clntp;
16520Sstevel@tonic-gate 	int			retval;
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 	IBTF_DPRINTF_L2(ibtf_handlers, "ibtl_detach_all_clients(%llX)",
16550Sstevel@tonic-gate 	    hcaguid);
16560Sstevel@tonic-gate 
16570Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
16580Sstevel@tonic-gate 
16590Sstevel@tonic-gate 	while (hca_devp->hd_async_busy)
16600Sstevel@tonic-gate 		cv_wait(&hca_devp->hd_async_busy_cv, &ibtl_clnt_list_mutex);
16610Sstevel@tonic-gate 	hca_devp->hd_async_busy = 1;
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate 	/* First inform general clients asynchronously */
16640Sstevel@tonic-gate 	hca_devp->hd_async_event.ev_hca_guid = hcaguid;
16650Sstevel@tonic-gate 	hca_devp->hd_async_event.ev_fma_ena = 0;
16660Sstevel@tonic-gate 	hca_devp->hd_async_event.ev_chan_hdl = NULL;
16670Sstevel@tonic-gate 	hca_devp->hd_async_event.ev_cq_hdl = NULL;
16680Sstevel@tonic-gate 	hca_devp->hd_async_code = IBT_HCA_DETACH_EVENT;
16690Sstevel@tonic-gate 
16700Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
16710Sstevel@tonic-gate 	while (ibt_hca != NULL) {
16720Sstevel@tonic-gate 		clntp = ibt_hca->ha_clnt_devp;
16730Sstevel@tonic-gate 		if (IBTL_GENERIC_CLIENT(clntp)) {
16740Sstevel@tonic-gate 			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
16750Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
16760Sstevel@tonic-gate 			ibt_hca->ha_async_cnt++;
16770Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
16780Sstevel@tonic-gate 			hca_devp->hd_async_task_cnt++;
16790Sstevel@tonic-gate 
16800Sstevel@tonic-gate 			(void) taskq_dispatch(ibtl_async_taskq,
16810Sstevel@tonic-gate 			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
16820Sstevel@tonic-gate 		}
16830Sstevel@tonic-gate 		ibt_hca = ibt_hca->ha_clnt_link;
16840Sstevel@tonic-gate 	}
16850Sstevel@tonic-gate 
16860Sstevel@tonic-gate 	/* wait for all clients to complete */
16870Sstevel@tonic-gate 	while (hca_devp->hd_async_task_cnt != 0) {
16880Sstevel@tonic-gate 		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
16890Sstevel@tonic-gate 	}
16900Sstevel@tonic-gate 	/* Go thru the clients and check if any have not closed this HCA. */
16912259Shiremath 	retval = 0;
16920Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
16930Sstevel@tonic-gate 	while (ibt_hca != NULL) {
16940Sstevel@tonic-gate 		clntp = ibt_hca->ha_clnt_devp;
16950Sstevel@tonic-gate 		if (IBTL_GENERIC_CLIENT(clntp)) {
16960Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers,
16970Sstevel@tonic-gate 			    "ibtl_detach_all_clients: "
16980Sstevel@tonic-gate 			    "client '%s' failed to close the HCA.",
16990Sstevel@tonic-gate 			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
17000Sstevel@tonic-gate 			retval = 1;
17010Sstevel@tonic-gate 		}
17020Sstevel@tonic-gate 		ibt_hca = ibt_hca->ha_clnt_link;
17030Sstevel@tonic-gate 	}
17042259Shiremath 	if (retval == 1)
17052259Shiremath 		goto bailout;
17060Sstevel@tonic-gate 
17072259Shiremath 	/* Next inform IBDM asynchronously */
17080Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
17090Sstevel@tonic-gate 	while (ibt_hca != NULL) {
17100Sstevel@tonic-gate 		clntp = ibt_hca->ha_clnt_devp;
17112259Shiremath 		if (clntp->clnt_modinfop->mi_clnt_class == IBT_DM) {
17120Sstevel@tonic-gate 			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
17130Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
17140Sstevel@tonic-gate 			ibt_hca->ha_async_cnt++;
17150Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
17160Sstevel@tonic-gate 			hca_devp->hd_async_task_cnt++;
17170Sstevel@tonic-gate 
17188082SRamaswamy.Tummala@Sun.COM 			mutex_exit(&ibtl_clnt_list_mutex);
17198082SRamaswamy.Tummala@Sun.COM 			ibtl_hca_client_async_task(ibt_hca);
17208082SRamaswamy.Tummala@Sun.COM 			mutex_enter(&ibtl_clnt_list_mutex);
17218082SRamaswamy.Tummala@Sun.COM 			break;
17220Sstevel@tonic-gate 		}
17230Sstevel@tonic-gate 		ibt_hca = ibt_hca->ha_clnt_link;
17240Sstevel@tonic-gate 	}
17250Sstevel@tonic-gate 
17262259Shiremath 	/*
17272259Shiremath 	 * Next inform IBCM.
17282259Shiremath 	 * As IBCM doesn't perform ibt_open_hca(), IBCM will not be
17292259Shiremath 	 * accessible via hca_devp->hd_clnt_list.
17302259Shiremath 	 * ibtl_cm_async_handler will NOT be NULL, if IBCM is registered.
17312259Shiremath 	 */
17322259Shiremath 	if (ibtl_cm_async_handler) {
17332259Shiremath 		ibtl_tell_mgr(hca_devp, ibtl_cm_async_handler,
17342259Shiremath 		    ibtl_cm_clnt_private);
17352259Shiremath 
17362259Shiremath 		/* wait for all tasks to complete */
17372259Shiremath 		while (hca_devp->hd_async_task_cnt != 0)
17382259Shiremath 			cv_wait(&hca_devp->hd_async_task_cv,
17392259Shiremath 			    &ibtl_clnt_list_mutex);
17402259Shiremath 	}
17412259Shiremath 
17420Sstevel@tonic-gate 	/* Go thru the clients and check if any have not closed this HCA. */
17430Sstevel@tonic-gate 	retval = 0;
17440Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
17450Sstevel@tonic-gate 	while (ibt_hca != NULL) {
17460Sstevel@tonic-gate 		clntp = ibt_hca->ha_clnt_devp;
17470Sstevel@tonic-gate 		if (clntp->clnt_modinfop->mi_clnt_class != IBT_IBMA) {
17480Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers,
17490Sstevel@tonic-gate 			    "ibtl_detach_all_clients: "
17500Sstevel@tonic-gate 			    "client '%s' failed to close the HCA.",
17510Sstevel@tonic-gate 			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
17520Sstevel@tonic-gate 			retval = 1;
17530Sstevel@tonic-gate 		}
17540Sstevel@tonic-gate 		ibt_hca = ibt_hca->ha_clnt_link;
17550Sstevel@tonic-gate 	}
17562259Shiremath 	if (retval == 1)
17572259Shiremath 		goto bailout;
17580Sstevel@tonic-gate 
17590Sstevel@tonic-gate 	/* Finally, inform IBMA */
17600Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
17610Sstevel@tonic-gate 	while (ibt_hca != NULL) {
17620Sstevel@tonic-gate 		clntp = ibt_hca->ha_clnt_devp;
17630Sstevel@tonic-gate 		if (clntp->clnt_modinfop->mi_clnt_class == IBT_IBMA) {
17640Sstevel@tonic-gate 			++ibt_hca->ha_clnt_devp->clnt_async_cnt;
17650Sstevel@tonic-gate 			mutex_enter(&ibtl_async_mutex);
17660Sstevel@tonic-gate 			ibt_hca->ha_async_cnt++;
17670Sstevel@tonic-gate 			mutex_exit(&ibtl_async_mutex);
17680Sstevel@tonic-gate 			hca_devp->hd_async_task_cnt++;
17690Sstevel@tonic-gate 
17700Sstevel@tonic-gate 			(void) taskq_dispatch(ibtl_async_taskq,
17710Sstevel@tonic-gate 			    ibtl_hca_client_async_task, ibt_hca, TQ_SLEEP);
17720Sstevel@tonic-gate 		} else
17730Sstevel@tonic-gate 			IBTF_DPRINTF_L2(ibtf_handlers,
17740Sstevel@tonic-gate 			    "ibtl_detach_all_clients: "
17750Sstevel@tonic-gate 			    "client '%s' is unexpectedly on the client list",
17760Sstevel@tonic-gate 			    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
17770Sstevel@tonic-gate 		ibt_hca = ibt_hca->ha_clnt_link;
17780Sstevel@tonic-gate 	}
17790Sstevel@tonic-gate 
17800Sstevel@tonic-gate 	/* wait for IBMA to complete */
17810Sstevel@tonic-gate 	while (hca_devp->hd_async_task_cnt != 0) {
17820Sstevel@tonic-gate 		cv_wait(&hca_devp->hd_async_task_cv, &ibtl_clnt_list_mutex);
17830Sstevel@tonic-gate 	}
17840Sstevel@tonic-gate 
17850Sstevel@tonic-gate 	/* Check if this HCA's client list is empty. */
17860Sstevel@tonic-gate 	ibt_hca = hca_devp->hd_clnt_list;
17870Sstevel@tonic-gate 	if (ibt_hca != NULL) {
17880Sstevel@tonic-gate 		IBTF_DPRINTF_L2(ibtf_handlers,
17890Sstevel@tonic-gate 		    "ibtl_detach_all_clients: "
17900Sstevel@tonic-gate 		    "client '%s' failed to close the HCA.",
17910Sstevel@tonic-gate 		    ibt_hca->ha_clnt_devp->clnt_modinfop->mi_clnt_name);
17920Sstevel@tonic-gate 		retval = 1;
17930Sstevel@tonic-gate 	} else
17940Sstevel@tonic-gate 		retval = 0;
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate bailout:
17978082SRamaswamy.Tummala@Sun.COM 	if (retval) {
17988082SRamaswamy.Tummala@Sun.COM 		hca_devp->hd_state = IBTL_HCA_DEV_ATTACHED; /* fix hd_state */
17998082SRamaswamy.Tummala@Sun.COM 		mutex_exit(&ibtl_clnt_list_mutex);
18008082SRamaswamy.Tummala@Sun.COM 		ibtl_announce_new_hca(hca_devp);
18018082SRamaswamy.Tummala@Sun.COM 		mutex_enter(&ibtl_clnt_list_mutex);
18028082SRamaswamy.Tummala@Sun.COM 	} else {
18038082SRamaswamy.Tummala@Sun.COM 		hca_devp->hd_async_busy = 0;
18048082SRamaswamy.Tummala@Sun.COM 		cv_broadcast(&hca_devp->hd_async_busy_cv);
18058082SRamaswamy.Tummala@Sun.COM 	}
18068082SRamaswamy.Tummala@Sun.COM 
18070Sstevel@tonic-gate 	return (retval);
18080Sstevel@tonic-gate }
18090Sstevel@tonic-gate 
18100Sstevel@tonic-gate void
ibtl_free_clnt_async_check(ibtl_clnt_t * clntp)18110Sstevel@tonic-gate ibtl_free_clnt_async_check(ibtl_clnt_t *clntp)
18120Sstevel@tonic-gate {
18130Sstevel@tonic-gate 	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_free_clnt_async_check(%p)", clntp);
18140Sstevel@tonic-gate 
18150Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ibtl_clnt_list_mutex));
18160Sstevel@tonic-gate 
18170Sstevel@tonic-gate 	/* wait for all asyncs based on "ibtl_clnt_list" to complete */
18180Sstevel@tonic-gate 	while (clntp->clnt_async_cnt != 0) {
18190Sstevel@tonic-gate 		cv_wait(&ibtl_clnt_cv, &ibtl_clnt_list_mutex);
18200Sstevel@tonic-gate 	}
18210Sstevel@tonic-gate }
18220Sstevel@tonic-gate 
18230Sstevel@tonic-gate static void
ibtl_dec_clnt_async_cnt(ibtl_clnt_t * clntp)18240Sstevel@tonic-gate ibtl_dec_clnt_async_cnt(ibtl_clnt_t *clntp)
18250Sstevel@tonic-gate {
18260Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
18270Sstevel@tonic-gate 	if (--clntp->clnt_async_cnt == 0) {
18280Sstevel@tonic-gate 		cv_broadcast(&ibtl_clnt_cv);
18290Sstevel@tonic-gate 	}
18300Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
18310Sstevel@tonic-gate }
18320Sstevel@tonic-gate 
18330Sstevel@tonic-gate static void
ibtl_inc_clnt_async_cnt(ibtl_clnt_t * clntp)18340Sstevel@tonic-gate ibtl_inc_clnt_async_cnt(ibtl_clnt_t *clntp)
18350Sstevel@tonic-gate {
18360Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
18370Sstevel@tonic-gate 	++clntp->clnt_async_cnt;
18380Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
18390Sstevel@tonic-gate }
18400Sstevel@tonic-gate 
18410Sstevel@tonic-gate 
/*
 * Functions and data structures to inform clients that a notification
 * has occurred about Multicast Groups that might interest them.
 */
struct ibtl_sm_notice {
	ibt_clnt_hdl_t		np_ibt_hdl;	/* client to be notified */
	ib_gid_t		np_sgid;	/* source GID for the notice */
	ibt_subnet_event_code_t	np_code;	/* which subnet event occurred */
	ibt_subnet_event_t	np_event;	/* copy of the event details */
};
18520Sstevel@tonic-gate 
18530Sstevel@tonic-gate static void
ibtl_sm_notice_task(void * arg)18540Sstevel@tonic-gate ibtl_sm_notice_task(void *arg)
18550Sstevel@tonic-gate {
18560Sstevel@tonic-gate 	struct ibtl_sm_notice *noticep = (struct ibtl_sm_notice *)arg;
18570Sstevel@tonic-gate 	ibt_clnt_hdl_t ibt_hdl = noticep->np_ibt_hdl;
18580Sstevel@tonic-gate 	ibt_sm_notice_handler_t sm_notice_handler;
18590Sstevel@tonic-gate 
18600Sstevel@tonic-gate 	sm_notice_handler = ibt_hdl->clnt_sm_trap_handler;
18610Sstevel@tonic-gate 	if (sm_notice_handler != NULL)
18620Sstevel@tonic-gate 		sm_notice_handler(ibt_hdl->clnt_sm_trap_handler_arg,
18630Sstevel@tonic-gate 		    noticep->np_sgid, noticep->np_code, &noticep->np_event);
18640Sstevel@tonic-gate 	kmem_free(noticep, sizeof (*noticep));
18650Sstevel@tonic-gate 	ibtl_dec_clnt_async_cnt(ibt_hdl);
18660Sstevel@tonic-gate }
18670Sstevel@tonic-gate 
18680Sstevel@tonic-gate /*
18690Sstevel@tonic-gate  * Inform the client that MCG notices are not working at this time.
18700Sstevel@tonic-gate  */
18710Sstevel@tonic-gate void
ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t * ifail)18720Sstevel@tonic-gate ibtl_cm_sm_notice_init_failure(ibtl_cm_sm_init_fail_t *ifail)
18730Sstevel@tonic-gate {
18740Sstevel@tonic-gate 	ibt_clnt_hdl_t ibt_hdl = ifail->smf_ibt_hdl;
18750Sstevel@tonic-gate 	struct ibtl_sm_notice *noticep;
18760Sstevel@tonic-gate 	ib_gid_t *sgidp = &ifail->smf_sgid[0];
18770Sstevel@tonic-gate 	int i;
18780Sstevel@tonic-gate 
18790Sstevel@tonic-gate 	for (i = 0; i < ifail->smf_num_sgids; i++) {
18800Sstevel@tonic-gate 		_NOTE(NO_COMPETING_THREADS_NOW)
18810Sstevel@tonic-gate 		noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
18820Sstevel@tonic-gate 		noticep->np_ibt_hdl = ibt_hdl;
18830Sstevel@tonic-gate 		noticep->np_sgid = *sgidp++;
18840Sstevel@tonic-gate 		noticep->np_code = IBT_SM_EVENT_UNAVAILABLE;
18852840Scarlsonj #ifndef lint
18860Sstevel@tonic-gate 		_NOTE(COMPETING_THREADS_NOW)
18872840Scarlsonj #endif
18880Sstevel@tonic-gate 		ibtl_inc_clnt_async_cnt(ibt_hdl);
18890Sstevel@tonic-gate 		(void) taskq_dispatch(ibtl_async_taskq,
18900Sstevel@tonic-gate 		    ibtl_sm_notice_task, noticep, TQ_SLEEP);
18910Sstevel@tonic-gate 	}
18920Sstevel@tonic-gate }
18930Sstevel@tonic-gate 
18940Sstevel@tonic-gate /*
18950Sstevel@tonic-gate  * Inform all clients of the event.
18960Sstevel@tonic-gate  */
18970Sstevel@tonic-gate void
ibtl_cm_sm_notice_handler(ib_gid_t sgid,ibt_subnet_event_code_t code,ibt_subnet_event_t * event)18980Sstevel@tonic-gate ibtl_cm_sm_notice_handler(ib_gid_t sgid, ibt_subnet_event_code_t code,
18990Sstevel@tonic-gate     ibt_subnet_event_t *event)
19000Sstevel@tonic-gate {
19010Sstevel@tonic-gate 	_NOTE(NO_COMPETING_THREADS_NOW)
19020Sstevel@tonic-gate 	struct ibtl_sm_notice	*noticep;
19030Sstevel@tonic-gate 	ibtl_clnt_t		*clntp;
19040Sstevel@tonic-gate 
19050Sstevel@tonic-gate 	mutex_enter(&ibtl_clnt_list_mutex);
19060Sstevel@tonic-gate 	clntp = ibtl_clnt_list;
19070Sstevel@tonic-gate 	while (clntp != NULL) {
19080Sstevel@tonic-gate 		if (clntp->clnt_sm_trap_handler) {
19090Sstevel@tonic-gate 			noticep = kmem_zalloc(sizeof (*noticep), KM_SLEEP);
19100Sstevel@tonic-gate 			noticep->np_ibt_hdl = clntp;
19110Sstevel@tonic-gate 			noticep->np_sgid = sgid;
19120Sstevel@tonic-gate 			noticep->np_code = code;
19130Sstevel@tonic-gate 			noticep->np_event = *event;
19140Sstevel@tonic-gate 			++clntp->clnt_async_cnt;
19150Sstevel@tonic-gate 			(void) taskq_dispatch(ibtl_async_taskq,
19160Sstevel@tonic-gate 			    ibtl_sm_notice_task, noticep, TQ_SLEEP);
19170Sstevel@tonic-gate 		}
19180Sstevel@tonic-gate 		clntp = clntp->clnt_list_link;
19190Sstevel@tonic-gate 	}
19200Sstevel@tonic-gate 	mutex_exit(&ibtl_clnt_list_mutex);
19212840Scarlsonj #ifndef lint
19220Sstevel@tonic-gate 	_NOTE(COMPETING_THREADS_NOW)
19232840Scarlsonj #endif
19240Sstevel@tonic-gate }
19250Sstevel@tonic-gate 
/*
 * Record the handler for this client.
 * Stores the callback and its private argument on the client handle;
 * both are read later by ibtl_sm_notice_task() when a notice fires.
 */
void
ibtl_cm_set_sm_notice_handler(ibt_clnt_hdl_t ibt_hdl,
    ibt_sm_notice_handler_t sm_notice_handler, void *private)
{
	_NOTE(NO_COMPETING_THREADS_NOW)
	ibt_hdl->clnt_sm_trap_handler = sm_notice_handler;
	ibt_hdl->clnt_sm_trap_handler_arg = private;
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW)
#endif
}
19400Sstevel@tonic-gate 
19410Sstevel@tonic-gate 
19420Sstevel@tonic-gate /*
19430Sstevel@tonic-gate  * ibtl_another_cq_handler_in_thread()
19440Sstevel@tonic-gate  *
19450Sstevel@tonic-gate  * Conditionally increase the number of cq_threads.
19460Sstevel@tonic-gate  * The number of threads grows, based on the number of cqs using threads.
19470Sstevel@tonic-gate  *
19480Sstevel@tonic-gate  * The table below controls the number of threads as follows:
19490Sstevel@tonic-gate  *
19500Sstevel@tonic-gate  *	Number of CQs	Number of cq_threads
19510Sstevel@tonic-gate  *		0		0
19520Sstevel@tonic-gate  *		1		1
19530Sstevel@tonic-gate  *		2-3		2
19540Sstevel@tonic-gate  *		4-5		3
19550Sstevel@tonic-gate  *		6-9		4
19560Sstevel@tonic-gate  *		10-15		5
19570Sstevel@tonic-gate  *		16-23		6
19580Sstevel@tonic-gate  *		24-31		7
19590Sstevel@tonic-gate  *		32+		8
19600Sstevel@tonic-gate  */
19610Sstevel@tonic-gate 
/* Upper bound on the number of CQ handler threads. */
#define	IBTL_CQ_MAXTHREADS 8

/*
 * Growth thresholds: entry [t] is the CQ count at which the pool grows
 * from t to t+1 threads (matches the table in the comment above).
 */
static uint8_t ibtl_cq_scaling[IBTL_CQ_MAXTHREADS] = {
	1, 2, 4, 6, 10, 16, 24, 32
};

/* Thread IDs of the CQ handler threads, saved for thread_join(). */
static kt_did_t ibtl_cq_did[IBTL_CQ_MAXTHREADS];
19680Sstevel@tonic-gate 
/*
 * Conditionally grow the pool of CQ handler threads: one more CQ is
 * now using threads, and if that pushes the count past the next
 * ibtl_cq_scaling[] threshold (and the pool is below its limit),
 * spawn an additional ibtl_cq_thread.
 */
void
ibtl_another_cq_handler_in_thread(void)
{
	kthread_t *t;
	int my_idx;

	mutex_enter(&ibtl_cq_mutex);
	/*
	 * NOTE(review): when the pool is already at IBTL_CQ_MAXTHREADS the
	 * short-circuit skips the ++ibtl_cqs_using_threads update entirely —
	 * presumably harmless since the pool can no longer grow; confirm
	 * that counter is not read elsewhere for accounting.
	 */
	if ((ibtl_cq_threads == IBTL_CQ_MAXTHREADS) ||
	    (++ibtl_cqs_using_threads < ibtl_cq_scaling[ibtl_cq_threads])) {
		mutex_exit(&ibtl_cq_mutex);
		return;
	}
	my_idx = ibtl_cq_threads++;	/* claim a slot in ibtl_cq_did[] */
	mutex_exit(&ibtl_cq_mutex);
	t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0, TS_RUN,
	    ibtl_pri - 1);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
	ibtl_cq_did[my_idx] = t->t_did;	/* save for thread_join() */
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
}
19890Sstevel@tonic-gate 
/*
 * First-stage initialization: create the mutexes and condition
 * variables used by the async and CQ handler machinery.  Thread
 * creation is deferred to ibtl_thread_init2().
 */
void
ibtl_thread_init(void)
{
	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init()");

	/* async-task and client-list synchronization */
	mutex_init(&ibtl_async_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ibtl_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&ibtl_clnt_cv, NULL, CV_DEFAULT, NULL);

	/* CQ handler thread synchronization */
	mutex_init(&ibtl_cq_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ibtl_cq_cv, NULL, CV_DEFAULT, NULL);
}
20020Sstevel@tonic-gate 
/*
 * Second-stage initialization: spawn the async handler threads and any
 * CQ handler threads requested so far.  Safe to call more than once;
 * only the first call (guarded by "initted" under ibtl_async_mutex)
 * does the work.
 */
void
ibtl_thread_init2(void)
{
	int i;
	static int initted = 0;	/* one-shot guard, protected by ibtl_async_mutex */
	kthread_t *t;

	mutex_enter(&ibtl_async_mutex);
	if (initted == 1) {
		mutex_exit(&ibtl_async_mutex);
		return;
	}
	initted = 1;
	mutex_exit(&ibtl_async_mutex);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_async_did))
	/* thread IDs kept so ibtl_thread_fini() can thread_join() them */
	ibtl_async_did = kmem_zalloc(ibtl_async_thread_init * sizeof (kt_did_t),
	    KM_SLEEP);

	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_init2()");

	/* spawn the fixed set of async handler threads */
	for (i = 0; i < ibtl_async_thread_init; i++) {
		t = thread_create(NULL, 0, ibtl_async_thread, NULL, 0, &p0,
		    TS_RUN, ibtl_pri - 1);
		ibtl_async_did[i] = t->t_did; /* thread_join() */
	}
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_async_did))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
	/* spawn CQ handler threads already requested via scaling */
	for (i = 0; i < ibtl_cq_threads; i++) {
		t = thread_create(NULL, 0, ibtl_cq_thread, NULL, 0, &p0,
		    TS_RUN, ibtl_pri - 1);
		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
		ibtl_cq_did[i] = t->t_did; /* save for thread_join() */
		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_did))
	}
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
}
20390Sstevel@tonic-gate 
/*
 * Tear down the handler threads and synchronization objects created by
 * ibtl_thread_init()/ibtl_thread_init2(): signal every thread to exit,
 * join them all, then destroy the mutexes and condition variables.
 */
void
ibtl_thread_fini(void)
{
	int i;

	IBTF_DPRINTF_L3(ibtf_handlers, "ibtl_thread_fini()");

	/* undo the work done by ibtl_thread_init() */

	/* tell the CQ handler threads to exit */
	mutex_enter(&ibtl_cq_mutex);
	ibtl_cq_thread_exit = IBTL_THREAD_EXIT;
	cv_broadcast(&ibtl_cq_cv);
	mutex_exit(&ibtl_cq_mutex);

	/* tell the async handler threads to exit */
	mutex_enter(&ibtl_async_mutex);
	ibtl_async_thread_exit = IBTL_THREAD_EXIT;
	cv_broadcast(&ibtl_async_cv);
	mutex_exit(&ibtl_async_mutex);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))
	for (i = 0; i < ibtl_cq_threads; i++)
		thread_join(ibtl_cq_did[i]);
	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(ibtl_cq_threads))

	/* ibtl_async_did is NULL if ibtl_thread_init2() never ran */
	if (ibtl_async_did) {
		for (i = 0; i < ibtl_async_thread_init; i++)
			thread_join(ibtl_async_did[i]);

		kmem_free(ibtl_async_did,
		    ibtl_async_thread_init * sizeof (kt_did_t));
	}
	/* all threads are gone; safe to destroy the sync objects */
	mutex_destroy(&ibtl_cq_mutex);
	cv_destroy(&ibtl_cq_cv);

	mutex_destroy(&ibtl_async_mutex);
	cv_destroy(&ibtl_async_cv);
	cv_destroy(&ibtl_clnt_cv);
}
20789891SRajkumar.Sivaprakasam@Sun.COM 
20799891SRajkumar.Sivaprakasam@Sun.COM /* ARGSUSED */
ibtl_dummy_node_info_cb(ib_guid_t hca_guid,uint8_t port,ib_lid_t lid,ibt_node_info_t * node_info)20809891SRajkumar.Sivaprakasam@Sun.COM ibt_status_t ibtl_dummy_node_info_cb(ib_guid_t hca_guid, uint8_t port,
20819891SRajkumar.Sivaprakasam@Sun.COM     ib_lid_t lid, ibt_node_info_t *node_info)
20829891SRajkumar.Sivaprakasam@Sun.COM {
20839891SRajkumar.Sivaprakasam@Sun.COM 	return (IBT_SUCCESS);
20849891SRajkumar.Sivaprakasam@Sun.COM }
2085