xref: /onnv-gate/usr/src/uts/common/os/ddi_intr_irm.c (revision 8561:14b63022c7d9)
1*8561SScott.Carter@Sun.COM /*
2*8561SScott.Carter@Sun.COM  * CDDL HEADER START
3*8561SScott.Carter@Sun.COM  *
4*8561SScott.Carter@Sun.COM  * The contents of this file are subject to the terms of the
5*8561SScott.Carter@Sun.COM  * Common Development and Distribution License (the "License").
6*8561SScott.Carter@Sun.COM  * You may not use this file except in compliance with the License.
7*8561SScott.Carter@Sun.COM  *
8*8561SScott.Carter@Sun.COM  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*8561SScott.Carter@Sun.COM  * or http://www.opensolaris.org/os/licensing.
10*8561SScott.Carter@Sun.COM  * See the License for the specific language governing permissions
11*8561SScott.Carter@Sun.COM  * and limitations under the License.
12*8561SScott.Carter@Sun.COM  *
13*8561SScott.Carter@Sun.COM  * When distributing Covered Code, include this CDDL HEADER in each
14*8561SScott.Carter@Sun.COM  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*8561SScott.Carter@Sun.COM  * If applicable, add the following below this CDDL HEADER, with the
16*8561SScott.Carter@Sun.COM  * fields enclosed by brackets "[]" replaced with your own identifying
17*8561SScott.Carter@Sun.COM  * information: Portions Copyright [yyyy] [name of copyright owner]
18*8561SScott.Carter@Sun.COM  *
19*8561SScott.Carter@Sun.COM  * CDDL HEADER END
20*8561SScott.Carter@Sun.COM  */
21*8561SScott.Carter@Sun.COM /*
22*8561SScott.Carter@Sun.COM  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23*8561SScott.Carter@Sun.COM  * Use is subject to license terms.
24*8561SScott.Carter@Sun.COM  */
25*8561SScott.Carter@Sun.COM 
26*8561SScott.Carter@Sun.COM #include <sys/note.h>
27*8561SScott.Carter@Sun.COM #include <sys/sysmacros.h>
28*8561SScott.Carter@Sun.COM #include <sys/types.h>
29*8561SScott.Carter@Sun.COM #include <sys/param.h>
30*8561SScott.Carter@Sun.COM #include <sys/systm.h>
31*8561SScott.Carter@Sun.COM #include <sys/kmem.h>
32*8561SScott.Carter@Sun.COM #include <sys/cmn_err.h>
33*8561SScott.Carter@Sun.COM #include <sys/debug.h>
34*8561SScott.Carter@Sun.COM #include <sys/ddi.h>
35*8561SScott.Carter@Sun.COM #include <sys/sunndi.h>
36*8561SScott.Carter@Sun.COM #include <sys/ndi_impldefs.h>	/* include prototypes */
37*8561SScott.Carter@Sun.COM 
38*8561SScott.Carter@Sun.COM /*
39*8561SScott.Carter@Sun.COM  * Interrupt Resource Management (IRM).
40*8561SScott.Carter@Sun.COM  */
41*8561SScott.Carter@Sun.COM 
#define	DDI_IRM_BALANCE_DELAY	(60)	/* In seconds */

/* True if callback 'c' is registered for interrupt-count changes */
#define	DDI_IRM_HAS_CB(c)	((c) && ((c)->cb_flags & DDI_CB_FLAG_INTR))

/*
 * True if request 'r' may be given fewer interrupts than it asked for:
 * either an MSI-X consumer with a registered callback, or a brand-new
 * request that has not yet been granted anything.
 * (Arguments are fully parenthesized to avoid expansion hazards.)
 */
#define	DDI_IRM_IS_REDUCIBLE(r)	((((r)->ireq_flags & DDI_IRM_FLAG_CALLBACK) && \
				((r)->ireq_type == DDI_INTR_TYPE_MSIX)) || \
				((r)->ireq_flags & DDI_IRM_FLAG_NEW))
49*8561SScott.Carter@Sun.COM 
50*8561SScott.Carter@Sun.COM extern pri_t	minclsyspri;
51*8561SScott.Carter@Sun.COM 
52*8561SScott.Carter@Sun.COM /* Global policies */
53*8561SScott.Carter@Sun.COM int		irm_enable = 1;
54*8561SScott.Carter@Sun.COM boolean_t	irm_active = B_FALSE;
55*8561SScott.Carter@Sun.COM int		irm_default_policy = DDI_IRM_POLICY_LARGE;
56*8561SScott.Carter@Sun.COM uint_t		irm_balance_delay = DDI_IRM_BALANCE_DELAY;
57*8561SScott.Carter@Sun.COM 
58*8561SScott.Carter@Sun.COM /* Global list of interrupt pools */
59*8561SScott.Carter@Sun.COM kmutex_t	irm_pools_lock;
60*8561SScott.Carter@Sun.COM list_t		irm_pools_list;
61*8561SScott.Carter@Sun.COM 
62*8561SScott.Carter@Sun.COM /* Global debug tunables */
63*8561SScott.Carter@Sun.COM #ifdef	DEBUG
64*8561SScott.Carter@Sun.COM int		irm_debug_policy = 0;
65*8561SScott.Carter@Sun.COM uint_t		irm_debug_size = 0;
66*8561SScott.Carter@Sun.COM #endif	/* DEBUG */
67*8561SScott.Carter@Sun.COM 
68*8561SScott.Carter@Sun.COM static void	irm_balance_thread(ddi_irm_pool_t *);
69*8561SScott.Carter@Sun.COM static void	i_ddi_irm_balance(ddi_irm_pool_t *);
70*8561SScott.Carter@Sun.COM static void	i_ddi_irm_enqueue(ddi_irm_pool_t *, boolean_t);
71*8561SScott.Carter@Sun.COM static void	i_ddi_irm_reduce(ddi_irm_pool_t *pool);
72*8561SScott.Carter@Sun.COM static int	i_ddi_irm_reduce_large(ddi_irm_pool_t *, int);
73*8561SScott.Carter@Sun.COM static void	i_ddi_irm_reduce_large_resort(ddi_irm_pool_t *);
74*8561SScott.Carter@Sun.COM static int	i_ddi_irm_reduce_even(ddi_irm_pool_t *, int);
75*8561SScott.Carter@Sun.COM static void	i_ddi_irm_reduce_new(ddi_irm_pool_t *, int);
76*8561SScott.Carter@Sun.COM static void	i_ddi_irm_insertion_sort(list_t *, ddi_irm_req_t *);
77*8561SScott.Carter@Sun.COM static int	i_ddi_irm_notify(ddi_irm_pool_t *, ddi_irm_req_t *);
78*8561SScott.Carter@Sun.COM 
79*8561SScott.Carter@Sun.COM /*
80*8561SScott.Carter@Sun.COM  * OS Initialization Routines
81*8561SScott.Carter@Sun.COM  */
82*8561SScott.Carter@Sun.COM 
83*8561SScott.Carter@Sun.COM /*
84*8561SScott.Carter@Sun.COM  * irm_init()
85*8561SScott.Carter@Sun.COM  *
86*8561SScott.Carter@Sun.COM  *	Initialize IRM subsystem before any drivers are attached.
87*8561SScott.Carter@Sun.COM  */
88*8561SScott.Carter@Sun.COM void
89*8561SScott.Carter@Sun.COM irm_init(void)
90*8561SScott.Carter@Sun.COM {
91*8561SScott.Carter@Sun.COM 	/* Do nothing if IRM is disabled */
92*8561SScott.Carter@Sun.COM 	if (!irm_enable)
93*8561SScott.Carter@Sun.COM 		return;
94*8561SScott.Carter@Sun.COM 
95*8561SScott.Carter@Sun.COM 	/* Verify that the default balancing policy is valid */
96*8561SScott.Carter@Sun.COM 	if (!DDI_IRM_POLICY_VALID(irm_default_policy))
97*8561SScott.Carter@Sun.COM 		irm_default_policy = DDI_IRM_POLICY_LARGE;
98*8561SScott.Carter@Sun.COM 
99*8561SScott.Carter@Sun.COM 	/* Initialize the global list of interrupt pools */
100*8561SScott.Carter@Sun.COM 	mutex_init(&irm_pools_lock, NULL, MUTEX_DRIVER, NULL);
101*8561SScott.Carter@Sun.COM 	list_create(&irm_pools_list, sizeof (ddi_irm_pool_t),
102*8561SScott.Carter@Sun.COM 	    offsetof(ddi_irm_pool_t, ipool_link));
103*8561SScott.Carter@Sun.COM }
104*8561SScott.Carter@Sun.COM 
105*8561SScott.Carter@Sun.COM /*
106*8561SScott.Carter@Sun.COM  * i_ddi_irm_poststartup()
107*8561SScott.Carter@Sun.COM  *
108*8561SScott.Carter@Sun.COM  *	IRM is not activated until after the IO subsystem is initialized.
109*8561SScott.Carter@Sun.COM  *	When activated, per-pool balancing threads are spawned and a flag
110*8561SScott.Carter@Sun.COM  *	is set so that all future pools will be activated when created.
111*8561SScott.Carter@Sun.COM  *
112*8561SScott.Carter@Sun.COM  *	NOTE: the global variable 'irm_enable' disables IRM if zero.
113*8561SScott.Carter@Sun.COM  */
114*8561SScott.Carter@Sun.COM void
115*8561SScott.Carter@Sun.COM i_ddi_irm_poststartup(void)
116*8561SScott.Carter@Sun.COM {
117*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
118*8561SScott.Carter@Sun.COM 
119*8561SScott.Carter@Sun.COM 	/* Do nothing if IRM is disabled */
120*8561SScott.Carter@Sun.COM 	if (!irm_enable)
121*8561SScott.Carter@Sun.COM 		return;
122*8561SScott.Carter@Sun.COM 
123*8561SScott.Carter@Sun.COM 	/* Lock the global list */
124*8561SScott.Carter@Sun.COM 	mutex_enter(&irm_pools_lock);
125*8561SScott.Carter@Sun.COM 
126*8561SScott.Carter@Sun.COM 	/* Activate all defined pools */
127*8561SScott.Carter@Sun.COM 	for (pool_p = list_head(&irm_pools_list); pool_p;
128*8561SScott.Carter@Sun.COM 	    pool_p = list_next(&irm_pools_list, pool_p))
129*8561SScott.Carter@Sun.COM 		pool_p->ipool_thread = thread_create(NULL, 0,
130*8561SScott.Carter@Sun.COM 		    irm_balance_thread, pool_p, 0, &p0, TS_RUN, minclsyspri);
131*8561SScott.Carter@Sun.COM 
132*8561SScott.Carter@Sun.COM 	/* Set future pools to be active */
133*8561SScott.Carter@Sun.COM 	irm_active = B_TRUE;
134*8561SScott.Carter@Sun.COM 
135*8561SScott.Carter@Sun.COM 	/* Unlock the global list */
136*8561SScott.Carter@Sun.COM 	mutex_exit(&irm_pools_lock);
137*8561SScott.Carter@Sun.COM }
138*8561SScott.Carter@Sun.COM 
139*8561SScott.Carter@Sun.COM /*
140*8561SScott.Carter@Sun.COM  * NDI interfaces for creating/destroying IRM pools.
141*8561SScott.Carter@Sun.COM  */
142*8561SScott.Carter@Sun.COM 
143*8561SScott.Carter@Sun.COM /*
144*8561SScott.Carter@Sun.COM  * ndi_irm_create()
145*8561SScott.Carter@Sun.COM  *
146*8561SScott.Carter@Sun.COM  *	Nexus interface to create an IRM pool.  Create the new
147*8561SScott.Carter@Sun.COM  *	pool and add it to the global list of interrupt pools.
148*8561SScott.Carter@Sun.COM  */
149*8561SScott.Carter@Sun.COM int
150*8561SScott.Carter@Sun.COM ndi_irm_create(dev_info_t *dip, ddi_irm_params_t *paramsp,
151*8561SScott.Carter@Sun.COM     ddi_irm_pool_t **pool_retp)
152*8561SScott.Carter@Sun.COM {
153*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
154*8561SScott.Carter@Sun.COM 
155*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
156*8561SScott.Carter@Sun.COM 	ASSERT(paramsp != NULL);
157*8561SScott.Carter@Sun.COM 	ASSERT(pool_retp != NULL);
158*8561SScott.Carter@Sun.COM 	ASSERT(paramsp->iparams_total >= 1);
159*8561SScott.Carter@Sun.COM 	ASSERT(paramsp->iparams_types != 0);
160*8561SScott.Carter@Sun.COM 	ASSERT(paramsp->iparams_default >= 1);
161*8561SScott.Carter@Sun.COM 
162*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_create: dip %p\n", (void *)dip));
163*8561SScott.Carter@Sun.COM 
164*8561SScott.Carter@Sun.COM 	/* Check if IRM is enabled */
165*8561SScott.Carter@Sun.COM 	if (!irm_enable)
166*8561SScott.Carter@Sun.COM 		return (NDI_FAILURE);
167*8561SScott.Carter@Sun.COM 
168*8561SScott.Carter@Sun.COM 	/* Validate parameters */
169*8561SScott.Carter@Sun.COM 	if ((dip == NULL) || (paramsp == NULL) || (pool_retp == NULL) ||
170*8561SScott.Carter@Sun.COM 	    (paramsp->iparams_total < 1) || (paramsp->iparams_types == 0) ||
171*8561SScott.Carter@Sun.COM 	    (paramsp->iparams_default < 1))
172*8561SScott.Carter@Sun.COM 		return (NDI_FAILURE);
173*8561SScott.Carter@Sun.COM 
174*8561SScott.Carter@Sun.COM 	/* Allocate and initialize the pool */
175*8561SScott.Carter@Sun.COM 	pool_p = kmem_zalloc(sizeof (ddi_irm_pool_t), KM_SLEEP);
176*8561SScott.Carter@Sun.COM 	pool_p->ipool_owner = dip;
177*8561SScott.Carter@Sun.COM 	pool_p->ipool_policy = irm_default_policy;
178*8561SScott.Carter@Sun.COM 	pool_p->ipool_types = paramsp->iparams_types;
179*8561SScott.Carter@Sun.COM 	pool_p->ipool_totsz = paramsp->iparams_total;
180*8561SScott.Carter@Sun.COM 	pool_p->ipool_defsz = paramsp->iparams_default;
181*8561SScott.Carter@Sun.COM 	list_create(&pool_p->ipool_req_list, sizeof (ddi_irm_req_t),
182*8561SScott.Carter@Sun.COM 	    offsetof(ddi_irm_req_t, ireq_link));
183*8561SScott.Carter@Sun.COM 	list_create(&pool_p->ipool_scratch_list, sizeof (ddi_irm_req_t),
184*8561SScott.Carter@Sun.COM 	    offsetof(ddi_irm_req_t, ireq_scratch_link));
185*8561SScott.Carter@Sun.COM 	cv_init(&pool_p->ipool_cv, NULL, CV_DRIVER, NULL);
186*8561SScott.Carter@Sun.COM 	mutex_init(&pool_p->ipool_lock, NULL, MUTEX_DRIVER, NULL);
187*8561SScott.Carter@Sun.COM 	mutex_init(&pool_p->ipool_navail_lock, NULL, MUTEX_DRIVER, NULL);
188*8561SScott.Carter@Sun.COM 
189*8561SScott.Carter@Sun.COM 	/* Add to global list of pools */
190*8561SScott.Carter@Sun.COM 	mutex_enter(&irm_pools_lock);
191*8561SScott.Carter@Sun.COM 	list_insert_tail(&irm_pools_list, pool_p);
192*8561SScott.Carter@Sun.COM 	mutex_exit(&irm_pools_lock);
193*8561SScott.Carter@Sun.COM 
194*8561SScott.Carter@Sun.COM 	/* If IRM is active, then activate the pool */
195*8561SScott.Carter@Sun.COM 	if (irm_active)
196*8561SScott.Carter@Sun.COM 		pool_p->ipool_thread = thread_create(NULL, 0,
197*8561SScott.Carter@Sun.COM 		    irm_balance_thread, pool_p, 0, &p0, TS_RUN, minclsyspri);
198*8561SScott.Carter@Sun.COM 
199*8561SScott.Carter@Sun.COM 	*pool_retp = pool_p;
200*8561SScott.Carter@Sun.COM 	return (NDI_SUCCESS);
201*8561SScott.Carter@Sun.COM }
202*8561SScott.Carter@Sun.COM 
203*8561SScott.Carter@Sun.COM /*
204*8561SScott.Carter@Sun.COM  * ndi_irm_destroy()
205*8561SScott.Carter@Sun.COM  *
206*8561SScott.Carter@Sun.COM  *	Nexus interface to destroy an IRM pool.  Destroy the pool
207*8561SScott.Carter@Sun.COM  *	and remove it from the global list of interrupt pools.
208*8561SScott.Carter@Sun.COM  */
209*8561SScott.Carter@Sun.COM int
210*8561SScott.Carter@Sun.COM ndi_irm_destroy(ddi_irm_pool_t *pool_p)
211*8561SScott.Carter@Sun.COM {
212*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
213*8561SScott.Carter@Sun.COM 	ASSERT(pool_p->ipool_resno == 0);
214*8561SScott.Carter@Sun.COM 
215*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "ndi_irm_destroy: pool_p %p\n",
216*8561SScott.Carter@Sun.COM 	    (void *)pool_p));
217*8561SScott.Carter@Sun.COM 
218*8561SScott.Carter@Sun.COM 	/* Validate parameters */
219*8561SScott.Carter@Sun.COM 	if (pool_p == NULL)
220*8561SScott.Carter@Sun.COM 		return (NDI_FAILURE);
221*8561SScott.Carter@Sun.COM 
222*8561SScott.Carter@Sun.COM 	/* Validate that pool is empty */
223*8561SScott.Carter@Sun.COM 	if (pool_p->ipool_resno != 0)
224*8561SScott.Carter@Sun.COM 		return (NDI_BUSY);
225*8561SScott.Carter@Sun.COM 
226*8561SScott.Carter@Sun.COM 	/* Remove the pool from the global list */
227*8561SScott.Carter@Sun.COM 	mutex_enter(&irm_pools_lock);
228*8561SScott.Carter@Sun.COM 	list_remove(&irm_pools_list, pool_p);
229*8561SScott.Carter@Sun.COM 	mutex_exit(&irm_pools_lock);
230*8561SScott.Carter@Sun.COM 
231*8561SScott.Carter@Sun.COM 	/* Terminate the balancing thread */
232*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_lock);
233*8561SScott.Carter@Sun.COM 	if (pool_p->ipool_thread &&
234*8561SScott.Carter@Sun.COM 	    (pool_p->ipool_flags & DDI_IRM_FLAG_ACTIVE)) {
235*8561SScott.Carter@Sun.COM 		pool_p->ipool_flags |= DDI_IRM_FLAG_EXIT;
236*8561SScott.Carter@Sun.COM 		cv_signal(&pool_p->ipool_cv);
237*8561SScott.Carter@Sun.COM 		thread_join(pool_p->ipool_thread->t_did);
238*8561SScott.Carter@Sun.COM 	}
239*8561SScott.Carter@Sun.COM 	mutex_exit(&pool_p->ipool_lock);
240*8561SScott.Carter@Sun.COM 
241*8561SScott.Carter@Sun.COM 	/* Destroy the pool */
242*8561SScott.Carter@Sun.COM 	cv_destroy(&pool_p->ipool_cv);
243*8561SScott.Carter@Sun.COM 	mutex_destroy(&pool_p->ipool_lock);
244*8561SScott.Carter@Sun.COM 	mutex_destroy(&pool_p->ipool_navail_lock);
245*8561SScott.Carter@Sun.COM 	list_destroy(&pool_p->ipool_req_list);
246*8561SScott.Carter@Sun.COM 	list_destroy(&pool_p->ipool_scratch_list);
247*8561SScott.Carter@Sun.COM 	kmem_free(pool_p, sizeof (ddi_irm_pool_t));
248*8561SScott.Carter@Sun.COM 
249*8561SScott.Carter@Sun.COM 	return (NDI_SUCCESS);
250*8561SScott.Carter@Sun.COM }
251*8561SScott.Carter@Sun.COM 
252*8561SScott.Carter@Sun.COM /*
253*8561SScott.Carter@Sun.COM  * Insert/Modify/Remove Interrupt Requests
254*8561SScott.Carter@Sun.COM  */
255*8561SScott.Carter@Sun.COM 
256*8561SScott.Carter@Sun.COM /*
257*8561SScott.Carter@Sun.COM  * i_ddi_irm_insert()
258*8561SScott.Carter@Sun.COM  *
259*8561SScott.Carter@Sun.COM  *	Insert a new request into an interrupt pool, and balance the pool.
260*8561SScott.Carter@Sun.COM  */
261*8561SScott.Carter@Sun.COM int
262*8561SScott.Carter@Sun.COM i_ddi_irm_insert(dev_info_t *dip, int type, int count)
263*8561SScott.Carter@Sun.COM {
264*8561SScott.Carter@Sun.COM 	ddi_cb_t	*cb_p;
265*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
266*8561SScott.Carter@Sun.COM 	devinfo_intr_t	*intr_p;
267*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
268*8561SScott.Carter@Sun.COM 	uint_t		nreq, nmin, npartial;
269*8561SScott.Carter@Sun.COM 	boolean_t	irm_flag = B_FALSE;
270*8561SScott.Carter@Sun.COM 
271*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
272*8561SScott.Carter@Sun.COM 	ASSERT(DDI_INTR_TYPE_FLAG_VALID(type));
273*8561SScott.Carter@Sun.COM 	ASSERT(count > 0);
274*8561SScott.Carter@Sun.COM 
275*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: dip %p type %d count %d\n",
276*8561SScott.Carter@Sun.COM 	    (void *)dip, type, count));
277*8561SScott.Carter@Sun.COM 
278*8561SScott.Carter@Sun.COM 	/* Validate parameters */
279*8561SScott.Carter@Sun.COM 	if ((dip == NULL) || (count < 1) || !DDI_INTR_TYPE_FLAG_VALID(type)) {
280*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: invalid args\n"));
281*8561SScott.Carter@Sun.COM 		return (DDI_EINVAL);
282*8561SScott.Carter@Sun.COM 	}
283*8561SScott.Carter@Sun.COM 
284*8561SScott.Carter@Sun.COM 	/* Check for an existing request */
285*8561SScott.Carter@Sun.COM 	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
286*8561SScott.Carter@Sun.COM 	    (intr_p->devi_irm_req_p != NULL))
287*8561SScott.Carter@Sun.COM 		return (DDI_SUCCESS);
288*8561SScott.Carter@Sun.COM 
289*8561SScott.Carter@Sun.COM 	/* Check for IRM support from the system */
290*8561SScott.Carter@Sun.COM 	if ((pool_p = i_ddi_intr_get_pool(dip, type)) == NULL) {
291*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: not supported\n"));
292*8561SScott.Carter@Sun.COM 		return (DDI_ENOTSUP);
293*8561SScott.Carter@Sun.COM 	}
294*8561SScott.Carter@Sun.COM 
295*8561SScott.Carter@Sun.COM 	/* Check for IRM support from the driver */
296*8561SScott.Carter@Sun.COM 	if (((cb_p = DEVI(dip)->devi_cb_p) != NULL) && DDI_IRM_HAS_CB(cb_p) &&
297*8561SScott.Carter@Sun.COM 	    (type == DDI_INTR_TYPE_MSIX))
298*8561SScott.Carter@Sun.COM 		irm_flag = B_TRUE;
299*8561SScott.Carter@Sun.COM 
300*8561SScott.Carter@Sun.COM 	/* Determine request size */
301*8561SScott.Carter@Sun.COM 	nreq = (irm_flag) ? count : i_ddi_intr_get_current_navail(dip, type);
302*8561SScott.Carter@Sun.COM 	nmin = (irm_flag) ? 1 : nreq;
303*8561SScott.Carter@Sun.COM 	npartial = MIN(nreq, pool_p->ipool_defsz);
304*8561SScott.Carter@Sun.COM 
305*8561SScott.Carter@Sun.COM 	/* Allocate and initialize the request */
306*8561SScott.Carter@Sun.COM 	req_p = kmem_zalloc(sizeof (ddi_irm_req_t), KM_SLEEP);
307*8561SScott.Carter@Sun.COM 	req_p->ireq_type = type;
308*8561SScott.Carter@Sun.COM 	req_p->ireq_dip = dip;
309*8561SScott.Carter@Sun.COM 	req_p->ireq_pool_p = pool_p;
310*8561SScott.Carter@Sun.COM 	req_p->ireq_nreq = nreq;
311*8561SScott.Carter@Sun.COM 	req_p->ireq_flags = DDI_IRM_FLAG_NEW;
312*8561SScott.Carter@Sun.COM 	if (DDI_IRM_HAS_CB(cb_p))
313*8561SScott.Carter@Sun.COM 		req_p->ireq_flags |= DDI_IRM_FLAG_CALLBACK;
314*8561SScott.Carter@Sun.COM 
315*8561SScott.Carter@Sun.COM 	/* Lock the pool */
316*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_lock);
317*8561SScott.Carter@Sun.COM 
318*8561SScott.Carter@Sun.COM 	/* Check for minimal fit before inserting */
319*8561SScott.Carter@Sun.COM 	if ((pool_p->ipool_minno + nmin) > pool_p->ipool_totsz) {
320*8561SScott.Carter@Sun.COM 		cmn_err(CE_WARN, "%s%d: interrupt pool too full.\n",
321*8561SScott.Carter@Sun.COM 		    ddi_driver_name(dip), ddi_get_instance(dip));
322*8561SScott.Carter@Sun.COM 		mutex_exit(&pool_p->ipool_lock);
323*8561SScott.Carter@Sun.COM 		kmem_free(req_p, sizeof (ddi_irm_req_t));
324*8561SScott.Carter@Sun.COM 		return (DDI_EAGAIN);
325*8561SScott.Carter@Sun.COM 	}
326*8561SScott.Carter@Sun.COM 
327*8561SScott.Carter@Sun.COM 	/* Insert the request into the pool */
328*8561SScott.Carter@Sun.COM 	pool_p->ipool_reqno += nreq;
329*8561SScott.Carter@Sun.COM 	pool_p->ipool_minno += nmin;
330*8561SScott.Carter@Sun.COM 	i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);
331*8561SScott.Carter@Sun.COM 
332*8561SScott.Carter@Sun.COM 	/*
333*8561SScott.Carter@Sun.COM 	 * Try to fulfill the request.
334*8561SScott.Carter@Sun.COM 	 *
335*8561SScott.Carter@Sun.COM 	 * If all the interrupts are available, and either the request
336*8561SScott.Carter@Sun.COM 	 * is static or the pool is active, then just take them directly.
337*8561SScott.Carter@Sun.COM 	 *
338*8561SScott.Carter@Sun.COM 	 * If only some of the interrupts are available, and the request
339*8561SScott.Carter@Sun.COM 	 * can receive future callbacks, then take some now but queue the
340*8561SScott.Carter@Sun.COM 	 * pool to be rebalanced later.
341*8561SScott.Carter@Sun.COM 	 *
342*8561SScott.Carter@Sun.COM 	 * Otherwise, immediately rebalance the pool and wait.
343*8561SScott.Carter@Sun.COM 	 */
344*8561SScott.Carter@Sun.COM 	if ((!irm_flag || (pool_p->ipool_flags & DDI_IRM_FLAG_ACTIVE)) &&
345*8561SScott.Carter@Sun.COM 	    ((pool_p->ipool_resno + nreq) <= pool_p->ipool_totsz)) {
346*8561SScott.Carter@Sun.COM 
347*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
348*8561SScott.Carter@Sun.COM 		    "request completely fulfilled.\n"));
349*8561SScott.Carter@Sun.COM 		pool_p->ipool_resno += nreq;
350*8561SScott.Carter@Sun.COM 		req_p->ireq_navail = nreq;
351*8561SScott.Carter@Sun.COM 		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);
352*8561SScott.Carter@Sun.COM 
353*8561SScott.Carter@Sun.COM 	} else if (irm_flag &&
354*8561SScott.Carter@Sun.COM 	    ((pool_p->ipool_resno + npartial) <= pool_p->ipool_totsz)) {
355*8561SScott.Carter@Sun.COM 
356*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
357*8561SScott.Carter@Sun.COM 		    "request partially fulfilled.\n"));
358*8561SScott.Carter@Sun.COM 		pool_p->ipool_resno += npartial;
359*8561SScott.Carter@Sun.COM 		req_p->ireq_navail = npartial;
360*8561SScott.Carter@Sun.COM 		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);
361*8561SScott.Carter@Sun.COM 		i_ddi_irm_enqueue(pool_p, B_FALSE);
362*8561SScott.Carter@Sun.COM 
363*8561SScott.Carter@Sun.COM 	} else {
364*8561SScott.Carter@Sun.COM 
365*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_insert: "
366*8561SScott.Carter@Sun.COM 		    "request needs immediate rebalance.\n"));
367*8561SScott.Carter@Sun.COM 		i_ddi_irm_enqueue(pool_p, B_TRUE);
368*8561SScott.Carter@Sun.COM 		req_p->ireq_flags &= ~(DDI_IRM_FLAG_NEW);
369*8561SScott.Carter@Sun.COM 	}
370*8561SScott.Carter@Sun.COM 
371*8561SScott.Carter@Sun.COM 	/* Fail if the request cannot be fulfilled at all */
372*8561SScott.Carter@Sun.COM 	if (req_p->ireq_navail == 0) {
373*8561SScott.Carter@Sun.COM 		cmn_err(CE_WARN, "%s%d: interrupt pool too full.\n",
374*8561SScott.Carter@Sun.COM 		    ddi_driver_name(dip), ddi_get_instance(dip));
375*8561SScott.Carter@Sun.COM 		mutex_exit(&pool_p->ipool_lock);
376*8561SScott.Carter@Sun.COM 		pool_p->ipool_reqno -= nreq;
377*8561SScott.Carter@Sun.COM 		pool_p->ipool_minno -= nmin;
378*8561SScott.Carter@Sun.COM 		list_remove(&pool_p->ipool_req_list, req_p);
379*8561SScott.Carter@Sun.COM 		kmem_free(req_p, sizeof (ddi_irm_req_t));
380*8561SScott.Carter@Sun.COM 		return (DDI_EAGAIN);
381*8561SScott.Carter@Sun.COM 	}
382*8561SScott.Carter@Sun.COM 
383*8561SScott.Carter@Sun.COM 	/* Unlock the pool */
384*8561SScott.Carter@Sun.COM 	mutex_exit(&pool_p->ipool_lock);
385*8561SScott.Carter@Sun.COM 
386*8561SScott.Carter@Sun.COM 	intr_p->devi_irm_req_p = req_p;
387*8561SScott.Carter@Sun.COM 	return (DDI_SUCCESS);
388*8561SScott.Carter@Sun.COM }
389*8561SScott.Carter@Sun.COM 
390*8561SScott.Carter@Sun.COM /*
391*8561SScott.Carter@Sun.COM  * i_ddi_irm_modify()
392*8561SScott.Carter@Sun.COM  *
393*8561SScott.Carter@Sun.COM  *	Modify an existing request in an interrupt pool, and balance the pool.
394*8561SScott.Carter@Sun.COM  */
395*8561SScott.Carter@Sun.COM int
396*8561SScott.Carter@Sun.COM i_ddi_irm_modify(dev_info_t *dip, int nreq)
397*8561SScott.Carter@Sun.COM {
398*8561SScott.Carter@Sun.COM 	devinfo_intr_t	*intr_p;
399*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
400*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
401*8561SScott.Carter@Sun.COM 
402*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
403*8561SScott.Carter@Sun.COM 
404*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: dip %p nreq %d\n",
405*8561SScott.Carter@Sun.COM 	    (void *)dip, nreq));
406*8561SScott.Carter@Sun.COM 
407*8561SScott.Carter@Sun.COM 	/* Validate parameters */
408*8561SScott.Carter@Sun.COM 	if ((dip == NULL) || (nreq < 1)) {
409*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: invalid args\n"));
410*8561SScott.Carter@Sun.COM 		return (DDI_EINVAL);
411*8561SScott.Carter@Sun.COM 	}
412*8561SScott.Carter@Sun.COM 
413*8561SScott.Carter@Sun.COM 	/* Check that the operation is supported */
414*8561SScott.Carter@Sun.COM 	if (!(intr_p = DEVI(dip)->devi_intr_p) ||
415*8561SScott.Carter@Sun.COM 	    !(req_p = intr_p->devi_irm_req_p) ||
416*8561SScott.Carter@Sun.COM 	    !DDI_IRM_IS_REDUCIBLE(req_p)) {
417*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: not supported\n"));
418*8561SScott.Carter@Sun.COM 		return (DDI_ENOTSUP);
419*8561SScott.Carter@Sun.COM 	}
420*8561SScott.Carter@Sun.COM 
421*8561SScott.Carter@Sun.COM 	/* Validate request size is not too large */
422*8561SScott.Carter@Sun.COM 	if (nreq > intr_p->devi_intr_sup_nintrs) {
423*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: invalid args\n"));
424*8561SScott.Carter@Sun.COM 		return (DDI_EINVAL);
425*8561SScott.Carter@Sun.COM 	}
426*8561SScott.Carter@Sun.COM 
427*8561SScott.Carter@Sun.COM 	/*
428*8561SScott.Carter@Sun.COM 	 * Modify request, but only if new size is different.
429*8561SScott.Carter@Sun.COM 	 */
430*8561SScott.Carter@Sun.COM 	if (nreq != req_p->ireq_nreq) {
431*8561SScott.Carter@Sun.COM 
432*8561SScott.Carter@Sun.COM 		/* Lock the pool */
433*8561SScott.Carter@Sun.COM 		pool_p = req_p->ireq_pool_p;
434*8561SScott.Carter@Sun.COM 		mutex_enter(&pool_p->ipool_lock);
435*8561SScott.Carter@Sun.COM 
436*8561SScott.Carter@Sun.COM 		/* Update pool and request */
437*8561SScott.Carter@Sun.COM 		pool_p->ipool_reqno -= req_p->ireq_nreq;
438*8561SScott.Carter@Sun.COM 		pool_p->ipool_reqno += nreq;
439*8561SScott.Carter@Sun.COM 		req_p->ireq_nreq = nreq;
440*8561SScott.Carter@Sun.COM 
441*8561SScott.Carter@Sun.COM 		/* Re-sort request in the pool */
442*8561SScott.Carter@Sun.COM 		list_remove(&pool_p->ipool_req_list, req_p);
443*8561SScott.Carter@Sun.COM 		i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);
444*8561SScott.Carter@Sun.COM 
445*8561SScott.Carter@Sun.COM 		/* Queue pool to be rebalanced */
446*8561SScott.Carter@Sun.COM 		i_ddi_irm_enqueue(pool_p, B_FALSE);
447*8561SScott.Carter@Sun.COM 
448*8561SScott.Carter@Sun.COM 		/* Unlock the pool */
449*8561SScott.Carter@Sun.COM 		mutex_exit(&pool_p->ipool_lock);
450*8561SScott.Carter@Sun.COM 	}
451*8561SScott.Carter@Sun.COM 
452*8561SScott.Carter@Sun.COM 	return (DDI_SUCCESS);
453*8561SScott.Carter@Sun.COM }
454*8561SScott.Carter@Sun.COM 
455*8561SScott.Carter@Sun.COM /*
456*8561SScott.Carter@Sun.COM  * i_ddi_irm_remove()
457*8561SScott.Carter@Sun.COM  *
458*8561SScott.Carter@Sun.COM  *	Remove a request from an interrupt pool, and balance the pool.
459*8561SScott.Carter@Sun.COM  */
460*8561SScott.Carter@Sun.COM int
461*8561SScott.Carter@Sun.COM i_ddi_irm_remove(dev_info_t *dip)
462*8561SScott.Carter@Sun.COM {
463*8561SScott.Carter@Sun.COM 	devinfo_intr_t	*intr_p;
464*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
465*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
466*8561SScott.Carter@Sun.COM 	uint_t		nmin;
467*8561SScott.Carter@Sun.COM 
468*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
469*8561SScott.Carter@Sun.COM 
470*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_remove: dip %p\n", (void *)dip));
471*8561SScott.Carter@Sun.COM 
472*8561SScott.Carter@Sun.COM 	/* Validate parameters */
473*8561SScott.Carter@Sun.COM 	if (dip == NULL) {
474*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_remove: invalid args\n"));
475*8561SScott.Carter@Sun.COM 		return (DDI_EINVAL);
476*8561SScott.Carter@Sun.COM 	}
477*8561SScott.Carter@Sun.COM 
478*8561SScott.Carter@Sun.COM 	/* Check if the device has a request */
479*8561SScott.Carter@Sun.COM 	if (!(intr_p = DEVI(dip)->devi_intr_p) ||
480*8561SScott.Carter@Sun.COM 	    !(req_p = intr_p->devi_irm_req_p)) {
481*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_modify: not found\n"));
482*8561SScott.Carter@Sun.COM 		return (DDI_EINVAL);
483*8561SScott.Carter@Sun.COM 	}
484*8561SScott.Carter@Sun.COM 
485*8561SScott.Carter@Sun.COM 	/* Lock the pool */
486*8561SScott.Carter@Sun.COM 	pool_p = req_p->ireq_pool_p;
487*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_lock);
488*8561SScott.Carter@Sun.COM 
489*8561SScott.Carter@Sun.COM 	/* Remove request */
490*8561SScott.Carter@Sun.COM 	nmin = DDI_IRM_IS_REDUCIBLE(req_p) ? 1 : req_p->ireq_nreq;
491*8561SScott.Carter@Sun.COM 	pool_p->ipool_minno -= nmin;
492*8561SScott.Carter@Sun.COM 	pool_p->ipool_reqno -= req_p->ireq_nreq;
493*8561SScott.Carter@Sun.COM 	pool_p->ipool_resno -= req_p->ireq_navail;
494*8561SScott.Carter@Sun.COM 	list_remove(&pool_p->ipool_req_list, req_p);
495*8561SScott.Carter@Sun.COM 
496*8561SScott.Carter@Sun.COM 	/* Queue pool to be rebalanced */
497*8561SScott.Carter@Sun.COM 	i_ddi_irm_enqueue(pool_p, B_FALSE);
498*8561SScott.Carter@Sun.COM 
499*8561SScott.Carter@Sun.COM 	/* Unlock the pool */
500*8561SScott.Carter@Sun.COM 	mutex_exit(&pool_p->ipool_lock);
501*8561SScott.Carter@Sun.COM 
502*8561SScott.Carter@Sun.COM 	/* Destroy the request */
503*8561SScott.Carter@Sun.COM 	intr_p->devi_irm_req_p = NULL;
504*8561SScott.Carter@Sun.COM 	kmem_free(req_p, sizeof (ddi_irm_req_t));
505*8561SScott.Carter@Sun.COM 
506*8561SScott.Carter@Sun.COM 	return (DDI_SUCCESS);
507*8561SScott.Carter@Sun.COM }
508*8561SScott.Carter@Sun.COM 
509*8561SScott.Carter@Sun.COM /*
510*8561SScott.Carter@Sun.COM  * i_ddi_irm_set_cb()
511*8561SScott.Carter@Sun.COM  *
512*8561SScott.Carter@Sun.COM  *	Change the callback flag for a request, in response to
513*8561SScott.Carter@Sun.COM  *	a change in its callback registration.  Then rebalance
514*8561SScott.Carter@Sun.COM  *	the interrupt pool.
515*8561SScott.Carter@Sun.COM  *
516*8561SScott.Carter@Sun.COM  *	NOTE: the request is not locked because the navail value
517*8561SScott.Carter@Sun.COM  *	      is not directly affected.  The balancing thread may
518*8561SScott.Carter@Sun.COM  *	      modify the navail value in the background after it
519*8561SScott.Carter@Sun.COM  *	      locks the request itself.
520*8561SScott.Carter@Sun.COM  */
521*8561SScott.Carter@Sun.COM void
522*8561SScott.Carter@Sun.COM i_ddi_irm_set_cb(dev_info_t *dip, boolean_t has_cb_flag)
523*8561SScott.Carter@Sun.COM {
524*8561SScott.Carter@Sun.COM 	devinfo_intr_t	*intr_p;
525*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t	*pool_p;
526*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
527*8561SScott.Carter@Sun.COM 	uint_t		nreq;
528*8561SScott.Carter@Sun.COM 
529*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
530*8561SScott.Carter@Sun.COM 
531*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_set_cb: dip %p has_cb_flag %d\n",
532*8561SScott.Carter@Sun.COM 	    (void *)dip, (int)has_cb_flag));
533*8561SScott.Carter@Sun.COM 
534*8561SScott.Carter@Sun.COM 	/* Validate parameters */
535*8561SScott.Carter@Sun.COM 	if (dip == NULL)
536*8561SScott.Carter@Sun.COM 		return;
537*8561SScott.Carter@Sun.COM 
538*8561SScott.Carter@Sun.COM 	/* Check for association with interrupt pool */
539*8561SScott.Carter@Sun.COM 	if (!(intr_p = DEVI(dip)->devi_intr_p) ||
540*8561SScott.Carter@Sun.COM 	    !(req_p = intr_p->devi_irm_req_p)) {
541*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_set_cb: not in pool\n"));
542*8561SScott.Carter@Sun.COM 		return;
543*8561SScott.Carter@Sun.COM 	}
544*8561SScott.Carter@Sun.COM 
545*8561SScott.Carter@Sun.COM 	/* Lock the pool */
546*8561SScott.Carter@Sun.COM 	pool_p = req_p->ireq_pool_p;
547*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_lock);
548*8561SScott.Carter@Sun.COM 
549*8561SScott.Carter@Sun.COM 	/*
550*8561SScott.Carter@Sun.COM 	 * Update the request and the pool
551*8561SScott.Carter@Sun.COM 	 */
552*8561SScott.Carter@Sun.COM 	if (has_cb_flag) {
553*8561SScott.Carter@Sun.COM 
554*8561SScott.Carter@Sun.COM 		/* Update pool statistics */
555*8561SScott.Carter@Sun.COM 		if (req_p->ireq_type == DDI_INTR_TYPE_MSIX)
556*8561SScott.Carter@Sun.COM 			pool_p->ipool_minno -= (req_p->ireq_nreq - 1);
557*8561SScott.Carter@Sun.COM 
558*8561SScott.Carter@Sun.COM 		/* Update request */
559*8561SScott.Carter@Sun.COM 		req_p->ireq_flags |= DDI_IRM_FLAG_CALLBACK;
560*8561SScott.Carter@Sun.COM 
561*8561SScott.Carter@Sun.COM 		/* Rebalance in background */
562*8561SScott.Carter@Sun.COM 		i_ddi_irm_enqueue(pool_p, B_FALSE);
563*8561SScott.Carter@Sun.COM 
564*8561SScott.Carter@Sun.COM 	} else {
565*8561SScott.Carter@Sun.COM 
566*8561SScott.Carter@Sun.COM 		/* Determine new request size */
567*8561SScott.Carter@Sun.COM 		nreq = MIN(req_p->ireq_nreq, pool_p->ipool_defsz);
568*8561SScott.Carter@Sun.COM 
569*8561SScott.Carter@Sun.COM 		/* Update pool statistics */
570*8561SScott.Carter@Sun.COM 		pool_p->ipool_reqno -= req_p->ireq_nreq;
571*8561SScott.Carter@Sun.COM 		pool_p->ipool_reqno += nreq;
572*8561SScott.Carter@Sun.COM 		if (req_p->ireq_type == DDI_INTR_TYPE_MSIX) {
573*8561SScott.Carter@Sun.COM 			pool_p->ipool_minno -= 1;
574*8561SScott.Carter@Sun.COM 			pool_p->ipool_minno += nreq;
575*8561SScott.Carter@Sun.COM 		} else {
576*8561SScott.Carter@Sun.COM 			pool_p->ipool_minno -= req_p->ireq_nreq;
577*8561SScott.Carter@Sun.COM 			pool_p->ipool_minno += nreq;
578*8561SScott.Carter@Sun.COM 		}
579*8561SScott.Carter@Sun.COM 
580*8561SScott.Carter@Sun.COM 		/* Update request size, and re-sort in pool */
581*8561SScott.Carter@Sun.COM 		req_p->ireq_nreq = nreq;
582*8561SScott.Carter@Sun.COM 		list_remove(&pool_p->ipool_req_list, req_p);
583*8561SScott.Carter@Sun.COM 		i_ddi_irm_insertion_sort(&pool_p->ipool_req_list, req_p);
584*8561SScott.Carter@Sun.COM 
585*8561SScott.Carter@Sun.COM 		/* Rebalance synchronously, before losing callback */
586*8561SScott.Carter@Sun.COM 		i_ddi_irm_enqueue(pool_p, B_TRUE);
587*8561SScott.Carter@Sun.COM 
588*8561SScott.Carter@Sun.COM 		/* Remove callback flag */
589*8561SScott.Carter@Sun.COM 		req_p->ireq_flags &= ~(DDI_IRM_FLAG_CALLBACK);
590*8561SScott.Carter@Sun.COM 	}
591*8561SScott.Carter@Sun.COM 
592*8561SScott.Carter@Sun.COM 	/* Unlock the pool */
593*8561SScott.Carter@Sun.COM 	mutex_exit(&pool_p->ipool_lock);
594*8561SScott.Carter@Sun.COM }
595*8561SScott.Carter@Sun.COM 
596*8561SScott.Carter@Sun.COM /*
597*8561SScott.Carter@Sun.COM  * Interrupt Pool Balancing
598*8561SScott.Carter@Sun.COM  */
599*8561SScott.Carter@Sun.COM 
600*8561SScott.Carter@Sun.COM /*
601*8561SScott.Carter@Sun.COM  * irm_balance_thread()
602*8561SScott.Carter@Sun.COM  *
603*8561SScott.Carter@Sun.COM  *	One instance of this thread operates per each defined IRM pool.
604*8561SScott.Carter@Sun.COM  *	It does the initial activation of the pool, as well as balancing
605*8561SScott.Carter@Sun.COM  *	any requests that were queued up before the pool was active.
606*8561SScott.Carter@Sun.COM  *	Once active, it waits forever to service balance operations.
607*8561SScott.Carter@Sun.COM  */
608*8561SScott.Carter@Sun.COM static void
609*8561SScott.Carter@Sun.COM irm_balance_thread(ddi_irm_pool_t *pool_p)
610*8561SScott.Carter@Sun.COM {
611*8561SScott.Carter@Sun.COM 	clock_t		interval, wakeup;
612*8561SScott.Carter@Sun.COM 
613*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: pool_p %p\n",
614*8561SScott.Carter@Sun.COM 	    (void *)pool_p));
615*8561SScott.Carter@Sun.COM 
616*8561SScott.Carter@Sun.COM 	/* Lock the pool */
617*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_lock);
618*8561SScott.Carter@Sun.COM 
619*8561SScott.Carter@Sun.COM 	/* Perform initial balance if required */
620*8561SScott.Carter@Sun.COM 	if (pool_p->ipool_reqno > pool_p->ipool_resno)
621*8561SScott.Carter@Sun.COM 		i_ddi_irm_balance(pool_p);
622*8561SScott.Carter@Sun.COM 
623*8561SScott.Carter@Sun.COM 	/* Activate the pool */
624*8561SScott.Carter@Sun.COM 	pool_p->ipool_flags |= DDI_IRM_FLAG_ACTIVE;
625*8561SScott.Carter@Sun.COM 
626*8561SScott.Carter@Sun.COM 	/* Main loop */
627*8561SScott.Carter@Sun.COM 	for (;;) {
628*8561SScott.Carter@Sun.COM 
629*8561SScott.Carter@Sun.COM 		/* Compute the delay interval */
630*8561SScott.Carter@Sun.COM 		interval = drv_usectohz(irm_balance_delay * 1000000);
631*8561SScott.Carter@Sun.COM 
632*8561SScott.Carter@Sun.COM 		/* Sleep until queued */
633*8561SScott.Carter@Sun.COM 		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);
634*8561SScott.Carter@Sun.COM 
635*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: signaled.\n"));
636*8561SScott.Carter@Sun.COM 
637*8561SScott.Carter@Sun.COM 		/* Wait one interval, or until there are waiters */
638*8561SScott.Carter@Sun.COM 		if ((interval > 0) &&
639*8561SScott.Carter@Sun.COM 		    !(pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) &&
640*8561SScott.Carter@Sun.COM 		    !(pool_p->ipool_flags & DDI_IRM_FLAG_EXIT)) {
641*8561SScott.Carter@Sun.COM 			wakeup = ddi_get_lbolt() + interval;
642*8561SScott.Carter@Sun.COM 			(void) cv_timedwait(&pool_p->ipool_cv,
643*8561SScott.Carter@Sun.COM 			    &pool_p->ipool_lock, wakeup);
644*8561SScott.Carter@Sun.COM 		}
645*8561SScott.Carter@Sun.COM 
646*8561SScott.Carter@Sun.COM 		/* Check if awakened to exit */
647*8561SScott.Carter@Sun.COM 		if (pool_p->ipool_flags & DDI_IRM_FLAG_EXIT) {
648*8561SScott.Carter@Sun.COM 			DDI_INTR_IRMDBG((CE_CONT,
649*8561SScott.Carter@Sun.COM 			    "irm_balance_thread: exiting...\n"));
650*8561SScott.Carter@Sun.COM 			mutex_exit(&pool_p->ipool_lock);
651*8561SScott.Carter@Sun.COM 			thread_exit();
652*8561SScott.Carter@Sun.COM 		}
653*8561SScott.Carter@Sun.COM 
654*8561SScott.Carter@Sun.COM 		/* Balance the pool */
655*8561SScott.Carter@Sun.COM 		i_ddi_irm_balance(pool_p);
656*8561SScott.Carter@Sun.COM 
657*8561SScott.Carter@Sun.COM 		/* Notify waiters */
658*8561SScott.Carter@Sun.COM 		if (pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) {
659*8561SScott.Carter@Sun.COM 			cv_broadcast(&pool_p->ipool_cv);
660*8561SScott.Carter@Sun.COM 			pool_p->ipool_flags &= ~(DDI_IRM_FLAG_WAITERS);
661*8561SScott.Carter@Sun.COM 		}
662*8561SScott.Carter@Sun.COM 
663*8561SScott.Carter@Sun.COM 		/* Clear QUEUED condition */
664*8561SScott.Carter@Sun.COM 		pool_p->ipool_flags &= ~(DDI_IRM_FLAG_QUEUED);
665*8561SScott.Carter@Sun.COM 	}
666*8561SScott.Carter@Sun.COM }
667*8561SScott.Carter@Sun.COM 
668*8561SScott.Carter@Sun.COM /*
669*8561SScott.Carter@Sun.COM  * i_ddi_irm_balance()
670*8561SScott.Carter@Sun.COM  *
671*8561SScott.Carter@Sun.COM  *	Balance a pool.  The general algorithm is to first reset all
672*8561SScott.Carter@Sun.COM  *	requests to their maximum size, use reduction algorithms to
673*8561SScott.Carter@Sun.COM  *	solve any imbalance, and then notify affected drivers.
674*8561SScott.Carter@Sun.COM  */
675*8561SScott.Carter@Sun.COM static void
676*8561SScott.Carter@Sun.COM i_ddi_irm_balance(ddi_irm_pool_t *pool_p)
677*8561SScott.Carter@Sun.COM {
678*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
679*8561SScott.Carter@Sun.COM 
680*8561SScott.Carter@Sun.COM #ifdef	DEBUG
681*8561SScott.Carter@Sun.COM 	uint_t		debug_totsz = 0;
682*8561SScott.Carter@Sun.COM 	int		debug_policy = 0;
683*8561SScott.Carter@Sun.COM #endif	/* DEBUG */
684*8561SScott.Carter@Sun.COM 
685*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
686*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
687*8561SScott.Carter@Sun.COM 
688*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_balance: pool_p %p\n",
689*8561SScott.Carter@Sun.COM 	    (void *)pool_p));
690*8561SScott.Carter@Sun.COM 
691*8561SScott.Carter@Sun.COM #ifdef	DEBUG	/* Adjust size and policy settings */
692*8561SScott.Carter@Sun.COM 	if (irm_debug_size > pool_p->ipool_minno) {
693*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_balance: debug size %d\n",
694*8561SScott.Carter@Sun.COM 		    irm_debug_size));
695*8561SScott.Carter@Sun.COM 		debug_totsz = pool_p->ipool_totsz;
696*8561SScott.Carter@Sun.COM 		pool_p->ipool_totsz = irm_debug_size;
697*8561SScott.Carter@Sun.COM 	}
698*8561SScott.Carter@Sun.COM 	if (DDI_IRM_POLICY_VALID(irm_debug_policy)) {
699*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT,
700*8561SScott.Carter@Sun.COM 		    "i_ddi_irm_balance: debug policy %d\n", irm_debug_policy));
701*8561SScott.Carter@Sun.COM 		debug_policy = pool_p->ipool_policy;
702*8561SScott.Carter@Sun.COM 		pool_p->ipool_policy = irm_debug_policy;
703*8561SScott.Carter@Sun.COM 	}
704*8561SScott.Carter@Sun.COM #endif	/* DEBUG */
705*8561SScott.Carter@Sun.COM 
706*8561SScott.Carter@Sun.COM 	/* Lock the availability lock */
707*8561SScott.Carter@Sun.COM 	mutex_enter(&pool_p->ipool_navail_lock);
708*8561SScott.Carter@Sun.COM 
709*8561SScott.Carter@Sun.COM 	/*
710*8561SScott.Carter@Sun.COM 	 * Put all of the reducible requests into a scratch list.
711*8561SScott.Carter@Sun.COM 	 * Reset each one of them to their maximum availability.
712*8561SScott.Carter@Sun.COM 	 */
713*8561SScott.Carter@Sun.COM 	for (req_p = list_head(&pool_p->ipool_req_list); req_p;
714*8561SScott.Carter@Sun.COM 	    req_p = list_next(&pool_p->ipool_req_list, req_p)) {
715*8561SScott.Carter@Sun.COM 		if (DDI_IRM_IS_REDUCIBLE(req_p)) {
716*8561SScott.Carter@Sun.COM 			pool_p->ipool_resno -= req_p->ireq_navail;
717*8561SScott.Carter@Sun.COM 			req_p->ireq_scratch = req_p->ireq_navail;
718*8561SScott.Carter@Sun.COM 			req_p->ireq_navail = req_p->ireq_nreq;
719*8561SScott.Carter@Sun.COM 			pool_p->ipool_resno += req_p->ireq_navail;
720*8561SScott.Carter@Sun.COM 			list_insert_tail(&pool_p->ipool_scratch_list, req_p);
721*8561SScott.Carter@Sun.COM 		}
722*8561SScott.Carter@Sun.COM 	}
723*8561SScott.Carter@Sun.COM 
724*8561SScott.Carter@Sun.COM 	/* Balance the requests */
725*8561SScott.Carter@Sun.COM 	i_ddi_irm_reduce(pool_p);
726*8561SScott.Carter@Sun.COM 
727*8561SScott.Carter@Sun.COM 	/* Unlock the availability lock */
728*8561SScott.Carter@Sun.COM 	mutex_exit(&pool_p->ipool_navail_lock);
729*8561SScott.Carter@Sun.COM 
730*8561SScott.Carter@Sun.COM 	/*
731*8561SScott.Carter@Sun.COM 	 * Process REMOVE notifications.
732*8561SScott.Carter@Sun.COM 	 *
733*8561SScott.Carter@Sun.COM 	 * If a driver fails to release interrupts: exclude it from
734*8561SScott.Carter@Sun.COM 	 * further processing, correct the resulting imbalance, and
735*8561SScott.Carter@Sun.COM 	 * start over again at the head of the scratch list.
736*8561SScott.Carter@Sun.COM 	 */
737*8561SScott.Carter@Sun.COM 	req_p = list_head(&pool_p->ipool_scratch_list);
738*8561SScott.Carter@Sun.COM 	while (req_p) {
739*8561SScott.Carter@Sun.COM 		if ((req_p->ireq_navail < req_p->ireq_scratch) &&
740*8561SScott.Carter@Sun.COM 		    (i_ddi_irm_notify(pool_p, req_p) != DDI_SUCCESS)) {
741*8561SScott.Carter@Sun.COM 			list_remove(&pool_p->ipool_scratch_list, req_p);
742*8561SScott.Carter@Sun.COM 			mutex_enter(&pool_p->ipool_navail_lock);
743*8561SScott.Carter@Sun.COM 			i_ddi_irm_reduce(pool_p);
744*8561SScott.Carter@Sun.COM 			mutex_exit(&pool_p->ipool_navail_lock);
745*8561SScott.Carter@Sun.COM 			req_p = list_head(&pool_p->ipool_scratch_list);
746*8561SScott.Carter@Sun.COM 		} else {
747*8561SScott.Carter@Sun.COM 			req_p = list_next(&pool_p->ipool_scratch_list, req_p);
748*8561SScott.Carter@Sun.COM 		}
749*8561SScott.Carter@Sun.COM 	}
750*8561SScott.Carter@Sun.COM 
751*8561SScott.Carter@Sun.COM 	/*
752*8561SScott.Carter@Sun.COM 	 * Process ADD notifications.
753*8561SScott.Carter@Sun.COM 	 *
754*8561SScott.Carter@Sun.COM 	 * This is the last use of the scratch list, so empty it.
755*8561SScott.Carter@Sun.COM 	 */
756*8561SScott.Carter@Sun.COM 	while (req_p = list_remove_head(&pool_p->ipool_scratch_list)) {
757*8561SScott.Carter@Sun.COM 		if (req_p->ireq_navail > req_p->ireq_scratch) {
758*8561SScott.Carter@Sun.COM 			(void) i_ddi_irm_notify(pool_p, req_p);
759*8561SScott.Carter@Sun.COM 		}
760*8561SScott.Carter@Sun.COM 	}
761*8561SScott.Carter@Sun.COM 
762*8561SScott.Carter@Sun.COM #ifdef	DEBUG	/* Restore size and policy settings */
763*8561SScott.Carter@Sun.COM 	if (debug_totsz != 0)
764*8561SScott.Carter@Sun.COM 		pool_p->ipool_totsz = debug_totsz;
765*8561SScott.Carter@Sun.COM 	if (debug_policy != 0)
766*8561SScott.Carter@Sun.COM 		pool_p->ipool_policy = debug_policy;
767*8561SScott.Carter@Sun.COM #endif	/* DEBUG */
768*8561SScott.Carter@Sun.COM }
769*8561SScott.Carter@Sun.COM 
770*8561SScott.Carter@Sun.COM /*
771*8561SScott.Carter@Sun.COM  * i_ddi_irm_reduce()
772*8561SScott.Carter@Sun.COM  *
773*8561SScott.Carter@Sun.COM  *	Use reduction algorithms to correct an imbalance in a pool.
774*8561SScott.Carter@Sun.COM  */
775*8561SScott.Carter@Sun.COM static void
776*8561SScott.Carter@Sun.COM i_ddi_irm_reduce(ddi_irm_pool_t *pool_p)
777*8561SScott.Carter@Sun.COM {
778*8561SScott.Carter@Sun.COM 	int	ret, imbalance;
779*8561SScott.Carter@Sun.COM 
780*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
781*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
782*8561SScott.Carter@Sun.COM 	ASSERT(DDI_IRM_POLICY_VALID(pool_p->ipool_policy));
783*8561SScott.Carter@Sun.COM 
784*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_reduce: pool_p %p\n",
785*8561SScott.Carter@Sun.COM 	    (void *)pool_p));
786*8561SScott.Carter@Sun.COM 
787*8561SScott.Carter@Sun.COM 	/* Compute the imbalance.  Do nothing if already balanced. */
788*8561SScott.Carter@Sun.COM 	if ((imbalance = pool_p->ipool_resno - pool_p->ipool_totsz) <= 0)
789*8561SScott.Carter@Sun.COM 		return;
790*8561SScott.Carter@Sun.COM 
791*8561SScott.Carter@Sun.COM 	/* Reduce by policy */
792*8561SScott.Carter@Sun.COM 	switch (pool_p->ipool_policy) {
793*8561SScott.Carter@Sun.COM 	case DDI_IRM_POLICY_LARGE:
794*8561SScott.Carter@Sun.COM 		ret = i_ddi_irm_reduce_large(pool_p, imbalance);
795*8561SScott.Carter@Sun.COM 		break;
796*8561SScott.Carter@Sun.COM 	case DDI_IRM_POLICY_EVEN:
797*8561SScott.Carter@Sun.COM 		ret = i_ddi_irm_reduce_even(pool_p, imbalance);
798*8561SScott.Carter@Sun.COM 		break;
799*8561SScott.Carter@Sun.COM 	}
800*8561SScott.Carter@Sun.COM 
801*8561SScott.Carter@Sun.COM 	/*
802*8561SScott.Carter@Sun.COM 	 * If the policy based reductions failed, then
803*8561SScott.Carter@Sun.COM 	 * possibly reduce new requests as a last resort.
804*8561SScott.Carter@Sun.COM 	 */
805*8561SScott.Carter@Sun.COM 	if (ret != DDI_SUCCESS) {
806*8561SScott.Carter@Sun.COM 
807*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT,
808*8561SScott.Carter@Sun.COM 		    "i_ddi_irm_reduce: policy reductions failed.\n"));
809*8561SScott.Carter@Sun.COM 
810*8561SScott.Carter@Sun.COM 		/* Compute remaining imbalance */
811*8561SScott.Carter@Sun.COM 		imbalance = pool_p->ipool_resno - pool_p->ipool_totsz;
812*8561SScott.Carter@Sun.COM 
813*8561SScott.Carter@Sun.COM 		ASSERT(imbalance > 0);
814*8561SScott.Carter@Sun.COM 
815*8561SScott.Carter@Sun.COM 		i_ddi_irm_reduce_new(pool_p, imbalance);
816*8561SScott.Carter@Sun.COM 	}
817*8561SScott.Carter@Sun.COM }
818*8561SScott.Carter@Sun.COM 
819*8561SScott.Carter@Sun.COM /*
820*8561SScott.Carter@Sun.COM  * i_ddi_irm_enqueue()
821*8561SScott.Carter@Sun.COM  *
822*8561SScott.Carter@Sun.COM  *	Queue a pool to be balanced.  Signals the balancing thread to wake
823*8561SScott.Carter@Sun.COM  *	up and process the pool.  If 'wait_flag' is true, then the current
824*8561SScott.Carter@Sun.COM  *	thread becomes a waiter and blocks until the balance is completed.
825*8561SScott.Carter@Sun.COM  */
826*8561SScott.Carter@Sun.COM static void
827*8561SScott.Carter@Sun.COM i_ddi_irm_enqueue(ddi_irm_pool_t *pool_p, boolean_t wait_flag)
828*8561SScott.Carter@Sun.COM {
829*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
830*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
831*8561SScott.Carter@Sun.COM 
832*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: pool_p %p wait_flag %d\n",
833*8561SScott.Carter@Sun.COM 	    (void *)pool_p, (int)wait_flag));
834*8561SScott.Carter@Sun.COM 
835*8561SScott.Carter@Sun.COM 	/* Do nothing if pool is already balanced */
836*8561SScott.Carter@Sun.COM #ifndef	DEBUG
837*8561SScott.Carter@Sun.COM 	if ((pool_p->ipool_reqno == pool_p->ipool_resno)) {
838*8561SScott.Carter@Sun.COM #else
839*8561SScott.Carter@Sun.COM 	if ((pool_p->ipool_reqno == pool_p->ipool_resno) && !irm_debug_size) {
840*8561SScott.Carter@Sun.COM #endif	/* DEBUG */
841*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT,
842*8561SScott.Carter@Sun.COM 		    "i_ddi_irm_enqueue: pool already balanced\n"));
843*8561SScott.Carter@Sun.COM 		return;
844*8561SScott.Carter@Sun.COM 	}
845*8561SScott.Carter@Sun.COM 
846*8561SScott.Carter@Sun.COM 	/* Avoid deadlocks when IRM is not active */
847*8561SScott.Carter@Sun.COM 	if (!irm_active && wait_flag) {
848*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT,
849*8561SScott.Carter@Sun.COM 		    "i_ddi_irm_enqueue: pool not active.\n"));
850*8561SScott.Carter@Sun.COM 		return;
851*8561SScott.Carter@Sun.COM 	}
852*8561SScott.Carter@Sun.COM 
853*8561SScott.Carter@Sun.COM 	if (wait_flag)
854*8561SScott.Carter@Sun.COM 		pool_p->ipool_flags |= DDI_IRM_FLAG_WAITERS;
855*8561SScott.Carter@Sun.COM 
856*8561SScott.Carter@Sun.COM 	if (wait_flag || !(pool_p->ipool_flags & DDI_IRM_FLAG_QUEUED)) {
857*8561SScott.Carter@Sun.COM 		pool_p->ipool_flags |= DDI_IRM_FLAG_QUEUED;
858*8561SScott.Carter@Sun.COM 		cv_signal(&pool_p->ipool_cv);
859*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: pool queued.\n"));
860*8561SScott.Carter@Sun.COM 	}
861*8561SScott.Carter@Sun.COM 
862*8561SScott.Carter@Sun.COM 	if (wait_flag) {
863*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_enqueue: waiting...\n"));
864*8561SScott.Carter@Sun.COM 		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);
865*8561SScott.Carter@Sun.COM 	}
866*8561SScott.Carter@Sun.COM }
867*8561SScott.Carter@Sun.COM 
868*8561SScott.Carter@Sun.COM /*
869*8561SScott.Carter@Sun.COM  * Reduction Algorithms, Used For Balancing
870*8561SScott.Carter@Sun.COM  */
871*8561SScott.Carter@Sun.COM 
872*8561SScott.Carter@Sun.COM /*
873*8561SScott.Carter@Sun.COM  * i_ddi_irm_reduce_large()
874*8561SScott.Carter@Sun.COM  *
875*8561SScott.Carter@Sun.COM  *	Algorithm for the DDI_IRM_POLICY_LARGE reduction policy.
876*8561SScott.Carter@Sun.COM  *
877*8561SScott.Carter@Sun.COM  *	This algorithm generally reduces larger requests first, before
878*8561SScott.Carter@Sun.COM  *	advancing to smaller requests.  The scratch list is initially
879*8561SScott.Carter@Sun.COM  *	sorted in descending order by current navail values, which are
880*8561SScott.Carter@Sun.COM  *	maximized prior to reduction.  This sorted order is preserved,
881*8561SScott.Carter@Sun.COM  *	but within a range of equally sized requests they are secondarily
882*8561SScott.Carter@Sun.COM  *	sorted in ascending order by initial nreq value.  The head of the
883*8561SScott.Carter@Sun.COM  *	list is always selected for reduction, since it is the current
884*8561SScott.Carter@Sun.COM  *	largest request.  After being reduced, it is sorted further into
885*8561SScott.Carter@Sun.COM  *	the list before the next iteration.
886*8561SScott.Carter@Sun.COM  *
887*8561SScott.Carter@Sun.COM  *	Optimizations in this algorithm include trying to reduce multiple
888*8561SScott.Carter@Sun.COM  *	requests together if they are equally sized.  And the algorithm
889*8561SScott.Carter@Sun.COM  *	attempts to reduce in larger increments when possible to minimize
890*8561SScott.Carter@Sun.COM  *	the total number of iterations.
891*8561SScott.Carter@Sun.COM  */
892*8561SScott.Carter@Sun.COM static int
893*8561SScott.Carter@Sun.COM i_ddi_irm_reduce_large(ddi_irm_pool_t *pool_p, int imbalance)
894*8561SScott.Carter@Sun.COM {
895*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p, *next_p;
896*8561SScott.Carter@Sun.COM 	int		nreqs, reduction;
897*8561SScott.Carter@Sun.COM 
898*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
899*8561SScott.Carter@Sun.COM 	ASSERT(imbalance > 0);
900*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
901*8561SScott.Carter@Sun.COM 
902*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT,
903*8561SScott.Carter@Sun.COM 	    "i_ddi_irm_reduce_large: pool_p %p imbalance %d\n", (void *)pool_p,
904*8561SScott.Carter@Sun.COM 	    imbalance));
905*8561SScott.Carter@Sun.COM 
906*8561SScott.Carter@Sun.COM 	while (imbalance > 0) {
907*8561SScott.Carter@Sun.COM 
908*8561SScott.Carter@Sun.COM 		req_p = list_head(&pool_p->ipool_scratch_list);
909*8561SScott.Carter@Sun.COM 		next_p = list_next(&pool_p->ipool_scratch_list, req_p);
910*8561SScott.Carter@Sun.COM 
911*8561SScott.Carter@Sun.COM 		/* Fail if nothing is reducible */
912*8561SScott.Carter@Sun.COM 		if (req_p->ireq_navail == 1) {
913*8561SScott.Carter@Sun.COM 			DDI_INTR_IRMDBG((CE_CONT,
914*8561SScott.Carter@Sun.COM 			    "i_ddi_irm_reduce_large: failure.\n"));
915*8561SScott.Carter@Sun.COM 			return (DDI_FAILURE);
916*8561SScott.Carter@Sun.COM 		}
917*8561SScott.Carter@Sun.COM 
918*8561SScott.Carter@Sun.COM 		/* Count the number of equally sized requests */
919*8561SScott.Carter@Sun.COM 		nreqs = 1;
920*8561SScott.Carter@Sun.COM 		while (next_p && (req_p->ireq_navail == next_p->ireq_navail)) {
921*8561SScott.Carter@Sun.COM 			next_p = list_next(&pool_p->ipool_scratch_list, next_p);
922*8561SScott.Carter@Sun.COM 			nreqs++;
923*8561SScott.Carter@Sun.COM 		}
924*8561SScott.Carter@Sun.COM 
925*8561SScott.Carter@Sun.COM 		/* Try to reduce multiple requests together */
926*8561SScott.Carter@Sun.COM 		if (nreqs > 1) {
927*8561SScott.Carter@Sun.COM 
928*8561SScott.Carter@Sun.COM 			if (next_p) {
929*8561SScott.Carter@Sun.COM 				reduction = req_p->ireq_navail -
930*8561SScott.Carter@Sun.COM 				    (next_p->ireq_navail + 1);
931*8561SScott.Carter@Sun.COM 			} else {
932*8561SScott.Carter@Sun.COM 				reduction = req_p->ireq_navail - 1;
933*8561SScott.Carter@Sun.COM 			}
934*8561SScott.Carter@Sun.COM 
935*8561SScott.Carter@Sun.COM 			if ((reduction * nreqs) > imbalance)
936*8561SScott.Carter@Sun.COM 				reduction = imbalance / nreqs;
937*8561SScott.Carter@Sun.COM 
938*8561SScott.Carter@Sun.COM 			if (reduction > 0) {
939*8561SScott.Carter@Sun.COM 				while (req_p && (req_p != next_p)) {
940*8561SScott.Carter@Sun.COM 					imbalance -= reduction;
941*8561SScott.Carter@Sun.COM 					req_p->ireq_navail -= reduction;
942*8561SScott.Carter@Sun.COM 					pool_p->ipool_resno -= reduction;
943*8561SScott.Carter@Sun.COM 					req_p = list_next(
944*8561SScott.Carter@Sun.COM 					    &pool_p->ipool_scratch_list, req_p);
945*8561SScott.Carter@Sun.COM 				}
946*8561SScott.Carter@Sun.COM 				continue;
947*8561SScott.Carter@Sun.COM 			}
948*8561SScott.Carter@Sun.COM 		}
949*8561SScott.Carter@Sun.COM 
950*8561SScott.Carter@Sun.COM 		/* Or just reduce the current request */
951*8561SScott.Carter@Sun.COM 		next_p = list_next(&pool_p->ipool_scratch_list, req_p);
952*8561SScott.Carter@Sun.COM 		if (next_p && (req_p->ireq_navail > next_p->ireq_navail)) {
953*8561SScott.Carter@Sun.COM 			reduction = req_p->ireq_navail - next_p->ireq_navail;
954*8561SScott.Carter@Sun.COM 			reduction = MIN(reduction, imbalance);
955*8561SScott.Carter@Sun.COM 		} else {
956*8561SScott.Carter@Sun.COM 			reduction = 1;
957*8561SScott.Carter@Sun.COM 		}
958*8561SScott.Carter@Sun.COM 		imbalance -= reduction;
959*8561SScott.Carter@Sun.COM 		req_p->ireq_navail -= reduction;
960*8561SScott.Carter@Sun.COM 		pool_p->ipool_resno -= reduction;
961*8561SScott.Carter@Sun.COM 
962*8561SScott.Carter@Sun.COM 		/* Re-sort the scratch list if not yet finished */
963*8561SScott.Carter@Sun.COM 		if (imbalance > 0) {
964*8561SScott.Carter@Sun.COM 			i_ddi_irm_reduce_large_resort(pool_p);
965*8561SScott.Carter@Sun.COM 		}
966*8561SScott.Carter@Sun.COM 	}
967*8561SScott.Carter@Sun.COM 
968*8561SScott.Carter@Sun.COM 	return (DDI_SUCCESS);
969*8561SScott.Carter@Sun.COM }
970*8561SScott.Carter@Sun.COM 
971*8561SScott.Carter@Sun.COM /*
972*8561SScott.Carter@Sun.COM  * i_ddi_irm_reduce_large_resort()
973*8561SScott.Carter@Sun.COM  *
974*8561SScott.Carter@Sun.COM  *	Helper function for i_ddi_irm_reduce_large().  Once a request
975*8561SScott.Carter@Sun.COM  *	is reduced, this resorts it further down into the list as necessary.
976*8561SScott.Carter@Sun.COM  */
977*8561SScott.Carter@Sun.COM static void
978*8561SScott.Carter@Sun.COM i_ddi_irm_reduce_large_resort(ddi_irm_pool_t *pool_p)
979*8561SScott.Carter@Sun.COM {
980*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p, *next_p;
981*8561SScott.Carter@Sun.COM 
982*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
983*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
984*8561SScott.Carter@Sun.COM 
985*8561SScott.Carter@Sun.COM 	req_p = list_remove_head(&pool_p->ipool_scratch_list);
986*8561SScott.Carter@Sun.COM 	next_p = list_head(&pool_p->ipool_scratch_list);
987*8561SScott.Carter@Sun.COM 
988*8561SScott.Carter@Sun.COM 	while (next_p &&
989*8561SScott.Carter@Sun.COM 	    ((next_p->ireq_navail > req_p->ireq_navail) ||
990*8561SScott.Carter@Sun.COM 	    ((next_p->ireq_navail == req_p->ireq_navail) &&
991*8561SScott.Carter@Sun.COM 	    (next_p->ireq_nreq < req_p->ireq_nreq))))
992*8561SScott.Carter@Sun.COM 		next_p = list_next(&pool_p->ipool_scratch_list, next_p);
993*8561SScott.Carter@Sun.COM 
994*8561SScott.Carter@Sun.COM 	list_insert_before(&pool_p->ipool_scratch_list, next_p, req_p);
995*8561SScott.Carter@Sun.COM }
996*8561SScott.Carter@Sun.COM 
997*8561SScott.Carter@Sun.COM /*
998*8561SScott.Carter@Sun.COM  * i_ddi_irm_reduce_even()
999*8561SScott.Carter@Sun.COM  *
1000*8561SScott.Carter@Sun.COM  *	Algorithm for the DDI_IRM_POLICY_EVEN reduction policy.
1001*8561SScott.Carter@Sun.COM  *
1002*8561SScott.Carter@Sun.COM  *	This algorithm reduces requests evenly, without giving a
1003*8561SScott.Carter@Sun.COM  *	specific preference to smaller or larger requests.  Each
1004*8561SScott.Carter@Sun.COM  *	iteration reduces all reducible requests by the same amount
1005*8561SScott.Carter@Sun.COM  *	until the imbalance is corrected.  Although when possible,
1006*8561SScott.Carter@Sun.COM  *	it tries to avoid reducing requests below the threshold of
1007*8561SScott.Carter@Sun.COM  *	the interrupt pool's default allocation size.
1008*8561SScott.Carter@Sun.COM  *
1009*8561SScott.Carter@Sun.COM  *	An optimization in this algorithm is to reduce the requests
1010*8561SScott.Carter@Sun.COM  *	in larger increments during each iteration, to minimize the
1011*8561SScott.Carter@Sun.COM  *	total number of iterations required.
1012*8561SScott.Carter@Sun.COM  */
1013*8561SScott.Carter@Sun.COM static int
1014*8561SScott.Carter@Sun.COM i_ddi_irm_reduce_even(ddi_irm_pool_t *pool_p, int imbalance)
1015*8561SScott.Carter@Sun.COM {
1016*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p, *last_p;
1017*8561SScott.Carter@Sun.COM 	uint_t		nmin = pool_p->ipool_defsz;
1018*8561SScott.Carter@Sun.COM 	uint_t		nreduce, reduction;
1019*8561SScott.Carter@Sun.COM 
1020*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
1021*8561SScott.Carter@Sun.COM 	ASSERT(imbalance > 0);
1022*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
1023*8561SScott.Carter@Sun.COM 
1024*8561SScott.Carter@Sun.COM 	DDI_INTR_IRMDBG((CE_CONT,
1025*8561SScott.Carter@Sun.COM 	    "i_ddi_irm_reduce_even: pool_p %p imbalance %d\n",
1026*8561SScott.Carter@Sun.COM 	    (void *)pool_p, imbalance));
1027*8561SScott.Carter@Sun.COM 
1028*8561SScott.Carter@Sun.COM 	while ((nmin > 0) && (imbalance > 0)) {
1029*8561SScott.Carter@Sun.COM 
1030*8561SScott.Carter@Sun.COM 		/* Count reducible requests */
1031*8561SScott.Carter@Sun.COM 		nreduce = 0;
1032*8561SScott.Carter@Sun.COM 		for (req_p = list_head(&pool_p->ipool_scratch_list); req_p;
1033*8561SScott.Carter@Sun.COM 		    req_p = list_next(&pool_p->ipool_scratch_list, req_p)) {
1034*8561SScott.Carter@Sun.COM 			if (req_p->ireq_navail <= nmin)
1035*8561SScott.Carter@Sun.COM 				break;
1036*8561SScott.Carter@Sun.COM 			last_p = req_p;
1037*8561SScott.Carter@Sun.COM 			nreduce++;
1038*8561SScott.Carter@Sun.COM 		}
1039*8561SScott.Carter@Sun.COM 
1040*8561SScott.Carter@Sun.COM 		/* If none are reducible, try a lower minimum */
1041*8561SScott.Carter@Sun.COM 		if (nreduce == 0) {
1042*8561SScott.Carter@Sun.COM 			nmin--;
1043*8561SScott.Carter@Sun.COM 			continue;
1044*8561SScott.Carter@Sun.COM 		}
1045*8561SScott.Carter@Sun.COM 
1046*8561SScott.Carter@Sun.COM 		/* Compute reduction */
1047*8561SScott.Carter@Sun.COM 		if (nreduce < imbalance) {
1048*8561SScott.Carter@Sun.COM 			reduction = last_p->ireq_navail - nmin;
1049*8561SScott.Carter@Sun.COM 			if ((reduction * nreduce) > imbalance) {
1050*8561SScott.Carter@Sun.COM 				reduction = imbalance / nreduce;
1051*8561SScott.Carter@Sun.COM 			}
1052*8561SScott.Carter@Sun.COM 		} else {
1053*8561SScott.Carter@Sun.COM 			reduction = 1;
1054*8561SScott.Carter@Sun.COM 		}
1055*8561SScott.Carter@Sun.COM 
1056*8561SScott.Carter@Sun.COM 		/* Start at head of list, but skip excess */
1057*8561SScott.Carter@Sun.COM 		req_p = list_head(&pool_p->ipool_scratch_list);
1058*8561SScott.Carter@Sun.COM 		while (nreduce > imbalance) {
1059*8561SScott.Carter@Sun.COM 			req_p = list_next(&pool_p->ipool_scratch_list, req_p);
1060*8561SScott.Carter@Sun.COM 			nreduce--;
1061*8561SScott.Carter@Sun.COM 		}
1062*8561SScott.Carter@Sun.COM 
1063*8561SScott.Carter@Sun.COM 		/* Do reductions */
1064*8561SScott.Carter@Sun.COM 		while (req_p && (nreduce > 0)) {
1065*8561SScott.Carter@Sun.COM 			imbalance -= reduction;
1066*8561SScott.Carter@Sun.COM 			req_p->ireq_navail -= reduction;
1067*8561SScott.Carter@Sun.COM 			pool_p->ipool_resno -= reduction;
1068*8561SScott.Carter@Sun.COM 			req_p = list_next(&pool_p->ipool_scratch_list, req_p);
1069*8561SScott.Carter@Sun.COM 			nreduce--;
1070*8561SScott.Carter@Sun.COM 		}
1071*8561SScott.Carter@Sun.COM 	}
1072*8561SScott.Carter@Sun.COM 
1073*8561SScott.Carter@Sun.COM 	if (nmin == 0) {
1074*8561SScott.Carter@Sun.COM 		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_reduce_even: failure.\n"));
1075*8561SScott.Carter@Sun.COM 		return (DDI_FAILURE);
1076*8561SScott.Carter@Sun.COM 	}
1077*8561SScott.Carter@Sun.COM 
1078*8561SScott.Carter@Sun.COM 	return (DDI_SUCCESS);
1079*8561SScott.Carter@Sun.COM }
1080*8561SScott.Carter@Sun.COM 
1081*8561SScott.Carter@Sun.COM /*
1082*8561SScott.Carter@Sun.COM  * i_ddi_irm_reduce_new()
1083*8561SScott.Carter@Sun.COM  *
1084*8561SScott.Carter@Sun.COM  *	Reduces new requests to zero.  This is only used as a
1085*8561SScott.Carter@Sun.COM  *	last resort after another reduction algorithm failed.
1086*8561SScott.Carter@Sun.COM  */
1087*8561SScott.Carter@Sun.COM static void
1088*8561SScott.Carter@Sun.COM i_ddi_irm_reduce_new(ddi_irm_pool_t *pool_p, int imbalance)
1089*8561SScott.Carter@Sun.COM {
1090*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*req_p;
1091*8561SScott.Carter@Sun.COM 
1092*8561SScott.Carter@Sun.COM 	ASSERT(pool_p != NULL);
1093*8561SScott.Carter@Sun.COM 	ASSERT(imbalance > 0);
1094*8561SScott.Carter@Sun.COM 	ASSERT(MUTEX_HELD(&pool_p->ipool_lock));
1095*8561SScott.Carter@Sun.COM 
1096*8561SScott.Carter@Sun.COM 	for (req_p = list_head(&pool_p->ipool_scratch_list);
1097*8561SScott.Carter@Sun.COM 	    req_p && (imbalance > 0);
1098*8561SScott.Carter@Sun.COM 	    req_p = list_next(&pool_p->ipool_scratch_list, req_p)) {
1099*8561SScott.Carter@Sun.COM 		ASSERT(req_p->ireq_navail == 1);
1100*8561SScott.Carter@Sun.COM 		if (req_p->ireq_flags & DDI_IRM_FLAG_NEW) {
1101*8561SScott.Carter@Sun.COM 			req_p->ireq_navail--;
1102*8561SScott.Carter@Sun.COM 			pool_p->ipool_resno--;
1103*8561SScott.Carter@Sun.COM 			imbalance--;
1104*8561SScott.Carter@Sun.COM 		}
1105*8561SScott.Carter@Sun.COM 	}
1106*8561SScott.Carter@Sun.COM }
1107*8561SScott.Carter@Sun.COM 
1108*8561SScott.Carter@Sun.COM /*
1109*8561SScott.Carter@Sun.COM  * Miscellaneous Helper Functions
1110*8561SScott.Carter@Sun.COM  */
1111*8561SScott.Carter@Sun.COM 
1112*8561SScott.Carter@Sun.COM /*
1113*8561SScott.Carter@Sun.COM  * i_ddi_intr_get_pool()
1114*8561SScott.Carter@Sun.COM  *
1115*8561SScott.Carter@Sun.COM  *	Get an IRM pool that supplies interrupts of a specified type.
1116*8561SScott.Carter@Sun.COM  *	Invokes a DDI_INTROP_GETPOOL to the bus nexus driver.  Fails
1117*8561SScott.Carter@Sun.COM  *	if no pool exists.
1118*8561SScott.Carter@Sun.COM  */
1119*8561SScott.Carter@Sun.COM ddi_irm_pool_t *
1120*8561SScott.Carter@Sun.COM i_ddi_intr_get_pool(dev_info_t *dip, int type)
1121*8561SScott.Carter@Sun.COM {
1122*8561SScott.Carter@Sun.COM 	devinfo_intr_t		*intr_p;
1123*8561SScott.Carter@Sun.COM 	ddi_irm_pool_t		*pool_p;
1124*8561SScott.Carter@Sun.COM 	ddi_irm_req_t		*req_p;
1125*8561SScott.Carter@Sun.COM 	ddi_intr_handle_impl_t	hdl;
1126*8561SScott.Carter@Sun.COM 
1127*8561SScott.Carter@Sun.COM 	ASSERT(dip != NULL);
1128*8561SScott.Carter@Sun.COM 	ASSERT(DDI_INTR_TYPE_FLAG_VALID(type));
1129*8561SScott.Carter@Sun.COM 
1130*8561SScott.Carter@Sun.COM 	if (((intr_p = DEVI(dip)->devi_intr_p) != NULL) &&
1131*8561SScott.Carter@Sun.COM 	    ((req_p = intr_p->devi_irm_req_p) != NULL) &&
1132*8561SScott.Carter@Sun.COM 	    ((pool_p = req_p->ireq_pool_p) != NULL) &&
1133*8561SScott.Carter@Sun.COM 	    (pool_p->ipool_types & type)) {
1134*8561SScott.Carter@Sun.COM 		return (pool_p);
1135*8561SScott.Carter@Sun.COM 	}
1136*8561SScott.Carter@Sun.COM 
1137*8561SScott.Carter@Sun.COM 	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
1138*8561SScott.Carter@Sun.COM 	hdl.ih_dip = dip;
1139*8561SScott.Carter@Sun.COM 	hdl.ih_type = type;
1140*8561SScott.Carter@Sun.COM 
1141*8561SScott.Carter@Sun.COM 	if (i_ddi_intr_ops(dip, dip, DDI_INTROP_GETPOOL,
1142*8561SScott.Carter@Sun.COM 	    &hdl, (void *)&pool_p) == DDI_SUCCESS)
1143*8561SScott.Carter@Sun.COM 		return (pool_p);
1144*8561SScott.Carter@Sun.COM 
1145*8561SScott.Carter@Sun.COM 	return (NULL);
1146*8561SScott.Carter@Sun.COM }
1147*8561SScott.Carter@Sun.COM 
1148*8561SScott.Carter@Sun.COM /*
1149*8561SScott.Carter@Sun.COM  * i_ddi_irm_insertion_sort()
1150*8561SScott.Carter@Sun.COM  *
1151*8561SScott.Carter@Sun.COM  *	Use the insertion sort method to insert a request into a list.
1152*8561SScott.Carter@Sun.COM  *	The list is sorted in descending order by request size.
1153*8561SScott.Carter@Sun.COM  */
1154*8561SScott.Carter@Sun.COM static void
1155*8561SScott.Carter@Sun.COM i_ddi_irm_insertion_sort(list_t *req_list, ddi_irm_req_t *req_p)
1156*8561SScott.Carter@Sun.COM {
1157*8561SScott.Carter@Sun.COM 	ddi_irm_req_t	*next_p;
1158*8561SScott.Carter@Sun.COM 
1159*8561SScott.Carter@Sun.COM 	next_p = list_head(req_list);
1160*8561SScott.Carter@Sun.COM 
1161*8561SScott.Carter@Sun.COM 	while (next_p && (next_p->ireq_nreq > req_p->ireq_nreq))
1162*8561SScott.Carter@Sun.COM 		next_p = list_next(req_list, next_p);
1163*8561SScott.Carter@Sun.COM 
1164*8561SScott.Carter@Sun.COM 	list_insert_before(req_list, next_p, req_p);
1165*8561SScott.Carter@Sun.COM }
1166*8561SScott.Carter@Sun.COM 
/*
 * i_ddi_irm_notify()
 *
 *	Notify a driver of changes to its interrupt request using the
 *	generic callback mechanism.  Checks for errors in processing.
 *
 *	The request's ireq_scratch appears to hold the allocation the
 *	driver was last told about; it is compared against the new
 *	ireq_navail to derive the callback action and vector count,
 *	and updated only after a successful notification.
 *
 *	Returns DDI_SUCCESS on success; DDI_FAILURE if the driver has
 *	no callback registered or fails to release enough interrupts.
 */
static int
i_ddi_irm_notify(ddi_irm_pool_t *pool_p, ddi_irm_req_t *req_p)
{
	ddi_cb_action_t	action;
	ddi_cb_t	*cb_p;
	uint_t		nintrs;
	int		ret, count;

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: pool_p %p req_p %p\n",
	    (void *)pool_p, (void *)req_p));

	/* Do not notify new or unchanged requests */
	if ((req_p->ireq_navail == req_p->ireq_scratch) ||
	    (req_p->ireq_flags & DDI_IRM_FLAG_NEW))
		return (DDI_SUCCESS);

	/* Determine action and count */
	if (req_p->ireq_navail > req_p->ireq_scratch) {
		action = DDI_CB_INTR_ADD;
		count = req_p->ireq_navail - req_p->ireq_scratch;
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: adding %d\n",
		    count));
	} else {
		action = DDI_CB_INTR_REMOVE;
		count = req_p->ireq_scratch - req_p->ireq_navail;
		DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_notify: removing %d\n",
		    count));
	}

	/* Lookup driver callback */
	if ((cb_p = DEVI(req_p->ireq_dip)->devi_cb_p) == NULL) {
		DDI_INTR_IRMDBG((CE_WARN, "i_ddi_irm_notify: no callback!\n"));
		return (DDI_FAILURE);
	}

	/* Do callback */
	ret = cb_p->cb_func(req_p->ireq_dip, action, (void *)(uintptr_t)count,
	    cb_p->cb_arg1, cb_p->cb_arg2);

	/* Log callback errors */
	if (ret != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: failed callback (action=%d, ret=%d)\n",
		    ddi_driver_name(req_p->ireq_dip),
		    ddi_get_instance(req_p->ireq_dip), (int)action, ret);
	}

	/*
	 * Check if the driver exceeds its availability.  If it failed to
	 * release enough vectors, reclaim the excess back into the pool's
	 * accounting and treat the driver's current usage as its
	 * allocation so pool state stays consistent.
	 */
	nintrs = i_ddi_intr_get_current_nintrs(req_p->ireq_dip);
	if (nintrs > req_p->ireq_navail) {
		cmn_err(CE_WARN, "%s%d: failed to release interrupts "
		    "(nintrs=%d, navail=%d).\n",
		    ddi_driver_name(req_p->ireq_dip),
		    ddi_get_instance(req_p->ireq_dip), nintrs,
		    req_p->ireq_navail);
		pool_p->ipool_resno += (nintrs - req_p->ireq_navail);
		req_p->ireq_navail = nintrs;
		return (DDI_FAILURE);
	}

	/* Update request */
	req_p->ireq_scratch = req_p->ireq_navail;

	return (DDI_SUCCESS);
}
1237*8561SScott.Carter@Sun.COM 
/*
 * i_ddi_irm_debug_balance()
 *
 *	A debug/test only routine to force the immediate,
 *	synchronous rebalancing of an interrupt pool.
 */
#ifdef	DEBUG
void
i_ddi_irm_debug_balance(dev_info_t *dip, boolean_t wait_flag)
{
	ddi_irm_pool_t	*pool_p;
	int		type;

	DDI_INTR_IRMDBG((CE_CONT, "i_ddi_irm_debug_balance: dip %p wait %d\n",
	    (void *)dip, (int)wait_flag));

	/* Nothing to do if the dip has no current interrupt type */
	if ((type = i_ddi_intr_get_current_type(dip)) == 0)
		return;

	/* Nothing to do if no pool supplies that type */
	if ((pool_p = i_ddi_intr_get_pool(dip, type)) == NULL)
		return;

	/* Queue a rebalance, optionally waiting for it to finish */
	mutex_enter(&pool_p->ipool_lock);
	i_ddi_irm_enqueue(pool_p, wait_flag);
	mutex_exit(&pool_p->ipool_lock);
}
#endif
1262