1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * hermon_rsrc.c
28 * Hermon Resource Management Routines
29 *
30 * Implements all the routines necessary for setup, teardown, and
31 * alloc/free of all Hermon resources, including those that are managed
32 * by Hermon hardware or which live in Hermon's direct attached DDR memory.
33 */
34
35 #include <sys/types.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/modctl.h>
40 #include <sys/vmem.h>
41 #include <sys/bitmap.h>
42
43 #include <sys/ib/adapters/hermon/hermon.h>
44
45 int hermon_rsrc_verbose = 0;
46
47 /*
48 * The following routines are used for initializing and destroying
49 * the resource pools used by the Hermon resource allocation routines.
50 * They consist of four classes of object:
51 *
52 * Mailboxes: The "In" and "Out" mailbox types are used by the Hermon
53 * command interface routines. Mailboxes are used to pass information
54 * back and forth to the Hermon firmware. Either type of mailbox may
55 * be allocated from Hermon's direct attached memory or from system
56 * memory (although currently both "In" and "Out" mailboxes are
57 * allocated from system memory).
58 *
59 * HW entry objects: These objects represent resources required by the Hermon
60 * hardware. These objects include things like Queue Pair contexts (QPC),
61 * Completion Queue contexts (CQC), Event Queue contexts (EQC), RDB (for
62 * supporting RDMA Read/Atomic), Multicast Group entries (MCG), Memory
63 * Protection Table entries (MPT), Memory Translation Table entries (MTT).
64 *
65 * What these objects all have in common is that they are each required
66 * to come from ICM memory, they are always allocated from tables, and
67 * they are not to be directly accessed (read or written) by driver
68 * software (Mellanox FMR access to MPT is an exception).
69 * The other notable exceptions are the UAR pages (UAR_PG) which are
70 * allocated from the UAR address space rather than ICM, and the UD
71 * address vectors (UDAV) which are similar to the common object types
72 * with the major difference being that UDAVs _are_ directly read and
73 * written by driver software.
74 *
75 * SW handle objects: These objects represent resources required by Hermon
76 * driver software. They are primarily software tracking structures,
77 * which are allocated from system memory (using kmem_cache). Several of
78 * the objects have both a "constructor" and "destructor" method
79 * associated with them (see below).
80 *
81 * Protection Domain (PD) handle objects: These objects are very much like
82 * a SW handle object with the notable difference that all PD handle
83 * objects have an actual Protection Domain number (PD) associated with
84 * them (and the PD number is allocated/managed through a separate
85 * vmem arena specifically set aside for this purpose).
86 */
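/*
 * Illustrative usage sketch (not part of the driver proper): a typical
 * consumer of these pools allocates a tracking handle of a given type
 * with hermon_rsrc_alloc() and later returns it with hermon_rsrc_free(),
 * which also NULLs the caller's pointer.  Assuming a valid "state" and a
 * context in which sleeping is allowed:
 *
 *	hermon_rsrc_t	*rsrc;
 *
 *	if (hermon_rsrc_alloc(state, HERMON_CQC, 1, HERMON_SLEEP,
 *	    &rsrc) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	... program the CQ context at index rsrc->hr_indx ...
 *	hermon_rsrc_free(state, &rsrc);		... rsrc is now NULL ...
 */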
87
88 static int hermon_rsrc_mbox_init(hermon_state_t *state,
89 hermon_rsrc_mbox_info_t *info);
90 static void hermon_rsrc_mbox_fini(hermon_state_t *state,
91 hermon_rsrc_mbox_info_t *info);
92
93 static int hermon_rsrc_sw_handles_init(hermon_state_t *state,
94 hermon_rsrc_sw_hdl_info_t *info);
95 static void hermon_rsrc_sw_handles_fini(hermon_state_t *state,
96 hermon_rsrc_sw_hdl_info_t *info);
97
98 static int hermon_rsrc_pd_handles_init(hermon_state_t *state,
99 hermon_rsrc_sw_hdl_info_t *info);
100 static void hermon_rsrc_pd_handles_fini(hermon_state_t *state,
101 hermon_rsrc_sw_hdl_info_t *info);
102
103 /*
104 * The following routines are used for allocating and freeing the specific
105 * types of objects described above from their associated resource pools.
106 */
107 static int hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info,
108 uint_t num, hermon_rsrc_t *hdl);
109 static void hermon_rsrc_mbox_free(hermon_rsrc_t *hdl);
110
111 static int hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info,
112 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
113 static void hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
114 hermon_rsrc_t *hdl);
115 static int hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info,
116 uint_t num, uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl);
117
118 static int hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info,
119 uint_t num, hermon_rsrc_t *hdl, int num_to_hdl);
120 static int hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
121 hermon_rsrc_t *hdl, int num_to_hdl);
122
123 static int hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
124 uint_t sleepflag, hermon_rsrc_t *hdl);
125 static void hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info,
126 hermon_rsrc_t *hdl);
127
128 static int hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info,
129 uint_t sleepflag, hermon_rsrc_t *hdl);
130 static void hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info,
131 hermon_rsrc_t *hdl);
132
133 static int hermon_rsrc_fexch_alloc(hermon_state_t *state,
134 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
135 static void hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl);
136 static int hermon_rsrc_rfci_alloc(hermon_state_t *state,
137 hermon_rsrc_type_t rsrc, uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl);
138 static void hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl);
139
140 /*
141 * The following routines are the constructors and destructors for several
142 * of the SW handle type objects.  For certain types of SW handle objects
143 * (all of which are implemented using kmem_cache), we need to do some
144 * special field initialization (specifically, mutex_init/destroy). These
145 * routines enable that init and teardown.
146 */
147 static int hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags);
148 static void hermon_rsrc_pdhdl_destructor(void *pd, void *state);
149 static int hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags);
150 static void hermon_rsrc_cqhdl_destructor(void *cq, void *state);
151 static int hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags);
152 static void hermon_rsrc_qphdl_destructor(void *qp, void *state);
153 static int hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags);
154 static void hermon_rsrc_srqhdl_destructor(void *srq, void *state);
155 static int hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags);
156 static void hermon_rsrc_refcnt_destructor(void *rc, void *state);
157 static int hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags);
158 static void hermon_rsrc_ahhdl_destructor(void *ah, void *state);
159 static int hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags);
160 static void hermon_rsrc_mrhdl_destructor(void *mr, void *state);
161
162 /*
163 * Special routine to calculate and return the size of a MCG object based
164 * on current driver configuration (specifically, the number of QPs per MCG
165 * that has been configured).
166 */
167 static int hermon_rsrc_mcg_entry_get_size(hermon_state_t *state,
168 uint_t *mcg_size_shift);
169
170
171 /*
172 * hermon_rsrc_alloc()
173 *
174 * Context: Can be called from interrupt or base context.
175 * The "sleepflag" parameter is used by all object allocators to
176 * determine whether to SLEEP for resources or not.
177 */
178 int
179 hermon_rsrc_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
180 uint_t sleepflag, hermon_rsrc_t **hdl)
181 {
182 hermon_rsrc_pool_info_t *rsrc_pool;
183 hermon_rsrc_t *tmp_rsrc_hdl;
184 int flag, status = DDI_FAILURE;
185
186 ASSERT(state != NULL);
187 ASSERT(hdl != NULL);
188
189 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
190 ASSERT(rsrc_pool != NULL);
191
192 /*
193 * Allocate space for the object used to track the resource handle
194 */
195 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
196 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
197 if (tmp_rsrc_hdl == NULL) {
198 return (DDI_FAILURE);
199 }
200 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
201
202 /*
203 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
204 * to know what type of resource is being freed.
205 */
206 tmp_rsrc_hdl->rsrc_type = rsrc;
207
208 /*
209 * Depending on resource type, call the appropriate alloc routine
210 */
211 switch (rsrc) {
212 case HERMON_IN_MBOX:
213 case HERMON_OUT_MBOX:
214 case HERMON_INTR_IN_MBOX:
215 case HERMON_INTR_OUT_MBOX:
216 status = hermon_rsrc_mbox_alloc(rsrc_pool, num, tmp_rsrc_hdl);
217 break;
218
219 case HERMON_DMPT:
220 /* Allocate "num" (contiguous/aligned for FEXCH) DMPTs */
221 case HERMON_QPC:
222 /* Allocate "num" (contiguous/aligned for RSS) QPCs */
223 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, num,
224 sleepflag, tmp_rsrc_hdl);
225 break;
226
227 case HERMON_QPC_FEXCH_PORT1:
228 case HERMON_QPC_FEXCH_PORT2:
229 /* Allocate "num" contiguous/aligned QPCs for FEXCH */
230 status = hermon_rsrc_fexch_alloc(state, rsrc, num,
231 sleepflag, tmp_rsrc_hdl);
232 break;
233
234 case HERMON_QPC_RFCI_PORT1:
235 case HERMON_QPC_RFCI_PORT2:
236 /* Allocate "num" contiguous/aligned QPCs for RFCI */
237 status = hermon_rsrc_rfci_alloc(state, rsrc, num,
238 sleepflag, tmp_rsrc_hdl);
239 break;
240
241 case HERMON_MTT:
242 case HERMON_CQC:
243 case HERMON_SRQC:
244 case HERMON_EQC:
245 case HERMON_MCG:
246 case HERMON_UARPG:
247 /* Allocate "num" unaligned resources */
248 status = hermon_rsrc_hw_entry_alloc(rsrc_pool, num, 1,
249 sleepflag, tmp_rsrc_hdl);
250 break;
251
252 case HERMON_MRHDL:
253 case HERMON_EQHDL:
254 case HERMON_CQHDL:
255 case HERMON_SRQHDL:
256 case HERMON_AHHDL:
257 case HERMON_QPHDL:
258 case HERMON_REFCNT:
259 status = hermon_rsrc_swhdl_alloc(rsrc_pool, sleepflag,
260 tmp_rsrc_hdl);
261 break;
262
263 case HERMON_PDHDL:
264 status = hermon_rsrc_pdhdl_alloc(rsrc_pool, sleepflag,
265 tmp_rsrc_hdl);
266 break;
267
268 case HERMON_RDB: /* handled during HERMON_QPC */
269 case HERMON_ALTC: /* handled during HERMON_QPC */
270 case HERMON_AUXC: /* handled during HERMON_QPC */
271 case HERMON_CMPT_QPC: /* handled during HERMON_QPC */
272 case HERMON_CMPT_SRQC: /* handled during HERMON_SRQC */
273 case HERMON_CMPT_CQC: /* handled during HERMON_CQC */
274 case HERMON_CMPT_EQC: /* handled during HERMON_EQC */
275 default:
276 HERMON_WARNING(state, "unexpected resource type in alloc ");
277 cmn_err(CE_WARN, "Resource type %x \n", rsrc_pool->rsrc_type);
278 break;
279 }
280
281 /*
282 * If the resource allocation failed, then free the special resource
283 * tracking structure and return failure. Otherwise return the
284 * handle for the resource tracking structure.
285 */
286 if (status != DDI_SUCCESS) {
287 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
288 return (DDI_FAILURE);
289 } else {
290 *hdl = tmp_rsrc_hdl;
291 return (DDI_SUCCESS);
292 }
293 }
294
295
296 /*
297 * hermon_rsrc_reserve()
298 *
299 * Context: Can only be called from attach.
300 * The "sleepflag" parameter is used by all object allocators to
301 * determine whether to SLEEP for resources or not.
302 */
303 int
304 hermon_rsrc_reserve(hermon_state_t *state, hermon_rsrc_type_t rsrc, uint_t num,
305 uint_t sleepflag, hermon_rsrc_t **hdl)
306 {
307 hermon_rsrc_pool_info_t *rsrc_pool;
308 hermon_rsrc_t *tmp_rsrc_hdl;
309 int flag, status = DDI_FAILURE;
310
311 ASSERT(state != NULL);
312 ASSERT(hdl != NULL);
313
314 rsrc_pool = &state->hs_rsrc_hdl[rsrc];
315 ASSERT(rsrc_pool != NULL);
316
317 /*
318 * Allocate space for the object used to track the resource handle
319 */
320 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
321 tmp_rsrc_hdl = kmem_cache_alloc(state->hs_rsrc_cache, flag);
322 if (tmp_rsrc_hdl == NULL) {
323 return (DDI_FAILURE);
324 }
325 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*tmp_rsrc_hdl))
326
327 /*
328 * Set rsrc_hdl type. This is later used by the hermon_rsrc_free call
329 * to know what type of resource is being freed.
330 */
331 tmp_rsrc_hdl->rsrc_type = rsrc;
332
333 switch (rsrc) {
334 case HERMON_QPC:
335 case HERMON_DMPT:
336 case HERMON_MTT:
337 /*
338 * Reserve num resources, naturally aligned (N * num).
339 */
340 status = hermon_rsrc_hw_entry_reserve(rsrc_pool, num, num,
341 sleepflag, tmp_rsrc_hdl);
342 break;
343
344 default:
345 HERMON_WARNING(state, "unexpected resource type in reserve ");
346 cmn_err(CE_WARN, "Resource type %x \n", rsrc);
347 break;
348 }
349
350 /*
351 * If the resource allocation failed, then free the special resource
352 * tracking structure and return failure. Otherwise return the
353 * handle for the resource tracking structure.
354 */
355 if (status != DDI_SUCCESS) {
356 kmem_cache_free(state->hs_rsrc_cache, tmp_rsrc_hdl);
357 return (DDI_FAILURE);
358 } else {
359 *hdl = tmp_rsrc_hdl;
360 return (DDI_SUCCESS);
361 }
362 }
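/*
 * Illustrative example (a sketch only): reserving eight QP contexts at
 * attach time, e.g.
 *
 *	hermon_rsrc_reserve(state, HERMON_QPC, 8, HERMON_SLEEP, &rsrc);
 *
 * yields a naturally aligned range, so the base index (rsrc->hr_indx)
 * is a multiple of 8, per the "(N * num)" alignment noted above.
 */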
363
364
365 /*
366 * hermon_rsrc_fexch_alloc()
367 *
368 * Context: Can only be called from base context.
369 * The "sleepflag" parameter is used by all object allocators to
370 * determine whether to SLEEP for resources or not.
371 */
372 static int
373 hermon_rsrc_fexch_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
374 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
375 {
376 hermon_fcoib_t *fcoib;
377 void *addr;
378 uint32_t fexch_qpn_base;
379 hermon_rsrc_pool_info_t *qpc_pool, *mpt_pool, *mtt_pool;
380 int flag, status;
381 hermon_rsrc_t mpt_hdl; /* temporary, just for icm_confirm */
382 hermon_rsrc_t mtt_hdl; /* temporary, just for icm_confirm */
383 uint_t portm1; /* hca_port_number - 1 */
384 uint_t nummtt;
385 vmem_t *vmp;
386
387 ASSERT(state != NULL);
388 ASSERT(hdl != NULL);
389
390 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
391 return (DDI_FAILURE);
392
393 portm1 = rsrc - HERMON_QPC_FEXCH_PORT1;
394 fcoib = &state->hs_fcoib;
395 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
396
397 /* Allocate from the FEXCH QP range */
398 vmp = fcoib->hfc_fexch_vmemp[portm1];
399 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
400 if (addr == NULL) {
401 return (DDI_FAILURE);
402 }
403 fexch_qpn_base = (uint32_t)((uintptr_t)addr -
404 fcoib->hfc_vmemstart + fcoib->hfc_fexch_base[portm1]);
405
406 /* ICM confirm for the FEXCH QP range */
407 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
408 hdl->hr_len = num << qpc_pool->rsrc_shift;
409 hdl->hr_addr = addr; /* used only for vmem_xfree */
410 hdl->hr_indx = fexch_qpn_base;
411
412 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
413 if (status != DDI_SUCCESS) {
414 vmem_xfree(vmp, addr, num);
415 return (DDI_FAILURE);
416 }
417
418 /* ICM confirm for the Primary MKEYs (client side only) */
419 mpt_pool = &state->hs_rsrc_hdl[HERMON_DMPT];
420 mpt_hdl.hr_len = num << mpt_pool->rsrc_shift;
421 mpt_hdl.hr_addr = NULL;
422 mpt_hdl.hr_indx = fcoib->hfc_mpt_base[portm1] +
423 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]);
424
425 status = hermon_rsrc_hw_entry_icm_confirm(mpt_pool, num, &mpt_hdl, 0);
426 if (status != DDI_SUCCESS) {
427 status = hermon_rsrc_hw_entry_icm_free(qpc_pool, hdl, 1);
428 vmem_xfree(vmp, addr, num);
429 return (DDI_FAILURE);
430 }
431
432 /* ICM confirm for the MTTs of the Primary MKEYs (client side only) */
433 nummtt = fcoib->hfc_mtts_per_mpt;
434 num *= nummtt;
435 mtt_pool = &state->hs_rsrc_hdl[HERMON_MTT];
436 mtt_hdl.hr_len = num << mtt_pool->rsrc_shift;
437 mtt_hdl.hr_addr = NULL;
438 mtt_hdl.hr_indx = fcoib->hfc_mtt_base[portm1] +
439 (fexch_qpn_base - fcoib->hfc_fexch_base[portm1]) *
440 nummtt;
441
442 status = hermon_rsrc_hw_entry_icm_confirm(mtt_pool, num, &mtt_hdl, 0);
443 if (status != DDI_SUCCESS) {
444 vmem_xfree(vmp, addr, num / nummtt); /* num was scaled by nummtt above */
445 return (DDI_FAILURE);
446 }
447 return (DDI_SUCCESS);
448 }
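/*
 * For reference, a sketch of the index mapping used above (assuming the
 * hfc_* base values are established elsewhere during FCoIB setup):
 *
 *	qp_off   = fexch_qpn_base - hfc_fexch_base[port]
 *	mpt_indx = hfc_mpt_base[port] + qp_off
 *	mtt_indx = hfc_mtt_base[port] + (qp_off * hfc_mtts_per_mpt)
 *
 * i.e. each FEXCH QP is paired with one dedicated MPT and with
 * hfc_mtts_per_mpt dedicated MTT entries.
 */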
449
450 static void
451 hermon_rsrc_fexch_free(hermon_state_t *state, hermon_rsrc_t *hdl)
452 {
453 hermon_fcoib_t *fcoib;
454 uint_t portm1; /* hca_port_number - 1 */
455
456 ASSERT(state != NULL);
457 ASSERT(hdl != NULL);
458
459 portm1 = hdl->rsrc_type - HERMON_QPC_FEXCH_PORT1;
460 fcoib = &state->hs_fcoib;
461 vmem_xfree(fcoib->hfc_fexch_vmemp[portm1], hdl->hr_addr,
462 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
463 }
464
465 /*
466 * hermon_rsrc_rfci_alloc()
467 *
468 * Context: Can only be called from base context.
469 * The "sleepflag" parameter is used by all object allocators to
470 * determine whether to SLEEP for resources or not.
471 */
472 static int
473 hermon_rsrc_rfci_alloc(hermon_state_t *state, hermon_rsrc_type_t rsrc,
474 uint_t num, uint_t sleepflag, hermon_rsrc_t *hdl)
475 {
476 hermon_fcoib_t *fcoib;
477 void *addr;
478 uint32_t rfci_qpn_base;
479 hermon_rsrc_pool_info_t *qpc_pool;
480 int flag, status;
481 uint_t portm1; /* hca_port_number - 1 */
482 vmem_t *vmp;
483
484 ASSERT(state != NULL);
485 ASSERT(hdl != NULL);
486
487 if ((state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_FC) == 0)
488 return (DDI_FAILURE);
489
490 portm1 = rsrc - HERMON_QPC_RFCI_PORT1;
491 fcoib = &state->hs_fcoib;
492 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
493
494 /* Allocate from the RFCI QP range */
495 vmp = fcoib->hfc_rfci_vmemp[portm1];
496 addr = vmem_xalloc(vmp, num, num, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
497 if (addr == NULL) {
498 return (DDI_FAILURE);
499 }
500 rfci_qpn_base = (uint32_t)((uintptr_t)addr -
501 fcoib->hfc_vmemstart + fcoib->hfc_rfci_base[portm1]);
502
503 /* ICM confirm for the RFCI QP */
504 qpc_pool = &state->hs_rsrc_hdl[HERMON_QPC];
505 hdl->hr_len = num << qpc_pool->rsrc_shift;
506 hdl->hr_addr = addr; /* used only for vmem_xfree */
507 hdl->hr_indx = rfci_qpn_base;
508
509 status = hermon_rsrc_hw_entry_icm_confirm(qpc_pool, num, hdl, 1);
510 if (status != DDI_SUCCESS) {
511 vmem_xfree(vmp, addr, num);
512 return (DDI_FAILURE);
513 }
514 return (DDI_SUCCESS);
515 }
516
517 static void
518 hermon_rsrc_rfci_free(hermon_state_t *state, hermon_rsrc_t *hdl)
519 {
520 hermon_fcoib_t *fcoib;
521 uint_t portm1; /* hca_port_number - 1 */
522
523 ASSERT(state != NULL);
524 ASSERT(hdl != NULL);
525
526 portm1 = hdl->rsrc_type - HERMON_QPC_RFCI_PORT1;
527 fcoib = &state->hs_fcoib;
528 vmem_xfree(fcoib->hfc_rfci_vmemp[portm1], hdl->hr_addr,
529 hdl->hr_len >> state->hs_rsrc_hdl[HERMON_QPC].rsrc_shift);
530 }
531
532
533 /*
534 * hermon_rsrc_free()
535 * Context: Can be called from interrupt or base context.
536 */
537 void
538 hermon_rsrc_free(hermon_state_t *state, hermon_rsrc_t **hdl)
539 {
540 hermon_rsrc_pool_info_t *rsrc_pool;
541
542 ASSERT(state != NULL);
543 ASSERT(hdl != NULL);
544
545 rsrc_pool = &state->hs_rsrc_hdl[(*hdl)->rsrc_type];
546 ASSERT(rsrc_pool != NULL);
547
548 /*
549 * Depending on resource type, call the appropriate free routine
550 */
551 switch (rsrc_pool->rsrc_type) {
552 case HERMON_IN_MBOX:
553 case HERMON_OUT_MBOX:
554 case HERMON_INTR_IN_MBOX:
555 case HERMON_INTR_OUT_MBOX:
556 hermon_rsrc_mbox_free(*hdl);
557 break;
558
559 case HERMON_QPC_FEXCH_PORT1:
560 case HERMON_QPC_FEXCH_PORT2:
561 hermon_rsrc_fexch_free(state, *hdl);
562 break;
563
564 case HERMON_QPC_RFCI_PORT1:
565 case HERMON_QPC_RFCI_PORT2:
566 hermon_rsrc_rfci_free(state, *hdl);
567 break;
568
569 case HERMON_QPC:
570 case HERMON_CQC:
571 case HERMON_SRQC:
572 case HERMON_EQC:
573 case HERMON_DMPT:
574 case HERMON_MCG:
575 case HERMON_MTT:
576 case HERMON_UARPG:
577 hermon_rsrc_hw_entry_free(rsrc_pool, *hdl);
578 break;
579
580 case HERMON_MRHDL:
581 case HERMON_EQHDL:
582 case HERMON_CQHDL:
583 case HERMON_SRQHDL:
584 case HERMON_AHHDL:
585 case HERMON_QPHDL:
586 case HERMON_REFCNT:
587 hermon_rsrc_swhdl_free(rsrc_pool, *hdl);
588 break;
589
590 case HERMON_PDHDL:
591 hermon_rsrc_pdhdl_free(rsrc_pool, *hdl);
592 break;
593
594 case HERMON_RDB:
595 case HERMON_ALTC:
596 case HERMON_AUXC:
597 case HERMON_CMPT_QPC:
598 case HERMON_CMPT_SRQC:
599 case HERMON_CMPT_CQC:
600 case HERMON_CMPT_EQC:
601 default:
602 cmn_err(CE_CONT, "!rsrc_type = 0x%x\n", rsrc_pool->rsrc_type);
603 break;
604 }
605
606 /*
607 * Free the special resource tracking structure, set the handle to
608 * NULL, and return.
609 */
610 kmem_cache_free(state->hs_rsrc_cache, *hdl);
611 *hdl = NULL;
612 }
613
614
615 /*
616 * hermon_rsrc_init_phase1()
617 *
618 * Completes the first phase of Hermon resource/configuration init.
619 * This involves creating the kmem_cache for the "hermon_rsrc_t"
620 * structs, allocating the space for the resource pool handles,
621 * and setting up the "Out" mailboxes.
622 *
623 * When this function completes, the Hermon driver is ready to
624 * post the following commands which return information only in the
625 * "Out" mailbox: QUERY_DDR, QUERY_FW, QUERY_DEV_LIM, and QUERY_ADAPTER
626 * If any of these commands are to be posted at this time, they must be
627 * done so only when "spinning" (as the outstanding command list and
628 * EQ setup code have not yet run).
629 *
630 * Context: Only called from attach() path context
631 */
632 int
633 hermon_rsrc_init_phase1(hermon_state_t *state)
634 {
635 hermon_rsrc_pool_info_t *rsrc_pool;
636 hermon_rsrc_mbox_info_t mbox_info;
637 hermon_rsrc_cleanup_level_t cleanup;
638 hermon_cfg_profile_t *cfgprof;
639 uint64_t num, size;
640 int status;
641 char *rsrc_name;
642
643 ASSERT(state != NULL);
644
645 /* This is where Phase 1 of resource initialization begins */
646 cleanup = HERMON_RSRC_CLEANUP_LEVEL0;
647
648 /* Build kmem cache name from Hermon instance */
649 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
650 HERMON_RSRC_NAME(rsrc_name, HERMON_RSRC_CACHE);
651
652 /*
653 * Create the kmem_cache for "hermon_rsrc_t" structures
654 * (kmem_cache_create will SLEEP until successful)
655 */
656 state->hs_rsrc_cache = kmem_cache_create(rsrc_name,
657 sizeof (hermon_rsrc_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
658
659 /*
660 * Allocate an array of hermon_rsrc_pool_info_t's (used in all
661 * subsequent resource allocations)
662 */
663 state->hs_rsrc_hdl = kmem_zalloc(HERMON_NUM_RESOURCES *
664 sizeof (hermon_rsrc_pool_info_t), KM_SLEEP);
665
666 /* Pull in the configuration profile */
667 cfgprof = state->hs_cfg_profile;
668
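/*
 * A worked example of the mailbox pool sizing below (illustrative
 * values only, not necessarily those of the shipped configuration
 * profile): with cp_log_num_outmbox = 10 and cp_log_outmbox_size = 12,
 * the "Out" mailbox pool holds num = 1024 mailboxes of size = 4KB each,
 * giving rsrc_pool_size = 4MB, rsrc_shift = 12, and rsrc_quantum = 4KB.
 */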
669 /* Initialize the resource pool for "out" mailboxes */
670 num = ((uint64_t)1 << cfgprof->cp_log_num_outmbox);
671 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
672 rsrc_pool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
673 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
674 rsrc_pool->rsrc_pool_size = (size * num);
675 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
676 rsrc_pool->rsrc_quantum = (uint_t)size;
677 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
678 rsrc_pool->rsrc_state = state;
679 mbox_info.mbi_num = num;
680 mbox_info.mbi_size = size;
681 mbox_info.mbi_rsrcpool = rsrc_pool;
682 status = hermon_rsrc_mbox_init(state, &mbox_info);
683 if (status != DDI_SUCCESS) {
684 hermon_rsrc_fini(state, cleanup);
685 status = DDI_FAILURE;
686 goto rsrcinitp1_fail;
687 }
688 cleanup = HERMON_RSRC_CLEANUP_LEVEL1;
689
690 /* Initialize the mailbox list */
691 status = hermon_outmbox_list_init(state);
692 if (status != DDI_SUCCESS) {
693 hermon_rsrc_fini(state, cleanup);
694 status = DDI_FAILURE;
695 goto rsrcinitp1_fail;
696 }
697 cleanup = HERMON_RSRC_CLEANUP_LEVEL2;
698
699 /* Initialize the resource pool for "interrupt out" mailboxes */
700 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_outmbox);
701 size = ((uint64_t)1 << cfgprof->cp_log_outmbox_size);
702 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
703 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
704 rsrc_pool->rsrc_pool_size = (size * num);
705 rsrc_pool->rsrc_shift = cfgprof->cp_log_outmbox_size;
706 rsrc_pool->rsrc_quantum = (uint_t)size;
707 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
708 rsrc_pool->rsrc_state = state;
709 mbox_info.mbi_num = num;
710 mbox_info.mbi_size = size;
711 mbox_info.mbi_rsrcpool = rsrc_pool;
712 status = hermon_rsrc_mbox_init(state, &mbox_info);
713 if (status != DDI_SUCCESS) {
714 hermon_rsrc_fini(state, cleanup);
715 status = DDI_FAILURE;
716 goto rsrcinitp1_fail;
717 }
718 cleanup = HERMON_RSRC_CLEANUP_LEVEL3;
719
720 /* Initialize the mailbox list */
721 status = hermon_intr_outmbox_list_init(state);
722 if (status != DDI_SUCCESS) {
723 hermon_rsrc_fini(state, cleanup);
724 status = DDI_FAILURE;
725 goto rsrcinitp1_fail;
726 }
727 cleanup = HERMON_RSRC_CLEANUP_LEVEL4;
728
729 /* Initialize the resource pool for "in" mailboxes */
730 num = ((uint64_t)1 << cfgprof->cp_log_num_inmbox);
731 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
732 rsrc_pool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
733 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
734 rsrc_pool->rsrc_pool_size = (size * num);
735 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
736 rsrc_pool->rsrc_quantum = (uint_t)size;
737 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
738 rsrc_pool->rsrc_state = state;
739 mbox_info.mbi_num = num;
740 mbox_info.mbi_size = size;
741 mbox_info.mbi_rsrcpool = rsrc_pool;
742 status = hermon_rsrc_mbox_init(state, &mbox_info);
743 if (status != DDI_SUCCESS) {
744 hermon_rsrc_fini(state, cleanup);
745 status = DDI_FAILURE;
746 goto rsrcinitp1_fail;
747 }
748 cleanup = HERMON_RSRC_CLEANUP_LEVEL5;
749
750 /* Initialize the mailbox list */
751 status = hermon_inmbox_list_init(state);
752 if (status != DDI_SUCCESS) {
753 hermon_rsrc_fini(state, cleanup);
754 status = DDI_FAILURE;
755 goto rsrcinitp1_fail;
756 }
757 cleanup = HERMON_RSRC_CLEANUP_LEVEL6;
758
759 /* Initialize the resource pool for "interrupt in" mailboxes */
760 num = ((uint64_t)1 << cfgprof->cp_log_num_intr_inmbox);
761 size = ((uint64_t)1 << cfgprof->cp_log_inmbox_size);
762 rsrc_pool = &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
763 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
764 rsrc_pool->rsrc_pool_size = (size * num);
765 rsrc_pool->rsrc_shift = cfgprof->cp_log_inmbox_size;
766 rsrc_pool->rsrc_quantum = (uint_t)size;
767 rsrc_pool->rsrc_align = HERMON_MBOX_ALIGN;
768 rsrc_pool->rsrc_state = state;
769 mbox_info.mbi_num = num;
770 mbox_info.mbi_size = size;
771 mbox_info.mbi_rsrcpool = rsrc_pool;
772 status = hermon_rsrc_mbox_init(state, &mbox_info);
773 if (status != DDI_SUCCESS) {
774 hermon_rsrc_fini(state, cleanup);
775 status = DDI_FAILURE;
776 goto rsrcinitp1_fail;
777 }
778 cleanup = HERMON_RSRC_CLEANUP_LEVEL7;
779
780 /* Initialize the mailbox list */
781 status = hermon_intr_inmbox_list_init(state);
782 if (status != DDI_SUCCESS) {
783 hermon_rsrc_fini(state, cleanup);
784 status = DDI_FAILURE;
785 goto rsrcinitp1_fail;
786 }
787 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
788 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
789 return (DDI_SUCCESS);
790
791 rsrcinitp1_fail:
792 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
793 return (status);
794 }
795
796
797 /*
798 * hermon_rsrc_init_phase2()
799 * Context: Only called from attach() path context
800 */
801 int
802 hermon_rsrc_init_phase2(hermon_state_t *state)
803 {
804 hermon_rsrc_sw_hdl_info_t hdl_info;
805 hermon_rsrc_hw_entry_info_t entry_info;
806 hermon_rsrc_pool_info_t *rsrc_pool;
807 hermon_rsrc_cleanup_level_t cleanup, ncleanup;
808 hermon_cfg_profile_t *cfgprof;
809 hermon_hw_querydevlim_t *devlim;
810 uint64_t num, max, num_prealloc;
811 uint_t mcg_size, mcg_size_shift;
812 int i, status;
813 char *rsrc_name;
814
815 ASSERT(state != NULL);
816
817 /* Phase 2 initialization begins where Phase 1 left off */
818 cleanup = HERMON_RSRC_CLEANUP_PHASE1_COMPLETE;
819
820 /* Allocate the ICM resource name space */
821
822 /* Build the ICM vmem arena names from Hermon instance */
823 rsrc_name = kmem_zalloc(HERMON_RSRC_NAME_MAXLEN, KM_SLEEP);
824
825 /*
826 * Initialize the resource pools for all objects that exist in
827 * context memory (ICM). The ICM consists of context tables, each
828 * type of resource (QP, CQ, EQ, etc) having its own context table
829 * (QPC, CQC, EQC, etc...).
830 */
831 cfgprof = state->hs_cfg_profile;
832 devlim = &state->hs_devlim;
833
834 /*
835 * Initialize the resource pools for each of the driver resources.
836 * With a few exceptions, these resources fall into the two categories
837 * of either hw_entries or sw_entries.
838 */
839
840 /*
841 * Initialize the resource pools for ICM (hardware) types first.
842 * These resources are managed through vmem arenas, which are
843 * created via the rsrc pool initialization routine. Note that,
844 * due to further calculations, the MCG resource pool is
845 * initialized separately.
846 */
847 for (i = 0; i < HERMON_NUM_ICM_RESOURCES; i++) {
848
849 rsrc_pool = &state->hs_rsrc_hdl[i];
850 rsrc_pool->rsrc_type = i;
851 rsrc_pool->rsrc_state = state;
852
853 /* Set the resource-specific attributes */
854 switch (i) {
855 case HERMON_MTT:
856 max = ((uint64_t)1 << devlim->log_max_mtt);
857 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_mtt);
858 HERMON_RSRC_NAME(rsrc_name, HERMON_MTT_VMEM);
859 ncleanup = HERMON_RSRC_CLEANUP_LEVEL9;
860 break;
861
862 case HERMON_DMPT:
863 max = ((uint64_t)1 << devlim->log_max_dmpt);
864 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_dmpt);
865 HERMON_RSRC_NAME(rsrc_name, HERMON_DMPT_VMEM);
866 ncleanup = HERMON_RSRC_CLEANUP_LEVEL10;
867 break;
868
869 case HERMON_QPC:
870 max = ((uint64_t)1 << devlim->log_max_qp);
871 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_qp);
872 HERMON_RSRC_NAME(rsrc_name, HERMON_QPC_VMEM);
873 ncleanup = HERMON_RSRC_CLEANUP_LEVEL11;
874 break;
875
876 case HERMON_CQC:
877 max = ((uint64_t)1 << devlim->log_max_cq);
878 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_cq);
879 HERMON_RSRC_NAME(rsrc_name, HERMON_CQC_VMEM);
880 ncleanup = HERMON_RSRC_CLEANUP_LEVEL13;
881 break;
882
883 case HERMON_SRQC:
884 max = ((uint64_t)1 << devlim->log_max_srq);
885 num_prealloc = ((uint64_t)1 << devlim->log_rsvd_srq);
886 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQC_VMEM);
887 ncleanup = HERMON_RSRC_CLEANUP_LEVEL16;
888 break;
889
890 case HERMON_EQC:
891 max = ((uint64_t)1 << devlim->log_max_eq);
892 num_prealloc = state->hs_rsvd_eqs;
893 HERMON_RSRC_NAME(rsrc_name, HERMON_EQC_VMEM);
894 ncleanup = HERMON_RSRC_CLEANUP_LEVEL18;
895 break;
896
897 case HERMON_MCG: /* handled below */
898 case HERMON_AUXC:
899 case HERMON_ALTC:
900 case HERMON_RDB:
901 case HERMON_CMPT_QPC:
902 case HERMON_CMPT_SRQC:
903 case HERMON_CMPT_CQC:
904 case HERMON_CMPT_EQC:
905 default:
906 /* We don't need to initialize this rsrc here. */
907 continue;
908 }
909
910 /* Set the common values for all resource pools */
911 rsrc_pool->rsrc_state = state;
912 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
913 rsrc_pool->rsrc_pool_size = state->hs_icm[i].table_size;
914 rsrc_pool->rsrc_align = state->hs_icm[i].table_size;
915 rsrc_pool->rsrc_shift = state->hs_icm[i].log_object_size;
916 rsrc_pool->rsrc_quantum = state->hs_icm[i].object_size;
917
918 /* Now, initialize the entry_info and call the init routine */
919 entry_info.hwi_num = state->hs_icm[i].num_entries;
920 entry_info.hwi_max = max;
921 entry_info.hwi_prealloc = num_prealloc;
922 entry_info.hwi_rsrcpool = rsrc_pool;
923 entry_info.hwi_rsrcname = rsrc_name;
924 status = hermon_rsrc_hw_entries_init(state, &entry_info);
925 if (status != DDI_SUCCESS) {
926 hermon_rsrc_fini(state, cleanup);
927 status = DDI_FAILURE;
928 goto rsrcinitp2_fail;
929 }
930 cleanup = ncleanup;
931 }
932
933 /*
934 * Initialize the Multicast Group (MCG) entries. First, calculate
935 * (and validate) the size of the MCGs.
936 */
937 status = hermon_rsrc_mcg_entry_get_size(state, &mcg_size_shift);
938 if (status != DDI_SUCCESS) {
939 hermon_rsrc_fini(state, cleanup);
940 status = DDI_FAILURE;
941 goto rsrcinitp2_fail;
942 }
943 mcg_size = HERMON_MCGMEM_SZ(state);
944
945 /*
946 * Initialize the resource pool for the MCG table entries. Notice
947 * that the number of MCGs is configurable. Note also that a certain
948 * number of MCGs must be set aside for Hermon firmware use (they
949 * correspond to the number of MCGs used by the internal hash
950 * function).
951 */
952 num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
953 max = ((uint64_t)1 << devlim->log_max_mcg);
954 num_prealloc = ((uint64_t)1 << cfgprof->cp_log_num_mcg_hash);
955 rsrc_pool = &state->hs_rsrc_hdl[HERMON_MCG];
956 rsrc_pool->rsrc_loc = HERMON_IN_ICM;
957 rsrc_pool->rsrc_pool_size = (mcg_size * num);
958 rsrc_pool->rsrc_shift = mcg_size_shift;
959 rsrc_pool->rsrc_quantum = mcg_size;
960 rsrc_pool->rsrc_align = (mcg_size * num);
961 rsrc_pool->rsrc_state = state;
962 HERMON_RSRC_NAME(rsrc_name, HERMON_MCG_VMEM);
963 entry_info.hwi_num = num;
964 entry_info.hwi_max = max;
965 entry_info.hwi_prealloc = num_prealloc;
966 entry_info.hwi_rsrcpool = rsrc_pool;
967 entry_info.hwi_rsrcname = rsrc_name;
968 status = hermon_rsrc_hw_entries_init(state, &entry_info);
969 if (status != DDI_SUCCESS) {
970 hermon_rsrc_fini(state, cleanup);
971 status = DDI_FAILURE;
972 goto rsrcinitp2_fail;
973 }
974 cleanup = HERMON_RSRC_CLEANUP_LEVEL19;
975
976 /*
977 * Initialize the full range of ICM for the AUXC resource.
978 * This is done because its size is so small, about 1 byte per QP.
979 */
980
981 /*
982 * Initialize the Hermon command handling interfaces. This step
983 * sets up the outstanding command tracking mechanism for easy access
984 * and fast allocation (see hermon_cmd.c for more details).
985 */
986 status = hermon_outstanding_cmdlist_init(state);
987 if (status != DDI_SUCCESS) {
988 hermon_rsrc_fini(state, cleanup);
989 status = DDI_FAILURE;
990 goto rsrcinitp2_fail;
991 }
992 cleanup = HERMON_RSRC_CLEANUP_LEVEL20;
993
994 /* Initialize the resource pool and vmem arena for the PD handles */
995 rsrc_pool = &state->hs_rsrc_hdl[HERMON_PDHDL];
996 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
997 rsrc_pool->rsrc_quantum = sizeof (struct hermon_sw_pd_s);
998 rsrc_pool->rsrc_state = state;
999 HERMON_RSRC_NAME(rsrc_name, HERMON_PDHDL_CACHE);
1000 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_pd);
1001 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_pd);
1002 hdl_info.swi_rsrcpool = rsrc_pool;
1003 hdl_info.swi_constructor = hermon_rsrc_pdhdl_constructor;
1004 hdl_info.swi_destructor = hermon_rsrc_pdhdl_destructor;
1005 hdl_info.swi_rsrcname = rsrc_name;
1006 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1007 status = hermon_rsrc_pd_handles_init(state, &hdl_info);
1008 if (status != DDI_SUCCESS) {
1009 hermon_rsrc_fini(state, cleanup);
1010 status = DDI_FAILURE;
1011 goto rsrcinitp2_fail;
1012 }
1013 cleanup = HERMON_RSRC_CLEANUP_LEVEL21;
1014
1015 /*
1016 * Initialize the resource pools for the rest of the software handles.
1017 * This includes MR handles, EQ handles, QP handles, etc. These
1018 * objects are almost entirely managed using kmem_cache routines,
1019 * and do not utilize a vmem arena.
1020 */
1021 for (i = HERMON_NUM_ICM_RESOURCES; i < HERMON_NUM_RESOURCES; i++) {
1022 rsrc_pool = &state->hs_rsrc_hdl[i];
1023 rsrc_pool->rsrc_type = i;
1024
1025 /* Set the resource-specific attributes */
1026 switch (i) {
1027 case HERMON_MRHDL:
1028 rsrc_pool->rsrc_quantum =
1029 sizeof (struct hermon_sw_mr_s);
1030 HERMON_RSRC_NAME(rsrc_name, HERMON_MRHDL_CACHE);
1031 hdl_info.swi_num =
1032 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1033 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1034 hdl_info.swi_max =
1035 ((uint64_t)1 << cfgprof->cp_log_num_dmpt) +
1036 ((uint64_t)1 << cfgprof->cp_log_num_cmpt);
1037 hdl_info.swi_constructor =
1038 hermon_rsrc_mrhdl_constructor;
1039 hdl_info.swi_destructor = hermon_rsrc_mrhdl_destructor;
1040 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1041 ncleanup = HERMON_RSRC_CLEANUP_LEVEL22;
1042 break;
1043
1044 case HERMON_EQHDL:
1045 rsrc_pool->rsrc_quantum =
1046 sizeof (struct hermon_sw_eq_s);
1047 HERMON_RSRC_NAME(rsrc_name, HERMON_EQHDL_CACHE);
1048 hdl_info.swi_num = HERMON_NUM_EQ;
1049 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_eq);
1050 hdl_info.swi_constructor = NULL;
1051 hdl_info.swi_destructor = NULL;
1052 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1053 ncleanup = HERMON_RSRC_CLEANUP_LEVEL23;
1054 break;
1055
1056 case HERMON_CQHDL:
1057 rsrc_pool->rsrc_quantum =
1058 sizeof (struct hermon_sw_cq_s);
1059 HERMON_RSRC_NAME(rsrc_name, HERMON_CQHDL_CACHE);
1060 hdl_info.swi_num =
1061 (uint64_t)1 << cfgprof->cp_log_num_cq;
1062 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_cq;
1063 hdl_info.swi_constructor =
1064 hermon_rsrc_cqhdl_constructor;
1065 hdl_info.swi_destructor = hermon_rsrc_cqhdl_destructor;
1066 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1067 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1068 ncleanup = HERMON_RSRC_CLEANUP_LEVEL24;
1069 break;
1070
1071 case HERMON_SRQHDL:
1072 rsrc_pool->rsrc_quantum =
1073 sizeof (struct hermon_sw_srq_s);
1074 HERMON_RSRC_NAME(rsrc_name, HERMON_SRQHDL_CACHE);
1075 hdl_info.swi_num =
1076 (uint64_t)1 << cfgprof->cp_log_num_srq;
1077 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_srq;
1078 hdl_info.swi_constructor =
1079 hermon_rsrc_srqhdl_constructor;
1080 hdl_info.swi_destructor = hermon_rsrc_srqhdl_destructor;
1081 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1082 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1083 ncleanup = HERMON_RSRC_CLEANUP_LEVEL25;
1084 break;
1085
1086 case HERMON_AHHDL:
1087 rsrc_pool->rsrc_quantum =
1088 sizeof (struct hermon_sw_ah_s);
1089 HERMON_RSRC_NAME(rsrc_name, HERMON_AHHDL_CACHE);
1090 hdl_info.swi_num =
1091 (uint64_t)1 << cfgprof->cp_log_num_ah;
1092 hdl_info.swi_max = HERMON_NUM_AH;
1093 hdl_info.swi_constructor =
1094 hermon_rsrc_ahhdl_constructor;
1095 hdl_info.swi_destructor = hermon_rsrc_ahhdl_destructor;
1096 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1097 ncleanup = HERMON_RSRC_CLEANUP_LEVEL26;
1098 break;
1099
1100 case HERMON_QPHDL:
1101 rsrc_pool->rsrc_quantum =
1102 sizeof (struct hermon_sw_qp_s);
1103 HERMON_RSRC_NAME(rsrc_name, HERMON_QPHDL_CACHE);
1104 hdl_info.swi_num =
1105 (uint64_t)1 << cfgprof->cp_log_num_qp;
1106 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_qp;
1107 hdl_info.swi_constructor =
1108 hermon_rsrc_qphdl_constructor;
1109 hdl_info.swi_destructor = hermon_rsrc_qphdl_destructor;
1110 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1111 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1112 ncleanup = HERMON_RSRC_CLEANUP_LEVEL27;
1113 break;
1114
1115 case HERMON_REFCNT:
1116 rsrc_pool->rsrc_quantum = sizeof (hermon_sw_refcnt_t);
1117 HERMON_RSRC_NAME(rsrc_name, HERMON_REFCNT_CACHE);
1118 hdl_info.swi_num =
1119 (uint64_t)1 << cfgprof->cp_log_num_dmpt;
1120 hdl_info.swi_max = (uint64_t)1 << devlim->log_max_dmpt;
1121 hdl_info.swi_constructor =
1122 hermon_rsrc_refcnt_constructor;
1123 hdl_info.swi_destructor = hermon_rsrc_refcnt_destructor;
1124 hdl_info.swi_flags = HERMON_SWHDL_KMEMCACHE_INIT;
1125 ncleanup = HERMON_RSRC_CLEANUP_LEVEL28;
1126 break;
1127
1128 default:
1129 continue;
1130 }
1131
1132 /* Set the common values and call the init routine */
1133 rsrc_pool->rsrc_loc = HERMON_IN_SYSMEM;
1134 rsrc_pool->rsrc_state = state;
1135 hdl_info.swi_rsrcpool = rsrc_pool;
1136 hdl_info.swi_rsrcname = rsrc_name;
1137 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1138 if (status != DDI_SUCCESS) {
1139 hermon_rsrc_fini(state, cleanup);
1140 status = DDI_FAILURE;
1141 goto rsrcinitp2_fail;
1142 }
1143 cleanup = ncleanup;
1144 }
1145
1146 /*
1147 * Initialize a resource pool for the MCG handles. Notice that for
1148 * these MCG handles, we are allocating a table of structures (used to
1149 * keep track of the MCG entries that are being written to hardware
1150 * and to speed up multicast attach/detach operations).
1151 */
1152 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1153 hdl_info.swi_max = ((uint64_t)1 << devlim->log_max_mcg);
1154 hdl_info.swi_flags = HERMON_SWHDL_TABLE_INIT;
1155 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1156 status = hermon_rsrc_sw_handles_init(state, &hdl_info);
1157 if (status != DDI_SUCCESS) {
1158 hermon_rsrc_fini(state, cleanup);
1159 status = DDI_FAILURE;
1160 goto rsrcinitp2_fail;
1161 }
1162 state->hs_mcghdl = hdl_info.swi_table_ptr;
1163 cleanup = HERMON_RSRC_CLEANUP_LEVEL29;
1164
1165 /*
1166 * Last, initialize the resource pool for the UAR pages, which contain
1167 * the hardware's doorbell registers. Each process supported in User
1168 * Mode is assigned a UAR page. Also coming from this pool are the
1169 * kernel-assigned UAR page, and any hardware-reserved pages. Note
1170 * that the number of UAR pages is configurable, the value must be less
1171 * than the maximum value (obtained from the QUERY_DEV_LIM command) or
1172 * the initialization will fail. Note also that we assign the base
1173 * address of the UAR BAR to the rsrc_start parameter.
1174 */
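/*
 * Worked example (illustrative value, not necessarily the shipped
 * profile): with cp_log_num_uar = 10 the pool below spans 1024 UAR
 * pages, each one system page in size, starting at the UAR BAR base
 * address (hs_reg_uar_baseaddr), with at least 128 of those pages
 * (max(num_rsvd_uar, 128)) counted as hardware-reserved.
 */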
1175 num = ((uint64_t)1 << cfgprof->cp_log_num_uar);
1176 max = num;
1177 num_prealloc = max(devlim->num_rsvd_uar, 128);
1178 rsrc_pool = &state->hs_rsrc_hdl[HERMON_UARPG];
1179 rsrc_pool->rsrc_loc = HERMON_IN_UAR;
1180 rsrc_pool->rsrc_pool_size = (num << PAGESHIFT);
1181 rsrc_pool->rsrc_shift = PAGESHIFT;
1182 rsrc_pool->rsrc_quantum = (uint_t)PAGESIZE;
1183 rsrc_pool->rsrc_align = PAGESIZE;
1184 rsrc_pool->rsrc_state = state;
1185 rsrc_pool->rsrc_start = (void *)state->hs_reg_uar_baseaddr;
1186 HERMON_RSRC_NAME(rsrc_name, HERMON_UAR_PAGE_VMEM_ATTCH);
1187 entry_info.hwi_num = num;
1188 entry_info.hwi_max = max;
1189 entry_info.hwi_prealloc = num_prealloc;
1190 entry_info.hwi_rsrcpool = rsrc_pool;
1191 entry_info.hwi_rsrcname = rsrc_name;
1192 status = hermon_rsrc_hw_entries_init(state, &entry_info);
1193 if (status != DDI_SUCCESS) {
1194 hermon_rsrc_fini(state, cleanup);
1195 status = DDI_FAILURE;
1196 goto rsrcinitp2_fail;
1197 }
1198
1199 cleanup = HERMON_RSRC_CLEANUP_ALL;
1200
1201 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1202 return (DDI_SUCCESS);
1203
1204 rsrcinitp2_fail:
1205 kmem_free(rsrc_name, HERMON_RSRC_NAME_MAXLEN);
1206 return (status);
1207 }
1208
1209
1210 /*
1211 * hermon_rsrc_fini()
1212 * Context: Only called from attach() and/or detach() path contexts
1213 */
1214 void
1215 hermon_rsrc_fini(hermon_state_t *state, hermon_rsrc_cleanup_level_t clean)
1216 {
1217 hermon_rsrc_sw_hdl_info_t hdl_info;
1218 hermon_rsrc_hw_entry_info_t entry_info;
1219 hermon_rsrc_mbox_info_t mbox_info;
1220 hermon_cfg_profile_t *cfgprof;
1221
1222 ASSERT(state != NULL);
1223
1224 cfgprof = state->hs_cfg_profile;
1225
1226 /*
1227 * If init code above is shortened up (see comments), then we
1228 * need to establish how to safely and simply clean up from any
1229 * given failure point. Flags, maybe...
1230 */
1231
1232 switch (clean) {
1233 /*
1234 * If we add more resources that need to be cleaned up here, we should
1235 * ensure that HERMON_RSRC_CLEANUP_ALL is still the first entry (i.e.
1236 * corresponds to the last resource allocated).
1237 */
1238
1239 case HERMON_RSRC_CLEANUP_ALL:
1240 case HERMON_RSRC_CLEANUP_LEVEL31:
1241 /* Cleanup the UAR page resource pool, first the dbr pages */
1242 if (state->hs_kern_dbr) {
1243 hermon_dbr_kern_free(state);
1244 state->hs_kern_dbr = NULL;
1245 }
1246
1247 /* Next, the pool itself */
1248 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_UARPG];
1249 hermon_rsrc_hw_entries_fini(state, &entry_info);
1250
1251 /* FALLTHROUGH */
1252
1253 case HERMON_RSRC_CLEANUP_LEVEL30:
1254 /* Cleanup the central MCG handle pointers list */
1255 hdl_info.swi_rsrcpool = NULL;
1256 hdl_info.swi_table_ptr = state->hs_mcghdl;
1257 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_mcg);
1258 hdl_info.swi_prealloc_sz = sizeof (struct hermon_sw_mcg_list_s);
1259 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1260 /* FALLTHROUGH */
1261
1262 case HERMON_RSRC_CLEANUP_LEVEL29:
1263 /* Cleanup the reference count resource pool */
1264 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_REFCNT];
1265 hdl_info.swi_table_ptr = NULL;
1266 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1267 /* FALLTHROUGH */
1268
1269 case HERMON_RSRC_CLEANUP_LEVEL28:
1270 /* Cleanup the QP handle resource pool */
1271 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPHDL];
1272 hdl_info.swi_table_ptr = NULL;
1273 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_qp);
1274 hdl_info.swi_prealloc_sz = sizeof (hermon_qphdl_t);
1275 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1276 /* FALLTHROUGH */
1277 case HERMON_RSRC_CLEANUP_LEVEL27:
1278 /* Cleanup the address handle resource pool */
1279 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AHHDL];
1280 hdl_info.swi_table_ptr = NULL;
1281 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1282 /* FALLTHROUGH */
1283
1284 case HERMON_RSRC_CLEANUP_LEVEL26:
1285 /* Cleanup the SRQ handle resource pool. */
1286 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQHDL];
1287 hdl_info.swi_table_ptr = NULL;
1288 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_srq);
1289 hdl_info.swi_prealloc_sz = sizeof (hermon_srqhdl_t);
1290 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1291 /* FALLTHROUGH */
1292
1293 case HERMON_RSRC_CLEANUP_LEVEL25:
1294 /* Cleanup the CQ handle resource pool */
1295 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQHDL];
1296 hdl_info.swi_table_ptr = NULL;
1297 hdl_info.swi_num = ((uint64_t)1 << cfgprof->cp_log_num_cq);
1298 hdl_info.swi_prealloc_sz = sizeof (hermon_cqhdl_t);
1299 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1300 /* FALLTHROUGH */
1301
1302 case HERMON_RSRC_CLEANUP_LEVEL24:
1303 /* Cleanup the EQ handle resource pool */
1304 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQHDL];
1305 hdl_info.swi_table_ptr = NULL;
1306 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1307 /* FALLTHROUGH */
1308
1309 case HERMON_RSRC_CLEANUP_LEVEL23:
1310 /* Cleanup the MR handle resource pool */
1311 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MRHDL];
1312 hdl_info.swi_table_ptr = NULL;
1313 hermon_rsrc_sw_handles_fini(state, &hdl_info);
1314 /* FALLTHROUGH */
1315
1316 case HERMON_RSRC_CLEANUP_LEVEL22:
1317 /* Cleanup the PD handle resource pool */
1318 hdl_info.swi_rsrcpool = &state->hs_rsrc_hdl[HERMON_PDHDL];
1319 hdl_info.swi_table_ptr = NULL;
1320 hermon_rsrc_pd_handles_fini(state, &hdl_info);
1321 /* FALLTHROUGH */
1322
1323 case HERMON_RSRC_CLEANUP_LEVEL21:
1324 /* Currently unused - FALLTHROUGH */
1325
1326 case HERMON_RSRC_CLEANUP_LEVEL20:
1327 /* Cleanup the outstanding command list */
1328 hermon_outstanding_cmdlist_fini(state);
1329 /* FALLTHROUGH */
1330
1331 case HERMON_RSRC_CLEANUP_LEVEL19:
1332 /* Cleanup the EQC table resource pool */
1333 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_EQC];
1334 hermon_rsrc_hw_entries_fini(state, &entry_info);
1335 /* FALLTHROUGH */
1336
1337 case HERMON_RSRC_CLEANUP_LEVEL18:
1338 /* Cleanup the MCG table resource pool */
1339 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MCG];
1340 hermon_rsrc_hw_entries_fini(state, &entry_info);
1341 /* FALLTHROUGH */
1342
1343 case HERMON_RSRC_CLEANUP_LEVEL17:
1344 /* Currently unused - FALLTHROUGH */
1345 case HERMON_RSRC_CLEANUP_LEVEL16:
1346 /* Cleanup the SRQC table resource pool */
1347 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_SRQC];
1348 hermon_rsrc_hw_entries_fini(state, &entry_info);
1349 /* FALLTHROUGH */
1350
1351 case HERMON_RSRC_CLEANUP_LEVEL15:
1352 /* Cleanup the AUXC table resource pool */
1353 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_AUXC];
1354 hermon_rsrc_hw_entries_fini(state, &entry_info);
1355 /* FALLTHROUGH */
1356
1357 case HERMON_RSRC_CLEANUP_LEVEL14:
1358 /* Cleanup the ALTC table resource pool */
1359 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_ALTC];
1360 hermon_rsrc_hw_entries_fini(state, &entry_info);
1361 /* FALLTHROUGH */
1362
1363 case HERMON_RSRC_CLEANUP_LEVEL13:
1364 /* Cleanup the CQC table resource pool */
1365 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CQC];
1366 hermon_rsrc_hw_entries_fini(state, &entry_info);
1367 /* FALLTHROUGH */
1368
1369 case HERMON_RSRC_CLEANUP_LEVEL12:
1370 /* Cleanup the RDB table resource pool */
1371 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_RDB];
1372 hermon_rsrc_hw_entries_fini(state, &entry_info);
1373 /* FALLTHROUGH */
1374
1375 case HERMON_RSRC_CLEANUP_LEVEL11:
1376 /* Cleanup the QPC table resource pool */
1377 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_QPC];
1378 hermon_rsrc_hw_entries_fini(state, &entry_info);
1379 /* FALLTHROUGH */
1380
1381 case HERMON_RSRC_CLEANUP_LEVEL10EQ:
1382 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1383 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_EQC];
1384 hermon_rsrc_hw_entries_fini(state, &entry_info);
1385 /* FALLTHROUGH */
1386
1387 case HERMON_RSRC_CLEANUP_LEVEL10CQ:
1388 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1389 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_CQC];
1390 hermon_rsrc_hw_entries_fini(state, &entry_info);
1391 /* FALLTHROUGH */
1392
1393 case HERMON_RSRC_CLEANUP_LEVEL10SRQ:
1394 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1395 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_SRQC];
1396 hermon_rsrc_hw_entries_fini(state, &entry_info);
1397 /* FALLTHROUGH */
1398
1399 case HERMON_RSRC_CLEANUP_LEVEL10QP:
1400 /* Cleanup the cMPTs for the EQs, CQs, SRQs, and QPs */
1401 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_CMPT_QPC];
1402 hermon_rsrc_hw_entries_fini(state, &entry_info);
1403 /* FALLTHROUGH */
1404
1405 case HERMON_RSRC_CLEANUP_LEVEL10:
1406 /* Cleanup the dMPT table resource pool */
1407 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_DMPT];
1408 hermon_rsrc_hw_entries_fini(state, &entry_info);
1409 /* FALLTHROUGH */
1410
1411 case HERMON_RSRC_CLEANUP_LEVEL9:
1412 /* Cleanup the MTT table resource pool */
1413 entry_info.hwi_rsrcpool = &state->hs_rsrc_hdl[HERMON_MTT];
1414 hermon_rsrc_hw_entries_fini(state, &entry_info);
1415 break;
1416
1417 /*
1418 * The cleanup below comes from the "Phase 1" initialization step.
1419 * (see hermon_rsrc_init_phase1() above)
1420 */
1421 case HERMON_RSRC_CLEANUP_PHASE1_COMPLETE:
1422 /* Cleanup the "In" mailbox list */
1423 hermon_intr_inmbox_list_fini(state);
1424 /* FALLTHROUGH */
1425
1426 case HERMON_RSRC_CLEANUP_LEVEL7:
1427 /* Cleanup the interrupt "In" mailbox resource pool */
1428 mbox_info.mbi_rsrcpool =
1429 &state->hs_rsrc_hdl[HERMON_INTR_IN_MBOX];
1430 hermon_rsrc_mbox_fini(state, &mbox_info);
1431 /* FALLTHROUGH */
1432
1433 case HERMON_RSRC_CLEANUP_LEVEL6:
1434 /* Cleanup the "In" mailbox list */
1435 hermon_inmbox_list_fini(state);
1436 /* FALLTHROUGH */
1437
1438 case HERMON_RSRC_CLEANUP_LEVEL5:
1439 /* Cleanup the "In" mailbox resource pool */
1440 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_IN_MBOX];
1441 hermon_rsrc_mbox_fini(state, &mbox_info);
1442 /* FALLTHROUGH */
1443
1444 case HERMON_RSRC_CLEANUP_LEVEL4:
1445 /* Cleanup the interrupt "Out" mailbox list */
1446 hermon_intr_outmbox_list_fini(state);
1447 /* FALLTHROUGH */
1448
1449 case HERMON_RSRC_CLEANUP_LEVEL3:
1450 /* Cleanup the "Out" mailbox resource pool */
1451 mbox_info.mbi_rsrcpool =
1452 &state->hs_rsrc_hdl[HERMON_INTR_OUT_MBOX];
1453 hermon_rsrc_mbox_fini(state, &mbox_info);
1454 /* FALLTHROUGH */
1455
1456 case HERMON_RSRC_CLEANUP_LEVEL2:
1457 /* Cleanup the "Out" mailbox list */
1458 hermon_outmbox_list_fini(state);
1459 /* FALLTHROUGH */
1460
1461 case HERMON_RSRC_CLEANUP_LEVEL1:
1462 /* Cleanup the "Out" mailbox resource pool */
1463 mbox_info.mbi_rsrcpool = &state->hs_rsrc_hdl[HERMON_OUT_MBOX];
1464 hermon_rsrc_mbox_fini(state, &mbox_info);
1465 /* FALLTHROUGH */
1466
1467 case HERMON_RSRC_CLEANUP_LEVEL0:
1468 /* Free the array of hermon_rsrc_pool_info_t's */
1469
1470 kmem_free(state->hs_rsrc_hdl, HERMON_NUM_RESOURCES *
1471 sizeof (hermon_rsrc_pool_info_t));
1472
1473 kmem_cache_destroy(state->hs_rsrc_cache);
1474 break;
1475
1476 default:
1477 HERMON_WARNING(state, "unexpected resource cleanup level");
1478 break;
1479 }
1480 }
1481
1482
1483 /*
1484 * hermon_rsrc_mbox_init()
1485 * Context: Only called from attach() path context
1486 */
1487 static int
1488 hermon_rsrc_mbox_init(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1489 {
1490 hermon_rsrc_pool_info_t *rsrc_pool;
1491 hermon_rsrc_priv_mbox_t *priv;
1492
1493 ASSERT(state != NULL);
1494 ASSERT(info != NULL);
1495
1496 rsrc_pool = info->mbi_rsrcpool;
1497 ASSERT(rsrc_pool != NULL);
1498
1499 /* Allocate and initialize mailbox private structure */
1500 priv = kmem_zalloc(sizeof (hermon_rsrc_priv_mbox_t), KM_SLEEP);
1501 priv->pmb_dip = state->hs_dip;
1502 priv->pmb_devaccattr = state->hs_reg_accattr;
1503 priv->pmb_xfer_mode = DDI_DMA_CONSISTENT;
1504
1505 /*
1506 * Initialize many of the default DMA attributes. Then set alignment
1507 * and scatter-gather restrictions specific for mailbox memory.
1508 */
1509 hermon_dma_attr_init(state, &priv->pmb_dmaattr);
1510 priv->pmb_dmaattr.dma_attr_align = HERMON_MBOX_ALIGN;
1511 priv->pmb_dmaattr.dma_attr_sgllen = 1;
1512 priv->pmb_dmaattr.dma_attr_flags = 0;
1513 rsrc_pool->rsrc_private = priv;
1514
1515 ASSERT(rsrc_pool->rsrc_loc == HERMON_IN_SYSMEM);
1516
1517 rsrc_pool->rsrc_start = NULL;
1518 rsrc_pool->rsrc_vmp = NULL;
1519
1520 return (DDI_SUCCESS);
1521 }
1522
1523
1524 /*
1525 * hermon_rsrc_mbox_fini()
1526 * Context: Only called from attach() and/or detach() path contexts
1527 */
1528 /* ARGSUSED */
1529 static void
1530 hermon_rsrc_mbox_fini(hermon_state_t *state, hermon_rsrc_mbox_info_t *info)
1531 {
1532 hermon_rsrc_pool_info_t *rsrc_pool;
1533
1534 ASSERT(state != NULL);
1535 ASSERT(info != NULL);
1536
1537 rsrc_pool = info->mbi_rsrcpool;
1538 ASSERT(rsrc_pool != NULL);
1539
1540 /* Free up the private struct */
1541 kmem_free(rsrc_pool->rsrc_private, sizeof (hermon_rsrc_priv_mbox_t));
1542 }
1543
1544
1545 /*
1546 * hermon_rsrc_hw_entries_init()
1547 * Context: Only called from attach() path context
1548 */
1549 int
1550 hermon_rsrc_hw_entries_init(hermon_state_t *state,
1551 hermon_rsrc_hw_entry_info_t *info)
1552 {
1553 hermon_rsrc_pool_info_t *rsrc_pool;
1554 hermon_rsrc_t *rsvd_rsrc = NULL;
1555 vmem_t *vmp;
1556 uint64_t num_hwentry, max_hwentry, num_prealloc;
1557 int status;
1558
1559 ASSERT(state != NULL);
1560 ASSERT(info != NULL);
1561
1562 rsrc_pool = info->hwi_rsrcpool;
1563 ASSERT(rsrc_pool != NULL);
1564 num_hwentry = info->hwi_num;
1565 max_hwentry = info->hwi_max;
1566 num_prealloc = info->hwi_prealloc;
1567
1568 if (hermon_rsrc_verbose) {
1569 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init: "
1570 "rsrc_type (0x%x) num (%llx) max (0x%llx) prealloc "
1571 "(0x%llx)", rsrc_pool->rsrc_type, (longlong_t)num_hwentry,
1572 (longlong_t)max_hwentry, (longlong_t)num_prealloc);
1573 }
1574
1575 /* Make sure number of HW entries makes sense */
1576 if (num_hwentry > max_hwentry) {
1577 return (DDI_FAILURE);
1578 }
1579
1580 /* Set this pool's rsrc_start from the initial ICM allocation */
1581 if (rsrc_pool->rsrc_start == 0) {
1582
1583 /* use a ROUND value that works on both 32 and 64-bit kernels */
1584 rsrc_pool->rsrc_start = (void *)(uintptr_t)0x10000000;
1585
1586 if (hermon_rsrc_verbose) {
1587 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1588 " rsrc_type (0x%x) rsrc_start set (0x%lx)",
1589 rsrc_pool->rsrc_type, rsrc_pool->rsrc_start);
1590 }
1591 }
1592
1593 /*
1594 * Create new vmem arena for the HW entries table if rsrc_quantum
1595 * is non-zero. Otherwise if rsrc_quantum is zero, then these HW
1596 * entries are not going to be dynamically allocatable (i.e. they
1597 * won't be allocated/freed through hermon_rsrc_alloc/free). This
1598 * latter option is used for both ALTC and CMPT resources which
1599 * are managed by hardware.
1600 */
1601 if (rsrc_pool->rsrc_quantum != 0) {
1602 vmp = vmem_create(info->hwi_rsrcname,
1603 (void *)(uintptr_t)rsrc_pool->rsrc_start,
1604 rsrc_pool->rsrc_pool_size, rsrc_pool->rsrc_quantum,
1605 NULL, NULL, NULL, 0, VM_SLEEP);
1606 if (vmp == NULL) {
1607 /* failed to create vmem arena */
1608 return (DDI_FAILURE);
1609 }
1610 rsrc_pool->rsrc_vmp = vmp;
1611 if (hermon_rsrc_verbose) {
1612 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1613 " rsrc_type (0x%x) created vmem arena for rsrc",
1614 rsrc_pool->rsrc_type);
1615 }
1616 } else {
1617 /* we do not require a vmem arena */
1618 rsrc_pool->rsrc_vmp = NULL;
1619 if (hermon_rsrc_verbose) {
1620 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entries_init:"
1621 " rsrc_type (0x%x) vmem arena not required",
1622 rsrc_pool->rsrc_type);
1623 }
1624 }
1625
1626 /* Allocate hardware reserved resources, if any */
1627 if (num_prealloc != 0) {
1628 status = hermon_rsrc_alloc(state, rsrc_pool->rsrc_type,
1629 num_prealloc, HERMON_SLEEP, &rsvd_rsrc);
1630 if (status != DDI_SUCCESS) {
1631 /* unable to preallocate the reserved entries */
1632 if (rsrc_pool->rsrc_vmp != NULL) {
1633 vmem_destroy(rsrc_pool->rsrc_vmp);
1634 }
1635 return (DDI_FAILURE);
1636 }
1637 }
1638 rsrc_pool->rsrc_private = rsvd_rsrc;
1639
1640 return (DDI_SUCCESS);
1641 }
1642
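/*
 * A minimal sketch (illustrative only, with hypothetical example values)
 * of the address/index arithmetic implied by the arena created above:
 * the vmem arena hands out addresses relative to rsrc_start, and the
 * allocation routines further below convert them to hardware table
 * indices using rsrc_shift.  Assuming 32-byte entries (rsrc_shift of 5)
 * and the 0x10000000 base used above, with "addr" being a value returned
 * by vmem_xalloc():
 *
 *	uintptr_t rsrc_start = 0x10000000;
 *	uint_t    rsrc_shift = 5;
 *	uintptr_t addr       = 0x10000400;
 *	uint64_t  indx       = (addr - rsrc_start) >> rsrc_shift;
 *	uintptr_t back       = rsrc_start + (indx << rsrc_shift);
 *
 * Here "indx" is 0x20 and "back" equals "addr" again, which is the same
 * round trip performed by hermon_rsrc_hw_entry_alloc() and
 * hermon_rsrc_hw_entry_free().
 */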
1643
1644 /*
1645 * hermon_rsrc_hw_entries_fini()
1646 * Context: Only called from attach() and/or detach() path contexts
1647 */
1648 void
1649 hermon_rsrc_hw_entries_fini(hermon_state_t *state,
1650 hermon_rsrc_hw_entry_info_t *info)
1651 {
1652 hermon_rsrc_pool_info_t *rsrc_pool;
1653 hermon_rsrc_t *rsvd_rsrc;
1654
1655 ASSERT(state != NULL);
1656 ASSERT(info != NULL);
1657
1658 rsrc_pool = info->hwi_rsrcpool;
1659 ASSERT(rsrc_pool != NULL);
1660
1661 /* Free up any "reserved" (i.e. preallocated) HW entries */
1662 rsvd_rsrc = (hermon_rsrc_t *)rsrc_pool->rsrc_private;
1663 if (rsvd_rsrc != NULL) {
1664 hermon_rsrc_free(state, &rsvd_rsrc);
1665 }
1666
1667 /*
1668 * If we've actually setup a vmem arena for the HW entries, then
1669 * destroy it now
1670 */
1671 if (rsrc_pool->rsrc_vmp != NULL) {
1672 vmem_destroy(rsrc_pool->rsrc_vmp);
1673 }
1674 }
1675
1676
1677 /*
1678 * hermon_rsrc_sw_handles_init()
1679 * Context: Only called from attach() path context
1680 */
1681 /* ARGSUSED */
1682 static int
1683 hermon_rsrc_sw_handles_init(hermon_state_t *state,
1684 hermon_rsrc_sw_hdl_info_t *info)
1685 {
1686 hermon_rsrc_pool_info_t *rsrc_pool;
1687 uint64_t num_swhdl, max_swhdl, prealloc_sz;
1688
1689 ASSERT(state != NULL);
1690 ASSERT(info != NULL);
1691
1692 rsrc_pool = info->swi_rsrcpool;
1693 ASSERT(rsrc_pool != NULL);
1694 num_swhdl = info->swi_num;
1695 max_swhdl = info->swi_max;
1696 prealloc_sz = info->swi_prealloc_sz;
1697
1698
1699 /* Make sure number of SW handles makes sense */
1700 if (num_swhdl > max_swhdl) {
1701 return (DDI_FAILURE);
1702 }
1703
1704 /*
1705 * Depending on the flags parameter, create a kmem_cache for some
1706 * number of software handle structures. Note: kmem_cache_create()
1707 * will SLEEP until successful.
1708 */
1709 if (info->swi_flags & HERMON_SWHDL_KMEMCACHE_INIT) {
1710 rsrc_pool->rsrc_private = kmem_cache_create(
1711 info->swi_rsrcname, rsrc_pool->rsrc_quantum, 0,
1712 info->swi_constructor, info->swi_destructor, NULL,
1713 rsrc_pool->rsrc_state, NULL, 0);
1714 }
1715
1716
1717 /* Allocate the central list of SW handle pointers */
1718 if (info->swi_flags & HERMON_SWHDL_TABLE_INIT) {
1719 info->swi_table_ptr = kmem_zalloc(num_swhdl * prealloc_sz,
1720 KM_SLEEP);
1721 }
1722
1723 return (DDI_SUCCESS);
1724 }
1725
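/*
 * A minimal usage sketch for a cache created with the flags above
 * ("hermon_sw_pd_t" is one of the real handle types, used here purely
 * as an example).  The constructor and destructor run as the cache
 * grows and shrinks, not on every allocation, so state such as the
 * per-object mutexes they manage persists across alloc/free cycles:
 *
 *	hermon_sw_pd_t *pd;
 *
 *	pd = kmem_cache_alloc(rsrc_pool->rsrc_private, KM_SLEEP);
 *	...
 *	kmem_cache_free(rsrc_pool->rsrc_private, pd);
 */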
1726
1727 /*
1728 * hermon_rsrc_sw_handles_fini()
1729 * Context: Only called from attach() and/or detach() path contexts
1730 */
1731 /* ARGSUSED */
1732 static void
1733 hermon_rsrc_sw_handles_fini(hermon_state_t *state,
1734 hermon_rsrc_sw_hdl_info_t *info)
1735 {
1736 hermon_rsrc_pool_info_t *rsrc_pool;
1737 uint64_t num_swhdl, prealloc_sz;
1738
1739 ASSERT(state != NULL);
1740 ASSERT(info != NULL);
1741
1742 rsrc_pool = info->swi_rsrcpool;
1743 num_swhdl = info->swi_num;
1744 prealloc_sz = info->swi_prealloc_sz;
1745
1746 /*
1747 * If a "software handle" kmem_cache exists for this resource, then
1748 * destroy it now
1749 */
1750 if (rsrc_pool != NULL) {
1751 kmem_cache_destroy(rsrc_pool->rsrc_private);
1752 }
1753
1754 /* Free up this central list of SW handle pointers */
1755 if (info->swi_table_ptr != NULL) {
1756 kmem_free(info->swi_table_ptr, num_swhdl * prealloc_sz);
1757 }
1758 }
1759
1760
1761 /*
1762 * hermon_rsrc_pd_handles_init()
1763 * Context: Only called from attach() path context
1764 */
1765 static int
1766 hermon_rsrc_pd_handles_init(hermon_state_t *state,
1767 hermon_rsrc_sw_hdl_info_t *info)
1768 {
1769 hermon_rsrc_pool_info_t *rsrc_pool;
1770 vmem_t *vmp;
1771 char vmem_name[HERMON_RSRC_NAME_MAXLEN];
1772 int status;
1773
1774 ASSERT(state != NULL);
1775 ASSERT(info != NULL);
1776
1777 rsrc_pool = info->swi_rsrcpool;
1778 ASSERT(rsrc_pool != NULL);
1779
1780 /* Initialize the resource pool for software handle table */
1781 status = hermon_rsrc_sw_handles_init(state, info);
1782 if (status != DDI_SUCCESS) {
1783 return (DDI_FAILURE);
1784 }
1785
1786 /* Build vmem arena name from Hermon instance */
1787 HERMON_RSRC_NAME(vmem_name, HERMON_PDHDL_VMEM);
1788
1789 /* Create new vmem arena for PD numbers */
1790 vmp = vmem_create(vmem_name, (caddr_t)1, info->swi_num, 1, NULL,
1791 NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
1792 if (vmp == NULL) {
1793 /* Unable to create vmem arena */
1794 info->swi_table_ptr = NULL;
1795 hermon_rsrc_sw_handles_fini(state, info);
1796 return (DDI_FAILURE);
1797 }
1798 rsrc_pool->rsrc_vmp = vmp;
1799
1800 return (DDI_SUCCESS);
1801 }
1802
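/*
 * A minimal sketch of how the identifier arena created above is consumed
 * (this is essentially what hermon_rsrc_pdhdl_alloc() does further below).
 * VMC_IDENTIFIER marks the arena as a plain number space rather than a
 * range of addresses, and the base of 1 keeps PD number 0 from ever being
 * handed out:
 *
 *	void	*tmp;
 *	uint32_t pdnum;
 *
 *	tmp = vmem_alloc(rsrc_pool->rsrc_vmp, 1, VM_SLEEP);
 *	pdnum = (uint32_t)(uintptr_t)tmp;
 *	...
 *	vmem_free(rsrc_pool->rsrc_vmp, (void *)(uintptr_t)pdnum, 1);
 */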
1803
1804 /*
1805 * hermon_rsrc_pd_handles_fini()
1806 * Context: Only called from attach() and/or detach() path contexts
1807 */
1808 static void
1809 hermon_rsrc_pd_handles_fini(hermon_state_t *state,
1810 hermon_rsrc_sw_hdl_info_t *info)
1811 {
1812 hermon_rsrc_pool_info_t *rsrc_pool;
1813
1814 ASSERT(state != NULL);
1815 ASSERT(info != NULL);
1816
1817 rsrc_pool = info->swi_rsrcpool;
1818
1819 /* Destroy the specially created vmem arena for PD numbers */
1820 vmem_destroy(rsrc_pool->rsrc_vmp);
1821
1822 /* Destroy the "hermon_sw_pd_t" kmem_cache */
1823 hermon_rsrc_sw_handles_fini(state, info);
1824 }
1825
1826
1827 /*
1828 * hermon_rsrc_mbox_alloc()
1829 * Context: Only called from attach() path context
1830 */
1831 static int
1832 hermon_rsrc_mbox_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1833 hermon_rsrc_t *hdl)
1834 {
1835 hermon_rsrc_priv_mbox_t *priv;
1836 caddr_t kaddr;
1837 size_t real_len, temp_len;
1838 int status;
1839
1840 ASSERT(pool_info != NULL);
1841 ASSERT(hdl != NULL);
1842
1843 /* Get the private pointer for the mailboxes */
1844 priv = pool_info->rsrc_private;
1845 ASSERT(priv != NULL);
1846
1847 /* Allocate a DMA handle for the mailbox */
1848 status = ddi_dma_alloc_handle(priv->pmb_dip, &priv->pmb_dmaattr,
1849 DDI_DMA_SLEEP, NULL, &hdl->hr_dmahdl);
1850 if (status != DDI_SUCCESS) {
1851 return (DDI_FAILURE);
1852 }
1853
1854 /* Allocate memory for the mailbox */
1855 temp_len = (num << pool_info->rsrc_shift);
1856 status = ddi_dma_mem_alloc(hdl->hr_dmahdl, temp_len,
1857 &priv->pmb_devaccattr, priv->pmb_xfer_mode, DDI_DMA_SLEEP,
1858 NULL, &kaddr, &real_len, &hdl->hr_acchdl);
1859 if (status != DDI_SUCCESS) {
1860 /* No more memory available for mailbox entries */
1861 ddi_dma_free_handle(&hdl->hr_dmahdl);
1862 return (DDI_FAILURE);
1863 }
1864
1865 hdl->hr_addr = (void *)kaddr;
1866 hdl->hr_len = (uint32_t)real_len;
1867
1868 return (DDI_SUCCESS);
1869 }
1870
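/*
 * The memory allocated above is not yet bound for DMA; a caller that
 * wants the hardware to see the mailbox must still bind the handle.  A
 * minimal sketch of such a binding, assuming read/write consistent
 * access (the actual binding is performed elsewhere in the driver's
 * mailbox setup code):
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t		 ccount;
 *	int		 status;
 *
 *	status = ddi_dma_addr_bind_handle(hdl->hr_dmahdl, NULL,
 *	    (caddr_t)hdl->hr_addr, hdl->hr_len,
 *	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *	if (status != DDI_DMA_MAPPED) {
 *		ddi_dma_mem_free(&hdl->hr_acchdl);
 *		ddi_dma_free_handle(&hdl->hr_dmahdl);
 *	}
 */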
1871
1872 /*
1873 * hermon_rsrc_mbox_free()
1874 * Context: Can be called from interrupt or base context.
1875 */
1876 static void
1877 hermon_rsrc_mbox_free(hermon_rsrc_t *hdl)
1878 {
1879 ASSERT(hdl != NULL);
1880
1881 /* Use ddi_dma_mem_free() to free up sys memory for mailbox */
1882 ddi_dma_mem_free(&hdl->hr_acchdl);
1883
1884 /* Free the DMA handle for the mailbox */
1885 ddi_dma_free_handle(&hdl->hr_dmahdl);
1886 }
1887
1888
1889 /*
1890 * hermon_rsrc_hw_entry_alloc()
1891 * Context: Can be called from interrupt or base context.
1892 */
1893 static int
1894 hermon_rsrc_hw_entry_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1895 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1896 {
1897 void *addr;
1898 uint64_t offset;
1899 uint32_t align;
1900 int status;
1901 int flag;
1902
1903 ASSERT(pool_info != NULL);
1904 ASSERT(hdl != NULL);
1905
1906 /*
1907 * Use vmem_xalloc() to get a properly aligned pointer (based on
1908 * the number requested) to the HW entry(ies). This handles the
1909 * cases (for special QPCs and for RDB entries) where we need more
1910 * than one and need to ensure that they are properly aligned.
1911 */
1912 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1913 hdl->hr_len = (num << pool_info->rsrc_shift);
1914 align = (num_align << pool_info->rsrc_shift);
1915
1916 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1917 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1918
1919 if (addr == NULL) {
1920 /* No more HW entries available */
1921 return (DDI_FAILURE);
1922 }
1923
1924 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1925
1926 /* Calculate vaddr and HW table index */
1927 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1928 hdl->hr_addr = addr; /* only used for mbox and uarpg resources */
1929 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1930
1931 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
1932 int num_to_hdl;
1933 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
1934
1935 num_to_hdl = (rsrc_type == HERMON_QPC ||
1936 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
1937
1938 /* confirm ICM is mapped, and allocate if necessary */
1939 status = hermon_rsrc_hw_entry_icm_confirm(pool_info, num, hdl,
1940 num_to_hdl);
1941 if (status != DDI_SUCCESS) {
1942 return (DDI_FAILURE);
1943 }
1944 hdl->hr_addr = NULL; /* not used for ICM resources */
1945 }
1946
1947 return (DDI_SUCCESS);
1948 }
1949
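/*
 * A short worked example of the sizing/alignment computed above, using
 * hypothetical values: 4 entries requested with a 4-entry alignment and
 * a 256-byte entry size (rsrc_shift of 8) gives
 *
 *	hr_len = 4 << 8 = 0x400 bytes
 *	align  = 4 << 8 = 0x400 bytes
 *
 * so vmem_xalloc() returns an address whose offset from rsrc_start is a
 * multiple of 0x400, and the resulting hr_indx is therefore a multiple
 * of four -- exactly the guarantee that multi-entry allocations such as
 * the special QPCs and RDB entries rely on.
 */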
1950
1951 /*
1952 * hermon_rsrc_hw_entry_reserve()
1953 * Context: Can be called from interrupt or base context.
1954 */
1955 int
1956 hermon_rsrc_hw_entry_reserve(hermon_rsrc_pool_info_t *pool_info, uint_t num,
1957 uint_t num_align, uint_t sleepflag, hermon_rsrc_t *hdl)
1958 {
1959 void *addr;
1960 uint64_t offset;
1961 uint32_t align;
1962 int flag;
1963
1964 ASSERT(pool_info != NULL);
1965 ASSERT(hdl != NULL);
1966 ASSERT(pool_info->rsrc_loc == HERMON_IN_ICM);
1967
1968 /*
1969 * Use vmem_xalloc() to get a properly aligned pointer (based on
1970 * the number requested) to the HW entry(ies). This handles the
1971 * cases (for special QPCs and for RDB entries) where we need more
1972 * than one and need to ensure that they are properly aligned.
1973 */
1974 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
1975 hdl->hr_len = (num << pool_info->rsrc_shift);
1976 align = (num_align << pool_info->rsrc_shift);
1977
1978 addr = vmem_xalloc(pool_info->rsrc_vmp, hdl->hr_len,
1979 align, 0, 0, NULL, NULL, flag | VM_FIRSTFIT);
1980
1981 if (addr == NULL) {
1982 /* No more HW entries available */
1983 return (DDI_FAILURE);
1984 }
1985
1986 hdl->hr_acchdl = NULL; /* only used for mbox resources */
1987
1988 /* Calculate vaddr and HW table index */
1989 offset = (uintptr_t)addr - (uintptr_t)pool_info->rsrc_start;
1990 hdl->hr_addr = NULL;
1991 hdl->hr_indx = offset >> pool_info->rsrc_shift;
1992
1993 /* ICM will be allocated and mapped if and when it gets used */
1994
1995 return (DDI_SUCCESS);
1996 }
1997
1998
1999 /*
2000 * hermon_rsrc_hw_entry_free()
2001 * Context: Can be called from interrupt or base context.
2002 */
2003 static void
2004 hermon_rsrc_hw_entry_free(hermon_rsrc_pool_info_t *pool_info,
2005 hermon_rsrc_t *hdl)
2006 {
2007 void *addr;
2008 uint64_t offset;
2009 int status;
2010
2011 ASSERT(pool_info != NULL);
2012 ASSERT(hdl != NULL);
2013
2014 /* Calculate the allocated address */
2015 offset = hdl->hr_indx << pool_info->rsrc_shift;
2016 addr = (void *)(uintptr_t)(offset + (uintptr_t)pool_info->rsrc_start);
2017
2018 /* Use vmem_xfree() to free up the HW table entry */
2019 vmem_xfree(pool_info->rsrc_vmp, addr, hdl->hr_len);
2020
2021 if (pool_info->rsrc_loc == HERMON_IN_ICM) {
2022 int num_to_hdl;
2023 hermon_rsrc_type_t rsrc_type = pool_info->rsrc_type;
2024
2025 num_to_hdl = (rsrc_type == HERMON_QPC ||
2026 rsrc_type == HERMON_CQC || rsrc_type == HERMON_SRQC);
2027
2028 /* free ICM references, and free ICM if required */
2029 status = hermon_rsrc_hw_entry_icm_free(pool_info, hdl,
2030 num_to_hdl);
2031 if (status != DDI_SUCCESS)
2032 HERMON_WARNING(pool_info->rsrc_state,
2033 "failure in hw_entry_free");
2034 }
2035 }
2036
2037 /*
2038 * hermon_rsrc_hw_entry_icm_confirm()
2039 * Context: Can be called from interrupt or base context.
2040 */
2041 static int
2042 hermon_rsrc_hw_entry_icm_confirm(hermon_rsrc_pool_info_t *pool_info, uint_t num,
2043 hermon_rsrc_t *hdl, int num_to_hdl)
2044 {
2045 hermon_state_t *state;
2046 hermon_icm_table_t *icm_table;
2047 uint8_t *bitmap;
2048 hermon_dma_info_t *dma_info;
2049 hermon_rsrc_type_t type;
2050 uint32_t rindx, span_offset;
2051 uint32_t span_avail;
2052 int num_backed;
2053 int status;
2054 uint32_t index1, index2;
2055
2056 /*
2057 * Utility routine responsible for ensuring that there is memory
2058 * backing the ICM resources allocated via hermon_rsrc_hw_entry_alloc().
2059 * Confirm existing ICM mapping(s) or allocate ICM memory for the
2060 * given hardware resources being allocated, and increment the
2061 * ICM DMA structure(s) reference count.
2062 *
2063 * We may be allocating more objects than can fit in a single span,
2064 * or more than will fit in the remaining contiguous memory (from
2065 * the offset indicated by hdl->hr_indx) in the span in question.
2066 * In either of these cases, we'll be breaking up our allocation
2067 * into multiple spans.
2068 */
2069 state = pool_info->rsrc_state;
2070 type = pool_info->rsrc_type;
2071 icm_table = &state->hs_icm[type];
2072
2073 rindx = hdl->hr_indx;
2074 hermon_index(index1, index2, rindx, icm_table, span_offset);
2075
2076 if (hermon_rsrc_verbose) {
2077 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_confirm: "
2078 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x): ",
2079 type, num, hdl->hr_len, index1, index2);
2080 }
2081
2082 mutex_enter(&icm_table->icm_table_lock);
2083 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2084 while (num) {
2085 #ifndef __lock_lint
2086 while (icm_table->icm_busy) {
2087 cv_wait(&icm_table->icm_table_cv,
2088 &icm_table->icm_table_lock);
2089 }
2090 #endif
2091 if (!HERMON_BMAP_BIT_ISSET(bitmap, index2)) {
2092 /* Allocate ICM for this span */
2093 icm_table->icm_busy = 1;
2094 mutex_exit(&icm_table->icm_table_lock);
2095 status = hermon_icm_alloc(state, type, index1, index2);
2096 mutex_enter(&icm_table->icm_table_lock);
2097 icm_table->icm_busy = 0;
2098 cv_broadcast(&icm_table->icm_table_cv);
2099 if (status != DDI_SUCCESS) {
2100 goto fail_alloc;
2101 }
2102 if (hermon_rsrc_verbose) {
2103 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2104 "hw_entry_icm_confirm: ALLOCATED ICM: "
2105 "type (0x%x) index (0x%x, 0x%x)",
2106 type, index1, index2);
2107 }
2108 }
2109
2110 /*
2111 * We need to increment the refcnt of this span by the
2112 * number of objects in this resource allocation that are
2113 * backed by this span. Given that the rsrc allocation is
2114 * contiguous, this value will be the number of objects in
2115 * the span from 'span_offset' onward, either up to a max
2116 * of the total number of objects, or the end of the span.
2117 * So, determine the number of objects that can be backed
2118 * by this span ('span_avail'), then determine the number
2119 * of backed resources.
2120 */
2121 span_avail = icm_table->span - span_offset;
2122 if (num > span_avail) {
2123 num_backed = span_avail;
2124 } else {
2125 num_backed = num;
2126 }
2127
2128 /*
2129 * Now that we know 'num_backed', increment the refcnt,
2130 * decrement the total number, and set 'span_offset' to
2131 * 0 in case we roll over into the next span.
2132 */
2133 dma_info[index2].icm_refcnt += num_backed;
2134 rindx += num_backed;
2135 num -= num_backed;
2136
2137 if (hermon_rsrc_verbose) {
2138 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) index "
2139 "(0x%x, 0x%x) num_backed (0x%x)",
2140 type, index1, index2, num_backed);
2141 IBTF_DPRINTF_L2("ALLOC", "ICM type (0x%x) refcnt now "
2142 "(0x%x) num_remaining (0x%x)", type,
2143 dma_info[index2].icm_refcnt, num);
2144 }
2145 if (num == 0)
2146 break;
2147
2148 hermon_index(index1, index2, rindx, icm_table, span_offset);
2149 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2150 }
2151 mutex_exit(&icm_table->icm_table_lock);
2152
2153 return (DDI_SUCCESS);
2154
2155 fail_alloc:
2156 /* JBDB */
2157 if (hermon_rsrc_verbose) {
2158 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_"
2159 "hw_entry_icm_confirm: FAILED ICM ALLOC: "
2160 "type (0x%x) num remaining (0x%x) index (0x%x, 0x%x) "
2161 "refcnt (0x%x)", type, num, index1, index2,
2162 icm_table->icm_dma[index1][index2].icm_refcnt);
2163 }
2164 IBTF_DPRINTF_L2("hermon", "WARNING: "
2165 "unimplemented fail code in hermon_rsrc_hw_entry_icm_confirm\n");
2166
2167 #if needs_work
2168 /* free refcnt's and any spans we've allocated */
2169 while (index-- != start) {
2170 /*
2171 * JBDB - This is a bit tricky. We need to
2172 * free refcnt's on any spans that we've
2173 * incremented them on, and completely free
2174 * spans that we've allocated. How do we do
2175 * this here? Does it need to be as involved
2176 * as the core of icm_free() below, or can
2177 * we leverage breadcrumbs somehow?
2178 */
2179 HERMON_WARNING(state, "unable to allocate ICM memory: "
2180 "UNIMPLEMENTED HANDLING!!");
2181 }
2182 #else
2183 cmn_err(CE_WARN,
2184 "unimplemented fail code in hermon_rsrc_hw_entry_icm_confirm\n");
2185 #endif
2186 mutex_exit(&icm_table->icm_table_lock);
2187
2188 HERMON_WARNING(state, "unable to allocate ICM memory");
2189 return (DDI_FAILURE);
2190 }
2191
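/*
 * A standalone sketch of the span walk above, assuming a hypothetical
 * span size of 0x1000 objects: an allocation of 0x1800 objects starting
 * at object index 0xf00 touches three spans and increments their
 * reference counts by 0x100, 0x1000 and 0x700 respectively.  Here
 * refcnt[] stands in for the per-span icm_refcnt fields and the modulo
 * arithmetic stands in for hermon_index():
 *
 *	uint32_t span = 0x1000, num = 0x1800, rindx = 0xf00;
 *
 *	while (num != 0) {
 *		uint32_t span_offset = rindx % span;
 *		uint32_t span_avail  = span - span_offset;
 *		uint32_t num_backed  = (num > span_avail) ?
 *		    span_avail : num;
 *
 *		refcnt[rindx / span] += num_backed;
 *		rindx += num_backed;
 *		num   -= num_backed;
 *	}
 */
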
2192 /*
2193 * hermon_rsrc_hw_entry_icm_free()
2194 * Context: Can be called from interrupt or base context.
2195 */
2196 static int
2197 hermon_rsrc_hw_entry_icm_free(hermon_rsrc_pool_info_t *pool_info,
2198 hermon_rsrc_t *hdl, int num_to_hdl)
2199 {
2200 hermon_state_t *state;
2201 hermon_icm_table_t *icm_table;
2202 uint8_t *bitmap;
2203 hermon_dma_info_t *dma_info;
2204 hermon_rsrc_type_t type;
2205 uint32_t span_offset;
2206 uint32_t span_remain;
2207 int num_freed;
2208 int num;
2209 uint32_t index1, index2, rindx;
2210
2211 /*
2212 * Utility routine responsible for freeing references to ICM
2213 * DMA spans, and freeing the ICM memory if necessary.
2214 *
2215 * We may have allocated objects in a single contiguous resource
2216 * allocation that reside in a number of spans, at any given
2217 * starting offset within a span. We therefore must determine
2218 * where this allocation starts, and then determine if we need
2219 * to free objects in more than one span.
2220 */
2221 state = pool_info->rsrc_state;
2222 type = pool_info->rsrc_type;
2223 icm_table = &state->hs_icm[type];
2224
2225 rindx = hdl->hr_indx;
2226 hermon_index(index1, index2, rindx, icm_table, span_offset);
2227 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2228
2229 /* determine the number of ICM objects in this allocation */
2230 num = hdl->hr_len >> pool_info->rsrc_shift;
2231
2232 if (hermon_rsrc_verbose) {
2233 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry_icm_free: "
2234 "type (0x%x) num (0x%x) length (0x%x) index (0x%x, 0x%x)",
2235 type, num, hdl->hr_len, index1, index2);
2236 }
2237 mutex_enter(&icm_table->icm_table_lock);
2238 while (num) {
2239 /*
2240 * As with the ICM confirm code above, we need to
2241 * decrement the ICM span(s) by the number of
2242 * resources being freed. So, determine the number
2243 * of objects that are backed in this span from
2244 * 'span_offset' onward, and set 'num_freed' to
2245 * the smaller of either that number ('span_remain'),
2246 * or the total number of objects being freed.
2247 */
2248 span_remain = icm_table->span - span_offset;
2249 if (num > span_remain) {
2250 num_freed = span_remain;
2251 } else {
2252 num_freed = num;
2253 }
2254
2255 /*
2256 * Now that we know 'num_freed', decrement the refcnt,
2257 * decrement the total number, and set 'span_offset' to
2258 * 0 in case we roll over into the next span.
2259 */
2260 dma_info[index2].icm_refcnt -= num_freed;
2261 num -= num_freed;
2262 rindx += num_freed;
2263
2264 if (hermon_rsrc_verbose) {
2265 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) index "
2266 "(0x%x, 0x%x) num_freed (0x%x)", type,
2267 index1, index2, num_freed);
2268 IBTF_DPRINTF_L2("FREE", "ICM type (0x%x) refcnt now "
2269 "(0x%x) num remaining (0x%x)", type,
2270 icm_table->icm_dma[index1][index2].icm_refcnt, num);
2271 }
2272
2273 #if HERMON_ICM_FREE_ENABLED
2274 /* If we've freed the last object in this span, free it */
2275 if ((index1 != 0 || index2 != 0) &&
2276 (dma_info[index2].icm_refcnt == 0)) {
2277 if (hermon_rsrc_verbose) {
2278 IBTF_DPRINTF_L2("hermon", "hermon_rsrc_hw_entry"
2279 "_icm_free: freeing ICM type (0x%x) index"
2280 " (0x%x, 0x%x)", type, index1, index2);
2281 }
2282 hermon_icm_free(state, type, index1, index2);
2283 }
2284 #endif
2285 if (num == 0)
2286 break;
2287
2288 hermon_index(index1, index2, rindx, icm_table, span_offset);
2289 hermon_bitmap(bitmap, dma_info, icm_table, index1, num_to_hdl);
2290 }
2291 mutex_exit(&icm_table->icm_table_lock);
2292
2293 return (DDI_SUCCESS);
2294 }
2295
2296
2297
2298 /*
2299 * hermon_rsrc_swhdl_alloc()
2300 * Context: Can be called from interrupt or base context.
2301 */
2302 static int
2303 hermon_rsrc_swhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2304 hermon_rsrc_t *hdl)
2305 {
2306 void *addr;
2307 int flag;
2308
2309 ASSERT(pool_info != NULL);
2310 ASSERT(hdl != NULL);
2311
2312 /* Allocate the software handle structure */
2313 flag = (sleepflag == HERMON_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2314 addr = kmem_cache_alloc(pool_info->rsrc_private, flag);
2315 if (addr == NULL) {
2316 return (DDI_FAILURE);
2317 }
2318 hdl->hr_len = pool_info->rsrc_quantum;
2319 hdl->hr_addr = addr;
2320
2321 return (DDI_SUCCESS);
2322 }
2323
2324
2325 /*
2326 * hermon_rsrc_swhdl_free()
2327 * Context: Can be called from interrupt or base context.
2328 */
2329 static void
2330 hermon_rsrc_swhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2331 {
2332 ASSERT(pool_info != NULL);
2333 ASSERT(hdl != NULL);
2334
2335 /* Free the software handle structure */
2336 kmem_cache_free(pool_info->rsrc_private, hdl->hr_addr);
2337 }
2338
2339
2340 /*
2341 * hermon_rsrc_pdhdl_alloc()
2342 * Context: Can be called from interrupt or base context.
2343 */
2344 static int
2345 hermon_rsrc_pdhdl_alloc(hermon_rsrc_pool_info_t *pool_info, uint_t sleepflag,
2346 hermon_rsrc_t *hdl)
2347 {
2348 hermon_pdhdl_t addr;
2349 void *tmpaddr;
2350 int flag, status;
2351
2352 ASSERT(pool_info != NULL);
2353 ASSERT(hdl != NULL);
2354
2355 /* Allocate the software handle */
2356 status = hermon_rsrc_swhdl_alloc(pool_info, sleepflag, hdl);
2357 if (status != DDI_SUCCESS) {
2358 return (DDI_FAILURE);
2359 }
2360 addr = (hermon_pdhdl_t)hdl->hr_addr;
2361 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*addr))
2362
2363 /* Allocate a PD number for the handle */
2364 flag = (sleepflag == HERMON_SLEEP) ? VM_SLEEP : VM_NOSLEEP;
2365 tmpaddr = vmem_alloc(pool_info->rsrc_vmp, 1, flag);
2366 if (tmpaddr == NULL) {
2367 /* No more PD number entries available */
2368 hermon_rsrc_swhdl_free(pool_info, hdl);
2369 return (DDI_FAILURE);
2370 }
2371 addr->pd_pdnum = (uint32_t)(uintptr_t)tmpaddr;
2372 addr->pd_rsrcp = hdl;
2373 hdl->hr_indx = addr->pd_pdnum;
2374
2375 return (DDI_SUCCESS);
2376 }
2377
2378
2379 /*
2380 * hermon_rsrc_pdhdl_free()
2381 * Context: Can be called from interrupt or base context.
2382 */
2383 static void
2384 hermon_rsrc_pdhdl_free(hermon_rsrc_pool_info_t *pool_info, hermon_rsrc_t *hdl)
2385 {
2386 ASSERT(pool_info != NULL);
2387 ASSERT(hdl != NULL);
2388
2389 /* Use vmem_free() to free up the PD number */
2390 vmem_free(pool_info->rsrc_vmp, (void *)(uintptr_t)hdl->hr_indx, 1);
2391
2392 /* Free the software handle structure */
2393 hermon_rsrc_swhdl_free(pool_info, hdl);
2394 }
2395
2396
2397 /*
2398 * hermon_rsrc_pdhdl_constructor()
2399 * Context: Can be called from interrupt or base context.
2400 */
2401 /* ARGSUSED */
2402 static int
2403 hermon_rsrc_pdhdl_constructor(void *pd, void *priv, int flags)
2404 {
2405 hermon_pdhdl_t pdhdl;
2406 hermon_state_t *state;
2407
2408 pdhdl = (hermon_pdhdl_t)pd;
2409 state = (hermon_state_t *)priv;
2410
2411 mutex_init(&pdhdl->pd_lock, NULL, MUTEX_DRIVER,
2412 DDI_INTR_PRI(state->hs_intrmsi_pri));
2413
2414 return (DDI_SUCCESS);
2415 }
2416
2417
2418 /*
2419 * hermon_rsrc_pdhdl_destructor()
2420 * Context: Can be called from interrupt or base context.
2421 */
2422 /* ARGSUSED */
2423 static void
2424 hermon_rsrc_pdhdl_destructor(void *pd, void *priv)
2425 {
2426 hermon_pdhdl_t pdhdl;
2427
2428 pdhdl = (hermon_pdhdl_t)pd;
2429
2430 mutex_destroy(&pdhdl->pd_lock);
2431 }
2432
2433
2434 /*
2435 * hermon_rsrc_cqhdl_constructor()
2436 * Context: Can be called from interrupt or base context.
2437 */
2438 /* ARGSUSED */
2439 static int
2440 hermon_rsrc_cqhdl_constructor(void *cq, void *priv, int flags)
2441 {
2442 hermon_cqhdl_t cqhdl;
2443 hermon_state_t *state;
2444
2445 cqhdl = (hermon_cqhdl_t)cq;
2446 state = (hermon_state_t *)priv;
2447
2448 mutex_init(&cqhdl->cq_lock, NULL, MUTEX_DRIVER,
2449 DDI_INTR_PRI(state->hs_intrmsi_pri));
2450
2451 return (DDI_SUCCESS);
2452 }
2453
2454
2455 /*
2456 * hermon_rsrc_cqhdl_destructor()
2457 * Context: Can be called from interrupt or base context.
2458 */
2459 /* ARGSUSED */
2460 static void
2461 hermon_rsrc_cqhdl_destructor(void *cq, void *priv)
2462 {
2463 hermon_cqhdl_t cqhdl;
2464
2465 cqhdl = (hermon_cqhdl_t)cq;
2466
2467 mutex_destroy(&cqhdl->cq_lock);
2468 }
2469
2470
2471 /*
2472 * hermon_rsrc_qphdl_constructor()
2473 * Context: Can be called from interrupt or base context.
2474 */
2475 /* ARGSUSED */
2476 static int
2477 hermon_rsrc_qphdl_constructor(void *qp, void *priv, int flags)
2478 {
2479 hermon_qphdl_t qphdl;
2480 hermon_state_t *state;
2481
2482 qphdl = (hermon_qphdl_t)qp;
2483 state = (hermon_state_t *)priv;
2484
2485 mutex_init(&qphdl->qp_lock, NULL, MUTEX_DRIVER,
2486 DDI_INTR_PRI(state->hs_intrmsi_pri));
2487
2488 return (DDI_SUCCESS);
2489 }
2490
2491
2492 /*
2493 * hermon_rsrc_qphdl_destructor()
2494 * Context: Can be called from interrupt or base context.
2495 */
2496 /* ARGSUSED */
2497 static void
2498 hermon_rsrc_qphdl_destructor(void *qp, void *priv)
2499 {
2500 hermon_qphdl_t qphdl;
2501
2502 qphdl = (hermon_qphdl_t)qp;
2503
2504 mutex_destroy(&qphdl->qp_lock);
2505 }
2506
2507
2508 /*
2509 * hermon_rsrc_srqhdl_constructor()
2510 * Context: Can be called from interrupt or base context.
2511 */
2512 /* ARGSUSED */
2513 static int
2514 hermon_rsrc_srqhdl_constructor(void *srq, void *priv, int flags)
2515 {
2516 hermon_srqhdl_t srqhdl;
2517 hermon_state_t *state;
2518
2519 srqhdl = (hermon_srqhdl_t)srq;
2520 state = (hermon_state_t *)priv;
2521
2522 mutex_init(&srqhdl->srq_lock, NULL, MUTEX_DRIVER,
2523 DDI_INTR_PRI(state->hs_intrmsi_pri));
2524
2525 return (DDI_SUCCESS);
2526 }
2527
2528
2529 /*
2530 * hermon_rsrc_srqhdl_destructor()
2531 * Context: Can be called from interrupt or base context.
2532 */
2533 /* ARGSUSED */
2534 static void
2535 hermon_rsrc_srqhdl_destructor(void *srq, void *priv)
2536 {
2537 hermon_srqhdl_t srqhdl;
2538
2539 srqhdl = (hermon_srqhdl_t)srq;
2540
2541 mutex_destroy(&srqhdl->srq_lock);
2542 }
2543
2544
2545 /*
2546 * hermon_rsrc_refcnt_constructor()
2547 * Context: Can be called from interrupt or base context.
2548 */
2549 /* ARGSUSED */
2550 static int
2551 hermon_rsrc_refcnt_constructor(void *rc, void *priv, int flags)
2552 {
2553 hermon_sw_refcnt_t *refcnt;
2554 hermon_state_t *state;
2555
2556 refcnt = (hermon_sw_refcnt_t *)rc;
2557 state = (hermon_state_t *)priv;
2558
2559 mutex_init(&refcnt->swrc_lock, NULL, MUTEX_DRIVER,
2560 DDI_INTR_PRI(state->hs_intrmsi_pri));
2561
2562 return (DDI_SUCCESS);
2563 }
2564
2565
2566 /*
2567 * hermon_rsrc_refcnt_destructor()
2568 * Context: Can be called from interrupt or base context.
2569 */
2570 /* ARGSUSED */
2571 static void
2572 hermon_rsrc_refcnt_destructor(void *rc, void *priv)
2573 {
2574 hermon_sw_refcnt_t *refcnt;
2575
2576 refcnt = (hermon_sw_refcnt_t *)rc;
2577
2578 mutex_destroy(&refcnt->swrc_lock);
2579 }
2580
2581
2582 /*
2583 * hermon_rsrc_ahhdl_constructor()
2584 * Context: Can be called from interrupt or base context.
2585 */
2586 /* ARGSUSED */
2587 static int
2588 hermon_rsrc_ahhdl_constructor(void *ah, void *priv, int flags)
2589 {
2590 hermon_ahhdl_t ahhdl;
2591 hermon_state_t *state;
2592
2593 ahhdl = (hermon_ahhdl_t)ah;
2594 state = (hermon_state_t *)priv;
2595
2596 mutex_init(&ahhdl->ah_lock, NULL, MUTEX_DRIVER,
2597 DDI_INTR_PRI(state->hs_intrmsi_pri));
2598 return (DDI_SUCCESS);
2599 }
2600
2601
2602 /*
2603 * hermon_rsrc_ahhdl_destructor()
2604 * Context: Can be called from interrupt or base context.
2605 */
2606 /* ARGSUSED */
2607 static void
2608 hermon_rsrc_ahhdl_destructor(void *ah, void *priv)
2609 {
2610 hermon_ahhdl_t ahhdl;
2611
2612 ahhdl = (hermon_ahhdl_t)ah;
2613
2614 mutex_destroy(&ahhdl->ah_lock);
2615 }
2616
2617
2618 /*
2619 * hermon_rsrc_mrhdl_constructor()
2620 * Context: Can be called from interrupt or base context.
2621 */
2622 /* ARGSUSED */
2623 static int
2624 hermon_rsrc_mrhdl_constructor(void *mr, void *priv, int flags)
2625 {
2626 hermon_mrhdl_t mrhdl;
2627 hermon_state_t *state;
2628
2629 mrhdl = (hermon_mrhdl_t)mr;
2630 state = (hermon_state_t *)priv;
2631
2632 mutex_init(&mrhdl->mr_lock, NULL, MUTEX_DRIVER,
2633 DDI_INTR_PRI(state->hs_intrmsi_pri));
2634
2635 return (DDI_SUCCESS);
2636 }
2637
2638
2639 /*
2640 * hermon_rsrc_mrhdl_destructor()
2641 * Context: Can be called from interrupt or base context.
2642 */
2643 /* ARGSUSED */
2644 static void
2645 hermon_rsrc_mrhdl_destructor(void *mr, void *priv)
2646 {
2647 hermon_mrhdl_t mrhdl;
2648
2649 mrhdl = (hermon_mrhdl_t)mr;
2650
2651 mutex_destroy(&mrhdl->mr_lock);
2652 }
2653
2654
2655 /*
2656 * hermon_rsrc_mcg_entry_get_size()
2657 */
2658 static int
2659 hermon_rsrc_mcg_entry_get_size(hermon_state_t *state, uint_t *mcg_size_shift)
2660 {
2661 uint_t num_qp_per_mcg, max_qp_per_mcg, log2;
2662
2663 /*
2664 * Round the configured number of QP per MCG to next larger
2665 * power-of-2 size and update.
2666 */
2667 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg + 8;
2668 log2 = highbit(num_qp_per_mcg);
2669 if ((num_qp_per_mcg & (num_qp_per_mcg - 1)) == 0) {
2670 log2 = log2 - 1;
2671 }
2672 state->hs_cfg_profile->cp_num_qp_per_mcg = (1 << log2) - 8;
2673
2674 /* Now make sure number of QP per MCG makes sense */
2675 num_qp_per_mcg = state->hs_cfg_profile->cp_num_qp_per_mcg;
2676 max_qp_per_mcg = (1 << state->hs_devlim.log_max_qp_mcg);
2677 if (num_qp_per_mcg > max_qp_per_mcg) {
2678 return (DDI_FAILURE);
2679 }
2680
2681 /* Return the (shift) size of an individual MCG HW entry */
2682 *mcg_size_shift = log2 + 2;
2683
2684 return (DDI_SUCCESS);
2685 }
2686
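/*
 * A worked example of the rounding above, using a hypothetical configured
 * value of cp_num_qp_per_mcg == 56: adding the 8 slots the code sets aside
 * (which correspond to the MCG entry header) gives 64, already a power of
 * two, so highbit(64) == 7 and log2 becomes 6.  The profile value is then
 * rewritten as (1 << 6) - 8 == 56, and since each slot is 4 bytes wide the
 * returned shift is log2 + 2 == 8, i.e. a 256-byte MCG hardware entry.
 */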