1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28 */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31
32 /*
33 * PSARC/2007/453 MSI-X interrupt limit override
34 * (This PSARC case is limited to MSI-X vectors
35 * and SPARC platforms only).
36 */
37 uint32_t hxge_msi_enable = 2;
38
39 /*
40  * Globals: tunable parameters (settable via /etc/system or adb);
41  * an illustrative /etc/system example follows the declarations below.
42  */
43 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
44 uint32_t hxge_rbr_spare_size = 0;
45 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
46 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
47 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
48 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
49 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
50 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
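/*
 * Illustrative /etc/system entries for overriding the tunables above.
 * The variable names come from this file; the values shown are examples
 * only, not recommendations, and a reboot is required for /etc/system
 * changes to take effect:
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_rcr_size = 2048
 *	set hxge:hxge_bcopy_thresh = 1024
 */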
51
52 static hxge_os_mutex_t hxgedebuglock;
53 static int hxge_debug_init = 0;
54
55 /*
56  * Debugging flags (an example of changing the policy follows below):
57  *	hxge_no_tx_lb	  : when non-zero, disables transmit load balancing
58  *	hxge_tx_lb_policy : 0 - TCP/UDP port (default)
59  *			    1 - from the stack
60  *			    2 - destination IP address
61  */
62 uint32_t hxge_no_tx_lb = 0;
63 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
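/*
 * Example only: the load-balancing policy can be patched on a live system
 * with mdb (writing the new policy value to the variable above), or set
 * persistently with an /etc/system "set hxge:hxge_tx_lb_policy" entry.
 *
 *	# echo "hxge_tx_lb_policy/W 2" | mdb -kw
 *
 * selects policy 2 (destination IP address).
 */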
64
65 /*
66  * Tunables to manage the receive buffer blocks.
67  *
68  * hxge_rx_threshold_hi:	copy all buffers.
69  * hxge_rx_bcopy_size_type:	receive buffer block size type.
70  * hxge_rx_threshold_lo:	copy only buffers up to the tunable block size type.
71  */
72 #if defined(__sparc)
73 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
74 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_4;
75 #else
76 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_NONE;
77 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_NONE;
78 #endif
79 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
80
81 rtrace_t hpi_rtracebuf;
82
83 /*
84 * Function Prototypes
85 */
86 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
87 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
88 static void hxge_unattach(p_hxge_t);
89
90 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
91
92 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
93 static void hxge_destroy_mutexes(p_hxge_t);
94
95 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
96 static void hxge_unmap_regs(p_hxge_t hxgep);
97
98 static hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
99 static void hxge_remove_intrs(p_hxge_t hxgep);
100 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
101 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
102 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
103 static void hxge_intrs_enable(p_hxge_t hxgep);
104 static void hxge_intrs_disable(p_hxge_t hxgep);
105 static void hxge_suspend(p_hxge_t);
106 static hxge_status_t hxge_resume(p_hxge_t);
107 static hxge_status_t hxge_setup_dev(p_hxge_t);
108 static void hxge_destroy_dev(p_hxge_t);
109 static hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
110 static void hxge_free_mem_pool(p_hxge_t);
111 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
112 static void hxge_free_rx_mem_pool(p_hxge_t);
113 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
114 static void hxge_free_tx_mem_pool(p_hxge_t);
115 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
116 struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
117 p_hxge_dma_common_t);
118 static void hxge_dma_mem_free(p_hxge_dma_common_t);
119 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
120 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
121 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
122 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
123 p_hxge_dma_common_t *, struct ddi_dma_attr *, size_t);
124 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
125 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
126 p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
127 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
128 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
129 p_hxge_dma_common_t *, size_t);
130 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
131 static int hxge_init_common_dev(p_hxge_t);
132 static void hxge_uninit_common_dev(p_hxge_t);
133
134 /*
135 * The next declarations are for the GLDv3 interface.
136 */
137 static int hxge_m_start(void *);
138 static void hxge_m_stop(void *);
139 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
140 static int hxge_m_promisc(void *, boolean_t);
141 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
142 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
143
144 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
145 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
146 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
147 uint_t pr_valsize, const void *pr_val);
148 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
149 uint_t pr_valsize, void *pr_val);
150 static void hxge_m_propinfo(void *barg, const char *pr_name,
151 mac_prop_id_t pr_num, mac_prop_info_handle_t mph);
152 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
153 uint_t pr_valsize, const void *pr_val);
154 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
155 uint_t pr_valsize, void *pr_val);
156 static void hxge_link_poll(void *arg);
157 static void hxge_link_update(p_hxge_t hxge, link_state_t state);
158 static void hxge_msix_init(p_hxge_t hxgep);
159
160 char *hxge_priv_props[] = {
161 "_rxdma_intr_time",
162 "_rxdma_intr_pkts",
163 "_class_opt_ipv4_tcp",
164 "_class_opt_ipv4_udp",
165 "_class_opt_ipv4_ah",
166 "_class_opt_ipv4_sctp",
167 "_class_opt_ipv6_tcp",
168 "_class_opt_ipv6_udp",
169 "_class_opt_ipv6_ah",
170 "_class_opt_ipv6_sctp",
171 NULL
172 };
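/*
 * These names are exported as private (leading-underscore) link properties.
 * As an illustration (the link name hxge0 is hypothetical), they can be
 * inspected or changed with dladm:
 *
 *	# dladm show-linkprop -p _rxdma_intr_time hxge0
 *	# dladm set-linkprop -p _rxdma_intr_time=<value> hxge0
 */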
173
174 #define HXGE_MAX_PRIV_PROPS \
175 	(sizeof (hxge_priv_props) / sizeof (hxge_priv_props[0]))
176
177 #define HXGE_MAGIC 0x4E584745UL
178 #define MAX_DUMP_SZ 256
179
180 #define HXGE_M_CALLBACK_FLAGS \
181 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
182
183 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
184
185 static mac_callbacks_t hxge_m_callbacks = {
186 HXGE_M_CALLBACK_FLAGS,
187 hxge_m_stat,
188 hxge_m_start,
189 hxge_m_stop,
190 hxge_m_promisc,
191 hxge_m_multicst,
192 NULL,
193 NULL,
194 NULL,
195 hxge_m_ioctl,
196 hxge_m_getcapab,
197 NULL,
198 NULL,
199 hxge_m_setprop,
200 hxge_m_getprop,
201 hxge_m_propinfo
202 };
203
204 /* PSARC/2007/453 MSI-X interrupt limit override. */
205 #define HXGE_MSIX_REQUEST_10G 8
206 static int hxge_create_msi_property(p_hxge_t);
207
208 /* Enable debug messages as necessary. */
209 uint64_t hxge_debug_level = 0;
210
211 /*
212 * This list contains the instance structures for the Hydra
213 * devices present in the system. The lock exists to guarantee
214 * mutually exclusive access to the list.
215 */
216 void *hxge_list = NULL;
217 void *hxge_hw_list = NULL;
218 hxge_os_mutex_t hxge_common_lock;
219
220 extern uint64_t hpi_debug_level;
221
222 extern hxge_status_t hxge_ldgv_init();
223 extern hxge_status_t hxge_ldgv_uninit();
224 extern hxge_status_t hxge_intr_ldgv_init();
225 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
226 ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
227 extern void hxge_fm_fini(p_hxge_t hxgep);
228
229 /*
230 * Count used to maintain the number of buffers being used
231 * by Hydra instances and loaned up to the upper layers.
232 */
233 uint32_t hxge_mblks_pending = 0;
234
235 /*
236 * Device register access attributes for PIO.
237 */
238 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
239 DDI_DEVICE_ATTR_V0,
240 DDI_STRUCTURE_LE_ACC,
241 DDI_STRICTORDER_ACC,
242 };
243
244 /*
245 * Device descriptor access attributes for DMA.
246 */
247 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
248 DDI_DEVICE_ATTR_V0,
249 DDI_STRUCTURE_LE_ACC,
250 DDI_STRICTORDER_ACC
251 };
252
253 /*
254 * Device buffer access attributes for DMA.
255 */
256 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
257 DDI_DEVICE_ATTR_V0,
258 DDI_STRUCTURE_BE_ACC,
259 DDI_STRICTORDER_ACC
260 };
261
262 ddi_dma_attr_t hxge_rx_rcr_desc_dma_attr = {
263 DMA_ATTR_V0, /* version number. */
264 0, /* low address */
265 0xffffffffffffffff, /* high address */
266 0xffffffffffffffff, /* address counter max */
267 0x80000, /* alignment */
268 0xfc00fc, /* dlim_burstsizes */
269 0x1, /* minimum transfer size */
270 0xffffffffffffffff, /* maximum transfer size */
271 0xffffffffffffffff, /* maximum segment size */
272 1, /* scatter/gather list length */
273 (unsigned int)1, /* granularity */
274 0 /* attribute flags */
275 };
276
277 ddi_dma_attr_t hxge_tx_desc_dma_attr = {
278 DMA_ATTR_V0, /* version number. */
279 0, /* low address */
280 0xffffffffffffffff, /* high address */
281 0xffffffffffffffff, /* address counter max */
282 0x100000, /* alignment */
283 0xfc00fc, /* dlim_burstsizes */
284 0x1, /* minimum transfer size */
285 0xffffffffffffffff, /* maximum transfer size */
286 0xffffffffffffffff, /* maximum segment size */
287 1, /* scatter/gather list length */
288 (unsigned int)1, /* granularity */
289 0 /* attribute flags */
290 };
291
292 ddi_dma_attr_t hxge_rx_rbr_desc_dma_attr = {
293 DMA_ATTR_V0, /* version number. */
294 0, /* low address */
295 0xffffffffffffffff, /* high address */
296 0xffffffffffffffff, /* address counter max */
297 0x40000, /* alignment */
298 0xfc00fc, /* dlim_burstsizes */
299 0x1, /* minimum transfer size */
300 0xffffffffffffffff, /* maximum transfer size */
301 0xffffffffffffffff, /* maximum segment size */
302 1, /* scatter/gather list length */
303 (unsigned int)1, /* granularity */
304 0 /* attribute flags */
305 };
306
307 ddi_dma_attr_t hxge_rx_mbox_dma_attr = {
308 DMA_ATTR_V0, /* version number. */
309 0, /* low address */
310 0xffffffffffffffff, /* high address */
311 0xffffffffffffffff, /* address counter max */
312 #if defined(_BIG_ENDIAN)
313 0x2000, /* alignment */
314 #else
315 0x1000, /* alignment */
316 #endif
317 0xfc00fc, /* dlim_burstsizes */
318 0x1, /* minimum transfer size */
319 0xffffffffffffffff, /* maximum transfer size */
320 0xffffffffffffffff, /* maximum segment size */
321 5, /* scatter/gather list length */
322 (unsigned int)1, /* granularity */
323 0 /* attribute flags */
324 };
325
326 ddi_dma_attr_t hxge_tx_dma_attr = {
327 DMA_ATTR_V0, /* version number. */
328 0, /* low address */
329 0xffffffffffffffff, /* high address */
330 0xffffffffffffffff, /* address counter max */
331 #if defined(_BIG_ENDIAN)
332 0x2000, /* alignment */
333 #else
334 0x1000, /* alignment */
335 #endif
336 0xfc00fc, /* dlim_burstsizes */
337 0x1, /* minimum transfer size */
338 0xffffffffffffffff, /* maximum transfer size */
339 0xffffffffffffffff, /* maximum segment size */
340 5, /* scatter/gather list length */
341 (unsigned int)1, /* granularity */
342 0 /* attribute flags */
343 };
344
345 ddi_dma_attr_t hxge_rx_dma_attr = {
346 DMA_ATTR_V0, /* version number. */
347 0, /* low address */
348 0xffffffffffffffff, /* high address */
349 0xffffffffffffffff, /* address counter max */
350 0x10000, /* alignment */
351 0xfc00fc, /* dlim_burstsizes */
352 0x1, /* minimum transfer size */
353 0xffffffffffffffff, /* maximum transfer size */
354 0xffffffffffffffff, /* maximum segment size */
355 1, /* scatter/gather list length */
356 (unsigned int)1, /* granularity */
357 DDI_DMA_RELAXED_ORDERING /* attribute flags */
358 };
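/*
 * Note: unlike the descriptor attributes above, the receive buffer
 * attribute sets DDI_DMA_RELAXED_ORDERING, which allows PCIe write
 * ordering to be relaxed for these buffers and can improve receive
 * throughput on platforms that support it.
 */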
359
360 ddi_dma_lim_t hxge_dma_limits = {
361 (uint_t)0, /* dlim_addr_lo */
362 (uint_t)0xffffffff, /* dlim_addr_hi */
363 (uint_t)0xffffffff, /* dlim_cntr_max */
364 (uint_t)0xfc00fc, /* dlim_burstsizes for 32 and 64 bit xfers */
365 0x1, /* dlim_minxfer */
366 1024 /* dlim_speed */
367 };
368
369 dma_method_t hxge_force_dma = DVMA;
370
371 /*
372  * DMA chunk sizes.
373  *
374  * Try to allocate the largest possible size so that fewer DMA chunks
375  * need to be managed (see the illustrative example below).
376  */
377 size_t alloc_sizes[] = {
378 0x1000, 0x2000, 0x4000, 0x8000,
379 0x10000, 0x20000, 0x40000, 0x80000,
380 0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
381 };
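/*
 * Illustrative example (not itself part of the allocation code):
 * hxge_alloc_rx_buf_dma() below scans this table for the smallest entry
 * that covers the requested size.  For a 0x180000-byte request it starts
 * with the 0x200000 entry; if that allocation fails it steps down and
 * accumulates 0x100000-byte chunks until the request is satisfied.
 */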
382
383 /*
384  * attach(9E) entry point: attach or resume a Hydra device instance.
385  */
386 static int
387 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
388 {
389 p_hxge_t hxgep = NULL;
390 int instance;
391 int status = DDI_SUCCESS;
392 int i;
393
394 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
395
396 /*
397 * Get the device instance since we'll need to setup or retrieve a soft
398 * state for this instance.
399 */
400 instance = ddi_get_instance(dip);
401
402 switch (cmd) {
403 case DDI_ATTACH:
404 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
405 break;
406
407 case DDI_RESUME:
408 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
409 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
410 if (hxgep == NULL) {
411 status = DDI_FAILURE;
412 break;
413 }
414 if (hxgep->dip != dip) {
415 status = DDI_FAILURE;
416 break;
417 }
418 if (hxgep->suspended == DDI_PM_SUSPEND) {
419 status = ddi_dev_is_needed(hxgep->dip, 0, 1);
420 } else {
421 (void) hxge_resume(hxgep);
422 }
423 goto hxge_attach_exit;
424
425 case DDI_PM_RESUME:
426 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
427 hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
428 if (hxgep == NULL) {
429 status = DDI_FAILURE;
430 break;
431 }
432 if (hxgep->dip != dip) {
433 status = DDI_FAILURE;
434 break;
435 }
436 (void) hxge_resume(hxgep);
437 goto hxge_attach_exit;
438
439 default:
440 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
441 status = DDI_FAILURE;
442 goto hxge_attach_exit;
443 }
444
445 if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
446 status = DDI_FAILURE;
447 HXGE_ERROR_MSG((hxgep, DDI_CTL,
448 "ddi_soft_state_zalloc failed"));
449 goto hxge_attach_exit;
450 }
451
452 hxgep = ddi_get_soft_state(hxge_list, instance);
453 if (hxgep == NULL) {
454 status = HXGE_ERROR;
455 HXGE_ERROR_MSG((hxgep, DDI_CTL,
456 "ddi_get_soft_state failed"));
457 goto hxge_attach_fail2;
458 }
459
460 hxgep->drv_state = 0;
461 hxgep->dip = dip;
462 hxgep->instance = instance;
463 hxgep->p_dip = ddi_get_parent(dip);
464 hxgep->hxge_debug_level = hxge_debug_level;
465 hpi_debug_level = hxge_debug_level;
466
467 /*
468 	 * Initialize the MMAC structure.
469 */
470 (void) hxge_pfc_num_macs_get(hxgep, &hxgep->mmac.total);
471 hxgep->mmac.available = hxgep->mmac.total;
472 for (i = 0; i < hxgep->mmac.total; i++) {
473 hxgep->mmac.addrs[i].set = B_FALSE;
474 hxgep->mmac.addrs[i].primary = B_FALSE;
475 }
476
477 hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
478 &hxge_rx_dma_attr);
479
480 status = hxge_map_regs(hxgep);
481 if (status != HXGE_OK) {
482 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
483 goto hxge_attach_fail3;
484 }
485
486 status = hxge_init_common_dev(hxgep);
487 if (status != HXGE_OK) {
488 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
489 "hxge_init_common_dev failed"));
490 goto hxge_attach_fail4;
491 }
492
493 /*
494 * Setup the Ndd parameters for this instance.
495 */
496 hxge_init_param(hxgep);
497
498 /*
499 * Setup Register Tracing Buffer.
500 */
501 hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
502
503 /* init stats ptr */
504 hxge_init_statsp(hxgep);
505
506 status = hxge_setup_mutexes(hxgep);
507 if (status != HXGE_OK) {
508 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
509 goto hxge_attach_fail;
510 }
511
512 /* Scrub the MSI-X memory */
513 hxge_msix_init(hxgep);
514
515 status = hxge_get_config_properties(hxgep);
516 if (status != HXGE_OK) {
517 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
518 goto hxge_attach_fail;
519 }
520
521 /*
522 * Setup the Kstats for the driver.
523 */
524 hxge_setup_kstats(hxgep);
525 hxge_setup_param(hxgep);
526
527 status = hxge_setup_system_dma_pages(hxgep);
528 if (status != HXGE_OK) {
529 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
530 goto hxge_attach_fail;
531 }
532
533 hxge_hw_id_init(hxgep);
534 hxge_hw_init_niu_common(hxgep);
535
536 status = hxge_setup_dev(hxgep);
537 if (status != DDI_SUCCESS) {
538 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
539 goto hxge_attach_fail;
540 }
541
542 status = hxge_add_intrs(hxgep);
543 if (status != DDI_SUCCESS) {
544 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
545 goto hxge_attach_fail;
546 }
547
548 /*
549 * Enable interrupts.
550 */
551 hxge_intrs_enable(hxgep);
552
553 if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
554 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
555 "unable to register to mac layer (%d)", status));
556 goto hxge_attach_fail;
557 }
558 mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
559
560 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
561 instance));
562
563 goto hxge_attach_exit;
564
565 hxge_attach_fail:
566 hxge_unattach(hxgep);
567 goto hxge_attach_fail1;
568
569 hxge_attach_fail5:
570 /*
571 * Tear down the ndd parameters setup.
572 */
573 hxge_destroy_param(hxgep);
574
575 /*
576 * Tear down the kstat setup.
577 */
578 hxge_destroy_kstats(hxgep);
579
580 hxge_attach_fail4:
581 if (hxgep->hxge_hw_p) {
582 hxge_uninit_common_dev(hxgep);
583 hxgep->hxge_hw_p = NULL;
584 }
585 hxge_attach_fail3:
586 /*
587 * Unmap the register setup.
588 */
589 hxge_unmap_regs(hxgep);
590
591 hxge_fm_fini(hxgep);
592
593 hxge_attach_fail2:
594 ddi_soft_state_free(hxge_list, hxgep->instance);
595
596 hxge_attach_fail1:
597 if (status != HXGE_OK)
598 status = (HXGE_ERROR | HXGE_DDI_FAILED);
599 hxgep = NULL;
600
601 hxge_attach_exit:
602 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
603 status));
604
605 return (status);
606 }
607
608 static int
609 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
610 {
611 int status = DDI_SUCCESS;
612 int instance;
613 p_hxge_t hxgep = NULL;
614
615 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
616 instance = ddi_get_instance(dip);
617 hxgep = ddi_get_soft_state(hxge_list, instance);
618 if (hxgep == NULL) {
619 status = DDI_FAILURE;
620 goto hxge_detach_exit;
621 }
622
623 switch (cmd) {
624 case DDI_DETACH:
625 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
626 break;
627
628 case DDI_PM_SUSPEND:
629 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
630 hxgep->suspended = DDI_PM_SUSPEND;
631 hxge_suspend(hxgep);
632 break;
633
634 case DDI_SUSPEND:
635 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
636 if (hxgep->suspended != DDI_PM_SUSPEND) {
637 hxgep->suspended = DDI_SUSPEND;
638 hxge_suspend(hxgep);
639 }
640 break;
641
642 default:
643 status = DDI_FAILURE;
644 break;
645 }
646
647 if (cmd != DDI_DETACH)
648 goto hxge_detach_exit;
649
650 /*
651 * Stop the xcvr polling.
652 */
653 hxgep->suspended = cmd;
654
655 if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
656 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
657 "<== hxge_detach status = 0x%08X", status));
658 return (DDI_FAILURE);
659 }
660 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
661 "<== hxge_detach (mac_unregister) status = 0x%08X", status));
662
663 hxge_unattach(hxgep);
664 hxgep = NULL;
665
666 hxge_detach_exit:
667 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
668 status));
669
670 return (status);
671 }
672
673 static void
674 hxge_unattach(p_hxge_t hxgep)
675 {
676 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
677
678 if (hxgep == NULL || hxgep->dev_regs == NULL) {
679 return;
680 }
681
682 if (hxgep->hxge_hw_p) {
683 hxge_uninit_common_dev(hxgep);
684 hxgep->hxge_hw_p = NULL;
685 }
686
687 if (hxgep->hxge_timerid) {
688 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
689 hxgep->hxge_timerid = 0;
690 }
691
692 /* Stop interrupts. */
693 hxge_intrs_disable(hxgep);
694
695 /* Stop any further interrupts. */
696 hxge_remove_intrs(hxgep);
697
698 /* Stop the device and free resources. */
699 hxge_destroy_dev(hxgep);
700
701 /* Tear down the ndd parameters setup. */
702 hxge_destroy_param(hxgep);
703
704 /* Tear down the kstat setup. */
705 hxge_destroy_kstats(hxgep);
706
707 /*
708 * Remove the list of ndd parameters which were setup during attach.
709 */
710 if (hxgep->dip) {
711 HXGE_DEBUG_MSG((hxgep, OBP_CTL,
712 " hxge_unattach: remove all properties"));
713 (void) ddi_prop_remove_all(hxgep->dip);
714 }
715
716 /*
717 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
718 * previous state before unmapping the registers.
719 */
720 HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
721 HXGE_DELAY(1000);
722
723 /*
724 * Unmap the register setup.
725 */
726 hxge_unmap_regs(hxgep);
727
728 hxge_fm_fini(hxgep);
729
730 /* Destroy all mutexes. */
731 hxge_destroy_mutexes(hxgep);
732
733 /*
734 * Free the soft state data structures allocated with this instance.
735 */
736 ddi_soft_state_free(hxge_list, hxgep->instance);
737
738 HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
739 }
740
741 static hxge_status_t
742 hxge_map_regs(p_hxge_t hxgep)
743 {
744 int ddi_status = DDI_SUCCESS;
745 p_dev_regs_t dev_regs;
746
747 #ifdef HXGE_DEBUG
748 char *sysname;
749 #endif
750
751 off_t regsize;
752 hxge_status_t status = HXGE_OK;
753 int nregs;
754
755 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
756
757 if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
758 return (HXGE_ERROR);
759
760 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
761
762 hxgep->dev_regs = NULL;
763 dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
764 dev_regs->hxge_regh = NULL;
765 dev_regs->hxge_pciregh = NULL;
766 dev_regs->hxge_msix_regh = NULL;
767
768 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
769 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
770 "hxge_map_regs: pci config size 0x%x", regsize));
771
772 ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
773 (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
774 &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
775 if (ddi_status != DDI_SUCCESS) {
776 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
777 "ddi_map_regs, hxge bus config regs failed"));
778 goto hxge_map_regs_fail0;
779 }
780
781 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
782 "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
783 dev_regs->hxge_pciregp,
784 dev_regs->hxge_pciregh));
785
786 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
787 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
788 "hxge_map_regs: pio size 0x%x", regsize));
789
790 /* set up the device mapped register */
791 ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
792 (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
793 &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
794
795 if (ddi_status != DDI_SUCCESS) {
796 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
797 "ddi_map_regs for Hydra global reg failed"));
798 goto hxge_map_regs_fail1;
799 }
800
801 /* set up the msi/msi-x mapped register */
802 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
803 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
804 "hxge_map_regs: msix size 0x%x", regsize));
805
806 ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
807 (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
808 &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
809
810 if (ddi_status != DDI_SUCCESS) {
811 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
812 "ddi_map_regs for msi reg failed"));
813 goto hxge_map_regs_fail2;
814 }
815
816 hxgep->dev_regs = dev_regs;
817
818 HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
819 HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
820 HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
821 HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
822
823 HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
824 HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
825
826 HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
827 HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
828
829 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
830 " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
831
832 goto hxge_map_regs_exit;
833
834 hxge_map_regs_fail3:
835 if (dev_regs->hxge_msix_regh) {
836 ddi_regs_map_free(&dev_regs->hxge_msix_regh);
837 }
838
839 hxge_map_regs_fail2:
840 if (dev_regs->hxge_regh) {
841 ddi_regs_map_free(&dev_regs->hxge_regh);
842 }
843
844 hxge_map_regs_fail1:
845 if (dev_regs->hxge_pciregh) {
846 ddi_regs_map_free(&dev_regs->hxge_pciregh);
847 }
848
849 hxge_map_regs_fail0:
850 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
851 kmem_free(dev_regs, sizeof (dev_regs_t));
852
853 hxge_map_regs_exit:
854 if (ddi_status != DDI_SUCCESS)
855 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
856 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
857 return (status);
858 }
859
860 static void
861 hxge_unmap_regs(p_hxge_t hxgep)
862 {
863 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
864 if (hxgep->dev_regs) {
865 if (hxgep->dev_regs->hxge_pciregh) {
866 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
867 "==> hxge_unmap_regs: bus"));
868 ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
869 hxgep->dev_regs->hxge_pciregh = NULL;
870 }
871
872 if (hxgep->dev_regs->hxge_regh) {
873 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
874 "==> hxge_unmap_regs: device registers"));
875 ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
876 hxgep->dev_regs->hxge_regh = NULL;
877 }
878
879 if (hxgep->dev_regs->hxge_msix_regh) {
880 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
881 "==> hxge_unmap_regs: device interrupts"));
882 ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
883 hxgep->dev_regs->hxge_msix_regh = NULL;
884 }
885 kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
886 hxgep->dev_regs = NULL;
887 }
888 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
889 }
890
891 static hxge_status_t
892 hxge_setup_mutexes(p_hxge_t hxgep)
893 {
894 int ddi_status = DDI_SUCCESS;
895 hxge_status_t status = HXGE_OK;
896
897 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
898
899 /*
900 	 * Get the interrupt cookie so the mutexes can be initialized.
901 */
902 ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
903 &hxgep->interrupt_cookie);
904
905 if (ddi_status != DDI_SUCCESS) {
906 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
907 "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
908 goto hxge_setup_mutexes_exit;
909 }
910
911 /*
912 	 * Initialize the mutexes for this device.
913 */
914 MUTEX_INIT(hxgep->genlock, NULL,
915 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
916 MUTEX_INIT(&hxgep->vmac_lock, NULL,
917 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
918 MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
919 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
920 RW_INIT(&hxgep->filter_lock, NULL,
921 RW_DRIVER, (void *) hxgep->interrupt_cookie);
922 MUTEX_INIT(&hxgep->pio_lock, NULL,
923 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
924 MUTEX_INIT(&hxgep->timeout.lock, NULL,
925 MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
926
927 hxge_setup_mutexes_exit:
928 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
929 "<== hxge_setup_mutexes status = %x", status));
930
931 if (ddi_status != DDI_SUCCESS)
932 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
933
934 return (status);
935 }
936
937 static void
938 hxge_destroy_mutexes(p_hxge_t hxgep)
939 {
940 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
941 RW_DESTROY(&hxgep->filter_lock);
942 MUTEX_DESTROY(&hxgep->vmac_lock);
943 MUTEX_DESTROY(&hxgep->ouraddr_lock);
944 MUTEX_DESTROY(hxgep->genlock);
945 MUTEX_DESTROY(&hxgep->pio_lock);
946 MUTEX_DESTROY(&hxgep->timeout.lock);
947
948 if (hxge_debug_init == 1) {
949 MUTEX_DESTROY(&hxgedebuglock);
950 hxge_debug_init = 0;
951 }
952
953 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
954 }
955
956 hxge_status_t
957 hxge_init(p_hxge_t hxgep)
958 {
959 hxge_status_t status = HXGE_OK;
960
961 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
962
963 if (hxgep->drv_state & STATE_HW_INITIALIZED) {
964 return (status);
965 }
966
967 /*
968 * Allocate system memory for the receive/transmit buffer blocks and
969 * receive/transmit descriptor rings.
970 */
971 status = hxge_alloc_mem_pool(hxgep);
972 if (status != HXGE_OK) {
973 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
974 goto hxge_init_fail1;
975 }
976
977 /*
978 * Initialize and enable TXDMA channels.
979 */
980 status = hxge_init_txdma_channels(hxgep);
981 if (status != HXGE_OK) {
982 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
983 goto hxge_init_fail3;
984 }
985
986 /*
987 * Initialize and enable RXDMA channels.
988 */
989 status = hxge_init_rxdma_channels(hxgep);
990 if (status != HXGE_OK) {
991 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
992 goto hxge_init_fail4;
993 }
994
995 /*
996 * Initialize TCAM
997 */
998 status = hxge_classify_init(hxgep);
999 if (status != HXGE_OK) {
1000 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
1001 goto hxge_init_fail5;
1002 }
1003
1004 /*
1005 * Initialize the VMAC block.
1006 */
1007 status = hxge_vmac_init(hxgep);
1008 if (status != HXGE_OK) {
1009 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
1010 goto hxge_init_fail5;
1011 }
1012
1013 	/* Bringup - this may be unnecessary when PXE and FCode are available */
1014 status = hxge_pfc_set_default_mac_addr(hxgep);
1015 if (status != HXGE_OK) {
1016 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1017 "Default Address Failure\n"));
1018 goto hxge_init_fail5;
1019 }
1020
1021 /*
1022 * Enable hardware interrupts.
1023 */
1024 hxge_intr_hw_enable(hxgep);
1025 hxgep->drv_state |= STATE_HW_INITIALIZED;
1026
1027 goto hxge_init_exit;
1028
1029 hxge_init_fail5:
1030 hxge_uninit_rxdma_channels(hxgep);
1031 hxge_init_fail4:
1032 hxge_uninit_txdma_channels(hxgep);
1033 hxge_init_fail3:
1034 hxge_free_mem_pool(hxgep);
1035 hxge_init_fail1:
1036 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1037 "<== hxge_init status (failed) = 0x%08x", status));
1038 return (status);
1039
1040 hxge_init_exit:
1041
1042 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1043 status));
1044
1045 return (status);
1046 }
1047
1048 timeout_id_t
1049 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1050 {
1051 if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1052 return (timeout(func, (caddr_t)hxgep,
1053 drv_usectohz(1000 * msec)));
1054 }
1055 return (NULL);
1056 }
1057
1058 /*ARGSUSED*/
1059 void
1060 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1061 {
1062 if (timerid) {
1063 (void) untimeout(timerid);
1064 }
1065 }
1066
1067 void
1068 hxge_uninit(p_hxge_t hxgep)
1069 {
1070 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1071
1072 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1073 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1074 "==> hxge_uninit: not initialized"));
1075 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1076 return;
1077 }
1078
1079 /* Stop timer */
1080 if (hxgep->hxge_timerid) {
1081 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1082 hxgep->hxge_timerid = 0;
1083 }
1084
1085 (void) hxge_intr_hw_disable(hxgep);
1086
1087 /* Reset the receive VMAC side. */
1088 (void) hxge_rx_vmac_disable(hxgep);
1089
1090 /* Free classification resources */
1091 (void) hxge_classify_uninit(hxgep);
1092
1093 /* Reset the transmit/receive DMA side. */
1094 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1095 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1096
1097 hxge_uninit_txdma_channels(hxgep);
1098 hxge_uninit_rxdma_channels(hxgep);
1099
1100 /* Reset the transmit VMAC side. */
1101 (void) hxge_tx_vmac_disable(hxgep);
1102
1103 hxge_free_mem_pool(hxgep);
1104
1105 hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1106
1107 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1108 }
1109
1110 /*ARGSUSED*/
1111 /*VARARGS*/
1112 void
1113 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1114 {
1115 char msg_buffer[1048];
1116 char prefix_buffer[32];
1117 int instance;
1118 uint64_t debug_level;
1119 int cmn_level = CE_CONT;
1120 va_list ap;
1121
1122 debug_level = (hxgep == NULL) ? hxge_debug_level :
1123 hxgep->hxge_debug_level;
1124
1125 if ((level & debug_level) || (level == HXGE_NOTE) ||
1126 (level == HXGE_ERR_CTL)) {
1127 /* do the msg processing */
1128 if (hxge_debug_init == 0) {
1129 MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1130 hxge_debug_init = 1;
1131 }
1132
1133 MUTEX_ENTER(&hxgedebuglock);
1134
1135 if ((level & HXGE_NOTE)) {
1136 cmn_level = CE_NOTE;
1137 }
1138
1139 if (level & HXGE_ERR_CTL) {
1140 cmn_level = CE_WARN;
1141 }
1142
1143 va_start(ap, fmt);
1144 (void) vsprintf(msg_buffer, fmt, ap);
1145 va_end(ap);
1146
1147 if (hxgep == NULL) {
1148 instance = -1;
1149 (void) sprintf(prefix_buffer, "%s :", "hxge");
1150 } else {
1151 instance = hxgep->instance;
1152 (void) sprintf(prefix_buffer,
1153 "%s%d :", "hxge", instance);
1154 }
1155
1156 MUTEX_EXIT(&hxgedebuglock);
1157 cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1158 }
1159 }
1160
1161 char *
1162 hxge_dump_packet(char *addr, int size)
1163 {
1164 uchar_t *ap = (uchar_t *)addr;
1165 int i;
1166 static char etherbuf[1024];
1167 char *cp = etherbuf;
1168 char digits[] = "0123456789abcdef";
1169
1170 if (!size)
1171 size = 60;
1172
1173 if (size > MAX_DUMP_SZ) {
1174 /* Dump the leading bytes */
1175 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1176 if (*ap > 0x0f)
1177 *cp++ = digits[*ap >> 4];
1178 *cp++ = digits[*ap++ & 0xf];
1179 *cp++ = ':';
1180 }
1181 for (i = 0; i < 20; i++)
1182 *cp++ = '.';
1183 /* Dump the last MAX_DUMP_SZ/2 bytes */
1184 ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1185 for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1186 if (*ap > 0x0f)
1187 *cp++ = digits[*ap >> 4];
1188 *cp++ = digits[*ap++ & 0xf];
1189 *cp++ = ':';
1190 }
1191 } else {
1192 for (i = 0; i < size; i++) {
1193 if (*ap > 0x0f)
1194 *cp++ = digits[*ap >> 4];
1195 *cp++ = digits[*ap++ & 0xf];
1196 *cp++ = ':';
1197 }
1198 }
1199 *--cp = 0;
1200 return (etherbuf);
1201 }
1202
1203 static void
1204 hxge_suspend(p_hxge_t hxgep)
1205 {
1206 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1207
1208 /*
1209 * Stop the link status timer before hxge_intrs_disable() to avoid
1210 	 * accessing the MSIX table simultaneously. Note that the timer
1211 * routine polls for MSIX parity errors.
1212 */
1213 MUTEX_ENTER(&hxgep->timeout.lock);
1214 if (hxgep->timeout.id)
1215 (void) untimeout(hxgep->timeout.id);
1216 MUTEX_EXIT(&hxgep->timeout.lock);
1217
1218 hxge_intrs_disable(hxgep);
1219 hxge_destroy_dev(hxgep);
1220
1221 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1222 }
1223
1224 static hxge_status_t
1225 hxge_resume(p_hxge_t hxgep)
1226 {
1227 hxge_status_t status = HXGE_OK;
1228
1229 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1230 hxgep->suspended = DDI_RESUME;
1231
1232 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1233 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1234
1235 (void) hxge_rx_vmac_enable(hxgep);
1236 (void) hxge_tx_vmac_enable(hxgep);
1237
1238 hxge_intrs_enable(hxgep);
1239
1240 hxgep->suspended = 0;
1241
1242 /*
1243 * Resume the link status timer after hxge_intrs_enable to avoid
1244 	 * accessing the MSIX table simultaneously.
1245 */
1246 MUTEX_ENTER(&hxgep->timeout.lock);
1247 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1248 hxgep->timeout.ticks);
1249 MUTEX_EXIT(&hxgep->timeout.lock);
1250
1251 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1252 "<== hxge_resume status = 0x%x", status));
1253
1254 return (status);
1255 }
1256
1257 static hxge_status_t
1258 hxge_setup_dev(p_hxge_t hxgep)
1259 {
1260 hxge_status_t status = HXGE_OK;
1261
1262 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1263
1264 status = hxge_link_init(hxgep);
1265 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1266 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1267 "Bad register acc handle"));
1268 status = HXGE_ERROR;
1269 }
1270
1271 if (status != HXGE_OK) {
1272 HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1273 " hxge_setup_dev status (link init 0x%08x)", status));
1274 goto hxge_setup_dev_exit;
1275 }
1276
1277 hxge_setup_dev_exit:
1278 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1279 "<== hxge_setup_dev status = 0x%08x", status));
1280
1281 return (status);
1282 }
1283
1284 static void
1285 hxge_destroy_dev(p_hxge_t hxgep)
1286 {
1287 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1288
1289 (void) hxge_hw_stop(hxgep);
1290
1291 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1292 }
1293
1294 static hxge_status_t
1295 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1296 {
1297 int ddi_status = DDI_SUCCESS;
1298 uint_t count;
1299 ddi_dma_cookie_t cookie;
1300 uint_t iommu_pagesize;
1301 hxge_status_t status = HXGE_OK;
1302
1303 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1304
1305 hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1306 iommu_pagesize = dvma_pagesize(hxgep->dip);
1307
1308 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1309 " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1310 " default_block_size %d iommu_pagesize %d",
1311 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1312 hxgep->rx_default_block_size, iommu_pagesize));
1313
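	/*
	 * Bound the DMA page size by what the IOMMU provides; when the two
	 * match, additionally cap it at 8K, the largest page Hydra supports.
	 */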
1314 if (iommu_pagesize != 0) {
1315 if (hxgep->sys_page_sz == iommu_pagesize) {
1316 			/* Hydra supports up to 8K pages */
1317 if (iommu_pagesize > 0x2000)
1318 hxgep->sys_page_sz = 0x2000;
1319 } else {
1320 if (hxgep->sys_page_sz > iommu_pagesize)
1321 hxgep->sys_page_sz = iommu_pagesize;
1322 }
1323 }
1324
1325 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1326
1327 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1328 "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1329 "default_block_size %d page mask %d",
1330 hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1331 hxgep->rx_default_block_size, hxgep->sys_page_mask));
1332
1333 switch (hxgep->sys_page_sz) {
1334 default:
1335 hxgep->sys_page_sz = 0x1000;
1336 hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1337 hxgep->rx_default_block_size = 0x1000;
1338 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1339 break;
1340 case 0x1000:
1341 hxgep->rx_default_block_size = 0x1000;
1342 hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1343 break;
1344 case 0x2000:
1345 hxgep->rx_default_block_size = 0x2000;
1346 hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1347 break;
1348 }
1349
1350 hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1351 hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1352
1353 /*
1354 * Get the system DMA burst size.
1355 */
1356 ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1357 DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1358 if (ddi_status != DDI_SUCCESS) {
1359 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1360 "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1361 goto hxge_get_soft_properties_exit;
1362 }
1363
1364 ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1365 (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1366 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1367 &cookie, &count);
1368 if (ddi_status != DDI_DMA_MAPPED) {
1369 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1370 "Binding spare handle to find system burstsize failed."));
1371 ddi_status = DDI_FAILURE;
1372 goto hxge_get_soft_properties_fail1;
1373 }
1374
1375 hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1376 (void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1377
1378 hxge_get_soft_properties_fail1:
1379 ddi_dma_free_handle(&hxgep->dmasparehandle);
1380
1381 hxge_get_soft_properties_exit:
1382
1383 if (ddi_status != DDI_SUCCESS)
1384 status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1385
1386 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1387 "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1388
1389 return (status);
1390 }
1391
1392 static hxge_status_t
1393 hxge_alloc_mem_pool(p_hxge_t hxgep)
1394 {
1395 hxge_status_t status = HXGE_OK;
1396
1397 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1398
1399 status = hxge_alloc_rx_mem_pool(hxgep);
1400 if (status != HXGE_OK) {
1401 return (HXGE_ERROR);
1402 }
1403
1404 status = hxge_alloc_tx_mem_pool(hxgep);
1405 if (status != HXGE_OK) {
1406 hxge_free_rx_mem_pool(hxgep);
1407 return (HXGE_ERROR);
1408 }
1409
1410 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1411 return (HXGE_OK);
1412 }
1413
1414 static void
1415 hxge_free_mem_pool(p_hxge_t hxgep)
1416 {
1417 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1418
1419 hxge_free_rx_mem_pool(hxgep);
1420 hxge_free_tx_mem_pool(hxgep);
1421
1422 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1423 }
1424
1425 static hxge_status_t
1426 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1427 {
1428 int i, j;
1429 uint32_t ndmas, st_rdc;
1430 p_hxge_dma_pt_cfg_t p_all_cfgp;
1431 p_hxge_hw_pt_cfg_t p_cfgp;
1432 p_hxge_dma_pool_t dma_poolp;
1433 p_hxge_dma_common_t *dma_buf_p;
1434 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1435 p_hxge_dma_common_t *dma_rbr_cntl_p;
1436 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1437 p_hxge_dma_common_t *dma_rcr_cntl_p;
1438 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1439 p_hxge_dma_common_t *dma_mbox_cntl_p;
1440 size_t rx_buf_alloc_size;
1441 size_t rx_rbr_cntl_alloc_size;
1442 size_t rx_rcr_cntl_alloc_size;
1443 size_t rx_mbox_cntl_alloc_size;
1444 uint32_t *num_chunks; /* per dma */
1445 hxge_status_t status = HXGE_OK;
1446
1447 uint32_t hxge_port_rbr_size;
1448 uint32_t hxge_port_rbr_spare_size;
1449 uint32_t hxge_port_rcr_size;
1450
1451 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1452
1453 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1454 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1455 st_rdc = p_cfgp->start_rdc;
1456 ndmas = p_cfgp->max_rdcs;
1457
1458 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1459 " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1460
1461 /*
1462 * Allocate memory for each receive DMA channel.
1463 */
1464 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1465 KM_SLEEP);
1466 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1467 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1468
1469 dma_rbr_cntl_poolp = (p_hxge_dma_pool_t)
1470 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1471 dma_rbr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1472 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1473 dma_rcr_cntl_poolp = (p_hxge_dma_pool_t)
1474 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1475 dma_rcr_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1476 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1477 dma_mbox_cntl_poolp = (p_hxge_dma_pool_t)
1478 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1479 dma_mbox_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1480 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1481
1482 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1483 KM_SLEEP);
1484
1485 	/*
1486 	 * Assume each DMA channel uses the default block size.  RBR block
1487 	 * counts are rounded up to a multiple of the batch count (16); see
1488 	 * the worked example below.
	 */
1489 hxge_port_rbr_size = p_all_cfgp->rbr_size;
1490 hxge_port_rcr_size = p_all_cfgp->rcr_size;
1491
1492 if (!hxge_port_rbr_size) {
1493 hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1494 }
1495
1496 if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1497 hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1498 (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1499 }
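	/*
	 * Worked example of the rounding above (illustrative values only),
	 * assuming HXGE_RXDMA_POST_BATCH is 16 as the comment above notes:
	 * for a requested RBR size of 1000, 1000 % 16 != 0, so the size
	 * becomes 16 * (1000 / 16 + 1) = 1008, the next multiple of 16.
	 * The spare size below is rounded the same way.
	 */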
1500
1501 p_all_cfgp->rbr_size = hxge_port_rbr_size;
1502 hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1503
1504 if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1505 hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1506 (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1507 }
1508
1509 rx_buf_alloc_size = (hxgep->rx_default_block_size *
1510 (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1511
1512 /*
1513 * Addresses of receive block ring, receive completion ring and the
1514 * mailbox must be all cache-aligned (64 bytes).
1515 */
1516 rx_rbr_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1517 rx_rbr_cntl_alloc_size *= sizeof (rx_desc_t);
1518 rx_rcr_cntl_alloc_size = sizeof (rcr_entry_t) * hxge_port_rcr_size;
1519 rx_mbox_cntl_alloc_size = sizeof (rxdma_mailbox_t);
1520
1521 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1522 "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1523 "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1524 hxge_port_rbr_size, hxge_port_rbr_spare_size,
1525 hxge_port_rcr_size, rx_cntl_alloc_size));
1526
1527 hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1528 hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1529
1530 /*
1531 * Allocate memory for receive buffers and descriptor rings. Replace
1532 * allocation functions with interface functions provided by the
1533 * partition manager when it is available.
1534 */
1535 /*
1536 * Allocate memory for the receive buffer blocks.
1537 */
1538 for (i = 0; i < ndmas; i++) {
1539 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1540 " hxge_alloc_rx_mem_pool to alloc mem: "
1541 " dma %d dma_buf_p %llx &dma_buf_p %llx",
1542 i, dma_buf_p[i], &dma_buf_p[i]));
1543
1544 num_chunks[i] = 0;
1545
1546 status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1547 rx_buf_alloc_size, hxgep->rx_default_block_size,
1548 &num_chunks[i]);
1549 if (status != HXGE_OK) {
1550 break;
1551 }
1552
1553 st_rdc++;
1554 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1555 " hxge_alloc_rx_mem_pool DONE alloc mem: "
1556 "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1557 dma_buf_p[i], &dma_buf_p[i]));
1558 }
1559
1560 if (i < ndmas) {
1561 goto hxge_alloc_rx_mem_fail1;
1562 }
1563
1564 /*
1565 * Allocate memory for descriptor rings and mailbox.
1566 */
1567 st_rdc = p_cfgp->start_rdc;
1568 for (j = 0; j < ndmas; j++) {
1569 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1570 &dma_rbr_cntl_p[j], &hxge_rx_rbr_desc_dma_attr,
1571 rx_rbr_cntl_alloc_size)) != HXGE_OK) {
1572 break;
1573 }
1574
1575 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1576 &dma_rcr_cntl_p[j], &hxge_rx_rcr_desc_dma_attr,
1577 rx_rcr_cntl_alloc_size)) != HXGE_OK) {
1578 break;
1579 }
1580
1581 if ((status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc,
1582 &dma_mbox_cntl_p[j], &hxge_rx_mbox_dma_attr,
1583 rx_mbox_cntl_alloc_size)) != HXGE_OK) {
1584 break;
1585 }
1586 st_rdc++;
1587 }
1588
1589 if (j < ndmas) {
1590 goto hxge_alloc_rx_mem_fail2;
1591 }
1592
1593 dma_poolp->ndmas = ndmas;
1594 dma_poolp->num_chunks = num_chunks;
1595 dma_poolp->buf_allocated = B_TRUE;
1596 hxgep->rx_buf_pool_p = dma_poolp;
1597 dma_poolp->dma_buf_pool_p = dma_buf_p;
1598
1599 dma_rbr_cntl_poolp->ndmas = ndmas;
1600 dma_rbr_cntl_poolp->buf_allocated = B_TRUE;
1601 hxgep->rx_rbr_cntl_pool_p = dma_rbr_cntl_poolp;
1602 dma_rbr_cntl_poolp->dma_buf_pool_p = dma_rbr_cntl_p;
1603
1604 dma_rcr_cntl_poolp->ndmas = ndmas;
1605 dma_rcr_cntl_poolp->buf_allocated = B_TRUE;
1606 hxgep->rx_rcr_cntl_pool_p = dma_rcr_cntl_poolp;
1607 dma_rcr_cntl_poolp->dma_buf_pool_p = dma_rcr_cntl_p;
1608
1609 dma_mbox_cntl_poolp->ndmas = ndmas;
1610 dma_mbox_cntl_poolp->buf_allocated = B_TRUE;
1611 hxgep->rx_mbox_cntl_pool_p = dma_mbox_cntl_poolp;
1612 dma_mbox_cntl_poolp->dma_buf_pool_p = dma_mbox_cntl_p;
1613
1614 goto hxge_alloc_rx_mem_pool_exit;
1615
1616 hxge_alloc_rx_mem_fail2:
1617 /* Free control buffers */
1618 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1619 "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1620 for (; j >= 0; j--) {
1621 hxge_free_rx_cntl_dma(hxgep,
1622 (p_hxge_dma_common_t)dma_rbr_cntl_p[j]);
1623 hxge_free_rx_cntl_dma(hxgep,
1624 (p_hxge_dma_common_t)dma_rcr_cntl_p[j]);
1625 hxge_free_rx_cntl_dma(hxgep,
1626 (p_hxge_dma_common_t)dma_mbox_cntl_p[j]);
1627 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1628 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1629 }
1630 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1631 "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1632
1633 hxge_alloc_rx_mem_fail1:
1634 /* Free data buffers */
1635 i--;
1636 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1637 "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1638 for (; i >= 0; i--) {
1639 hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1640 num_chunks[i]);
1641 }
1642 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1643 "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1644
1645 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1646 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1647 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1648 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1649 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1650 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1651 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1652 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1653 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1654
1655 hxge_alloc_rx_mem_pool_exit:
1656 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1657 "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1658
1659 return (status);
1660 }
1661
1662 static void
1663 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1664 {
1665 uint32_t i, ndmas;
1666 p_hxge_dma_pool_t dma_poolp;
1667 p_hxge_dma_common_t *dma_buf_p;
1668 p_hxge_dma_pool_t dma_rbr_cntl_poolp;
1669 p_hxge_dma_common_t *dma_rbr_cntl_p;
1670 p_hxge_dma_pool_t dma_rcr_cntl_poolp;
1671 p_hxge_dma_common_t *dma_rcr_cntl_p;
1672 p_hxge_dma_pool_t dma_mbox_cntl_poolp;
1673 p_hxge_dma_common_t *dma_mbox_cntl_p;
1674 uint32_t *num_chunks;
1675
1676 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1677
1678 dma_poolp = hxgep->rx_buf_pool_p;
1679 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1680 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1681 "(null rx buf pool or buf not allocated"));
1682 return;
1683 }
1684
1685 dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
1686 if (dma_rbr_cntl_poolp == NULL ||
1687 (!dma_rbr_cntl_poolp->buf_allocated)) {
1688 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1689 "<== hxge_free_rx_mem_pool "
1690 "(null rbr cntl buf pool or rbr cntl buf not allocated"));
1691 return;
1692 }
1693
1694 dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
1695 if (dma_rcr_cntl_poolp == NULL ||
1696 (!dma_rcr_cntl_poolp->buf_allocated)) {
1697 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1698 "<== hxge_free_rx_mem_pool "
1699 "(null rcr cntl buf pool or rcr cntl buf not allocated"));
1700 return;
1701 }
1702
1703 dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
1704 if (dma_mbox_cntl_poolp == NULL ||
1705 (!dma_mbox_cntl_poolp->buf_allocated)) {
1706 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1707 "<== hxge_free_rx_mem_pool "
1708 "(null mbox cntl buf pool or mbox cntl buf not allocated"));
1709 return;
1710 }
1711
1712 dma_buf_p = dma_poolp->dma_buf_pool_p;
1713 num_chunks = dma_poolp->num_chunks;
1714
1715 dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
1716 dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
1717 dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
1718 ndmas = dma_rbr_cntl_poolp->ndmas;
1719
1720 for (i = 0; i < ndmas; i++) {
1721 hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1722 }
1723
1724 for (i = 0; i < ndmas; i++) {
1725 hxge_free_rx_cntl_dma(hxgep, dma_rbr_cntl_p[i]);
1726 hxge_free_rx_cntl_dma(hxgep, dma_rcr_cntl_p[i]);
1727 hxge_free_rx_cntl_dma(hxgep, dma_mbox_cntl_p[i]);
1728 }
1729
1730 for (i = 0; i < ndmas; i++) {
1731 KMEM_FREE(dma_buf_p[i],
1732 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1733 KMEM_FREE(dma_rbr_cntl_p[i], sizeof (hxge_dma_common_t));
1734 KMEM_FREE(dma_rcr_cntl_p[i], sizeof (hxge_dma_common_t));
1735 KMEM_FREE(dma_mbox_cntl_p[i], sizeof (hxge_dma_common_t));
1736 }
1737
1738 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1739 KMEM_FREE(dma_rbr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1740 KMEM_FREE(dma_rbr_cntl_poolp, sizeof (hxge_dma_pool_t));
1741 KMEM_FREE(dma_rcr_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1742 KMEM_FREE(dma_rcr_cntl_poolp, sizeof (hxge_dma_pool_t));
1743 KMEM_FREE(dma_mbox_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1744 KMEM_FREE(dma_mbox_cntl_poolp, sizeof (hxge_dma_pool_t));
1745 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1746 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1747
1748 hxgep->rx_buf_pool_p = NULL;
1749 hxgep->rx_rbr_cntl_pool_p = NULL;
1750 hxgep->rx_rcr_cntl_pool_p = NULL;
1751 hxgep->rx_mbox_cntl_pool_p = NULL;
1752
1753 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1754 }
1755
1756 static hxge_status_t
1757 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1758 p_hxge_dma_common_t *dmap,
1759 size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1760 {
1761 p_hxge_dma_common_t rx_dmap;
1762 hxge_status_t status = HXGE_OK;
1763 size_t total_alloc_size;
1764 size_t allocated = 0;
1765 int i, size_index, array_size;
1766
1767 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1768
1769 rx_dmap = (p_hxge_dma_common_t)
1770 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1771
1772 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1773 " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1774 dma_channel, alloc_size, block_size, dmap));
1775
1776 total_alloc_size = alloc_size;
1777
1778 i = 0;
1779 size_index = 0;
1780 array_size = sizeof (alloc_sizes) / sizeof (size_t);
1781 while ((size_index < array_size) &&
1782 (alloc_sizes[size_index] < alloc_size))
1783 size_index++;
1784 if (size_index >= array_size) {
1785 size_index = array_size - 1;
1786 }
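	/*
	 * Allocate chunks of the chosen size until the full request is
	 * covered; on an allocation failure, fall back to the next smaller
	 * entry in alloc_sizes[], up to HXGE_DMA_BLOCK chunks in total.
	 */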
1787
1788 while ((allocated < total_alloc_size) &&
1789 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1790 rx_dmap[i].dma_chunk_index = i;
1791 rx_dmap[i].block_size = block_size;
1792 rx_dmap[i].alength = alloc_sizes[size_index];
1793 rx_dmap[i].orig_alength = rx_dmap[i].alength;
1794 rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1795 rx_dmap[i].dma_channel = dma_channel;
1796 rx_dmap[i].contig_alloc_type = B_FALSE;
1797
1798 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1799 "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1800 "i %d nblocks %d alength %d",
1801 dma_channel, i, &rx_dmap[i], block_size,
1802 i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1803 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1804 &hxge_rx_dma_attr, rx_dmap[i].alength,
1805 &hxge_dev_buf_dma_acc_attr,
1806 DDI_DMA_READ | DDI_DMA_STREAMING,
1807 (p_hxge_dma_common_t)(&rx_dmap[i]));
1808 if (status != HXGE_OK) {
1809 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1810 " hxge_alloc_rx_buf_dma: Alloc Failed: "
1811 " for size: %d", alloc_sizes[size_index]));
1812 size_index--;
1813 } else {
1814 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1815 " alloc_rx_buf_dma allocated rdc %d "
1816 "chunk %d size %x dvma %x bufp %llx ",
1817 dma_channel, i, rx_dmap[i].alength,
1818 rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1819 i++;
1820 allocated += alloc_sizes[size_index];
1821 }
1822 }
1823
1824 if (allocated < total_alloc_size) {
1825 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1826 " hxge_alloc_rx_buf_dma failed due to"
1827 " allocated(%d) < required(%d)",
1828 allocated, total_alloc_size));
1829 goto hxge_alloc_rx_mem_fail1;
1830 }
1831
1832 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1833 " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1834
1835 *num_chunks = i;
1836 *dmap = rx_dmap;
1837
1838 goto hxge_alloc_rx_mem_exit;
1839
1840 hxge_alloc_rx_mem_fail1:
1841 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1842
1843 hxge_alloc_rx_mem_exit:
1844 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1845 "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1846
1847 return (status);
1848 }
1849
1850 /*ARGSUSED*/
1851 static void
1852 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1853 uint32_t num_chunks)
1854 {
1855 int i;
1856
1857 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1858 "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1859
1860 for (i = 0; i < num_chunks; i++) {
1861 HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1862 "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1863 hxge_dma_mem_free(dmap++);
1864 }
1865
1866 HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1867 }
1868
1869 /*ARGSUSED*/
1870 static hxge_status_t
1871 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1872 p_hxge_dma_common_t *dmap, struct ddi_dma_attr *attr, size_t size)
1873 {
1874 p_hxge_dma_common_t rx_dmap;
1875 hxge_status_t status = HXGE_OK;
1876
1877 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1878
1879 rx_dmap = (p_hxge_dma_common_t)
1880 KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1881
1882 rx_dmap->contig_alloc_type = B_FALSE;
1883
1884 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1885 attr, size, &hxge_dev_desc_dma_acc_attr,
1886 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1887 if (status != HXGE_OK) {
1888 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1889 " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1890 " for size: %d", size));
1891 goto hxge_alloc_rx_cntl_dma_fail1;
1892 }
1893
1894 *dmap = rx_dmap;
1895
1896 goto hxge_alloc_rx_cntl_dma_exit;
1897
1898 hxge_alloc_rx_cntl_dma_fail1:
1899 KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1900
1901 hxge_alloc_rx_cntl_dma_exit:
1902 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1903 "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1904
1905 return (status);
1906 }
1907
1908 /*ARGSUSED*/
1909 static void
1910 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1911 {
1912 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1913
1914 hxge_dma_mem_free(dmap);
1915
1916 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1917 }
1918
1919 static hxge_status_t
1920 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1921 {
1922 hxge_status_t status = HXGE_OK;
1923 int i, j;
1924 uint32_t ndmas, st_tdc;
1925 p_hxge_dma_pt_cfg_t p_all_cfgp;
1926 p_hxge_hw_pt_cfg_t p_cfgp;
1927 p_hxge_dma_pool_t dma_poolp;
1928 p_hxge_dma_common_t *dma_buf_p;
1929 p_hxge_dma_pool_t dma_cntl_poolp;
1930 p_hxge_dma_common_t *dma_cntl_p;
1931 size_t tx_buf_alloc_size;
1932 size_t tx_cntl_alloc_size;
1933 uint32_t *num_chunks; /* per dma */
1934
1935 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1936
1937 p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1938 p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1939 st_tdc = p_cfgp->start_tdc;
1940 ndmas = p_cfgp->max_tdcs;
1941
1942 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1943 "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1944 p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1945 /*
1946 * Allocate memory for each transmit DMA channel.
1947 */
1948 dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1949 KM_SLEEP);
1950 dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1951 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1952
1953 dma_cntl_poolp = (p_hxge_dma_pool_t)
1954 KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1955 dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1956 sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1957
1958 hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1959
1960 /*
1961 * Assume that each DMA channel will be configured with default
1962 * transmit buffer size for copying transmit data. (For packet payload
1963 * over this limit, packets will not be copied.)
1964 */
1965 tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1966
1967 /*
1968 * Addresses of transmit descriptor ring and the mailbox must be all
1969 * cache-aligned (64 bytes).
1970 */
1971 tx_cntl_alloc_size = hxge_tx_ring_size;
1972 tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1973 tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1974
1975 num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1976 KM_SLEEP);
1977
1978 /*
1979 * Allocate memory for transmit buffers and descriptor rings. Replace
1980 * allocation functions with interface functions provided by the
1981 * partition manager when it is available.
1982 *
1983 * Allocate memory for the transmit buffer pool.
1984 */
1985 for (i = 0; i < ndmas; i++) {
1986 num_chunks[i] = 0;
1987 status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1988 tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1989 if (status != HXGE_OK) {
1990 break;
1991 }
1992 st_tdc++;
1993 }
1994
1995 if (i < ndmas) {
1996 goto hxge_alloc_tx_mem_pool_fail1;
1997 }
1998
1999 st_tdc = p_cfgp->start_tdc;
2000
2001 /*
2002 * Allocate memory for descriptor rings and mailbox.
2003 */
2004 for (j = 0; j < ndmas; j++) {
2005 status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
2006 tx_cntl_alloc_size);
2007 if (status != HXGE_OK) {
2008 break;
2009 }
2010 st_tdc++;
2011 }
2012
2013 if (j < ndmas) {
2014 goto hxge_alloc_tx_mem_pool_fail2;
2015 }
2016
2017 dma_poolp->ndmas = ndmas;
2018 dma_poolp->num_chunks = num_chunks;
2019 dma_poolp->buf_allocated = B_TRUE;
2020 dma_poolp->dma_buf_pool_p = dma_buf_p;
2021 hxgep->tx_buf_pool_p = dma_poolp;
2022
2023 dma_cntl_poolp->ndmas = ndmas;
2024 dma_cntl_poolp->buf_allocated = B_TRUE;
2025 dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
2026 hxgep->tx_cntl_pool_p = dma_cntl_poolp;
2027
2028 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2029 "==> hxge_alloc_tx_mem_pool: start_tdc %d "
2030 "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
2031
2032 goto hxge_alloc_tx_mem_pool_exit;
2033
2034 hxge_alloc_tx_mem_pool_fail2:
2035 /* Free control buffers */
2036 j--;
2037 for (; j >= 0; j--) {
2038 hxge_free_tx_cntl_dma(hxgep,
2039 (p_hxge_dma_common_t)dma_cntl_p[j]);
2040 }
2041
2042 hxge_alloc_tx_mem_pool_fail1:
2043 /* Free data buffers */
2044 i--;
2045 for (; i >= 0; i--) {
2046 hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
2047 num_chunks[i]);
2048 }
2049
2050 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2051 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2052 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2053 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2054 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2055
2056 hxge_alloc_tx_mem_pool_exit:
2057 HXGE_DEBUG_MSG((hxgep, MEM_CTL,
2058 "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
2059
2060 return (status);
2061 }
2062
2063 static hxge_status_t
2064 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
2065 p_hxge_dma_common_t *dmap, size_t alloc_size,
2066 size_t block_size, uint32_t *num_chunks)
2067 {
2068 p_hxge_dma_common_t tx_dmap;
2069 hxge_status_t status = HXGE_OK;
2070 size_t total_alloc_size;
2071 size_t allocated = 0;
2072 int i, size_index, array_size;
2073
2074 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
2075
2076 tx_dmap = (p_hxge_dma_common_t)
2077 KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
2078
2079 total_alloc_size = alloc_size;
2080 i = 0;
2081 size_index = 0;
2082 array_size = sizeof (alloc_sizes) / sizeof (size_t);
2083 while ((size_index < array_size) &&
2084 (alloc_sizes[size_index] < alloc_size))
2085 size_index++;
2086 if (size_index >= array_size) {
2087 size_index = array_size - 1;
2088 }
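/*
 * Same chunking strategy as the receive side: step down to smaller
 * chunk sizes when an allocation fails.
 */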
2089
2090 while ((allocated < total_alloc_size) &&
2091 (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2092 tx_dmap[i].dma_chunk_index = i;
2093 tx_dmap[i].block_size = block_size;
2094 tx_dmap[i].alength = alloc_sizes[size_index];
2095 tx_dmap[i].orig_alength = tx_dmap[i].alength;
2096 tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2097 tx_dmap[i].dma_channel = dma_channel;
2098 tx_dmap[i].contig_alloc_type = B_FALSE;
2099
2100 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2101 &hxge_tx_dma_attr, tx_dmap[i].alength,
2102 &hxge_dev_buf_dma_acc_attr,
2103 DDI_DMA_WRITE | DDI_DMA_STREAMING,
2104 (p_hxge_dma_common_t)(&tx_dmap[i]));
2105 if (status != HXGE_OK) {
2106 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2107 " hxge_alloc_tx_buf_dma: Alloc Failed: "
2108 " for size: %d", alloc_sizes[size_index]));
2109 size_index--;
2110 } else {
2111 i++;
2112 allocated += alloc_sizes[size_index];
2113 }
2114 }
2115
2116 if (allocated < total_alloc_size) {
2117 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2118 " hxge_alloc_tx_buf_dma: failed due to"
2119 " allocated(%d) < required(%d)",
2120 allocated, total_alloc_size));
2121 goto hxge_alloc_tx_mem_fail1;
2122 }
2123
2124 *num_chunks = i;
2125 *dmap = tx_dmap;
2126 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2127 "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2128 *dmap, i));
2129 goto hxge_alloc_tx_mem_exit;
2130
2131 hxge_alloc_tx_mem_fail1:
2132 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2133
2134 hxge_alloc_tx_mem_exit:
2135 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2136 "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2137
2138 return (status);
2139 }
2140
2141 /*ARGSUSED*/
2142 static void
2143 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2144 uint32_t num_chunks)
2145 {
2146 int i;
2147
2148 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2149
2150 for (i = 0; i < num_chunks; i++) {
2151 hxge_dma_mem_free(dmap++);
2152 }
2153
2154 HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2155 }
2156
2157 /*ARGSUSED*/
2158 static hxge_status_t
2159 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2160 p_hxge_dma_common_t *dmap, size_t size)
2161 {
2162 p_hxge_dma_common_t tx_dmap;
2163 hxge_status_t status = HXGE_OK;
2164
2165 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2166
2167 tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2168 KM_SLEEP);
2169
2170 tx_dmap->contig_alloc_type = B_FALSE;
2171
2172 status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2173 &hxge_tx_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2174 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2175 if (status != HXGE_OK) {
2176 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2177 " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2178 " for size: %d", size));
2179 goto hxge_alloc_tx_cntl_dma_fail1;
2180 }
2181
2182 *dmap = tx_dmap;
2183
2184 goto hxge_alloc_tx_cntl_dma_exit;
2185
2186 hxge_alloc_tx_cntl_dma_fail1:
2187 KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2188
2189 hxge_alloc_tx_cntl_dma_exit:
2190 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2191 "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2192
2193 return (status);
2194 }
2195
2196 /*ARGSUSED*/
2197 static void
2198 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2199 {
2200 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2201
2202 hxge_dma_mem_free(dmap);
2203
2204 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2205 }
2206
2207 static void
2208 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2209 {
2210 uint32_t i, ndmas;
2211 p_hxge_dma_pool_t dma_poolp;
2212 p_hxge_dma_common_t *dma_buf_p;
2213 p_hxge_dma_pool_t dma_cntl_poolp;
2214 p_hxge_dma_common_t *dma_cntl_p;
2215 uint32_t *num_chunks;
2216
2217 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2218
2219 dma_poolp = hxgep->tx_buf_pool_p;
2220 if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2221 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2222 "<== hxge_free_tx_mem_pool "
2223 "(null rx buf pool or buf not allocated"));
2224 return;
2225 }
2226
2227 dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2228 if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2229 HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2230 "<== hxge_free_tx_mem_pool "
2231 "(null tx cntl buf pool or cntl buf not allocated"));
2232 return;
2233 }
2234
2235 dma_buf_p = dma_poolp->dma_buf_pool_p;
2236 num_chunks = dma_poolp->num_chunks;
2237
2238 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2239 ndmas = dma_cntl_poolp->ndmas;
2240
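/*
 * Free each channel's data buffers and control areas, then release the
 * per-pool bookkeeping arrays themselves.
 */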
2241 for (i = 0; i < ndmas; i++) {
2242 hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2243 }
2244
2245 for (i = 0; i < ndmas; i++) {
2246 hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2247 }
2248
2249 for (i = 0; i < ndmas; i++) {
2250 KMEM_FREE(dma_buf_p[i],
2251 sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2252 KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2253 }
2254
2255 KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2256 KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2257 KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2258 KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2259 KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2260
2261 hxgep->tx_buf_pool_p = NULL;
2262 hxgep->tx_cntl_pool_p = NULL;
2263
2264 HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2265 }
2266
2267 /*ARGSUSED*/
2268 static hxge_status_t
2269 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2270 struct ddi_dma_attr *dma_attrp,
2271 size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2272 p_hxge_dma_common_t dma_p)
2273 {
2274 caddr_t kaddrp;
2275 int ddi_status = DDI_SUCCESS;
2276
2277 dma_p->dma_handle = NULL;
2278 dma_p->acc_handle = NULL;
2279 dma_p->kaddrp = NULL;
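/*
 * Standard three-step DDI DMA allocation: allocate a DMA handle,
 * allocate the memory, then bind the handle. The buffer must bind to a
 * single cookie; otherwise everything is unwound and an error returned.
 */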
2280
2281 ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2282 DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2283 if (ddi_status != DDI_SUCCESS) {
2284 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2285 "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2286 return (HXGE_ERROR | HXGE_DDI_FAILED);
2287 }
2288
2289 ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2290 xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2291 &dma_p->acc_handle);
2292 if (ddi_status != DDI_SUCCESS) {
2293 /* The caller will decide whether it is fatal */
2294 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2295 "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2296 ddi_dma_free_handle(&dma_p->dma_handle);
2297 dma_p->dma_handle = NULL;
2298 return (HXGE_ERROR | HXGE_DDI_FAILED);
2299 }
2300
2301 if (dma_p->alength < length) {
2302 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2303 "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2304 ddi_dma_mem_free(&dma_p->acc_handle);
2305 ddi_dma_free_handle(&dma_p->dma_handle);
2306 dma_p->acc_handle = NULL;
2307 dma_p->dma_handle = NULL;
2308 return (HXGE_ERROR);
2309 }
2310
2311 ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2312 kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2313 &dma_p->dma_cookie, &dma_p->ncookies);
2314 if (ddi_status != DDI_DMA_MAPPED) {
2315 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2316 "hxge_dma_mem_alloc:di_dma_addr_bind failed "
2317 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2318 if (dma_p->acc_handle) {
2319 ddi_dma_mem_free(&dma_p->acc_handle);
2320 dma_p->acc_handle = NULL;
2321 }
2322 ddi_dma_free_handle(&dma_p->dma_handle);
2323 dma_p->dma_handle = NULL;
2324 return (HXGE_ERROR | HXGE_DDI_FAILED);
2325 }
2326
2327 if (dma_p->ncookies != 1) {
2328 HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2329 "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
2330 "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2331 if (dma_p->acc_handle) {
2332 ddi_dma_mem_free(&dma_p->acc_handle);
2333 dma_p->acc_handle = NULL;
2334 }
2335 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2336 ddi_dma_free_handle(&dma_p->dma_handle);
2337 dma_p->dma_handle = NULL;
2338 return (HXGE_ERROR);
2339 }
2340
2341 dma_p->kaddrp = kaddrp;
2342 #if defined(__i386)
2343 dma_p->ioaddr_pp =
2344 (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2345 #else
2346 dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2347 #endif
2348
2349 HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2350
2351 HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2352 "dma buffer allocated: dma_p $%p "
2353 "return dmac_ladress from cookie $%p dmac_size %d "
2354 "dma_p->ioaddr_p $%p "
2355 "dma_p->orig_ioaddr_p $%p "
2356 "orig_vatopa $%p "
2357 "alength %d (0x%x) "
2358 "kaddrp $%p "
2359 "length %d (0x%x)",
2360 dma_p,
2361 dma_p->dma_cookie.dmac_laddress,
2362 dma_p->dma_cookie.dmac_size,
2363 dma_p->ioaddr_pp,
2364 dma_p->orig_ioaddr_pp,
2365 dma_p->orig_vatopa,
2366 dma_p->alength, dma_p->alength,
2367 kaddrp,
2368 length, length));
2369
2370 return (HXGE_OK);
2371 }
2372
2373 static void
2374 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2375 {
2376 if (dma_p == NULL)
2377 return;
2378
2379 if (dma_p->dma_handle != NULL) {
2380 if (dma_p->ncookies) {
2381 (void) ddi_dma_unbind_handle(dma_p->dma_handle);
2382 dma_p->ncookies = 0;
2383 }
2384 ddi_dma_free_handle(&dma_p->dma_handle);
2385 dma_p->dma_handle = NULL;
2386 }
2387
2388 if (dma_p->acc_handle != NULL) {
2389 ddi_dma_mem_free(&dma_p->acc_handle);
2390 dma_p->acc_handle = NULL;
2391 HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2392 }
2393
2394 dma_p->kaddrp = NULL;
2395 dma_p->alength = 0;
2396 }
2397
2398 /*
2399 * hxge_m_start() -- start transmitting and receiving.
2400 *
2401 * This function is called by the MAC layer when the first
2402 * stream is opened to prepare the hardware for sending
2403 * and receiving packets.
2404 */
2405 static int
2406 hxge_m_start(void *arg)
2407 {
2408 p_hxge_t hxgep = (p_hxge_t)arg;
2409
2410 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2411
2412 MUTEX_ENTER(hxgep->genlock);
2413
2414 if (hxge_init(hxgep) != DDI_SUCCESS) {
2415 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2416 "<== hxge_m_start: initialization failed"));
2417 MUTEX_EXIT(hxgep->genlock);
2418 return (EIO);
2419 }
2420
2421 if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2422 /*
2423 * Start timer to check the system error and tx hangs
2424 */
2425 hxgep->hxge_timerid = hxge_start_timer(hxgep,
2426 hxge_check_hw_state, HXGE_CHECK_TIMER);
2427
2428 hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2429
2430 hxgep->timeout.link_status = 0;
2431 hxgep->timeout.report_link_status = B_TRUE;
2432 hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
2433
2434 /* Start the link status timer to check the link status */
2435 MUTEX_ENTER(&hxgep->timeout.lock);
2436 hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
2437 hxgep->timeout.ticks);
2438 MUTEX_EXIT(&hxgep->timeout.lock);
2439 }
2440
2441 MUTEX_EXIT(hxgep->genlock);
2442
2443 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2444
2445 return (0);
2446 }
2447
2448 /*
2449 * hxge_m_stop(): stop transmitting and receiving.
2450 */
2451 static void
2452 hxge_m_stop(void *arg)
2453 {
2454 p_hxge_t hxgep = (p_hxge_t)arg;
2455
2456 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2457
2458 if (hxgep->hxge_timerid) {
2459 hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2460 hxgep->hxge_timerid = 0;
2461 }
2462
2463 /* Stop the link status timer before unregistering */
2464 MUTEX_ENTER(&hxgep->timeout.lock);
2465 if (hxgep->timeout.id) {
2466 (void) untimeout(hxgep->timeout.id);
2467 hxgep->timeout.id = 0;
2468 }
2469 hxge_link_update(hxgep, LINK_STATE_DOWN);
2470 MUTEX_EXIT(&hxgep->timeout.lock);
2471
2472 MUTEX_ENTER(hxgep->genlock);
2473
2474 hxge_uninit(hxgep);
2475
2476 hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2477
2478 MUTEX_EXIT(hxgep->genlock);
2479
2480 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2481 }
2482
2483 static int
2484 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2485 {
2486 p_hxge_t hxgep = (p_hxge_t)arg;
2487 struct ether_addr addrp;
2488
2489 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2490
2491 bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2492
2493 if (add) {
2494 if (hxge_add_mcast_addr(hxgep, &addrp)) {
2495 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2496 "<== hxge_m_multicst: add multicast failed"));
2497 return (EINVAL);
2498 }
2499 } else {
2500 if (hxge_del_mcast_addr(hxgep, &addrp)) {
2501 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2502 "<== hxge_m_multicst: del multicast failed"));
2503 return (EINVAL);
2504 }
2505 }
2506
2507 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2508
2509 return (0);
2510 }
2511
2512 static int
2513 hxge_m_promisc(void *arg, boolean_t on)
2514 {
2515 p_hxge_t hxgep = (p_hxge_t)arg;
2516
2517 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2518
2519 if (hxge_set_promisc(hxgep, on)) {
2520 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2521 "<== hxge_m_promisc: set promisc failed"));
2522 return (EINVAL);
2523 }
2524
2525 HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2526
2527 return (0);
2528 }
2529
2530 static void
2531 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2532 {
2533 p_hxge_t hxgep = (p_hxge_t)arg;
2534 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
2535 boolean_t need_privilege;
2536 int err;
2537 int cmd;
2538
2539 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2540
2541 iocp = (struct iocblk *)mp->b_rptr;
2542 iocp->ioc_error = 0;
2543 need_privilege = B_TRUE;
2544 cmd = iocp->ioc_cmd;
2545
2546 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2547 switch (cmd) {
2548 default:
2549 miocnak(wq, mp, 0, EINVAL);
2550 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2551 return;
2552
2553 case LB_GET_INFO_SIZE:
2554 case LB_GET_INFO:
2555 case LB_GET_MODE:
2556 need_privilege = B_FALSE;
2557 break;
2558
2559 case LB_SET_MODE:
2560 break;
2561
2562 case ND_GET:
2563 need_privilege = B_FALSE;
2564 break;
2565 case ND_SET:
2566 break;
2567
2568 case HXGE_GET_TX_RING_SZ:
2569 case HXGE_GET_TX_DESC:
2570 case HXGE_TX_SIDE_RESET:
2571 case HXGE_RX_SIDE_RESET:
2572 case HXGE_GLOBAL_RESET:
2573 case HXGE_RESET_MAC:
2574 case HXGE_PUT_TCAM:
2575 case HXGE_GET_TCAM:
2576 case HXGE_RTRACE:
2577
2578 need_privilege = B_FALSE;
2579 break;
2580 }
2581
2582 if (need_privilege) {
2583 err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2584 if (err != 0) {
2585 miocnak(wq, mp, 0, err);
2586 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2587 "<== hxge_m_ioctl: no priv"));
2588 return;
2589 }
2590 }
2591
2592 switch (cmd) {
2593 case ND_GET:
2594 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
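/* FALLTHROUGH */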
2595 case ND_SET:
2596 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2597 hxge_param_ioctl(hxgep, wq, mp, iocp);
2598 break;
2599
2600 case LB_GET_MODE:
2601 case LB_SET_MODE:
2602 case LB_GET_INFO_SIZE:
2603 case LB_GET_INFO:
2604 hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2605 break;
2606
2607 case HXGE_PUT_TCAM:
2608 case HXGE_GET_TCAM:
2609 case HXGE_GET_TX_RING_SZ:
2610 case HXGE_GET_TX_DESC:
2611 case HXGE_TX_SIDE_RESET:
2612 case HXGE_RX_SIDE_RESET:
2613 case HXGE_GLOBAL_RESET:
2614 case HXGE_RESET_MAC:
2615 HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2616 "==> hxge_m_ioctl: cmd 0x%x", cmd));
2617 hxge_hw_ioctl(hxgep, wq, mp, iocp);
2618 break;
2619 }
2620
2621 HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2622 }
2623
2624 /*ARGSUSED*/
2625 static int
2626 hxge_tx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2627 {
2628 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2629 p_hxge_t hxgep;
2630 p_tx_ring_t ring;
2631
2632 ASSERT(rhp != NULL);
2633 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2634
2635 hxgep = rhp->hxgep;
2636
2637 /*
2638 * Get the ring pointer.
2639 */
2640 ring = hxgep->tx_rings->rings[rhp->index];
2641
2642 /*
2643 * Fill in the handle for the transmit.
2644 */
2645 MUTEX_ENTER(&ring->lock);
2646 rhp->started = B_TRUE;
2647 ring->ring_handle = rhp->ring_handle;
2648 MUTEX_EXIT(&ring->lock);
2649
2650 return (0);
2651 }
2652
2653 static void
2654 hxge_tx_ring_stop(mac_ring_driver_t rdriver)
2655 {
2656 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2657 p_hxge_t hxgep;
2658 p_tx_ring_t ring;
2659
2660 ASSERT(rhp != NULL);
2661 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_TDCS));
2662
2663 hxgep = rhp->hxgep;
2664 ring = hxgep->tx_rings->rings[rhp->index];
2665
2666 MUTEX_ENTER(&ring->lock);
2667 ring->ring_handle = (mac_ring_handle_t)NULL;
2668 rhp->started = B_FALSE;
2669 MUTEX_EXIT(&ring->lock);
2670 }
2671
2672 static int
2673 hxge_rx_ring_start(mac_ring_driver_t rdriver, uint64_t mr_gen_num)
2674 {
2675 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2676 p_hxge_t hxgep;
2677 p_rx_rcr_ring_t ring;
2678 int i;
2679
2680 ASSERT(rhp != NULL);
2681 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2682
2683 hxgep = rhp->hxgep;
2684
2685 /*
2686 * Get pointer to ring.
2687 */
2688 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2689
2690 MUTEX_ENTER(&ring->lock);
2691
2692 if (rhp->started) {
2693 MUTEX_EXIT(&ring->lock);
2694 return (0);
2695 }
2696
2697 /*
2698 * Set the ldvp and ldgp pointers to enable/disable
2699 * polling.
2700 */
2701 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2702 if ((hxgep->ldgvp->ldvp[i].is_rxdma == 1) &&
2703 (hxgep->ldgvp->ldvp[i].channel == rhp->index)) {
2704 ring->ldvp = &hxgep->ldgvp->ldvp[i];
2705 ring->ldgp = hxgep->ldgvp->ldvp[i].ldgp;
2706 break;
2707 }
2708 }
2709
2710 rhp->started = B_TRUE;
2711 ring->rcr_mac_handle = rhp->ring_handle;
2712 ring->rcr_gen_num = mr_gen_num;
2713 MUTEX_EXIT(&ring->lock);
2714
2715 return (0);
2716 }
2717
2718 static void
2719 hxge_rx_ring_stop(mac_ring_driver_t rdriver)
2720 {
2721 p_hxge_ring_handle_t rhp = (p_hxge_ring_handle_t)rdriver;
2722 p_hxge_t hxgep;
2723 p_rx_rcr_ring_t ring;
2724
2725 ASSERT(rhp != NULL);
2726 ASSERT((rhp->index >= 0) && (rhp->index < HXGE_MAX_RDCS));
2727
2728 hxgep = rhp->hxgep;
2729 ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];
2730
2731 MUTEX_ENTER(&ring->lock);
2732 rhp->started = B_FALSE;
2733 ring->rcr_mac_handle = NULL;
2734 ring->ldvp = NULL;
2735 ring->ldgp = NULL;
2736 MUTEX_EXIT(&ring->lock);
2737 }
2738
2739 static int
2740 hxge_rx_group_start(mac_group_driver_t gdriver)
2741 {
2742 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2743
2744 ASSERT(group->hxgep != NULL);
2745 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2746
2747 MUTEX_ENTER(group->hxgep->genlock);
2748 group->started = B_TRUE;
2749 MUTEX_EXIT(group->hxgep->genlock);
2750
2751 return (0);
2752 }
2753
2754 static void
2755 hxge_rx_group_stop(mac_group_driver_t gdriver)
2756 {
2757 hxge_ring_group_t *group = (hxge_ring_group_t *)gdriver;
2758
2759 ASSERT(group->hxgep != NULL);
2760 ASSERT(group->hxgep->hxge_mac_state == HXGE_MAC_STARTED);
2761 ASSERT(group->started == B_TRUE);
2762
2763 MUTEX_ENTER(group->hxgep->genlock);
2764 group->started = B_FALSE;
2765 MUTEX_EXIT(group->hxgep->genlock);
2766 }
2767
2768 static int
2769 hxge_mmac_get_slot(p_hxge_t hxgep, int *slot)
2770 {
2771 int i;
2772
2773 /*
2774 * Find an open slot.
2775 */
2776 for (i = 0; i < hxgep->mmac.total; i++) {
2777 if (!hxgep->mmac.addrs[i].set) {
2778 *slot = i;
2779 return (0);
2780 }
2781 }
2782
2783 return (ENXIO);
2784 }
2785
2786 static int
2787 hxge_mmac_set_addr(p_hxge_t hxgep, int slot, const uint8_t *addr)
2788 {
2789 struct ether_addr eaddr;
2790 hxge_status_t status = HXGE_OK;
2791
2792 bcopy(addr, (uint8_t *)&eaddr, ETHERADDRL);
2793
2794 /*
2795 * Set new interface local address and re-init device.
2796 * This is destructive to any other streams attached
2797 * to this device.
2798 */
2799 RW_ENTER_WRITER(&hxgep->filter_lock);
2800 status = hxge_pfc_set_mac_address(hxgep, slot, &eaddr);
2801 RW_EXIT(&hxgep->filter_lock);
2802 if (status != HXGE_OK)
2803 return (status);
2804
2805 hxgep->mmac.addrs[slot].set = B_TRUE;
2806 bcopy(addr, hxgep->mmac.addrs[slot].addr, ETHERADDRL);
2807 hxgep->mmac.available--;
2808 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2809 hxgep->mmac.addrs[slot].primary = B_TRUE;
2810
2811 return (0);
2812 }
2813
2814 static int
2815 hxge_mmac_find_addr(p_hxge_t hxgep, const uint8_t *addr, int *slot)
2816 {
2817 int i, result;
2818
2819 for (i = 0; i < hxgep->mmac.total; i++) {
2820 if (hxgep->mmac.addrs[i].set) {
2821 result = memcmp(hxgep->mmac.addrs[i].addr,
2822 addr, ETHERADDRL);
2823 if (result == 0) {
2824 *slot = i;
2825 return (0);
2826 }
2827 }
2828 }
2829
2830 return (EINVAL);
2831 }
2832
2833 static int
2834 hxge_mmac_unset_addr(p_hxge_t hxgep, int slot)
2835 {
2836 hxge_status_t status;
2837 int i;
2838
2839 status = hxge_pfc_clear_mac_address(hxgep, slot);
2840 if (status != HXGE_OK)
2841 return (status);
2842
2843 for (i = 0; i < ETHERADDRL; i++)
2844 hxgep->mmac.addrs[slot].addr[i] = 0;
2845
2846 hxgep->mmac.addrs[slot].set = B_FALSE;
2847 if (slot == HXGE_MAC_DEFAULT_ADDR_SLOT)
2848 hxgep->mmac.addrs[slot].primary = B_FALSE;
2849 hxgep->mmac.available++;
2850
2851 return (0);
2852 }
2853
2854 static int
2855 hxge_rx_group_add_mac(void *arg, const uint8_t *mac_addr)
2856 {
2857 hxge_ring_group_t *group = arg;
2858 p_hxge_t hxgep = group->hxgep;
2859 int slot = 0;
2860
2861 ASSERT(group->type == MAC_RING_TYPE_RX);
2862
2863 MUTEX_ENTER(hxgep->genlock);
2864
2865 /*
2866 * Find a slot for the address.
2867 */
2868 if (hxge_mmac_get_slot(hxgep, &slot) != 0) {
2869 MUTEX_EXIT(hxgep->genlock);
2870 return (ENOSPC);
2871 }
2872
2873 /*
2874 * Program the MAC address.
2875 */
2876 if (hxge_mmac_set_addr(hxgep, slot, mac_addr) != 0) {
2877 MUTEX_EXIT(hxgep->genlock);
2878 return (ENOSPC);
2879 }
2880
2881 MUTEX_EXIT(hxgep->genlock);
2882 return (0);
2883 }
2884
2885 static int
2886 hxge_rx_group_rem_mac(void *arg, const uint8_t *mac_addr)
2887 {
2888 hxge_ring_group_t *group = arg;
2889 p_hxge_t hxgep = group->hxgep;
2890 int rv, slot;
2891
2892 ASSERT(group->type == MAC_RING_TYPE_RX);
2893
2894 MUTEX_ENTER(hxgep->genlock);
2895
2896 if ((rv = hxge_mmac_find_addr(hxgep, mac_addr, &slot)) != 0) {
2897 MUTEX_EXIT(hxgep->genlock);
2898 return (rv);
2899 }
2900
2901 if ((rv = hxge_mmac_unset_addr(hxgep, slot)) != 0) {
2902 MUTEX_EXIT(hxgep->genlock);
2903 return (rv);
2904 }
2905
2906 MUTEX_EXIT(hxgep->genlock);
2907 return (0);
2908 }
2909
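/*
 * GLDv3 callback to export receive group information: the group
 * start/stop entry points and the MAC address add/remove handlers.
 */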
2910 static void
2911 hxge_group_get(void *arg, mac_ring_type_t type, int groupid,
2912 mac_group_info_t *infop, mac_group_handle_t gh)
2913 {
2914 p_hxge_t hxgep = arg;
2915 hxge_ring_group_t *group;
2916
2917 ASSERT(type == MAC_RING_TYPE_RX);
2918
2919 switch (type) {
2920 case MAC_RING_TYPE_RX:
2921 group = &hxgep->rx_groups[groupid];
2922 group->hxgep = hxgep;
2923 group->ghandle = gh;
2924 group->index = groupid;
2925 group->type = type;
2926
2927 infop->mgi_driver = (mac_group_driver_t)group;
2928 infop->mgi_start = hxge_rx_group_start;
2929 infop->mgi_stop = hxge_rx_group_stop;
2930 infop->mgi_addmac = hxge_rx_group_add_mac;
2931 infop->mgi_remmac = hxge_rx_group_rem_mac;
2932 infop->mgi_count = HXGE_MAX_RDCS;
2933 break;
2934
2935 case MAC_RING_TYPE_TX:
2936 default:
2937 break;
2938 }
2939 }
2940
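/*
 * Map a DMA channel to the interrupt handle table index of the logical
 * device group servicing it; returns -1 if no match is found.
 */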
2941 static int
2942 hxge_ring_get_htable_idx(p_hxge_t hxgep, mac_ring_type_t type, uint32_t channel)
2943 {
2944 int i;
2945
2946 ASSERT(hxgep->ldgvp != NULL);
2947
2948 switch (type) {
2949 case MAC_RING_TYPE_RX:
2950 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2951 if ((hxgep->ldgvp->ldvp[i].is_rxdma) &&
2952 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2953 return ((int)
2954 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2955 }
2956 }
2957 break;
2958
2959 case MAC_RING_TYPE_TX:
2960 for (i = 0; i < hxgep->ldgvp->maxldvs; i++) {
2961 if ((hxgep->ldgvp->ldvp[i].is_txdma) &&
2962 (hxgep->ldgvp->ldvp[i].channel == channel)) {
2963 return ((int)
2964 hxgep->ldgvp->ldvp[i].ldgp->htable_idx);
2965 }
2966 }
2967 break;
2968
2969 default:
2970 break;
2971 }
2972
2973 return (-1);
2974 }
2975
2976 /*
2977 * Callback function for the GLDv3 layer to register all rings.
2978 */
2979 /*ARGSUSED*/
2980 static void
2981 hxge_fill_ring(void *arg, mac_ring_type_t type, const int rg_index,
2982 const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
2983 {
2984 p_hxge_t hxgep = arg;
2985
2986 ASSERT(hxgep != NULL);
2987 ASSERT(infop != NULL);
2988
2989 switch (type) {
2990 case MAC_RING_TYPE_TX: {
2991 p_hxge_ring_handle_t rhp;
2992 mac_intr_t *mintr = &infop->mri_intr;
2993 p_hxge_intr_t intrp;
2994 int htable_idx;
2995
2996 ASSERT((index >= 0) && (index < HXGE_MAX_TDCS));
2997 rhp = &hxgep->tx_ring_handles[index];
2998 rhp->hxgep = hxgep;
2999 rhp->index = index;
3000 rhp->ring_handle = rh;
3001 infop->mri_driver = (mac_ring_driver_t)rhp;
3002 infop->mri_start = hxge_tx_ring_start;
3003 infop->mri_stop = hxge_tx_ring_stop;
3004 infop->mri_tx = hxge_tx_ring_send;
3005 infop->mri_stat = hxge_tx_ring_stat;
3006
3007 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3008 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3009 if (htable_idx >= 0)
3010 mintr->mi_ddi_handle = intrp->htable[htable_idx];
3011 else
3012 mintr->mi_ddi_handle = NULL;
3013 break;
3014 }
3015
3016 case MAC_RING_TYPE_RX: {
3017 p_hxge_ring_handle_t rhp;
3018 mac_intr_t hxge_mac_intr;
3019 p_hxge_intr_t intrp;
3020 int htable_idx;
3021
3022 ASSERT((index >= 0) && (index < HXGE_MAX_RDCS));
3023 rhp = &hxgep->rx_ring_handles[index];
3024 rhp->hxgep = hxgep;
3025 rhp->index = index;
3026 rhp->ring_handle = rh;
3027
3028 /*
3029 * Entrypoint to enable interrupt (disable poll) and
3030 * disable interrupt (enable poll).
3031 */
3032 hxge_mac_intr.mi_handle = (mac_intr_handle_t)rhp;
3033 hxge_mac_intr.mi_enable = (mac_intr_enable_t)hxge_disable_poll;
3034 hxge_mac_intr.mi_disable = (mac_intr_disable_t)hxge_enable_poll;
3035
3036 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3037 htable_idx = hxge_ring_get_htable_idx(hxgep, type, index);
3038 if (htable_idx >= 0)
3039 hxge_mac_intr.mi_ddi_handle = intrp->htable[htable_idx];
3040 else
3041 hxge_mac_intr.mi_ddi_handle = NULL;
3042
3043 infop->mri_driver = (mac_ring_driver_t)rhp;
3044 infop->mri_start = hxge_rx_ring_start;
3045 infop->mri_stop = hxge_rx_ring_stop;
3046 infop->mri_intr = hxge_mac_intr;
3047 infop->mri_poll = hxge_rx_poll;
3048 infop->mri_stat = hxge_rx_ring_stat;
3049 break;
3050 }
3051
3052 default:
3053 break;
3054 }
3055 }
3056
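/*
 * GLDv3 capability callback: advertise partial hardware checksum support
 * and the static RX/TX ring and group layout.
 */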
3057 /*ARGSUSED*/
3058 boolean_t
3059 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3060 {
3061 p_hxge_t hxgep = arg;
3062
3063 switch (cap) {
3064 case MAC_CAPAB_HCKSUM: {
3065 uint32_t *txflags = cap_data;
3066
3067 *txflags = HCKSUM_INET_PARTIAL;
3068 break;
3069 }
3070
3071 case MAC_CAPAB_RINGS: {
3072 mac_capab_rings_t *cap_rings = cap_data;
3073
3074 MUTEX_ENTER(hxgep->genlock);
3075 if (cap_rings->mr_type == MAC_RING_TYPE_RX) {
3076 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3077 cap_rings->mr_rnum = HXGE_MAX_RDCS;
3078 cap_rings->mr_rget = hxge_fill_ring;
3079 cap_rings->mr_gnum = HXGE_MAX_RX_GROUPS;
3080 cap_rings->mr_gget = hxge_group_get;
3081 cap_rings->mr_gaddring = NULL;
3082 cap_rings->mr_gremring = NULL;
3083 } else {
3084 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3085 cap_rings->mr_rnum = HXGE_MAX_TDCS;
3086 cap_rings->mr_rget = hxge_fill_ring;
3087 cap_rings->mr_gnum = 0;
3088 cap_rings->mr_gget = NULL;
3089 cap_rings->mr_gaddring = NULL;
3090 cap_rings->mr_gremring = NULL;
3091 }
3092 MUTEX_EXIT(hxgep->genlock);
3093 break;
3094 }
3095
3096 default:
3097 return (B_FALSE);
3098 }
3099 return (B_TRUE);
3100 }
3101
3102 static boolean_t
3103 hxge_param_locked(mac_prop_id_t pr_num)
3104 {
3105 /*
3106 * All adv_* parameters are locked (read-only) while
3107 * the device is in any sort of loopback mode ...
3108 */
3109 switch (pr_num) {
3110 case MAC_PROP_ADV_1000FDX_CAP:
3111 case MAC_PROP_EN_1000FDX_CAP:
3112 case MAC_PROP_ADV_1000HDX_CAP:
3113 case MAC_PROP_EN_1000HDX_CAP:
3114 case MAC_PROP_ADV_100FDX_CAP:
3115 case MAC_PROP_EN_100FDX_CAP:
3116 case MAC_PROP_ADV_100HDX_CAP:
3117 case MAC_PROP_EN_100HDX_CAP:
3118 case MAC_PROP_ADV_10FDX_CAP:
3119 case MAC_PROP_EN_10FDX_CAP:
3120 case MAC_PROP_ADV_10HDX_CAP:
3121 case MAC_PROP_EN_10HDX_CAP:
3122 case MAC_PROP_AUTONEG:
3123 case MAC_PROP_FLOWCTRL:
3124 return (B_TRUE);
3125 }
3126 return (B_FALSE);
3127 }
3128
3129 /*
3130 * callback functions for set/get of properties
3131 */
3132 static int
3133 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3134 uint_t pr_valsize, const void *pr_val)
3135 {
3136 hxge_t *hxgep = barg;
3137 p_hxge_stats_t statsp;
3138 int err = 0;
3139 uint32_t new_mtu, old_framesize, new_framesize;
3140
3141 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
3142
3143 statsp = hxgep->statsp;
3144 MUTEX_ENTER(hxgep->genlock);
3145 if (statsp->port_stats.lb_mode != hxge_lb_normal &&
3146 hxge_param_locked(pr_num)) {
3147 /*
3148 * All adv_* parameters are locked (read-only)
3149 * while the device is in any sort of loopback mode.
3150 */
3151 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3152 "==> hxge_m_setprop: loopback mode: read only"));
3153 MUTEX_EXIT(hxgep->genlock);
3154 return (EBUSY);
3155 }
3156
3157 switch (pr_num) {
3158 /*
3159 * These properties either do not exist or are read-only.
3160 */
3161 case MAC_PROP_EN_1000FDX_CAP:
3162 case MAC_PROP_EN_100FDX_CAP:
3163 case MAC_PROP_EN_10FDX_CAP:
3164 case MAC_PROP_EN_1000HDX_CAP:
3165 case MAC_PROP_EN_100HDX_CAP:
3166 case MAC_PROP_EN_10HDX_CAP:
3167 case MAC_PROP_ADV_1000FDX_CAP:
3168 case MAC_PROP_ADV_1000HDX_CAP:
3169 case MAC_PROP_ADV_100FDX_CAP:
3170 case MAC_PROP_ADV_100HDX_CAP:
3171 case MAC_PROP_ADV_10FDX_CAP:
3172 case MAC_PROP_ADV_10HDX_CAP:
3173 case MAC_PROP_STATUS:
3174 case MAC_PROP_SPEED:
3175 case MAC_PROP_DUPLEX:
3176 case MAC_PROP_AUTONEG:
3177 /*
3178 * Flow control is handled in the shared domain and
3179 * it is readonly here.
3180 */
3181 case MAC_PROP_FLOWCTRL:
3182 err = EINVAL;
3183 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3184 "==> hxge_m_setprop: read only property %d",
3185 pr_num));
3186 break;
3187
3188 case MAC_PROP_MTU:
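/*
 * Convert the requested MTU to a frame size, validate it, program the
 * VMAC, then notify the MAC layer; roll back on any failure.
 */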
3189 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3190 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3191 "==> hxge_m_setprop: set MTU: %d", new_mtu));
3192
3193 new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3194 if (new_framesize == hxgep->vmac.maxframesize) {
3195 err = 0;
3196 break;
3197 }
3198
3199 if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3200 err = EBUSY;
3201 break;
3202 }
3203
3204 if (new_framesize < MIN_FRAME_SIZE ||
3205 new_framesize > MAX_FRAME_SIZE) {
3206 err = EINVAL;
3207 break;
3208 }
3209
3210 old_framesize = hxgep->vmac.maxframesize;
3211 hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3212
3213 if (hxge_vmac_set_framesize(hxgep)) {
3214 hxgep->vmac.maxframesize =
3215 (uint16_t)old_framesize;
3216 err = EINVAL;
3217 break;
3218 }
3219
3220 err = mac_maxsdu_update(hxgep->mach, new_mtu);
3221 if (err) {
3222 hxgep->vmac.maxframesize =
3223 (uint16_t)old_framesize;
3224 (void) hxge_vmac_set_framesize(hxgep);
3225 }
3226
3227 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3228 "==> hxge_m_setprop: set MTU: %d maxframe %d",
3229 new_mtu, hxgep->vmac.maxframesize));
3230 break;
3231
3232 case MAC_PROP_PRIVATE:
3233 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3234 "==> hxge_m_setprop: private property"));
3235 err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3236 pr_val);
3237 break;
3238
3239 default:
3240 err = ENOTSUP;
3241 break;
3242 }
3243
3244 MUTEX_EXIT(hxgep->genlock);
3245
3246 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3247 "<== hxge_m_setprop (return %d)", err));
3248
3249 return (err);
3250 }
3251
3252 static int
3253 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3254 uint_t pr_valsize, void *pr_val)
3255 {
3256 hxge_t *hxgep = barg;
3257 p_hxge_stats_t statsp = hxgep->statsp;
3258 int err = 0;
3259 link_flowctrl_t fl;
3260 uint64_t tmp = 0;
3261 link_state_t ls;
3262
3263 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3264 "==> hxge_m_getprop: pr_num %d", pr_num));
3265
3266 switch (pr_num) {
3267 case MAC_PROP_DUPLEX:
3268 *(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3269 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3270 "==> hxge_m_getprop: duplex mode %d",
3271 *(uint8_t *)pr_val));
3272 break;
3273
3274 case MAC_PROP_SPEED:
3275 ASSERT(pr_valsize >= sizeof (uint64_t));
3276 tmp = statsp->mac_stats.link_speed * 1000000ull;
3277 bcopy(&tmp, pr_val, sizeof (tmp));
3278 break;
3279
3280 case MAC_PROP_STATUS:
3281 ASSERT(pr_valsize >= sizeof (link_state_t));
3282 if (!statsp->mac_stats.link_up)
3283 ls = LINK_STATE_DOWN;
3284 else
3285 ls = LINK_STATE_UP;
3286 bcopy(&ls, pr_val, sizeof (ls));
3287 break;
3288
3289 case MAC_PROP_FLOWCTRL:
3290 /*
3291 * Flow control is supported by the shared domain and
3292 * it is currently transmit only
3293 */
3294 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3295 fl = LINK_FLOWCTRL_TX;
3296 bcopy(&fl, pr_val, sizeof (fl));
3297 break;
3298 case MAC_PROP_AUTONEG:
3299 /* 10G link only and it is not negotiable */
3300 *(uint8_t *)pr_val = 0;
3301 break;
3302 case MAC_PROP_ADV_1000FDX_CAP:
3303 case MAC_PROP_ADV_100FDX_CAP:
3304 case MAC_PROP_ADV_10FDX_CAP:
3305 case MAC_PROP_ADV_1000HDX_CAP:
3306 case MAC_PROP_ADV_100HDX_CAP:
3307 case MAC_PROP_ADV_10HDX_CAP:
3308 case MAC_PROP_EN_1000FDX_CAP:
3309 case MAC_PROP_EN_100FDX_CAP:
3310 case MAC_PROP_EN_10FDX_CAP:
3311 case MAC_PROP_EN_1000HDX_CAP:
3312 case MAC_PROP_EN_100HDX_CAP:
3313 case MAC_PROP_EN_10HDX_CAP:
3314 err = ENOTSUP;
3315 break;
3316
3317 case MAC_PROP_PRIVATE:
3318 err = hxge_get_priv_prop(hxgep, pr_name, pr_valsize,
3319 pr_val);
3320 break;
3321
3322 default:
3323 err = EINVAL;
3324 break;
3325 }
3326
3327 HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3328
3329 return (err);
3330 }
3331
3332 static void
3333 hxge_m_propinfo(void *arg, const char *pr_name,
3334 mac_prop_id_t pr_num, mac_prop_info_handle_t prh)
3335 {
3336 _NOTE(ARGUNUSED(arg));
3337 switch (pr_num) {
3338 case MAC_PROP_DUPLEX:
3339 case MAC_PROP_SPEED:
3340 case MAC_PROP_STATUS:
3341 case MAC_PROP_AUTONEG:
3342 case MAC_PROP_FLOWCTRL:
3343 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3344 break;
3345
3346 case MAC_PROP_MTU:
3347 mac_prop_info_set_range_uint32(prh,
3348 MIN_FRAME_SIZE - MTU_TO_FRAME_SIZE,
3349 MAX_FRAME_SIZE - MTU_TO_FRAME_SIZE);
3350 break;
3351
3352 case MAC_PROP_PRIVATE: {
3353 char valstr[MAXNAMELEN];
3354
3355 bzero(valstr, sizeof (valstr));
3356
3357 /* Receive Interrupt Blanking Parameters */
3358 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3359 (void) snprintf(valstr, sizeof (valstr), "%d",
3360 RXDMA_RCR_TO_DEFAULT);
3361 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3362 (void) snprintf(valstr, sizeof (valstr), "%d",
3363 RXDMA_RCR_PTHRES_DEFAULT);
3364
3365 /* Classification and Load Distribution Configuration */
3366 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3367 strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3368 strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3369 strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3370 strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3371 strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3372 strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3373 strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3374 (void) snprintf(valstr, sizeof (valstr), "%d",
3375 HXGE_CLASS_TCAM_LOOKUP);
3376 }
3377
3378 if (strlen(valstr) > 0)
3379 mac_prop_info_set_default_str(prh, valstr);
3380 break;
3381 }
3382 }
3383 }
3384
3385
3386 /* ARGSUSED */
3387 static int
3388 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3389 const void *pr_val)
3390 {
3391 p_hxge_param_t param_arr = hxgep->param_arr;
3392 int err = 0;
3393
3394 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3395 "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3396
3397 if (pr_val == NULL) {
3398 return (EINVAL);
3399 }
3400
3401 /* Blanking */
3402 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3403 err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3404 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3405 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3406 err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3407 (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3408
3409 /* Classification */
3410 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3411 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3412 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3413 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3414 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3415 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3416 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3417 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3418 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3419 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3420 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3421 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3422 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3423 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3424 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3425 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3426 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3427 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3428 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3429 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3430 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3431 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3432 err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3433 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3434 } else {
3435 err = EINVAL;
3436 }
3437
3438 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3439 "<== hxge_set_priv_prop: err %d", err));
3440
3441 return (err);
3442 }
3443
3444 static int
3445 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3446 void *pr_val)
3447 {
3448 p_hxge_param_t param_arr = hxgep->param_arr;
3449 char valstr[MAXNAMELEN];
3450 int err = 0;
3451 uint_t strsize;
3452 int value = 0;
3453
3454 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3455 "==> hxge_get_priv_prop: property %s", pr_name));
3456
3457 /* Receive Interrupt Blanking Parameters */
3458 if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3459 value = hxgep->intr_timeout;
3460 } else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3461 value = hxgep->intr_threshold;
3462
3463 /* Classification and Load Distribution Configuration */
3464 } else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3465 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3466 (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3467
3468 value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3469 } else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3470 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3471 (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3472
3473 value = (int)param_arr[param_class_opt_ipv4_udp].value;
3474 } else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3475 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3476 (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3477
3478 value = (int)param_arr[param_class_opt_ipv4_ah].value;
3479 } else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3480 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3481 (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3482
3483 value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3484 } else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3485 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3486 (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3487
3488 value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3489 } else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3490 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3491 (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3492
3493 value = (int)param_arr[param_class_opt_ipv6_udp].value;
3494 } else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3495 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3496 (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3497
3498 value = (int)param_arr[param_class_opt_ipv6_ah].value;
3499 } else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3500 err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3501 (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3502
3503 value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3504 } else {
3505 err = EINVAL;
3506 }
3507
3508 if (err == 0) {
3509 (void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3510
3511 strsize = (uint_t)strlen(valstr);
3512 if (pr_valsize < strsize) {
3513 err = ENOBUFS;
3514 } else {
3515 (void) strlcpy(pr_val, valstr, pr_valsize);
3516 }
3517 }
3518
3519 HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3520 "<== hxge_get_priv_prop: return %d", err));
3521
3522 return (err);
3523 }
3524 /*
3525 * Module loading and removing entry points.
3526 */
3527 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3528 nodev, NULL, D_MP, NULL, NULL);
3529
3530 extern struct mod_ops mod_driverops;
3531
3532 #define HXGE_DESC_VER "HXGE 10Gb Ethernet Driver"
3533
3534 /*
3535 * Module linkage information for the kernel.
3536 */
3537 static struct modldrv hxge_modldrv = {
3538 &mod_driverops,
3539 HXGE_DESC_VER,
3540 &hxge_dev_ops
3541 };
3542
3543 static struct modlinkage modlinkage = {
3544 MODREV_1, (void *) &hxge_modldrv, NULL
3545 };
3546
3547 int
3548 _init(void)
3549 {
3550 int status;
3551
3552 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3553 mac_init_ops(&hxge_dev_ops, "hxge");
3554 status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3555 if (status != 0) {
3556 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3557 "failed to init device soft state"));
3558 mac_fini_ops(&hxge_dev_ops);
3559 goto _init_exit;
3560 }
3561
3562 status = mod_install(&modlinkage);
3563 if (status != 0) {
3564 ddi_soft_state_fini(&hxge_list);
3565 HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3566 goto _init_exit;
3567 }
3568
3569 MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3570
3571 _init_exit:
3572 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3573
3574 return (status);
3575 }
3576
3577 int
3578 _fini(void)
3579 {
3580 int status;
3581
3582 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3583
3584 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3585
3586 if (hxge_mblks_pending)
3587 return (EBUSY);
3588
3589 status = mod_remove(&modlinkage);
3590 if (status != DDI_SUCCESS) {
3591 HXGE_DEBUG_MSG((NULL, MOD_CTL,
3592 "Module removal failed 0x%08x", status));
3593 goto _fini_exit;
3594 }
3595
3596 mac_fini_ops(&hxge_dev_ops);
3597
3598 ddi_soft_state_fini(&hxge_list);
3599
3600 MUTEX_DESTROY(&hxge_common_lock);
3601
3602 _fini_exit:
3603 HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3604
3605 return (status);
3606 }
3607
3608 int
3609 _info(struct modinfo *modinfop)
3610 {
3611 int status;
3612
3613 HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3614 status = mod_info(&modlinkage, modinfop);
3615 HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3616
3617 return (status);
3618 }
3619
3620 /*ARGSUSED*/
3621 static hxge_status_t
3622 hxge_add_intrs(p_hxge_t hxgep)
3623 {
3624 int intr_types;
3625 int type = 0;
3626 int ddi_status = DDI_SUCCESS;
3627 hxge_status_t status = HXGE_OK;
3628
3629 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3630
3631 hxgep->hxge_intr_type.intr_registered = B_FALSE;
3632 hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3633 hxgep->hxge_intr_type.msi_intx_cnt = 0;
3634 hxgep->hxge_intr_type.intr_added = 0;
3635 hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3636 hxgep->hxge_intr_type.intr_type = 0;
3637
3638 if (hxge_msi_enable) {
3639 hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3640 }
3641
3642 /* Get the supported interrupt types */
3643 if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3644 != DDI_SUCCESS) {
3645 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3646 "ddi_intr_get_supported_types failed: status 0x%08x",
3647 ddi_status));
3648 return (HXGE_ERROR | HXGE_DDI_FAILED);
3649 }
3650
3651 hxgep->hxge_intr_type.intr_types = intr_types;
3652
3653 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3654 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3655
3656 /*
3657 * Pick the interrupt type (MSI-X, MSI, or fixed INTx) based on hxge_msi_enable:
3658 * (1): 1 - MSI
3659 * (2): 2 - MSI-X
3660 * others - FIXED
3661 */
3662 switch (hxge_msi_enable) {
3663 default:
3664 type = DDI_INTR_TYPE_FIXED;
3665 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3666 "use fixed (intx emulation) type %08x", type));
3667 break;
3668
3669 case 2:
3670 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3671 "ddi_intr_get_supported_types: 0x%08x", intr_types));
3672 if (intr_types & DDI_INTR_TYPE_MSIX) {
3673 type = DDI_INTR_TYPE_MSIX;
3674 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3675 "==> hxge_add_intrs: "
3676 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3677 } else if (intr_types & DDI_INTR_TYPE_MSI) {
3678 type = DDI_INTR_TYPE_MSI;
3679 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3680 "==> hxge_add_intrs: "
3681 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3682 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3683 type = DDI_INTR_TYPE_FIXED;
3684 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3685 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3686 }
3687 break;
3688
3689 case 1:
3690 if (intr_types & DDI_INTR_TYPE_MSI) {
3691 type = DDI_INTR_TYPE_MSI;
3692 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3693 "==> hxge_add_intrs: "
3694 "ddi_intr_get_supported_types: MSI 0x%08x", type));
3695 } else if (intr_types & DDI_INTR_TYPE_MSIX) {
3696 type = DDI_INTR_TYPE_MSIX;
3697 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3698 "==> hxge_add_intrs: "
3699 "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3700 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
3701 type = DDI_INTR_TYPE_FIXED;
3702 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3703 "==> hxge_add_intrs: "
3704 "ddi_intr_get_supported_types: MSXED0x%08x", type));
3705 }
3706 }
3707
3708 hxgep->hxge_intr_type.intr_type = type;
3709 if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3710 type == DDI_INTR_TYPE_FIXED) &&
3711 hxgep->hxge_intr_type.niu_msi_enable) {
3712 if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3713 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3714 " hxge_add_intrs: "
3715 " hxge_add_intrs_adv failed: status 0x%08x",
3716 status));
3717 return (status);
3718 } else {
3719 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3720 "interrupts registered : type %d", type));
3721 hxgep->hxge_intr_type.intr_registered = B_TRUE;
3722
3723 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3724 "\nAdded advanced hxge add_intr_adv "
3725 "intr type 0x%x\n", type));
3726
3727 return (status);
3728 }
3729 }
3730
3731 if (!hxgep->hxge_intr_type.intr_registered) {
3732 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3733 "==> hxge_add_intrs: failed to register interrupts"));
3734 return (HXGE_ERROR | HXGE_DDI_FAILED);
3735 }
3736
3737 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3738
3739 return (status);
3740 }
3741
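/*
 * hxge_add_intrs_adv
 *
 * Dispatch to the MSI/MSI-X or FIXED registration routine based on the
 * interrupt type recorded in hxgep->hxge_intr_type by hxge_add_intrs().
 */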
3742 /*ARGSUSED*/
3743 static hxge_status_t
3744 hxge_add_intrs_adv(p_hxge_t hxgep)
3745 {
3746 int intr_type;
3747 p_hxge_intr_t intrp;
3748 hxge_status_t status;
3749
3750 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3751
3752 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3753 intr_type = intrp->intr_type;
3754
3755 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3756 intr_type));
3757
3758 switch (intr_type) {
3759 case DDI_INTR_TYPE_MSI: /* 0x2 */
3760 case DDI_INTR_TYPE_MSIX: /* 0x4 */
3761 status = hxge_add_intrs_adv_type(hxgep, intr_type);
3762 break;
3763
3764 case DDI_INTR_TYPE_FIXED: /* 0x1 */
3765 status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3766 break;
3767
3768 default:
3769 status = HXGE_ERROR;
3770 break;
3771 }
3772
3773 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3774
3775 return (status);
3776 }
3777
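/*
 * hxge_add_intrs_adv_type
 *
 * Allocate MSI or MSI-X vectors, initialize the logical device groups
 * (hxge_ldgv_init()), and add one interrupt handler per group.  On any
 * failure the handlers and vectors allocated so far are torn down again.
 */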
3778 /*ARGSUSED*/
3779 static hxge_status_t
3780 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3781 {
3782 dev_info_t *dip = hxgep->dip;
3783 p_hxge_ldg_t ldgp;
3784 p_hxge_intr_t intrp;
3785 uint_t *inthandler;
3786 void *arg1, *arg2;
3787 int behavior;
3788 int nintrs, navail;
3789 int nactual, nrequired, nrequest;
3790 int inum = 0;
3791 int loop = 0;
3792 int x, y;
3793 int ddi_status = DDI_SUCCESS;
3794 hxge_status_t status = HXGE_OK;
3795
3796 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3797
3798 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3799
3800 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3801 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3802 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3803 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3804 "nintrs: %d", ddi_status, nintrs));
3805 return (HXGE_ERROR | HXGE_DDI_FAILED);
3806 }
3807
3808 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3809 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3810 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3811 		    "ddi_intr_get_navail() failed, status: 0x%x, "
3812 		    "navail: %d", ddi_status, navail));
3813 return (HXGE_ERROR | HXGE_DDI_FAILED);
3814 }
3815
3816 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3817 "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3818 int_type, nintrs, navail));
3819
3820 /* PSARC/2007/453 MSI-X interrupt limit override */
3821 if (int_type == DDI_INTR_TYPE_MSIX) {
3822 nrequest = hxge_create_msi_property(hxgep);
3823 if (nrequest < navail) {
3824 navail = nrequest;
3825 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3826 "hxge_add_intrs_adv_type: nintrs %d "
3827 "navail %d (nrequest %d)",
3828 nintrs, navail, nrequest));
3829 }
3830 }
3831
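	/*
	 * MSI vectors can only be allocated in power-of-two counts, so
	 * trim a non-power-of-two navail down to 16, 8, 4, 2 or 1.
	 */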
3832 if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3833 /* MSI must be power of 2 */
3834 if ((navail & 16) == 16) {
3835 navail = 16;
3836 } else if ((navail & 8) == 8) {
3837 navail = 8;
3838 } else if ((navail & 4) == 4) {
3839 navail = 4;
3840 } else if ((navail & 2) == 2) {
3841 navail = 2;
3842 } else {
3843 navail = 1;
3844 }
3845 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3846 "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3847 "navail %d", nintrs, navail));
3848 }
3849
3850 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3851 "requesting: intr type %d nintrs %d, navail %d",
3852 int_type, nintrs, navail));
3853
3854 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3855 DDI_INTR_ALLOC_NORMAL);
3856 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3857 intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3858
3859 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3860 navail, &nactual, behavior);
3861 if (ddi_status != DDI_SUCCESS || nactual == 0) {
3862 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3863 " ddi_intr_alloc() failed: %d", ddi_status));
3864 kmem_free(intrp->htable, intrp->intr_size);
3865 return (HXGE_ERROR | HXGE_DDI_FAILED);
3866 }
3867
3868 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3869 "ddi_intr_alloc() returned: navail %d nactual %d",
3870 navail, nactual));
3871
3872 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3873 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3874 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3875 " ddi_intr_get_pri() failed: %d", ddi_status));
3876 /* Free already allocated interrupts */
3877 for (y = 0; y < nactual; y++) {
3878 (void) ddi_intr_free(intrp->htable[y]);
3879 }
3880
3881 kmem_free(intrp->htable, intrp->intr_size);
3882 return (HXGE_ERROR | HXGE_DDI_FAILED);
3883 }
3884
3885 nrequired = 0;
3886 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3887 if (status != HXGE_OK) {
3888 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3889 		    "hxge_add_intrs_adv_type: hxge_ldgv_init "
3890 "failed: 0x%x", status));
3891 /* Free already allocated interrupts */
3892 for (y = 0; y < nactual; y++) {
3893 (void) ddi_intr_free(intrp->htable[y]);
3894 }
3895
3896 kmem_free(intrp->htable, intrp->intr_size);
3897 return (status);
3898 }
3899
3900 ldgp = hxgep->ldgvp->ldgp;
3901 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3902 "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3903
3904 if (nactual < nrequired)
3905 loop = nactual;
3906 else
3907 loop = nrequired;
3908
3909 for (x = 0; x < loop; x++, ldgp++) {
3910 ldgp->vector = (uint8_t)x;
3911 arg1 = ldgp->ldvp;
3912 arg2 = hxgep;
3913 if (ldgp->nldvs == 1) {
3914 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3915 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3916 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3917 "1-1 int handler (entry %d)\n",
3918 arg1, arg2, x));
3919 } else if (ldgp->nldvs > 1) {
3920 inthandler = (uint_t *)ldgp->sys_intr_handler;
3921 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3922 "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3923 "nldevs %d int handler (entry %d)\n",
3924 arg1, arg2, ldgp->nldvs, x));
3925 }
3926 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3927 "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3928 "htable 0x%llx", x, intrp->htable[x]));
3929
3930 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3931 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3932 DDI_SUCCESS) {
3933 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3934 "==> hxge_add_intrs_adv_type: failed #%d "
3935 "status 0x%x", x, ddi_status));
3936 for (y = 0; y < intrp->intr_added; y++) {
3937 (void) ddi_intr_remove_handler(
3938 intrp->htable[y]);
3939 }
3940
3941 /* Free already allocated intr */
3942 for (y = 0; y < nactual; y++) {
3943 (void) ddi_intr_free(intrp->htable[y]);
3944 }
3945 kmem_free(intrp->htable, intrp->intr_size);
3946
3947 (void) hxge_ldgv_uninit(hxgep);
3948
3949 return (HXGE_ERROR | HXGE_DDI_FAILED);
3950 }
3951
3952 ldgp->htable_idx = x;
3953 intrp->intr_added++;
3954 }
3955 intrp->msi_intx_cnt = nactual;
3956
3957 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3958 "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3959 navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3960
3961 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3962 (void) hxge_intr_ldgv_init(hxgep);
3963
3964 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3965
3966 return (status);
3967 }
3968
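/*
 * hxge_add_intrs_adv_type_fix
 *
 * FIXED (INTx) variant of the routine above: allocate legacy interrupts
 * with DDI_INTR_ALLOC_STRICT, initialize the logical device groups and
 * add one handler per group.
 */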
3969 /*ARGSUSED*/
3970 static hxge_status_t
3971 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3972 {
3973 dev_info_t *dip = hxgep->dip;
3974 p_hxge_ldg_t ldgp;
3975 p_hxge_intr_t intrp;
3976 uint_t *inthandler;
3977 void *arg1, *arg2;
3978 int behavior;
3979 int nintrs, navail;
3980 int nactual, nrequired;
3981 int inum = 0;
3982 int x, y;
3983 int ddi_status = DDI_SUCCESS;
3984 hxge_status_t status = HXGE_OK;
3985
3986 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3987 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3988
3989 ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3990 if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3991 HXGE_DEBUG_MSG((hxgep, INT_CTL,
3992 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3993 		    "nintrs: %d", ddi_status, nintrs));
3994 return (HXGE_ERROR | HXGE_DDI_FAILED);
3995 }
3996
3997 ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3998 if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3999 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4000 		    "ddi_intr_get_navail() failed, status: 0x%x, "
4001 		    "navail: %d", ddi_status, navail));
4002 return (HXGE_ERROR | HXGE_DDI_FAILED);
4003 }
4004
4005 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4006 	    "ddi_intr_get_navail() returned: nintrs %d, navail %d",
4007 nintrs, navail));
4008
4009 behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
4010 DDI_INTR_ALLOC_NORMAL);
4011 intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
4012 intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
4013 ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
4014 navail, &nactual, behavior);
4015 if (ddi_status != DDI_SUCCESS || nactual == 0) {
4016 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4017 " ddi_intr_alloc() failed: %d", ddi_status));
4018 kmem_free(intrp->htable, intrp->intr_size);
4019 return (HXGE_ERROR | HXGE_DDI_FAILED);
4020 }
4021
4022 if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
4023 (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
4024 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4025 " ddi_intr_get_pri() failed: %d", ddi_status));
4026 /* Free already allocated interrupts */
4027 for (y = 0; y < nactual; y++) {
4028 (void) ddi_intr_free(intrp->htable[y]);
4029 }
4030
4031 kmem_free(intrp->htable, intrp->intr_size);
4032 return (HXGE_ERROR | HXGE_DDI_FAILED);
4033 }
4034
4035 nrequired = 0;
4036 status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
4037 if (status != HXGE_OK) {
4038 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4039 "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
4040 "failed: 0x%x", status));
4041 /* Free already allocated interrupts */
4042 for (y = 0; y < nactual; y++) {
4043 (void) ddi_intr_free(intrp->htable[y]);
4044 }
4045
4046 kmem_free(intrp->htable, intrp->intr_size);
4047 return (status);
4048 }
4049
4050 ldgp = hxgep->ldgvp->ldgp;
4051 for (x = 0; x < nrequired; x++, ldgp++) {
4052 ldgp->vector = (uint8_t)x;
4053 arg1 = ldgp->ldvp;
4054 arg2 = hxgep;
4055 if (ldgp->nldvs == 1) {
4056 inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
4057 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4058 "hxge_add_intrs_adv_type_fix: "
4059 "1-1 int handler(%d) ldg %d ldv %d "
4060 "arg1 $%p arg2 $%p\n",
4061 x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
4062 } else if (ldgp->nldvs > 1) {
4063 inthandler = (uint_t *)ldgp->sys_intr_handler;
4064 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4065 "hxge_add_intrs_adv_type_fix: "
4066 			    "shared int handler(%d) nldvs %d ldg %d ldv %d "
4067 			    "arg1 0x%016llx arg2 0x%016llx\n",
4068 x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
4069 arg1, arg2));
4070 }
4071
4072 if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
4073 (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
4074 DDI_SUCCESS) {
4075 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
4076 "==> hxge_add_intrs_adv_type_fix: failed #%d "
4077 "status 0x%x", x, ddi_status));
4078 for (y = 0; y < intrp->intr_added; y++) {
4079 (void) ddi_intr_remove_handler(
4080 intrp->htable[y]);
4081 }
4082 for (y = 0; y < nactual; y++) {
4083 (void) ddi_intr_free(intrp->htable[y]);
4084 }
4085 /* Free already allocated intr */
4086 kmem_free(intrp->htable, intrp->intr_size);
4087
4088 (void) hxge_ldgv_uninit(hxgep);
4089
4090 return (HXGE_ERROR | HXGE_DDI_FAILED);
4091 }
4092 intrp->intr_added++;
4093 }
4094
4095 intrp->msi_intx_cnt = nactual;
4096
4097 (void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
4098
4099 status = hxge_intr_ldgv_init(hxgep);
4100
4101 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
4102
4103 return (status);
4104 }
4105
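/*
 * hxge_remove_intrs
 *
 * Disable, unregister and free every interrupt added by the routines
 * above, then release the logical device group state.
 */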
4106 /*ARGSUSED*/
4107 static void
4108 hxge_remove_intrs(p_hxge_t hxgep)
4109 {
4110 int i, inum;
4111 p_hxge_intr_t intrp;
4112
4113 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
4114 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4115 if (!intrp->intr_registered) {
4116 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4117 "<== hxge_remove_intrs: interrupts not registered"));
4118 return;
4119 }
4120
4121 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
4122
4123 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4124 (void) ddi_intr_block_disable(intrp->htable,
4125 intrp->intr_added);
4126 } else {
4127 for (i = 0; i < intrp->intr_added; i++) {
4128 (void) ddi_intr_disable(intrp->htable[i]);
4129 }
4130 }
4131
4132 for (inum = 0; inum < intrp->intr_added; inum++) {
4133 if (intrp->htable[inum]) {
4134 (void) ddi_intr_remove_handler(intrp->htable[inum]);
4135 }
4136 }
4137
4138 for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
4139 if (intrp->htable[inum]) {
4140 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4141 "hxge_remove_intrs: ddi_intr_free inum %d "
4142 "msi_intx_cnt %d intr_added %d",
4143 inum, intrp->msi_intx_cnt, intrp->intr_added));
4144
4145 (void) ddi_intr_free(intrp->htable[inum]);
4146 }
4147 }
4148
4149 kmem_free(intrp->htable, intrp->intr_size);
4150 intrp->intr_registered = B_FALSE;
4151 intrp->intr_enabled = B_FALSE;
4152 intrp->msi_intx_cnt = 0;
4153 intrp->intr_added = 0;
4154
4155 (void) hxge_ldgv_uninit(hxgep);
4156
4157 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4158 }
4159
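/*
 * hxge_intrs_enable
 *
 * Enable all registered interrupts, using ddi_intr_block_enable() when
 * the DDI_INTR_FLAG_BLOCK capability is present and per-vector
 * ddi_intr_enable() otherwise.
 */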
4160 /*ARGSUSED*/
4161 static void
4162 hxge_intrs_enable(p_hxge_t hxgep)
4163 {
4164 p_hxge_intr_t intrp;
4165 int i;
4166 int status;
4167
4168 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4169
4170 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4171
4172 if (!intrp->intr_registered) {
4173 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4174 "interrupts are not registered"));
4175 return;
4176 }
4177
4178 if (intrp->intr_enabled) {
4179 HXGE_DEBUG_MSG((hxgep, INT_CTL,
4180 "<== hxge_intrs_enable: already enabled"));
4181 return;
4182 }
4183
4184 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4185 status = ddi_intr_block_enable(intrp->htable,
4186 intrp->intr_added);
4187 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4188 "block enable - status 0x%x total inums #%d\n",
4189 status, intrp->intr_added));
4190 } else {
4191 for (i = 0; i < intrp->intr_added; i++) {
4192 status = ddi_intr_enable(intrp->htable[i]);
4193 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4194 "ddi_intr_enable:enable - status 0x%x "
4195 "total inums %d enable inum #%d\n",
4196 status, intrp->intr_added, i));
4197 if (status == DDI_SUCCESS) {
4198 intrp->intr_enabled = B_TRUE;
4199 }
4200 }
4201 }
4202
4203 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4204 }
4205
4206 /*ARGSUSED*/
4207 static void
4208 hxge_intrs_disable(p_hxge_t hxgep)
4209 {
4210 p_hxge_intr_t intrp;
4211 int i;
4212
4213 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4214
4215 intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4216
4217 if (!intrp->intr_registered) {
4218 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4219 "interrupts are not registered"));
4220 return;
4221 }
4222
4223 if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4224 (void) ddi_intr_block_disable(intrp->htable,
4225 intrp->intr_added);
4226 } else {
4227 for (i = 0; i < intrp->intr_added; i++) {
4228 (void) ddi_intr_disable(intrp->htable[i]);
4229 }
4230 }
4231
4232 intrp->intr_enabled = B_FALSE;
4233 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4234 }
4235
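/*
 * hxge_mac_register
 *
 * Register this instance with the GLDv3 MAC layer: allocate a
 * mac_register_t, fill in the callbacks, MAC address, SDU limits and
 * margin, then call mac_register().
 */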
4236 static hxge_status_t
4237 hxge_mac_register(p_hxge_t hxgep)
4238 {
4239 mac_register_t *macp;
4240 int status;
4241
4242 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4243
4244 if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4245 return (HXGE_ERROR);
4246
4247 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4248 macp->m_driver = hxgep;
4249 macp->m_dip = hxgep->dip;
4250 macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4251 macp->m_callbacks = &hxge_m_callbacks;
4252 macp->m_min_sdu = 0;
4253 macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4254 macp->m_margin = VLAN_TAGSZ;
4255 macp->m_priv_props = hxge_priv_props;
4256 macp->m_v12n = MAC_VIRT_LEVEL1;
4257
4258 HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4259 "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4260 macp->m_src_addr[0],
4261 macp->m_src_addr[1],
4262 macp->m_src_addr[2],
4263 macp->m_src_addr[3],
4264 macp->m_src_addr[4],
4265 macp->m_src_addr[5]));
4266
4267 status = mac_register(macp, &hxgep->mach);
4268 mac_free(macp);
4269
4270 if (status != 0) {
4271 cmn_err(CE_WARN,
4272 "hxge_mac_register failed (status %d instance %d)",
4273 status, hxgep->instance);
4274 return (HXGE_ERROR);
4275 }
4276
4277 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4278 "(instance %d)", hxgep->instance));
4279
4280 return (HXGE_OK);
4281 }
4282
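/*
 * hxge_init_common_dev
 *
 * Look up the shared per-Hydra entry for this device's parent dip on
 * hxge_hw_list; if none exists yet, allocate one and initialize its
 * configuration, TCAM and VLAN locks.
 */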
4283 static int
4284 hxge_init_common_dev(p_hxge_t hxgep)
4285 {
4286 p_hxge_hw_list_t hw_p;
4287 dev_info_t *p_dip;
4288
4289 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4290
4291 p_dip = hxgep->p_dip;
4292 MUTEX_ENTER(&hxge_common_lock);
4293
4294 /*
4295 	 * Loop through the existing per-Hydra hardware list.
4296 */
4297 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4298 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4299 "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4300 hw_p, p_dip));
4301 if (hw_p->parent_devp == p_dip) {
4302 hxgep->hxge_hw_p = hw_p;
4303 hw_p->ndevs++;
4304 hw_p->hxge_p = hxgep;
4305 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4306 			    "==> hxge_init_common_dev: "
4307 "hw_p $%p parent dip $%p ndevs %d (found)",
4308 hw_p, p_dip, hw_p->ndevs));
4309 break;
4310 }
4311 }
4312
4313 if (hw_p == NULL) {
4314 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4315 "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4316 hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4317 hw_p->parent_devp = p_dip;
4318 hw_p->magic = HXGE_MAGIC;
4319 hxgep->hxge_hw_p = hw_p;
4320 hw_p->ndevs++;
4321 hw_p->hxge_p = hxgep;
4322 hw_p->next = hxge_hw_list;
4323
4324 MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4325 MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4326 MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4327
4328 hxge_hw_list = hw_p;
4329 }
4330 MUTEX_EXIT(&hxge_common_lock);
4331 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4332 "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4333 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4334
4335 return (HXGE_OK);
4336 }
4337
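/*
 * hxge_uninit_common_dev
 *
 * Drop this instance's reference on the shared per-Hydra entry and,
 * when the last reference goes away, destroy its locks and unlink it
 * from hxge_hw_list.
 */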
4338 static void
4339 hxge_uninit_common_dev(p_hxge_t hxgep)
4340 {
4341 p_hxge_hw_list_t hw_p, h_hw_p;
4342 dev_info_t *p_dip;
4343
4344 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4345 if (hxgep->hxge_hw_p == NULL) {
4346 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4347 "<== hxge_uninit_common_dev (no common)"));
4348 return;
4349 }
4350
4351 MUTEX_ENTER(&hxge_common_lock);
4352 h_hw_p = hxge_hw_list;
4353 for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4354 p_dip = hw_p->parent_devp;
4355 if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4356 hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4357 hw_p->magic == HXGE_MAGIC) {
4358 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4359 "==> hxge_uninit_common_dev: "
4360 "hw_p $%p parent dip $%p ndevs %d (found)",
4361 hw_p, p_dip, hw_p->ndevs));
4362
4363 hxgep->hxge_hw_p = NULL;
4364 if (hw_p->ndevs) {
4365 hw_p->ndevs--;
4366 }
4367 hw_p->hxge_p = NULL;
4368 if (!hw_p->ndevs) {
4369 MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4370 MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4371 MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4372 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4373 "==> hxge_uninit_common_dev: "
4374 "hw_p $%p parent dip $%p ndevs %d (last)",
4375 hw_p, p_dip, hw_p->ndevs));
4376
4377 if (hw_p == hxge_hw_list) {
4378 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4379 "==> hxge_uninit_common_dev:"
4380 "remove head "
4381 "hw_p $%p parent dip $%p "
4382 "ndevs %d (head)",
4383 hw_p, p_dip, hw_p->ndevs));
4384 hxge_hw_list = hw_p->next;
4385 } else {
4386 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4387 "==> hxge_uninit_common_dev:"
4388 "remove middle "
4389 "hw_p $%p parent dip $%p "
4390 "ndevs %d (middle)",
4391 hw_p, p_dip, hw_p->ndevs));
4392 h_hw_p->next = hw_p->next;
4393 }
4394
4395 KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4396 }
4397 break;
4398 } else {
4399 h_hw_p = hw_p;
4400 }
4401 }
4402
4403 MUTEX_EXIT(&hxge_common_lock);
4404 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4405 "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4406
4407 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_uninit_common_dev"));
4408 }
4409
4410 #define HXGE_MSIX_ENTRIES 32
4411 #define HXGE_MSIX_WAIT_COUNT 10
4412 #define HXGE_MSIX_PARITY_CHECK_COUNT 30
4413
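/*
 * hxge_link_poll
 *
 * Timer callback: read CIP_LINK_STAT, report a link state change to the
 * MAC layer through hxge_link_update(), and re-arm the timeout.
 */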
4414 static void
4415 hxge_link_poll(void *arg)
4416 {
4417 p_hxge_t hxgep = (p_hxge_t)arg;
4418 hpi_handle_t handle;
4419 cip_link_stat_t link_stat;
4420 hxge_timeout *to = &hxgep->timeout;
4421
4422 handle = HXGE_DEV_HPI_HANDLE(hxgep);
4423 HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4424
4425 if (to->report_link_status ||
4426 (to->link_status != link_stat.bits.xpcs0_link_up)) {
4427 to->link_status = link_stat.bits.xpcs0_link_up;
4428 to->report_link_status = B_FALSE;
4429
4430 if (link_stat.bits.xpcs0_link_up) {
4431 hxge_link_update(hxgep, LINK_STATE_UP);
4432 } else {
4433 hxge_link_update(hxgep, LINK_STATE_DOWN);
4434 }
4435 }
4436
4437 /* Restart the link status timer to check the link status */
4438 MUTEX_ENTER(&to->lock);
4439 to->id = timeout(hxge_link_poll, arg, to->ticks);
4440 MUTEX_EXIT(&to->lock);
4441 }
4442
4443 static void
4444 hxge_link_update(p_hxge_t hxgep, link_state_t state)
4445 {
4446 p_hxge_stats_t statsp = (p_hxge_stats_t)hxgep->statsp;
4447
4448 mac_link_update(hxgep->mach, state);
4449 if (state == LINK_STATE_UP) {
4450 statsp->mac_stats.link_speed = 10000;
4451 statsp->mac_stats.link_duplex = 2;
4452 statsp->mac_stats.link_up = 1;
4453 } else {
4454 statsp->mac_stats.link_speed = 0;
4455 statsp->mac_stats.link_duplex = 0;
4456 statsp->mac_stats.link_up = 0;
4457 }
4458 }
4459
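/*
 * hxge_msix_init
 *
 * Write a distinct pattern to each of the HXGE_MSIX_ENTRIES MSI-X table
 * entries through the MSI-X BAR, then read every entry back.
 */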
4460 static void
4461 hxge_msix_init(p_hxge_t hxgep)
4462 {
4463 uint32_t data0;
4464 uint32_t data1;
4465 uint32_t data2;
4466 int i;
4467 uint32_t msix_entry0;
4468 uint32_t msix_entry1;
4469 uint32_t msix_entry2;
4470 uint32_t msix_entry3;
4471
4472 	/* Write the table entries through the MSI-X BAR instead of indirect access */
4473 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4474 data0 = 0xffffffff - i;
4475 data1 = 0xffffffff - i - 1;
4476 data2 = 0xffffffff - i - 2;
4477
4478 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16, data0);
4479 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 4, data1);
4480 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 8, data2);
4481 HXGE_REG_WR32(hxgep->hpi_msi_handle, i * 16 + 12, 0);
4482 }
4483
4484 /* Initialize ram data out buffer. */
4485 for (i = 0; i < HXGE_MSIX_ENTRIES; i++) {
4486 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16, &msix_entry0);
4487 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 4, &msix_entry1);
4488 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 8, &msix_entry2);
4489 HXGE_REG_RD32(hxgep->hpi_msi_handle, i * 16 + 12, &msix_entry3);
4490 }
4491 }
4492
4493 /*
4494 * The following function is to support
4495 * PSARC/2007/453 MSI-X interrupt limit override.
4496 */
4497 static int
4498 hxge_create_msi_property(p_hxge_t hxgep)
4499 {
4500 int nmsi;
4501 extern int ncpus;
4502
4503 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==>hxge_create_msi_property"));
4504
4505 (void) ddi_prop_create(DDI_DEV_T_NONE, hxgep->dip,
4506 DDI_PROP_CANSLEEP, "#msix-request", NULL, 0);
4507 /*
4508 * The maximum MSI-X requested will be 8.
4509 	 * If the # of CPUs is less than 8, we will request
4510 * # MSI-X based on the # of CPUs.
4511 */
4512 if (ncpus >= HXGE_MSIX_REQUEST_10G) {
4513 nmsi = HXGE_MSIX_REQUEST_10G;
4514 } else {
4515 nmsi = ncpus;
4516 }
4517
4518 HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4519 "==>hxge_create_msi_property(10G): exists 0x%x (nmsi %d)",
4520 ddi_prop_exists(DDI_DEV_T_NONE, hxgep->dip,
4521 DDI_PROP_CANSLEEP, "#msix-request"), nmsi));
4522
4523 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<==hxge_create_msi_property"));
4524 return (nmsi);
4525 }
4526