1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/nxge/nxge_impl.h>
28 #include <sys/nxge/nxge_txdma.h>
29 #include <sys/nxge/nxge_hio.h>
30 #include <npi_tx_rd64.h>
31 #include <npi_tx_wr64.h>
32 #include <sys/llc1.h>
33
34 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
35 uint32_t nxge_tx_minfree = 64;
36 uint32_t nxge_tx_intr_thres = 0;
37 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
38 uint32_t nxge_tx_tiny_pack = 1;
39 uint32_t nxge_tx_use_bcopy = 1;
40
41 extern uint32_t nxge_tx_ring_size;
42 extern uint32_t nxge_bcopy_thresh;
43 extern uint32_t nxge_dvma_thresh;
44 extern uint32_t nxge_dma_stream_thresh;
45 extern dma_method_t nxge_force_dma;
46 extern uint32_t nxge_cksum_offload;
47
48 /* Device register access attributes for PIO. */
49 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
50 /* Device descriptor access attributes for DMA. */
51 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
52 /* Device buffer access attributes for DMA. */
53 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
54 extern ddi_dma_attr_t nxge_desc_dma_attr;
55 extern ddi_dma_attr_t nxge_tx_dma_attr;
56
57 extern void nxge_tx_ring_task(void *arg);
58
59 static nxge_status_t nxge_map_txdma(p_nxge_t, int);
60
61 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
62
63 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
64 p_nxge_dma_common_t *, p_tx_ring_t *,
65 uint32_t, p_nxge_dma_common_t *,
66 p_tx_mbox_t *);
67 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
68
69 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
70 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
71 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
72
73 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
74 p_nxge_dma_common_t *, p_tx_ring_t,
75 p_tx_mbox_t *);
76 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
77 p_tx_ring_t, p_tx_mbox_t);
78
79 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
80 p_tx_ring_t, p_tx_mbox_t);
81 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
82
83 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
84 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
85 p_nxge_ldv_t, tx_cs_t);
86 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
87 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
88 uint16_t, p_tx_ring_t);
89
90 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
91 p_tx_ring_t ring_p, uint16_t channel);
92
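/*
 * nxge_init_txdma_channels
 *
 * Bind and initialize all of the TDCs in this instance's transmit set.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 * 	Walks the logical groups in <nxgep>'s tx_set and calls
 * 	nxge_grp_dc_add() for every TDC in each group's map.  If any
 * 	channel fails to initialize, every channel added so far is
 * 	removed again and NXGE_ERROR is returned.
 *
 * NPI/NXGE function calls:
 * 	nxge_grp_dc_add()
 * 	nxge_grp_dc_remove()
 */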
93 nxge_status_t
94 nxge_init_txdma_channels(p_nxge_t nxgep)
95 {
96 nxge_grp_set_t *set = &nxgep->tx_set;
97 int i, tdc, count;
98 nxge_grp_t *group;
99 dc_map_t map;
100 int dev_gindex;
101
102 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
103
104 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
105 if ((1 << i) & set->lg.map) {
106 group = set->group[i];
107 dev_gindex =
108 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
109 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
110 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
111 if ((1 << tdc) & map) {
112 if ((nxge_grp_dc_add(nxgep,
113 group, VP_BOUND_TX, tdc)))
114 goto init_txdma_channels_exit;
115 }
116 }
117 }
118 if (++count == set->lg.count)
119 break;
120 }
121
122 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
123 return (NXGE_OK);
124
125 init_txdma_channels_exit:
126 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
127 if ((1 << i) & set->lg.map) {
128 group = set->group[i];
129 dev_gindex =
130 nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
131 map = nxgep->pt_config.tdc_grps[dev_gindex].map;
132 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
133 if ((1 << tdc) & map) {
134 nxge_grp_dc_remove(nxgep,
135 VP_BOUND_TX, tdc);
136 }
137 }
138 }
139 if (++count == set->lg.count)
140 break;
141 }
142
143 return (NXGE_ERROR);
144
145 }
146
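/*
 * nxge_init_txdma_channel
 *
 * Map and start a single TDC, then set up its kstats.
 *
 * Arguments:
 * 	nxge
 * 	channel		The channel to initialize.
 *
 * NPI/NXGE function calls:
 * 	nxge_map_txdma()
 * 	nxge_txdma_hw_start()
 * 	nxge_setup_tdc_kstats()
 */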
147 nxge_status_t
148 nxge_init_txdma_channel(
149 p_nxge_t nxge,
150 int channel)
151 {
152 nxge_status_t status;
153
154 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
155
156 status = nxge_map_txdma(nxge, channel);
157 if (status != NXGE_OK) {
158 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
159 "<== nxge_init_txdma_channel: status 0x%x", status));
160 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
161 return (status);
162 }
163
164 status = nxge_txdma_hw_start(nxge, channel);
165 if (status != NXGE_OK) {
166 (void) nxge_unmap_txdma_channel(nxge, channel);
167 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
168 return (status);
169 }
170
171 if (!nxge->statsp->tdc_ksp[channel])
172 nxge_setup_tdc_kstats(nxge, channel);
173
174 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
175
176 return (status);
177 }
178
179 void
180 nxge_uninit_txdma_channels(p_nxge_t nxgep)
181 {
182 nxge_grp_set_t *set = &nxgep->tx_set;
183 int tdc;
184
185 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
186
187 if (set->owned.map == 0) {
188 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
189 "nxge_uninit_txdma_channels: no channels"));
190 return;
191 }
192
193 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
194 if ((1 << tdc) & set->owned.map) {
195 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
196 }
197 }
198
199 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
200 }
201
202 void
203 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
204 {
205 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
206
207 if (nxgep->statsp->tdc_ksp[channel]) {
208 kstat_delete(nxgep->statsp->tdc_ksp[channel]);
209 nxgep->statsp->tdc_ksp[channel] = 0;
210 }
211
212 if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
213 goto nxge_uninit_txdma_channel_exit;
214
215 nxge_unmap_txdma_channel(nxgep, channel);
216
217 nxge_uninit_txdma_channel_exit:
218 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
219 }
220
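/*
 * nxge_setup_dma_common
 *
 * Carve a sub-area of <entries> blocks of <size> bytes each out of the
 * DMA area described by <src_p>, and describe that sub-area in <dest_p>.
 * The source area's kernel address, DMA cookie, and remaining length are
 * then advanced past the carved region, so repeated calls hand out
 * consecutive, non-overlapping pieces of the same parent DMA area.
 */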
221 void
222 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
223 uint32_t entries, uint32_t size)
224 {
225 size_t tsize;
226 *dest_p = *src_p;
227 tsize = size * entries;
228 dest_p->alength = tsize;
229 dest_p->nblocks = entries;
230 dest_p->block_size = size;
231 dest_p->offset += tsize;
232
233 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
234 src_p->alength -= tsize;
235 src_p->dma_cookie.dmac_laddress += tsize;
236 src_p->dma_cookie.dmac_size -= tsize;
237 }
238
239 /*
240 * nxge_reset_txdma_channel
241 *
242 * Reset a TDC.
243 *
244 * Arguments:
245 * nxgep
246 * channel The channel to reset.
247 * reg_data The current TX_CS.
248 *
249 * Notes:
250 *
251 * NPI/NXGE function calls:
252 * npi_txdma_channel_reset()
253 * npi_txdma_channel_control()
254 *
255 * Registers accessed:
256 * TX_CS DMC+0x40028 Transmit Control And Status
257 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick
258 *
259 * Context:
260 * Any domain
261 */
262 nxge_status_t
263 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
264 {
265 npi_status_t rs = NPI_SUCCESS;
266 nxge_status_t status = NXGE_OK;
267 npi_handle_t handle;
268
269 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
270
271 handle = NXGE_DEV_NPI_HANDLE(nxgep);
272 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
273 rs = npi_txdma_channel_reset(handle, channel);
274 } else {
275 rs = npi_txdma_channel_control(handle, TXDMA_RESET,
276 channel);
277 }
278
279 if (rs != NPI_SUCCESS) {
280 status = NXGE_ERROR | rs;
281 }
282
283 /*
284 * Reset the tail (kick) register to 0.
285 * (Hardware will not reset it; a fatal Tx overflow
286 * error results if the tail is not set to 0 after reset.)
287 */
288 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
289
290 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
291 return (status);
292 }
293
294 /*
295 * nxge_init_txdma_channel_event_mask
296 *
297 * Enable interrupts for a set of events.
298 *
299 * Arguments:
300 * nxgep
301 * channel The channel whose event mask to set.
302 * mask_p The events to enable.
303 *
304 * Notes:
305 *
306 * NPI/NXGE function calls:
307 * npi_txdma_event_mask()
308 *
309 * Registers accessed:
310 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
311 *
312 * Context:
313 * Any domain
314 */
315 nxge_status_t
316 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
317 p_tx_dma_ent_msk_t mask_p)
318 {
319 npi_handle_t handle;
320 npi_status_t rs = NPI_SUCCESS;
321 nxge_status_t status = NXGE_OK;
322
323 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
324 "<== nxge_init_txdma_channel_event_mask"));
325
326 handle = NXGE_DEV_NPI_HANDLE(nxgep);
327 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
328 if (rs != NPI_SUCCESS) {
329 status = NXGE_ERROR | rs;
330 }
331
332 return (status);
333 }
334
335 /*
336 * nxge_init_txdma_channel_cntl_stat
337 *
338 * Initialize a TDC's transmit control and status (TX_CS) register.
339 *
340 * Arguments:
341 * nxgep
342 * channel The channel whose TX_CS register to initialize.
343 *
344 * Notes:
345 *
346 * NPI/NXGE function calls:
347 * npi_txdma_control_status()
348 *
349 * Registers accessed:
350 * TX_CS DMC+0x40028 Transmit Control And Status
351 *
352 * Context:
353 * Any domain
354 */
355 nxge_status_t
356 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
357 uint64_t reg_data)
358 {
359 npi_handle_t handle;
360 npi_status_t rs = NPI_SUCCESS;
361 nxge_status_t status = NXGE_OK;
362
363 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
364 "<== nxge_init_txdma_channel_cntl_stat"));
365
366 handle = NXGE_DEV_NPI_HANDLE(nxgep);
367 rs = npi_txdma_control_status(handle, OP_SET, channel,
368 (p_tx_cs_t)&reg_data);
369
370 if (rs != NPI_SUCCESS) {
371 status = NXGE_ERROR | rs;
372 }
373
374 return (status);
375 }
376
377 /*
378 * nxge_enable_txdma_channel
379 *
380 * Enable a TDC.
381 *
382 * Arguments:
383 * nxgep
384 * channel The channel to enable.
385 * tx_desc_p channel's transmit descriptor ring.
386 * mbox_p channel's mailbox.
387 *
388 * Notes:
389 *
390 * NPI/NXGE function calls:
391 * npi_txdma_ring_config()
392 * npi_txdma_mbox_config()
393 * npi_txdma_channel_init_enable()
394 *
395 * Registers accessed:
396 * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
397 * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
398 * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
399 * TX_CS DMC+0x40028 Transmit Control And Status
400 *
401 * Context:
402 * Any domain
403 */
404 nxge_status_t
405 nxge_enable_txdma_channel(p_nxge_t nxgep,
406 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
407 {
408 npi_handle_t handle;
409 npi_status_t rs = NPI_SUCCESS;
410 nxge_status_t status = NXGE_OK;
411
412 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
413
414 handle = NXGE_DEV_NPI_HANDLE(nxgep);
415 /*
416 * Use configuration data composed at init time.
417 * Write to hardware the transmit ring configurations.
418 */
419 rs = npi_txdma_ring_config(handle, OP_SET, channel,
420 (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
421
422 if (rs != NPI_SUCCESS) {
423 return (NXGE_ERROR | rs);
424 }
425
426 if (isLDOMguest(nxgep)) {
427 /* Add interrupt handler for this channel. */
428 if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
429 return (NXGE_ERROR);
430 }
431
432 /* Write to hardware the mailbox */
433 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
434 (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
435
436 if (rs != NPI_SUCCESS) {
437 return (NXGE_ERROR | rs);
438 }
439
440 /* Start the DMA engine. */
441 rs = npi_txdma_channel_init_enable(handle, channel);
442
443 if (rs != NPI_SUCCESS) {
444 return (NXGE_ERROR | rs);
445 }
446
447 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
448
449 return (status);
450 }
451
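/*
 * nxge_fill_tx_hdr
 *
 * Fill in the Neptune internal transmit packet header for <mp>.
 *
 * Notes:
 * 	If fill_len is set, only the total transfer length is recorded.
 * 	Otherwise the Ethernet/VLAN, IPv4/IPv6, and TCP/UDP headers are
 * 	parsed to set the pad count, L3/L4 start offsets, IP header
 * 	length, and checksum-enable bits.  For UDP, when hardware
 * 	checksum offload is disabled (nxge_cksum_offload == 0), a
 * 	software partial checksum is computed and stored at
 * 	<stuff_offset> instead.
 */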
452 void
453 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
454 boolean_t l4_cksum, int pkt_len, uint8_t npads,
455 p_tx_pkt_hdr_all_t pkthdrp,
456 t_uscalar_t start_offset,
457 t_uscalar_t stuff_offset)
458 {
459 p_tx_pkt_header_t hdrp;
460 p_mblk_t nmp;
461 uint64_t tmp;
462 size_t mblk_len;
463 size_t iph_len;
464 size_t hdrs_size;
465 uint8_t hdrs_buf[sizeof (struct ether_header) +
466 64 + sizeof (uint32_t)];
467 uint8_t *cursor;
468 uint8_t *ip_buf;
469 uint16_t eth_type;
470 uint8_t ipproto;
471 boolean_t is_vlan = B_FALSE;
472 size_t eth_hdr_size;
473
474 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
475
476 /*
477 * Caller should zero out the headers first.
478 */
479 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
480
481 if (fill_len) {
482 NXGE_DEBUG_MSG((NULL, TX_CTL,
483 "==> nxge_fill_tx_hdr: pkt_len %d "
484 "npads %d", pkt_len, npads));
485 tmp = (uint64_t)pkt_len;
486 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
487 goto fill_tx_header_done;
488 }
489
490 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
491
492 /*
493 * mp is the original data packet (does not include the
494 * Neptune transmit header).
495 */
496 nmp = mp;
497 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
498 "mp $%p b_rptr $%p len %d",
499 mp, nmp->b_rptr, MBLKL(nmp)));
500 /* copy ether_header from mblk to hdrs_buf */
501 cursor = &hdrs_buf[0];
502 tmp = sizeof (struct ether_vlan_header);
503 while ((nmp != NULL) && (tmp > 0)) {
504 size_t buflen;
505 mblk_len = MBLKL(nmp);
506 buflen = min((size_t)tmp, mblk_len);
507 bcopy(nmp->b_rptr, cursor, buflen);
508 cursor += buflen;
509 tmp -= buflen;
510 nmp = nmp->b_cont;
511 }
512
513 nmp = mp;
514 mblk_len = MBLKL(nmp);
515 ip_buf = NULL;
516 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
517 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
518 "ether type 0x%x", eth_type, hdrp->value));
519
520 if (eth_type < ETHERMTU) {
521 tmp = 1ull;
522 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
523 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
524 "value 0x%llx", hdrp->value));
525 if (*(hdrs_buf + sizeof (struct ether_header))
526 == LLC_SNAP_SAP) {
527 eth_type = ntohs(*((uint16_t *)(hdrs_buf +
528 sizeof (struct ether_header) + 6)));
529 NXGE_DEBUG_MSG((NULL, TX_CTL,
530 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
531 eth_type));
532 } else {
533 goto fill_tx_header_done;
534 }
535 } else if (eth_type == VLAN_ETHERTYPE) {
536 tmp = 1ull;
537 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
538
539 eth_type = ntohs(((struct ether_vlan_header *)
540 hdrs_buf)->ether_type);
541 is_vlan = B_TRUE;
542 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
543 "value 0x%llx", hdrp->value));
544 }
545
546 if (!is_vlan) {
547 eth_hdr_size = sizeof (struct ether_header);
548 } else {
549 eth_hdr_size = sizeof (struct ether_vlan_header);
550 }
551
552 switch (eth_type) {
553 case ETHERTYPE_IP:
554 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
555 ip_buf = nmp->b_rptr + eth_hdr_size;
556 mblk_len -= eth_hdr_size;
557 iph_len = ((*ip_buf) & 0x0f);
558 if (mblk_len > (iph_len + sizeof (uint32_t))) {
559 ip_buf = nmp->b_rptr;
560 ip_buf += eth_hdr_size;
561 } else {
562 ip_buf = NULL;
563 }
564
565 }
566 if (ip_buf == NULL) {
567 hdrs_size = 0;
568 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
569 while ((nmp) && (hdrs_size <
570 sizeof (hdrs_buf))) {
571 mblk_len = (size_t)nmp->b_wptr -
572 (size_t)nmp->b_rptr;
573 if (mblk_len >=
574 (sizeof (hdrs_buf) - hdrs_size))
575 mblk_len = sizeof (hdrs_buf) -
576 hdrs_size;
577 bcopy(nmp->b_rptr,
578 &hdrs_buf[hdrs_size], mblk_len);
579 hdrs_size += mblk_len;
580 nmp = nmp->b_cont;
581 }
582 ip_buf = hdrs_buf;
583 ip_buf += eth_hdr_size;
584 iph_len = ((*ip_buf) & 0x0f);
585 }
586
587 ipproto = ip_buf[9];
588
589 tmp = (uint64_t)iph_len;
590 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
591 tmp = (uint64_t)(eth_hdr_size >> 1);
592 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
593
594 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
595 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
596 "tmp 0x%x",
597 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
598 ipproto, tmp));
599 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
600 "value 0x%llx", hdrp->value));
601
602 break;
603
604 case ETHERTYPE_IPV6:
605 hdrs_size = 0;
606 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
607 while ((nmp) && (hdrs_size <
608 sizeof (hdrs_buf))) {
609 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
610 if (mblk_len >=
611 (sizeof (hdrs_buf) - hdrs_size))
612 mblk_len = sizeof (hdrs_buf) -
613 hdrs_size;
614 bcopy(nmp->b_rptr,
615 &hdrs_buf[hdrs_size], mblk_len);
616 hdrs_size += mblk_len;
617 nmp = nmp->b_cont;
618 }
619 ip_buf = hdrs_buf;
620 ip_buf += eth_hdr_size;
621
622 tmp = 1ull;
623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
624
625 tmp = (eth_hdr_size >> 1);
626 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
627
628 /* byte 6 is the next header protocol */
629 ipproto = ip_buf[6];
630
631 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
632 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
633 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
634 ipproto));
635 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
636 "value 0x%llx", hdrp->value));
637
638 break;
639
640 default:
641 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
642 goto fill_tx_header_done;
643 }
644
645 switch (ipproto) {
646 case IPPROTO_TCP:
647 NXGE_DEBUG_MSG((NULL, TX_CTL,
648 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
649 if (l4_cksum) {
650 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
651 hdrp->value |=
652 (((uint64_t)(start_offset >> 1)) <<
653 TX_PKT_HEADER_L4START_SHIFT);
654 hdrp->value |=
655 (((uint64_t)(stuff_offset >> 1)) <<
656 TX_PKT_HEADER_L4STUFF_SHIFT);
657
658 NXGE_DEBUG_MSG((NULL, TX_CTL,
659 "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
660 "value 0x%llx", hdrp->value));
661 }
662
663 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
664 "value 0x%llx", hdrp->value));
665 break;
666
667 case IPPROTO_UDP:
668 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
669 if (l4_cksum) {
670 if (!nxge_cksum_offload) {
671 uint16_t *up;
672 uint16_t cksum;
673 t_uscalar_t stuff_len;
674
675 /*
676 * The checksum field has the
677 * partial checksum.
678 * IP_CSUM() macro calls ip_cksum() which
679 * can add in the partial checksum.
680 */
681 cksum = IP_CSUM(mp, start_offset, 0);
682 stuff_len = stuff_offset;
683 nmp = mp;
684 mblk_len = MBLKL(nmp);
685 while ((nmp != NULL) &&
686 (mblk_len < stuff_len)) {
687 stuff_len -= mblk_len;
688 nmp = nmp->b_cont;
689 if (nmp)
690 mblk_len = MBLKL(nmp);
691 }
692 ASSERT(nmp);
693 up = (uint16_t *)(nmp->b_rptr + stuff_len);
694
695 *up = cksum;
696 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
697 NXGE_DEBUG_MSG((NULL, TX_CTL,
698 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
699 "use sw cksum "
700 "write to $%p cksum 0x%x content up 0x%x",
701 stuff_len,
702 up,
703 cksum,
704 *up));
705 } else {
706 /* Hardware will compute the full checksum */
707 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
708 hdrp->value |=
709 (((uint64_t)(start_offset >> 1)) <<
710 TX_PKT_HEADER_L4START_SHIFT);
711 hdrp->value |=
712 (((uint64_t)(stuff_offset >> 1)) <<
713 TX_PKT_HEADER_L4STUFF_SHIFT);
714
715 NXGE_DEBUG_MSG((NULL, TX_CTL,
716 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
717 " use partial checksum "
718 "cksum 0x%x ",
719 "value 0x%llx",
720 stuff_offset,
721 IP_CSUM(mp, start_offset, 0),
722 hdrp->value));
723 }
724 }
725
726 NXGE_DEBUG_MSG((NULL, TX_CTL,
727 "==> nxge_tx_pkt_hdr_init: UDP"
728 "value 0x%llx", hdrp->value));
729 break;
730
731 default:
732 goto fill_tx_header_done;
733 }
734
735 fill_tx_header_done:
736 NXGE_DEBUG_MSG((NULL, TX_CTL,
737 "==> nxge_fill_tx_hdr: pkt_len %d "
738 "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
739
740 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
741 }
742
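/*
 * nxge_tx_pkt_header_reserve
 *
 * Allocate a new message block, reserve TX_PKT_HEADER_SIZE bytes at the
 * end of its buffer for the Neptune transmit packet header, and link the
 * original message <mp> behind it.  Returns the new head of the chain,
 * or NULL if the allocation fails.
 */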
743 /*ARGSUSED*/
744 p_mblk_t
745 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
746 {
747 p_mblk_t newmp = NULL;
748
749 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
750 NXGE_DEBUG_MSG((NULL, TX_CTL,
751 "<== nxge_tx_pkt_header_reserve: allocb failed"));
752 return (NULL);
753 }
754
755 NXGE_DEBUG_MSG((NULL, TX_CTL,
756 "==> nxge_tx_pkt_header_reserve: get new mp"));
757 DB_TYPE(newmp) = M_DATA;
758 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
759 linkb(newmp, mp);
760 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
761
762 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
763 "b_rptr $%p b_wptr $%p",
764 newmp->b_rptr, newmp->b_wptr));
765
766 NXGE_DEBUG_MSG((NULL, TX_CTL,
767 "<== nxge_tx_pkt_header_reserve: use new mp"));
768
769 return (newmp);
770 }
771
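/*
 * nxge_tx_pkt_nmblocks
 *
 * Estimate how many DMA gather blocks are needed to transmit <mp> and
 * return the total transfer length through <tot_xfer_len_p>.  Blocks
 * longer than TX_MAX_TRANSFER_LENGTH (4K) are split with dupb(), and if
 * the chain would exceed the hardware limit of TX_MAX_GATHER_POINTERS,
 * the remainder of the chain is pulled up into a single block with
 * msgpullup().  Returns 0 if an allocation fails.
 */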
772 int
773 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
774 {
775 uint_t nmblks;
776 ssize_t len;
777 uint_t pkt_len;
778 p_mblk_t nmp, bmp, tmp;
779 uint8_t *b_wptr;
780
781 NXGE_DEBUG_MSG((NULL, TX_CTL,
782 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
783 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
784
785 nmp = mp;
786 bmp = mp;
787 nmblks = 0;
788 pkt_len = 0;
789 *tot_xfer_len_p = 0;
790
791 while (nmp) {
792 len = MBLKL(nmp);
793 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
794 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
795 len, pkt_len, nmblks,
796 *tot_xfer_len_p));
797
798 if (len <= 0) {
799 bmp = nmp;
800 nmp = nmp->b_cont;
801 NXGE_DEBUG_MSG((NULL, TX_CTL,
802 "==> nxge_tx_pkt_nmblocks: "
803 "len (0) pkt_len %d nmblks %d",
804 pkt_len, nmblks));
805 continue;
806 }
807
808 *tot_xfer_len_p += len;
809 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
810 "len %d pkt_len %d nmblks %d tot_xfer_len %d",
811 len, pkt_len, nmblks,
812 *tot_xfer_len_p));
813
814 if (len < nxge_bcopy_thresh) {
815 NXGE_DEBUG_MSG((NULL, TX_CTL,
816 "==> nxge_tx_pkt_nmblocks: "
817 "len %d (< thresh) pkt_len %d nmblks %d",
818 len, pkt_len, nmblks));
819 if (pkt_len == 0)
820 nmblks++;
821 pkt_len += len;
822 if (pkt_len >= nxge_bcopy_thresh) {
823 pkt_len = 0;
824 len = 0;
825 nmp = bmp;
826 }
827 } else {
828 NXGE_DEBUG_MSG((NULL, TX_CTL,
829 "==> nxge_tx_pkt_nmblocks: "
830 "len %d (> thresh) pkt_len %d nmblks %d",
831 len, pkt_len, nmblks));
832 pkt_len = 0;
833 nmblks++;
834 /*
835 * Hardware limits the transfer length to 4K.
836 * If len is more than 4K, we need to break
837 * it up to at most 2 more blocks.
838 */
839 if (len > TX_MAX_TRANSFER_LENGTH) {
840 uint32_t nsegs;
841
842 nsegs = 1;
843 NXGE_DEBUG_MSG((NULL, TX_CTL,
844 "==> nxge_tx_pkt_nmblocks: "
845 "len %d pkt_len %d nmblks %d nsegs %d",
846 len, pkt_len, nmblks, nsegs));
847 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
848 ++nsegs;
849 }
850 do {
851 b_wptr = nmp->b_rptr +
852 TX_MAX_TRANSFER_LENGTH;
853 nmp->b_wptr = b_wptr;
854 if ((tmp = dupb(nmp)) == NULL) {
855 return (0);
856 }
857 tmp->b_rptr = b_wptr;
858 tmp->b_wptr = nmp->b_wptr;
859 tmp->b_cont = nmp->b_cont;
860 nmp->b_cont = tmp;
861 nmblks++;
862 if (--nsegs) {
863 nmp = tmp;
864 }
865 } while (nsegs);
866 nmp = tmp;
867 }
868 }
869
870 /*
871 * Hardware limits the transmit gather pointers to 15.
872 */
873 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
874 TX_MAX_GATHER_POINTERS) {
875 NXGE_DEBUG_MSG((NULL, TX_CTL,
876 "==> nxge_tx_pkt_nmblocks: pull msg - "
877 "len %d pkt_len %d nmblks %d",
878 len, pkt_len, nmblks));
879 /* Pull all message blocks from b_cont */
880 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
881 return (0);
882 }
883 freemsg(nmp->b_cont);
884 nmp->b_cont = tmp;
885 pkt_len = 0;
886 }
887 bmp = nmp;
888 nmp = nmp->b_cont;
889 }
890
891 NXGE_DEBUG_MSG((NULL, TX_CTL,
892 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
893 "nmblks %d len %d tot_xfer_len %d",
894 mp->b_rptr, mp->b_wptr, nmblks,
895 MBLKL(mp), *tot_xfer_len_p));
896
897 return (nmblks);
898 }
899
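/*
 * nxge_txdma_reclaim
 *
 * Reclaim transmit descriptors that the hardware has completed.
 *
 * Notes:
 * 	Walks the descriptor ring from the software read index up to the
 * 	hardware-maintained head, unbinding DMA handles, freeing message
 * 	blocks, and updating the per-channel statistics.  Returns B_TRUE
 * 	if the ring now has room for <nmblks> more descriptors (keeping
 * 	the TX_FULL_MARK reserve), B_FALSE otherwise.
 *
 * Registers accessed:
 * 	TX_RING_HDL	DMC+0x40010	Transmit Ring Head Low
 */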
900 boolean_t
901 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
902 {
903 boolean_t status = B_TRUE;
904 p_nxge_dma_common_t tx_desc_dma_p;
905 nxge_dma_common_t desc_area;
906 p_tx_desc_t tx_desc_ring_vp;
907 p_tx_desc_t tx_desc_p;
908 p_tx_desc_t tx_desc_pp;
909 tx_desc_t r_tx_desc;
910 p_tx_msg_t tx_msg_ring;
911 p_tx_msg_t tx_msg_p;
912 npi_handle_t handle;
913 tx_ring_hdl_t tx_head;
914 uint32_t pkt_len;
915 uint_t tx_rd_index;
916 uint16_t head_index, tail_index;
917 uint8_t tdc;
918 boolean_t head_wrap, tail_wrap;
919 p_nxge_tx_ring_stats_t tdc_stats;
920 int rc;
921
922 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
923
924 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
925 (nmblks != 0));
926 NXGE_DEBUG_MSG((nxgep, TX_CTL,
927 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
928 tx_ring_p->descs_pending, nxge_reclaim_pending,
929 nmblks));
930 if (!status) {
931 tx_desc_dma_p = &tx_ring_p->tdc_desc;
932 desc_area = tx_ring_p->tdc_desc;
933 handle = NXGE_DEV_NPI_HANDLE(nxgep);
934 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
935 tx_desc_ring_vp =
936 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
937 tx_rd_index = tx_ring_p->rd_index;
938 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
939 tx_msg_ring = tx_ring_p->tx_msg_ring;
940 tx_msg_p = &tx_msg_ring[tx_rd_index];
941 tdc = tx_ring_p->tdc;
942 tdc_stats = tx_ring_p->tdc_stats;
943 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
944 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
945 }
946
947 tail_index = tx_ring_p->wr_index;
948 tail_wrap = tx_ring_p->wr_index_wrap;
949
950 NXGE_DEBUG_MSG((nxgep, TX_CTL,
951 "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
952 "tail_index %d tail_wrap %d "
953 "tx_desc_p $%p ($%p) ",
954 tdc, tx_rd_index, tail_index, tail_wrap,
955 tx_desc_p, (*(uint64_t *)tx_desc_p)));
956 /*
957 * Read the hardware maintained transmit head
958 * and wrap around bit.
959 */
960 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
961 head_index = tx_head.bits.ldw.head;
962 head_wrap = tx_head.bits.ldw.wrap;
963 NXGE_DEBUG_MSG((nxgep, TX_CTL,
964 "==> nxge_txdma_reclaim: "
965 "tx_rd_index %d tail %d tail_wrap %d "
966 "head %d wrap %d",
967 tx_rd_index, tail_index, tail_wrap,
968 head_index, head_wrap));
969
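/*
 * When the head index equals the tail index the ring is either
 * completely empty or completely full; the TXDMA_RING_EMPTY and
 * TXDMA_RING_FULL checks below use the wrap bits to tell the two
 * cases apart.
 */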
970 if (head_index == tail_index) {
971 if (TXDMA_RING_EMPTY(head_index, head_wrap,
972 tail_index, tail_wrap) &&
973 (head_index == tx_rd_index)) {
974 NXGE_DEBUG_MSG((nxgep, TX_CTL,
975 "==> nxge_txdma_reclaim: EMPTY"));
976 return (B_TRUE);
977 }
978
979 NXGE_DEBUG_MSG((nxgep, TX_CTL,
980 "==> nxge_txdma_reclaim: Checking "
981 "if ring full"));
982 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
983 tail_wrap)) {
984 NXGE_DEBUG_MSG((nxgep, TX_CTL,
985 "==> nxge_txdma_reclaim: full"));
986 return (B_FALSE);
987 }
988 }
989
990 NXGE_DEBUG_MSG((nxgep, TX_CTL,
991 "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
992
993 tx_desc_pp = &r_tx_desc;
994 while ((tx_rd_index != head_index) &&
995 (tx_ring_p->descs_pending != 0)) {
996
997 NXGE_DEBUG_MSG((nxgep, TX_CTL,
998 "==> nxge_txdma_reclaim: Checking if pending"));
999
1000 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1001 "==> nxge_txdma_reclaim: "
1002 "descs_pending %d ",
1003 tx_ring_p->descs_pending));
1004
1005 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1006 "==> nxge_txdma_reclaim: "
1007 "(tx_rd_index %d head_index %d "
1008 "(tx_desc_p $%p)",
1009 tx_rd_index, head_index,
1010 tx_desc_p));
1011
1012 tx_desc_pp->value = tx_desc_p->value;
1013 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1014 "==> nxge_txdma_reclaim: "
1015 "(tx_rd_index %d head_index %d "
1016 "tx_desc_p $%p (desc value 0x%llx) ",
1017 tx_rd_index, head_index,
1018 tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
1019
1020 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1021 "==> nxge_txdma_reclaim: dump desc:"));
1022
1023 pkt_len = tx_desc_pp->bits.hdw.tr_len;
1024 tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
1025 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
1026 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1027 "==> nxge_txdma_reclaim: pkt_len %d "
1028 "tdc channel %d opackets %d",
1029 pkt_len,
1030 tdc,
1031 tdc_stats->opackets));
1032
1033 if (tx_msg_p->flags.dma_type == USE_DVMA) {
1034 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1035 "tx_desc_p = $%p "
1036 "tx_desc_pp = $%p "
1037 "index = %d",
1038 tx_desc_p,
1039 tx_desc_pp,
1040 tx_ring_p->rd_index));
1041 (void) dvma_unload(tx_msg_p->dvma_handle,
1042 0, -1);
1043 tx_msg_p->dvma_handle = NULL;
1044 if (tx_ring_p->dvma_wr_index ==
1045 tx_ring_p->dvma_wrap_mask) {
1046 tx_ring_p->dvma_wr_index = 0;
1047 } else {
1048 tx_ring_p->dvma_wr_index++;
1049 }
1050 tx_ring_p->dvma_pending--;
1051 } else if (tx_msg_p->flags.dma_type ==
1052 USE_DMA) {
1053 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1054 "==> nxge_txdma_reclaim: "
1055 "USE DMA"));
1056 if (rc = ddi_dma_unbind_handle
1057 (tx_msg_p->dma_handle)) {
1058 cmn_err(CE_WARN, "!nxge_reclaim: "
1059 "ddi_dma_unbind_handle "
1060 "failed. status %d", rc);
1061 }
1062 }
1063 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1064 "==> nxge_txdma_reclaim: count packets"));
1065 /*
1066 * count a chained packet only once.
1067 */
1068 if (tx_msg_p->tx_message != NULL) {
1069 freemsg(tx_msg_p->tx_message);
1070 tx_msg_p->tx_message = NULL;
1071 }
1072
1073 tx_msg_p->flags.dma_type = USE_NONE;
1074 tx_rd_index = tx_ring_p->rd_index;
1075 tx_rd_index = (tx_rd_index + 1) &
1076 tx_ring_p->tx_wrap_mask;
1077 tx_ring_p->rd_index = tx_rd_index;
1078 tx_ring_p->descs_pending--;
1079 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
1080 tx_msg_p = &tx_msg_ring[tx_rd_index];
1081 }
1082
1083 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1084 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1085 if (status) {
1086 (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
1087 }
1088 } else {
1089 status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
1090 (int)tx_ring_p->descs_pending - TX_FULL_MARK));
1091 }
1092
1093 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1094 "<== nxge_txdma_reclaim status = 0x%08x", status));
1095
1096 return (status);
1097 }
1098
1099 /*
1100 * nxge_tx_intr
1101 *
1102 * Process a TDC interrupt
1103 *
1104 * Arguments:
1105 * arg1 A Logical Device state Vector (LSV) data structure.
1106 * arg2 nxge_t *
1107 *
1108 * Notes:
1109 *
1110 * NPI/NXGE function calls:
1111 * npi_txdma_control_status()
1112 * npi_intr_ldg_mgmt_set()
1113 *
1114 * nxge_tx_err_evnts()
1115 * nxge_txdma_reclaim()
1116 *
1117 * Registers accessed:
1118 * TX_CS DMC+0x40028 Transmit Control And Status
1119 * PIO_LDSV
1120 *
1121 * Context:
1122 * Any domain
1123 */
1124 uint_t
1125 nxge_tx_intr(void *arg1, void *arg2)
1126 {
1127 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
1128 p_nxge_t nxgep = (p_nxge_t)arg2;
1129 p_nxge_ldg_t ldgp;
1130 uint8_t channel;
1131 uint32_t vindex;
1132 npi_handle_t handle;
1133 tx_cs_t cs;
1134 p_tx_ring_t *tx_rings;
1135 p_tx_ring_t tx_ring_p;
1136 npi_status_t rs = NPI_SUCCESS;
1137 uint_t serviced = DDI_INTR_UNCLAIMED;
1138 nxge_status_t status = NXGE_OK;
1139
1140 if (ldvp == NULL) {
1141 NXGE_DEBUG_MSG((NULL, INT_CTL,
1142 "<== nxge_tx_intr: nxgep $%p ldvp $%p",
1143 nxgep, ldvp));
1144 return (DDI_INTR_UNCLAIMED);
1145 }
1146
1147 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
1148 nxgep = ldvp->nxgep;
1149 }
1150 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1151 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
1152 nxgep, ldvp));
1153
1154 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1155 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1156 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1157 "<== nxge_tx_intr: interface not started or intialized"));
1158 return (DDI_INTR_CLAIMED);
1159 }
1160
1161 /*
1162 * This interrupt handler is for a specific
1163 * transmit dma channel.
1164 */
1165 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1166 /* Get the control and status for this channel. */
1167 channel = ldvp->channel;
1168 ldgp = ldvp->ldgp;
1169 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1170 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
1171 "channel %d",
1172 nxgep, ldvp, channel));
1173
1174 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
1175 vindex = ldvp->vdma_index;
1176 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1177 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
1178 channel, vindex, rs));
1179 if (!rs && cs.bits.ldw.mk) {
1180 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1181 "==> nxge_tx_intr:channel %d ring index %d "
1182 "status 0x%08x (mk bit set)",
1183 channel, vindex, rs));
1184 tx_rings = nxgep->tx_rings->rings;
1185 tx_ring_p = tx_rings[vindex];
1186 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1187 "==> nxge_tx_intr:channel %d ring index %d "
1188 "status 0x%08x (mk bit set, calling reclaim)",
1189 channel, vindex, rs));
1190
1191 nxge_tx_ring_task((void *)tx_ring_p);
1192 }
1193
1194 /*
1195 * Process other transmit control and status.
1196 * Check the ldv state.
1197 */
1198 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
1199 /*
1200 * Rearm this logical group if this is a single device
1201 * group.
1202 */
1203 if (ldgp->nldvs == 1) {
1204 NXGE_DEBUG_MSG((nxgep, INT_CTL,
1205 "==> nxge_tx_intr: rearm"));
1206 if (status == NXGE_OK) {
1207 if (isLDOMguest(nxgep)) {
1208 nxge_hio_ldgimgn(nxgep, ldgp);
1209 } else {
1210 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
1211 B_TRUE, ldgp->ldg_timer);
1212 }
1213 }
1214 }
1215
1216 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
1217 serviced = DDI_INTR_CLAIMED;
1218 return (serviced);
1219 }
1220
1221 void
1222 nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
1223 {
1224 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
1225
1226 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
1227
1228 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
1229 }
1230
1231 void
1232 nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
1233 {
1234 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
1235
1236 (void) nxge_txdma_stop(nxgep);
1237
1238 (void) nxge_fixup_txdma_rings(nxgep);
1239 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
1240 (void) nxge_tx_mac_enable(nxgep);
1241 (void) nxge_txdma_hw_kick(nxgep);
1242
1243 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
1244 }
1245
1246 npi_status_t
1247 nxge_txdma_channel_disable(
1248 nxge_t *nxge,
1249 int channel)
1250 {
1251 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
1252 npi_status_t rs;
1253 tdmc_intr_dbg_t intr_dbg;
1254
1255 /*
1256 * Stop the dma channel and wait for the stop-done.
1257 * If the stop-done bit is not present, then force
1258 * an error so TXC will stop.
1259 * All channels bound to this port need to be stopped
1260 * and reset after injecting an interrupt error.
1261 */
1262 rs = npi_txdma_channel_disable(handle, channel);
1263 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1264 "==> nxge_txdma_channel_disable(%d) "
1265 "rs 0x%x", channel, rs));
1266 if (rs != NPI_SUCCESS) {
1267 /* Inject any error */
1268 intr_dbg.value = 0;
1269 intr_dbg.bits.ldw.nack_pref = 1;
1270 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1271 "==> nxge_txdma_hw_mode: "
1272 "channel %d (stop failed 0x%x) "
1273 "(inject err)", rs, channel));
1274 (void) npi_txdma_inj_int_error_set(
1275 handle, channel, &intr_dbg);
1276 rs = npi_txdma_channel_disable(handle, channel);
1277 NXGE_DEBUG_MSG((nxge, MEM3_CTL,
1278 "==> nxge_txdma_hw_mode: "
1279 "channel %d (stop again 0x%x) "
1280 "(after inject err)",
1281 channel, rs));
1282 }
1283
1284 return (rs);
1285 }
1286
1287 /*
1288 * nxge_txdma_hw_mode
1289 *
1290 * Toggle all TDCs on (enable) or off (disable).
1291 *
1292 * Arguments:
1293 * nxgep
1294 * enable Enable or disable a TDC.
1295 *
1296 * Notes:
1297 *
1298 * NPI/NXGE function calls:
1299 * npi_txdma_channel_enable(TX_CS)
1300 * npi_txdma_channel_disable(TX_CS)
1301 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1302 *
1303 * Registers accessed:
1304 * TX_CS DMC+0x40028 Transmit Control And Status
1305 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1306 *
1307 * Context:
1308 * Any domain
1309 */
1310 nxge_status_t
1311 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
1312 {
1313 nxge_grp_set_t *set = &nxgep->tx_set;
1314
1315 npi_handle_t handle;
1316 nxge_status_t status;
1317 npi_status_t rs;
1318 int tdc;
1319
1320 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1321 "==> nxge_txdma_hw_mode: enable mode %d", enable));
1322
1323 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
1324 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1325 "<== nxge_txdma_mode: not initialized"));
1326 return (NXGE_ERROR);
1327 }
1328
1329 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1330 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1331 "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
1332 return (NXGE_ERROR);
1333 }
1334
1335 /* Enable or disable all of the TDCs owned by us. */
1336 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1337 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1338 if ((1 << tdc) & set->owned.map) {
1339 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1340 if (ring) {
1341 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1342 "==> nxge_txdma_hw_mode: channel %d", tdc));
1343 if (enable) {
1344 rs = npi_txdma_channel_enable
1345 (handle, tdc);
1346 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1347 "==> nxge_txdma_hw_mode: "
1348 "channel %d (enable) rs 0x%x",
1349 tdc, rs));
1350 } else {
1351 rs = nxge_txdma_channel_disable
1352 (nxgep, tdc);
1353 }
1354 }
1355 }
1356 }
1357
1358 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1359
1360 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1361 "<== nxge_txdma_hw_mode: status 0x%x", status));
1362
1363 return (status);
1364 }
1365
1366 void
1367 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
1368 {
1369 npi_handle_t handle;
1370
1371 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1372 "==> nxge_txdma_enable_channel: channel %d", channel));
1373
1374 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1375 /* enable the transmit dma channels */
1376 (void) npi_txdma_channel_enable(handle, channel);
1377
1378 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
1379 }
1380
1381 void
1382 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
1383 {
1384 npi_handle_t handle;
1385
1386 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
1387 "==> nxge_txdma_disable_channel: channel %d", channel));
1388
1389 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1390 /* stop the transmit dma channels */
1391 (void) npi_txdma_channel_disable(handle, channel);
1392
1393 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
1394 }
1395
1396 /*
1397 * nxge_txdma_stop_inj_err
1398 *
1399 * Stop a TDC. If at first we don't succeed, inject an error.
1400 *
1401 * Arguments:
1402 * nxgep
1403 * channel The channel to stop.
1404 *
1405 * Notes:
1406 *
1407 * NPI/NXGE function calls:
1408 * npi_txdma_channel_disable()
1409 * npi_txdma_inj_int_error_set()
1410 * #if defined(NXGE_DEBUG)
1411 * nxge_txdma_regs_dump_channels(nxgep);
1412 * #endif
1413 *
1414 * Registers accessed:
1415 * TX_CS DMC+0x40028 Transmit Control And Status
1416 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1417 *
1418 * Context:
1419 * Any domain
1420 */
1421 int
1422 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
1423 {
1424 npi_handle_t handle;
1425 tdmc_intr_dbg_t intr_dbg;
1426 int status;
1427 npi_status_t rs = NPI_SUCCESS;
1428
1429 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
1430 /*
1431 * Stop the dma channel waits for the stop done.
1432 * If the stop done bit is not set, then create
1433 * an error.
1434 */
1435 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1436 rs = npi_txdma_channel_disable(handle, channel);
1437 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1438 if (status == NXGE_OK) {
1439 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1440 "<== nxge_txdma_stop_inj_err (channel %d): "
1441 "stopped OK", channel));
1442 return (status);
1443 }
1444
1445 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1446 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
1447 "injecting error", channel, rs));
1448 /* Inject any error */
1449 intr_dbg.value = 0;
1450 intr_dbg.bits.ldw.nack_pref = 1;
1451 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1452
1453 /* Stop done bit will be set as a result of error injection */
1454 rs = npi_txdma_channel_disable(handle, channel);
1455 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
1456 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
1457 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1458 "<== nxge_txdma_stop_inj_err (channel %d): "
1459 "stopped OK ", channel));
1460 return (status);
1461 }
1462
1463 #if defined(NXGE_DEBUG)
1464 nxge_txdma_regs_dump_channels(nxgep);
1465 #endif
1466 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1467 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
1468 " (injected error but still not stopped)", channel, rs));
1469
1470 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
1471 return (status);
1472 }
1473
1474 /*ARGSUSED*/
1475 void
1476 nxge_fixup_txdma_rings(p_nxge_t nxgep)
1477 {
1478 nxge_grp_set_t *set = &nxgep->tx_set;
1479 int tdc;
1480
1481 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
1482
1483 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1484 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1485 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
1486 return;
1487 }
1488
1489 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1490 if ((1 << tdc) & set->owned.map) {
1491 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1492 if (ring) {
1493 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1494 "==> nxge_fixup_txdma_rings: channel %d",
1495 tdc));
1496 nxge_txdma_fixup_channel(nxgep, ring, tdc);
1497 }
1498 }
1499 }
1500
1501 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
1502 }
1503
1504 /*ARGSUSED*/
1505 void
1506 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
1507 {
1508 p_tx_ring_t ring_p;
1509
1510 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
1511 ring_p = nxge_txdma_get_ring(nxgep, channel);
1512 if (ring_p == NULL) {
1513 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1514 return;
1515 }
1516
1517 if (ring_p->tdc != channel) {
1518 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1519 "<== nxge_txdma_fix_channel: channel not matched "
1520 "ring tdc %d passed channel",
1521 ring_p->tdc, channel));
1522 return;
1523 }
1524
1525 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1526
1527 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
1528 }
1529
1530 /*ARGSUSED*/
1531 void
1532 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1533 {
1534 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
1535
1536 if (ring_p == NULL) {
1537 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1538 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1539 return;
1540 }
1541
1542 if (ring_p->tdc != channel) {
1543 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1544 "<== nxge_txdma_fixup_channel: channel not matched "
1545 "ring tdc %d passed channel",
1546 ring_p->tdc, channel));
1547 return;
1548 }
1549
1550 MUTEX_ENTER(&ring_p->lock);
1551 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1552 ring_p->rd_index = 0;
1553 ring_p->wr_index = 0;
1554 ring_p->ring_head.value = 0;
1555 ring_p->ring_kick_tail.value = 0;
1556 ring_p->descs_pending = 0;
1557 MUTEX_EXIT(&ring_p->lock);
1558
1559 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
1560 }
1561
1562 /*ARGSUSED*/
1563 void
1564 nxge_txdma_hw_kick(p_nxge_t nxgep)
1565 {
1566 nxge_grp_set_t *set = &nxgep->tx_set;
1567 int tdc;
1568
1569 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
1570
1571 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1572 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1573 "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
1574 return;
1575 }
1576
1577 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1578 if ((1 << tdc) & set->owned.map) {
1579 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1580 if (ring) {
1581 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
1582 "==> nxge_txdma_hw_kick: channel %d", tdc));
1583 nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
1584 }
1585 }
1586 }
1587
1588 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
1589 }
1590
1591 /*ARGSUSED*/
1592 void
1593 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
1594 {
1595 p_tx_ring_t ring_p;
1596
1597 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
1598
1599 ring_p = nxge_txdma_get_ring(nxgep, channel);
1600 if (ring_p == NULL) {
1601 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1602 " nxge_txdma_kick_channel"));
1603 return;
1604 }
1605
1606 if (ring_p->tdc != channel) {
1607 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1608 "<== nxge_txdma_kick_channel: channel not matched "
1609 "ring tdc %d passed channel",
1610 ring_p->tdc, channel));
1611 return;
1612 }
1613
1614 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
1615
1616 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
1617 }
1618
1619 /*ARGSUSED*/
1620 void
1621 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
1622 {
1623
1624 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
1625
1626 if (ring_p == NULL) {
1627 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1628 "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
1629 return;
1630 }
1631
1632 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
1633 }
1634
1635 /*
1636 * nxge_check_tx_hang
1637 *
1638 * Check the state of all TDCs belonging to nxgep.
1639 *
1640 * Arguments:
1641 * nxgep
1642 *
1643 * Notes:
1644 * Called by nxge_hw.c:nxge_check_hw_state().
1645 *
1646 * NPI/NXGE function calls:
1647 *
1648 * Registers accessed:
1649 *
1650 * Context:
1651 * Any domain
1652 */
1653 /*ARGSUSED*/
1654 void
1655 nxge_check_tx_hang(p_nxge_t nxgep)
1656 {
1657 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
1658
1659 if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
1660 (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
1661 goto nxge_check_tx_hang_exit;
1662 }
1663
1664 /*
1665 * Needs inputs from hardware for regs:
1666 * head index had not moved since last timeout.
1667 * packets not transmitted or stuffed registers.
1668 */
1669 if (nxge_txdma_hung(nxgep)) {
1670 nxge_fixup_hung_txdma_rings(nxgep);
1671 }
1672
1673 nxge_check_tx_hang_exit:
1674 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
1675 }
1676
1677 /*
1678 * nxge_txdma_hung
1679 *
1680 * Determine whether any owned, unshared TDC appears to be hung.
1681 *
1682 * Arguments:
1683 * nxgep
1686 *
1687 * Notes:
1688 * Called by nxge_check_tx_hang()
1689 *
1690 * NPI/NXGE function calls:
1691 * nxge_txdma_channel_hung()
1692 *
1693 * Registers accessed:
1694 *
1695 * Context:
1696 * Any domain
1697 */
1698 int
1699 nxge_txdma_hung(p_nxge_t nxgep)
1700 {
1701 nxge_grp_set_t *set = &nxgep->tx_set;
1702 int tdc;
1703 boolean_t shared;
1704
1705 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
1706
1707 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1708 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1709 "<== nxge_txdma_hung: NULL ring pointer(s)"));
1710 return (B_FALSE);
1711 }
1712
1713 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1714 /*
1715 * Grab the shared state of the TDC.
1716 */
1717 if (isLDOMservice(nxgep)) {
1718 nxge_hio_data_t *nhd =
1719 (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
1720
1721 MUTEX_ENTER(&nhd->lock);
1722 shared = nxgep->tdc_is_shared[tdc];
1723 MUTEX_EXIT(&nhd->lock);
1724 } else {
1725 shared = B_FALSE;
1726 }
1727
1728 /*
1729 * Only process channels that we own and that are not shared.
1730 */
1731 if (((1 << tdc) & set->owned.map) && !shared) {
1732 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1733 if (ring) {
1734 if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
1735 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1736 "==> nxge_txdma_hung: TDC %d hung",
1737 tdc));
1738 return (B_TRUE);
1739 }
1740 }
1741 }
1742 }
1743
1744 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
1745
1746 return (B_FALSE);
1747 }
1748
1749 /*
1750 * nxge_txdma_channel_hung
1751 *
1752 * Determine whether a single TDC appears to be hung.
1753 *
1754 * Arguments:
1755 * nxgep
1756 * ring <channel>'s ring.
1757 * channel The channel to check.
1758 *
1759 * Notes:
1760 * Called by nxge_txdma.c:nxge_txdma_hung()
1761 *
1762 * NPI/NXGE function calls:
1763 * npi_txdma_ring_head_get()
1764 *
1765 * Registers accessed:
1766 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1767 *
1768 * Context:
1769 * Any domain
1770 */
1771 int
1772 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
1773 {
1774 uint16_t head_index, tail_index;
1775 boolean_t head_wrap, tail_wrap;
1776 npi_handle_t handle;
1777 tx_ring_hdl_t tx_head;
1778 uint_t tx_rd_index;
1779
1780 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
1781
1782 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1783 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1784 "==> nxge_txdma_channel_hung: channel %d", channel));
1785 MUTEX_ENTER(&tx_ring_p->lock);
1786 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
1787
1788 tail_index = tx_ring_p->wr_index;
1789 tail_wrap = tx_ring_p->wr_index_wrap;
1790 tx_rd_index = tx_ring_p->rd_index;
1791 MUTEX_EXIT(&tx_ring_p->lock);
1792
1793 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1794 "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
1795 "tail_index %d tail_wrap %d ",
1796 channel, tx_rd_index, tail_index, tail_wrap));
1797 /*
1798 * Read the hardware maintained transmit head
1799 * and wrap around bit.
1800 */
1801 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
1802 head_index = tx_head.bits.ldw.head;
1803 head_wrap = tx_head.bits.ldw.wrap;
1804 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1805 "==> nxge_txdma_channel_hung: "
1806 "tx_rd_index %d tail %d tail_wrap %d "
1807 "head %d wrap %d",
1808 tx_rd_index, tail_index, tail_wrap,
1809 head_index, head_wrap));
1810
1811 if (TXDMA_RING_EMPTY(head_index, head_wrap,
1812 tail_index, tail_wrap) &&
1813 (head_index == tx_rd_index)) {
1814 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1815 "==> nxge_txdma_channel_hung: EMPTY"));
1816 return (B_FALSE);
1817 }
1818
1819 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1820 "==> nxge_txdma_channel_hung: Checking if ring full"));
1821 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
1822 tail_wrap)) {
1823 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1824 "==> nxge_txdma_channel_hung: full"));
1825 return (B_TRUE);
1826 }
1827
1828 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
1829
1830 return (B_FALSE);
1831 }
1832
1833 /*
1834 * nxge_fixup_hung_txdma_rings
1835 *
1836 * Reclaim and disable every owned TDC that appears to be hung.
1837 *
1838 * Arguments:
1839 * nxgep
1842 *
1843 * Notes:
1844 * Called by nxge_check_tx_hang()
1845 *
1846 * NPI/NXGE function calls:
1847 * npi_txdma_ring_head_get()
1848 *
1849 * Registers accessed:
1850 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
1851 *
1852 * Context:
1853 * Any domain
1854 */
1855 /*ARGSUSED*/
1856 void
1857 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
1858 {
1859 nxge_grp_set_t *set = &nxgep->tx_set;
1860 int tdc;
1861
1862 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
1863
1864 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
1865 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1866 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
1867 return;
1868 }
1869
1870 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1871 if ((1 << tdc) & set->owned.map) {
1872 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
1873 if (ring) {
1874 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
1875 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1876 "==> nxge_fixup_hung_txdma_rings: TDC %d",
1877 tdc));
1878 }
1879 }
1880 }
1881
1882 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
1883 }
1884
1885 /*
1886 * nxge_txdma_fixup_hung_channel
1887 *
1888 * 'Fix' a hung TDC.
1889 *
1890 * Arguments:
1891 * nxgep
1892 * channel The channel to fix.
1893 *
1894 * Notes:
1895 * Called by nxge_fixup_hung_txdma_rings()
1896 *
1897 * 1. Reclaim the TDC.
1898 * 2. Disable the TDC.
1899 *
1900 * NPI/NXGE function calls:
1901 * nxge_txdma_reclaim()
1902 * npi_txdma_channel_disable(TX_CS)
1903 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
1904 *
1905 * Registers accessed:
1906 * TX_CS DMC+0x40028 Transmit Control And Status
1907 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
1908 *
1909 * Context:
1910 * Any domain
1911 */
1912 /*ARGSUSED*/
1913 void
1914 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
1915 {
1916 p_tx_ring_t ring_p;
1917
1918 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
1919 ring_p = nxge_txdma_get_ring(nxgep, channel);
1920 if (ring_p == NULL) {
1921 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1922 "<== nxge_txdma_fix_hung_channel"));
1923 return;
1924 }
1925
1926 if (ring_p->tdc != channel) {
1927 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1928 "<== nxge_txdma_fix_hung_channel: channel not matched "
1929 "ring tdc %d passed channel",
1930 ring_p->tdc, channel));
1931 return;
1932 }
1933
1934 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
1935
1936 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
1937 }
1938
1939 /*ARGSUSED*/
1940 void
1941 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
1942 uint16_t channel)
1943 {
1944 npi_handle_t handle;
1945 tdmc_intr_dbg_t intr_dbg;
1946 int status = NXGE_OK;
1947
1948 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
1949
1950 if (ring_p == NULL) {
1951 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1952 "<== nxge_txdma_fixup_channel: NULL ring pointer"));
1953 return;
1954 }
1955
1956 if (ring_p->tdc != channel) {
1957 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1958 "<== nxge_txdma_fixup_hung_channel: channel "
1959 "not matched "
1960 "ring tdc %d passed channel",
1961 ring_p->tdc, channel));
1962 return;
1963 }
1964
1965 /* Reclaim descriptors */
1966 MUTEX_ENTER(&ring_p->lock);
1967 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
1968 MUTEX_EXIT(&ring_p->lock);
1969
1970 handle = NXGE_DEV_NPI_HANDLE(nxgep);
1971 /*
1972 * Stop the dma channel waits for the stop done.
1973 * If the stop done bit is not set, then force
1974 * an error.
1975 */
1976 status = npi_txdma_channel_disable(handle, channel);
1977 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1978 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1979 "<== nxge_txdma_fixup_hung_channel: stopped OK "
1980 "ring tdc %d passed channel %d",
1981 ring_p->tdc, channel));
1982 return;
1983 }
1984
1985 /* Inject any error */
1986 intr_dbg.value = 0;
1987 intr_dbg.bits.ldw.nack_pref = 1;
1988 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
1989
1990 /* Stop done bit will be set as a result of error injection */
1991 status = npi_txdma_channel_disable(handle, channel);
1992 if (!(status & NPI_TXDMA_STOP_FAILED)) {
1993 NXGE_DEBUG_MSG((nxgep, TX_CTL,
1994 "<== nxge_txdma_fixup_hung_channel: stopped again "
1995 "ring tdc %d passed channel %d",
1996 ring_p->tdc, channel));
1997 return;
1998 }
1999
2000 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2001 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
2002 "ring tdc %d passed channel %d",
2003 ring_p->tdc, channel));
2004
2005 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
2006 }
2007
2008 /*ARGSUSED*/
2009 void
2010 nxge_reclaim_rings(p_nxge_t nxgep)
2011 {
2012 nxge_grp_set_t *set = &nxgep->tx_set;
2013 int tdc;
2014
2015 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
2016
2017 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2018 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2019 "<== nxge_reclaim_rings: NULL ring pointer(s)"));
2020 return;
2021 }
2022
2023 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2024 if ((1 << tdc) & set->owned.map) {
2025 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2026 if (ring) {
2027 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2028 "==> nxge_reclaim_rings: TDC %d", tdc));
2029 MUTEX_ENTER(&ring->lock);
2030 (void) nxge_txdma_reclaim(nxgep, ring, 0);
2031 MUTEX_EXIT(&ring->lock);
2032 }
2033 }
2034 }
2035
2036 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
2037 }
2038
2039 void
2040 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
2041 {
2042 nxge_grp_set_t *set = &nxgep->tx_set;
2043 npi_handle_t handle;
2044 int tdc;
2045
2046 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
2047
2048 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2049
2050 if (!isLDOMguest(nxgep)) {
2051 (void) npi_txdma_dump_fzc_regs(handle);
2052
2053 /* Dump TXC registers. */
2054 (void) npi_txc_dump_fzc_regs(handle);
2055 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
2056 }
2057
2058 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
2059 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2060 "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
2061 return;
2062 }
2063
2064 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
2065 if ((1 << tdc) & set->owned.map) {
2066 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
2067 if (ring) {
2068 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2069 "==> nxge_txdma_regs_dump_channels: "
2070 "TDC %d", tdc));
2071 (void) npi_txdma_dump_tdc_regs(handle, tdc);
2072
2073 /* Dump TXC registers, if able to. */
2074 if (!isLDOMguest(nxgep)) {
2075 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2076 "==> nxge_txdma_regs_dump_channels:"
2077 " FZC TDC %d", tdc));
2078 (void) npi_txc_dump_tdc_fzc_regs
2079 (handle, tdc);
2080 }
2081 nxge_txdma_regs_dump(nxgep, tdc);
2082 }
2083 }
2084 }
2085
2086 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
2087 }
2088
2089 void
2090 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
2091 {
2092 npi_handle_t handle;
2093 tx_ring_hdl_t hdl;
2094 tx_ring_kick_t kick;
2095 tx_cs_t cs;
2096 txc_control_t control;
2097 uint32_t bitmap = 0;
2098 uint32_t burst = 0;
2099 uint32_t bytes = 0;
2100 dma_log_page_t cfg;
2101
2102 printf("\n\tfunc # %d tdc %d ",
2103 nxgep->function_num, channel);
2104 cfg.page_num = 0;
2105 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2106 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2107 printf("\n\tlog page func %d valid page 0 %d",
2108 cfg.func_num, cfg.valid);
2109 cfg.page_num = 1;
2110 (void) npi_txdma_log_page_get(handle, channel, &cfg);
2111 printf("\n\tlog page func %d valid page 1 %d",
2112 cfg.func_num, cfg.valid);
2113
2114 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
2115 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
2116 printf("\n\thead value is 0x%0llx",
2117 (long long)hdl.value);
2118 printf("\n\thead index %d", hdl.bits.ldw.head);
2119 printf("\n\tkick value is 0x%0llx",
2120 (long long)kick.value);
2121 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
2122
2123 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
2124 printf("\n\tControl status is 0x%0llx", (long long)cs.value);
2125 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
2126
2127 (void) npi_txc_control(handle, OP_GET, &control);
2128 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
2129 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
2130 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
2131
2132 printf("\n\tTXC port control 0x%0llx",
2133 (long long)control.value);
2134 printf("\n\tTXC port bitmap 0x%x", bitmap);
2135 printf("\n\tTXC max burst %d", burst);
2136 printf("\n\tTXC bytes xmt %d\n", bytes);
2137
2138 {
2139 ipp_status_t status;
2140
2141 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
2142 #if defined(__i386)
2143 printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
2144 #else
2145 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
2146 #endif
2147 }
2148 }
2149
2150 /*
2151 * nxge_tdc_hvio_setup
2152 *
2153 * Record the HV I/O addresses and sizes of this channel's transmit
 * data and control areas for the hypervisor logical-page setup (sun4v).
2154 *
2155 * Arguments:
2156 * nxgep
2157 * channel The channel to map.
2158 *
2159 * Notes:
2160 *
2161 * NPI/NXGE function calls:
2162 * na
2163 *
2164 * Context:
2165 * Service domain
2166 */
2167 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2168 static void
2169 nxge_tdc_hvio_setup(
2170 nxge_t *nxgep, int channel)
2171 {
2172 nxge_dma_common_t *data;
2173 nxge_dma_common_t *control;
2174 tx_ring_t *ring;
2175
2176 ring = nxgep->tx_rings->rings[channel];
2177 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2178
2179 ring->hv_set = B_FALSE;
2180
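/*
 * The assignments below record the original (contiguous, premapped)
 * I/O address and length of the transmit data buffer area; the
 * hypervisor logical-page setup consumes these values later.  hv_set is
 * left B_FALSE here and is presumably raised once the logical pages
 * have actually been programmed.
 */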
2181 ring->hv_tx_buf_base_ioaddr_pp =
2182 (uint64_t)data->orig_ioaddr_pp;
2183 ring->hv_tx_buf_ioaddr_size =
2184 (uint64_t)data->orig_alength;
2185
2186 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2187 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
2188 "orig vatopa base io $%p orig_len 0x%llx (%d)",
2189 ring->hv_tx_buf_base_ioaddr_pp,
2190 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
2191 data->ioaddr_pp, data->orig_vatopa,
2192 data->orig_alength, data->orig_alength));
2193
2194 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2195
2196 ring->hv_tx_cntl_base_ioaddr_pp =
2197 (uint64_t)control->orig_ioaddr_pp;
2198 ring->hv_tx_cntl_ioaddr_size =
2199 (uint64_t)control->orig_alength;
2200
2201 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_tdc_hvio_setup: "
2202 "hv cntl base io $%p orig ioaddr_pp ($%p) "
2203 "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
2204 ring->hv_tx_cntl_base_ioaddr_pp,
2205 control->orig_ioaddr_pp, control->orig_vatopa,
2206 ring->hv_tx_cntl_ioaddr_size,
2207 control->orig_alength, control->orig_alength));
2208 }
2209 #endif
2210
2211 static nxge_status_t
2212 nxge_map_txdma(p_nxge_t nxgep, int channel)
2213 {
2214 nxge_dma_common_t **pData;
2215 nxge_dma_common_t **pControl;
2216 tx_ring_t **pRing, *ring;
2217 tx_mbox_t **mailbox;
2218 uint32_t num_chunks;
2219
2220 nxge_status_t status = NXGE_OK;
2221
2222 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
2223
2224 if (!nxgep->tx_cntl_pool_p->buf_allocated) {
2225 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
2226 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2227 "<== nxge_map_txdma: buf not allocated"));
2228 return (NXGE_ERROR);
2229 }
2230 }
2231
2232 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
2233 return (NXGE_ERROR);
2234
2235 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
2236 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
2237 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
2238 pRing = &nxgep->tx_rings->rings[channel];
2239 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2240
2241 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2242 "tx_rings $%p tx_desc_rings $%p",
2243 nxgep->tx_rings, nxgep->tx_rings->rings));
2244
2245 /*
2246 * Map descriptors from the buffer pools for <channel>.
2247 */
2248
2249 /*
2250 * Set up and prepare buffer blocks, descriptors
2251 * and mailbox.
2252 */
2253 status = nxge_map_txdma_channel(nxgep, channel,
2254 pData, pRing, num_chunks, pControl, mailbox);
2255 if (status != NXGE_OK) {
2256 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2257 "==> nxge_map_txdma($%p %d): nxge_map_txdma_channel() "
2258 "returned 0x%x",
2259 nxgep, channel, status));
2260 return (status);
2261 }
2262
2263 ring = *pRing;
2264
2265 ring->index = (uint16_t)channel;
2266 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
2267
2268 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
2269 if (isLDOMguest(nxgep)) {
2270 (void) nxge_tdc_lp_conf(nxgep, channel);
2271 } else {
2272 nxge_tdc_hvio_setup(nxgep, channel);
2273 }
2274 #endif
2275
2276 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
2277 "(status 0x%x channel %d)", status, channel));
2278
2279 return (status);
2280 }
2281
2282 static nxge_status_t
2283 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
2284 p_nxge_dma_common_t *dma_buf_p,
2285 p_tx_ring_t *tx_desc_p,
2286 uint32_t num_chunks,
2287 p_nxge_dma_common_t *dma_cntl_p,
2288 p_tx_mbox_t *tx_mbox_p)
2289 {
2290 int status = NXGE_OK;
2291
2292 /*
2293 * Set up and prepare buffer blocks, descriptors
2294 * and mailbox.
2295 */
2296 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2297 "==> nxge_map_txdma_channel (channel %d)", channel));
2298 /*
2299 * Transmit buffer blocks
2300 */
2301 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
2302 dma_buf_p, tx_desc_p, num_chunks);
2303 if (status != NXGE_OK) {
2304 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2305 "==> nxge_map_txdma_channel (channel %d): "
2306 "map buffer failed 0x%x", channel, status));
2307 goto nxge_map_txdma_channel_exit;
2308 }
2309
2310 /*
2311 * Transmit block ring, and mailbox.
2312 */
2313 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
2314 tx_mbox_p);
2315
2316 goto nxge_map_txdma_channel_exit;
2317
2318 nxge_map_txdma_channel_fail1:
2319 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2320 "==> nxge_map_txdma_channel: unmap buf"
2321 "(status 0x%x channel %d)",
2322 status, channel));
2323 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
2324
2325 nxge_map_txdma_channel_exit:
2326 NXGE_ERROR_MSG((nxgep, MEM3_CTL,
2327 "<== nxge_map_txdma_channel: "
2328 "(status 0x%x channel %d)",
2329 status, channel));
2330
2331 return (status);
2332 }
2333
2334 /*ARGSUSED*/
2335 static void
2336 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
2337 {
2338 tx_ring_t *ring;
2339 tx_mbox_t *mailbox;
2340
2341 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2342 "==> nxge_unmap_txdma_channel (channel %d)", channel));
2343 /*
2344 * unmap tx block ring, and mailbox.
2345 */
2346 ring = nxgep->tx_rings->rings[channel];
2347 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2348
2349 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
2350
2351 /* unmap buffer blocks */
2352 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
2353
2354 nxge_free_txb(nxgep, channel);
2355
2356 /*
2357 * Cleanup the reference to the ring now that it does not exist.
2358 */
2359 nxgep->tx_rings->rings[channel] = NULL;
2360
2361 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
2362 }
2363
2364 /*
2365 * nxge_map_txdma_channel_cfg_ring
2366 *
2367 * Map a TDC into our kernel space.
2368 * This function allocates all of the per-channel data structures.
2369 *
2370 * Arguments:
2371 * nxgep
2372 * dma_channel The channel to map.
2373 * dma_cntl_p
2374 * tx_ring_p dma_channel's transmit ring
2375 * tx_mbox_p dma_channel's mailbox
2376 *
2377 * Notes:
2378 *
2379 * NPI/NXGE function calls:
2380 * nxge_setup_dma_common()
2381 *
2382 * Registers accessed:
2383 * none.
2384 *
2385 * Context:
2386 * Any domain
2387 */
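/*
 * In outline: both the descriptor ring and the mailbox are carved out of
 * the channel's control DMA area (cntl_dmap) via nxge_setup_dma_common(),
 * and the register images that will later be written to the hardware
 * (ring configuration, kick, control/status, event mask, mailbox high/low)
 * are precomputed here.
 */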
2388 /*ARGSUSED*/
2389 static void
2390 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
2391 p_nxge_dma_common_t *dma_cntl_p,
2392 p_tx_ring_t tx_ring_p,
2393 p_tx_mbox_t *tx_mbox_p)
2394 {
2395 p_tx_mbox_t mboxp;
2396 p_nxge_dma_common_t cntl_dmap;
2397 p_nxge_dma_common_t dmap;
2398 p_tx_rng_cfig_t tx_ring_cfig_p;
2399 p_tx_ring_kick_t tx_ring_kick_p;
2400 p_tx_cs_t tx_cs_p;
2401 p_tx_dma_ent_msk_t tx_evmask_p;
2402 p_txdma_mbh_t mboxh_p;
2403 p_txdma_mbl_t mboxl_p;
2404 uint64_t tx_desc_len;
2405
2406 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2407 "==> nxge_map_txdma_channel_cfg_ring"));
2408
2409 cntl_dmap = *dma_cntl_p;
2410
2411 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
2412 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
2413 sizeof (tx_desc_t));
2414 /*
2415 * Zero out transmit ring descriptors.
2416 */
2417 bzero((caddr_t)dmap->kaddrp, dmap->alength);
2418 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
2419 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
2420 tx_cs_p = &(tx_ring_p->tx_cs);
2421 tx_evmask_p = &(tx_ring_p->tx_evmask);
2422 tx_ring_cfig_p->value = 0;
2423 tx_ring_kick_p->value = 0;
2424 tx_cs_p->value = 0;
2425 tx_evmask_p->value = 0;
2426
2427 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2428 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
2429 dma_channel,
2430 dmap->dma_cookie.dmac_laddress));
2431
2432 tx_ring_cfig_p->value = 0;
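/*
 * The length field of TX_RNG_CFIG counts groups of eight descriptors
 * (64-byte blocks, each tx_desc_t being 8 bytes), which is why the ring
 * size is shifted right by 3 below; the masked DMA base address occupies
 * the remaining bits of the register image.
 */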
2433 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
2434 tx_ring_cfig_p->value =
2435 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
2436 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
2437
2438 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2439 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
2440 dma_channel,
2441 tx_ring_cfig_p->value));
2442
2443 tx_cs_p->bits.ldw.rst = 1;
2444
2445 /* Map in mailbox */
2446 mboxp = (p_tx_mbox_t)
2447 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
2448 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
2449 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
2450 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
2451 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
2452 mboxh_p->value = mboxl_p->value = 0;
2453
2454 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2455 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2456 dmap->dma_cookie.dmac_laddress));
2457
2458 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
2459 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
2460
2461 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
2462 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
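/*
 * The 64-bit mailbox DMA address is split across two register images:
 * the high word (MBH) carries the upper address bits and the low word
 * (MBL) the lower, alignment-constrained bits.  Both are presumably
 * written out when the channel is enabled.
 */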
2463
2464 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2465 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
2466 dmap->dma_cookie.dmac_laddress));
2467 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2468 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
2469 "mbox $%p",
2470 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
2471 tx_ring_p->page_valid.value = 0;
2472 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
2473 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
2474 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
2475 tx_ring_p->page_hdl.value = 0;
2476
2477 tx_ring_p->page_valid.bits.ldw.page0 = 1;
2478 tx_ring_p->page_valid.bits.ldw.page1 = 1;
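/*
 * Only logical pages 0 and 1 are marked valid here; the page masks,
 * values and relocations are left at zero and are expected to be filled
 * in by the per-channel FZC configuration when the channel is started.
 */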
2479
2480 tx_ring_p->max_burst.value = 0;
2481 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
2482
2483 *tx_mbox_p = mboxp;
2484
2485 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2486 "<== nxge_map_txdma_channel_cfg_ring"));
2487 }
2488
2489 /*ARGSUSED*/
2490 static void
2491 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
2492 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2493 {
2494 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2495 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
2496 tx_ring_p->tdc));
2497
2498 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
2499
2500 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2501 "<== nxge_unmap_txdma_channel_cfg_ring"));
2502 }
2503
2504 /*
2505 * nxge_map_txdma_channel_buf_ring
2506 *
2507 *
2508 * Arguments:
2509 * nxgep
2510 * channel The channel to map.
2511 * dma_buf_p
2512 * tx_desc_p channel's descriptor ring
2513 * num_chunks
2514 *
2515 * Notes:
2516 *
2517 * NPI/NXGE function calls:
2518 * nxge_setup_dma_common()
2519 *
2520 * Registers accessed:
2521 * none.
2522 *
2523 * Context:
2524 * Any domain
2525 */
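/*
 * In outline: sum the block counts of the channel's pre-allocated DMA
 * chunks, allocate one tx_msg_t per block (that total becomes the ring
 * size), give each slot its own DDI DMA handle so that large packets can
 * be bound directly rather than bcopied, and point each slot's buf_dma at
 * its premapped block.
 */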
2526 static nxge_status_t
2527 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
2528 p_nxge_dma_common_t *dma_buf_p,
2529 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
2530 {
2531 p_nxge_dma_common_t dma_bufp, tmp_bufp;
2532 p_nxge_dma_common_t dmap;
2533 nxge_os_dma_handle_t tx_buf_dma_handle;
2534 p_tx_ring_t tx_ring_p;
2535 p_tx_msg_t tx_msg_ring;
2536 nxge_status_t status = NXGE_OK;
2537 int ddi_status = DDI_SUCCESS;
2538 int i, j, index;
2539 uint32_t size, bsize;
2540 uint32_t nblocks, nmsgs;
2541 char qname[TASKQ_NAMELEN];
2542
2543 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2544 "==> nxge_map_txdma_channel_buf_ring"));
2545
2546 dma_bufp = tmp_bufp = *dma_buf_p;
2547 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2548 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
2549 "chunks bufp $%p",
2550 channel, num_chunks, dma_bufp));
2551
2552 nmsgs = 0;
2553 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
2554 nmsgs += tmp_bufp->nblocks;
2555 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2556 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2557 "bufp $%p nblocks %d nmsgs %d",
2558 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
2559 }
2560 if (!nmsgs) {
2561 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2562 "<== nxge_map_txdma_channel_buf_ring: channel %d "
2563 "no msg blocks",
2564 channel));
2565 status = NXGE_ERROR;
2566 goto nxge_map_txdma_channel_buf_ring_exit;
2567 }
2568
2569 tx_ring_p = (p_tx_ring_t)
2570 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
2571 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
2572 (void *)nxgep->interrupt_cookie);
2573
2574 (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
2575 tx_ring_p->tx_ring_busy = B_FALSE;
2576 tx_ring_p->nxgep = nxgep;
2577 tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
2578 (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
2579 nxgep->instance, channel);
2580 tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
2581 TASKQ_DEFAULTPRI, 0);
2582 if (tx_ring_p->taskq == NULL) {
2583 /* tx_msg_ring does not exist yet; clean up directly. */
2584 MUTEX_DESTROY(&tx_ring_p->lock);
 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
 status = NXGE_ERROR;
 goto nxge_map_txdma_channel_buf_ring_exit;
 }
2585
2586 /*
2587 * Allocate transmit message rings and handles for packets
2588 * not to be copied to premapped buffers.
2589 */
2590 size = nmsgs * sizeof (tx_msg_t);
2591 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
2592 for (i = 0; i < nmsgs; i++) {
2593 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
2594 DDI_DMA_DONTWAIT, 0,
2595 &tx_msg_ring[i].dma_handle);
2596 if (ddi_status != DDI_SUCCESS) {
2597 status |= NXGE_DDI_FAILED;
2598 break;
2599 }
2600 }
2601 if (i < nmsgs) {
2602 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2603 "Allocate handles failed."));
 index = i;	/* free only the handles allocated so far */
2604 goto nxge_map_txdma_channel_buf_ring_fail1;
2605 }
2606
2607 tx_ring_p->tdc = channel;
2608 tx_ring_p->tx_msg_ring = tx_msg_ring;
2609 tx_ring_p->tx_ring_size = nmsgs;
2610 tx_ring_p->num_chunks = num_chunks;
2611 if (!nxge_tx_intr_thres) {
2612 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
2613 }
2614 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
2615 tx_ring_p->rd_index = 0;
2616 tx_ring_p->wr_index = 0;
2617 tx_ring_p->ring_head.value = 0;
2618 tx_ring_p->ring_kick_tail.value = 0;
2619 tx_ring_p->descs_pending = 0;
2620
2621 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2622 "==> nxge_map_txdma_channel_buf_ring: channel %d "
2623 "actual tx desc max %d nmsgs %d "
2624 "(config nxge_tx_ring_size %d)",
2625 channel, tx_ring_p->tx_ring_size, nmsgs,
2626 nxge_tx_ring_size));
2627
2628 /*
2629 * Map in buffers from the buffer pool.
2630 */
2631 index = 0;
2632 bsize = dma_bufp->block_size;
2633
2634 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
2635 "dma_bufp $%p tx_rng_p $%p "
2636 "tx_msg_rng_p $%p bsize %d",
2637 dma_bufp, tx_ring_p, tx_msg_ring, bsize));
2638
2639 tx_buf_dma_handle = dma_bufp->dma_handle;
2640 for (i = 0; i < num_chunks; i++, dma_bufp++) {
2641 bsize = dma_bufp->block_size;
2642 nblocks = dma_bufp->nblocks;
2643 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2644 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
2645 "size %d dma_bufp $%p",
2646 i, sizeof (nxge_dma_common_t), dma_bufp));
2647
2648 for (j = 0; j < nblocks; j++) {
2649 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
2650 dmap = &tx_msg_ring[index++].buf_dma;
2651 #ifdef TX_MEM_DEBUG
2652 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2653 "==> nxge_map_txdma_channel_buf_ring: j %d "
2654 "dmap $%p", j, dmap));
2655 #endif
2656 nxge_setup_dma_common(dmap, dma_bufp, 1,
2657 bsize);
2658 }
2659 }
2660
2661 if (i < num_chunks) {
2662 status = NXGE_ERROR;
2663 goto nxge_map_txdma_channel_buf_ring_fail1;
2664 }
2665
2666 *tx_desc_p = tx_ring_p;
2667
2668 goto nxge_map_txdma_channel_buf_ring_exit;
2669
2670 nxge_map_txdma_channel_buf_ring_fail1:
2671 if (tx_ring_p->taskq) {
2672 ddi_taskq_destroy(tx_ring_p->taskq);
2673 tx_ring_p->taskq = NULL;
2674 }
2675
2676 index--;
2677 for (; index >= 0; index--) {
2678 if (tx_msg_ring[index].dma_handle != NULL) {
2679 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
2680 }
2681 }
2682 MUTEX_DESTROY(&tx_ring_p->lock);
2683 KMEM_FREE(tx_msg_ring, size);
2684 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2685
2686 status = NXGE_ERROR;
2687
2688 nxge_map_txdma_channel_buf_ring_exit:
2689 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2690 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
2691
2692 return (status);
2693 }
2694
2695 /*ARGSUSED*/
2696 static void
2697 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
2698 {
2699 p_tx_msg_t tx_msg_ring;
2700 p_tx_msg_t tx_msg_p;
2701 int i;
2702
2703 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2704 "==> nxge_unmap_txdma_channel_buf_ring"));
2705 if (tx_ring_p == NULL) {
2706 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2707 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
2708 return;
2709 }
2710 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2711 "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
2712 tx_ring_p->tdc));
2713
2714 tx_msg_ring = tx_ring_p->tx_msg_ring;
2715
2716 /*
2717 * Since the serialization thread, timer thread and
2718 * interrupt thread can all call the transmit reclaim,
2719 * the unmapping function needs to acquire the lock
2720 * to free those buffers which were transmitted
2721 * by the hardware already.
2722 */
2723 MUTEX_ENTER(&tx_ring_p->lock);
2724 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2725 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
2726 "channel %d",
2727 tx_ring_p->tdc));
2728 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
2729
2730 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2731 tx_msg_p = &tx_msg_ring[i];
2732 if (tx_msg_p->tx_message != NULL) {
2733 freemsg(tx_msg_p->tx_message);
2734 tx_msg_p->tx_message = NULL;
2735 }
2736 }
2737
2738 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
2739 if (tx_msg_ring[i].dma_handle != NULL) {
2740 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
2741 }
2742 tx_msg_ring[i].dma_handle = NULL;
2743 }
2744
2745 MUTEX_EXIT(&tx_ring_p->lock);
2746
2747 if (tx_ring_p->taskq) {
2748 ddi_taskq_destroy(tx_ring_p->taskq);
2749 tx_ring_p->taskq = NULL;
2750 }
2751
2752 MUTEX_DESTROY(&tx_ring_p->lock);
2753 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
2754 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
2755
2756 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2757 "<== nxge_unmap_txdma_channel_buf_ring"));
2758 }
2759
2760 static nxge_status_t
2761 nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
2762 {
2763 p_tx_rings_t tx_rings;
2764 p_tx_ring_t *tx_desc_rings;
2765 p_tx_mbox_areas_t tx_mbox_areas_p;
2766 p_tx_mbox_t *tx_mbox_p;
2767 nxge_status_t status = NXGE_OK;
2768
2769 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
2770
2771 tx_rings = nxgep->tx_rings;
2772 if (tx_rings == NULL) {
2773 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2774 "<== nxge_txdma_hw_start: NULL ring pointer"));
2775 return (NXGE_ERROR);
2776 }
2777 tx_desc_rings = tx_rings->rings;
2778 if (tx_desc_rings == NULL) {
2779 NXGE_DEBUG_MSG((nxgep, TX_CTL,
2780 "<== nxge_txdma_hw_start: NULL ring pointers"));
2781 return (NXGE_ERROR);
2782 }
2783
2784 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2785 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
2786
2787 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
2788 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
2789
2790 status = nxge_txdma_start_channel(nxgep, channel,
2791 (p_tx_ring_t)tx_desc_rings[channel],
2792 (p_tx_mbox_t)tx_mbox_p[channel]);
2793 if (status != NXGE_OK) {
2794 goto nxge_txdma_hw_start_fail1;
2795 }
2796
2797 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2798 "tx_rings $%p rings $%p",
2799 nxgep->tx_rings, nxgep->tx_rings->rings));
2800 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
2801 "tx_rings $%p tx_desc_rings $%p",
2802 nxgep->tx_rings, tx_desc_rings));
2803
2804 goto nxge_txdma_hw_start_exit;
2805
2806 nxge_txdma_hw_start_fail1:
2807 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2808 "==> nxge_txdma_hw_start: disable "
2809 "(status 0x%x channel %d)", status, channel));
2810
2811 nxge_txdma_hw_start_exit:
2812 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2813 "==> nxge_txdma_hw_start: (status 0x%x)", status));
2814
2815 return (status);
2816 }
2817
2818 /*
2819 * nxge_txdma_start_channel
2820 *
2821 * Start a TDC.
2822 *
2823 * Arguments:
2824 * nxgep
2825 * channel The channel to start.
2826 * tx_ring_p channel's transmit descriptor ring.
2827 * tx_mbox_p channel's mailbox.
2828 *
2829 * Notes:
2830 *
2831 * NPI/NXGE function calls:
2832 * nxge_reset_txdma_channel()
2833 * nxge_init_txdma_channel_event_mask()
2834 * nxge_enable_txdma_channel()
2835 *
2836 * Registers accessed:
2837 * none directly (see functions above).
2838 *
2839 * Context:
2840 * Any domain
2841 */
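/*
 * In outline: the channel is first stopped (injecting an error if needed),
 * then reset; on a service domain the per-channel FZC (logical page)
 * configuration is programmed; finally the event masks are initialized and
 * the channel is loaded and enabled.
 */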
2842 static nxge_status_t
2843 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
2844 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
2845
2846 {
2847 nxge_status_t status = NXGE_OK;
2848
2849 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2850 "==> nxge_txdma_start_channel (channel %d)", channel));
2851 /*
2852 * TXDMA/TXC must be in stopped state.
2853 */
2854 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2855
2856 /*
2857 * Reset TXDMA channel
2858 */
2859 tx_ring_p->tx_cs.value = 0;
2860 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2861 status = nxge_reset_txdma_channel(nxgep, channel,
2862 tx_ring_p->tx_cs.value);
2863 if (status != NXGE_OK) {
2864 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2865 "==> nxge_txdma_start_channel (channel %d)"
2866 " reset channel failed 0x%x", channel, status));
2867 goto nxge_txdma_start_channel_exit;
2868 }
2869
2870 /*
2871 * Initialize the TXDMA channel specific FZC control
2872 * configurations. These FZC registers are pertaining
2873 * to each TX channel (i.e. logical pages).
2874 */
2875 if (!isLDOMguest(nxgep)) {
2876 status = nxge_init_fzc_txdma_channel(nxgep, channel,
2877 tx_ring_p, tx_mbox_p);
2878 if (status != NXGE_OK) {
2879 goto nxge_txdma_start_channel_exit;
2880 }
2881 }
2882
2883 /*
2884 * Initialize the event masks.
2885 */
2886 tx_ring_p->tx_evmask.value = 0;
2887 status = nxge_init_txdma_channel_event_mask(nxgep,
2888 channel, &tx_ring_p->tx_evmask);
2889 if (status != NXGE_OK) {
2890 goto nxge_txdma_start_channel_exit;
2891 }
2892
2893 /*
2894 * Load TXDMA descriptors, buffers, mailbox,
2895 * initialise the DMA channels and
2896 * enable each DMA channel.
2897 */
2898 status = nxge_enable_txdma_channel(nxgep, channel,
2899 tx_ring_p, tx_mbox_p);
2900 if (status != NXGE_OK) {
2901 goto nxge_txdma_start_channel_exit;
2902 }
2903
2904 nxge_txdma_start_channel_exit:
2905 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
2906
2907 return (status);
2908 }
2909
2910 /*
2911 * nxge_txdma_stop_channel
2912 *
2913 * Stop a TDC.
2914 *
2915 * Arguments:
2916 * nxgep
2917 * channel The channel to stop.
2918 * tx_ring_p channel's transmit descriptor ring.
2919 * tx_mbox_p channel's mailbox.
2920 *
2921 * Notes:
2922 *
2923 * NPI/NXGE function calls:
2924 * nxge_txdma_stop_inj_err()
2925 * nxge_reset_txdma_channel()
2926 * nxge_init_txdma_channel_event_mask()
2927 * nxge_init_txdma_channel_cntl_stat()
2928 * nxge_disable_txdma_channel()
2929 *
2930 * Registers accessed:
2931 * none directly (see functions above).
2932 *
2933 * Context:
2934 * Any domain
2935 */
2936 /*ARGSUSED*/
2937 static nxge_status_t
2938 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
2939 {
2940 p_tx_ring_t tx_ring_p;
2941 int status = NXGE_OK;
2942
2943 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
2944 "==> nxge_txdma_stop_channel: channel %d", channel));
2945
2946 /*
2947 * Stop (disable) the TXDMA channel and TXC.  (If the stop bit is set
2948 * and the STOP_N_GO bit is not set, the TXDMA reset state will not
2949 * be set when the TXDMA channel is reset.)
2950 */
2951 (void) nxge_txdma_stop_inj_err(nxgep, channel);
2952
2953 if (nxgep->tx_rings == NULL) {
2954 status = NXGE_ERROR;
2955 goto nxge_txdma_stop_channel_exit;
2956 }
2957
2958 tx_ring_p = nxgep->tx_rings->rings[channel];
2959 if (tx_ring_p == NULL) {
2960 status = NXGE_ERROR;
2961 goto nxge_txdma_stop_channel_exit;
2962 }
2963
2964 /*
2965 * Reset TXDMA channel
2966 */
2967 tx_ring_p->tx_cs.value = 0;
2968 tx_ring_p->tx_cs.bits.ldw.rst = 1;
2969 status = nxge_reset_txdma_channel(nxgep, channel,
2970 tx_ring_p->tx_cs.value);
2971 if (status != NXGE_OK) {
2972 goto nxge_txdma_stop_channel_exit;
2973 }
2974
2975 #ifdef HARDWARE_REQUIRED
2976 /* Set up the interrupt event masks. */
2977 tx_ring_p->tx_evmask.value = 0;
2978 status = nxge_init_txdma_channel_event_mask(nxgep,
2979 channel, &tx_ring_p->tx_evmask);
2980 if (status != NXGE_OK) {
2981 goto nxge_txdma_stop_channel_exit;
2982 }
2983
2984 /* Initialize the DMA control and status register */
2985 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
2986 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
2987 tx_ring_p->tx_cs.value);
2988 if (status != NXGE_OK) {
2989 goto nxge_txdma_stop_channel_exit;
2990 }
2991
2992 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
2993
2994 /* Disable channel */
2995 status = nxge_disable_txdma_channel(nxgep, channel,
2996 tx_ring_p, tx_mbox_p);
2997 if (status != NXGE_OK) {
2998 goto nxge_txdma_stop_channel_exit;
2999 }
3000
3001 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3002 "==> nxge_txdma_stop_channel: event done"));
3003
3004 #endif
3005
3006 nxge_txdma_stop_channel_exit:
3007 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
3008 return (status);
3009 }
3010
3011 /*
3012 * nxge_txdma_get_ring
3013 *
3014 * Get the ring for a TDC.
3015 *
3016 * Arguments:
3017 * nxgep
3018 * channel
3019 *
3020 * Notes:
3021 *
3022 * NPI/NXGE function calls:
3023 *
3024 * Registers accessed:
3025 *
3026 * Context:
3027 * Any domain
3028 */
3029 static p_tx_ring_t
3030 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
3031 {
3032 nxge_grp_set_t *set = &nxgep->tx_set;
3033 int tdc;
3034
3035 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
3036
3037 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3038 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3039 "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
3040 goto return_null;
3041 }
3042
3043 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3044 if ((1 << tdc) & set->owned.map) {
3045 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3046 if (ring) {
3047 if (channel == ring->tdc) {
3048 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3049 "<== nxge_txdma_get_ring: "
3050 "tdc %d ring $%p", tdc, ring));
3051 return (ring);
3052 }
3053 }
3054 }
3055 }
3056
3057 return_null:
3058 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
3059 "ring not found"));
3060
3061 return (NULL);
3062 }
3063
3064 /*
3065 * nxge_txdma_get_mbox
3066 *
3067 * Get the mailbox for a TDC.
3068 *
3069 * Arguments:
3070 * nxgep
3071 * channel
3072 *
3073 * Notes:
3074 *
3075 * NPI/NXGE function calls:
3076 *
3077 * Registers accessed:
3078 *
3079 * Context:
3080 * Any domain
3081 */
3082 static p_tx_mbox_t
3083 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
3084 {
3085 nxge_grp_set_t *set = &nxgep->tx_set;
3086 int tdc;
3087
3088 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
3089
3090 if (nxgep->tx_mbox_areas_p == 0 ||
3091 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
3092 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3093 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
3094 goto return_null;
3095 }
3096
3097 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3098 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3099 "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
3100 goto return_null;
3101 }
3102
3103 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3104 if ((1 << tdc) & set->owned.map) {
3105 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3106 if (ring) {
3107 if (channel == ring->tdc) {
3108 tx_mbox_t *mailbox = nxgep->
3109 tx_mbox_areas_p->
3110 txmbox_areas_p[tdc];
3111 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3112 "<== nxge_txdma_get_mbox: tdc %d "
3113 "ring $%p", tdc, mailbox));
3114 return (mailbox);
3115 }
3116 }
3117 }
3118 }
3119
3120 return_null:
3121 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
3122 "mailbox not found"));
3123
3124 return (NULL);
3125 }
3126
3127 /*
3128 * nxge_tx_err_evnts
3129 *
3130 * Recover a TDC.
3131 *
3132 * Arguments:
3133 * nxgep
3134 * index The index to the TDC ring.
3135 * ldvp Used to get the channel number ONLY.
3136 * cs A copy of the bits from TX_CS.
3137 *
3138 * Notes:
3139 * Calling tree:
3140 * nxge_tx_intr()
3141 *
3142 * NPI/NXGE function calls:
3143 * npi_txdma_ring_error_get()
3144 * npi_txdma_inj_par_error_get()
3145 * nxge_txdma_fatal_err_recover()
3146 *
3147 * Registers accessed:
3148 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
3149 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
3150 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3151 *
3152 * Context:
3153 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
3154 */
3155 /*ARGSUSED*/
3156 static nxge_status_t
3157 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
3158 {
3159 npi_handle_t handle;
3160 npi_status_t rs;
3161 uint8_t channel;
3162 p_tx_ring_t *tx_rings;
3163 p_tx_ring_t tx_ring_p;
3164 p_nxge_tx_ring_stats_t tdc_stats;
3165 boolean_t txchan_fatal = B_FALSE;
3166 nxge_status_t status = NXGE_OK;
3167 tdmc_inj_par_err_t par_err;
3168 uint32_t value;
3169
3170 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
3171 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3172 channel = ldvp->channel;
3173
3174 tx_rings = nxgep->tx_rings->rings;
3175 tx_ring_p = tx_rings[index];
3176 tdc_stats = tx_ring_p->tdc_stats;
3177 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
3178 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
3179 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
3180 if ((rs = npi_txdma_ring_error_get(handle, channel,
3181 &tdc_stats->errlog)) != NPI_SUCCESS)
3182 return (NXGE_ERROR | rs);
3183 }
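/*
 * For the error types checked above, the hardware latches the failing
 * descriptor address in TX_RNG_ERR_LOGH/LOGL; that snapshot is captured
 * into tdc_stats->errlog before the individual status bits are examined
 * below.
 */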
3184
3185 if (cs.bits.ldw.mbox_err) {
3186 tdc_stats->mbox_err++;
3187 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3188 NXGE_FM_EREPORT_TDMC_MBOX_ERR);
3189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3190 "==> nxge_tx_err_evnts(channel %d): "
3191 "fatal error: mailbox", channel));
3192 txchan_fatal = B_TRUE;
3193 }
3194 if (cs.bits.ldw.pkt_size_err) {
3195 tdc_stats->pkt_size_err++;
3196 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3197 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
3198 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3199 "==> nxge_tx_err_evnts(channel %d): "
3200 "fatal error: pkt_size_err", channel));
3201 txchan_fatal = B_TRUE;
3202 }
3203 if (cs.bits.ldw.tx_ring_oflow) {
3204 tdc_stats->tx_ring_oflow++;
3205 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3206 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
3207 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3208 "==> nxge_tx_err_evnts(channel %d): "
3209 "fatal error: tx_ring_oflow", channel));
3210 txchan_fatal = B_TRUE;
3211 }
3212 if (cs.bits.ldw.pref_buf_par_err) {
3213 tdc_stats->pre_buf_par_err++;
3214 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3215 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
3216 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3217 "==> nxge_tx_err_evnts(channel %d): "
3218 "fatal error: pre_buf_par_err", channel));
3219 /* Clear error injection source for parity error */
3220 (void) npi_txdma_inj_par_error_get(handle, &value);
3221 par_err.value = value;
3222 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
3223 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3224 txchan_fatal = B_TRUE;
3225 }
3226 if (cs.bits.ldw.nack_pref) {
3227 tdc_stats->nack_pref++;
3228 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3229 NXGE_FM_EREPORT_TDMC_NACK_PREF);
3230 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3231 "==> nxge_tx_err_evnts(channel %d): "
3232 "fatal error: nack_pref", channel));
3233 txchan_fatal = B_TRUE;
3234 }
3235 if (cs.bits.ldw.nack_pkt_rd) {
3236 tdc_stats->nack_pkt_rd++;
3237 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3238 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
3239 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3240 "==> nxge_tx_err_evnts(channel %d): "
3241 "fatal error: nack_pkt_rd", channel));
3242 txchan_fatal = B_TRUE;
3243 }
3244 if (cs.bits.ldw.conf_part_err) {
3245 tdc_stats->conf_part_err++;
3246 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3247 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
3248 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3249 "==> nxge_tx_err_evnts(channel %d): "
3250 "fatal error: config_partition_err", channel));
3251 txchan_fatal = B_TRUE;
3252 }
3253 if (cs.bits.ldw.pkt_prt_err) {
3254 tdc_stats->pkt_part_err++;
3255 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
3256 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
3257 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3258 "==> nxge_tx_err_evnts(channel %d): "
3259 "fatal error: pkt_prt_err", channel));
3260 txchan_fatal = B_TRUE;
3261 }
3262
3263 /* Clear error injection source in case this is an injected error */
3264 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
3265
3266 if (txchan_fatal) {
3267 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3268 " nxge_tx_err_evnts: "
3269 " fatal error on channel %d cs 0x%llx\n",
3270 channel, cs.value));
3271 status = nxge_txdma_fatal_err_recover(nxgep, channel,
3272 tx_ring_p);
3273 if (status == NXGE_OK) {
3274 FM_SERVICE_RESTORED(nxgep);
3275 }
3276 }
3277
3278 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
3279
3280 return (status);
3281 }
3282
3283 static nxge_status_t
3284 nxge_txdma_fatal_err_recover(
3285 p_nxge_t nxgep,
3286 uint16_t channel,
3287 p_tx_ring_t tx_ring_p)
3288 {
3289 npi_handle_t handle;
3290 npi_status_t rs = NPI_SUCCESS;
3291 p_tx_mbox_t tx_mbox_p;
3292 nxge_status_t status = NXGE_OK;
3293
3294 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
3295 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3296 "Recovering from TxDMAChannel#%d error...", channel));
3297
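/*
 * Recovery sequence, roughly: stop the channel, reclaim completed
 * descriptors, reset the channel, zero the kick (tail) register,
 * reprogram the FZC/logical-page configuration (service domain only),
 * reinitialize the event masks and ring indices, and re-enable the
 * channel.
 */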
3298 /*
3299 * Stop the DMA channel and wait for the stop-done indication.
3300 * If the stop-done bit is not set, treat it as an error and
3301 * fail the recovery.
3302 */
3303
3304 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3305 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
3306 MUTEX_ENTER(&tx_ring_p->lock);
3307 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
3308 if (rs != NPI_SUCCESS) {
3309 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3310 "==> nxge_txdma_fatal_err_recover (channel %d): "
3311 "stop failed ", channel));
3312 goto fail;
3313 }
3314
3315 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
3316 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
3317
3318 /*
3319 * Reset TXDMA channel
3320 */
3321 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
3322 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
3323 NPI_SUCCESS) {
3324 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3325 "==> nxge_txdma_fatal_err_recover (channel %d)"
3326 " reset channel failed 0x%x", channel, rs));
3327 goto fail;
3328 }
3329
3330 /*
3331 * Reset the tail (kick) register to 0.
3332 * (Hardware will not reset it; a Tx overflow fatal error
3333 * occurs if the tail is not set to 0 after reset.)
3334 */
3335 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
3336
3337 /* Restart TXDMA channel */
3338
3339 if (!isLDOMguest(nxgep)) {
3340 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
3341
3342 // XXX This is a problem in HIO!
3343 /*
3344 * Initialize the TXDMA channel specific FZC control
3345 * configurations. These FZC registers are pertaining
3346 * to each TX channel (i.e. logical pages).
3347 */
3348 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
3349 status = nxge_init_fzc_txdma_channel(nxgep, channel,
3350 tx_ring_p, tx_mbox_p);
3351 if (status != NXGE_OK)
3352 goto fail;
3353 }
3354
3355 /*
3356 * Initialize the event masks.
3357 */
3358 tx_ring_p->tx_evmask.value = 0;
3359 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
3360 &tx_ring_p->tx_evmask);
3361 if (status != NXGE_OK)
3362 goto fail;
3363
3364 tx_ring_p->wr_index_wrap = B_FALSE;
3365 tx_ring_p->wr_index = 0;
3366 tx_ring_p->rd_index = 0;
3367
3368 /*
3369 * Load TXDMA descriptors, buffers, mailbox,
3370 * initialise the DMA channels and
3371 * enable each DMA channel.
3372 */
3373 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
3374 status = nxge_enable_txdma_channel(nxgep, channel,
3375 tx_ring_p, tx_mbox_p);
3376 MUTEX_EXIT(&tx_ring_p->lock);
3377 if (status != NXGE_OK)
3378 goto fail;
3379
3380 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3381 "Recovery Successful, TxDMAChannel#%d Restored",
3382 channel));
3383 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
3384
3385 return (NXGE_OK);
3386
3387 fail:
3388 MUTEX_EXIT(&tx_ring_p->lock);
3389
3390 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3391 "nxge_txdma_fatal_err_recover (channel %d): "
3392 "failed to recover this txdma channel", channel));
3393 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
3394
3395 return (status);
3396 }
3397
3398 /*
3399 * nxge_tx_port_fatal_err_recover
3400 *
3401 * Attempt to recover from a fatal port error.
3402 *
3403 * Arguments:
3404 * nxgep
3405 *
3406 * Notes:
3407 * How would a guest do this?
3408 *
3409 * NPI/NXGE function calls:
3410 *
3411 * Registers accessed:
3412 *
3413 * Context:
3414 * Service domain
3415 */
3416 nxge_status_t
3417 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
3418 {
3419 nxge_grp_set_t *set = &nxgep->tx_set;
3420 nxge_channel_t tdc;
3421
3422 tx_ring_t *ring;
3423 tx_mbox_t *mailbox;
3424
3425 npi_handle_t handle;
3426 nxge_status_t status;
3427 npi_status_t rs;
3428
3429 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
3430 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3431 "Recovering from TxPort error..."));
3432
3433 if (isLDOMguest(nxgep)) {
3434 return (NXGE_OK);
3435 }
3436
3437 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
3438 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3439 "<== nxge_tx_port_fatal_err_recover: not initialized"));
3440 return (NXGE_ERROR);
3441 }
3442
3443 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
3444 NXGE_DEBUG_MSG((nxgep, TX_CTL,
3445 "<== nxge_tx_port_fatal_err_recover: "
3446 "NULL ring pointer(s)"));
3447 return (NXGE_ERROR);
3448 }
3449
3450 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3451 if ((1 << tdc) & set->owned.map) {
3452 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3453 if (ring)
3454 MUTEX_ENTER(&ring->lock);
3455 }
3456 }
3457
3458 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3459
3460 /*
3461 * Stop all the TDCs owned by us.
3462 * (The shared TDCs will have been stopped by their owners.)
3463 */
3464 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3465 if ((1 << tdc) & set->owned.map) {
3466 ring = nxgep->tx_rings->rings[tdc];
3467 if (ring) {
3468 rs = npi_txdma_channel_control
3469 (handle, TXDMA_STOP, tdc);
3470 if (rs != NPI_SUCCESS) {
3471 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3472 "nxge_tx_port_fatal_err_recover "
3473 "(channel %d): stop failed ", tdc));
3474 goto fail;
3475 }
3476 }
3477 }
3478 }
3479
3480 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
3481
3482 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3483 if ((1 << tdc) & set->owned.map) {
3484 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3485 if (ring) {
3486 (void) nxge_txdma_reclaim(nxgep, ring, 0);
3487 }
3488 }
3489 }
3490
3491 /*
3492 * Reset all the TDCs.
3493 */
3494 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
3495
3496 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3497 if ((1 << tdc) & set->owned.map) {
3498 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3499 if (ring) {
3500 if ((rs = npi_txdma_channel_control
3501 (handle, TXDMA_RESET, tdc))
3502 != NPI_SUCCESS) {
3503 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
3504 "nxge_tx_port_fatal_err_recover "
3505 "(channel %d) reset channel "
3506 "failed 0x%x", tdc, rs));
3507 goto fail;
3508 }
3509 }
3510 /*
3511 * Reset the tail (kick) register to 0.
3512 * (Hardware will not reset it; a Tx overflow fatal error
3513 * occurs if the tail is not set to 0 after reset.)
3514 */
3515 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
3516 }
3517 }
3518
3519 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
3520
3521 /* Restart all the TDCs */
3522 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3523 if ((1 << tdc) & set->owned.map) {
3524 ring = nxgep->tx_rings->rings[tdc];
3525 if (ring) {
3526 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3527 status = nxge_init_fzc_txdma_channel(nxgep, tdc,
3528 ring, mailbox);
3529 ring->tx_evmask.value = 0;
3530 /*
3531 * Initialize the event masks.
3532 */
3533 status = nxge_init_txdma_channel_event_mask
3534 (nxgep, tdc, &ring->tx_evmask);
3535
3536 ring->wr_index_wrap = B_FALSE;
3537 ring->wr_index = 0;
3538 ring->rd_index = 0;
3539
3540 if (status != NXGE_OK)
3541 goto fail;
3544 }
3545 }
3546 }
3547
3548 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
3549
3550 /* Re-enable all the TDCs */
3551 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3552 if ((1 << tdc) & set->owned.map) {
3553 ring = nxgep->tx_rings->rings[tdc];
3554 if (ring) {
3555 mailbox = nxge_txdma_get_mbox(nxgep, tdc);
3556 status = nxge_enable_txdma_channel(nxgep, tdc,
3557 ring, mailbox);
3558 if (status != NXGE_OK)
3559 goto fail;
3560 }
3561 }
3562 }
3563
3564 /*
3565 * Unlock all the TDCs.
3566 */
3567 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3568 if ((1 << tdc) & set->owned.map) {
3569 tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
3570 if (ring)
3571 MUTEX_EXIT(&ring->lock);
3572 }
3573 }
3574
3575 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
3576 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3577
3578 return (NXGE_OK);
3579
3580 fail:
3581 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
3582 if ((1 << tdc) & set->owned.map) {
3583 ring = nxgep->tx_rings->rings[tdc];
3584 if (ring)
3585 MUTEX_EXIT(&ring->lock);
3586 }
3587 }
3588
3589 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
3590 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
3591
3592 return (status);
3593 }
3594
3595 /*
3596 * nxge_txdma_inject_err
3597 *
3598 * Inject an error into a TDC.
3599 *
3600 * Arguments:
3601 * nxgep
3602 * err_id The error to inject.
3603 * chan The channel to inject into.
3604 *
3605 * Notes:
3606 * This is called from nxge_main.c:nxge_err_inject()
3607 * Has this ioctl ever been used?
3608 *
3609 * NPI/NXGE function calls:
3610 * npi_txdma_inj_par_error_get()
3611 * npi_txdma_inj_par_error_set()
3612 *
3613 * Registers accessed:
3614 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
3615 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
3617 *
3618 * Context:
3619 * Service domain
3620 */
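/*
 * Note that the parity-error case below goes through TDMC_INJ_PAR_ERR, a
 * shared FZC register with one inject bit per channel, while the other
 * error identifiers are injected through the per-channel TDMC_INTR_DBG
 * register.
 */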
3621 void
3622 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
3623 {
3624 tdmc_intr_dbg_t tdi;
3625 tdmc_inj_par_err_t par_err;
3626 uint32_t value;
3627 npi_handle_t handle;
3628
3629 switch (err_id) {
3630
3631 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
3632 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3633 /* Clear error injection source for parity error */
3634 (void) npi_txdma_inj_par_error_get(handle, &value);
3635 par_err.value = value;
3636 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
3637 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3638
3640 (void) npi_txdma_inj_par_error_get(handle, &value);
3641 par_err.value = value;
3642 par_err.bits.ldw.inject_parity_error |= (1 << chan);
3643 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
3644 (unsigned long long)par_err.value);
3645 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
3646 break;
3647
3648 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
3649 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
3650 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
3651 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
3652 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
3653 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
3654 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
3655 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3656 chan, &tdi.value);
3657 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
3658 tdi.bits.ldw.pref_buf_par_err = 1;
3659 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
3660 tdi.bits.ldw.mbox_err = 1;
3661 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
3662 tdi.bits.ldw.nack_pref = 1;
3663 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
3664 tdi.bits.ldw.nack_pkt_rd = 1;
3665 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
3666 tdi.bits.ldw.pkt_size_err = 1;
3667 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
3668 tdi.bits.ldw.tx_ring_oflow = 1;
3669 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
3670 tdi.bits.ldw.conf_part_err = 1;
3671 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
3672 tdi.bits.ldw.pkt_part_err = 1;
3673 #if defined(__i386)
3674 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
3675 tdi.value);
3676 #else
3677 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
3678 tdi.value);
3679 #endif
3680 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
3681 chan, tdi.value);
3682
3683 break;
3684 }
3685 }
3686