13859Sml29623 /* 23859Sml29623 * CDDL HEADER START 33859Sml29623 * 43859Sml29623 * The contents of this file are subject to the terms of the 53859Sml29623 * Common Development and Distribution License (the "License"). 63859Sml29623 * You may not use this file except in compliance with the License. 73859Sml29623 * 83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93859Sml29623 * or http://www.opensolaris.org/os/licensing. 103859Sml29623 * See the License for the specific language governing permissions 113859Sml29623 * and limitations under the License. 123859Sml29623 * 133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each 143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the 163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying 173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner] 183859Sml29623 * 193859Sml29623 * CDDL HEADER END 203859Sml29623 */ 213859Sml29623 /* 226495Sspeer * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 233859Sml29623 * Use is subject to license terms. 
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

/*
 * Patchable tunables for the transmit DMA path (values may be
 * overridden at boot via /etc/system or at runtime with mdb).
 */
uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 32;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

/* Tunables defined in other compilation units of this driver. */
extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern int nxge_serial_tx(mblk_t *mp, void *arg);

/*
 * Forward declarations for this file's static helpers.
 * (Their definitions appear later in this file.)
 */
static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
	p_tx_ring_t ring_p,
	uint16_t channel);

/*
 * nxge_init_txdma_channels
 *
 * Map and start every TDC owned by this instance.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Walks the logical group bitmap in nxgep->tx_set and, for every
 *	channel bit set in each group's map, hands the channel to
 *	nxge_grp_dc_add().  Aborts on the first failure.
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int i, count;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			int tdc;
			nxge_grp_t *group = set->group[i];
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & group->map) {
					if ((nxge_grp_dc_add(nxgep,
					    (vr_handle_t)group,
					    VP_BOUND_TX, tdc)))
						return (NXGE_ERROR);
				}
			}
		}
		/* Stop early once all extant groups have been visited. */
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));

	return (NXGE_OK);
}

/*
 * nxge_init_txdma_channel
 *
 * Map and start one TDC: map its descriptor/mailbox areas, start the
 * hardware, and (once only) create its kstats.  On failure the channel's
 * TDC registers are dumped for diagnosis and any mapping is undone.
 *
 * Arguments:
 * 	nxge
 * 	channel		The channel to initialize.
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel(
	p_nxge_t nxge,
	int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

	status = nxge_map_txdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channel: status 0x%x", status));
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	status = nxge_txdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		/* Unwind the mapping done above before bailing out. */
		(void) nxge_unmap_txdma_channel(nxge, channel);
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	/* Create the per-channel kstats only on first initialization. */
	if (!nxge->statsp->tdc_ksp[channel])
		nxge_setup_tdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

	return (status);
}

/*
 * nxge_uninit_txdma_channels
 *
 * Tear down every TDC this instance owns, via nxge_grp_dc_remove().
 *
 * Context:
 *	Any domain
 */
void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "nxge_uninit_txdma_channels: no channels"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
}

/*
 * nxge_uninit_txdma_channel
 *
 * Tear down one TDC: delete its kstats, stop the channel, and unmap
 * its DMA resources.  Inverse of nxge_init_txdma_channel().
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to tear down.
 *
 * Context:
 *	Any domain
 */
void
nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));

	if (nxgep->statsp->tdc_ksp[channel]) {
		kstat_delete(nxgep->statsp->tdc_ksp[channel]);
		nxgep->statsp->tdc_ksp[channel] = 0;
	}

	(void) nxge_txdma_stop_channel(nxgep, channel);
	nxge_unmap_txdma_channel(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_uninit_txdma_channel"));
}

/*
 * nxge_setup_dma_common
 *
 * Carve a sub-area of (entries * size) bytes off the front of the
 * source DMA area into *dest_p, then advance *src_p past it: the
 * source's kernel address, length, and DMA cookie are all moved
 * forward by the carved size so repeated calls partition one large
 * area into consecutive pieces.
 *
 * Arguments:
 * 	dest_p		Receives the carved sub-area descriptor.
 * 	src_p		The area to carve from; updated in place.
 * 	entries		Number of fixed-size blocks in the sub-area.
 * 	size		Size in bytes of each block.
 */
void
nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
	uint32_t entries, uint32_t size)
{
	size_t tsize;
	*dest_p = *src_p;
	tsize = size * entries;
	dest_p->alength = tsize;
	dest_p->nblocks = entries;
	dest_p->block_size = size;
	dest_p->offset += tsize;

	/* Consume the carved region from the source area. */
	src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
	src_p->alength -= tsize;
	src_p->dma_cookie.dmac_laddress += tsize;
	src_p->dma_cookie.dmac_size -= tsize;
}

/*
 * nxge_reset_txdma_channel
 *
 * Reset a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_reset()
 *	npi_txdma_channel_control()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TX_RING_KICK	DMC+0x40018 Transmit Ring Kick
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
{
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* If the RST bit is already set, use the raw reset entry point. */
	if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
		rs = npi_txdma_channel_reset(handle, channel);
	} else {
		rs = npi_txdma_channel_control(handle, TXDMA_RESET,
		    channel);
	}

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	/*
	 * Reset the tail (kick) register to 0.
	 * (Hardware will not reset it.  Tx overflow fatal
	 * error if tail is not set to 0 after reset!)
	 */
	TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
	return (status);
}

/*
 * nxge_init_txdma_channel_event_mask
 *
 * Enable interrupts for a set of events.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 * 	mask_p		The events to enable.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_event_mask()
 *
 * Registers accessed:
 *	TX_ENT_MSK	DMC+0x40020	Transmit Event Mask
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
	p_tx_dma_ent_msk_t mask_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_event_mask"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_init_txdma_channel_cntl_stat
 *
 * Stop a TDC.
 If at first we don't succeed, inject an error.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * Notes:
 *	NOTE(review): despite the summary above, this function only
 *	writes the caller-supplied value to TX_CS via OP_SET; the
 *	"stop" wording looks copied from a sibling routine -- confirm.
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
	uint64_t reg_data)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_init_txdma_channel_cntl_stat"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Write the caller's value into the channel's TX_CS register. */
	rs = npi_txdma_control_status(handle, OP_SET, channel,
	    (p_tx_cs_t)&reg_data);

	if (rs != NPI_SUCCESS) {
		status = NXGE_ERROR | rs;
	}

	return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 * Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox,
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000	Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030	TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038	TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
	uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the transmit ring configurations.
	 */
	rs = npi_txdma_ring_config(handle, OP_SET, channel,
	    (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (isLDOMguest(nxgep)) {
		/* Add interrupt handler for this channel.
 */
		if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
			return (NXGE_ERROR);
	}

	/* Write to hardware the mailbox */
	rs = npi_txdma_mbox_config(handle, OP_SET, channel,
	    (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/* Start the DMA engine. */
	rs = npi_txdma_channel_init_enable(handle, channel);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));

	return (status);
}

/*
 * nxge_fill_tx_hdr
 *
 * Fill in the Neptune transmit packet header that precedes the packet
 * data.  When fill_len is set, only the total transfer length field is
 * written (fast path).  Otherwise the Ethernet/LLC/VLAN and IP headers
 * of the packet in mp are parsed to set the pad, L3/L4 start, IP header
 * length, and checksum-enable fields of the header.  For UDP with
 * software checksumming (nxge_cksum_offload == 0) the full checksum is
 * computed here and stored into the packet at stuff_offset.
 *
 * Arguments:
 * 	mp		The packet (data only; the Neptune header is in
 *			pkthdrp, not in mp).
 * 	fill_len	If true, only fill the total transfer length.
 * 	l4_cksum	If true, enable/compute the L4 checksum.
 * 	pkt_len		Total transfer length, used when fill_len is set.
 * 	npads		Number of pad bytes to record in the header.
 * 	pkthdrp		The Neptune header to fill in.
 * 	start_offset	L4 start offset (bytes) from the checksum request.
 * 	stuff_offset	L4 checksum-stuff offset (bytes).
 *
 * Notes:
 *	Caller must have zeroed *pkthdrp beforehand (see comment below).
 */
void
nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
	boolean_t l4_cksum, int pkt_len, uint8_t npads,
	p_tx_pkt_hdr_all_t pkthdrp,
	t_uscalar_t start_offset,
	t_uscalar_t stuff_offset)
{
	p_tx_pkt_header_t hdrp;
	p_mblk_t nmp;
	uint64_t tmp;
	size_t mblk_len;
	size_t iph_len;
	size_t hdrs_size;
	/* Enough for an Ethernet header plus up to 64 bytes of L3/L4. */
	uint8_t hdrs_buf[sizeof (struct ether_header) +
	    64 + sizeof (uint32_t)];
	uint8_t *cursor;
	uint8_t *ip_buf;
	uint16_t eth_type;
	uint8_t ipproto;
	boolean_t is_vlan = B_FALSE;
	size_t eth_hdr_size;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));

	/*
	 * Caller should zero out the headers first.
	 */
	hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;

	if (fill_len) {
		/* Fast path: only the total transfer length is needed. */
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: pkt_len %d "
		    "npads %d", pkt_len, npads));
		tmp = (uint64_t)pkt_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
		goto fill_tx_header_done;
	}

	hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);

	/*
	 * mp is the original data packet (does not include the
	 * Neptune transmit header).
	 */
	nmp = mp;
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
	    "mp $%p b_rptr $%p len %d",
	    mp, nmp->b_rptr, MBLKL(nmp)));
	/* copy ether_header from mblk to hdrs_buf */
	cursor = &hdrs_buf[0];
	tmp = sizeof (struct ether_vlan_header);
	while ((nmp != NULL) && (tmp > 0)) {
		size_t buflen;
		mblk_len = MBLKL(nmp);
		buflen = min((size_t)tmp, mblk_len);
		bcopy(nmp->b_rptr, cursor, buflen);
		cursor += buflen;
		tmp -= buflen;
		nmp = nmp->b_cont;
	}

	nmp = mp;
	mblk_len = MBLKL(nmp);
	ip_buf = NULL;
	eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
	NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
	    "ether type 0x%x", eth_type, hdrp->value));

	if (eth_type < ETHERMTU) {
		/* 802.3 length field, not an EtherType: LLC framing. */
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
		    "value 0x%llx", hdrp->value));
		if (*(hdrs_buf + sizeof (struct ether_header))
		    == LLC_SNAP_SAP) {
			/* SNAP: real EtherType is 6 bytes into the LLC hdr. */
			eth_type = ntohs(*((uint16_t *)(hdrs_buf +
			    sizeof (struct ether_header) + 6)));
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
			    eth_type));
		} else {
			goto fill_tx_header_done;
		}
	} else if (eth_type == VLAN_ETHERTYPE) {
		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);

		eth_type = ntohs(((struct ether_vlan_header *)
		    hdrs_buf)->ether_type);
		is_vlan = B_TRUE;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
		    "value 0x%llx", hdrp->value));
	}

	if (!is_vlan) {
		eth_hdr_size = sizeof (struct ether_header);
	} else {
		eth_hdr_size = sizeof (struct ether_vlan_header);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		/*
		 * Try to read the IP header directly from the first mblk;
		 * fall back to reassembling into hdrs_buf if the header
		 * is split across mblks.
		 */
		if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
			ip_buf = nmp->b_rptr + eth_hdr_size;
			mblk_len -= eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
			if (mblk_len > (iph_len + sizeof (uint32_t))) {
				ip_buf = nmp->b_rptr;
				ip_buf += eth_hdr_size;
			} else {
				ip_buf = NULL;
			}

		}
		if (ip_buf == NULL) {
			/* Gather the leading headers into hdrs_buf. */
			hdrs_size = 0;
			((p_ether_header_t)hdrs_buf)->ether_type = 0;
			while ((nmp) && (hdrs_size <
			    sizeof (hdrs_buf))) {
				mblk_len = (size_t)nmp->b_wptr -
				    (size_t)nmp->b_rptr;
				if (mblk_len >=
				    (sizeof (hdrs_buf) - hdrs_size))
					mblk_len = sizeof (hdrs_buf) -
					    hdrs_size;
				bcopy(nmp->b_rptr,
				    &hdrs_buf[hdrs_size], mblk_len);
				hdrs_size += mblk_len;
				nmp = nmp->b_cont;
			}
			ip_buf = hdrs_buf;
			ip_buf += eth_hdr_size;
			iph_len = ((*ip_buf) & 0x0f);
		}

		/* Byte 9 of an IPv4 header is the protocol field. */
		ipproto = ip_buf[9];

		tmp = (uint64_t)iph_len;
		hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
		tmp = (uint64_t)(eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
		    "tmp 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto, tmp));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
		    "value 0x%llx", hdrp->value));

		break;

	case ETHERTYPE_IPV6:
		/* Always reassemble the leading headers into hdrs_buf. */
		hdrs_size = 0;
		((p_ether_header_t)hdrs_buf)->ether_type = 0;
		while ((nmp) && (hdrs_size <
		    sizeof (hdrs_buf))) {
			mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
			if (mblk_len >=
			    (sizeof (hdrs_buf) - hdrs_size))
				mblk_len = sizeof (hdrs_buf) -
				    hdrs_size;
			bcopy(nmp->b_rptr,
			    &hdrs_buf[hdrs_size], mblk_len);
			hdrs_size += mblk_len;
			nmp = nmp->b_cont;
		}
		ip_buf = hdrs_buf;
		ip_buf += eth_hdr_size;

		tmp = 1ull;
		hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);

		tmp = (eth_hdr_size >> 1);
		hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);

		/* byte 6 is the next header protocol */
		ipproto = ip_buf[6];

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
		    " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
		    iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
		    ipproto));
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
		    "value 0x%llx", hdrp->value));

		break;

	default:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
		goto fill_tx_header_done;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
		if (l4_cksum) {
			/* Let hardware compute the TCP checksum. */
			hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
			hdrp->value |=
			    (((uint64_t)(start_offset >> 1)) <<
			    TX_PKT_HEADER_L4START_SHIFT);
			hdrp->value |=
			    (((uint64_t)(stuff_offset >> 1)) <<
			    TX_PKT_HEADER_L4STUFF_SHIFT);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
			    "value 0x%llx", hdrp->value));
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
		    "value 0x%llx", hdrp->value));
		break;

	case IPPROTO_UDP:
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
		if (l4_cksum) {
			if (!nxge_cksum_offload) {
				uint16_t *up;
				uint16_t cksum;
				t_uscalar_t stuff_len;

				/*
				 * The checksum field has the
				 * partial checksum.
				 * IP_CSUM() macro calls ip_cksum() which
				 * can add in the partial checksum.
				 */
				cksum = IP_CSUM(mp, start_offset, 0);
				stuff_len = stuff_offset;
				nmp = mp;
				mblk_len = MBLKL(nmp);
				/*
				 * NOTE(review): mblk_len is never
				 * recomputed inside this loop, so the
				 * walk uses the first mblk's length for
				 * every mblk.  This is only safe if the
				 * stuff offset always lands in the first
				 * mblk (or all mblks are at least as long
				 * as the first) -- verify against callers.
				 */
				while ((nmp != NULL) &&
				    (mblk_len < stuff_len)) {
					stuff_len -= mblk_len;
					nmp = nmp->b_cont;
				}
				ASSERT(nmp);
				up = (uint16_t *)(nmp->b_rptr + stuff_len);

				*up = cksum;
				hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    "use sw cksum "
				    "write to $%p cksum 0x%x content up 0x%x",
				    stuff_len,
				    up,
				    cksum,
				    *up));
			} else {
				/* Hardware will compute the full checksum */
				hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
				hdrp->value |=
				    (((uint64_t)(start_offset >> 1)) <<
				    TX_PKT_HEADER_L4START_SHIFT);
				hdrp->value |=
				    (((uint64_t)(stuff_offset >> 1)) <<
				    TX_PKT_HEADER_L4STUFF_SHIFT);

				/*
				 * NOTE(review): the comma after
				 * "cksum 0x%x " below splits the format
				 * string, making "value 0x%llx" the first
				 * vararg; the conversions no longer match
				 * their arguments.  Debug-only output, but
				 * should be fixed (join the literals).
				 */
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_hdr_init: UDP offset %d "
				    " use partial checksum "
				    "cksum 0x%x ",
				    "value 0x%llx",
				    stuff_offset,
				    IP_CSUM(mp, start_offset, 0),
				    hdrp->value));
			}
		}

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_tx_pkt_hdr_init: UDP"
		    "value 0x%llx", hdrp->value));
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*
 * nxge_tx_pkt_header_reserve
 *
 * Allocate a new mblk, link the packet mp behind it, and reserve
 * TX_PKT_HEADER_SIZE bytes at its end for the Neptune transmit header.
 * Returns the new head mblk, or NULL if allocb() fails.
 *
 * NOTE(review): npads is not referenced here (function is ARGSUSED) --
 * presumably reserved for future use; confirm with callers.
 */
/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	/* Place the header at the very end of the new buffer. */
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

/*
 * nxge_tx_pkt_nmblocks
 *
 * Count the DMA descriptors (gather pointers) the packet mp will need,
 * splitting any mblk longer than the hardware's 4K transfer limit and
 * pulling up the message when the 15-gather-pointer limit would be
 * exceeded.  Also accumulates the packet's total length.
 *
 * Arguments:
 * 	mp		The packet; may be modified (dupb/msgpullup).
 * 	tot_xfer_len_p	Receives the total transfer length.
 *
 * Returns the block (descriptor) count, or 0 on allocation failure.
 */
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;
7533859Sml29623 7543859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7553859Sml29623 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 7563859Sml29623 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 7573859Sml29623 7583859Sml29623 nmp = mp; 7593859Sml29623 bmp = mp; 7603859Sml29623 nmblks = 0; 7613859Sml29623 pkt_len = 0; 7623859Sml29623 *tot_xfer_len_p = 0; 7633859Sml29623 7643859Sml29623 while (nmp) { 7653859Sml29623 len = MBLKL(nmp); 7663859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7673859Sml29623 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7683859Sml29623 len, pkt_len, nmblks, 7693859Sml29623 *tot_xfer_len_p)); 7703859Sml29623 7713859Sml29623 if (len <= 0) { 7723859Sml29623 bmp = nmp; 7733859Sml29623 nmp = nmp->b_cont; 7743859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7753859Sml29623 "==> nxge_tx_pkt_nmblocks: " 7763859Sml29623 "len (0) pkt_len %d nmblks %d", 7773859Sml29623 pkt_len, nmblks)); 7783859Sml29623 continue; 7793859Sml29623 } 7803859Sml29623 7813859Sml29623 *tot_xfer_len_p += len; 7823859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7833859Sml29623 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7843859Sml29623 len, pkt_len, nmblks, 7853859Sml29623 *tot_xfer_len_p)); 7863859Sml29623 7873859Sml29623 if (len < nxge_bcopy_thresh) { 7883859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7893859Sml29623 "==> nxge_tx_pkt_nmblocks: " 7903859Sml29623 "len %d (< thresh) pkt_len %d nmblks %d", 7913859Sml29623 len, pkt_len, nmblks)); 7923859Sml29623 if (pkt_len == 0) 7933859Sml29623 nmblks++; 7943859Sml29623 pkt_len += len; 7953859Sml29623 if (pkt_len >= nxge_bcopy_thresh) { 7963859Sml29623 pkt_len = 0; 7973859Sml29623 len = 0; 7983859Sml29623 nmp = bmp; 7993859Sml29623 } 8003859Sml29623 } else { 8013859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8023859Sml29623 "==> nxge_tx_pkt_nmblocks: " 8033859Sml29623 "len %d (> thresh) pkt_len %d nmblks %d", 8043859Sml29623 len, pkt_len, nmblks)); 8053859Sml29623 pkt_len = 0; 
8063859Sml29623 nmblks++; 8073859Sml29623 /* 8083859Sml29623 * Hardware limits the transfer length to 4K. 8093859Sml29623 * If len is more than 4K, we need to break 8103859Sml29623 * it up to at most 2 more blocks. 8113859Sml29623 */ 8123859Sml29623 if (len > TX_MAX_TRANSFER_LENGTH) { 8133859Sml29623 uint32_t nsegs; 8143859Sml29623 8156495Sspeer nsegs = 1; 8163859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8173859Sml29623 "==> nxge_tx_pkt_nmblocks: " 8183859Sml29623 "len %d pkt_len %d nmblks %d nsegs %d", 8193859Sml29623 len, pkt_len, nmblks, nsegs)); 8203859Sml29623 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 8213859Sml29623 ++nsegs; 8223859Sml29623 } 8233859Sml29623 do { 8243859Sml29623 b_wptr = nmp->b_rptr + 8253859Sml29623 TX_MAX_TRANSFER_LENGTH; 8263859Sml29623 nmp->b_wptr = b_wptr; 8273859Sml29623 if ((tmp = dupb(nmp)) == NULL) { 8283859Sml29623 return (0); 8293859Sml29623 } 8303859Sml29623 tmp->b_rptr = b_wptr; 8313859Sml29623 tmp->b_wptr = nmp->b_wptr; 8323859Sml29623 tmp->b_cont = nmp->b_cont; 8333859Sml29623 nmp->b_cont = tmp; 8343859Sml29623 nmblks++; 8353859Sml29623 if (--nsegs) { 8363859Sml29623 nmp = tmp; 8373859Sml29623 } 8383859Sml29623 } while (nsegs); 8393859Sml29623 nmp = tmp; 8403859Sml29623 } 8413859Sml29623 } 8423859Sml29623 8433859Sml29623 /* 8443859Sml29623 * Hardware limits the transmit gather pointers to 15. 
	 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			/* The coalesced chain restarts bcopy accounting. */
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

/*
 * nxge_txdma_reclaim
 *
 * Reclaim transmit descriptors that the hardware has consumed:
 * advance the software read index up to the hardware head, unbind
 * or unload the DMA resources of each completed buffer, free the
 * associated mblk chains, and update the per-TDC statistics.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to reclaim from (its lock must be
 *			held by the caller — every visible caller
 *			takes tx_ring_p->lock first; confirm for new
 *			call sites).
 * 	nmblks		Number of descriptors the caller is about to
 *			use; 0 means "just reclaim".
 *
 * Returns:
 * 	B_TRUE if, after reclaiming, at least nmblks descriptors are
 * 	available below the TX_FULL_MARK watermark (or the ring was
 * 	found empty); B_FALSE if the ring is (still) too full.
 */
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t status = B_TRUE;
	p_nxge_dma_common_t tx_desc_dma_p;
	nxge_dma_common_t desc_area;
	p_tx_desc_t tx_desc_ring_vp;
	p_tx_desc_t tx_desc_p;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t r_tx_desc;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint32_t pkt_len;
	uint_t tx_rd_index;
	uint16_t head_index, tail_index;
	uint8_t tdc;
	boolean_t head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	/*
	 * Reclaim only when enough descriptors are pending (or the
	 * caller asked for none); otherwise just re-check for room.
	 */
	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		/* The DMA_COMMON_VPTR value supersedes kaddrp above. */
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		/* Track the high-water mark of pending descriptors. */
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		/*
		 * head == tail is ambiguous: the wrap bits (flipped
		 * on every pass around the ring) distinguish a
		 * completely empty ring from a completely full one.
		 */
		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		/*
		 * Walk from the software read index up to the
		 * hardware head, releasing each completed descriptor.
		 */
		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			/* Snapshot the descriptor into a local copy. */
			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			/* SOP is 1 only on the first descriptor of a frame. */
			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			/* Release whichever DMA mapping the buffer used. */
			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		/* Room for nmblks below the watermark after reclaiming? */
		status = (nmblks <= (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
		if (status) {
			/* Atomically clear the "queueing" flag (1 -> 0). */
			cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <=
		    (tx_ring_p->tx_ring_size -
		    tx_ring_p->descs_pending -
		    TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 * Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	/* No logical device vector: nothing to service. */
	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Trust the ldv's back-pointer over arg2 if they disagree. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));
	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	/*
	 * The "mk" (marked) bit indicates that descriptors have
	 * completed; reclaim them and kick the MAC layer so queued
	 * transmits can resume.
	 */
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		MUTEX_ENTER(&tx_ring_p->lock);
		(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
		MUTEX_EXIT(&tx_ring_p->lock);
		mac_tx_update(nxgep->mach);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			/* Guest domains rearm via the HIO proxy. */
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

/*
 * nxge_txdma_stop
 *
 * Marked "Dead" upstream: apparently no longer called.  Stops link
 * monitoring only; it does not touch the DMA channels directly.
 */
void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

/*
 * nxge_txdma_stop_start
 *
 * Marked "Dead" upstream: apparently no longer called.  Stops the
 * transmit side, then re-initializes the rings, restarts the DMA
 * engines, re-enables the TX MAC and kicks the rings.
 */
void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

/*
 * nxge_txdma_channel_disable
 *
 * Disable one TDC, forcing an injected error if the channel does
 * not report stop-done on the first attempt.
 *
 * Arguments:
 * 	nxge
 * 	channel		The TDC to disable.
 *
 * Returns:
 * 	The NPI status of the (last) disable attempt.
 */
npi_status_t
nxge_txdma_channel_disable(
	nxge_t *nxge,
	int channel)
{
	npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
	npi_status_t rs;
	tdmc_intr_dbg_t intr_dbg;

	/*
12276495Sspeer * Stop the dma channel and wait for the stop-done. 12286495Sspeer * If the stop-done bit is not present, then force 12296495Sspeer * an error so TXC will stop. 12306495Sspeer * All channels bound to this port need to be stopped 12316495Sspeer * and reset after injecting an interrupt error. 12326495Sspeer */ 12336495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12346495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12356495Sspeer "==> nxge_txdma_channel_disable(%d) " 12366495Sspeer "rs 0x%x", channel, rs)); 12376495Sspeer if (rs != NPI_SUCCESS) { 12386495Sspeer /* Inject any error */ 12396495Sspeer intr_dbg.value = 0; 12406495Sspeer intr_dbg.bits.ldw.nack_pref = 1; 12416495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12426495Sspeer "==> nxge_txdma_hw_mode: " 12436495Sspeer "channel %d (stop failed 0x%x) " 12446495Sspeer "(inject err)", rs, channel)); 12456495Sspeer (void) npi_txdma_inj_int_error_set( 12466495Sspeer handle, channel, &intr_dbg); 12476495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12486495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12496495Sspeer "==> nxge_txdma_hw_mode: " 12506495Sspeer "channel %d (stop again 0x%x) " 12516495Sspeer "(after inject err)", 12526495Sspeer rs, channel)); 12536495Sspeer } 12546495Sspeer 12556495Sspeer return (rs); 12566495Sspeer } 12576495Sspeer 12586495Sspeer /* 12596495Sspeer * nxge_txdma_hw_mode 12606495Sspeer * 12616495Sspeer * Toggle all TDCs on (enable) or off (disable). 12626495Sspeer * 12636495Sspeer * Arguments: 12646495Sspeer * nxgep 12656495Sspeer * enable Enable or disable a TDC. 
12666495Sspeer * 12676495Sspeer * Notes: 12686495Sspeer * 12696495Sspeer * NPI/NXGE function calls: 12706495Sspeer * npi_txdma_channel_enable(TX_CS) 12716495Sspeer * npi_txdma_channel_disable(TX_CS) 12726495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 12736495Sspeer * 12746495Sspeer * Registers accessed: 12756495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 12766495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 12776495Sspeer * 12786495Sspeer * Context: 12796495Sspeer * Any domain 12806495Sspeer */ 12813859Sml29623 nxge_status_t 12823859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12833859Sml29623 { 12846495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 12856495Sspeer 12866495Sspeer npi_handle_t handle; 12876495Sspeer nxge_status_t status; 12886495Sspeer npi_status_t rs; 12896495Sspeer int tdc; 12903859Sml29623 12913859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 12923859Sml29623 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 12933859Sml29623 12943859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 12953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 12963859Sml29623 "<== nxge_txdma_mode: not initialized")); 12973859Sml29623 return (NXGE_ERROR); 12983859Sml29623 } 12993859Sml29623 13006495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 13013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 13026495Sspeer "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 13033859Sml29623 return (NXGE_ERROR); 13043859Sml29623 } 13053859Sml29623 13066495Sspeer /* Enable or disable all of the TDCs owned by us. 
*/ 13073859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13086495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 13096495Sspeer if ((1 << tdc) & set->owned.map) { 13106495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 13116495Sspeer if (ring) { 13126495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13136495Sspeer "==> nxge_txdma_hw_mode: channel %d", tdc)); 13146495Sspeer if (enable) { 13156495Sspeer rs = npi_txdma_channel_enable 13166495Sspeer (handle, tdc); 13173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13186495Sspeer "==> nxge_txdma_hw_mode: " 13196495Sspeer "channel %d (enable) rs 0x%x", 13206495Sspeer tdc, rs)); 13216495Sspeer } else { 13226495Sspeer rs = nxge_txdma_channel_disable 13236495Sspeer (nxgep, tdc); 13243859Sml29623 } 13253859Sml29623 } 13263859Sml29623 } 13273859Sml29623 } 13283859Sml29623 13293859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 13303859Sml29623 13313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13323859Sml29623 "<== nxge_txdma_hw_mode: status 0x%x", status)); 13333859Sml29623 13343859Sml29623 return (status); 13353859Sml29623 } 13363859Sml29623 13373859Sml29623 void 13383859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 13393859Sml29623 { 13403859Sml29623 npi_handle_t handle; 13413859Sml29623 13423859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 13433859Sml29623 "==> nxge_txdma_enable_channel: channel %d", channel)); 13443859Sml29623 13453859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13463859Sml29623 /* enable the transmit dma channels */ 13473859Sml29623 (void) npi_txdma_channel_enable(handle, channel); 13483859Sml29623 13493859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 13503859Sml29623 } 13513859Sml29623 13523859Sml29623 void 13533859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 13543859Sml29623 { 13553859Sml29623 npi_handle_t handle; 13563859Sml29623 13573859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 13583859Sml29623 "==> 
nxge_txdma_disable_channel: channel %d", channel)); 13593859Sml29623 13603859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13613859Sml29623 /* stop the transmit dma channels */ 13623859Sml29623 (void) npi_txdma_channel_disable(handle, channel); 13633859Sml29623 13643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 13653859Sml29623 } 13663859Sml29623 13676495Sspeer /* 13686495Sspeer * nxge_txdma_stop_inj_err 13696495Sspeer * 13706495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 13716495Sspeer * 13726495Sspeer * Arguments: 13736495Sspeer * nxgep 13746495Sspeer * channel The channel to stop. 13756495Sspeer * 13766495Sspeer * Notes: 13776495Sspeer * 13786495Sspeer * NPI/NXGE function calls: 13796495Sspeer * npi_txdma_channel_disable() 13806495Sspeer * npi_txdma_inj_int_error_set() 13816495Sspeer * #if defined(NXGE_DEBUG) 13826495Sspeer * nxge_txdma_regs_dump_channels(nxgep); 13836495Sspeer * #endif 13846495Sspeer * 13856495Sspeer * Registers accessed: 13866495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 13876495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 13886495Sspeer * 13896495Sspeer * Context: 13906495Sspeer * Any domain 13916495Sspeer */ 13923859Sml29623 int 13933859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 13943859Sml29623 { 13953859Sml29623 npi_handle_t handle; 13963859Sml29623 tdmc_intr_dbg_t intr_dbg; 13973859Sml29623 int status; 13983859Sml29623 npi_status_t rs = NPI_SUCCESS; 13993859Sml29623 14003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 14013859Sml29623 /* 14023859Sml29623 * Stop the dma channel waits for the stop done. 14033859Sml29623 * If the stop done bit is not set, then create 14043859Sml29623 * an error. 14053859Sml29623 */ 14063859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 14073859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14083859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 14093859Sml29623 if (status == NXGE_OK) { 14103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14113859Sml29623 "<== nxge_txdma_stop_inj_err (channel %d): " 14123859Sml29623 "stopped OK", channel)); 14133859Sml29623 return (status); 14143859Sml29623 } 14153859Sml29623 14163859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14173859Sml29623 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 14183859Sml29623 "injecting error", channel, rs)); 14193859Sml29623 /* Inject any error */ 14203859Sml29623 intr_dbg.value = 0; 14213859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 14223859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 14233859Sml29623 14243859Sml29623 /* Stop done bit will be set as a result of error injection */ 14253859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14263859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 14273859Sml29623 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 14283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14293859Sml29623 "<== nxge_txdma_stop_inj_err (channel %d): " 14303859Sml29623 "stopped OK ", channel)); 14313859Sml29623 return (status); 14323859Sml29623 } 14333859Sml29623 14343859Sml29623 #if defined(NXGE_DEBUG) 14353859Sml29623 nxge_txdma_regs_dump_channels(nxgep); 14363859Sml29623 #endif 14373859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14383859Sml29623 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 14393859Sml29623 " (injected error but still not stopped)", channel, rs)); 14403859Sml29623 14413859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 14423859Sml29623 return (status); 14433859Sml29623 } 14443859Sml29623 14453859Sml29623 /*ARGSUSED*/ 14463859Sml29623 void 14473859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep) 14483859Sml29623 { 14496495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 14506495Sspeer int tdc; 14513859Sml29623 14523859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 
14533859Sml29623 14546495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 14556495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 14566495Sspeer "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 14573859Sml29623 return; 14583859Sml29623 } 14593859Sml29623 14606495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 14616495Sspeer if ((1 << tdc) & set->owned.map) { 14626495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 14636495Sspeer if (ring) { 14646495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 14656495Sspeer "==> nxge_fixup_txdma_rings: channel %d", 14666495Sspeer tdc)); 14676495Sspeer nxge_txdma_fixup_channel(nxgep, ring, tdc); 14686495Sspeer } 14696495Sspeer } 14703859Sml29623 } 14713859Sml29623 14723859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 14733859Sml29623 } 14743859Sml29623 14753859Sml29623 /*ARGSUSED*/ 14763859Sml29623 void 14773859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 14783859Sml29623 { 14793859Sml29623 p_tx_ring_t ring_p; 14803859Sml29623 14813859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 14823859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 14833859Sml29623 if (ring_p == NULL) { 14843859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 14853859Sml29623 return; 14863859Sml29623 } 14873859Sml29623 14883859Sml29623 if (ring_p->tdc != channel) { 14893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14903859Sml29623 "<== nxge_txdma_fix_channel: channel not matched " 14913859Sml29623 "ring tdc %d passed channel", 14923859Sml29623 ring_p->tdc, channel)); 14933859Sml29623 return; 14943859Sml29623 } 14953859Sml29623 14963859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 14973859Sml29623 14983859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 14993859Sml29623 } 15003859Sml29623 15013859Sml29623 /*ARGSUSED*/ 15023859Sml29623 void 15033859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t 
ring_p, uint16_t channel) 15043859Sml29623 { 15053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 15063859Sml29623 15073859Sml29623 if (ring_p == NULL) { 15083859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15093859Sml29623 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 15103859Sml29623 return; 15113859Sml29623 } 15123859Sml29623 15133859Sml29623 if (ring_p->tdc != channel) { 15143859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15153859Sml29623 "<== nxge_txdma_fixup_channel: channel not matched " 15163859Sml29623 "ring tdc %d passed channel", 15173859Sml29623 ring_p->tdc, channel)); 15183859Sml29623 return; 15193859Sml29623 } 15203859Sml29623 15213859Sml29623 MUTEX_ENTER(&ring_p->lock); 15223859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 15233859Sml29623 ring_p->rd_index = 0; 15243859Sml29623 ring_p->wr_index = 0; 15253859Sml29623 ring_p->ring_head.value = 0; 15263859Sml29623 ring_p->ring_kick_tail.value = 0; 15273859Sml29623 ring_p->descs_pending = 0; 15283859Sml29623 MUTEX_EXIT(&ring_p->lock); 15293859Sml29623 15303859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 15313859Sml29623 } 15323859Sml29623 15333859Sml29623 /*ARGSUSED*/ 15343859Sml29623 void 15353859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep) 15363859Sml29623 { 15376495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 15386495Sspeer int tdc; 15393859Sml29623 15403859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 15413859Sml29623 15426495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 15433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15446495Sspeer "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 15453859Sml29623 return; 15463859Sml29623 } 15473859Sml29623 15486495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 15496495Sspeer if ((1 << tdc) & set->owned.map) { 15506495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 15516495Sspeer if (ring) { 15526495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 15536495Sspeer 
"==> nxge_txdma_hw_kick: channel %d", tdc)); 15546495Sspeer nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 15556495Sspeer } 15566495Sspeer } 15573859Sml29623 } 15583859Sml29623 15593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 15603859Sml29623 } 15613859Sml29623 15623859Sml29623 /*ARGSUSED*/ 15633859Sml29623 void 15643859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 15653859Sml29623 { 15663859Sml29623 p_tx_ring_t ring_p; 15673859Sml29623 15683859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 15693859Sml29623 15703859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 15713859Sml29623 if (ring_p == NULL) { 15723859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15733859Sml29623 " nxge_txdma_kick_channel")); 15743859Sml29623 return; 15753859Sml29623 } 15763859Sml29623 15773859Sml29623 if (ring_p->tdc != channel) { 15783859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15793859Sml29623 "<== nxge_txdma_kick_channel: channel not matched " 15803859Sml29623 "ring tdc %d passed channel", 15813859Sml29623 ring_p->tdc, channel)); 15823859Sml29623 return; 15833859Sml29623 } 15843859Sml29623 15853859Sml29623 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 15863859Sml29623 15873859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 15883859Sml29623 } 15893859Sml29623 15903859Sml29623 /*ARGSUSED*/ 15913859Sml29623 void 15923859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 15933859Sml29623 { 15943859Sml29623 15953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 15963859Sml29623 15973859Sml29623 if (ring_p == NULL) { 15983859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15993859Sml29623 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 16003859Sml29623 return; 16013859Sml29623 } 16023859Sml29623 16033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 16043859Sml29623 } 16053859Sml29623 
/*
 * nxge_check_tx_hang
 *
 * Check the state of all TDCs belonging to nxgep.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 * Called by nxge_hw.c:nxge_check_hw_state().
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 * 	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 * Scan all TDCs owned by this instance for a hung channel.
 * (Comment previously said "Reset a TDC" and listed channel/reg_data
 * arguments — a copy-paste error; this function only checks state.)
 *
 * Arguments:
 * 	nxgep
 *
 * Returns:
 * 	B_TRUE as soon as any owned TDC appears hung, else B_FALSE.
 *
 * Notes:
 * Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 * 	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 * 	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	/* Check each owned, present TDC; stop at the first hung one. */
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

/*
 * nxge_txdma_channel_hung
 *
 * Decide whether one TDC looks hung: after reclaiming, a ring that
 * is still full is treated as hung.  (Comment previously said
 * "Reset a TDC" — this function performs no reset.)
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Returns:
 * 	B_TRUE if the ring is full after reclaim, else B_FALSE.
 *
 * Notes:
 * Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 * 	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 * 	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 * 	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t head_index, tail_index;
	boolean_t head_wrap, tail_wrap;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint_t tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
	/* Reclaim first so a merely-busy ring is not misread as hung. */
	MUTEX_ENTER(&tx_ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;
	tx_rd_index = tx_ring_p->rd_index;
	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
	    "tail_index %d tail_wrap %d ",
	    channel, tx_rd_index, tail_index, tail_wrap));
	/*
	 * Read the hardware maintained transmit head
	 * and wrap around bit.
	 */
	(void) npi_txdma_ring_head_get(handle, channel, &tx_head);
	head_index = tx_head.bits.ldw.head;
	head_wrap = tx_head.bits.ldw.wrap;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: "
	    "tx_rd_index %d tail %d tail_wrap %d "
	    "head %d wrap %d",
	    tx_rd_index, tail_index, tail_wrap,
	    head_index, head_wrap));

	if (TXDMA_RING_EMPTY(head_index, head_wrap,
	    tail_index, tail_wrap) &&
	    (head_index == tx_rd_index)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: EMPTY"));
		return (B_FALSE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: Checking if ring full"));
	/* Still full after reclaim: the channel has made no progress. */
	if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
	    tail_wrap)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_channel_hung: full"));
		return (B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));

	return (B_FALSE);
}

/*
 * nxge_fixup_hung_txdma_rings
 *
 * Disable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to reset.
 * 	reg_data	The current TX_CS.
17886495Sspeer * 17896495Sspeer * Notes: 17906495Sspeer * Called by nxge_check_tx_hang() 17916495Sspeer * 17926495Sspeer * NPI/NXGE function calls: 17936495Sspeer * npi_txdma_ring_head_get() 17946495Sspeer * 17956495Sspeer * Registers accessed: 17966495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 17976495Sspeer * 17986495Sspeer * Context: 17996495Sspeer * Any domain 18006495Sspeer */ 18013859Sml29623 /*ARGSUSED*/ 18023859Sml29623 void 18033859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 18043859Sml29623 { 18056495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 18066495Sspeer int tdc; 18073859Sml29623 18083859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 18096495Sspeer 18106495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 18113859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18126495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 18133859Sml29623 return; 18143859Sml29623 } 18153859Sml29623 18166495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 18176495Sspeer if ((1 << tdc) & set->owned.map) { 18186495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 18196495Sspeer if (ring) { 18206495Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 18216495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 18226495Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d", 18236495Sspeer tdc)); 18246495Sspeer } 18256495Sspeer } 18263859Sml29623 } 18273859Sml29623 18283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 18293859Sml29623 } 18303859Sml29623 18316495Sspeer /* 18326495Sspeer * nxge_txdma_fixup_hung_channel 18336495Sspeer * 18346495Sspeer * 'Fix' a hung TDC. 18356495Sspeer * 18366495Sspeer * Arguments: 18376495Sspeer * nxgep 18386495Sspeer * channel The channel to fix. 18396495Sspeer * 18406495Sspeer * Notes: 18416495Sspeer * Called by nxge_fixup_hung_txdma_rings() 18426495Sspeer * 18436495Sspeer * 1. Reclaim the TDC. 18446495Sspeer * 2. Disable the TDC. 
18456495Sspeer * 18466495Sspeer * NPI/NXGE function calls: 18476495Sspeer * nxge_txdma_reclaim() 18486495Sspeer * npi_txdma_channel_disable(TX_CS) 18496495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 18506495Sspeer * 18516495Sspeer * Registers accessed: 18526495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 18536495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 18546495Sspeer * 18556495Sspeer * Context: 18566495Sspeer * Any domain 18576495Sspeer */ 18583859Sml29623 /*ARGSUSED*/ 18593859Sml29623 void 18603859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 18613859Sml29623 { 18623859Sml29623 p_tx_ring_t ring_p; 18633859Sml29623 18643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 18653859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 18663859Sml29623 if (ring_p == NULL) { 18673859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18683859Sml29623 "<== nxge_txdma_fix_hung_channel")); 18693859Sml29623 return; 18703859Sml29623 } 18713859Sml29623 18723859Sml29623 if (ring_p->tdc != channel) { 18733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18743859Sml29623 "<== nxge_txdma_fix_hung_channel: channel not matched " 18753859Sml29623 "ring tdc %d passed channel", 18763859Sml29623 ring_p->tdc, channel)); 18773859Sml29623 return; 18783859Sml29623 } 18793859Sml29623 18803859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 18813859Sml29623 18823859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 18833859Sml29623 } 18843859Sml29623 18853859Sml29623 /*ARGSUSED*/ 18863859Sml29623 void 18873859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 18883859Sml29623 uint16_t channel) 18893859Sml29623 { 18903859Sml29623 npi_handle_t handle; 18913859Sml29623 tdmc_intr_dbg_t intr_dbg; 18923859Sml29623 int status = NXGE_OK; 18933859Sml29623 18943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 18953859Sml29623 
18963859Sml29623 if (ring_p == NULL) { 18973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18983859Sml29623 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 18993859Sml29623 return; 19003859Sml29623 } 19013859Sml29623 19023859Sml29623 if (ring_p->tdc != channel) { 19033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19043859Sml29623 "<== nxge_txdma_fixup_hung_channel: channel " 19053859Sml29623 "not matched " 19063859Sml29623 "ring tdc %d passed channel", 19073859Sml29623 ring_p->tdc, channel)); 19083859Sml29623 return; 19093859Sml29623 } 19103859Sml29623 19113859Sml29623 /* Reclaim descriptors */ 19123859Sml29623 MUTEX_ENTER(&ring_p->lock); 19133859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 19143859Sml29623 MUTEX_EXIT(&ring_p->lock); 19153859Sml29623 19163859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19173859Sml29623 /* 19183859Sml29623 * Stop the dma channel waits for the stop done. 19193859Sml29623 * If the stop done bit is not set, then force 19203859Sml29623 * an error. 19213859Sml29623 */ 19223859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19233859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19253859Sml29623 "<== nxge_txdma_fixup_hung_channel: stopped OK " 19263859Sml29623 "ring tdc %d passed channel %d", 19273859Sml29623 ring_p->tdc, channel)); 19283859Sml29623 return; 19293859Sml29623 } 19303859Sml29623 19313859Sml29623 /* Inject any error */ 19323859Sml29623 intr_dbg.value = 0; 19333859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 19343859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 19353859Sml29623 19363859Sml29623 /* Stop done bit will be set as a result of error injection */ 19373859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19383859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19393859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19403859Sml29623 "<== nxge_txdma_fixup_hung_channel: stopped again" 19413859Sml29623 "ring tdc %d passed 
channel", 19423859Sml29623 ring_p->tdc, channel)); 19433859Sml29623 return; 19443859Sml29623 } 19453859Sml29623 19463859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19473859Sml29623 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! " 19483859Sml29623 "ring tdc %d passed channel", 19493859Sml29623 ring_p->tdc, channel)); 19503859Sml29623 19513859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 19523859Sml29623 } 19533859Sml29623 19543859Sml29623 /*ARGSUSED*/ 19553859Sml29623 void 19563859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep) 19573859Sml29623 { 19586495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 19596495Sspeer int tdc; 19606495Sspeer 19616495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 19626495Sspeer 19636495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 19643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19656495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 19663859Sml29623 return; 19673859Sml29623 } 19683859Sml29623 19696495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 19706495Sspeer if ((1 << tdc) & set->owned.map) { 19716495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 19726495Sspeer if (ring) { 19736495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 19746495Sspeer "==> nxge_reclaim_rings: TDC %d", tdc)); 19756495Sspeer MUTEX_ENTER(&ring->lock); 19766495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, tdc); 19776495Sspeer MUTEX_EXIT(&ring->lock); 19786495Sspeer } 19796495Sspeer } 19803859Sml29623 } 19813859Sml29623 19823859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 19833859Sml29623 } 19843859Sml29623 19853859Sml29623 void 19863859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 19873859Sml29623 { 19886495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 19896495Sspeer npi_handle_t handle; 19906495Sspeer int tdc; 19916495Sspeer 19926495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 19933859Sml29623 
19943859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19956495Sspeer 19966495Sspeer if (!isLDOMguest(nxgep)) { 19976495Sspeer (void) npi_txdma_dump_fzc_regs(handle); 19986495Sspeer 19996495Sspeer /* Dump TXC registers. */ 20006495Sspeer (void) npi_txc_dump_fzc_regs(handle); 20016495Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 20023859Sml29623 } 20033859Sml29623 20046495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20066495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20073859Sml29623 return; 20083859Sml29623 } 20093859Sml29623 20106495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20116495Sspeer if ((1 << tdc) & set->owned.map) { 20126495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20136495Sspeer if (ring) { 20146495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20156495Sspeer "==> nxge_txdma_regs_dump_channels: " 20166495Sspeer "TDC %d", tdc)); 20176495Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc); 20186495Sspeer 20196495Sspeer /* Dump TXC registers, if able to. 
*/ 20206495Sspeer if (!isLDOMguest(nxgep)) { 20216495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20226495Sspeer "==> nxge_txdma_regs_dump_channels:" 20236495Sspeer " FZC TDC %d", tdc)); 20246495Sspeer (void) npi_txc_dump_tdc_fzc_regs 20256495Sspeer (handle, tdc); 20266495Sspeer } 20276495Sspeer nxge_txdma_regs_dump(nxgep, tdc); 20286495Sspeer } 20296495Sspeer } 20303859Sml29623 } 20313859Sml29623 20323859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 20333859Sml29623 } 20343859Sml29623 20353859Sml29623 void 20363859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 20373859Sml29623 { 20383859Sml29623 npi_handle_t handle; 20393859Sml29623 tx_ring_hdl_t hdl; 20403859Sml29623 tx_ring_kick_t kick; 20413859Sml29623 tx_cs_t cs; 20423859Sml29623 txc_control_t control; 20433859Sml29623 uint32_t bitmap = 0; 20443859Sml29623 uint32_t burst = 0; 20453859Sml29623 uint32_t bytes = 0; 20463859Sml29623 dma_log_page_t cfg; 20473859Sml29623 20483859Sml29623 printf("\n\tfunc # %d tdc %d ", 20493859Sml29623 nxgep->function_num, channel); 20503859Sml29623 cfg.page_num = 0; 20513859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 20523859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 20533859Sml29623 printf("\n\tlog page func %d valid page 0 %d", 20543859Sml29623 cfg.func_num, cfg.valid); 20553859Sml29623 cfg.page_num = 1; 20563859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 20573859Sml29623 printf("\n\tlog page func %d valid page 1 %d", 20583859Sml29623 cfg.func_num, cfg.valid); 20593859Sml29623 20603859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 20613859Sml29623 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 20623859Sml29623 printf("\n\thead value is 0x%0llx", 20633859Sml29623 (long long)hdl.value); 20643859Sml29623 printf("\n\thead index %d", hdl.bits.ldw.head); 20653859Sml29623 printf("\n\tkick value is 0x%0llx", 20663859Sml29623 (long long)kick.value); 20673859Sml29623 printf("\n\ttail index 
%d\n", kick.bits.ldw.tail); 20683859Sml29623 20693859Sml29623 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 20703859Sml29623 printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 20713859Sml29623 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 20723859Sml29623 20733859Sml29623 (void) npi_txc_control(handle, OP_GET, &control); 20743859Sml29623 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 20753859Sml29623 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 20763859Sml29623 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 20773859Sml29623 20783859Sml29623 printf("\n\tTXC port control 0x%0llx", 20793859Sml29623 (long long)control.value); 20803859Sml29623 printf("\n\tTXC port bitmap 0x%x", bitmap); 20813859Sml29623 printf("\n\tTXC max burst %d", burst); 20823859Sml29623 printf("\n\tTXC bytes xmt %d\n", bytes); 20833859Sml29623 20843859Sml29623 { 20853859Sml29623 ipp_status_t status; 20863859Sml29623 20873859Sml29623 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 20885125Sjoycey #if defined(__i386) 20895125Sjoycey printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value); 20905125Sjoycey #else 20913859Sml29623 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 20925125Sjoycey #endif 20933859Sml29623 } 20943859Sml29623 } 20953859Sml29623 20963859Sml29623 /* 20976495Sspeer * nxge_tdc_hvio_setup 20986495Sspeer * 20996495Sspeer * I'm not exactly sure what this code does. 21006495Sspeer * 21016495Sspeer * Arguments: 21026495Sspeer * nxgep 21036495Sspeer * channel The channel to map. 21046495Sspeer * 21056495Sspeer * Notes: 21066495Sspeer * 21076495Sspeer * NPI/NXGE function calls: 21086495Sspeer * na 21096495Sspeer * 21106495Sspeer * Context: 21116495Sspeer * Service domain? 
21123859Sml29623 */ 21136495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21146495Sspeer static void 21156495Sspeer nxge_tdc_hvio_setup( 21166495Sspeer nxge_t *nxgep, int channel) 21173859Sml29623 { 21186495Sspeer nxge_dma_common_t *data; 21196495Sspeer nxge_dma_common_t *control; 21206495Sspeer tx_ring_t *ring; 21216495Sspeer 21226495Sspeer ring = nxgep->tx_rings->rings[channel]; 21236495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 21246495Sspeer 21256495Sspeer ring->hv_set = B_FALSE; 21266495Sspeer 21276495Sspeer ring->hv_tx_buf_base_ioaddr_pp = 21286495Sspeer (uint64_t)data->orig_ioaddr_pp; 21296495Sspeer ring->hv_tx_buf_ioaddr_size = 21306495Sspeer (uint64_t)data->orig_alength; 21316495Sspeer 21326495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21336495Sspeer "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 21346495Sspeer "orig vatopa base io $%p orig_len 0x%llx (%d)", 21356495Sspeer ring->hv_tx_buf_base_ioaddr_pp, 21366495Sspeer ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 21376495Sspeer data->ioaddr_pp, data->orig_vatopa, 21386495Sspeer data->orig_alength, data->orig_alength)); 21396495Sspeer 21406495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 21416495Sspeer 21426495Sspeer ring->hv_tx_cntl_base_ioaddr_pp = 21436495Sspeer (uint64_t)control->orig_ioaddr_pp; 21446495Sspeer ring->hv_tx_cntl_ioaddr_size = 21456495Sspeer (uint64_t)control->orig_alength; 21466495Sspeer 21476495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21486495Sspeer "hv cntl base io $%p orig ioaddr_pp ($%p) " 21496495Sspeer "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 21506495Sspeer ring->hv_tx_cntl_base_ioaddr_pp, 21516495Sspeer control->orig_ioaddr_pp, control->orig_vatopa, 21526495Sspeer ring->hv_tx_cntl_ioaddr_size, 21536495Sspeer control->orig_alength, control->orig_alength)); 21546495Sspeer } 21553859Sml29623 #endif 21563859Sml29623 21576495Sspeer static nxge_status_t 
21586495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel) 21596495Sspeer { 21606495Sspeer nxge_dma_common_t **pData; 21616495Sspeer nxge_dma_common_t **pControl; 21626495Sspeer tx_ring_t **pRing, *ring; 21636495Sspeer tx_mbox_t **mailbox; 21646495Sspeer uint32_t num_chunks; 21656495Sspeer 21666495Sspeer nxge_status_t status = NXGE_OK; 21676495Sspeer 21686495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 21696495Sspeer 21706495Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) { 21716495Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 21726495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 21736495Sspeer "<== nxge_map_txdma: buf not allocated")); 21746495Sspeer return (NXGE_ERROR); 21756495Sspeer } 21763859Sml29623 } 21773859Sml29623 21786495Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 21796495Sspeer return (NXGE_ERROR); 21806495Sspeer 21816495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 21826495Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 21836495Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 21846495Sspeer pRing = &nxgep->tx_rings->rings[channel]; 21856495Sspeer mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 21866495Sspeer 21876495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 21883859Sml29623 "tx_rings $%p tx_desc_rings $%p", 21896495Sspeer nxgep->tx_rings, nxgep->tx_rings->rings)); 21903859Sml29623 21913859Sml29623 /* 21926495Sspeer * Map descriptors from the buffer pools for <channel>. 21936495Sspeer */ 21946495Sspeer 21956495Sspeer /* 21966495Sspeer * Set up and prepare buffer blocks, descriptors 21976495Sspeer * and mailbox. 
21983859Sml29623 */ 21996495Sspeer status = nxge_map_txdma_channel(nxgep, channel, 22006495Sspeer pData, pRing, num_chunks, pControl, mailbox); 22016495Sspeer if (status != NXGE_OK) { 22026495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 22036495Sspeer "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 22046495Sspeer "returned 0x%x", 22056495Sspeer nxgep, channel, status)); 22066495Sspeer return (status); 22076495Sspeer } 22086495Sspeer 22096495Sspeer ring = *pRing; 22106495Sspeer 22116495Sspeer ring->index = (uint16_t)channel; 22126495Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 22136495Sspeer 22146495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 22156495Sspeer if (isLDOMguest(nxgep)) { 22166495Sspeer (void) nxge_tdc_lp_conf(nxgep, channel); 22176495Sspeer } else { 22186495Sspeer nxge_tdc_hvio_setup(nxgep, channel); 22196495Sspeer } 22203859Sml29623 #endif 22216495Sspeer 22226495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 22236495Sspeer "(status 0x%x channel %d)", status, channel)); 22243859Sml29623 22253859Sml29623 return (status); 22263859Sml29623 } 22273859Sml29623 22283859Sml29623 static nxge_status_t 22293859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 22303859Sml29623 p_nxge_dma_common_t *dma_buf_p, 22313859Sml29623 p_tx_ring_t *tx_desc_p, 22323859Sml29623 uint32_t num_chunks, 22333859Sml29623 p_nxge_dma_common_t *dma_cntl_p, 22343859Sml29623 p_tx_mbox_t *tx_mbox_p) 22353859Sml29623 { 22363859Sml29623 int status = NXGE_OK; 22373859Sml29623 22383859Sml29623 /* 22393859Sml29623 * Set up and prepare buffer blocks, descriptors 22403859Sml29623 * and mailbox. 
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel (channel %d)", channel));
	/*
	 * Transmit buffer blocks
	 */
	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, tx_desc_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_txdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_txdma_channel_exit;
	}

	/*
	 * Transmit block ring, and mailbox.
	 */
	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
	    tx_mbox_p);

	goto nxge_map_txdma_channel_exit;

nxge_map_txdma_channel_fail1:
	/*
	 * NOTE(review): this label is currently unreachable -- the only
	 * failure path above jumps straight to the exit label, and
	 * nxge_map_txdma_channel_cfg_ring() returns void.  It appears
	 * to be kept for a future error path out of the cfg-ring step.
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel: unmap buf"
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);

nxge_map_txdma_channel_exit:
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel: "
	    "(status 0x%x channel %d)",
	    status, channel));

	return (status);
}

/*
 * nxge_unmap_txdma_channel
 *
 * Tear down one TDC's mappings: the config ring and mailbox, then
 * the buffer blocks, and finally the transmit buffers themselves.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	tx_ring_t *ring;
	tx_mbox_t *mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
	/*
	 * unmap tx block ring,
	 * and mailbox.
	 */
	ring = nxgep->tx_rings->rings[channel];
	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];

	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);

	/* unmap buffer blocks */
	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);

	nxge_free_txb(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
}

/*
 * nxge_map_txdma_channel_cfg_ring
 *
 * Map a TDC into our kernel space.
 * This function allocates all of the per-channel data structures.
 *
 * Arguments:
 * 	nxgep
 * 	dma_channel	The channel to map.
 *	dma_cntl_p
 *	tx_ring_p	dma_channel's transmit ring
 *	tx_mbox_p	dma_channel's mailbox
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 * 	nxge_setup_dma_common()
 *
 * Registers accessed:
 * 	none.
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor ring out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	/* Start with clean shadow copies of the channel registers. */
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	tx_ring_cfig_p->value = 0;
	/*
	 * Ring length is programmed as tx_ring_size >> 3 -- presumably
	 * the TX_RNG_CFIG length field counts in units of 8 descriptors;
	 * confirm against the register definition.
	 */
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/* Request a channel reset when this state is later written out. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address across the high/low registers. */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/* Logical page registers: clear everything, then enable pages 0/1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the newly allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}

/*
 * nxge_unmap_txdma_channel_cfg_ring
 *
 * Free the mailbox allocated by nxge_map_txdma_channel_cfg_ring().
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	/* The mailbox structure is the only resource this path owns. */
	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

24413859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24423859Sml29623 "<== nxge_unmap_txdma_channel_cfg_ring")); 24433859Sml29623 } 24443859Sml29623 24456495Sspeer /* 24466495Sspeer * nxge_map_txdma_channel_buf_ring 24476495Sspeer * 24486495Sspeer * 24496495Sspeer * Arguments: 24506495Sspeer * nxgep 24516495Sspeer * channel The channel to map. 24526495Sspeer * dma_buf_p 24536495Sspeer * tx_desc_p channel's descriptor ring 24546495Sspeer * num_chunks 24556495Sspeer * 24566495Sspeer * Notes: 24576495Sspeer * 24586495Sspeer * NPI/NXGE function calls: 24596495Sspeer * nxge_setup_dma_common() 24606495Sspeer * 24616495Sspeer * Registers accessed: 24626495Sspeer * none. 24636495Sspeer * 24646495Sspeer * Context: 24656495Sspeer * Any domain 24666495Sspeer */ 24673859Sml29623 static nxge_status_t 24683859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 24693859Sml29623 p_nxge_dma_common_t *dma_buf_p, 24703859Sml29623 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 24713859Sml29623 { 24723859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 24733859Sml29623 p_nxge_dma_common_t dmap; 24743859Sml29623 nxge_os_dma_handle_t tx_buf_dma_handle; 24753859Sml29623 p_tx_ring_t tx_ring_p; 24763859Sml29623 p_tx_msg_t tx_msg_ring; 24773859Sml29623 nxge_status_t status = NXGE_OK; 24783859Sml29623 int ddi_status = DDI_SUCCESS; 24793859Sml29623 int i, j, index; 24803859Sml29623 uint32_t size, bsize; 24813859Sml29623 uint32_t nblocks, nmsgs; 24823859Sml29623 24833859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24843859Sml29623 "==> nxge_map_txdma_channel_buf_ring")); 24853859Sml29623 24863859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 24873859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24883859Sml29623 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 24893859Sml29623 "chunks bufp $%p", 24903859Sml29623 channel, num_chunks, dma_bufp)); 24913859Sml29623 24923859Sml29623 nmsgs = 0; 24933859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 24943859Sml29623 nmsgs += 
tmp_bufp->nblocks; 24953859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24963859Sml29623 "==> nxge_map_txdma_channel_buf_ring: channel %d " 24973859Sml29623 "bufp $%p nblocks %d nmsgs %d", 24983859Sml29623 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 24993859Sml29623 } 25003859Sml29623 if (!nmsgs) { 25013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25023859Sml29623 "<== nxge_map_txdma_channel_buf_ring: channel %d " 25033859Sml29623 "no msg blocks", 25043859Sml29623 channel)); 25053859Sml29623 status = NXGE_ERROR; 25063859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 25073859Sml29623 } 25083859Sml29623 25093859Sml29623 tx_ring_p = (p_tx_ring_t) 25103859Sml29623 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 25113859Sml29623 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 25123859Sml29623 (void *)nxgep->interrupt_cookie); 25133952Sml29623 25143952Sml29623 tx_ring_p->nxgep = nxgep; 25153952Sml29623 tx_ring_p->serial = nxge_serialize_create(nmsgs, 25163952Sml29623 nxge_serial_tx, tx_ring_p); 25173859Sml29623 /* 25183859Sml29623 * Allocate transmit message rings and handles for packets 25193859Sml29623 * not to be copied to premapped buffers. 
25203859Sml29623 */ 25213859Sml29623 size = nmsgs * sizeof (tx_msg_t); 25223859Sml29623 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 25233859Sml29623 for (i = 0; i < nmsgs; i++) { 25243859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 25253859Sml29623 DDI_DMA_DONTWAIT, 0, 25263859Sml29623 &tx_msg_ring[i].dma_handle); 25273859Sml29623 if (ddi_status != DDI_SUCCESS) { 25283859Sml29623 status |= NXGE_DDI_FAILED; 25293859Sml29623 break; 25303859Sml29623 } 25313859Sml29623 } 25323859Sml29623 if (i < nmsgs) { 25334185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25344185Sspeer "Allocate handles failed.")); 25353859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 25363859Sml29623 } 25373859Sml29623 25383859Sml29623 tx_ring_p->tdc = channel; 25393859Sml29623 tx_ring_p->tx_msg_ring = tx_msg_ring; 25403859Sml29623 tx_ring_p->tx_ring_size = nmsgs; 25413859Sml29623 tx_ring_p->num_chunks = num_chunks; 25423859Sml29623 if (!nxge_tx_intr_thres) { 25433859Sml29623 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 25443859Sml29623 } 25453859Sml29623 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 25463859Sml29623 tx_ring_p->rd_index = 0; 25473859Sml29623 tx_ring_p->wr_index = 0; 25483859Sml29623 tx_ring_p->ring_head.value = 0; 25493859Sml29623 tx_ring_p->ring_kick_tail.value = 0; 25503859Sml29623 tx_ring_p->descs_pending = 0; 25513859Sml29623 25523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25533859Sml29623 "==> nxge_map_txdma_channel_buf_ring: channel %d " 25543859Sml29623 "actual tx desc max %d nmsgs %d " 25553859Sml29623 "(config nxge_tx_ring_size %d)", 25563859Sml29623 channel, tx_ring_p->tx_ring_size, nmsgs, 25573859Sml29623 nxge_tx_ring_size)); 25583859Sml29623 25593859Sml29623 /* 25603859Sml29623 * Map in buffers from the buffer pool. 
25613859Sml29623 */ 25623859Sml29623 index = 0; 25633859Sml29623 bsize = dma_bufp->block_size; 25643859Sml29623 25653859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 25663859Sml29623 "dma_bufp $%p tx_rng_p $%p " 25673859Sml29623 "tx_msg_rng_p $%p bsize %d", 25683859Sml29623 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 25693859Sml29623 25703859Sml29623 tx_buf_dma_handle = dma_bufp->dma_handle; 25713859Sml29623 for (i = 0; i < num_chunks; i++, dma_bufp++) { 25723859Sml29623 bsize = dma_bufp->block_size; 25733859Sml29623 nblocks = dma_bufp->nblocks; 25743859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25753859Sml29623 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 25763859Sml29623 "size %d dma_bufp $%p", 25773859Sml29623 i, sizeof (nxge_dma_common_t), dma_bufp)); 25783859Sml29623 25793859Sml29623 for (j = 0; j < nblocks; j++) { 25803859Sml29623 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 25813859Sml29623 dmap = &tx_msg_ring[index++].buf_dma; 25823859Sml29623 #ifdef TX_MEM_DEBUG 25833859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25843859Sml29623 "==> nxge_map_txdma_channel_buf_ring: j %d" 25853859Sml29623 "dmap $%p", i, dmap)); 25863859Sml29623 #endif 25873859Sml29623 nxge_setup_dma_common(dmap, dma_bufp, 1, 25883859Sml29623 bsize); 25893859Sml29623 } 25903859Sml29623 } 25913859Sml29623 25923859Sml29623 if (i < num_chunks) { 25934185Sspeer status = NXGE_ERROR; 25943859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 25953859Sml29623 } 25963859Sml29623 25973859Sml29623 *tx_desc_p = tx_ring_p; 25983859Sml29623 25993859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 26003859Sml29623 26013859Sml29623 nxge_map_txdma_channel_buf_ring_fail1: 26023952Sml29623 if (tx_ring_p->serial) { 26033952Sml29623 nxge_serialize_destroy(tx_ring_p->serial); 26043952Sml29623 tx_ring_p->serial = NULL; 26053952Sml29623 } 26063952Sml29623 26073859Sml29623 index--; 26083859Sml29623 for (; index >= 0; index--) { 26094185Sspeer if 
(tx_msg_ring[index].dma_handle != NULL) { 26104185Sspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 26113859Sml29623 } 26123859Sml29623 } 26133859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 26144185Sspeer KMEM_FREE(tx_msg_ring, size); 26153859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 26163859Sml29623 26174185Sspeer status = NXGE_ERROR; 26184185Sspeer 26193859Sml29623 nxge_map_txdma_channel_buf_ring_exit: 26203859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26213859Sml29623 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 26223859Sml29623 26233859Sml29623 return (status); 26243859Sml29623 } 26253859Sml29623 26263859Sml29623 /*ARGSUSED*/ 26273859Sml29623 static void 26283859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 26293859Sml29623 { 26303859Sml29623 p_tx_msg_t tx_msg_ring; 26313859Sml29623 p_tx_msg_t tx_msg_p; 26323859Sml29623 int i; 26333859Sml29623 26343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26353859Sml29623 "==> nxge_unmap_txdma_channel_buf_ring")); 26363859Sml29623 if (tx_ring_p == NULL) { 26373859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 26383859Sml29623 "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 26393859Sml29623 return; 26403859Sml29623 } 26413859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26423859Sml29623 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 26433859Sml29623 tx_ring_p->tdc)); 26443859Sml29623 26453859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 26466495Sspeer 26476495Sspeer /* 26486495Sspeer * Since the serialization thread, timer thread and 26496495Sspeer * interrupt thread can all call the transmit reclaim, 26506495Sspeer * the unmapping function needs to acquire the lock 26516495Sspeer * to free those buffers which were transmitted 26526495Sspeer * by the hardware already. 
26536495Sspeer */ 26546495Sspeer MUTEX_ENTER(&tx_ring_p->lock); 26556495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 26566495Sspeer "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 26576495Sspeer "channel %d", 26586495Sspeer tx_ring_p->tdc)); 26596495Sspeer (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 26606495Sspeer 26613859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 26623859Sml29623 tx_msg_p = &tx_msg_ring[i]; 26633859Sml29623 if (tx_msg_p->tx_message != NULL) { 26643859Sml29623 freemsg(tx_msg_p->tx_message); 26653859Sml29623 tx_msg_p->tx_message = NULL; 26663859Sml29623 } 26673859Sml29623 } 26683859Sml29623 26693859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 26703859Sml29623 if (tx_msg_ring[i].dma_handle != NULL) { 26713859Sml29623 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 26723859Sml29623 } 26736495Sspeer tx_msg_ring[i].dma_handle = NULL; 26743859Sml29623 } 26753859Sml29623 26766495Sspeer MUTEX_EXIT(&tx_ring_p->lock); 26776495Sspeer 26783952Sml29623 if (tx_ring_p->serial) { 26793952Sml29623 nxge_serialize_destroy(tx_ring_p->serial); 26803952Sml29623 tx_ring_p->serial = NULL; 26813952Sml29623 } 26823952Sml29623 26833859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 26843859Sml29623 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 26853859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 26863859Sml29623 26873859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26883859Sml29623 "<== nxge_unmap_txdma_channel_buf_ring")); 26893859Sml29623 } 26903859Sml29623 26913859Sml29623 static nxge_status_t 26926495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 26933859Sml29623 { 26943859Sml29623 p_tx_rings_t tx_rings; 26953859Sml29623 p_tx_ring_t *tx_desc_rings; 26963859Sml29623 p_tx_mbox_areas_t tx_mbox_areas_p; 26973859Sml29623 p_tx_mbox_t *tx_mbox_p; 26983859Sml29623 nxge_status_t status = NXGE_OK; 26993859Sml29623 27003859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 27013859Sml29623 27023859Sml29623 
tx_rings = nxgep->tx_rings; 27033859Sml29623 if (tx_rings == NULL) { 27043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27053859Sml29623 "<== nxge_txdma_hw_start: NULL ring pointer")); 27063859Sml29623 return (NXGE_ERROR); 27073859Sml29623 } 27083859Sml29623 tx_desc_rings = tx_rings->rings; 27093859Sml29623 if (tx_desc_rings == NULL) { 27103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27113859Sml29623 "<== nxge_txdma_hw_start: NULL ring pointers")); 27123859Sml29623 return (NXGE_ERROR); 27133859Sml29623 } 27143859Sml29623 27156495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27166495Sspeer "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 27173859Sml29623 27183859Sml29623 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 27193859Sml29623 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 27203859Sml29623 27216495Sspeer status = nxge_txdma_start_channel(nxgep, channel, 27226495Sspeer (p_tx_ring_t)tx_desc_rings[channel], 27236495Sspeer (p_tx_mbox_t)tx_mbox_p[channel]); 27246495Sspeer if (status != NXGE_OK) { 27256495Sspeer goto nxge_txdma_hw_start_fail1; 27263859Sml29623 } 27273859Sml29623 27283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27293859Sml29623 "tx_rings $%p rings $%p", 27303859Sml29623 nxgep->tx_rings, nxgep->tx_rings->rings)); 27313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27323859Sml29623 "tx_rings $%p tx_desc_rings $%p", 27333859Sml29623 nxgep->tx_rings, tx_desc_rings)); 27343859Sml29623 27353859Sml29623 goto nxge_txdma_hw_start_exit; 27363859Sml29623 27373859Sml29623 nxge_txdma_hw_start_fail1: 27383859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27393859Sml29623 "==> nxge_txdma_hw_start: disable " 27406495Sspeer "(status 0x%x channel %d)", status, channel)); 27413859Sml29623 27423859Sml29623 nxge_txdma_hw_start_exit: 27433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27443859Sml29623 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 27453859Sml29623 27463859Sml29623 return 
(status); 27473859Sml29623 } 27483859Sml29623 27496495Sspeer /* 27506495Sspeer * nxge_txdma_start_channel 27516495Sspeer * 27526495Sspeer * Start a TDC. 27536495Sspeer * 27546495Sspeer * Arguments: 27556495Sspeer * nxgep 27566495Sspeer * channel The channel to start. 27576495Sspeer * tx_ring_p channel's transmit descriptor ring. 27586495Sspeer * tx_mbox_p channel' smailbox. 27596495Sspeer * 27606495Sspeer * Notes: 27616495Sspeer * 27626495Sspeer * NPI/NXGE function calls: 27636495Sspeer * nxge_reset_txdma_channel() 27646495Sspeer * nxge_init_txdma_channel_event_mask() 27656495Sspeer * nxge_enable_txdma_channel() 27666495Sspeer * 27676495Sspeer * Registers accessed: 27686495Sspeer * none directly (see functions above). 27696495Sspeer * 27706495Sspeer * Context: 27716495Sspeer * Any domain 27726495Sspeer */ 27733859Sml29623 static nxge_status_t 27743859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 27753859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 27763859Sml29623 27773859Sml29623 { 27783859Sml29623 nxge_status_t status = NXGE_OK; 27793859Sml29623 27803859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27813859Sml29623 "==> nxge_txdma_start_channel (channel %d)", channel)); 27823859Sml29623 /* 27833859Sml29623 * TXDMA/TXC must be in stopped state. 
27843859Sml29623 */ 27853859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 27863859Sml29623 27873859Sml29623 /* 27883859Sml29623 * Reset TXDMA channel 27893859Sml29623 */ 27903859Sml29623 tx_ring_p->tx_cs.value = 0; 27913859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 27923859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 27933859Sml29623 tx_ring_p->tx_cs.value); 27943859Sml29623 if (status != NXGE_OK) { 27953859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27963859Sml29623 "==> nxge_txdma_start_channel (channel %d)" 27973859Sml29623 " reset channel failed 0x%x", channel, status)); 27983859Sml29623 goto nxge_txdma_start_channel_exit; 27993859Sml29623 } 28003859Sml29623 28013859Sml29623 /* 28023859Sml29623 * Initialize the TXDMA channel specific FZC control 28033859Sml29623 * configurations. These FZC registers are pertaining 28043859Sml29623 * to each TX channel (i.e. logical pages). 28053859Sml29623 */ 28066495Sspeer if (!isLDOMguest(nxgep)) { 28076495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 28086495Sspeer tx_ring_p, tx_mbox_p); 28096495Sspeer if (status != NXGE_OK) { 28106495Sspeer goto nxge_txdma_start_channel_exit; 28116495Sspeer } 28123859Sml29623 } 28133859Sml29623 28143859Sml29623 /* 28153859Sml29623 * Initialize the event masks. 28163859Sml29623 */ 28173859Sml29623 tx_ring_p->tx_evmask.value = 0; 28183859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 28196495Sspeer channel, &tx_ring_p->tx_evmask); 28203859Sml29623 if (status != NXGE_OK) { 28213859Sml29623 goto nxge_txdma_start_channel_exit; 28223859Sml29623 } 28233859Sml29623 28243859Sml29623 /* 28253859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 28263859Sml29623 * initialise the DMA channels and 28273859Sml29623 * enable each DMA channel. 
28283859Sml29623 */ 28293859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 28303859Sml29623 tx_ring_p, tx_mbox_p); 28313859Sml29623 if (status != NXGE_OK) { 28323859Sml29623 goto nxge_txdma_start_channel_exit; 28333859Sml29623 } 28343859Sml29623 28353859Sml29623 nxge_txdma_start_channel_exit: 28363859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 28373859Sml29623 28383859Sml29623 return (status); 28393859Sml29623 } 28403859Sml29623 28416495Sspeer /* 28426495Sspeer * nxge_txdma_stop_channel 28436495Sspeer * 28446495Sspeer * Stop a TDC. 28456495Sspeer * 28466495Sspeer * Arguments: 28476495Sspeer * nxgep 28486495Sspeer * channel The channel to stop. 28496495Sspeer * tx_ring_p channel's transmit descriptor ring. 28506495Sspeer * tx_mbox_p channel' smailbox. 28516495Sspeer * 28526495Sspeer * Notes: 28536495Sspeer * 28546495Sspeer * NPI/NXGE function calls: 28556495Sspeer * nxge_txdma_stop_inj_err() 28566495Sspeer * nxge_reset_txdma_channel() 28576495Sspeer * nxge_init_txdma_channel_event_mask() 28586495Sspeer * nxge_init_txdma_channel_cntl_stat() 28596495Sspeer * nxge_disable_txdma_channel() 28606495Sspeer * 28616495Sspeer * Registers accessed: 28626495Sspeer * none directly (see functions above). 28636495Sspeer * 28646495Sspeer * Context: 28656495Sspeer * Any domain 28666495Sspeer */ 28673859Sml29623 /*ARGSUSED*/ 28683859Sml29623 static nxge_status_t 28696495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 28703859Sml29623 { 28716495Sspeer p_tx_ring_t tx_ring_p; 28726495Sspeer int status = NXGE_OK; 28733859Sml29623 28743859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 28753859Sml29623 "==> nxge_txdma_stop_channel: channel %d", channel)); 28763859Sml29623 28773859Sml29623 /* 28783859Sml29623 * Stop (disable) TXDMA and TXC (if stop bit is set 28793859Sml29623 * and STOP_N_GO bit not set, the TXDMA reset state will 28803859Sml29623 * not be set if reset TXDMA. 
28813859Sml29623 */ 28823859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 28833859Sml29623 28846495Sspeer tx_ring_p = nxgep->tx_rings->rings[channel]; 28856495Sspeer 28863859Sml29623 /* 28873859Sml29623 * Reset TXDMA channel 28883859Sml29623 */ 28893859Sml29623 tx_ring_p->tx_cs.value = 0; 28903859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 28913859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 28923859Sml29623 tx_ring_p->tx_cs.value); 28933859Sml29623 if (status != NXGE_OK) { 28943859Sml29623 goto nxge_txdma_stop_channel_exit; 28953859Sml29623 } 28963859Sml29623 28973859Sml29623 #ifdef HARDWARE_REQUIRED 28983859Sml29623 /* Set up the interrupt event masks. */ 28993859Sml29623 tx_ring_p->tx_evmask.value = 0; 29003859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 29013859Sml29623 channel, &tx_ring_p->tx_evmask); 29023859Sml29623 if (status != NXGE_OK) { 29033859Sml29623 goto nxge_txdma_stop_channel_exit; 29043859Sml29623 } 29053859Sml29623 29063859Sml29623 /* Initialize the DMA control and status register */ 29073859Sml29623 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 29083859Sml29623 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 29093859Sml29623 tx_ring_p->tx_cs.value); 29103859Sml29623 if (status != NXGE_OK) { 29113859Sml29623 goto nxge_txdma_stop_channel_exit; 29123859Sml29623 } 29133859Sml29623 29146495Sspeer tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 29156495Sspeer 29163859Sml29623 /* Disable channel */ 29173859Sml29623 status = nxge_disable_txdma_channel(nxgep, channel, 29186495Sspeer tx_ring_p, tx_mbox_p); 29193859Sml29623 if (status != NXGE_OK) { 29203859Sml29623 goto nxge_txdma_start_channel_exit; 29213859Sml29623 } 29223859Sml29623 29233859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29243859Sml29623 "==> nxge_txdma_stop_channel: event done")); 29253859Sml29623 29263859Sml29623 #endif 29273859Sml29623 29283859Sml29623 nxge_txdma_stop_channel_exit: 29293859Sml29623 NXGE_DEBUG_MSG((nxgep, 
MEM3_CTL, "<== nxge_txdma_stop_channel")); 29303859Sml29623 return (status); 29313859Sml29623 } 29323859Sml29623 29336495Sspeer /* 29346495Sspeer * nxge_txdma_get_ring 29356495Sspeer * 29366495Sspeer * Get the ring for a TDC. 29376495Sspeer * 29386495Sspeer * Arguments: 29396495Sspeer * nxgep 29406495Sspeer * channel 29416495Sspeer * 29426495Sspeer * Notes: 29436495Sspeer * 29446495Sspeer * NPI/NXGE function calls: 29456495Sspeer * 29466495Sspeer * Registers accessed: 29476495Sspeer * 29486495Sspeer * Context: 29496495Sspeer * Any domain 29506495Sspeer */ 29513859Sml29623 static p_tx_ring_t 29523859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 29533859Sml29623 { 29546495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 29556495Sspeer int tdc; 29563859Sml29623 29573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 29583859Sml29623 29596495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 29603859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 29616495Sspeer "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 29626495Sspeer goto return_null; 29633859Sml29623 } 29643859Sml29623 29656495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 29666495Sspeer if ((1 << tdc) & set->owned.map) { 29676495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 29686495Sspeer if (ring) { 29696495Sspeer if (channel == ring->tdc) { 29706495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 29716495Sspeer "<== nxge_txdma_get_ring: " 29726495Sspeer "tdc %d ring $%p", tdc, ring)); 29736495Sspeer return (ring); 29746495Sspeer } 29756495Sspeer } 29763859Sml29623 } 29773859Sml29623 } 29783859Sml29623 29796495Sspeer return_null: 29806495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 29816495Sspeer "ring not found")); 29826495Sspeer 29833859Sml29623 return (NULL); 29843859Sml29623 } 29853859Sml29623 29866495Sspeer /* 29876495Sspeer * nxge_txdma_get_mbox 29886495Sspeer * 29896495Sspeer * Get the mailbox for a TDC. 
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static p_tx_mbox_t
nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));

	if (nxgep->tx_mbox_areas_p == 0 ||
	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
		goto return_null;
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
		goto return_null;
	}

	/*
	 * Walk only the TDCs this instance owns; the mailbox shares the
	 * TDC index with the matching transmit ring.
	 */
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (channel == ring->tdc) {
					tx_mbox_t *mailbox = nxgep->
					    tx_mbox_areas_p->
					    txmbox_areas_p[tdc];
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "<== nxge_txdma_get_mbox: tdc %d "
					    "ring $%p", tdc, mailbox));
					return (mailbox);
				}
			}
		}
	}

return_null:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
	    "mailbox not found"));

	return (NULL);
}

/*
 * nxge_tx_err_evnts
 *
 *	Recover a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	index	The index to the TDC ring.
 * 	ldvp	Used to get the channel number ONLY.
 * 	cs	A copy of the bits from TX_CS.
 *
 * Notes:
 *	Calling tree:
 *	 nxge_tx_intr()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_error_get()
 *	npi_txdma_inj_par_error_get()
 *	nxge_txdma_fatal_err_recover()
 *
 * Registers accessed:
 *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
 *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
 *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
 *
 * Context:
 *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
{
	npi_handle_t		handle;
	npi_status_t		rs;
	uint8_t			channel;
	p_tx_ring_t		*tx_rings;
	p_tx_ring_t		tx_ring_p;
	p_nxge_tx_ring_stats_t	tdc_stats;
	boolean_t		txchan_fatal = B_FALSE;
	nxge_status_t		status = NXGE_OK;
	tdmc_inj_par_err_t	par_err;
	uint32_t		value;

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * NOTE(review): the hardware channel number comes from ldvp,
	 * while the ring is looked up by the caller-supplied index —
	 * presumably these agree; confirm against nxge_tx_intr().
	 */
	channel = ldvp->channel;

	tx_rings = nxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;

	/*
	 * For any error that latches details in the ring error log
	 * registers, snapshot them into the channel's kstats first.
	 */
	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
		if ((rs = npi_txdma_ring_error_get(handle, channel,
		    &tdc_stats->errlog)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	/*
	 * Decode each error bit: bump the matching statistic, post an
	 * FMA ereport, and mark the channel fatal where recovery
	 * requires a channel reset.
	 */
	if (cs.bits.ldw.mbox_err) {
		tdc_stats->mbox_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: mailbox", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.tx_ring_oflow) {
		tdc_stats->tx_ring_oflow++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: tx_ring_oflow", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pref_buf_par_err) {
		tdc_stats->pre_buf_par_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pre_buf_par_err", channel));
		/* Clear error injection source for parity error */
		(void) npi_txdma_inj_par_error_get(handle, &value);
		par_err.value = value;
		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pref) {
		tdc_stats->nack_pref++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pref", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pkt_rd) {
		tdc_stats->nack_pkt_rd++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pkt_rd", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.conf_part_err) {
		tdc_stats->conf_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: config_partition_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_prt_err) {
		tdc_stats->pkt_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_prt_err", channel));
		txchan_fatal = B_TRUE;
	}

	/* Clear error injection source in case this is an injected error */
	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);

	if (txchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_tx_err_evnts: "
		    " fatal error on channel %d cs 0x%llx\n",
		    channel, cs.value));
		status = nxge_txdma_fatal_err_recover(nxgep, channel,
		    tx_ring_p);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}
32006495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 32013859Sml29623 32023859Sml29623 return (status); 32033859Sml29623 } 32043859Sml29623 32053859Sml29623 static nxge_status_t 32066495Sspeer nxge_txdma_fatal_err_recover( 32076495Sspeer p_nxge_t nxgep, 32086495Sspeer uint16_t channel, 32096495Sspeer p_tx_ring_t tx_ring_p) 32103859Sml29623 { 32113859Sml29623 npi_handle_t handle; 32123859Sml29623 npi_status_t rs = NPI_SUCCESS; 32133859Sml29623 p_tx_mbox_t tx_mbox_p; 32143859Sml29623 nxge_status_t status = NXGE_OK; 32153859Sml29623 32163859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 32173859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32183859Sml29623 "Recovering from TxDMAChannel#%d error...", channel)); 32193859Sml29623 32203859Sml29623 /* 32213859Sml29623 * Stop the dma channel waits for the stop done. 32223859Sml29623 * If the stop done bit is not set, then create 32233859Sml29623 * an error. 32243859Sml29623 */ 32253859Sml29623 32263859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 32273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 32283859Sml29623 MUTEX_ENTER(&tx_ring_p->lock); 32293859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 32303859Sml29623 if (rs != NPI_SUCCESS) { 32313859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32323859Sml29623 "==> nxge_txdma_fatal_err_recover (channel %d): " 32333859Sml29623 "stop failed ", channel)); 32343859Sml29623 goto fail; 32353859Sml29623 } 32363859Sml29623 32373859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 32383859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 32393859Sml29623 32403859Sml29623 /* 32413859Sml29623 * Reset TXDMA channel 32423859Sml29623 */ 32433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 32443859Sml29623 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 32453859Sml29623 NPI_SUCCESS) { 32463859Sml29623 
NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32473859Sml29623 "==> nxge_txdma_fatal_err_recover (channel %d)" 32483859Sml29623 " reset channel failed 0x%x", channel, rs)); 32493859Sml29623 goto fail; 32503859Sml29623 } 32513859Sml29623 32523859Sml29623 /* 32533859Sml29623 * Reset the tail (kick) register to 0. 32543859Sml29623 * (Hardware will not reset it. Tx overflow fatal 32553859Sml29623 * error if tail is not set to 0 after reset! 32563859Sml29623 */ 32573859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 32583859Sml29623 32593859Sml29623 /* Restart TXDMA channel */ 32603859Sml29623 32616495Sspeer if (!isLDOMguest(nxgep)) { 32626495Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 32636495Sspeer 32646495Sspeer // XXX This is a problem in HIO! 32656495Sspeer /* 32666495Sspeer * Initialize the TXDMA channel specific FZC control 32676495Sspeer * configurations. These FZC registers are pertaining 32686495Sspeer * to each TX channel (i.e. logical pages). 32696495Sspeer */ 32706495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 32716495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 32726495Sspeer tx_ring_p, tx_mbox_p); 32736495Sspeer if (status != NXGE_OK) 32746495Sspeer goto fail; 32756495Sspeer } 32763859Sml29623 32773859Sml29623 /* 32783859Sml29623 * Initialize the event masks. 32793859Sml29623 */ 32803859Sml29623 tx_ring_p->tx_evmask.value = 0; 32813859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 32823859Sml29623 &tx_ring_p->tx_evmask); 32833859Sml29623 if (status != NXGE_OK) 32843859Sml29623 goto fail; 32853859Sml29623 32863859Sml29623 tx_ring_p->wr_index_wrap = B_FALSE; 32873859Sml29623 tx_ring_p->wr_index = 0; 32883859Sml29623 tx_ring_p->rd_index = 0; 32893859Sml29623 32903859Sml29623 /* 32913859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 32923859Sml29623 * initialise the DMA channels and 32933859Sml29623 * enable each DMA channel. 
32943859Sml29623 */ 32953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 32963859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 32973859Sml29623 tx_ring_p, tx_mbox_p); 32983859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 32993859Sml29623 if (status != NXGE_OK) 33003859Sml29623 goto fail; 33013859Sml29623 33023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33033859Sml29623 "Recovery Successful, TxDMAChannel#%d Restored", 33043859Sml29623 channel)); 33053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 33063859Sml29623 33073859Sml29623 return (NXGE_OK); 33083859Sml29623 33093859Sml29623 fail: 33103859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33113859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 33123859Sml29623 "nxge_txdma_fatal_err_recover (channel %d): " 33133859Sml29623 "failed to recover this txdma channel", channel)); 33143859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 33153859Sml29623 33163859Sml29623 return (status); 33173859Sml29623 } 33183859Sml29623 33196495Sspeer /* 33206495Sspeer * nxge_tx_port_fatal_err_recover 33216495Sspeer * 33226495Sspeer * Attempt to recover from a fatal port error. 33236495Sspeer * 33246495Sspeer * Arguments: 33256495Sspeer * nxgep 33266495Sspeer * 33276495Sspeer * Notes: 33286495Sspeer * How would a guest do this? 
33296495Sspeer * 33306495Sspeer * NPI/NXGE function calls: 33316495Sspeer * 33326495Sspeer * Registers accessed: 33336495Sspeer * 33346495Sspeer * Context: 33356495Sspeer * Service domain 33366495Sspeer */ 33373859Sml29623 nxge_status_t 33383859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 33393859Sml29623 { 33406495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 33416495Sspeer nxge_channel_t tdc; 33426495Sspeer 33436495Sspeer tx_ring_t *ring; 33446495Sspeer tx_mbox_t *mailbox; 33456495Sspeer 33463859Sml29623 npi_handle_t handle; 33476495Sspeer nxge_status_t status; 33486495Sspeer npi_status_t rs; 33493859Sml29623 33503859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 33513859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33526495Sspeer "Recovering from TxPort error...")); 33536495Sspeer 33546495Sspeer if (isLDOMguest(nxgep)) { 33556495Sspeer return (NXGE_OK); 33566495Sspeer } 33576495Sspeer 33586495Sspeer if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 33596495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 33606495Sspeer "<== nxge_tx_port_fatal_err_recover: not initialized")); 33616495Sspeer return (NXGE_ERROR); 33626495Sspeer } 33636495Sspeer 33646495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 33656495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 33666495Sspeer "<== nxge_tx_port_fatal_err_recover: " 33676495Sspeer "NULL ring pointer(s)")); 33686495Sspeer return (NXGE_ERROR); 33696495Sspeer } 33706495Sspeer 33716495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 33726495Sspeer if ((1 << tdc) & set->owned.map) { 33736495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 33746495Sspeer if (ring) 33756495Sspeer MUTEX_ENTER(&ring->lock); 33766495Sspeer } 33776495Sspeer } 33783859Sml29623 33793859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 33806495Sspeer 33816495Sspeer /* 33826495Sspeer * Stop all the TDCs owned by us. 33836495Sspeer * (The shared TDCs will have been stopped by their owners.) 
33846495Sspeer */ 33856495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 33866495Sspeer if ((1 << tdc) & set->owned.map) { 33876495Sspeer ring = nxgep->tx_rings->rings[tdc]; 33886495Sspeer if (ring) { 33896495Sspeer rs = npi_txdma_channel_control 33906495Sspeer (handle, TXDMA_STOP, tdc); 33916495Sspeer if (rs != NPI_SUCCESS) { 33926495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33936495Sspeer "nxge_tx_port_fatal_err_recover " 33946495Sspeer "(channel %d): stop failed ", tdc)); 33956495Sspeer goto fail; 33966495Sspeer } 33976495Sspeer } 33983859Sml29623 } 33993859Sml29623 } 34003859Sml29623 34016495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 34026495Sspeer 34036495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34046495Sspeer if ((1 << tdc) & set->owned.map) { 34056495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34066495Sspeer if (ring) 34076495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, 0); 34083859Sml29623 } 34093859Sml29623 } 34103859Sml29623 34113859Sml29623 /* 34126495Sspeer * Reset all the TDCs. 34133859Sml29623 */ 34146495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 34156495Sspeer 34166495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34176495Sspeer if ((1 << tdc) & set->owned.map) { 34186495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34196495Sspeer if (ring) { 34206495Sspeer if ((rs = npi_txdma_channel_control 34216495Sspeer (handle, TXDMA_RESET, tdc)) 34226495Sspeer != NPI_SUCCESS) { 34236495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34246495Sspeer "nxge_tx_port_fatal_err_recover " 34256495Sspeer "(channel %d) reset channel " 34266495Sspeer "failed 0x%x", tdc, rs)); 34276495Sspeer goto fail; 34286495Sspeer } 34296495Sspeer } 34306495Sspeer /* 34316495Sspeer * Reset the tail (kick) register to 0. 34326495Sspeer * (Hardware will not reset it. Tx overflow fatal 34336495Sspeer * error if tail is not set to 0 after reset! 
34346495Sspeer */ 34356495Sspeer TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 34363859Sml29623 } 34376495Sspeer } 34386495Sspeer 34396495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 34406495Sspeer 34416495Sspeer /* Restart all the TDCs */ 34426495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34436495Sspeer if ((1 << tdc) & set->owned.map) { 34446495Sspeer ring = nxgep->tx_rings->rings[tdc]; 34456495Sspeer if (ring) { 34466495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 34476495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, tdc, 34486495Sspeer ring, mailbox); 34496495Sspeer ring->tx_evmask.value = 0; 34506495Sspeer /* 34516495Sspeer * Initialize the event masks. 34526495Sspeer */ 34536495Sspeer status = nxge_init_txdma_channel_event_mask 34546495Sspeer (nxgep, tdc, &ring->tx_evmask); 34556495Sspeer 34566495Sspeer ring->wr_index_wrap = B_FALSE; 34576495Sspeer ring->wr_index = 0; 34586495Sspeer ring->rd_index = 0; 34596495Sspeer 34606495Sspeer if (status != NXGE_OK) 34616495Sspeer goto fail; 34626495Sspeer if (status != NXGE_OK) 34636495Sspeer goto fail; 34646495Sspeer } 34653859Sml29623 } 34666495Sspeer } 34676495Sspeer 34686495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 34696495Sspeer 34706495Sspeer /* Re-enable all the TDCs */ 34716495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34726495Sspeer if ((1 << tdc) & set->owned.map) { 34736495Sspeer ring = nxgep->tx_rings->rings[tdc]; 34746495Sspeer if (ring) { 34756495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 34766495Sspeer status = nxge_enable_txdma_channel(nxgep, tdc, 34776495Sspeer ring, mailbox); 34786495Sspeer if (status != NXGE_OK) 34796495Sspeer goto fail; 34806495Sspeer } 34816495Sspeer } 34823859Sml29623 } 34833859Sml29623 34843859Sml29623 /* 34856495Sspeer * Unlock all the TDCs. 
34863859Sml29623 */ 34876495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34886495Sspeer if ((1 << tdc) & set->owned.map) { 34896495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34906495Sspeer if (ring) 34916495Sspeer MUTEX_EXIT(&ring->lock); 34923859Sml29623 } 34933859Sml29623 } 34943859Sml29623 34956495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 34963859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 34973859Sml29623 34983859Sml29623 return (NXGE_OK); 34993859Sml29623 35003859Sml29623 fail: 35016495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35026495Sspeer if ((1 << tdc) & set->owned.map) { 35036495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35046495Sspeer if (ring) 35056495Sspeer MUTEX_EXIT(&ring->lock); 35063859Sml29623 } 35073859Sml29623 } 35083859Sml29623 35096495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 35106495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 35113859Sml29623 35123859Sml29623 return (status); 35133859Sml29623 } 35143859Sml29623 35156495Sspeer /* 35166495Sspeer * nxge_txdma_inject_err 35176495Sspeer * 35186495Sspeer * Inject an error into a TDC. 35196495Sspeer * 35206495Sspeer * Arguments: 35216495Sspeer * nxgep 35226495Sspeer * err_id The error to inject. 35236495Sspeer * chan The channel to inject into. 35246495Sspeer * 35256495Sspeer * Notes: 35266495Sspeer * This is called from nxge_main.c:nxge_err_inject() 35276495Sspeer * Has this ioctl ever been used? 
35286495Sspeer * 35296495Sspeer * NPI/NXGE function calls: 35306495Sspeer * npi_txdma_inj_par_error_get() 35316495Sspeer * npi_txdma_inj_par_error_set() 35326495Sspeer * 35336495Sspeer * Registers accessed: 35346495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 35356495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35366495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35376495Sspeer * 35386495Sspeer * Context: 35396495Sspeer * Service domain 35406495Sspeer */ 35413859Sml29623 void 35423859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 35433859Sml29623 { 35443859Sml29623 tdmc_intr_dbg_t tdi; 35453859Sml29623 tdmc_inj_par_err_t par_err; 35463859Sml29623 uint32_t value; 35473859Sml29623 npi_handle_t handle; 35483859Sml29623 35493859Sml29623 switch (err_id) { 35503859Sml29623 35513859Sml29623 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 35523859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 35533859Sml29623 /* Clear error injection source for parity error */ 35543859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 35553859Sml29623 par_err.value = value; 35563859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 35573859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 35583859Sml29623 35593859Sml29623 par_err.bits.ldw.inject_parity_error = (1 << chan); 35603859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 35613859Sml29623 par_err.value = value; 35623859Sml29623 par_err.bits.ldw.inject_parity_error |= (1 << chan); 35633859Sml29623 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 35643859Sml29623 (unsigned long long)par_err.value); 35653859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 35663859Sml29623 break; 35673859Sml29623 35683859Sml29623 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 35693859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 35703859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 
35713859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 35723859Sml29623 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 35733859Sml29623 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 35743859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 35753859Sml29623 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 35763859Sml29623 chan, &tdi.value); 35773859Sml29623 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 35783859Sml29623 tdi.bits.ldw.pref_buf_par_err = 1; 35793859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 35803859Sml29623 tdi.bits.ldw.mbox_err = 1; 35813859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 35823859Sml29623 tdi.bits.ldw.nack_pref = 1; 35833859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 35843859Sml29623 tdi.bits.ldw.nack_pkt_rd = 1; 35853859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 35863859Sml29623 tdi.bits.ldw.pkt_size_err = 1; 35873859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 35883859Sml29623 tdi.bits.ldw.tx_ring_oflow = 1; 35893859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 35903859Sml29623 tdi.bits.ldw.conf_part_err = 1; 35913859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 35923859Sml29623 tdi.bits.ldw.pkt_part_err = 1; 35935125Sjoycey #if defined(__i386) 35945125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 35955125Sjoycey tdi.value); 35965125Sjoycey #else 35973859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 35983859Sml29623 tdi.value); 35995125Sjoycey #endif 36003859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36013859Sml29623 chan, tdi.value); 36023859Sml29623 36033859Sml29623 break; 36043859Sml29623 } 36053859Sml29623 } 3606