13859Sml29623 /* 23859Sml29623 * CDDL HEADER START 33859Sml29623 * 43859Sml29623 * The contents of this file are subject to the terms of the 53859Sml29623 * Common Development and Distribution License (the "License"). 63859Sml29623 * You may not use this file except in compliance with the License. 73859Sml29623 * 83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93859Sml29623 * or http://www.opensolaris.org/os/licensing. 103859Sml29623 * See the License for the specific language governing permissions 113859Sml29623 * and limitations under the License. 123859Sml29623 * 133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each 143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the 163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying 173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner] 183859Sml29623 * 193859Sml29623 * CDDL HEADER END 203859Sml29623 */ 213859Sml29623 /* 226495Sspeer * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 233859Sml29623 * Use is subject to license terms. 
243859Sml29623 */ 253859Sml29623 263859Sml29623 #include <sys/nxge/nxge_impl.h> 273859Sml29623 #include <sys/nxge/nxge_txdma.h> 286495Sspeer #include <sys/nxge/nxge_hio.h> 296495Sspeer #include <npi_tx_rd64.h> 306495Sspeer #include <npi_tx_wr64.h> 313859Sml29623 #include <sys/llc1.h> 323859Sml29623 333859Sml29623 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 343859Sml29623 uint32_t nxge_tx_minfree = 32; 353859Sml29623 uint32_t nxge_tx_intr_thres = 0; 363859Sml29623 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 373859Sml29623 uint32_t nxge_tx_tiny_pack = 1; 383859Sml29623 uint32_t nxge_tx_use_bcopy = 1; 393859Sml29623 403859Sml29623 extern uint32_t nxge_tx_ring_size; 413859Sml29623 extern uint32_t nxge_bcopy_thresh; 423859Sml29623 extern uint32_t nxge_dvma_thresh; 433859Sml29623 extern uint32_t nxge_dma_stream_thresh; 443859Sml29623 extern dma_method_t nxge_force_dma; 456611Sml29623 extern uint32_t nxge_cksum_offload; 463859Sml29623 473859Sml29623 /* Device register access attributes for PIO. */ 483859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 493859Sml29623 /* Device descriptor access attributes for DMA. */ 503859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 513859Sml29623 /* Device buffer access attributes for DMA. 
*/ 523859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 533859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr; 543859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr; 553859Sml29623 563952Sml29623 extern int nxge_serial_tx(mblk_t *mp, void *arg); 573952Sml29623 586495Sspeer static nxge_status_t nxge_map_txdma(p_nxge_t, int); 596495Sspeer 606495Sspeer static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 613859Sml29623 623859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 633859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *, 643859Sml29623 uint32_t, p_nxge_dma_common_t *, 653859Sml29623 p_tx_mbox_t *); 666495Sspeer static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 673859Sml29623 683859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 693859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 703859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 713859Sml29623 723859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 733859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t, 743859Sml29623 p_tx_mbox_t *); 753859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 763859Sml29623 p_tx_ring_t, p_tx_mbox_t); 773859Sml29623 783859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 793859Sml29623 p_tx_ring_t, p_tx_mbox_t); 806495Sspeer static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 813859Sml29623 823859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 833859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 843859Sml29623 p_nxge_ldv_t, tx_cs_t); 853859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 863859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 873859Sml29623 uint16_t, p_tx_ring_t); 883859Sml29623 896495Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 906495Sspeer p_tx_ring_t ring_p, 
uint16_t channel); 916495Sspeer 923859Sml29623 nxge_status_t 933859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep) 943859Sml29623 { 956495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 966495Sspeer int i, count; 976495Sspeer 986495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 996495Sspeer 1006495Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 1016495Sspeer if ((1 << i) & set->lg.map) { 1026495Sspeer int tdc; 1036495Sspeer nxge_grp_t *group = set->group[i]; 1046495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1056495Sspeer if ((1 << tdc) & group->map) { 1066495Sspeer if ((nxge_grp_dc_add(nxgep, 1077755SMisaki.Kataoka@Sun.COM group, VP_BOUND_TX, tdc))) 1086495Sspeer return (NXGE_ERROR); 1096495Sspeer } 1106495Sspeer } 1116495Sspeer } 1126495Sspeer if (++count == set->lg.count) 1136495Sspeer break; 1146495Sspeer } 1156495Sspeer 1166495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 1176495Sspeer 1186495Sspeer return (NXGE_OK); 1196495Sspeer } 1206495Sspeer 1216495Sspeer nxge_status_t 1226495Sspeer nxge_init_txdma_channel( 1236495Sspeer p_nxge_t nxge, 1246495Sspeer int channel) 1256495Sspeer { 1266495Sspeer nxge_status_t status; 1276495Sspeer 1286495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 1296495Sspeer 1306495Sspeer status = nxge_map_txdma(nxge, channel); 1313859Sml29623 if (status != NXGE_OK) { 1326495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1336495Sspeer "<== nxge_init_txdma_channel: status 0x%x", status)); 1346495Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 1353859Sml29623 return (status); 1363859Sml29623 } 1373859Sml29623 1386495Sspeer status = nxge_txdma_hw_start(nxge, channel); 1393859Sml29623 if (status != NXGE_OK) { 1406495Sspeer (void) nxge_unmap_txdma_channel(nxge, channel); 1416495Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 1423859Sml29623 return (status); 1433859Sml29623 } 1443859Sml29623 1456495Sspeer if 
(!nxge->statsp->tdc_ksp[channel]) 1466495Sspeer nxge_setup_tdc_kstats(nxge, channel); 1476495Sspeer 1486495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 1496495Sspeer 1506495Sspeer return (status); 1513859Sml29623 } 1523859Sml29623 1533859Sml29623 void 1543859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep) 1553859Sml29623 { 1566495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1576495Sspeer int tdc; 1586495Sspeer 1596495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 1606495Sspeer 1616495Sspeer if (set->owned.map == 0) { 1626495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1636495Sspeer "nxge_uninit_txdma_channels: no channels")); 1646495Sspeer return; 1656495Sspeer } 1666495Sspeer 1676495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1686495Sspeer if ((1 << tdc) & set->owned.map) { 1696495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 1706495Sspeer } 1716495Sspeer } 1726495Sspeer 1736495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 1746495Sspeer } 1756495Sspeer 1766495Sspeer void 1776495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 1786495Sspeer { 1796495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 1806495Sspeer 1816495Sspeer if (nxgep->statsp->tdc_ksp[channel]) { 1826495Sspeer kstat_delete(nxgep->statsp->tdc_ksp[channel]); 1836495Sspeer nxgep->statsp->tdc_ksp[channel] = 0; 1846495Sspeer } 1856495Sspeer 1866495Sspeer (void) nxge_txdma_stop_channel(nxgep, channel); 1876495Sspeer nxge_unmap_txdma_channel(nxgep, channel); 1883859Sml29623 1893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1906929Smisaki "<== nxge_uninit_txdma_channel")); 1913859Sml29623 } 1923859Sml29623 1933859Sml29623 void 1943859Sml29623 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 1953859Sml29623 uint32_t entries, uint32_t size) 1963859Sml29623 { 1973859Sml29623 size_t tsize; 1983859Sml29623 *dest_p = *src_p; 1993859Sml29623 tsize = 
size * entries; 2003859Sml29623 dest_p->alength = tsize; 2013859Sml29623 dest_p->nblocks = entries; 2023859Sml29623 dest_p->block_size = size; 2033859Sml29623 dest_p->offset += tsize; 2043859Sml29623 2053859Sml29623 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 2063859Sml29623 src_p->alength -= tsize; 2073859Sml29623 src_p->dma_cookie.dmac_laddress += tsize; 2083859Sml29623 src_p->dma_cookie.dmac_size -= tsize; 2093859Sml29623 } 2103859Sml29623 2116495Sspeer /* 2126495Sspeer * nxge_reset_txdma_channel 2136495Sspeer * 2146495Sspeer * Reset a TDC. 2156495Sspeer * 2166495Sspeer * Arguments: 2176495Sspeer * nxgep 2186495Sspeer * channel The channel to reset. 2196495Sspeer * reg_data The current TX_CS. 2206495Sspeer * 2216495Sspeer * Notes: 2226495Sspeer * 2236495Sspeer * NPI/NXGE function calls: 2246495Sspeer * npi_txdma_channel_reset() 2256495Sspeer * npi_txdma_channel_control() 2266495Sspeer * 2276495Sspeer * Registers accessed: 2286495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 2296495Sspeer * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 2306495Sspeer * 2316495Sspeer * Context: 2326495Sspeer * Any domain 2336495Sspeer */ 2343859Sml29623 nxge_status_t 2353859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 2363859Sml29623 { 2373859Sml29623 npi_status_t rs = NPI_SUCCESS; 2383859Sml29623 nxge_status_t status = NXGE_OK; 2393859Sml29623 npi_handle_t handle; 2403859Sml29623 2413859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 2423859Sml29623 2433859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2443859Sml29623 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 2453859Sml29623 rs = npi_txdma_channel_reset(handle, channel); 2463859Sml29623 } else { 2473859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 2486929Smisaki channel); 2493859Sml29623 } 2503859Sml29623 2513859Sml29623 if (rs != NPI_SUCCESS) { 2523859Sml29623 status = NXGE_ERROR | rs; 2533859Sml29623 } 2543859Sml29623 
2553859Sml29623 /* 2563859Sml29623 * Reset the tail (kick) register to 0. 2573859Sml29623 * (Hardware will not reset it. Tx overflow fatal 2583859Sml29623 * error if tail is not set to 0 after reset! 2593859Sml29623 */ 2603859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2613859Sml29623 2623859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 2633859Sml29623 return (status); 2643859Sml29623 } 2653859Sml29623 2666495Sspeer /* 2676495Sspeer * nxge_init_txdma_channel_event_mask 2686495Sspeer * 2696495Sspeer * Enable interrupts for a set of events. 2706495Sspeer * 2716495Sspeer * Arguments: 2726495Sspeer * nxgep 2736495Sspeer * channel The channel to map. 2746495Sspeer * mask_p The events to enable. 2756495Sspeer * 2766495Sspeer * Notes: 2776495Sspeer * 2786495Sspeer * NPI/NXGE function calls: 2796495Sspeer * npi_txdma_event_mask() 2806495Sspeer * 2816495Sspeer * Registers accessed: 2826495Sspeer * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 2836495Sspeer * 2846495Sspeer * Context: 2856495Sspeer * Any domain 2866495Sspeer */ 2873859Sml29623 nxge_status_t 2883859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 2893859Sml29623 p_tx_dma_ent_msk_t mask_p) 2903859Sml29623 { 2913859Sml29623 npi_handle_t handle; 2923859Sml29623 npi_status_t rs = NPI_SUCCESS; 2933859Sml29623 nxge_status_t status = NXGE_OK; 2943859Sml29623 2953859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2966929Smisaki "<== nxge_init_txdma_channel_event_mask")); 2973859Sml29623 2983859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2993859Sml29623 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 3003859Sml29623 if (rs != NPI_SUCCESS) { 3013859Sml29623 status = NXGE_ERROR | rs; 3023859Sml29623 } 3033859Sml29623 3043859Sml29623 return (status); 3053859Sml29623 } 3063859Sml29623 3076495Sspeer /* 3086495Sspeer * nxge_init_txdma_channel_cntl_stat 3096495Sspeer * 3106495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 
3116495Sspeer * 3126495Sspeer * Arguments: 3136495Sspeer * nxgep 3146495Sspeer * channel The channel to stop. 3156495Sspeer * 3166495Sspeer * Notes: 3176495Sspeer * 3186495Sspeer * NPI/NXGE function calls: 3196495Sspeer * npi_txdma_control_status() 3206495Sspeer * 3216495Sspeer * Registers accessed: 3226495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3236495Sspeer * 3246495Sspeer * Context: 3256495Sspeer * Any domain 3266495Sspeer */ 3273859Sml29623 nxge_status_t 3283859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3293859Sml29623 uint64_t reg_data) 3303859Sml29623 { 3313859Sml29623 npi_handle_t handle; 3323859Sml29623 npi_status_t rs = NPI_SUCCESS; 3333859Sml29623 nxge_status_t status = NXGE_OK; 3343859Sml29623 3353859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3366929Smisaki "<== nxge_init_txdma_channel_cntl_stat")); 3373859Sml29623 3383859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3393859Sml29623 rs = npi_txdma_control_status(handle, OP_SET, channel, 3406929Smisaki (p_tx_cs_t)®_data); 3413859Sml29623 3423859Sml29623 if (rs != NPI_SUCCESS) { 3433859Sml29623 status = NXGE_ERROR | rs; 3443859Sml29623 } 3453859Sml29623 3463859Sml29623 return (status); 3473859Sml29623 } 3483859Sml29623 3496495Sspeer /* 3506495Sspeer * nxge_enable_txdma_channel 3516495Sspeer * 3526495Sspeer * Enable a TDC. 3536495Sspeer * 3546495Sspeer * Arguments: 3556495Sspeer * nxgep 3566495Sspeer * channel The channel to enable. 3576495Sspeer * tx_desc_p channel's transmit descriptor ring. 
3586495Sspeer * mbox_p channel's mailbox, 3596495Sspeer * 3606495Sspeer * Notes: 3616495Sspeer * 3626495Sspeer * NPI/NXGE function calls: 3636495Sspeer * npi_txdma_ring_config() 3646495Sspeer * npi_txdma_mbox_config() 3656495Sspeer * npi_txdma_channel_init_enable() 3666495Sspeer * 3676495Sspeer * Registers accessed: 3686495Sspeer * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 3696495Sspeer * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 3706495Sspeer * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 3716495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3726495Sspeer * 3736495Sspeer * Context: 3746495Sspeer * Any domain 3756495Sspeer */ 3763859Sml29623 nxge_status_t 3773859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep, 3783859Sml29623 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 3793859Sml29623 { 3803859Sml29623 npi_handle_t handle; 3813859Sml29623 npi_status_t rs = NPI_SUCCESS; 3823859Sml29623 nxge_status_t status = NXGE_OK; 3833859Sml29623 3843859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 3853859Sml29623 3863859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3873859Sml29623 /* 3883859Sml29623 * Use configuration data composed at init time. 3893859Sml29623 * Write to hardware the transmit ring configurations. 3903859Sml29623 */ 3913859Sml29623 rs = npi_txdma_ring_config(handle, OP_SET, channel, 3926495Sspeer (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 3933859Sml29623 3943859Sml29623 if (rs != NPI_SUCCESS) { 3953859Sml29623 return (NXGE_ERROR | rs); 3963859Sml29623 } 3973859Sml29623 3986495Sspeer if (isLDOMguest(nxgep)) { 3996495Sspeer /* Add interrupt handler for this channel. 
*/ 4006495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK) 4016495Sspeer return (NXGE_ERROR); 4026495Sspeer } 4036495Sspeer 4043859Sml29623 /* Write to hardware the mailbox */ 4053859Sml29623 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 4066929Smisaki (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 4073859Sml29623 4083859Sml29623 if (rs != NPI_SUCCESS) { 4093859Sml29623 return (NXGE_ERROR | rs); 4103859Sml29623 } 4113859Sml29623 4123859Sml29623 /* Start the DMA engine. */ 4133859Sml29623 rs = npi_txdma_channel_init_enable(handle, channel); 4143859Sml29623 4153859Sml29623 if (rs != NPI_SUCCESS) { 4163859Sml29623 return (NXGE_ERROR | rs); 4173859Sml29623 } 4183859Sml29623 4193859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 4203859Sml29623 4213859Sml29623 return (status); 4223859Sml29623 } 4233859Sml29623 4243859Sml29623 void 4253859Sml29623 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 4263859Sml29623 boolean_t l4_cksum, int pkt_len, uint8_t npads, 4276611Sml29623 p_tx_pkt_hdr_all_t pkthdrp, 4286611Sml29623 t_uscalar_t start_offset, 4296611Sml29623 t_uscalar_t stuff_offset) 4303859Sml29623 { 4313859Sml29623 p_tx_pkt_header_t hdrp; 4323859Sml29623 p_mblk_t nmp; 4333859Sml29623 uint64_t tmp; 4343859Sml29623 size_t mblk_len; 4353859Sml29623 size_t iph_len; 4363859Sml29623 size_t hdrs_size; 4373859Sml29623 uint8_t hdrs_buf[sizeof (struct ether_header) + 4386929Smisaki 64 + sizeof (uint32_t)]; 4395505Smisaki uint8_t *cursor; 4403859Sml29623 uint8_t *ip_buf; 4413859Sml29623 uint16_t eth_type; 4423859Sml29623 uint8_t ipproto; 4433859Sml29623 boolean_t is_vlan = B_FALSE; 4443859Sml29623 size_t eth_hdr_size; 4453859Sml29623 4463859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 4473859Sml29623 4483859Sml29623 /* 4493859Sml29623 * Caller should zero out the headers first. 
4503859Sml29623 */ 4513859Sml29623 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 4523859Sml29623 4533859Sml29623 if (fill_len) { 4543859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 4556929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d " 4566929Smisaki "npads %d", pkt_len, npads)); 4573859Sml29623 tmp = (uint64_t)pkt_len; 4583859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 4593859Sml29623 goto fill_tx_header_done; 4603859Sml29623 } 4613859Sml29623 4626611Sml29623 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 4633859Sml29623 4643859Sml29623 /* 4653859Sml29623 * mp is the original data packet (does not include the 4663859Sml29623 * Neptune transmit header). 4673859Sml29623 */ 4683859Sml29623 nmp = mp; 4693859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 4706929Smisaki "mp $%p b_rptr $%p len %d", 4716929Smisaki mp, nmp->b_rptr, MBLKL(nmp))); 4725505Smisaki /* copy ether_header from mblk to hdrs_buf */ 4735505Smisaki cursor = &hdrs_buf[0]; 4745505Smisaki tmp = sizeof (struct ether_vlan_header); 4755505Smisaki while ((nmp != NULL) && (tmp > 0)) { 4765505Smisaki size_t buflen; 4775505Smisaki mblk_len = MBLKL(nmp); 4785512Smisaki buflen = min((size_t)tmp, mblk_len); 4795505Smisaki bcopy(nmp->b_rptr, cursor, buflen); 4805505Smisaki cursor += buflen; 4815505Smisaki tmp -= buflen; 4825505Smisaki nmp = nmp->b_cont; 4835505Smisaki } 4845505Smisaki 4855505Smisaki nmp = mp; 4865505Smisaki mblk_len = MBLKL(nmp); 4873859Sml29623 ip_buf = NULL; 4883859Sml29623 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 4893859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 4906929Smisaki "ether type 0x%x", eth_type, hdrp->value)); 4913859Sml29623 4923859Sml29623 if (eth_type < ETHERMTU) { 4933859Sml29623 tmp = 1ull; 4943859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 4953859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 4966929Smisaki "value 0x%llx", hdrp->value)); 
4973859Sml29623 if (*(hdrs_buf + sizeof (struct ether_header)) 4986929Smisaki == LLC_SNAP_SAP) { 4993859Sml29623 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 5006929Smisaki sizeof (struct ether_header) + 6))); 5013859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 5026929Smisaki "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 5036929Smisaki eth_type)); 5043859Sml29623 } else { 5053859Sml29623 goto fill_tx_header_done; 5063859Sml29623 } 5073859Sml29623 } else if (eth_type == VLAN_ETHERTYPE) { 5083859Sml29623 tmp = 1ull; 5093859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 5103859Sml29623 5113859Sml29623 eth_type = ntohs(((struct ether_vlan_header *) 5126929Smisaki hdrs_buf)->ether_type); 5133859Sml29623 is_vlan = B_TRUE; 5143859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 5156929Smisaki "value 0x%llx", hdrp->value)); 5163859Sml29623 } 5173859Sml29623 5183859Sml29623 if (!is_vlan) { 5193859Sml29623 eth_hdr_size = sizeof (struct ether_header); 5203859Sml29623 } else { 5213859Sml29623 eth_hdr_size = sizeof (struct ether_vlan_header); 5223859Sml29623 } 5233859Sml29623 5243859Sml29623 switch (eth_type) { 5253859Sml29623 case ETHERTYPE_IP: 5263859Sml29623 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 5273859Sml29623 ip_buf = nmp->b_rptr + eth_hdr_size; 5283859Sml29623 mblk_len -= eth_hdr_size; 5293859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5303859Sml29623 if (mblk_len > (iph_len + sizeof (uint32_t))) { 5313859Sml29623 ip_buf = nmp->b_rptr; 5323859Sml29623 ip_buf += eth_hdr_size; 5333859Sml29623 } else { 5343859Sml29623 ip_buf = NULL; 5353859Sml29623 } 5363859Sml29623 5373859Sml29623 } 5383859Sml29623 if (ip_buf == NULL) { 5393859Sml29623 hdrs_size = 0; 5403859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 5413859Sml29623 while ((nmp) && (hdrs_size < 5426929Smisaki sizeof (hdrs_buf))) { 5433859Sml29623 mblk_len = (size_t)nmp->b_wptr - 5446929Smisaki (size_t)nmp->b_rptr; 5453859Sml29623 if (mblk_len >= 5466929Smisaki (sizeof 
(hdrs_buf) - hdrs_size)) 5473859Sml29623 mblk_len = sizeof (hdrs_buf) - 5486929Smisaki hdrs_size; 5493859Sml29623 bcopy(nmp->b_rptr, 5506929Smisaki &hdrs_buf[hdrs_size], mblk_len); 5513859Sml29623 hdrs_size += mblk_len; 5523859Sml29623 nmp = nmp->b_cont; 5533859Sml29623 } 5543859Sml29623 ip_buf = hdrs_buf; 5553859Sml29623 ip_buf += eth_hdr_size; 5563859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5573859Sml29623 } 5583859Sml29623 5593859Sml29623 ipproto = ip_buf[9]; 5603859Sml29623 5613859Sml29623 tmp = (uint64_t)iph_len; 5623859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 5633859Sml29623 tmp = (uint64_t)(eth_hdr_size >> 1); 5643859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 5653859Sml29623 5663859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 5676929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 5686929Smisaki "tmp 0x%x", 5696929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 5706929Smisaki ipproto, tmp)); 5713859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 5726929Smisaki "value 0x%llx", hdrp->value)); 5733859Sml29623 5743859Sml29623 break; 5753859Sml29623 5763859Sml29623 case ETHERTYPE_IPV6: 5773859Sml29623 hdrs_size = 0; 5783859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 5793859Sml29623 while ((nmp) && (hdrs_size < 5806929Smisaki sizeof (hdrs_buf))) { 5813859Sml29623 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 5823859Sml29623 if (mblk_len >= 5836929Smisaki (sizeof (hdrs_buf) - hdrs_size)) 5843859Sml29623 mblk_len = sizeof (hdrs_buf) - 5856929Smisaki hdrs_size; 5863859Sml29623 bcopy(nmp->b_rptr, 5876929Smisaki &hdrs_buf[hdrs_size], mblk_len); 5883859Sml29623 hdrs_size += mblk_len; 5893859Sml29623 nmp = nmp->b_cont; 5903859Sml29623 } 5913859Sml29623 ip_buf = hdrs_buf; 5923859Sml29623 ip_buf += eth_hdr_size; 5933859Sml29623 5943859Sml29623 tmp = 1ull; 5953859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 5963859Sml29623 5973859Sml29623 tmp 
= (eth_hdr_size >> 1); 5983859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 5993859Sml29623 6003859Sml29623 /* byte 6 is the next header protocol */ 6013859Sml29623 ipproto = ip_buf[6]; 6023859Sml29623 6033859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 6046929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 6056929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 6066929Smisaki ipproto)); 6073859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 6086929Smisaki "value 0x%llx", hdrp->value)); 6093859Sml29623 6103859Sml29623 break; 6113859Sml29623 6123859Sml29623 default: 6133859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 6143859Sml29623 goto fill_tx_header_done; 6153859Sml29623 } 6163859Sml29623 6173859Sml29623 switch (ipproto) { 6183859Sml29623 case IPPROTO_TCP: 6193859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6206611Sml29623 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 6213859Sml29623 if (l4_cksum) { 6226611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 6236611Sml29623 hdrp->value |= 6246611Sml29623 (((uint64_t)(start_offset >> 1)) << 6256611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 6266611Sml29623 hdrp->value |= 6276611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 6286611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 6296611Sml29623 6303859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6316611Sml29623 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 6326611Sml29623 "value 0x%llx", hdrp->value)); 6333859Sml29623 } 6343859Sml29623 6353859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 6366611Sml29623 "value 0x%llx", hdrp->value)); 6373859Sml29623 break; 6383859Sml29623 6393859Sml29623 case IPPROTO_UDP: 6403859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 6413859Sml29623 if (l4_cksum) { 6426611Sml29623 if (!nxge_cksum_offload) { 6436611Sml29623 uint16_t *up; 6446611Sml29623 uint16_t cksum; 6456611Sml29623 t_uscalar_t stuff_len; 
6466611Sml29623 6476611Sml29623 /* 6486611Sml29623 * The checksum field has the 6496611Sml29623 * partial checksum. 6506611Sml29623 * IP_CSUM() macro calls ip_cksum() which 6516611Sml29623 * can add in the partial checksum. 6526611Sml29623 */ 6536611Sml29623 cksum = IP_CSUM(mp, start_offset, 0); 6546611Sml29623 stuff_len = stuff_offset; 6556611Sml29623 nmp = mp; 6566611Sml29623 mblk_len = MBLKL(nmp); 6576611Sml29623 while ((nmp != NULL) && 6586611Sml29623 (mblk_len < stuff_len)) { 6596611Sml29623 stuff_len -= mblk_len; 6606611Sml29623 nmp = nmp->b_cont; 6616611Sml29623 } 6626611Sml29623 ASSERT(nmp); 6636611Sml29623 up = (uint16_t *)(nmp->b_rptr + stuff_len); 6646611Sml29623 6656611Sml29623 *up = cksum; 6666611Sml29623 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 6676611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6686611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 6696611Sml29623 "use sw cksum " 6706611Sml29623 "write to $%p cksum 0x%x content up 0x%x", 6716611Sml29623 stuff_len, 6726611Sml29623 up, 6736611Sml29623 cksum, 6746611Sml29623 *up)); 6756611Sml29623 } else { 6766611Sml29623 /* Hardware will compute the full checksum */ 6776611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 6786611Sml29623 hdrp->value |= 6796611Sml29623 (((uint64_t)(start_offset >> 1)) << 6806611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 6816611Sml29623 hdrp->value |= 6826611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 6836611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 6846611Sml29623 6856611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6866611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 6876611Sml29623 " use partial checksum " 6886611Sml29623 "cksum 0x%x ", 6896611Sml29623 "value 0x%llx", 6906611Sml29623 stuff_offset, 6916611Sml29623 IP_CSUM(mp, start_offset, 0), 6926611Sml29623 hdrp->value)); 6936611Sml29623 } 6943859Sml29623 } 6956611Sml29623 6963859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6976929Smisaki "==> nxge_tx_pkt_hdr_init: UDP" 6986929Smisaki "value 0x%llx", hdrp->value)); 6993859Sml29623 
break; 7003859Sml29623 7013859Sml29623 default: 7023859Sml29623 goto fill_tx_header_done; 7033859Sml29623 } 7043859Sml29623 7053859Sml29623 fill_tx_header_done: 7063859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7076929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d " 7086929Smisaki "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 7093859Sml29623 7103859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 7113859Sml29623 } 7123859Sml29623 7133859Sml29623 /*ARGSUSED*/ 7143859Sml29623 p_mblk_t 7153859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 7163859Sml29623 { 7173859Sml29623 p_mblk_t newmp = NULL; 7183859Sml29623 7193859Sml29623 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 7203859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7216929Smisaki "<== nxge_tx_pkt_header_reserve: allocb failed")); 7223859Sml29623 return (NULL); 7233859Sml29623 } 7243859Sml29623 7253859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7266929Smisaki "==> nxge_tx_pkt_header_reserve: get new mp")); 7273859Sml29623 DB_TYPE(newmp) = M_DATA; 7283859Sml29623 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 7293859Sml29623 linkb(newmp, mp); 7303859Sml29623 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 7313859Sml29623 7323859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 7336929Smisaki "b_rptr $%p b_wptr $%p", 7346929Smisaki newmp->b_rptr, newmp->b_wptr)); 7353859Sml29623 7363859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7376929Smisaki "<== nxge_tx_pkt_header_reserve: use new mp")); 7383859Sml29623 7393859Sml29623 return (newmp); 7403859Sml29623 } 7413859Sml29623 7423859Sml29623 int 7433859Sml29623 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 7443859Sml29623 { 7453859Sml29623 uint_t nmblks; 7463859Sml29623 ssize_t len; 7473859Sml29623 uint_t pkt_len; 7483859Sml29623 p_mblk_t nmp, bmp, tmp; 7493859Sml29623 uint8_t *b_wptr; 7503859Sml29623 7513859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7526929Smisaki "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 
7536929Smisaki "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 7543859Sml29623 7553859Sml29623 nmp = mp; 7563859Sml29623 bmp = mp; 7573859Sml29623 nmblks = 0; 7583859Sml29623 pkt_len = 0; 7593859Sml29623 *tot_xfer_len_p = 0; 7603859Sml29623 7613859Sml29623 while (nmp) { 7623859Sml29623 len = MBLKL(nmp); 7633859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7646929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7656929Smisaki len, pkt_len, nmblks, 7666929Smisaki *tot_xfer_len_p)); 7673859Sml29623 7683859Sml29623 if (len <= 0) { 7693859Sml29623 bmp = nmp; 7703859Sml29623 nmp = nmp->b_cont; 7713859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7726929Smisaki "==> nxge_tx_pkt_nmblocks: " 7736929Smisaki "len (0) pkt_len %d nmblks %d", 7746929Smisaki pkt_len, nmblks)); 7753859Sml29623 continue; 7763859Sml29623 } 7773859Sml29623 7783859Sml29623 *tot_xfer_len_p += len; 7793859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7806929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7816929Smisaki len, pkt_len, nmblks, 7826929Smisaki *tot_xfer_len_p)); 7833859Sml29623 7843859Sml29623 if (len < nxge_bcopy_thresh) { 7853859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7866929Smisaki "==> nxge_tx_pkt_nmblocks: " 7876929Smisaki "len %d (< thresh) pkt_len %d nmblks %d", 7886929Smisaki len, pkt_len, nmblks)); 7893859Sml29623 if (pkt_len == 0) 7903859Sml29623 nmblks++; 7913859Sml29623 pkt_len += len; 7923859Sml29623 if (pkt_len >= nxge_bcopy_thresh) { 7933859Sml29623 pkt_len = 0; 7943859Sml29623 len = 0; 7953859Sml29623 nmp = bmp; 7963859Sml29623 } 7973859Sml29623 } else { 7983859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7996929Smisaki "==> nxge_tx_pkt_nmblocks: " 8006929Smisaki "len %d (> thresh) pkt_len %d nmblks %d", 8016929Smisaki len, pkt_len, nmblks)); 8023859Sml29623 pkt_len = 0; 8033859Sml29623 nmblks++; 8043859Sml29623 /* 8053859Sml29623 * Hardware limits the transfer length to 4K. 
8063859Sml29623 * If len is more than 4K, we need to break 8073859Sml29623 * it up to at most 2 more blocks. 8083859Sml29623 */ 8093859Sml29623 if (len > TX_MAX_TRANSFER_LENGTH) { 8103859Sml29623 uint32_t nsegs; 8113859Sml29623 8126495Sspeer nsegs = 1; 8133859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8146929Smisaki "==> nxge_tx_pkt_nmblocks: " 8156929Smisaki "len %d pkt_len %d nmblks %d nsegs %d", 8166929Smisaki len, pkt_len, nmblks, nsegs)); 8173859Sml29623 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 8183859Sml29623 ++nsegs; 8193859Sml29623 } 8203859Sml29623 do { 8213859Sml29623 b_wptr = nmp->b_rptr + 8226929Smisaki TX_MAX_TRANSFER_LENGTH; 8233859Sml29623 nmp->b_wptr = b_wptr; 8243859Sml29623 if ((tmp = dupb(nmp)) == NULL) { 8253859Sml29623 return (0); 8263859Sml29623 } 8273859Sml29623 tmp->b_rptr = b_wptr; 8283859Sml29623 tmp->b_wptr = nmp->b_wptr; 8293859Sml29623 tmp->b_cont = nmp->b_cont; 8303859Sml29623 nmp->b_cont = tmp; 8313859Sml29623 nmblks++; 8323859Sml29623 if (--nsegs) { 8333859Sml29623 nmp = tmp; 8343859Sml29623 } 8353859Sml29623 } while (nsegs); 8363859Sml29623 nmp = tmp; 8373859Sml29623 } 8383859Sml29623 } 8393859Sml29623 8403859Sml29623 /* 8413859Sml29623 * Hardware limits the transmit gather pointers to 15. 
8423859Sml29623 */ 8433859Sml29623 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 8446929Smisaki TX_MAX_GATHER_POINTERS) { 8453859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8466929Smisaki "==> nxge_tx_pkt_nmblocks: pull msg - " 8476929Smisaki "len %d pkt_len %d nmblks %d", 8486929Smisaki len, pkt_len, nmblks)); 8493859Sml29623 /* Pull all message blocks from b_cont */ 8503859Sml29623 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 8513859Sml29623 return (0); 8523859Sml29623 } 8533859Sml29623 freemsg(nmp->b_cont); 8543859Sml29623 nmp->b_cont = tmp; 8553859Sml29623 pkt_len = 0; 8563859Sml29623 } 8573859Sml29623 bmp = nmp; 8583859Sml29623 nmp = nmp->b_cont; 8593859Sml29623 } 8603859Sml29623 8613859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8626929Smisaki "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 8636929Smisaki "nmblks %d len %d tot_xfer_len %d", 8646929Smisaki mp->b_rptr, mp->b_wptr, nmblks, 8656929Smisaki MBLKL(mp), *tot_xfer_len_p)); 8663859Sml29623 8673859Sml29623 return (nmblks); 8683859Sml29623 } 8693859Sml29623 8703859Sml29623 boolean_t 8713859Sml29623 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 8723859Sml29623 { 8733859Sml29623 boolean_t status = B_TRUE; 8743859Sml29623 p_nxge_dma_common_t tx_desc_dma_p; 8753859Sml29623 nxge_dma_common_t desc_area; 8763859Sml29623 p_tx_desc_t tx_desc_ring_vp; 8773859Sml29623 p_tx_desc_t tx_desc_p; 8783859Sml29623 p_tx_desc_t tx_desc_pp; 8793859Sml29623 tx_desc_t r_tx_desc; 8803859Sml29623 p_tx_msg_t tx_msg_ring; 8813859Sml29623 p_tx_msg_t tx_msg_p; 8823859Sml29623 npi_handle_t handle; 8833859Sml29623 tx_ring_hdl_t tx_head; 8843859Sml29623 uint32_t pkt_len; 8853859Sml29623 uint_t tx_rd_index; 8863859Sml29623 uint16_t head_index, tail_index; 8873859Sml29623 uint8_t tdc; 8883859Sml29623 boolean_t head_wrap, tail_wrap; 8893859Sml29623 p_nxge_tx_ring_stats_t tdc_stats; 8903859Sml29623 int rc; 8913859Sml29623 8923859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 
8933859Sml29623 8943859Sml29623 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 8956929Smisaki (nmblks != 0)); 8963859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 8976929Smisaki "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 8986929Smisaki tx_ring_p->descs_pending, nxge_reclaim_pending, 8996929Smisaki nmblks)); 9003859Sml29623 if (!status) { 9013859Sml29623 tx_desc_dma_p = &tx_ring_p->tdc_desc; 9023859Sml29623 desc_area = tx_ring_p->tdc_desc; 9033859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 9043859Sml29623 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 9053859Sml29623 tx_desc_ring_vp = 9066929Smisaki (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 9073859Sml29623 tx_rd_index = tx_ring_p->rd_index; 9083859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 9093859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 9103859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index]; 9113859Sml29623 tdc = tx_ring_p->tdc; 9123859Sml29623 tdc_stats = tx_ring_p->tdc_stats; 9133859Sml29623 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 9143859Sml29623 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 9153859Sml29623 } 9163859Sml29623 9173859Sml29623 tail_index = tx_ring_p->wr_index; 9183859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap; 9193859Sml29623 9203859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9216929Smisaki "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 9226929Smisaki "tail_index %d tail_wrap %d " 9236929Smisaki "tx_desc_p $%p ($%p) ", 9246929Smisaki tdc, tx_rd_index, tail_index, tail_wrap, 9256929Smisaki tx_desc_p, (*(uint64_t *)tx_desc_p))); 9263859Sml29623 /* 9273859Sml29623 * Read the hardware maintained transmit head 9283859Sml29623 * and wrap around bit. 
9293859Sml29623 */ 9303859Sml29623 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 9313859Sml29623 head_index = tx_head.bits.ldw.head; 9323859Sml29623 head_wrap = tx_head.bits.ldw.wrap; 9333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9346929Smisaki "==> nxge_txdma_reclaim: " 9356929Smisaki "tx_rd_index %d tail %d tail_wrap %d " 9366929Smisaki "head %d wrap %d", 9376929Smisaki tx_rd_index, tail_index, tail_wrap, 9386929Smisaki head_index, head_wrap)); 9393859Sml29623 9403859Sml29623 if (head_index == tail_index) { 9413859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap, 9426929Smisaki tail_index, tail_wrap) && 9436929Smisaki (head_index == tx_rd_index)) { 9443859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9456929Smisaki "==> nxge_txdma_reclaim: EMPTY")); 9463859Sml29623 return (B_TRUE); 9473859Sml29623 } 9483859Sml29623 9493859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9506929Smisaki "==> nxge_txdma_reclaim: Checking " 9516929Smisaki "if ring full")); 9523859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 9536929Smisaki tail_wrap)) { 9543859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9556929Smisaki "==> nxge_txdma_reclaim: full")); 9563859Sml29623 return (B_FALSE); 9573859Sml29623 } 9583859Sml29623 } 9593859Sml29623 9603859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9616929Smisaki "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 9623859Sml29623 9633859Sml29623 tx_desc_pp = &r_tx_desc; 9643859Sml29623 while ((tx_rd_index != head_index) && 9656929Smisaki (tx_ring_p->descs_pending != 0)) { 9663859Sml29623 9673859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9686929Smisaki "==> nxge_txdma_reclaim: Checking if pending")); 9693859Sml29623 9703859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9716929Smisaki "==> nxge_txdma_reclaim: " 9726929Smisaki "descs_pending %d ", 9736929Smisaki tx_ring_p->descs_pending)); 9743859Sml29623 9753859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9766929Smisaki "==> nxge_txdma_reclaim: " 9776929Smisaki "(tx_rd_index %d head_index %d 
" 9786929Smisaki "(tx_desc_p $%p)", 9796929Smisaki tx_rd_index, head_index, 9806929Smisaki tx_desc_p)); 9813859Sml29623 9823859Sml29623 tx_desc_pp->value = tx_desc_p->value; 9833859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9846929Smisaki "==> nxge_txdma_reclaim: " 9856929Smisaki "(tx_rd_index %d head_index %d " 9866929Smisaki "tx_desc_p $%p (desc value 0x%llx) ", 9876929Smisaki tx_rd_index, head_index, 9886929Smisaki tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 9893859Sml29623 9903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9916929Smisaki "==> nxge_txdma_reclaim: dump desc:")); 9923859Sml29623 9933859Sml29623 pkt_len = tx_desc_pp->bits.hdw.tr_len; 9943859Sml29623 tdc_stats->obytes += pkt_len; 9953859Sml29623 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 9963859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9976929Smisaki "==> nxge_txdma_reclaim: pkt_len %d " 9986929Smisaki "tdc channel %d opackets %d", 9996929Smisaki pkt_len, 10006929Smisaki tdc, 10016929Smisaki tdc_stats->opackets)); 10023859Sml29623 10033859Sml29623 if (tx_msg_p->flags.dma_type == USE_DVMA) { 10043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10056929Smisaki "tx_desc_p = $%p " 10066929Smisaki "tx_desc_pp = $%p " 10076929Smisaki "index = %d", 10086929Smisaki tx_desc_p, 10096929Smisaki tx_desc_pp, 10106929Smisaki tx_ring_p->rd_index)); 10113859Sml29623 (void) dvma_unload(tx_msg_p->dvma_handle, 10126929Smisaki 0, -1); 10133859Sml29623 tx_msg_p->dvma_handle = NULL; 10143859Sml29623 if (tx_ring_p->dvma_wr_index == 10156929Smisaki tx_ring_p->dvma_wrap_mask) { 10163859Sml29623 tx_ring_p->dvma_wr_index = 0; 10173859Sml29623 } else { 10183859Sml29623 tx_ring_p->dvma_wr_index++; 10193859Sml29623 } 10203859Sml29623 tx_ring_p->dvma_pending--; 10213859Sml29623 } else if (tx_msg_p->flags.dma_type == 10226929Smisaki USE_DMA) { 10233859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10246929Smisaki "==> nxge_txdma_reclaim: " 10256929Smisaki "USE DMA")); 10263859Sml29623 if (rc = ddi_dma_unbind_handle 10276929Smisaki 
(tx_msg_p->dma_handle)) { 10283859Sml29623 cmn_err(CE_WARN, "!nxge_reclaim: " 10296929Smisaki "ddi_dma_unbind_handle " 10306929Smisaki "failed. status %d", rc); 10313859Sml29623 } 10323859Sml29623 } 10333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10346929Smisaki "==> nxge_txdma_reclaim: count packets")); 10353859Sml29623 /* 10363859Sml29623 * count a chained packet only once. 10373859Sml29623 */ 10383859Sml29623 if (tx_msg_p->tx_message != NULL) { 10393859Sml29623 freemsg(tx_msg_p->tx_message); 10403859Sml29623 tx_msg_p->tx_message = NULL; 10413859Sml29623 } 10423859Sml29623 10433859Sml29623 tx_msg_p->flags.dma_type = USE_NONE; 10443859Sml29623 tx_rd_index = tx_ring_p->rd_index; 10453859Sml29623 tx_rd_index = (tx_rd_index + 1) & 10466929Smisaki tx_ring_p->tx_wrap_mask; 10473859Sml29623 tx_ring_p->rd_index = tx_rd_index; 10483859Sml29623 tx_ring_p->descs_pending--; 10493859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 10503859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index]; 10513859Sml29623 } 10523859Sml29623 10533859Sml29623 status = (nmblks <= (tx_ring_p->tx_ring_size - 10546929Smisaki tx_ring_p->descs_pending - 10556929Smisaki TX_FULL_MARK)); 10563859Sml29623 if (status) { 10573859Sml29623 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 10583859Sml29623 } 10593859Sml29623 } else { 10603859Sml29623 status = (nmblks <= 10616929Smisaki (tx_ring_p->tx_ring_size - 10626929Smisaki tx_ring_p->descs_pending - 10636929Smisaki TX_FULL_MARK)); 10643859Sml29623 } 10653859Sml29623 10663859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10676929Smisaki "<== nxge_txdma_reclaim status = 0x%08x", status)); 10683859Sml29623 10693859Sml29623 return (status); 10703859Sml29623 } 10713859Sml29623 10726495Sspeer /* 10736495Sspeer * nxge_tx_intr 10746495Sspeer * 10756495Sspeer * Process a TDC interrupt 10766495Sspeer * 10776495Sspeer * Arguments: 10786495Sspeer * arg1 A Logical Device state Vector (LSV) data structure. 
10796495Sspeer * arg2 nxge_t * 10806495Sspeer * 10816495Sspeer * Notes: 10826495Sspeer * 10836495Sspeer * NPI/NXGE function calls: 10846495Sspeer * npi_txdma_control_status() 10856495Sspeer * npi_intr_ldg_mgmt_set() 10866495Sspeer * 10876495Sspeer * nxge_tx_err_evnts() 10886495Sspeer * nxge_txdma_reclaim() 10896495Sspeer * 10906495Sspeer * Registers accessed: 10916495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 10926495Sspeer * PIO_LDSV 10936495Sspeer * 10946495Sspeer * Context: 10956495Sspeer * Any domain 10966495Sspeer */ 10973859Sml29623 uint_t 10983859Sml29623 nxge_tx_intr(void *arg1, void *arg2) 10993859Sml29623 { 11003859Sml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 11013859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg2; 11023859Sml29623 p_nxge_ldg_t ldgp; 11033859Sml29623 uint8_t channel; 11043859Sml29623 uint32_t vindex; 11053859Sml29623 npi_handle_t handle; 11063859Sml29623 tx_cs_t cs; 11073859Sml29623 p_tx_ring_t *tx_rings; 11083859Sml29623 p_tx_ring_t tx_ring_p; 11093859Sml29623 npi_status_t rs = NPI_SUCCESS; 11103859Sml29623 uint_t serviced = DDI_INTR_UNCLAIMED; 11113859Sml29623 nxge_status_t status = NXGE_OK; 11123859Sml29623 11133859Sml29623 if (ldvp == NULL) { 11143859Sml29623 NXGE_DEBUG_MSG((NULL, INT_CTL, 11156929Smisaki "<== nxge_tx_intr: nxgep $%p ldvp $%p", 11166929Smisaki nxgep, ldvp)); 11173859Sml29623 return (DDI_INTR_UNCLAIMED); 11183859Sml29623 } 11193859Sml29623 11203859Sml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 11213859Sml29623 nxgep = ldvp->nxgep; 11223859Sml29623 } 11233859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11246929Smisaki "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 11256929Smisaki nxgep, ldvp)); 11266713Sspeer 11276713Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 11286713Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 11296713Sspeer NXGE_DEBUG_MSG((nxgep, INT_CTL, 11306713Sspeer "<== nxge_tx_intr: interface not started or intialized")); 11316713Sspeer return (DDI_INTR_CLAIMED); 
11326713Sspeer } 11336713Sspeer 11343859Sml29623 /* 11353859Sml29623 * This interrupt handler is for a specific 11363859Sml29623 * transmit dma channel. 11373859Sml29623 */ 11383859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11393859Sml29623 /* Get the control and status for this channel. */ 11403859Sml29623 channel = ldvp->channel; 11413859Sml29623 ldgp = ldvp->ldgp; 11423859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11436929Smisaki "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 11446929Smisaki "channel %d", 11456929Smisaki nxgep, ldvp, channel)); 11463859Sml29623 11473859Sml29623 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 11483859Sml29623 vindex = ldvp->vdma_index; 11493859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11506929Smisaki "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 11516929Smisaki channel, vindex, rs)); 11523859Sml29623 if (!rs && cs.bits.ldw.mk) { 11533859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11546929Smisaki "==> nxge_tx_intr:channel %d ring index %d " 11556929Smisaki "status 0x%08x (mk bit set)", 11566929Smisaki channel, vindex, rs)); 11573859Sml29623 tx_rings = nxgep->tx_rings->rings; 11583859Sml29623 tx_ring_p = tx_rings[vindex]; 11593859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11606929Smisaki "==> nxge_tx_intr:channel %d ring index %d " 11616929Smisaki "status 0x%08x (mk bit set, calling reclaim)", 11626929Smisaki channel, vindex, rs)); 11633859Sml29623 11643859Sml29623 MUTEX_ENTER(&tx_ring_p->lock); 11653859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 11663859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 11673859Sml29623 mac_tx_update(nxgep->mach); 11683859Sml29623 } 11693859Sml29623 11703859Sml29623 /* 11713859Sml29623 * Process other transmit control and status. 11723859Sml29623 * Check the ldv state. 
11733859Sml29623 */ 11743859Sml29623 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 11753859Sml29623 /* 11763859Sml29623 * Rearm this logical group if this is a single device 11773859Sml29623 * group. 11783859Sml29623 */ 11793859Sml29623 if (ldgp->nldvs == 1) { 11803859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, 11816929Smisaki "==> nxge_tx_intr: rearm")); 11823859Sml29623 if (status == NXGE_OK) { 11836495Sspeer if (isLDOMguest(nxgep)) { 11846495Sspeer nxge_hio_ldgimgn(nxgep, ldgp); 11856495Sspeer } else { 11866495Sspeer (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 11876495Sspeer B_TRUE, ldgp->ldg_timer); 11886495Sspeer } 11893859Sml29623 } 11903859Sml29623 } 11913859Sml29623 11923859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 11933859Sml29623 serviced = DDI_INTR_CLAIMED; 11943859Sml29623 return (serviced); 11953859Sml29623 } 11963859Sml29623 11973859Sml29623 void 11986495Sspeer nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 11993859Sml29623 { 12003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 12013859Sml29623 12023859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 12033859Sml29623 12043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 12053859Sml29623 } 12063859Sml29623 12073859Sml29623 void 12086495Sspeer nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 12093859Sml29623 { 12103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 12113859Sml29623 12123859Sml29623 (void) nxge_txdma_stop(nxgep); 12133859Sml29623 12143859Sml29623 (void) nxge_fixup_txdma_rings(nxgep); 12153859Sml29623 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 12163859Sml29623 (void) nxge_tx_mac_enable(nxgep); 12173859Sml29623 (void) nxge_txdma_hw_kick(nxgep); 12183859Sml29623 12193859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 12203859Sml29623 } 12213859Sml29623 12226495Sspeer npi_status_t 12236495Sspeer nxge_txdma_channel_disable( 12246495Sspeer nxge_t *nxge, 
12256495Sspeer int channel) 12266495Sspeer { 12276495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 12286495Sspeer npi_status_t rs; 12296495Sspeer tdmc_intr_dbg_t intr_dbg; 12306495Sspeer 12316495Sspeer /* 12326495Sspeer * Stop the dma channel and wait for the stop-done. 12336495Sspeer * If the stop-done bit is not present, then force 12346495Sspeer * an error so TXC will stop. 12356495Sspeer * All channels bound to this port need to be stopped 12366495Sspeer * and reset after injecting an interrupt error. 12376495Sspeer */ 12386495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12396495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12406929Smisaki "==> nxge_txdma_channel_disable(%d) " 12416929Smisaki "rs 0x%x", channel, rs)); 12426495Sspeer if (rs != NPI_SUCCESS) { 12436495Sspeer /* Inject any error */ 12446495Sspeer intr_dbg.value = 0; 12456495Sspeer intr_dbg.bits.ldw.nack_pref = 1; 12466495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12476929Smisaki "==> nxge_txdma_hw_mode: " 12486929Smisaki "channel %d (stop failed 0x%x) " 12496929Smisaki "(inject err)", rs, channel)); 12506495Sspeer (void) npi_txdma_inj_int_error_set( 12516929Smisaki handle, channel, &intr_dbg); 12526495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12536495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12546929Smisaki "==> nxge_txdma_hw_mode: " 12556929Smisaki "channel %d (stop again 0x%x) " 12566929Smisaki "(after inject err)", 12576929Smisaki rs, channel)); 12586495Sspeer } 12596495Sspeer 12606495Sspeer return (rs); 12616495Sspeer } 12626495Sspeer 12636495Sspeer /* 12646495Sspeer * nxge_txdma_hw_mode 12656495Sspeer * 12666495Sspeer * Toggle all TDCs on (enable) or off (disable). 12676495Sspeer * 12686495Sspeer * Arguments: 12696495Sspeer * nxgep 12706495Sspeer * enable Enable or disable a TDC. 
12716495Sspeer * 12726495Sspeer * Notes: 12736495Sspeer * 12746495Sspeer * NPI/NXGE function calls: 12756495Sspeer * npi_txdma_channel_enable(TX_CS) 12766495Sspeer * npi_txdma_channel_disable(TX_CS) 12776495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 12786495Sspeer * 12796495Sspeer * Registers accessed: 12806495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 12816495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 12826495Sspeer * 12836495Sspeer * Context: 12846495Sspeer * Any domain 12856495Sspeer */ 12863859Sml29623 nxge_status_t 12873859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12883859Sml29623 { 12896495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 12906495Sspeer 12916495Sspeer npi_handle_t handle; 12926495Sspeer nxge_status_t status; 12936495Sspeer npi_status_t rs; 12946495Sspeer int tdc; 12953859Sml29623 12963859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 12976929Smisaki "==> nxge_txdma_hw_mode: enable mode %d", enable)); 12983859Sml29623 12993859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 13003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 13016929Smisaki "<== nxge_txdma_mode: not initialized")); 13023859Sml29623 return (NXGE_ERROR); 13033859Sml29623 } 13043859Sml29623 13056495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 13063859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 13076495Sspeer "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 13083859Sml29623 return (NXGE_ERROR); 13093859Sml29623 } 13103859Sml29623 13116495Sspeer /* Enable or disable all of the TDCs owned by us. 
*/ 13123859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13136495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 13146495Sspeer if ((1 << tdc) & set->owned.map) { 13156495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 13166495Sspeer if (ring) { 13176495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13186495Sspeer "==> nxge_txdma_hw_mode: channel %d", tdc)); 13196495Sspeer if (enable) { 13206495Sspeer rs = npi_txdma_channel_enable 13216495Sspeer (handle, tdc); 13223859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13236495Sspeer "==> nxge_txdma_hw_mode: " 13246495Sspeer "channel %d (enable) rs 0x%x", 13256495Sspeer tdc, rs)); 13266495Sspeer } else { 13276495Sspeer rs = nxge_txdma_channel_disable 13286495Sspeer (nxgep, tdc); 13293859Sml29623 } 13303859Sml29623 } 13313859Sml29623 } 13323859Sml29623 } 13333859Sml29623 13343859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 13353859Sml29623 13363859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13376929Smisaki "<== nxge_txdma_hw_mode: status 0x%x", status)); 13383859Sml29623 13393859Sml29623 return (status); 13403859Sml29623 } 13413859Sml29623 13423859Sml29623 void 13433859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 13443859Sml29623 { 13453859Sml29623 npi_handle_t handle; 13463859Sml29623 13473859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 13486929Smisaki "==> nxge_txdma_enable_channel: channel %d", channel)); 13493859Sml29623 13503859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13513859Sml29623 /* enable the transmit dma channels */ 13523859Sml29623 (void) npi_txdma_channel_enable(handle, channel); 13533859Sml29623 13543859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 13553859Sml29623 } 13563859Sml29623 13573859Sml29623 void 13583859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 13593859Sml29623 { 13603859Sml29623 npi_handle_t handle; 13613859Sml29623 13623859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 13636929Smisaki "==> 
nxge_txdma_disable_channel: channel %d", channel)); 13643859Sml29623 13653859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 13663859Sml29623 /* stop the transmit dma channels */ 13673859Sml29623 (void) npi_txdma_channel_disable(handle, channel); 13683859Sml29623 13693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 13703859Sml29623 } 13713859Sml29623 13726495Sspeer /* 13736495Sspeer * nxge_txdma_stop_inj_err 13746495Sspeer * 13756495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 13766495Sspeer * 13776495Sspeer * Arguments: 13786495Sspeer * nxgep 13796495Sspeer * channel The channel to stop. 13806495Sspeer * 13816495Sspeer * Notes: 13826495Sspeer * 13836495Sspeer * NPI/NXGE function calls: 13846495Sspeer * npi_txdma_channel_disable() 13856495Sspeer * npi_txdma_inj_int_error_set() 13866495Sspeer * #if defined(NXGE_DEBUG) 13876495Sspeer * nxge_txdma_regs_dump_channels(nxgep); 13886495Sspeer * #endif 13896495Sspeer * 13906495Sspeer * Registers accessed: 13916495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 13926495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 13936495Sspeer * 13946495Sspeer * Context: 13956495Sspeer * Any domain 13966495Sspeer */ 13973859Sml29623 int 13983859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 13993859Sml29623 { 14003859Sml29623 npi_handle_t handle; 14013859Sml29623 tdmc_intr_dbg_t intr_dbg; 14023859Sml29623 int status; 14033859Sml29623 npi_status_t rs = NPI_SUCCESS; 14043859Sml29623 14053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 14063859Sml29623 /* 14073859Sml29623 * Stop the dma channel waits for the stop done. 14083859Sml29623 * If the stop done bit is not set, then create 14093859Sml29623 * an error. 14103859Sml29623 */ 14113859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 14123859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14133859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 14143859Sml29623 if (status == NXGE_OK) { 14153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14166929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): " 14176929Smisaki "stopped OK", channel)); 14183859Sml29623 return (status); 14193859Sml29623 } 14203859Sml29623 14213859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14226929Smisaki "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 14236929Smisaki "injecting error", channel, rs)); 14243859Sml29623 /* Inject any error */ 14253859Sml29623 intr_dbg.value = 0; 14263859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 14273859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 14283859Sml29623 14293859Sml29623 /* Stop done bit will be set as a result of error injection */ 14303859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14313859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 14323859Sml29623 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 14333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14346929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): " 14356929Smisaki "stopped OK ", channel)); 14363859Sml29623 return (status); 14373859Sml29623 } 14383859Sml29623 14393859Sml29623 #if defined(NXGE_DEBUG) 14403859Sml29623 nxge_txdma_regs_dump_channels(nxgep); 14413859Sml29623 #endif 14423859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14436929Smisaki "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 14446929Smisaki " (injected error but still not stopped)", channel, rs)); 14453859Sml29623 14463859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 14473859Sml29623 return (status); 14483859Sml29623 } 14493859Sml29623 14503859Sml29623 /*ARGSUSED*/ 14513859Sml29623 void 14523859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep) 14533859Sml29623 { 14546495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 14556495Sspeer int tdc; 14563859Sml29623 14573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 
14583859Sml29623 14596495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 14606495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 14616495Sspeer "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 14623859Sml29623 return; 14633859Sml29623 } 14643859Sml29623 14656495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 14666495Sspeer if ((1 << tdc) & set->owned.map) { 14676495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 14686495Sspeer if (ring) { 14696495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 14706495Sspeer "==> nxge_fixup_txdma_rings: channel %d", 14716495Sspeer tdc)); 14726495Sspeer nxge_txdma_fixup_channel(nxgep, ring, tdc); 14736495Sspeer } 14746495Sspeer } 14753859Sml29623 } 14763859Sml29623 14773859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 14783859Sml29623 } 14793859Sml29623 14803859Sml29623 /*ARGSUSED*/ 14813859Sml29623 void 14823859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 14833859Sml29623 { 14843859Sml29623 p_tx_ring_t ring_p; 14853859Sml29623 14863859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 14873859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 14883859Sml29623 if (ring_p == NULL) { 14893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 14903859Sml29623 return; 14913859Sml29623 } 14923859Sml29623 14933859Sml29623 if (ring_p->tdc != channel) { 14943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14956929Smisaki "<== nxge_txdma_fix_channel: channel not matched " 14966929Smisaki "ring tdc %d passed channel", 14976929Smisaki ring_p->tdc, channel)); 14983859Sml29623 return; 14993859Sml29623 } 15003859Sml29623 15013859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 15023859Sml29623 15033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 15043859Sml29623 } 15053859Sml29623 15063859Sml29623 /*ARGSUSED*/ 15073859Sml29623 void 15083859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 
uint16_t channel) 15093859Sml29623 { 15103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 15113859Sml29623 15123859Sml29623 if (ring_p == NULL) { 15133859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15146929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer")); 15153859Sml29623 return; 15163859Sml29623 } 15173859Sml29623 15183859Sml29623 if (ring_p->tdc != channel) { 15193859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15206929Smisaki "<== nxge_txdma_fixup_channel: channel not matched " 15216929Smisaki "ring tdc %d passed channel", 15226929Smisaki ring_p->tdc, channel)); 15233859Sml29623 return; 15243859Sml29623 } 15253859Sml29623 15263859Sml29623 MUTEX_ENTER(&ring_p->lock); 15273859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 15283859Sml29623 ring_p->rd_index = 0; 15293859Sml29623 ring_p->wr_index = 0; 15303859Sml29623 ring_p->ring_head.value = 0; 15313859Sml29623 ring_p->ring_kick_tail.value = 0; 15323859Sml29623 ring_p->descs_pending = 0; 15333859Sml29623 MUTEX_EXIT(&ring_p->lock); 15343859Sml29623 15353859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 15363859Sml29623 } 15373859Sml29623 15383859Sml29623 /*ARGSUSED*/ 15393859Sml29623 void 15403859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep) 15413859Sml29623 { 15426495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 15436495Sspeer int tdc; 15443859Sml29623 15453859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 15463859Sml29623 15476495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 15483859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15496495Sspeer "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 15503859Sml29623 return; 15513859Sml29623 } 15523859Sml29623 15536495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 15546495Sspeer if ((1 << tdc) & set->owned.map) { 15556495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 15566495Sspeer if (ring) { 15576495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 15586495Sspeer "==> 
nxge_txdma_hw_kick: channel %d", tdc)); 15596495Sspeer nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 15606495Sspeer } 15616495Sspeer } 15623859Sml29623 } 15633859Sml29623 15643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 15653859Sml29623 } 15663859Sml29623 15673859Sml29623 /*ARGSUSED*/ 15683859Sml29623 void 15693859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 15703859Sml29623 { 15713859Sml29623 p_tx_ring_t ring_p; 15723859Sml29623 15733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 15743859Sml29623 15753859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 15763859Sml29623 if (ring_p == NULL) { 15773859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15786929Smisaki " nxge_txdma_kick_channel")); 15793859Sml29623 return; 15803859Sml29623 } 15813859Sml29623 15823859Sml29623 if (ring_p->tdc != channel) { 15833859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15846929Smisaki "<== nxge_txdma_kick_channel: channel not matched " 15856929Smisaki "ring tdc %d passed channel", 15866929Smisaki ring_p->tdc, channel)); 15873859Sml29623 return; 15883859Sml29623 } 15893859Sml29623 15903859Sml29623 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 15913859Sml29623 15923859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 15933859Sml29623 } 15943859Sml29623 15953859Sml29623 /*ARGSUSED*/ 15963859Sml29623 void 15973859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 15983859Sml29623 { 15993859Sml29623 16003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 16013859Sml29623 16023859Sml29623 if (ring_p == NULL) { 16033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 16046929Smisaki "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 16053859Sml29623 return; 16063859Sml29623 } 16073859Sml29623 16083859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 16093859Sml29623 } 16103859Sml29623 16116495Sspeer /* 
16126495Sspeer * nxge_check_tx_hang 16136495Sspeer * 16146495Sspeer * Check the state of all TDCs belonging to nxgep. 16156495Sspeer * 16166495Sspeer * Arguments: 16176495Sspeer * nxgep 16186495Sspeer * 16196495Sspeer * Notes: 16206495Sspeer * Called by nxge_hw.c:nxge_check_hw_state(). 16216495Sspeer * 16226495Sspeer * NPI/NXGE function calls: 16236495Sspeer * 16246495Sspeer * Registers accessed: 16256495Sspeer * 16266495Sspeer * Context: 16276495Sspeer * Any domain 16286495Sspeer */ 16293859Sml29623 /*ARGSUSED*/ 16303859Sml29623 void 16313859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep) 16323859Sml29623 { 16333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 16343859Sml29623 16356713Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 16366713Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 16376713Sspeer goto nxge_check_tx_hang_exit; 16386713Sspeer } 16396713Sspeer 16403859Sml29623 /* 16413859Sml29623 * Needs inputs from hardware for regs: 16423859Sml29623 * head index had not moved since last timeout. 16433859Sml29623 * packets not transmitted or stuffed registers. 16443859Sml29623 */ 16453859Sml29623 if (nxge_txdma_hung(nxgep)) { 16463859Sml29623 nxge_fixup_hung_txdma_rings(nxgep); 16473859Sml29623 } 16486713Sspeer 16496713Sspeer nxge_check_tx_hang_exit: 16503859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 16513859Sml29623 } 16523859Sml29623 16536495Sspeer /* 16546495Sspeer * nxge_txdma_hung 16556495Sspeer * 16566495Sspeer * Reset a TDC. 16576495Sspeer * 16586495Sspeer * Arguments: 16596495Sspeer * nxgep 16606495Sspeer * channel The channel to reset. 16616495Sspeer * reg_data The current TX_CS. 
16626495Sspeer * 16636495Sspeer * Notes: 16646495Sspeer * Called by nxge_check_tx_hang() 16656495Sspeer * 16666495Sspeer * NPI/NXGE function calls: 16676495Sspeer * nxge_txdma_channel_hung() 16686495Sspeer * 16696495Sspeer * Registers accessed: 16706495Sspeer * 16716495Sspeer * Context: 16726495Sspeer * Any domain 16736495Sspeer */ 16743859Sml29623 int 16753859Sml29623 nxge_txdma_hung(p_nxge_t nxgep) 16763859Sml29623 { 1677*7812SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->tx_set; 1678*7812SMichael.Speer@Sun.COM int tdc; 1679*7812SMichael.Speer@Sun.COM boolean_t shared; 16803859Sml29623 16813859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 16826495Sspeer 16836495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 16843859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 16856495Sspeer "<== nxge_txdma_hung: NULL ring pointer(s)")); 16863859Sml29623 return (B_FALSE); 16873859Sml29623 } 16883859Sml29623 16896495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1690*7812SMichael.Speer@Sun.COM /* 1691*7812SMichael.Speer@Sun.COM * Grab the shared state of the TDC. 1692*7812SMichael.Speer@Sun.COM */ 1693*7812SMichael.Speer@Sun.COM if (isLDOMservice(nxgep)) { 1694*7812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd = 1695*7812SMichael.Speer@Sun.COM (nxge_hio_data_t *)nxgep->nxge_hw_p->hio; 1696*7812SMichael.Speer@Sun.COM 1697*7812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 1698*7812SMichael.Speer@Sun.COM shared = nxgep->tdc_is_shared[tdc]; 1699*7812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 1700*7812SMichael.Speer@Sun.COM } else { 1701*7812SMichael.Speer@Sun.COM shared = B_FALSE; 1702*7812SMichael.Speer@Sun.COM } 1703*7812SMichael.Speer@Sun.COM 1704*7812SMichael.Speer@Sun.COM /* 1705*7812SMichael.Speer@Sun.COM * Now, process continue to process. 
1706*7812SMichael.Speer@Sun.COM */ 1707*7812SMichael.Speer@Sun.COM if (((1 << tdc) & set->owned.map) && !shared) { 17086495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 17096495Sspeer if (ring) { 17106495Sspeer if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 17116495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 17126495Sspeer "==> nxge_txdma_hung: TDC %d hung", 17136495Sspeer tdc)); 17146495Sspeer return (B_TRUE); 17156495Sspeer } 17166495Sspeer } 17173859Sml29623 } 17183859Sml29623 } 17193859Sml29623 17203859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 17213859Sml29623 17223859Sml29623 return (B_FALSE); 17233859Sml29623 } 17243859Sml29623 17256495Sspeer /* 17266495Sspeer * nxge_txdma_channel_hung 17276495Sspeer * 17286495Sspeer * Reset a TDC. 17296495Sspeer * 17306495Sspeer * Arguments: 17316495Sspeer * nxgep 17326495Sspeer * ring <channel>'s ring. 17336495Sspeer * channel The channel to reset. 17346495Sspeer * 17356495Sspeer * Notes: 17366495Sspeer * Called by nxge_txdma.c:nxge_txdma_hung() 17376495Sspeer * 17386495Sspeer * NPI/NXGE function calls: 17396495Sspeer * npi_txdma_ring_head_get() 17406495Sspeer * 17416495Sspeer * Registers accessed: 17426495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 17436495Sspeer * 17446495Sspeer * Context: 17456495Sspeer * Any domain 17466495Sspeer */ 17473859Sml29623 int 17483859Sml29623 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 17493859Sml29623 { 17503859Sml29623 uint16_t head_index, tail_index; 17513859Sml29623 boolean_t head_wrap, tail_wrap; 17523859Sml29623 npi_handle_t handle; 17533859Sml29623 tx_ring_hdl_t tx_head; 17543859Sml29623 uint_t tx_rd_index; 17553859Sml29623 17563859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 17573859Sml29623 17583859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 17593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17606929Smisaki "==> nxge_txdma_channel_hung: channel %d", channel)); 17613859Sml29623 
MUTEX_ENTER(&tx_ring_p->lock); 17623859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 17633859Sml29623 17643859Sml29623 tail_index = tx_ring_p->wr_index; 17653859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap; 17663859Sml29623 tx_rd_index = tx_ring_p->rd_index; 17673859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 17683859Sml29623 17693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17706929Smisaki "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 17716929Smisaki "tail_index %d tail_wrap %d ", 17726929Smisaki channel, tx_rd_index, tail_index, tail_wrap)); 17733859Sml29623 /* 17743859Sml29623 * Read the hardware maintained transmit head 17753859Sml29623 * and wrap around bit. 17763859Sml29623 */ 17773859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 17783859Sml29623 head_index = tx_head.bits.ldw.head; 17793859Sml29623 head_wrap = tx_head.bits.ldw.wrap; 17803859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17816929Smisaki "==> nxge_txdma_channel_hung: " 17826929Smisaki "tx_rd_index %d tail %d tail_wrap %d " 17836929Smisaki "head %d wrap %d", 17846929Smisaki tx_rd_index, tail_index, tail_wrap, 17856929Smisaki head_index, head_wrap)); 17863859Sml29623 17873859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap, 17886929Smisaki tail_index, tail_wrap) && 17896929Smisaki (head_index == tx_rd_index)) { 17903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17916929Smisaki "==> nxge_txdma_channel_hung: EMPTY")); 17923859Sml29623 return (B_FALSE); 17933859Sml29623 } 17943859Sml29623 17953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17966929Smisaki "==> nxge_txdma_channel_hung: Checking if ring full")); 17973859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 17986929Smisaki tail_wrap)) { 17993859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18006929Smisaki "==> nxge_txdma_channel_hung: full")); 18013859Sml29623 return (B_TRUE); 18023859Sml29623 } 18033859Sml29623 18043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 18053859Sml29623 
18063859Sml29623 return (B_FALSE); 18073859Sml29623 } 18083859Sml29623 18096495Sspeer /* 18106495Sspeer * nxge_fixup_hung_txdma_rings 18116495Sspeer * 18126495Sspeer * Disable a TDC. 18136495Sspeer * 18146495Sspeer * Arguments: 18156495Sspeer * nxgep 18166495Sspeer * channel The channel to reset. 18176495Sspeer * reg_data The current TX_CS. 18186495Sspeer * 18196495Sspeer * Notes: 18206495Sspeer * Called by nxge_check_tx_hang() 18216495Sspeer * 18226495Sspeer * NPI/NXGE function calls: 18236495Sspeer * npi_txdma_ring_head_get() 18246495Sspeer * 18256495Sspeer * Registers accessed: 18266495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 18276495Sspeer * 18286495Sspeer * Context: 18296495Sspeer * Any domain 18306495Sspeer */ 18313859Sml29623 /*ARGSUSED*/ 18323859Sml29623 void 18333859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 18343859Sml29623 { 18356495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 18366495Sspeer int tdc; 18373859Sml29623 18383859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 18396495Sspeer 18406495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 18413859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18426495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 18433859Sml29623 return; 18443859Sml29623 } 18453859Sml29623 18466495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 18476495Sspeer if ((1 << tdc) & set->owned.map) { 18486495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 18496495Sspeer if (ring) { 18506495Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 18516495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 18526495Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d", 18536495Sspeer tdc)); 18546495Sspeer } 18556495Sspeer } 18563859Sml29623 } 18573859Sml29623 18583859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 18593859Sml29623 } 18603859Sml29623 18616495Sspeer /* 18626495Sspeer * nxge_txdma_fixup_hung_channel 18636495Sspeer * 18646495Sspeer 
* 'Fix' a hung TDC. 18656495Sspeer * 18666495Sspeer * Arguments: 18676495Sspeer * nxgep 18686495Sspeer * channel The channel to fix. 18696495Sspeer * 18706495Sspeer * Notes: 18716495Sspeer * Called by nxge_fixup_hung_txdma_rings() 18726495Sspeer * 18736495Sspeer * 1. Reclaim the TDC. 18746495Sspeer * 2. Disable the TDC. 18756495Sspeer * 18766495Sspeer * NPI/NXGE function calls: 18776495Sspeer * nxge_txdma_reclaim() 18786495Sspeer * npi_txdma_channel_disable(TX_CS) 18796495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 18806495Sspeer * 18816495Sspeer * Registers accessed: 18826495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 18836495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 18846495Sspeer * 18856495Sspeer * Context: 18866495Sspeer * Any domain 18876495Sspeer */ 18883859Sml29623 /*ARGSUSED*/ 18893859Sml29623 void 18903859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 18913859Sml29623 { 18923859Sml29623 p_tx_ring_t ring_p; 18933859Sml29623 18943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 18953859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 18963859Sml29623 if (ring_p == NULL) { 18973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18986929Smisaki "<== nxge_txdma_fix_hung_channel")); 18993859Sml29623 return; 19003859Sml29623 } 19013859Sml29623 19023859Sml29623 if (ring_p->tdc != channel) { 19033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19046929Smisaki "<== nxge_txdma_fix_hung_channel: channel not matched " 19056929Smisaki "ring tdc %d passed channel", 19066929Smisaki ring_p->tdc, channel)); 19073859Sml29623 return; 19083859Sml29623 } 19093859Sml29623 19103859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 19113859Sml29623 19123859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 19133859Sml29623 } 19143859Sml29623 19153859Sml29623 /*ARGSUSED*/ 19163859Sml29623 void 19173859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 
p_tx_ring_t ring_p, 19183859Sml29623 uint16_t channel) 19193859Sml29623 { 19203859Sml29623 npi_handle_t handle; 19213859Sml29623 tdmc_intr_dbg_t intr_dbg; 19223859Sml29623 int status = NXGE_OK; 19233859Sml29623 19243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 19253859Sml29623 19263859Sml29623 if (ring_p == NULL) { 19273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19286929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer")); 19293859Sml29623 return; 19303859Sml29623 } 19313859Sml29623 19323859Sml29623 if (ring_p->tdc != channel) { 19333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19346929Smisaki "<== nxge_txdma_fixup_hung_channel: channel " 19356929Smisaki "not matched " 19366929Smisaki "ring tdc %d passed channel", 19376929Smisaki ring_p->tdc, channel)); 19383859Sml29623 return; 19393859Sml29623 } 19403859Sml29623 19413859Sml29623 /* Reclaim descriptors */ 19423859Sml29623 MUTEX_ENTER(&ring_p->lock); 19433859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 19443859Sml29623 MUTEX_EXIT(&ring_p->lock); 19453859Sml29623 19463859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19473859Sml29623 /* 19483859Sml29623 * Stop the dma channel waits for the stop done. 19493859Sml29623 * If the stop done bit is not set, then force 19503859Sml29623 * an error. 
19513859Sml29623 */ 19523859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19533859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19543859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19556929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped OK " 19566929Smisaki "ring tdc %d passed channel %d", 19576929Smisaki ring_p->tdc, channel)); 19583859Sml29623 return; 19593859Sml29623 } 19603859Sml29623 19613859Sml29623 /* Inject any error */ 19623859Sml29623 intr_dbg.value = 0; 19633859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 19643859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 19653859Sml29623 19663859Sml29623 /* Stop done bit will be set as a result of error injection */ 19673859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19683859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19706929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped again" 19716929Smisaki "ring tdc %d passed channel", 19726929Smisaki ring_p->tdc, channel)); 19733859Sml29623 return; 19743859Sml29623 } 19753859Sml29623 19763859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19776929Smisaki "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 19786929Smisaki "ring tdc %d passed channel", 19796929Smisaki ring_p->tdc, channel)); 19803859Sml29623 19813859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 19823859Sml29623 } 19833859Sml29623 19843859Sml29623 /*ARGSUSED*/ 19853859Sml29623 void 19863859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep) 19873859Sml29623 { 19886495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 19896495Sspeer int tdc; 19906495Sspeer 19916495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 19926495Sspeer 19936495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 19943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19956495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 19963859Sml29623 return; 19973859Sml29623 } 19983859Sml29623 19996495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20006495Sspeer if ((1 << tdc) & set->owned.map) { 20016495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20026495Sspeer if (ring) { 20036495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20046495Sspeer "==> nxge_reclaim_rings: TDC %d", tdc)); 20056495Sspeer MUTEX_ENTER(&ring->lock); 20066495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, tdc); 20076495Sspeer MUTEX_EXIT(&ring->lock); 20086495Sspeer } 20096495Sspeer } 20103859Sml29623 } 20113859Sml29623 20123859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 20133859Sml29623 } 20143859Sml29623 20153859Sml29623 void 20163859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 20173859Sml29623 { 20186495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 20196495Sspeer npi_handle_t handle; 20206495Sspeer int tdc; 20216495Sspeer 20226495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 20233859Sml29623 20243859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 20256495Sspeer 20266495Sspeer if (!isLDOMguest(nxgep)) { 20276495Sspeer (void) npi_txdma_dump_fzc_regs(handle); 20286495Sspeer 20296495Sspeer /* Dump TXC registers. 
*/ 20306495Sspeer (void) npi_txc_dump_fzc_regs(handle); 20316495Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 20323859Sml29623 } 20333859Sml29623 20346495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20353859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20366495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20373859Sml29623 return; 20383859Sml29623 } 20393859Sml29623 20406495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20416495Sspeer if ((1 << tdc) & set->owned.map) { 20426495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20436495Sspeer if (ring) { 20446495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20456495Sspeer "==> nxge_txdma_regs_dump_channels: " 20466495Sspeer "TDC %d", tdc)); 20476495Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc); 20486495Sspeer 20496495Sspeer /* Dump TXC registers, if able to. */ 20506495Sspeer if (!isLDOMguest(nxgep)) { 20516495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20526495Sspeer "==> nxge_txdma_regs_dump_channels:" 20536495Sspeer " FZC TDC %d", tdc)); 20546495Sspeer (void) npi_txc_dump_tdc_fzc_regs 20556495Sspeer (handle, tdc); 20566495Sspeer } 20576495Sspeer nxge_txdma_regs_dump(nxgep, tdc); 20586495Sspeer } 20596495Sspeer } 20603859Sml29623 } 20613859Sml29623 20623859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 20633859Sml29623 } 20643859Sml29623 20653859Sml29623 void 20663859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 20673859Sml29623 { 20683859Sml29623 npi_handle_t handle; 20693859Sml29623 tx_ring_hdl_t hdl; 20703859Sml29623 tx_ring_kick_t kick; 20713859Sml29623 tx_cs_t cs; 20723859Sml29623 txc_control_t control; 20733859Sml29623 uint32_t bitmap = 0; 20743859Sml29623 uint32_t burst = 0; 20753859Sml29623 uint32_t bytes = 0; 20763859Sml29623 dma_log_page_t cfg; 20773859Sml29623 20783859Sml29623 printf("\n\tfunc # %d tdc %d ", 20796929Smisaki nxgep->function_num, channel); 20803859Sml29623 cfg.page_num = 0; 20813859Sml29623 handle 
= NXGE_DEV_NPI_HANDLE(nxgep); 20823859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 20833859Sml29623 printf("\n\tlog page func %d valid page 0 %d", 20846929Smisaki cfg.func_num, cfg.valid); 20853859Sml29623 cfg.page_num = 1; 20863859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 20873859Sml29623 printf("\n\tlog page func %d valid page 1 %d", 20886929Smisaki cfg.func_num, cfg.valid); 20893859Sml29623 20903859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 20913859Sml29623 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 20923859Sml29623 printf("\n\thead value is 0x%0llx", 20936929Smisaki (long long)hdl.value); 20943859Sml29623 printf("\n\thead index %d", hdl.bits.ldw.head); 20953859Sml29623 printf("\n\tkick value is 0x%0llx", 20966929Smisaki (long long)kick.value); 20973859Sml29623 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 20983859Sml29623 20993859Sml29623 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 21003859Sml29623 printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 21013859Sml29623 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 21023859Sml29623 21033859Sml29623 (void) npi_txc_control(handle, OP_GET, &control); 21043859Sml29623 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 21053859Sml29623 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 21063859Sml29623 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 21073859Sml29623 21083859Sml29623 printf("\n\tTXC port control 0x%0llx", 21096929Smisaki (long long)control.value); 21103859Sml29623 printf("\n\tTXC port bitmap 0x%x", bitmap); 21113859Sml29623 printf("\n\tTXC max burst %d", burst); 21123859Sml29623 printf("\n\tTXC bytes xmt %d\n", bytes); 21133859Sml29623 21143859Sml29623 { 21153859Sml29623 ipp_status_t status; 21163859Sml29623 21173859Sml29623 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 21185125Sjoycey #if defined(__i386) 
21195125Sjoycey printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value); 21205125Sjoycey #else 21213859Sml29623 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 21225125Sjoycey #endif 21233859Sml29623 } 21243859Sml29623 } 21253859Sml29623 21263859Sml29623 /* 21276495Sspeer * nxge_tdc_hvio_setup 21286495Sspeer * 21296495Sspeer * I'm not exactly sure what this code does. 21306495Sspeer * 21316495Sspeer * Arguments: 21326495Sspeer * nxgep 21336495Sspeer * channel The channel to map. 21346495Sspeer * 21356495Sspeer * Notes: 21366495Sspeer * 21376495Sspeer * NPI/NXGE function calls: 21386495Sspeer * na 21396495Sspeer * 21406495Sspeer * Context: 21416495Sspeer * Service domain? 21423859Sml29623 */ 21436495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21446495Sspeer static void 21456495Sspeer nxge_tdc_hvio_setup( 21466495Sspeer nxge_t *nxgep, int channel) 21473859Sml29623 { 21486495Sspeer nxge_dma_common_t *data; 21496495Sspeer nxge_dma_common_t *control; 21506495Sspeer tx_ring_t *ring; 21516495Sspeer 21526495Sspeer ring = nxgep->tx_rings->rings[channel]; 21536495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 21546495Sspeer 21556495Sspeer ring->hv_set = B_FALSE; 21566495Sspeer 21576495Sspeer ring->hv_tx_buf_base_ioaddr_pp = 21586495Sspeer (uint64_t)data->orig_ioaddr_pp; 21596495Sspeer ring->hv_tx_buf_ioaddr_size = 21606495Sspeer (uint64_t)data->orig_alength; 21616495Sspeer 21626495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21636929Smisaki "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 21646929Smisaki "orig vatopa base io $%p orig_len 0x%llx (%d)", 21656929Smisaki ring->hv_tx_buf_base_ioaddr_pp, 21666929Smisaki ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 21676929Smisaki data->ioaddr_pp, data->orig_vatopa, 21686929Smisaki data->orig_alength, data->orig_alength)); 21696495Sspeer 21706495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 21716495Sspeer 21726495Sspeer 
ring->hv_tx_cntl_base_ioaddr_pp = 21736495Sspeer (uint64_t)control->orig_ioaddr_pp; 21746495Sspeer ring->hv_tx_cntl_ioaddr_size = 21756495Sspeer (uint64_t)control->orig_alength; 21766495Sspeer 21776495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21786929Smisaki "hv cntl base io $%p orig ioaddr_pp ($%p) " 21796929Smisaki "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 21806929Smisaki ring->hv_tx_cntl_base_ioaddr_pp, 21816929Smisaki control->orig_ioaddr_pp, control->orig_vatopa, 21826929Smisaki ring->hv_tx_cntl_ioaddr_size, 21836929Smisaki control->orig_alength, control->orig_alength)); 21846495Sspeer } 21853859Sml29623 #endif 21863859Sml29623 21876495Sspeer static nxge_status_t 21886495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel) 21896495Sspeer { 21906495Sspeer nxge_dma_common_t **pData; 21916495Sspeer nxge_dma_common_t **pControl; 21926495Sspeer tx_ring_t **pRing, *ring; 21936495Sspeer tx_mbox_t **mailbox; 21946495Sspeer uint32_t num_chunks; 21956495Sspeer 21966495Sspeer nxge_status_t status = NXGE_OK; 21976495Sspeer 21986495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 21996495Sspeer 22006495Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) { 22016495Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 22026495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 22036495Sspeer "<== nxge_map_txdma: buf not allocated")); 22046495Sspeer return (NXGE_ERROR); 22056495Sspeer } 22063859Sml29623 } 22073859Sml29623 22086495Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 22096495Sspeer return (NXGE_ERROR); 22106495Sspeer 22116495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 22126495Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 22136495Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 22146495Sspeer pRing = &nxgep->tx_rings->rings[channel]; 22156495Sspeer mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 22166495Sspeer 22176495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> 
nxge_map_txdma: " 22186929Smisaki "tx_rings $%p tx_desc_rings $%p", 22196929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings)); 22203859Sml29623 22213859Sml29623 /* 22226495Sspeer * Map descriptors from the buffer pools for <channel>. 22236495Sspeer */ 22246495Sspeer 22256495Sspeer /* 22266495Sspeer * Set up and prepare buffer blocks, descriptors 22276495Sspeer * and mailbox. 22283859Sml29623 */ 22296495Sspeer status = nxge_map_txdma_channel(nxgep, channel, 22306495Sspeer pData, pRing, num_chunks, pControl, mailbox); 22316495Sspeer if (status != NXGE_OK) { 22326495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 22336929Smisaki "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 22346929Smisaki "returned 0x%x", 22356929Smisaki nxgep, channel, status)); 22366495Sspeer return (status); 22376495Sspeer } 22386495Sspeer 22396495Sspeer ring = *pRing; 22406495Sspeer 22416495Sspeer ring->index = (uint16_t)channel; 22426495Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 22436495Sspeer 22446495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 22456495Sspeer if (isLDOMguest(nxgep)) { 22466495Sspeer (void) nxge_tdc_lp_conf(nxgep, channel); 22476495Sspeer } else { 22486495Sspeer nxge_tdc_hvio_setup(nxgep, channel); 22496495Sspeer } 22503859Sml29623 #endif 22516495Sspeer 22526495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 22536495Sspeer "(status 0x%x channel %d)", status, channel)); 22543859Sml29623 22553859Sml29623 return (status); 22563859Sml29623 } 22573859Sml29623 22583859Sml29623 static nxge_status_t 22593859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 22603859Sml29623 p_nxge_dma_common_t *dma_buf_p, 22613859Sml29623 p_tx_ring_t *tx_desc_p, 22623859Sml29623 uint32_t num_chunks, 22633859Sml29623 p_nxge_dma_common_t *dma_cntl_p, 22643859Sml29623 p_tx_mbox_t *tx_mbox_p) 22653859Sml29623 { 22663859Sml29623 int status = NXGE_OK; 22673859Sml29623 22683859Sml29623 /* 22693859Sml29623 * Set up and prepare buffer blocks, descriptors 
	 * and mailbox.
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel (channel %d)", channel));
	/*
	 * Transmit buffer blocks
	 */
	status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, tx_desc_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_txdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_txdma_channel_exit;
	}

	/*
	 * Transmit block ring, and mailbox.
	 */
	nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
	    tx_mbox_p);

	goto nxge_map_txdma_channel_exit;

nxge_map_txdma_channel_fail1:
	/*
	 * NOTE(review): this label is currently unreachable -- the success
	 * path above jumps straight to _exit and cfg_ring reports no
	 * failure.  Kept in case cfg_ring grows an error path.
	 */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel: unmap buf"
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);

nxge_map_txdma_channel_exit:
	NXGE_ERROR_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel: "
	    "(status 0x%x channel %d)",
	    status, channel));

	return (status);
}

/*
 * nxge_unmap_txdma_channel
 *
 *	Tear down <channel>'s mappings in the reverse order of
 *	nxge_map_txdma(): mailbox/descriptor ring first, then the
 *	buffer blocks, then the channel's TX buffers themselves.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to unmap.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	tx_ring_t *ring;
	tx_mbox_t *mailbox;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel (channel %d)", channel));
	/*
	 * unmap tx block ring, and mailbox.
	 */
	ring = nxgep->tx_rings->rings[channel];
	mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];

	(void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);

	/* unmap buffer blocks */
	(void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);

	nxge_free_txb(nxgep, channel);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
}

/*
 * nxge_map_txdma_channel_cfg_ring
 *
 *	Map a TDC into our kernel space.
 *	This function allocates all of the per-channel data structures.
 *
 * Arguments:
 * 	nxgep
 * 	dma_channel	The channel to map.
 *	dma_cntl_p
 *	tx_ring_p	dma_channel's transmit ring
 *	tx_mbox_p	dma_channel's mailbox
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_setup_dma_common()
 *
 * Registers accessed:
 *	none.
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/* Carve the descriptor area out of the control DMA area. */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Compose the ring configuration: DMA base address plus the
	 * ring length encoded in units of 8 descriptors (>> 3).
	 */
	tx_ring_cfig_p->value = 0;
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/* Request a channel reset via the shadow TX_CS value. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/* Split the mailbox DMA address into its high and low registers. */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/* Reset the shadow logical-page state; enable pages 0 and 1. */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the freshly allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}

/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	/* Free the mailbox allocated by nxge_map_txdma_channel_cfg_ring(). */
	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24726929Smisaki "<== nxge_unmap_txdma_channel_cfg_ring")); 24733859Sml29623 } 24743859Sml29623 24756495Sspeer /* 24766495Sspeer * nxge_map_txdma_channel_buf_ring 24776495Sspeer * 24786495Sspeer * 24796495Sspeer * Arguments: 24806495Sspeer * nxgep 24816495Sspeer * channel The channel to map. 24826495Sspeer * dma_buf_p 24836495Sspeer * tx_desc_p channel's descriptor ring 24846495Sspeer * num_chunks 24856495Sspeer * 24866495Sspeer * Notes: 24876495Sspeer * 24886495Sspeer * NPI/NXGE function calls: 24896495Sspeer * nxge_setup_dma_common() 24906495Sspeer * 24916495Sspeer * Registers accessed: 24926495Sspeer * none. 24936495Sspeer * 24946495Sspeer * Context: 24956495Sspeer * Any domain 24966495Sspeer */ 24973859Sml29623 static nxge_status_t 24983859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 24993859Sml29623 p_nxge_dma_common_t *dma_buf_p, 25003859Sml29623 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 25013859Sml29623 { 25023859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 25033859Sml29623 p_nxge_dma_common_t dmap; 25043859Sml29623 nxge_os_dma_handle_t tx_buf_dma_handle; 25053859Sml29623 p_tx_ring_t tx_ring_p; 25063859Sml29623 p_tx_msg_t tx_msg_ring; 25073859Sml29623 nxge_status_t status = NXGE_OK; 25083859Sml29623 int ddi_status = DDI_SUCCESS; 25093859Sml29623 int i, j, index; 25103859Sml29623 uint32_t size, bsize; 25113859Sml29623 uint32_t nblocks, nmsgs; 25123859Sml29623 25133859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25146929Smisaki "==> nxge_map_txdma_channel_buf_ring")); 25153859Sml29623 25163859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 25173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25183859Sml29623 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 25193859Sml29623 "chunks bufp $%p", 25206929Smisaki channel, num_chunks, dma_bufp)); 25213859Sml29623 25223859Sml29623 nmsgs = 0; 25233859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 25243859Sml29623 nmsgs += tmp_bufp->nblocks; 
25253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25266929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 25276929Smisaki "bufp $%p nblocks %d nmsgs %d", 25286929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 25293859Sml29623 } 25303859Sml29623 if (!nmsgs) { 25313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25326929Smisaki "<== nxge_map_txdma_channel_buf_ring: channel %d " 25336929Smisaki "no msg blocks", 25346929Smisaki channel)); 25353859Sml29623 status = NXGE_ERROR; 25363859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 25373859Sml29623 } 25383859Sml29623 25393859Sml29623 tx_ring_p = (p_tx_ring_t) 25406929Smisaki KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 25413859Sml29623 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 25426929Smisaki (void *)nxgep->interrupt_cookie); 25433952Sml29623 25446713Sspeer (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 25456886Sspeer tx_ring_p->tx_ring_busy = B_FALSE; 25463952Sml29623 tx_ring_p->nxgep = nxgep; 25473952Sml29623 tx_ring_p->serial = nxge_serialize_create(nmsgs, 25486929Smisaki nxge_serial_tx, tx_ring_p); 25493859Sml29623 /* 25503859Sml29623 * Allocate transmit message rings and handles for packets 25513859Sml29623 * not to be copied to premapped buffers. 
25523859Sml29623 */ 25533859Sml29623 size = nmsgs * sizeof (tx_msg_t); 25543859Sml29623 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 25553859Sml29623 for (i = 0; i < nmsgs; i++) { 25563859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 25576929Smisaki DDI_DMA_DONTWAIT, 0, 25586929Smisaki &tx_msg_ring[i].dma_handle); 25593859Sml29623 if (ddi_status != DDI_SUCCESS) { 25603859Sml29623 status |= NXGE_DDI_FAILED; 25613859Sml29623 break; 25623859Sml29623 } 25633859Sml29623 } 25643859Sml29623 if (i < nmsgs) { 25654185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25664185Sspeer "Allocate handles failed.")); 25673859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 25683859Sml29623 } 25693859Sml29623 25703859Sml29623 tx_ring_p->tdc = channel; 25713859Sml29623 tx_ring_p->tx_msg_ring = tx_msg_ring; 25723859Sml29623 tx_ring_p->tx_ring_size = nmsgs; 25733859Sml29623 tx_ring_p->num_chunks = num_chunks; 25743859Sml29623 if (!nxge_tx_intr_thres) { 25753859Sml29623 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 25763859Sml29623 } 25773859Sml29623 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 25783859Sml29623 tx_ring_p->rd_index = 0; 25793859Sml29623 tx_ring_p->wr_index = 0; 25803859Sml29623 tx_ring_p->ring_head.value = 0; 25813859Sml29623 tx_ring_p->ring_kick_tail.value = 0; 25823859Sml29623 tx_ring_p->descs_pending = 0; 25833859Sml29623 25843859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25856929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 25866929Smisaki "actual tx desc max %d nmsgs %d " 25876929Smisaki "(config nxge_tx_ring_size %d)", 25886929Smisaki channel, tx_ring_p->tx_ring_size, nmsgs, 25896929Smisaki nxge_tx_ring_size)); 25903859Sml29623 25913859Sml29623 /* 25923859Sml29623 * Map in buffers from the buffer pool. 
25933859Sml29623 */ 25943859Sml29623 index = 0; 25953859Sml29623 bsize = dma_bufp->block_size; 25963859Sml29623 25973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 25986929Smisaki "dma_bufp $%p tx_rng_p $%p " 25996929Smisaki "tx_msg_rng_p $%p bsize %d", 26006929Smisaki dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 26013859Sml29623 26023859Sml29623 tx_buf_dma_handle = dma_bufp->dma_handle; 26033859Sml29623 for (i = 0; i < num_chunks; i++, dma_bufp++) { 26043859Sml29623 bsize = dma_bufp->block_size; 26053859Sml29623 nblocks = dma_bufp->nblocks; 26063859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26076929Smisaki "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 26086929Smisaki "size %d dma_bufp $%p", 26096929Smisaki i, sizeof (nxge_dma_common_t), dma_bufp)); 26103859Sml29623 26113859Sml29623 for (j = 0; j < nblocks; j++) { 26123859Sml29623 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 26133859Sml29623 dmap = &tx_msg_ring[index++].buf_dma; 26143859Sml29623 #ifdef TX_MEM_DEBUG 26153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26166929Smisaki "==> nxge_map_txdma_channel_buf_ring: j %d" 26176929Smisaki "dmap $%p", i, dmap)); 26183859Sml29623 #endif 26193859Sml29623 nxge_setup_dma_common(dmap, dma_bufp, 1, 26206929Smisaki bsize); 26213859Sml29623 } 26223859Sml29623 } 26233859Sml29623 26243859Sml29623 if (i < num_chunks) { 26254185Sspeer status = NXGE_ERROR; 26263859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 26273859Sml29623 } 26283859Sml29623 26293859Sml29623 *tx_desc_p = tx_ring_p; 26303859Sml29623 26313859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 26323859Sml29623 26333859Sml29623 nxge_map_txdma_channel_buf_ring_fail1: 26343952Sml29623 if (tx_ring_p->serial) { 26353952Sml29623 nxge_serialize_destroy(tx_ring_p->serial); 26363952Sml29623 tx_ring_p->serial = NULL; 26373952Sml29623 } 26383952Sml29623 26393859Sml29623 index--; 26403859Sml29623 for (; index >= 0; index--) { 26414185Sspeer if 
(tx_msg_ring[index].dma_handle != NULL) { 26424185Sspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 26433859Sml29623 } 26443859Sml29623 } 26453859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 26464185Sspeer KMEM_FREE(tx_msg_ring, size); 26473859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 26483859Sml29623 26494185Sspeer status = NXGE_ERROR; 26504185Sspeer 26513859Sml29623 nxge_map_txdma_channel_buf_ring_exit: 26523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26536929Smisaki "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 26543859Sml29623 26553859Sml29623 return (status); 26563859Sml29623 } 26573859Sml29623 26583859Sml29623 /*ARGSUSED*/ 26593859Sml29623 static void 26603859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 26613859Sml29623 { 26623859Sml29623 p_tx_msg_t tx_msg_ring; 26633859Sml29623 p_tx_msg_t tx_msg_p; 26643859Sml29623 int i; 26653859Sml29623 26663859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26676929Smisaki "==> nxge_unmap_txdma_channel_buf_ring")); 26683859Sml29623 if (tx_ring_p == NULL) { 26693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 26706929Smisaki "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 26713859Sml29623 return; 26723859Sml29623 } 26733859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26746929Smisaki "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 26756929Smisaki tx_ring_p->tdc)); 26763859Sml29623 26773859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 26786495Sspeer 26796495Sspeer /* 26806495Sspeer * Since the serialization thread, timer thread and 26816495Sspeer * interrupt thread can all call the transmit reclaim, 26826495Sspeer * the unmapping function needs to acquire the lock 26836495Sspeer * to free those buffers which were transmitted 26846495Sspeer * by the hardware already. 
26856495Sspeer */ 26866495Sspeer MUTEX_ENTER(&tx_ring_p->lock); 26876495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 26886495Sspeer "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 26896495Sspeer "channel %d", 26906495Sspeer tx_ring_p->tdc)); 26916495Sspeer (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 26926495Sspeer 26933859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 26943859Sml29623 tx_msg_p = &tx_msg_ring[i]; 26953859Sml29623 if (tx_msg_p->tx_message != NULL) { 26963859Sml29623 freemsg(tx_msg_p->tx_message); 26973859Sml29623 tx_msg_p->tx_message = NULL; 26983859Sml29623 } 26993859Sml29623 } 27003859Sml29623 27013859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 27023859Sml29623 if (tx_msg_ring[i].dma_handle != NULL) { 27033859Sml29623 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 27043859Sml29623 } 27056495Sspeer tx_msg_ring[i].dma_handle = NULL; 27063859Sml29623 } 27073859Sml29623 27086495Sspeer MUTEX_EXIT(&tx_ring_p->lock); 27096495Sspeer 27103952Sml29623 if (tx_ring_p->serial) { 27113952Sml29623 nxge_serialize_destroy(tx_ring_p->serial); 27123952Sml29623 tx_ring_p->serial = NULL; 27133952Sml29623 } 27143952Sml29623 27153859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 27163859Sml29623 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 27173859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 27183859Sml29623 27193859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27206929Smisaki "<== nxge_unmap_txdma_channel_buf_ring")); 27213859Sml29623 } 27223859Sml29623 27233859Sml29623 static nxge_status_t 27246495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 27253859Sml29623 { 27263859Sml29623 p_tx_rings_t tx_rings; 27273859Sml29623 p_tx_ring_t *tx_desc_rings; 27283859Sml29623 p_tx_mbox_areas_t tx_mbox_areas_p; 27293859Sml29623 p_tx_mbox_t *tx_mbox_p; 27303859Sml29623 nxge_status_t status = NXGE_OK; 27313859Sml29623 27323859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 27333859Sml29623 27343859Sml29623 
tx_rings = nxgep->tx_rings; 27353859Sml29623 if (tx_rings == NULL) { 27363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27376929Smisaki "<== nxge_txdma_hw_start: NULL ring pointer")); 27383859Sml29623 return (NXGE_ERROR); 27393859Sml29623 } 27403859Sml29623 tx_desc_rings = tx_rings->rings; 27413859Sml29623 if (tx_desc_rings == NULL) { 27423859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27436929Smisaki "<== nxge_txdma_hw_start: NULL ring pointers")); 27443859Sml29623 return (NXGE_ERROR); 27453859Sml29623 } 27463859Sml29623 27476495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27486495Sspeer "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 27493859Sml29623 27503859Sml29623 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 27513859Sml29623 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 27523859Sml29623 27536495Sspeer status = nxge_txdma_start_channel(nxgep, channel, 27546495Sspeer (p_tx_ring_t)tx_desc_rings[channel], 27556495Sspeer (p_tx_mbox_t)tx_mbox_p[channel]); 27566495Sspeer if (status != NXGE_OK) { 27576495Sspeer goto nxge_txdma_hw_start_fail1; 27583859Sml29623 } 27593859Sml29623 27603859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27616929Smisaki "tx_rings $%p rings $%p", 27626929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings)); 27633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27646929Smisaki "tx_rings $%p tx_desc_rings $%p", 27656929Smisaki nxgep->tx_rings, tx_desc_rings)); 27663859Sml29623 27673859Sml29623 goto nxge_txdma_hw_start_exit; 27683859Sml29623 27693859Sml29623 nxge_txdma_hw_start_fail1: 27703859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27716929Smisaki "==> nxge_txdma_hw_start: disable " 27726929Smisaki "(status 0x%x channel %d)", status, channel)); 27733859Sml29623 27743859Sml29623 nxge_txdma_hw_start_exit: 27753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27766929Smisaki "==> nxge_txdma_hw_start: (status 0x%x)", status)); 27773859Sml29623 27783859Sml29623 return (status); 
27793859Sml29623 } 27803859Sml29623 27816495Sspeer /* 27826495Sspeer * nxge_txdma_start_channel 27836495Sspeer * 27846495Sspeer * Start a TDC. 27856495Sspeer * 27866495Sspeer * Arguments: 27876495Sspeer * nxgep 27886495Sspeer * channel The channel to start. 27896495Sspeer * tx_ring_p channel's transmit descriptor ring. 27906495Sspeer * tx_mbox_p channel' smailbox. 27916495Sspeer * 27926495Sspeer * Notes: 27936495Sspeer * 27946495Sspeer * NPI/NXGE function calls: 27956495Sspeer * nxge_reset_txdma_channel() 27966495Sspeer * nxge_init_txdma_channel_event_mask() 27976495Sspeer * nxge_enable_txdma_channel() 27986495Sspeer * 27996495Sspeer * Registers accessed: 28006495Sspeer * none directly (see functions above). 28016495Sspeer * 28026495Sspeer * Context: 28036495Sspeer * Any domain 28046495Sspeer */ 28053859Sml29623 static nxge_status_t 28063859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 28073859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 28083859Sml29623 28093859Sml29623 { 28103859Sml29623 nxge_status_t status = NXGE_OK; 28113859Sml29623 28123859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 28133859Sml29623 "==> nxge_txdma_start_channel (channel %d)", channel)); 28143859Sml29623 /* 28153859Sml29623 * TXDMA/TXC must be in stopped state. 
28163859Sml29623 */ 28173859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 28183859Sml29623 28193859Sml29623 /* 28203859Sml29623 * Reset TXDMA channel 28213859Sml29623 */ 28223859Sml29623 tx_ring_p->tx_cs.value = 0; 28233859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 28243859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 28253859Sml29623 tx_ring_p->tx_cs.value); 28263859Sml29623 if (status != NXGE_OK) { 28273859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 28283859Sml29623 "==> nxge_txdma_start_channel (channel %d)" 28293859Sml29623 " reset channel failed 0x%x", channel, status)); 28303859Sml29623 goto nxge_txdma_start_channel_exit; 28313859Sml29623 } 28323859Sml29623 28333859Sml29623 /* 28343859Sml29623 * Initialize the TXDMA channel specific FZC control 28353859Sml29623 * configurations. These FZC registers are pertaining 28363859Sml29623 * to each TX channel (i.e. logical pages). 28373859Sml29623 */ 28386495Sspeer if (!isLDOMguest(nxgep)) { 28396495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 28406495Sspeer tx_ring_p, tx_mbox_p); 28416495Sspeer if (status != NXGE_OK) { 28426495Sspeer goto nxge_txdma_start_channel_exit; 28436495Sspeer } 28443859Sml29623 } 28453859Sml29623 28463859Sml29623 /* 28473859Sml29623 * Initialize the event masks. 28483859Sml29623 */ 28493859Sml29623 tx_ring_p->tx_evmask.value = 0; 28503859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 28516495Sspeer channel, &tx_ring_p->tx_evmask); 28523859Sml29623 if (status != NXGE_OK) { 28533859Sml29623 goto nxge_txdma_start_channel_exit; 28543859Sml29623 } 28553859Sml29623 28563859Sml29623 /* 28573859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 28583859Sml29623 * initialise the DMA channels and 28593859Sml29623 * enable each DMA channel. 
28603859Sml29623 */ 28613859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 28623859Sml29623 tx_ring_p, tx_mbox_p); 28633859Sml29623 if (status != NXGE_OK) { 28643859Sml29623 goto nxge_txdma_start_channel_exit; 28653859Sml29623 } 28663859Sml29623 28673859Sml29623 nxge_txdma_start_channel_exit: 28683859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 28693859Sml29623 28703859Sml29623 return (status); 28713859Sml29623 } 28723859Sml29623 28736495Sspeer /* 28746495Sspeer * nxge_txdma_stop_channel 28756495Sspeer * 28766495Sspeer * Stop a TDC. 28776495Sspeer * 28786495Sspeer * Arguments: 28796495Sspeer * nxgep 28806495Sspeer * channel The channel to stop. 28816495Sspeer * tx_ring_p channel's transmit descriptor ring. 28826495Sspeer * tx_mbox_p channel' smailbox. 28836495Sspeer * 28846495Sspeer * Notes: 28856495Sspeer * 28866495Sspeer * NPI/NXGE function calls: 28876495Sspeer * nxge_txdma_stop_inj_err() 28886495Sspeer * nxge_reset_txdma_channel() 28896495Sspeer * nxge_init_txdma_channel_event_mask() 28906495Sspeer * nxge_init_txdma_channel_cntl_stat() 28916495Sspeer * nxge_disable_txdma_channel() 28926495Sspeer * 28936495Sspeer * Registers accessed: 28946495Sspeer * none directly (see functions above). 28956495Sspeer * 28966495Sspeer * Context: 28976495Sspeer * Any domain 28986495Sspeer */ 28993859Sml29623 /*ARGSUSED*/ 29003859Sml29623 static nxge_status_t 29016495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 29023859Sml29623 { 29036495Sspeer p_tx_ring_t tx_ring_p; 29046495Sspeer int status = NXGE_OK; 29053859Sml29623 29063859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29076929Smisaki "==> nxge_txdma_stop_channel: channel %d", channel)); 29083859Sml29623 29093859Sml29623 /* 29103859Sml29623 * Stop (disable) TXDMA and TXC (if stop bit is set 29113859Sml29623 * and STOP_N_GO bit not set, the TXDMA reset state will 29123859Sml29623 * not be set if reset TXDMA. 
29133859Sml29623 */ 29143859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 29153859Sml29623 29166495Sspeer tx_ring_p = nxgep->tx_rings->rings[channel]; 29176495Sspeer 29183859Sml29623 /* 29193859Sml29623 * Reset TXDMA channel 29203859Sml29623 */ 29213859Sml29623 tx_ring_p->tx_cs.value = 0; 29223859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 29233859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 29246929Smisaki tx_ring_p->tx_cs.value); 29253859Sml29623 if (status != NXGE_OK) { 29263859Sml29623 goto nxge_txdma_stop_channel_exit; 29273859Sml29623 } 29283859Sml29623 29293859Sml29623 #ifdef HARDWARE_REQUIRED 29303859Sml29623 /* Set up the interrupt event masks. */ 29313859Sml29623 tx_ring_p->tx_evmask.value = 0; 29323859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 29336929Smisaki channel, &tx_ring_p->tx_evmask); 29343859Sml29623 if (status != NXGE_OK) { 29353859Sml29623 goto nxge_txdma_stop_channel_exit; 29363859Sml29623 } 29373859Sml29623 29383859Sml29623 /* Initialize the DMA control and status register */ 29393859Sml29623 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 29403859Sml29623 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 29416929Smisaki tx_ring_p->tx_cs.value); 29423859Sml29623 if (status != NXGE_OK) { 29433859Sml29623 goto nxge_txdma_stop_channel_exit; 29443859Sml29623 } 29453859Sml29623 29466495Sspeer tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 29476495Sspeer 29483859Sml29623 /* Disable channel */ 29493859Sml29623 status = nxge_disable_txdma_channel(nxgep, channel, 29506495Sspeer tx_ring_p, tx_mbox_p); 29513859Sml29623 if (status != NXGE_OK) { 29523859Sml29623 goto nxge_txdma_start_channel_exit; 29533859Sml29623 } 29543859Sml29623 29553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29566929Smisaki "==> nxge_txdma_stop_channel: event done")); 29573859Sml29623 29583859Sml29623 #endif 29593859Sml29623 29603859Sml29623 nxge_txdma_stop_channel_exit: 29613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 
"<== nxge_txdma_stop_channel")); 29623859Sml29623 return (status); 29633859Sml29623 } 29643859Sml29623 29656495Sspeer /* 29666495Sspeer * nxge_txdma_get_ring 29676495Sspeer * 29686495Sspeer * Get the ring for a TDC. 29696495Sspeer * 29706495Sspeer * Arguments: 29716495Sspeer * nxgep 29726495Sspeer * channel 29736495Sspeer * 29746495Sspeer * Notes: 29756495Sspeer * 29766495Sspeer * NPI/NXGE function calls: 29776495Sspeer * 29786495Sspeer * Registers accessed: 29796495Sspeer * 29806495Sspeer * Context: 29816495Sspeer * Any domain 29826495Sspeer */ 29833859Sml29623 static p_tx_ring_t 29843859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 29853859Sml29623 { 29866495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 29876495Sspeer int tdc; 29883859Sml29623 29893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 29903859Sml29623 29916495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 29923859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 29936495Sspeer "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 29946495Sspeer goto return_null; 29953859Sml29623 } 29963859Sml29623 29976495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 29986495Sspeer if ((1 << tdc) & set->owned.map) { 29996495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 30006495Sspeer if (ring) { 30016495Sspeer if (channel == ring->tdc) { 30026495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30036495Sspeer "<== nxge_txdma_get_ring: " 30046495Sspeer "tdc %d ring $%p", tdc, ring)); 30056495Sspeer return (ring); 30066495Sspeer } 30076495Sspeer } 30083859Sml29623 } 30093859Sml29623 } 30103859Sml29623 30116495Sspeer return_null: 30126495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 30136929Smisaki "ring not found")); 30146495Sspeer 30153859Sml29623 return (NULL); 30163859Sml29623 } 30173859Sml29623 30186495Sspeer /* 30196495Sspeer * nxge_txdma_get_mbox 30206495Sspeer * 30216495Sspeer * Get the mailbox for a TDC. 
30226495Sspeer * 30236495Sspeer * Arguments: 30246495Sspeer * nxgep 30256495Sspeer * channel 30266495Sspeer * 30276495Sspeer * Notes: 30286495Sspeer * 30296495Sspeer * NPI/NXGE function calls: 30306495Sspeer * 30316495Sspeer * Registers accessed: 30326495Sspeer * 30336495Sspeer * Context: 30346495Sspeer * Any domain 30356495Sspeer */ 30363859Sml29623 static p_tx_mbox_t 30373859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 30383859Sml29623 { 30396495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 30406495Sspeer int tdc; 30413859Sml29623 30423859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 30433859Sml29623 30446495Sspeer if (nxgep->tx_mbox_areas_p == 0 || 30456495Sspeer nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 30466495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30476495Sspeer "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 30486495Sspeer goto return_null; 30493859Sml29623 } 30503859Sml29623 30516495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 30526495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30536495Sspeer "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 30546495Sspeer goto return_null; 30553859Sml29623 } 30563859Sml29623 30576495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 30586495Sspeer if ((1 << tdc) & set->owned.map) { 30596495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 30606495Sspeer if (ring) { 30616495Sspeer if (channel == ring->tdc) { 30626495Sspeer tx_mbox_t *mailbox = nxgep-> 30636495Sspeer tx_mbox_areas_p-> 30646495Sspeer txmbox_areas_p[tdc]; 30656495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30666495Sspeer "<== nxge_txdma_get_mbox: tdc %d " 30676495Sspeer "ring $%p", tdc, mailbox)); 30686495Sspeer return (mailbox); 30696495Sspeer } 30706495Sspeer } 30713859Sml29623 } 30723859Sml29623 } 30733859Sml29623 30746495Sspeer return_null: 30756495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 30766929Smisaki "mailbox not found")); 30776495Sspeer 30783859Sml29623 return 
(NULL); 30793859Sml29623 } 30803859Sml29623 30816495Sspeer /* 30826495Sspeer * nxge_tx_err_evnts 30836495Sspeer * 30846495Sspeer * Recover a TDC. 30856495Sspeer * 30866495Sspeer * Arguments: 30876495Sspeer * nxgep 30886495Sspeer * index The index to the TDC ring. 30896495Sspeer * ldvp Used to get the channel number ONLY. 30906495Sspeer * cs A copy of the bits from TX_CS. 30916495Sspeer * 30926495Sspeer * Notes: 30936495Sspeer * Calling tree: 30946495Sspeer * nxge_tx_intr() 30956495Sspeer * 30966495Sspeer * NPI/NXGE function calls: 30976495Sspeer * npi_txdma_ring_error_get() 30986495Sspeer * npi_txdma_inj_par_error_get() 30996495Sspeer * nxge_txdma_fatal_err_recover() 31006495Sspeer * 31016495Sspeer * Registers accessed: 31026495Sspeer * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 31036495Sspeer * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 31046495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 31056495Sspeer * 31066495Sspeer * Context: 31076495Sspeer * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
31086495Sspeer */ 31093859Sml29623 /*ARGSUSED*/ 31103859Sml29623 static nxge_status_t 31113859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 31123859Sml29623 { 31133859Sml29623 npi_handle_t handle; 31143859Sml29623 npi_status_t rs; 31153859Sml29623 uint8_t channel; 31163859Sml29623 p_tx_ring_t *tx_rings; 31173859Sml29623 p_tx_ring_t tx_ring_p; 31183859Sml29623 p_nxge_tx_ring_stats_t tdc_stats; 31193859Sml29623 boolean_t txchan_fatal = B_FALSE; 31203859Sml29623 nxge_status_t status = NXGE_OK; 31213859Sml29623 tdmc_inj_par_err_t par_err; 31223859Sml29623 uint32_t value; 31233859Sml29623 31246495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 31253859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 31263859Sml29623 channel = ldvp->channel; 31273859Sml29623 31283859Sml29623 tx_rings = nxgep->tx_rings->rings; 31293859Sml29623 tx_ring_p = tx_rings[index]; 31303859Sml29623 tdc_stats = tx_ring_p->tdc_stats; 31313859Sml29623 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 31326929Smisaki (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 31336929Smisaki (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 31343859Sml29623 if ((rs = npi_txdma_ring_error_get(handle, channel, 31356929Smisaki &tdc_stats->errlog)) != NPI_SUCCESS) 31363859Sml29623 return (NXGE_ERROR | rs); 31373859Sml29623 } 31383859Sml29623 31393859Sml29623 if (cs.bits.ldw.mbox_err) { 31403859Sml29623 tdc_stats->mbox_err++; 31413859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31426929Smisaki NXGE_FM_EREPORT_TDMC_MBOX_ERR); 31433859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31446929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31456929Smisaki "fatal error: mailbox", channel)); 31463859Sml29623 txchan_fatal = B_TRUE; 31473859Sml29623 } 31483859Sml29623 if (cs.bits.ldw.pkt_size_err) { 31493859Sml29623 tdc_stats->pkt_size_err++; 31503859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 
31516929Smisaki NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 31523859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31536929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31546929Smisaki "fatal error: pkt_size_err", channel)); 31553859Sml29623 txchan_fatal = B_TRUE; 31563859Sml29623 } 31573859Sml29623 if (cs.bits.ldw.tx_ring_oflow) { 31583859Sml29623 tdc_stats->tx_ring_oflow++; 31593859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31606929Smisaki NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 31613859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31626929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31636929Smisaki "fatal error: tx_ring_oflow", channel)); 31643859Sml29623 txchan_fatal = B_TRUE; 31653859Sml29623 } 31663859Sml29623 if (cs.bits.ldw.pref_buf_par_err) { 31673859Sml29623 tdc_stats->pre_buf_par_err++; 31683859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31696929Smisaki NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 31703859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31716929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31726929Smisaki "fatal error: pre_buf_par_err", channel)); 31733859Sml29623 /* Clear error injection source for parity error */ 31743859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 31753859Sml29623 par_err.value = value; 31763859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 31773859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 31783859Sml29623 txchan_fatal = B_TRUE; 31793859Sml29623 } 31803859Sml29623 if (cs.bits.ldw.nack_pref) { 31813859Sml29623 tdc_stats->nack_pref++; 31823859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31836929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PREF); 31843859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31856929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31866929Smisaki "fatal error: nack_pref", channel)); 31873859Sml29623 txchan_fatal = B_TRUE; 31883859Sml29623 } 31893859Sml29623 if (cs.bits.ldw.nack_pkt_rd) { 31903859Sml29623 
tdc_stats->nack_pkt_rd++; 31913859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31926929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 31933859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31946929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31956929Smisaki "fatal error: nack_pkt_rd", channel)); 31963859Sml29623 txchan_fatal = B_TRUE; 31973859Sml29623 } 31983859Sml29623 if (cs.bits.ldw.conf_part_err) { 31993859Sml29623 tdc_stats->conf_part_err++; 32003859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32016929Smisaki NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 32023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32036929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32046929Smisaki "fatal error: config_partition_err", channel)); 32053859Sml29623 txchan_fatal = B_TRUE; 32063859Sml29623 } 32073859Sml29623 if (cs.bits.ldw.pkt_prt_err) { 32083859Sml29623 tdc_stats->pkt_part_err++; 32093859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32106929Smisaki NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 32113859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32126929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32136929Smisaki "fatal error: pkt_prt_err", channel)); 32143859Sml29623 txchan_fatal = B_TRUE; 32153859Sml29623 } 32163859Sml29623 32173859Sml29623 /* Clear error injection source in case this is an injected error */ 32183859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 32193859Sml29623 32203859Sml29623 if (txchan_fatal) { 32213859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32226929Smisaki " nxge_tx_err_evnts: " 32236929Smisaki " fatal error on channel %d cs 0x%llx\n", 32246929Smisaki channel, cs.value)); 32253859Sml29623 status = nxge_txdma_fatal_err_recover(nxgep, channel, 32266929Smisaki tx_ring_p); 32273859Sml29623 if (status == NXGE_OK) { 32283859Sml29623 FM_SERVICE_RESTORED(nxgep); 32293859Sml29623 } 32303859Sml29623 } 32313859Sml29623 32326495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== 
nxge_tx_err_evnts")); 32333859Sml29623 32343859Sml29623 return (status); 32353859Sml29623 } 32363859Sml29623 32373859Sml29623 static nxge_status_t 32386495Sspeer nxge_txdma_fatal_err_recover( 32396495Sspeer p_nxge_t nxgep, 32406495Sspeer uint16_t channel, 32416495Sspeer p_tx_ring_t tx_ring_p) 32423859Sml29623 { 32433859Sml29623 npi_handle_t handle; 32443859Sml29623 npi_status_t rs = NPI_SUCCESS; 32453859Sml29623 p_tx_mbox_t tx_mbox_p; 32463859Sml29623 nxge_status_t status = NXGE_OK; 32473859Sml29623 32483859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 32493859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32506929Smisaki "Recovering from TxDMAChannel#%d error...", channel)); 32513859Sml29623 32523859Sml29623 /* 32533859Sml29623 * Stop the dma channel waits for the stop done. 32543859Sml29623 * If the stop done bit is not set, then create 32553859Sml29623 * an error. 32563859Sml29623 */ 32573859Sml29623 32583859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 32593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 32603859Sml29623 MUTEX_ENTER(&tx_ring_p->lock); 32613859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 32623859Sml29623 if (rs != NPI_SUCCESS) { 32633859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32646929Smisaki "==> nxge_txdma_fatal_err_recover (channel %d): " 32656929Smisaki "stop failed ", channel)); 32663859Sml29623 goto fail; 32673859Sml29623 } 32683859Sml29623 32693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 32703859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 32713859Sml29623 32723859Sml29623 /* 32733859Sml29623 * Reset TXDMA channel 32743859Sml29623 */ 32753859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 32763859Sml29623 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 32776929Smisaki NPI_SUCCESS) { 32783859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32796929Smisaki "==> 
nxge_txdma_fatal_err_recover (channel %d)" 32806929Smisaki " reset channel failed 0x%x", channel, rs)); 32813859Sml29623 goto fail; 32823859Sml29623 } 32833859Sml29623 32843859Sml29623 /* 32853859Sml29623 * Reset the tail (kick) register to 0. 32863859Sml29623 * (Hardware will not reset it. Tx overflow fatal 32873859Sml29623 * error if tail is not set to 0 after reset! 32883859Sml29623 */ 32893859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 32903859Sml29623 32913859Sml29623 /* Restart TXDMA channel */ 32923859Sml29623 32936495Sspeer if (!isLDOMguest(nxgep)) { 32946495Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 32956495Sspeer 32966495Sspeer // XXX This is a problem in HIO! 32976495Sspeer /* 32986495Sspeer * Initialize the TXDMA channel specific FZC control 32996495Sspeer * configurations. These FZC registers are pertaining 33006495Sspeer * to each TX channel (i.e. logical pages). 33016495Sspeer */ 33026495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 33036495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 33046495Sspeer tx_ring_p, tx_mbox_p); 33056495Sspeer if (status != NXGE_OK) 33066495Sspeer goto fail; 33076495Sspeer } 33083859Sml29623 33093859Sml29623 /* 33103859Sml29623 * Initialize the event masks. 33113859Sml29623 */ 33123859Sml29623 tx_ring_p->tx_evmask.value = 0; 33133859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 33146929Smisaki &tx_ring_p->tx_evmask); 33153859Sml29623 if (status != NXGE_OK) 33163859Sml29623 goto fail; 33173859Sml29623 33183859Sml29623 tx_ring_p->wr_index_wrap = B_FALSE; 33193859Sml29623 tx_ring_p->wr_index = 0; 33203859Sml29623 tx_ring_p->rd_index = 0; 33213859Sml29623 33223859Sml29623 /* 33233859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 33243859Sml29623 * initialise the DMA channels and 33253859Sml29623 * enable each DMA channel. 
33263859Sml29623 */ 33273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 33283859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 33296929Smisaki tx_ring_p, tx_mbox_p); 33303859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33313859Sml29623 if (status != NXGE_OK) 33323859Sml29623 goto fail; 33333859Sml29623 33343859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33356929Smisaki "Recovery Successful, TxDMAChannel#%d Restored", 33366929Smisaki channel)); 33373859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 33383859Sml29623 33393859Sml29623 return (NXGE_OK); 33403859Sml29623 33413859Sml29623 fail: 33423859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 33446929Smisaki "nxge_txdma_fatal_err_recover (channel %d): " 33456929Smisaki "failed to recover this txdma channel", channel)); 33463859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 33473859Sml29623 33483859Sml29623 return (status); 33493859Sml29623 } 33503859Sml29623 33516495Sspeer /* 33526495Sspeer * nxge_tx_port_fatal_err_recover 33536495Sspeer * 33546495Sspeer * Attempt to recover from a fatal port error. 33556495Sspeer * 33566495Sspeer * Arguments: 33576495Sspeer * nxgep 33586495Sspeer * 33596495Sspeer * Notes: 33606495Sspeer * How would a guest do this? 
33616495Sspeer * 33626495Sspeer * NPI/NXGE function calls: 33636495Sspeer * 33646495Sspeer * Registers accessed: 33656495Sspeer * 33666495Sspeer * Context: 33676495Sspeer * Service domain 33686495Sspeer */ 33693859Sml29623 nxge_status_t 33703859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 33713859Sml29623 { 33726495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 33736495Sspeer nxge_channel_t tdc; 33746495Sspeer 33756495Sspeer tx_ring_t *ring; 33766495Sspeer tx_mbox_t *mailbox; 33776495Sspeer 33783859Sml29623 npi_handle_t handle; 33796495Sspeer nxge_status_t status; 33806495Sspeer npi_status_t rs; 33813859Sml29623 33823859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 33833859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33846495Sspeer "Recovering from TxPort error...")); 33856495Sspeer 33866495Sspeer if (isLDOMguest(nxgep)) { 33876495Sspeer return (NXGE_OK); 33886495Sspeer } 33896495Sspeer 33906495Sspeer if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 33916495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 33926495Sspeer "<== nxge_tx_port_fatal_err_recover: not initialized")); 33936495Sspeer return (NXGE_ERROR); 33946495Sspeer } 33956495Sspeer 33966495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 33976495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 33986495Sspeer "<== nxge_tx_port_fatal_err_recover: " 33996495Sspeer "NULL ring pointer(s)")); 34006495Sspeer return (NXGE_ERROR); 34016495Sspeer } 34026495Sspeer 34036495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34046495Sspeer if ((1 << tdc) & set->owned.map) { 34056495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34066495Sspeer if (ring) 34076495Sspeer MUTEX_ENTER(&ring->lock); 34086495Sspeer } 34096495Sspeer } 34103859Sml29623 34113859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 34126495Sspeer 34136495Sspeer /* 34146495Sspeer * Stop all the TDCs owned by us. 34156495Sspeer * (The shared TDCs will have been stopped by their owners.) 
34166495Sspeer */ 34176495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34186495Sspeer if ((1 << tdc) & set->owned.map) { 34196495Sspeer ring = nxgep->tx_rings->rings[tdc]; 34206495Sspeer if (ring) { 34216495Sspeer rs = npi_txdma_channel_control 34226495Sspeer (handle, TXDMA_STOP, tdc); 34236495Sspeer if (rs != NPI_SUCCESS) { 34246495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34256495Sspeer "nxge_tx_port_fatal_err_recover " 34266495Sspeer "(channel %d): stop failed ", tdc)); 34276495Sspeer goto fail; 34286495Sspeer } 34296495Sspeer } 34303859Sml29623 } 34313859Sml29623 } 34323859Sml29623 34336495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 34346495Sspeer 34356495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34366495Sspeer if ((1 << tdc) & set->owned.map) { 34376495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34386495Sspeer if (ring) 34396495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, 0); 34403859Sml29623 } 34413859Sml29623 } 34423859Sml29623 34433859Sml29623 /* 34446495Sspeer * Reset all the TDCs. 34453859Sml29623 */ 34466495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 34476495Sspeer 34486495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34496495Sspeer if ((1 << tdc) & set->owned.map) { 34506495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34516495Sspeer if (ring) { 34526495Sspeer if ((rs = npi_txdma_channel_control 34536929Smisaki (handle, TXDMA_RESET, tdc)) 34546495Sspeer != NPI_SUCCESS) { 34556495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34566495Sspeer "nxge_tx_port_fatal_err_recover " 34576495Sspeer "(channel %d) reset channel " 34586495Sspeer "failed 0x%x", tdc, rs)); 34596495Sspeer goto fail; 34606495Sspeer } 34616495Sspeer } 34626495Sspeer /* 34636495Sspeer * Reset the tail (kick) register to 0. 34646495Sspeer * (Hardware will not reset it. Tx overflow fatal 34656495Sspeer * error if tail is not set to 0 after reset! 
34666495Sspeer */ 34676495Sspeer TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 34683859Sml29623 } 34696495Sspeer } 34706495Sspeer 34716495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 34726495Sspeer 34736495Sspeer /* Restart all the TDCs */ 34746495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34756495Sspeer if ((1 << tdc) & set->owned.map) { 34766495Sspeer ring = nxgep->tx_rings->rings[tdc]; 34776495Sspeer if (ring) { 34786495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 34796495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, tdc, 34806495Sspeer ring, mailbox); 34816495Sspeer ring->tx_evmask.value = 0; 34826495Sspeer /* 34836495Sspeer * Initialize the event masks. 34846495Sspeer */ 34856495Sspeer status = nxge_init_txdma_channel_event_mask 34866495Sspeer (nxgep, tdc, &ring->tx_evmask); 34876495Sspeer 34886495Sspeer ring->wr_index_wrap = B_FALSE; 34896495Sspeer ring->wr_index = 0; 34906495Sspeer ring->rd_index = 0; 34916495Sspeer 34926495Sspeer if (status != NXGE_OK) 34936495Sspeer goto fail; 34946495Sspeer if (status != NXGE_OK) 34956495Sspeer goto fail; 34966495Sspeer } 34973859Sml29623 } 34986495Sspeer } 34996495Sspeer 35006495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 35016495Sspeer 35026495Sspeer /* Re-enable all the TDCs */ 35036495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35046495Sspeer if ((1 << tdc) & set->owned.map) { 35056495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35066495Sspeer if (ring) { 35076495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 35086495Sspeer status = nxge_enable_txdma_channel(nxgep, tdc, 35096495Sspeer ring, mailbox); 35106495Sspeer if (status != NXGE_OK) 35116495Sspeer goto fail; 35126495Sspeer } 35136495Sspeer } 35143859Sml29623 } 35153859Sml29623 35163859Sml29623 /* 35176495Sspeer * Unlock all the TDCs. 
35183859Sml29623 */ 35196495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35206495Sspeer if ((1 << tdc) & set->owned.map) { 35216495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 35226495Sspeer if (ring) 35236495Sspeer MUTEX_EXIT(&ring->lock); 35243859Sml29623 } 35253859Sml29623 } 35263859Sml29623 35276495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 35283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 35293859Sml29623 35303859Sml29623 return (NXGE_OK); 35313859Sml29623 35323859Sml29623 fail: 35336495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35346495Sspeer if ((1 << tdc) & set->owned.map) { 35356495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35366495Sspeer if (ring) 35376495Sspeer MUTEX_EXIT(&ring->lock); 35383859Sml29623 } 35393859Sml29623 } 35403859Sml29623 35416495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 35426495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 35433859Sml29623 35443859Sml29623 return (status); 35453859Sml29623 } 35463859Sml29623 35476495Sspeer /* 35486495Sspeer * nxge_txdma_inject_err 35496495Sspeer * 35506495Sspeer * Inject an error into a TDC. 35516495Sspeer * 35526495Sspeer * Arguments: 35536495Sspeer * nxgep 35546495Sspeer * err_id The error to inject. 35556495Sspeer * chan The channel to inject into. 35566495Sspeer * 35576495Sspeer * Notes: 35586495Sspeer * This is called from nxge_main.c:nxge_err_inject() 35596495Sspeer * Has this ioctl ever been used? 
35606495Sspeer * 35616495Sspeer * NPI/NXGE function calls: 35626495Sspeer * npi_txdma_inj_par_error_get() 35636495Sspeer * npi_txdma_inj_par_error_set() 35646495Sspeer * 35656495Sspeer * Registers accessed: 35666495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 35676495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35686495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35696495Sspeer * 35706495Sspeer * Context: 35716495Sspeer * Service domain 35726495Sspeer */ 35733859Sml29623 void 35743859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 35753859Sml29623 { 35763859Sml29623 tdmc_intr_dbg_t tdi; 35773859Sml29623 tdmc_inj_par_err_t par_err; 35783859Sml29623 uint32_t value; 35793859Sml29623 npi_handle_t handle; 35803859Sml29623 35813859Sml29623 switch (err_id) { 35823859Sml29623 35833859Sml29623 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 35843859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 35853859Sml29623 /* Clear error injection source for parity error */ 35863859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 35873859Sml29623 par_err.value = value; 35883859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 35893859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 35903859Sml29623 35913859Sml29623 par_err.bits.ldw.inject_parity_error = (1 << chan); 35923859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 35933859Sml29623 par_err.value = value; 35943859Sml29623 par_err.bits.ldw.inject_parity_error |= (1 << chan); 35953859Sml29623 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 35966929Smisaki (unsigned long long)par_err.value); 35973859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 35983859Sml29623 break; 35993859Sml29623 36003859Sml29623 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 36013859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 36023859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 
36033859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 36043859Sml29623 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 36053859Sml29623 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 36063859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 36073859Sml29623 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36086929Smisaki chan, &tdi.value); 36093859Sml29623 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 36103859Sml29623 tdi.bits.ldw.pref_buf_par_err = 1; 36113859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 36123859Sml29623 tdi.bits.ldw.mbox_err = 1; 36133859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 36143859Sml29623 tdi.bits.ldw.nack_pref = 1; 36153859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 36163859Sml29623 tdi.bits.ldw.nack_pkt_rd = 1; 36173859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 36183859Sml29623 tdi.bits.ldw.pkt_size_err = 1; 36193859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 36203859Sml29623 tdi.bits.ldw.tx_ring_oflow = 1; 36213859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 36223859Sml29623 tdi.bits.ldw.conf_part_err = 1; 36233859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 36243859Sml29623 tdi.bits.ldw.pkt_part_err = 1; 36255125Sjoycey #if defined(__i386) 36265125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 36276929Smisaki tdi.value); 36285125Sjoycey #else 36293859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 36306929Smisaki tdi.value); 36315125Sjoycey #endif 36323859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36336929Smisaki chan, tdi.value); 36343859Sml29623 36353859Sml29623 break; 36363859Sml29623 } 36373859Sml29623 } 3638