/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * nxge_txdma.c: transmit-DMA (TDC) channel management for the nxge
 * 10GbE driver -- mapping, start/stop, reset, and transmit packet
 * header construction.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_txdma.h>
#include <sys/nxge/nxge_hio.h>
#include <npi_tx_rd64.h>
#include <npi_tx_wr64.h>
#include <sys/llc1.h>

/* Tunables (patchable globals; see nxge_impl.h for defaults). */
uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
uint32_t nxge_tx_minfree = 64;
uint32_t nxge_tx_intr_thres = 0;
uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
uint32_t nxge_tx_tiny_pack = 1;
uint32_t nxge_tx_use_bcopy = 1;

/* Tunables defined elsewhere in the driver. */
extern uint32_t nxge_tx_ring_size;
extern uint32_t nxge_bcopy_thresh;
extern uint32_t nxge_dvma_thresh;
extern uint32_t nxge_dma_stream_thresh;
extern dma_method_t nxge_force_dma;
extern uint32_t nxge_cksum_offload;

/* Device register access attributes for PIO. */
extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
/* Device descriptor access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
/* Device buffer access attributes for DMA. */
extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
extern ddi_dma_attr_t nxge_desc_dma_attr;
extern ddi_dma_attr_t nxge_tx_dma_attr;

extern void nxge_tx_ring_task(void *arg);

/* Forward declarations of file-local helpers. */
static nxge_status_t nxge_map_txdma(p_nxge_t, int);

static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);

static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *,
	uint32_t, p_nxge_dma_common_t *,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);

static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);

static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
	p_nxge_dma_common_t *, p_tx_ring_t,
	p_tx_mbox_t *);
static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
	p_tx_ring_t, p_tx_mbox_t);

static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
	p_tx_ring_t, p_tx_mbox_t);
static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);

static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
	p_nxge_ldv_t, tx_cs_t);
static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
	uint16_t, p_tx_ring_t);

static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
	p_tx_ring_t
	ring_p, uint16_t channel);

/*
 * nxge_init_txdma_channels
 *
 * Walk every logical group in the TX set and add each TDC that the
 * group's map claims (via nxge_grp_dc_add()).  On any failure, undo
 * all additions made so far (nxge_grp_dc_remove()) and return
 * NXGE_ERROR.
 */
nxge_status_t
nxge_init_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int i, tdc, count;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));

	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			/* Translate the local group index to the device's. */
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_TX, tdc)))
						goto init_txdma_channels_exit;
				}
			}
		}
		/* Stop once every populated group has been visited. */
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
	return (NXGE_OK);

init_txdma_channels_exit:
	/* Unwind: remove every channel added by the loop above. */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
			map = nxgep->pt_config.tdc_grps[dev_gindex].map;
			for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
				if ((1 << tdc) & map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_TX, tdc);
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	return (NXGE_ERROR);

}

/*
 * nxge_init_txdma_channel
 *
 * Map one TDC (descriptor ring, buffers, mailbox) and start its
 * hardware.  On failure the TDC registers are dumped for diagnosis
 * and any partial mapping is undone.  Also creates the per-channel
 * kstats the first time the channel is initialized.
 */
nxge_status_t
nxge_init_txdma_channel(
	p_nxge_t nxge,
	int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));

	status = nxge_map_txdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_txdma_channel: status 0x%x", status));
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	status = nxge_txdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		(void) nxge_unmap_txdma_channel(nxge, channel);
		(void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
		return (status);
	}

	if (!nxge->statsp->tdc_ksp[channel])
		nxge_setup_tdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));

	return (status);
}

/*
 * nxge_uninit_txdma_channels
 *
 * Remove every TDC this instance owns (set->owned.map).
 */
void
nxge_uninit_txdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));

	if (set->owned.map == 0) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
"nxge_uninit_txdma_channels: no channels")); 1906495Sspeer return; 1916495Sspeer } 1926495Sspeer 1936495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1946495Sspeer if ((1 << tdc) & set->owned.map) { 1956495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 1966495Sspeer } 1976495Sspeer } 1986495Sspeer 1996495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 2006495Sspeer } 2016495Sspeer 2026495Sspeer void 2036495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 2046495Sspeer { 2056495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 2066495Sspeer 2076495Sspeer if (nxgep->statsp->tdc_ksp[channel]) { 2086495Sspeer kstat_delete(nxgep->statsp->tdc_ksp[channel]); 2096495Sspeer nxgep->statsp->tdc_ksp[channel] = 0; 2106495Sspeer } 2116495Sspeer 2126495Sspeer (void) nxge_txdma_stop_channel(nxgep, channel); 2136495Sspeer nxge_unmap_txdma_channel(nxgep, channel); 2143859Sml29623 2153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2166929Smisaki "<== nxge_uninit_txdma_channel")); 2173859Sml29623 } 2183859Sml29623 2193859Sml29623 void 2203859Sml29623 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 2213859Sml29623 uint32_t entries, uint32_t size) 2223859Sml29623 { 2233859Sml29623 size_t tsize; 2243859Sml29623 *dest_p = *src_p; 2253859Sml29623 tsize = size * entries; 2263859Sml29623 dest_p->alength = tsize; 2273859Sml29623 dest_p->nblocks = entries; 2283859Sml29623 dest_p->block_size = size; 2293859Sml29623 dest_p->offset += tsize; 2303859Sml29623 2313859Sml29623 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 2323859Sml29623 src_p->alength -= tsize; 2333859Sml29623 src_p->dma_cookie.dmac_laddress += tsize; 2343859Sml29623 src_p->dma_cookie.dmac_size -= tsize; 2353859Sml29623 } 2363859Sml29623 2376495Sspeer /* 2386495Sspeer * nxge_reset_txdma_channel 2396495Sspeer * 2406495Sspeer * Reset a TDC. 
2416495Sspeer * 2426495Sspeer * Arguments: 2436495Sspeer * nxgep 2446495Sspeer * channel The channel to reset. 2456495Sspeer * reg_data The current TX_CS. 2466495Sspeer * 2476495Sspeer * Notes: 2486495Sspeer * 2496495Sspeer * NPI/NXGE function calls: 2506495Sspeer * npi_txdma_channel_reset() 2516495Sspeer * npi_txdma_channel_control() 2526495Sspeer * 2536495Sspeer * Registers accessed: 2546495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 2556495Sspeer * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 2566495Sspeer * 2576495Sspeer * Context: 2586495Sspeer * Any domain 2596495Sspeer */ 2603859Sml29623 nxge_status_t 2613859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 2623859Sml29623 { 2633859Sml29623 npi_status_t rs = NPI_SUCCESS; 2643859Sml29623 nxge_status_t status = NXGE_OK; 2653859Sml29623 npi_handle_t handle; 2663859Sml29623 2673859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 2683859Sml29623 2693859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2703859Sml29623 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 2713859Sml29623 rs = npi_txdma_channel_reset(handle, channel); 2723859Sml29623 } else { 2733859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 2746929Smisaki channel); 2753859Sml29623 } 2763859Sml29623 2773859Sml29623 if (rs != NPI_SUCCESS) { 2783859Sml29623 status = NXGE_ERROR | rs; 2793859Sml29623 } 2803859Sml29623 2813859Sml29623 /* 2823859Sml29623 * Reset the tail (kick) register to 0. 2833859Sml29623 * (Hardware will not reset it. Tx overflow fatal 2843859Sml29623 * error if tail is not set to 0 after reset! 
2853859Sml29623 */ 2863859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2873859Sml29623 2883859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 2893859Sml29623 return (status); 2903859Sml29623 } 2913859Sml29623 2926495Sspeer /* 2936495Sspeer * nxge_init_txdma_channel_event_mask 2946495Sspeer * 2956495Sspeer * Enable interrupts for a set of events. 2966495Sspeer * 2976495Sspeer * Arguments: 2986495Sspeer * nxgep 2996495Sspeer * channel The channel to map. 3006495Sspeer * mask_p The events to enable. 3016495Sspeer * 3026495Sspeer * Notes: 3036495Sspeer * 3046495Sspeer * NPI/NXGE function calls: 3056495Sspeer * npi_txdma_event_mask() 3066495Sspeer * 3076495Sspeer * Registers accessed: 3086495Sspeer * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 3096495Sspeer * 3106495Sspeer * Context: 3116495Sspeer * Any domain 3126495Sspeer */ 3133859Sml29623 nxge_status_t 3143859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 3153859Sml29623 p_tx_dma_ent_msk_t mask_p) 3163859Sml29623 { 3173859Sml29623 npi_handle_t handle; 3183859Sml29623 npi_status_t rs = NPI_SUCCESS; 3193859Sml29623 nxge_status_t status = NXGE_OK; 3203859Sml29623 3213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3226929Smisaki "<== nxge_init_txdma_channel_event_mask")); 3233859Sml29623 3243859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3253859Sml29623 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 3263859Sml29623 if (rs != NPI_SUCCESS) { 3273859Sml29623 status = NXGE_ERROR | rs; 3283859Sml29623 } 3293859Sml29623 3303859Sml29623 return (status); 3313859Sml29623 } 3323859Sml29623 3336495Sspeer /* 3346495Sspeer * nxge_init_txdma_channel_cntl_stat 3356495Sspeer * 3366495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 3376495Sspeer * 3386495Sspeer * Arguments: 3396495Sspeer * nxgep 3406495Sspeer * channel The channel to stop. 
3416495Sspeer * 3426495Sspeer * Notes: 3436495Sspeer * 3446495Sspeer * NPI/NXGE function calls: 3456495Sspeer * npi_txdma_control_status() 3466495Sspeer * 3476495Sspeer * Registers accessed: 3486495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3496495Sspeer * 3506495Sspeer * Context: 3516495Sspeer * Any domain 3526495Sspeer */ 3533859Sml29623 nxge_status_t 3543859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3553859Sml29623 uint64_t reg_data) 3563859Sml29623 { 3573859Sml29623 npi_handle_t handle; 3583859Sml29623 npi_status_t rs = NPI_SUCCESS; 3593859Sml29623 nxge_status_t status = NXGE_OK; 3603859Sml29623 3613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3626929Smisaki "<== nxge_init_txdma_channel_cntl_stat")); 3633859Sml29623 3643859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3653859Sml29623 rs = npi_txdma_control_status(handle, OP_SET, channel, 3666929Smisaki (p_tx_cs_t)®_data); 3673859Sml29623 3683859Sml29623 if (rs != NPI_SUCCESS) { 3693859Sml29623 status = NXGE_ERROR | rs; 3703859Sml29623 } 3713859Sml29623 3723859Sml29623 return (status); 3733859Sml29623 } 3743859Sml29623 3756495Sspeer /* 3766495Sspeer * nxge_enable_txdma_channel 3776495Sspeer * 3786495Sspeer * Enable a TDC. 3796495Sspeer * 3806495Sspeer * Arguments: 3816495Sspeer * nxgep 3826495Sspeer * channel The channel to enable. 3836495Sspeer * tx_desc_p channel's transmit descriptor ring. 
3846495Sspeer * mbox_p channel's mailbox, 3856495Sspeer * 3866495Sspeer * Notes: 3876495Sspeer * 3886495Sspeer * NPI/NXGE function calls: 3896495Sspeer * npi_txdma_ring_config() 3906495Sspeer * npi_txdma_mbox_config() 3916495Sspeer * npi_txdma_channel_init_enable() 3926495Sspeer * 3936495Sspeer * Registers accessed: 3946495Sspeer * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 3956495Sspeer * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 3966495Sspeer * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 3976495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3986495Sspeer * 3996495Sspeer * Context: 4006495Sspeer * Any domain 4016495Sspeer */ 4023859Sml29623 nxge_status_t 4033859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep, 4043859Sml29623 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 4053859Sml29623 { 4063859Sml29623 npi_handle_t handle; 4073859Sml29623 npi_status_t rs = NPI_SUCCESS; 4083859Sml29623 nxge_status_t status = NXGE_OK; 4093859Sml29623 4103859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 4113859Sml29623 4123859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4133859Sml29623 /* 4143859Sml29623 * Use configuration data composed at init time. 4153859Sml29623 * Write to hardware the transmit ring configurations. 4163859Sml29623 */ 4173859Sml29623 rs = npi_txdma_ring_config(handle, OP_SET, channel, 4186495Sspeer (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 4193859Sml29623 4203859Sml29623 if (rs != NPI_SUCCESS) { 4213859Sml29623 return (NXGE_ERROR | rs); 4223859Sml29623 } 4233859Sml29623 4246495Sspeer if (isLDOMguest(nxgep)) { 4256495Sspeer /* Add interrupt handler for this channel. 
*/ 4266495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK) 4276495Sspeer return (NXGE_ERROR); 4286495Sspeer } 4296495Sspeer 4303859Sml29623 /* Write to hardware the mailbox */ 4313859Sml29623 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 4326929Smisaki (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 4333859Sml29623 4343859Sml29623 if (rs != NPI_SUCCESS) { 4353859Sml29623 return (NXGE_ERROR | rs); 4363859Sml29623 } 4373859Sml29623 4383859Sml29623 /* Start the DMA engine. */ 4393859Sml29623 rs = npi_txdma_channel_init_enable(handle, channel); 4403859Sml29623 4413859Sml29623 if (rs != NPI_SUCCESS) { 4423859Sml29623 return (NXGE_ERROR | rs); 4433859Sml29623 } 4443859Sml29623 4453859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 4463859Sml29623 4473859Sml29623 return (status); 4483859Sml29623 } 4493859Sml29623 4503859Sml29623 void 4513859Sml29623 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 4523859Sml29623 boolean_t l4_cksum, int pkt_len, uint8_t npads, 4536611Sml29623 p_tx_pkt_hdr_all_t pkthdrp, 4546611Sml29623 t_uscalar_t start_offset, 4556611Sml29623 t_uscalar_t stuff_offset) 4563859Sml29623 { 4573859Sml29623 p_tx_pkt_header_t hdrp; 4583859Sml29623 p_mblk_t nmp; 4593859Sml29623 uint64_t tmp; 4603859Sml29623 size_t mblk_len; 4613859Sml29623 size_t iph_len; 4623859Sml29623 size_t hdrs_size; 4633859Sml29623 uint8_t hdrs_buf[sizeof (struct ether_header) + 4646929Smisaki 64 + sizeof (uint32_t)]; 4655505Smisaki uint8_t *cursor; 4663859Sml29623 uint8_t *ip_buf; 4673859Sml29623 uint16_t eth_type; 4683859Sml29623 uint8_t ipproto; 4693859Sml29623 boolean_t is_vlan = B_FALSE; 4703859Sml29623 size_t eth_hdr_size; 4713859Sml29623 4723859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 4733859Sml29623 4743859Sml29623 /* 4753859Sml29623 * Caller should zero out the headers first. 
4763859Sml29623 */ 4773859Sml29623 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 4783859Sml29623 4793859Sml29623 if (fill_len) { 4803859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 4816929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d " 4826929Smisaki "npads %d", pkt_len, npads)); 4833859Sml29623 tmp = (uint64_t)pkt_len; 4843859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 4853859Sml29623 goto fill_tx_header_done; 4863859Sml29623 } 4873859Sml29623 4886611Sml29623 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 4893859Sml29623 4903859Sml29623 /* 4913859Sml29623 * mp is the original data packet (does not include the 4923859Sml29623 * Neptune transmit header). 4933859Sml29623 */ 4943859Sml29623 nmp = mp; 4953859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 4966929Smisaki "mp $%p b_rptr $%p len %d", 4976929Smisaki mp, nmp->b_rptr, MBLKL(nmp))); 4985505Smisaki /* copy ether_header from mblk to hdrs_buf */ 4995505Smisaki cursor = &hdrs_buf[0]; 5005505Smisaki tmp = sizeof (struct ether_vlan_header); 5015505Smisaki while ((nmp != NULL) && (tmp > 0)) { 5025505Smisaki size_t buflen; 5035505Smisaki mblk_len = MBLKL(nmp); 5045512Smisaki buflen = min((size_t)tmp, mblk_len); 5055505Smisaki bcopy(nmp->b_rptr, cursor, buflen); 5065505Smisaki cursor += buflen; 5075505Smisaki tmp -= buflen; 5085505Smisaki nmp = nmp->b_cont; 5095505Smisaki } 5105505Smisaki 5115505Smisaki nmp = mp; 5125505Smisaki mblk_len = MBLKL(nmp); 5133859Sml29623 ip_buf = NULL; 5143859Sml29623 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 5153859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 5166929Smisaki "ether type 0x%x", eth_type, hdrp->value)); 5173859Sml29623 5183859Sml29623 if (eth_type < ETHERMTU) { 5193859Sml29623 tmp = 1ull; 5203859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 5213859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 5226929Smisaki "value 0x%llx", hdrp->value)); 
5233859Sml29623 if (*(hdrs_buf + sizeof (struct ether_header)) 5246929Smisaki == LLC_SNAP_SAP) { 5253859Sml29623 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 5266929Smisaki sizeof (struct ether_header) + 6))); 5273859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 5286929Smisaki "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 5296929Smisaki eth_type)); 5303859Sml29623 } else { 5313859Sml29623 goto fill_tx_header_done; 5323859Sml29623 } 5333859Sml29623 } else if (eth_type == VLAN_ETHERTYPE) { 5343859Sml29623 tmp = 1ull; 5353859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 5363859Sml29623 5373859Sml29623 eth_type = ntohs(((struct ether_vlan_header *) 5386929Smisaki hdrs_buf)->ether_type); 5393859Sml29623 is_vlan = B_TRUE; 5403859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 5416929Smisaki "value 0x%llx", hdrp->value)); 5423859Sml29623 } 5433859Sml29623 5443859Sml29623 if (!is_vlan) { 5453859Sml29623 eth_hdr_size = sizeof (struct ether_header); 5463859Sml29623 } else { 5473859Sml29623 eth_hdr_size = sizeof (struct ether_vlan_header); 5483859Sml29623 } 5493859Sml29623 5503859Sml29623 switch (eth_type) { 5513859Sml29623 case ETHERTYPE_IP: 5523859Sml29623 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 5533859Sml29623 ip_buf = nmp->b_rptr + eth_hdr_size; 5543859Sml29623 mblk_len -= eth_hdr_size; 5553859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5563859Sml29623 if (mblk_len > (iph_len + sizeof (uint32_t))) { 5573859Sml29623 ip_buf = nmp->b_rptr; 5583859Sml29623 ip_buf += eth_hdr_size; 5593859Sml29623 } else { 5603859Sml29623 ip_buf = NULL; 5613859Sml29623 } 5623859Sml29623 5633859Sml29623 } 5643859Sml29623 if (ip_buf == NULL) { 5653859Sml29623 hdrs_size = 0; 5663859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 5673859Sml29623 while ((nmp) && (hdrs_size < 5686929Smisaki sizeof (hdrs_buf))) { 5693859Sml29623 mblk_len = (size_t)nmp->b_wptr - 5706929Smisaki (size_t)nmp->b_rptr; 5713859Sml29623 if (mblk_len >= 5726929Smisaki (sizeof 
(hdrs_buf) - hdrs_size)) 5733859Sml29623 mblk_len = sizeof (hdrs_buf) - 5746929Smisaki hdrs_size; 5753859Sml29623 bcopy(nmp->b_rptr, 5766929Smisaki &hdrs_buf[hdrs_size], mblk_len); 5773859Sml29623 hdrs_size += mblk_len; 5783859Sml29623 nmp = nmp->b_cont; 5793859Sml29623 } 5803859Sml29623 ip_buf = hdrs_buf; 5813859Sml29623 ip_buf += eth_hdr_size; 5823859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5833859Sml29623 } 5843859Sml29623 5853859Sml29623 ipproto = ip_buf[9]; 5863859Sml29623 5873859Sml29623 tmp = (uint64_t)iph_len; 5883859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 5893859Sml29623 tmp = (uint64_t)(eth_hdr_size >> 1); 5903859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 5913859Sml29623 5923859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 5936929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 5946929Smisaki "tmp 0x%x", 5956929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 5966929Smisaki ipproto, tmp)); 5973859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 5986929Smisaki "value 0x%llx", hdrp->value)); 5993859Sml29623 6003859Sml29623 break; 6013859Sml29623 6023859Sml29623 case ETHERTYPE_IPV6: 6033859Sml29623 hdrs_size = 0; 6043859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 6053859Sml29623 while ((nmp) && (hdrs_size < 6066929Smisaki sizeof (hdrs_buf))) { 6073859Sml29623 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 6083859Sml29623 if (mblk_len >= 6096929Smisaki (sizeof (hdrs_buf) - hdrs_size)) 6103859Sml29623 mblk_len = sizeof (hdrs_buf) - 6116929Smisaki hdrs_size; 6123859Sml29623 bcopy(nmp->b_rptr, 6136929Smisaki &hdrs_buf[hdrs_size], mblk_len); 6143859Sml29623 hdrs_size += mblk_len; 6153859Sml29623 nmp = nmp->b_cont; 6163859Sml29623 } 6173859Sml29623 ip_buf = hdrs_buf; 6183859Sml29623 ip_buf += eth_hdr_size; 6193859Sml29623 6203859Sml29623 tmp = 1ull; 6213859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 6223859Sml29623 6233859Sml29623 tmp 
= (eth_hdr_size >> 1); 6243859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 6253859Sml29623 6263859Sml29623 /* byte 6 is the next header protocol */ 6273859Sml29623 ipproto = ip_buf[6]; 6283859Sml29623 6293859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 6306929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 6316929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 6326929Smisaki ipproto)); 6333859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 6346929Smisaki "value 0x%llx", hdrp->value)); 6353859Sml29623 6363859Sml29623 break; 6373859Sml29623 6383859Sml29623 default: 6393859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 6403859Sml29623 goto fill_tx_header_done; 6413859Sml29623 } 6423859Sml29623 6433859Sml29623 switch (ipproto) { 6443859Sml29623 case IPPROTO_TCP: 6453859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6466611Sml29623 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 6473859Sml29623 if (l4_cksum) { 6486611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 6496611Sml29623 hdrp->value |= 6506611Sml29623 (((uint64_t)(start_offset >> 1)) << 6516611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 6526611Sml29623 hdrp->value |= 6536611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 6546611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 6556611Sml29623 6563859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6576611Sml29623 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 6586611Sml29623 "value 0x%llx", hdrp->value)); 6593859Sml29623 } 6603859Sml29623 6613859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 6626611Sml29623 "value 0x%llx", hdrp->value)); 6633859Sml29623 break; 6643859Sml29623 6653859Sml29623 case IPPROTO_UDP: 6663859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 6673859Sml29623 if (l4_cksum) { 6686611Sml29623 if (!nxge_cksum_offload) { 6696611Sml29623 uint16_t *up; 6706611Sml29623 uint16_t cksum; 6716611Sml29623 t_uscalar_t stuff_len; 
6726611Sml29623 6736611Sml29623 /* 6746611Sml29623 * The checksum field has the 6756611Sml29623 * partial checksum. 6766611Sml29623 * IP_CSUM() macro calls ip_cksum() which 6776611Sml29623 * can add in the partial checksum. 6786611Sml29623 */ 6796611Sml29623 cksum = IP_CSUM(mp, start_offset, 0); 6806611Sml29623 stuff_len = stuff_offset; 6816611Sml29623 nmp = mp; 6826611Sml29623 mblk_len = MBLKL(nmp); 6836611Sml29623 while ((nmp != NULL) && 6846611Sml29623 (mblk_len < stuff_len)) { 6856611Sml29623 stuff_len -= mblk_len; 6866611Sml29623 nmp = nmp->b_cont; 6876611Sml29623 } 6886611Sml29623 ASSERT(nmp); 6896611Sml29623 up = (uint16_t *)(nmp->b_rptr + stuff_len); 6906611Sml29623 6916611Sml29623 *up = cksum; 6926611Sml29623 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 6936611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6946611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 6956611Sml29623 "use sw cksum " 6966611Sml29623 "write to $%p cksum 0x%x content up 0x%x", 6976611Sml29623 stuff_len, 6986611Sml29623 up, 6996611Sml29623 cksum, 7006611Sml29623 *up)); 7016611Sml29623 } else { 7026611Sml29623 /* Hardware will compute the full checksum */ 7036611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 7046611Sml29623 hdrp->value |= 7056611Sml29623 (((uint64_t)(start_offset >> 1)) << 7066611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 7076611Sml29623 hdrp->value |= 7086611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 7096611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 7106611Sml29623 7116611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7126611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 7136611Sml29623 " use partial checksum " 7146611Sml29623 "cksum 0x%x ", 7156611Sml29623 "value 0x%llx", 7166611Sml29623 stuff_offset, 7176611Sml29623 IP_CSUM(mp, start_offset, 0), 7186611Sml29623 hdrp->value)); 7196611Sml29623 } 7203859Sml29623 } 7216611Sml29623 7223859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7236929Smisaki "==> nxge_tx_pkt_hdr_init: UDP" 7246929Smisaki "value 0x%llx", hdrp->value)); 7253859Sml29623 
		break;

	default:
		goto fill_tx_header_done;
	}

fill_tx_header_done:
	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_fill_tx_hdr: pkt_len %d "
	    "npads %d value 0x%llx", pkt_len, npads, hdrp->value));

	NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
}

/*
 * nxge_tx_pkt_header_reserve
 *
 *	Allocate a fresh message block sized for the internal transmit
 *	packet header (TX_PKT_HEADER_SIZE bytes), link the caller's
 *	message <mp> behind it, and back b_rptr up so the new block
 *	fronts the chain with exactly the header's worth of space.
 *
 * Arguments:
 * 	mp	The message to prepend the header block to.
 * 	npads	Unused here (see ARGSUSED).
 *
 * Returns:
 *	The new head of the chain, or NULL if allocb() fails.
 */
/*ARGSUSED*/
p_mblk_t
nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
{
	p_mblk_t newmp = NULL;

	if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "<== nxge_tx_pkt_header_reserve: allocb failed"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_header_reserve: get new mp"));
	DB_TYPE(newmp) = M_DATA;
	/*
	 * Park both pointers at the end of the buffer, then pull
	 * b_rptr back by TX_PKT_HEADER_SIZE: the new block holds the
	 * header and nothing else.
	 */
	newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
	linkb(newmp, mp);
	newmp->b_rptr -= TX_PKT_HEADER_SIZE;

	NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
	    "b_rptr $%p b_wptr $%p",
	    newmp->b_rptr, newmp->b_wptr));

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_header_reserve: use new mp"));

	return (newmp);
}

/*
 * nxge_tx_pkt_nmblocks
 *
 *	Walk one packet's mblk chain and estimate how many transmit
 *	gather descriptors it will consume, reshaping the chain where
 *	hardware limits demand it:
 *
 *	- runs of small fragments (< nxge_bcopy_thresh) are counted as
 *	  one bcopy'd descriptor until the run reaches the threshold;
 *	- a fragment longer than TX_MAX_TRANSFER_LENGTH is split in
 *	  place with dupb();
 *	- when the count approaches TX_MAX_GATHER_POINTERS, the rest of
 *	  the chain is flattened with msgpullup().
 *
 * Arguments:
 * 	mp		Head of the packet's chain (modified in place).
 * 	tot_xfer_len_p	Out: total payload length of the chain.
 *
 * Returns:
 *	The estimated descriptor count, or 0 on allocation failure.
 */
int
nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
{
	uint_t nmblks;
	ssize_t len;
	uint_t pkt_len;
	p_mblk_t nmp, bmp, tmp;
	uint8_t *b_wptr;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
	    "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));

	nmp = mp;
	bmp = mp;
	nmblks = 0;
	pkt_len = 0;
	*tot_xfer_len_p = 0;

	while (nmp) {
		len = MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		/* Zero-length blocks contribute nothing; skip them. */
		if (len <= 0) {
			bmp = nmp;
			nmp = nmp->b_cont;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len (0) pkt_len %d nmblks %d",
			    pkt_len, nmblks));
			continue;
		}

		*tot_xfer_len_p += len;
		NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
		    "len %d pkt_len %d nmblks %d tot_xfer_len %d",
		    len, pkt_len, nmblks,
		    *tot_xfer_len_p));

		if (len < nxge_bcopy_thresh) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (< thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/*
			 * Small fragments get bcopy'd together: only
			 * the first of a run costs a descriptor.
			 */
			if (pkt_len == 0)
				nmblks++;
			pkt_len += len;
			if (pkt_len >= nxge_bcopy_thresh) {
				pkt_len = 0;
				len = 0;
				nmp = bmp;
			}
		} else {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: "
			    "len %d (> thresh) pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			pkt_len = 0;
			nmblks++;
			/*
			 * Hardware limits the transfer length to 4K.
			 * If len is more than 4K, we need to break
			 * it up to at most 2 more blocks.
			 */
			if (len > TX_MAX_TRANSFER_LENGTH) {
				uint32_t nsegs;

				nsegs = 1;
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "==> nxge_tx_pkt_nmblocks: "
				    "len %d pkt_len %d nmblks %d nsegs %d",
				    len, pkt_len, nmblks, nsegs));
				if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
					++nsegs;
				}
				do {
					b_wptr = nmp->b_rptr +
					    TX_MAX_TRANSFER_LENGTH;
					nmp->b_wptr = b_wptr;
					if ((tmp = dupb(nmp)) == NULL) {
						return (0);
					}
					/*
					 * NOTE(review): nmp->b_wptr was
					 * truncated to b_wptr before the
					 * dupb() above, so tmp inherits
					 * that wptr and, after the two
					 * assignments below, ends up
					 * zero-length (b_rptr == b_wptr).
					 * Verify this split path against
					 * dupb(9F) semantics.
					 */
					tmp->b_rptr = b_wptr;
					tmp->b_wptr = nmp->b_wptr;
					tmp->b_cont = nmp->b_cont;
					nmp->b_cont = tmp;
					nmblks++;
					if (--nsegs) {
						nmp = tmp;
					}
				} while (nsegs);
				nmp = tmp;
			}
		}

		/*
		 * Hardware limits the transmit gather pointers to 15.
		 */
		if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
		    TX_MAX_GATHER_POINTERS) {
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_tx_pkt_nmblocks: pull msg - "
			    "len %d pkt_len %d nmblks %d",
			    len, pkt_len, nmblks));
			/* Pull all message blocks from b_cont */
			if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
				return (0);
			}
			freemsg(nmp->b_cont);
			nmp->b_cont = tmp;
			pkt_len = 0;
		}
		bmp = nmp;
		nmp = nmp->b_cont;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
	    "nmblks %d len %d tot_xfer_len %d",
	    mp->b_rptr, mp->b_wptr, nmblks,
	    MBLKL(mp), *tot_xfer_len_p));

	return (nmblks);
}

/*
 * nxge_txdma_reclaim
 *
 *	Reclaim transmit descriptors the hardware has completed: read
 *	the hardware ring-head register, walk from rd_index up to the
 *	head, release each buffer's DMA resources (DVMA unload or DDI
 *	DMA unbind), free the attached message, and update statistics.
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	The ring to reclaim from.
 * 	nmblks		Descriptors the caller needs free; 0 means
 *			"just reclaim".
 *
 * Returns:
 *	B_TRUE if the ring can accept <nmblks> more descriptors (keeping
 *	the TX_FULL_MARK reserve); B_FALSE otherwise.
 */
boolean_t
nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
{
	boolean_t status = B_TRUE;
	p_nxge_dma_common_t tx_desc_dma_p;
	nxge_dma_common_t desc_area;
	p_tx_desc_t tx_desc_ring_vp;
	p_tx_desc_t tx_desc_p;
	p_tx_desc_t tx_desc_pp;
	tx_desc_t r_tx_desc;
	p_tx_msg_t tx_msg_ring;
	p_tx_msg_t tx_msg_p;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint32_t pkt_len;
	uint_t tx_rd_index;
	uint16_t head_index, tail_index;
	uint8_t tdc;
	boolean_t head_wrap, tail_wrap;
	p_nxge_tx_ring_stats_t tdc_stats;
	int rc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));

	/*
	 * Skip the hardware work unless enough descriptors are pending
	 * (nxge_reclaim_pending) or the caller actually needs space
	 * (nmblks != 0).
	 */
	status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
	    (nmblks != 0));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
	    tx_ring_p->descs_pending, nxge_reclaim_pending,
	    nmblks));
	if (!status) {
		tx_desc_dma_p = &tx_ring_p->tdc_desc;
		desc_area = tx_ring_p->tdc_desc;
		handle = NXGE_DEV_NPI_HANDLE(nxgep);
		/* The first assignment is immediately superseded below. */
		tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
		tx_desc_ring_vp =
		    (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
		tx_rd_index = tx_ring_p->rd_index;
		tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
		tx_msg_ring = tx_ring_p->tx_msg_ring;
		tx_msg_p = &tx_msg_ring[tx_rd_index];
		tdc = tx_ring_p->tdc;
		tdc_stats = tx_ring_p->tdc_stats;
		/* Track the high-water mark of pending descriptors. */
		if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
			tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
		}

		tail_index = tx_ring_p->wr_index;
		tail_wrap = tx_ring_p->wr_index_wrap;

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
		    "tail_index %d tail_wrap %d "
		    "tx_desc_p $%p ($%p) ",
		    tdc, tx_rd_index, tail_index, tail_wrap,
		    tx_desc_p, (*(uint64_t *)tx_desc_p)));
		/*
		 * Read the hardware maintained transmit head
		 * and wrap around bit.
		 */
		TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
		head_index = tx_head.bits.ldw.head;
		head_wrap = tx_head.bits.ldw.wrap;
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: "
		    "tx_rd_index %d tail %d tail_wrap %d "
		    "head %d wrap %d",
		    tx_rd_index, tail_index, tail_wrap,
		    head_index, head_wrap));

		/*
		 * head == tail is ambiguous: the wrap bits decide
		 * between a completely empty and a completely full ring.
		 */
		if (head_index == tail_index) {
			if (TXDMA_RING_EMPTY(head_index, head_wrap,
			    tail_index, tail_wrap) &&
			    (head_index == tx_rd_index)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: EMPTY"));
				return (B_TRUE);
			}

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking "
			    "if ring full"));
			if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
			    tail_wrap)) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: full"));
				return (B_FALSE);
			}
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_txdma_reclaim: tx_rd_index and head_index"));

		/* Work on a local snapshot of each descriptor. */
		tx_desc_pp = &r_tx_desc;
		while ((tx_rd_index != head_index) &&
		    (tx_ring_p->descs_pending != 0)) {

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: Checking if pending"));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "descs_pending %d ",
			    tx_ring_p->descs_pending));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "(tx_desc_p $%p)",
			    tx_rd_index, head_index,
			    tx_desc_p));

			tx_desc_pp->value = tx_desc_p->value;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: "
			    "(tx_rd_index %d head_index %d "
			    "tx_desc_p $%p (desc value 0x%llx) ",
			    tx_rd_index, head_index,
			    tx_desc_pp, (*(uint64_t *)tx_desc_pp)));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: dump desc:"));

			/*
			 * Byte count comes from the descriptor; the
			 * SOP bit counts one packet per chain.
			 */
			pkt_len = tx_desc_pp->bits.hdw.tr_len;
			tdc_stats->obytes += pkt_len;
			tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: pkt_len %d "
			    "tdc channel %d opackets %d",
			    pkt_len,
			    tdc,
			    tdc_stats->opackets));

			if (tx_msg_p->flags.dma_type == USE_DVMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "tx_desc_p = $%p "
				    "tx_desc_pp = $%p "
				    "index = %d",
				    tx_desc_p,
				    tx_desc_pp,
				    tx_ring_p->rd_index));
				(void) dvma_unload(tx_msg_p->dvma_handle,
				    0, -1);
				tx_msg_p->dvma_handle = NULL;
				if (tx_ring_p->dvma_wr_index ==
				    tx_ring_p->dvma_wrap_mask) {
					tx_ring_p->dvma_wr_index = 0;
				} else {
					tx_ring_p->dvma_wr_index++;
				}
				tx_ring_p->dvma_pending--;
			} else if (tx_msg_p->flags.dma_type ==
			    USE_DMA) {
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_txdma_reclaim: "
				    "USE DMA"));
				if (rc = ddi_dma_unbind_handle
				    (tx_msg_p->dma_handle)) {
					cmn_err(CE_WARN, "!nxge_reclaim: "
					    "ddi_dma_unbind_handle "
					    "failed. status %d", rc);
				}
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_txdma_reclaim: count packets"));
			/*
			 * count a chained packet only once.
			 */
			if (tx_msg_p->tx_message != NULL) {
				freemsg(tx_msg_p->tx_message);
				tx_msg_p->tx_message = NULL;
			}

			tx_msg_p->flags.dma_type = USE_NONE;
			tx_rd_index = tx_ring_p->rd_index;
			tx_rd_index = (tx_rd_index + 1) &
			    tx_ring_p->tx_wrap_mask;
			tx_ring_p->rd_index = tx_rd_index;
			tx_ring_p->descs_pending--;
			tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
			tx_msg_p = &tx_msg_ring[tx_rd_index];
		}

		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
		if (status) {
			/* Room again: atomically clear the queueing flag. */
			(void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
		}
	} else {
		status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
		    (int)tx_ring_p->descs_pending - TX_FULL_MARK));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "<== nxge_txdma_reclaim status = 0x%08x", status));

	return (status);
}

/*
 * nxge_tx_intr
 *
 *	Process a TDC interrupt
 *
 * Arguments:
 * 	arg1	A Logical Device state Vector (LSV) data structure.
 * 	arg2	nxge_t *
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *	npi_intr_ldg_mgmt_set()
 *
 *	nxge_tx_err_evnts()
 *	nxge_txdma_reclaim()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	PIO_LDSV
 *
 * Context:
 *	Any domain
 */
uint_t
nxge_tx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	uint32_t vindex;
	npi_handle_t handle;
	tx_cs_t cs;
	p_tx_ring_t *tx_rings;
	p_tx_ring_t tx_ring_p;
	npi_status_t rs = NPI_SUCCESS;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	nxge_status_t status = NXGE_OK;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
		    nxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Trust the ldv's back-pointer over a stale/NULL arg2. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
	    nxgep, ldvp));

	/* Claim but otherwise ignore interrupts before the MAC is up. */
	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_tx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * This interrupt handler is for a specific
	 * transmit dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Get the control and status for this channel. */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
	    "channel %d",
	    nxgep, ldvp, channel));

	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
	vindex = ldvp->vdma_index;
	NXGE_DEBUG_MSG((nxgep, INT_CTL,
	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
	    channel, vindex, rs));
	/* "mk" (marked) set means descriptors completed: reclaim. */
	if (!rs && cs.bits.ldw.mk) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set)",
		    channel, vindex, rs));
		tx_rings = nxgep->tx_rings->rings;
		tx_ring_p = tx_rings[vindex];
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr:channel %d ring index %d "
		    "status 0x%08x (mk bit set, calling reclaim)",
		    channel, vindex, rs));

		nxge_tx_ring_task((void *)tx_ring_p);
	}

	/*
	 * Process other transmit control and status.
	 * Check the ldv state.
	 */
	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "==> nxge_tx_intr: rearm"));
		if (status == NXGE_OK) {
			/* Guest domains rearm through the HIO path. */
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
				    B_TRUE, ldgp->ldg_timer);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
	serviced = DDI_INTR_CLAIMED;
	return (serviced);
}

/* Legacy entry point, marked "Dead" in the original annotation. */
void
nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
}

/* Legacy stop-then-restart sequence, marked "Dead" in the original. */
void
nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));

	(void) nxge_txdma_stop(nxgep);

	(void) nxge_fixup_txdma_rings(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_tx_mac_enable(nxgep);
	(void) nxge_txdma_hw_kick(nxgep);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
}

/*
 * nxge_txdma_channel_disable
 *
 *	Disable one TDC, retrying with an injected error if the first
 *	stop attempt does not report stop-done.
 *
 * Returns:
 *	The NPI status of the final disable attempt.
 */
npi_status_t
nxge_txdma_channel_disable(
	nxge_t *nxge,
	int channel)
{
	npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
	npi_status_t rs;
	tdmc_intr_dbg_t intr_dbg;

	/*
	 * Stop the dma channel and wait for the stop-done.
	 * If the stop-done bit is not present, then force
	 * an error so TXC will stop.
	 * All channels bound to this port need to be stopped
	 * and reset after injecting an interrupt error.
	 */
	rs = npi_txdma_channel_disable(handle, channel);
	NXGE_DEBUG_MSG((nxge, MEM3_CTL,
	    "==> nxge_txdma_channel_disable(%d) "
	    "rs 0x%x", channel, rs));
	if (rs != NPI_SUCCESS) {
		/* Inject any error */
		intr_dbg.value = 0;
		intr_dbg.bits.ldw.nack_pref = 1;
		/*
		 * NOTE(review): both debug messages below pass
		 * (rs, channel) where the format strings expect
		 * (channel, rs) — the values print swapped.
		 */
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_hw_mode: "
		    "channel %d (stop failed 0x%x) "
		    "(inject err)", rs, channel));
		(void) npi_txdma_inj_int_error_set(
		    handle, channel, &intr_dbg);
		rs = npi_txdma_channel_disable(handle, channel);
		NXGE_DEBUG_MSG((nxge, MEM3_CTL,
		    "==> nxge_txdma_hw_mode: "
		    "channel %d (stop again 0x%x) "
		    "(after inject err)",
		    rs, channel));
	}

	return (rs);
}

/*
 * nxge_txdma_hw_mode
 *
 *	Toggle all TDCs on (enable) or off (disable).
 *
 * Arguments:
 * 	nxgep
 * 	enable	Enable or disable a TDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_enable(TX_CS)
 *	npi_txdma_channel_disable(TX_CS)
 *	npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
	nxge_grp_set_t *set = &nxgep->tx_set;

	npi_handle_t handle;
	nxge_status_t status;
	npi_status_t rs;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_mode: enable mode %d", enable));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_mode: not initialized"));
		return (NXGE_ERROR);
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
		return (NXGE_ERROR);
	}

	/* Enable or disable all of the TDCs owned by us. */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_mode: channel %d", tdc));
				if (enable) {
					rs = npi_txdma_channel_enable
					    (handle, tdc);
					NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
					    "==> nxge_txdma_hw_mode: "
					    "channel %d (enable) rs 0x%x",
					    tdc, rs));
				} else {
					rs = nxge_txdma_channel_disable
					    (nxgep, tdc);
				}
			}
		}
	}

	/*
	 * NOTE(review): if no TDC is owned (or no ring exists), <rs>
	 * is read uninitialized here; only the last channel's status
	 * is ever reported.  Confirm callers guarantee at least one
	 * owned channel.
	 */
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_txdma_hw_mode: status 0x%x", status));

	return (status);
}

/* Enable a single transmit DMA channel. */
void
nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_enable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* enable the transmit dma channels */
	(void) npi_txdma_channel_enable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
}

/* Disable a single transmit DMA channel (no error injection retry). */
void
nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "==> nxge_txdma_disable_channel: channel %d", channel));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* stop the transmit dma channels */
	(void) npi_txdma_channel_disable(handle, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
}

/*
 * nxge_txdma_stop_inj_err
 *
 *	Stop a TDC. If at first we don't succeed, inject an error.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_channel_disable()
 *	npi_txdma_inj_int_error_set()
 *	#if defined(NXGE_DEBUG)
 *	nxge_txdma_regs_dump_channels(nxgep);
 *	#endif
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *	TDMC_INTR_DBG	DMC + 0x40060 Transmit DMA Interrupt Debug
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
{
	npi_handle_t handle;
	tdmc_intr_dbg_t intr_dbg;
	int status;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (status == NXGE_OK) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK", channel));
		return (status);
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
	    "injecting error", channel, rs));
	/* Inject any error */
	intr_dbg.value = 0;
	intr_dbg.bits.ldw.nack_pref = 1;
	(void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);

	/* Stop done bit will be set as a result of error injection */
	rs = npi_txdma_channel_disable(handle, channel);
	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
	if (!(rs & NPI_TXDMA_STOP_FAILED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_stop_inj_err (channel %d): "
		    "stopped OK ", channel));
		return (status);
	}

#if	defined(NXGE_DEBUG)
	nxge_txdma_regs_dump_channels(nxgep);
#endif
	/*
	 * NOTE(review): the format below has one conversion but two
	 * arguments, and "(channel)" is missing its %d.
	 */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) "
	    " (injected error but still not stopped)", channel, rs));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
	return (status);
}

/*
 * Reset the software state of every TDC ring we own (after reclaiming
 * whatever the hardware has completed).
 */
/*ARGSUSED*/
void
nxge_fixup_txdma_rings(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_fixup_txdma_rings: channel %d",
				    tdc));
				nxge_txdma_fixup_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
}

/* Look up <channel>'s ring and fix it up if the mapping is sane. */
/*ARGSUSED*/
void
nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
		return;
	}

	/*
	 * NOTE(review): the message below passes two values for one
	 * %d conversion ("passed channel" has no specifier); the same
	 * pattern recurs in the sibling *_channel routines.
	 */
	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fix_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_fixup_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
}

/* Reclaim a ring, then zero its software indexes/state under the lock. */
/*ARGSUSED*/
void
nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: NULL ring pointer"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_fixup_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	/* Drain what is reclaimable, then reset the ring's soft state. */
	MUTEX_ENTER(&ring_p->lock);
	(void) nxge_txdma_reclaim(nxgep, ring_p, 0);
	ring_p->rd_index = 0;
	ring_p->wr_index = 0;
	ring_p->ring_head.value = 0;
	ring_p->ring_kick_tail.value = 0;
	ring_p->descs_pending = 0;
	MUTEX_EXIT(&ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
}

/* Kick every TDC ring we own (see nxge_txdma_hw_kick_channel). */
/*ARGSUSED*/
void
nxge_txdma_hw_kick(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
		return;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
				    "==> nxge_txdma_hw_kick: channel %d", tdc));
				nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
}

/* Look up <channel>'s ring and kick it if the mapping is sane. */
/*ARGSUSED*/
void
nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t ring_p;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));

	ring_p = nxge_txdma_get_ring(nxgep, channel);
	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    " nxge_txdma_kick_channel"));
		return;
	}

	if (ring_p->tdc != channel) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_kick_channel: channel not matched "
		    "ring tdc %d passed channel",
		    ring_p->tdc, channel));
		return;
	}

	nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
}

/*
 * NOTE(review): this routine performs no work beyond debug tracing in
 * the visible implementation — no tail/kick register is written here.
 */
/*ARGSUSED*/
void
nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
{

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));

	if (ring_p == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
}

/*
 * nxge_check_tx_hang
 *
 *	Check the state of all TDCs belonging to nxgep.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_hw.c:nxge_check_hw_state().
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

	/* Nothing to check until the hardware and MAC are up. */
	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		goto nxge_check_tx_hang_exit;
	}

	/*
	 * Needs inputs from hardware for regs:
	 *	head index had not moved since last timeout.
	 *	packets not transmitted or stuffed registers.
	 */
	if (nxge_txdma_hung(nxgep)) {
		nxge_fixup_hung_txdma_rings(nxgep);
	}

nxge_check_tx_hang_exit:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 *	Scan all TDCs owned by <nxgep> (and not shared with a guest
 *	domain) for a hung channel.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 *
 * Returns:
 *	B_TRUE as soon as one hung channel is found; B_FALSE otherwise.
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;
	boolean_t shared;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hung: NULL ring pointer(s)"));
		return (B_FALSE);
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		/*
		 * Grab the shared state of the TDC (under the HIO
		 * lock in a service domain).
		 */
		if (isLDOMservice(nxgep)) {
			nxge_hio_data_t *nhd =
			    (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;

			MUTEX_ENTER(&nhd->lock);
			shared = nxgep->tdc_is_shared[tdc];
			MUTEX_EXIT(&nhd->lock);
		} else {
			shared = B_FALSE;
		}

		/*
		 * Only check channels we own outright; shared
		 * channels are skipped.
		 */
		if (((1 << tdc) & set->owned.map) && !shared) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_txdma_hung: TDC %d hung",
					    tdc));
					return (B_TRUE);
				}
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

	return (B_FALSE);
}

/*
 * nxge_txdma_channel_hung
 *
 *	Determine whether one TDC appears hung (consulted by
 *	nxge_txdma_hung() to decide whether rings need fixing up).
 *
 * Arguments:
 * 	nxgep
 * 	tx_ring_p	<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010 Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
	uint16_t head_index, tail_index;
	boolean_t head_wrap, tail_wrap;
	npi_handle_t handle;
	tx_ring_hdl_t tx_head;
	uint_t tx_rd_index;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_txdma_channel_hung: channel %d", channel));
MUTEX_ENTER(&tx_ring_p->lock); 17823859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 17833859Sml29623 17843859Sml29623 tail_index = tx_ring_p->wr_index; 17853859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap; 17863859Sml29623 tx_rd_index = tx_ring_p->rd_index; 17873859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 17883859Sml29623 17893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17906929Smisaki "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 17916929Smisaki "tail_index %d tail_wrap %d ", 17926929Smisaki channel, tx_rd_index, tail_index, tail_wrap)); 17933859Sml29623 /* 17943859Sml29623 * Read the hardware maintained transmit head 17953859Sml29623 * and wrap around bit. 17963859Sml29623 */ 17973859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 17983859Sml29623 head_index = tx_head.bits.ldw.head; 17993859Sml29623 head_wrap = tx_head.bits.ldw.wrap; 18003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18016929Smisaki "==> nxge_txdma_channel_hung: " 18026929Smisaki "tx_rd_index %d tail %d tail_wrap %d " 18036929Smisaki "head %d wrap %d", 18046929Smisaki tx_rd_index, tail_index, tail_wrap, 18056929Smisaki head_index, head_wrap)); 18063859Sml29623 18073859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap, 18086929Smisaki tail_index, tail_wrap) && 18096929Smisaki (head_index == tx_rd_index)) { 18103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18116929Smisaki "==> nxge_txdma_channel_hung: EMPTY")); 18123859Sml29623 return (B_FALSE); 18133859Sml29623 } 18143859Sml29623 18153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18166929Smisaki "==> nxge_txdma_channel_hung: Checking if ring full")); 18173859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 18186929Smisaki tail_wrap)) { 18193859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18206929Smisaki "==> nxge_txdma_channel_hung: full")); 18213859Sml29623 return (B_TRUE); 18223859Sml29623 } 18233859Sml29623 18243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 18253859Sml29623 
18263859Sml29623 return (B_FALSE); 18273859Sml29623 } 18283859Sml29623 18296495Sspeer /* 18306495Sspeer * nxge_fixup_hung_txdma_rings 18316495Sspeer * 18326495Sspeer * Disable a TDC. 18336495Sspeer * 18346495Sspeer * Arguments: 18356495Sspeer * nxgep 18366495Sspeer * channel The channel to reset. 18376495Sspeer * reg_data The current TX_CS. 18386495Sspeer * 18396495Sspeer * Notes: 18406495Sspeer * Called by nxge_check_tx_hang() 18416495Sspeer * 18426495Sspeer * NPI/NXGE function calls: 18436495Sspeer * npi_txdma_ring_head_get() 18446495Sspeer * 18456495Sspeer * Registers accessed: 18466495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 18476495Sspeer * 18486495Sspeer * Context: 18496495Sspeer * Any domain 18506495Sspeer */ 18513859Sml29623 /*ARGSUSED*/ 18523859Sml29623 void 18533859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 18543859Sml29623 { 18556495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 18566495Sspeer int tdc; 18573859Sml29623 18583859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 18596495Sspeer 18606495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 18613859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18626495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 18633859Sml29623 return; 18643859Sml29623 } 18653859Sml29623 18666495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 18676495Sspeer if ((1 << tdc) & set->owned.map) { 18686495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 18696495Sspeer if (ring) { 18706495Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 18716495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 18726495Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d", 18736495Sspeer tdc)); 18746495Sspeer } 18756495Sspeer } 18763859Sml29623 } 18773859Sml29623 18783859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 18793859Sml29623 } 18803859Sml29623 18816495Sspeer /* 18826495Sspeer * nxge_txdma_fixup_hung_channel 18836495Sspeer * 18846495Sspeer 
* 'Fix' a hung TDC. 18856495Sspeer * 18866495Sspeer * Arguments: 18876495Sspeer * nxgep 18886495Sspeer * channel The channel to fix. 18896495Sspeer * 18906495Sspeer * Notes: 18916495Sspeer * Called by nxge_fixup_hung_txdma_rings() 18926495Sspeer * 18936495Sspeer * 1. Reclaim the TDC. 18946495Sspeer * 2. Disable the TDC. 18956495Sspeer * 18966495Sspeer * NPI/NXGE function calls: 18976495Sspeer * nxge_txdma_reclaim() 18986495Sspeer * npi_txdma_channel_disable(TX_CS) 18996495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 19006495Sspeer * 19016495Sspeer * Registers accessed: 19026495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 19036495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 19046495Sspeer * 19056495Sspeer * Context: 19066495Sspeer * Any domain 19076495Sspeer */ 19083859Sml29623 /*ARGSUSED*/ 19093859Sml29623 void 19103859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 19113859Sml29623 { 19123859Sml29623 p_tx_ring_t ring_p; 19133859Sml29623 19143859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 19153859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 19163859Sml29623 if (ring_p == NULL) { 19173859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19186929Smisaki "<== nxge_txdma_fix_hung_channel")); 19193859Sml29623 return; 19203859Sml29623 } 19213859Sml29623 19223859Sml29623 if (ring_p->tdc != channel) { 19233859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19246929Smisaki "<== nxge_txdma_fix_hung_channel: channel not matched " 19256929Smisaki "ring tdc %d passed channel", 19266929Smisaki ring_p->tdc, channel)); 19273859Sml29623 return; 19283859Sml29623 } 19293859Sml29623 19303859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 19313859Sml29623 19323859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 19333859Sml29623 } 19343859Sml29623 19353859Sml29623 /*ARGSUSED*/ 19363859Sml29623 void 19373859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 
p_tx_ring_t ring_p, 19383859Sml29623 uint16_t channel) 19393859Sml29623 { 19403859Sml29623 npi_handle_t handle; 19413859Sml29623 tdmc_intr_dbg_t intr_dbg; 19423859Sml29623 int status = NXGE_OK; 19433859Sml29623 19443859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 19453859Sml29623 19463859Sml29623 if (ring_p == NULL) { 19473859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19486929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer")); 19493859Sml29623 return; 19503859Sml29623 } 19513859Sml29623 19523859Sml29623 if (ring_p->tdc != channel) { 19533859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19546929Smisaki "<== nxge_txdma_fixup_hung_channel: channel " 19556929Smisaki "not matched " 19566929Smisaki "ring tdc %d passed channel", 19576929Smisaki ring_p->tdc, channel)); 19583859Sml29623 return; 19593859Sml29623 } 19603859Sml29623 19613859Sml29623 /* Reclaim descriptors */ 19623859Sml29623 MUTEX_ENTER(&ring_p->lock); 19633859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 19643859Sml29623 MUTEX_EXIT(&ring_p->lock); 19653859Sml29623 19663859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19673859Sml29623 /* 19683859Sml29623 * Stop the dma channel waits for the stop done. 19693859Sml29623 * If the stop done bit is not set, then force 19703859Sml29623 * an error. 
19713859Sml29623 */ 19723859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19733859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19743859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19756929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped OK " 19766929Smisaki "ring tdc %d passed channel %d", 19776929Smisaki ring_p->tdc, channel)); 19783859Sml29623 return; 19793859Sml29623 } 19803859Sml29623 19813859Sml29623 /* Inject any error */ 19823859Sml29623 intr_dbg.value = 0; 19833859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 19843859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 19853859Sml29623 19863859Sml29623 /* Stop done bit will be set as a result of error injection */ 19873859Sml29623 status = npi_txdma_channel_disable(handle, channel); 19883859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 19893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19906929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped again" 19916929Smisaki "ring tdc %d passed channel", 19926929Smisaki ring_p->tdc, channel)); 19933859Sml29623 return; 19943859Sml29623 } 19953859Sml29623 19963859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19976929Smisaki "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 19986929Smisaki "ring tdc %d passed channel", 19996929Smisaki ring_p->tdc, channel)); 20003859Sml29623 20013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 20023859Sml29623 } 20033859Sml29623 20043859Sml29623 /*ARGSUSED*/ 20053859Sml29623 void 20063859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep) 20073859Sml29623 { 20086495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 20096495Sspeer int tdc; 20106495Sspeer 20116495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 20126495Sspeer 20136495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20143859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20156495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20163859Sml29623 return; 20173859Sml29623 } 20183859Sml29623 20196495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20206495Sspeer if ((1 << tdc) & set->owned.map) { 20216495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20226495Sspeer if (ring) { 20236495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20246495Sspeer "==> nxge_reclaim_rings: TDC %d", tdc)); 20256495Sspeer MUTEX_ENTER(&ring->lock); 20268275SEric Cheng (void) nxge_txdma_reclaim(nxgep, ring, 0); 20276495Sspeer MUTEX_EXIT(&ring->lock); 20286495Sspeer } 20296495Sspeer } 20303859Sml29623 } 20313859Sml29623 20323859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 20333859Sml29623 } 20343859Sml29623 20353859Sml29623 void 20363859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 20373859Sml29623 { 20386495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 20396495Sspeer npi_handle_t handle; 20406495Sspeer int tdc; 20416495Sspeer 20426495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 20433859Sml29623 20443859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 20456495Sspeer 20466495Sspeer if (!isLDOMguest(nxgep)) { 20476495Sspeer (void) npi_txdma_dump_fzc_regs(handle); 20486495Sspeer 20496495Sspeer /* Dump TXC registers. 
*/ 20506495Sspeer (void) npi_txc_dump_fzc_regs(handle); 20516495Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 20523859Sml29623 } 20533859Sml29623 20546495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20553859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20566495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20573859Sml29623 return; 20583859Sml29623 } 20593859Sml29623 20606495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20616495Sspeer if ((1 << tdc) & set->owned.map) { 20626495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20636495Sspeer if (ring) { 20646495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20656495Sspeer "==> nxge_txdma_regs_dump_channels: " 20666495Sspeer "TDC %d", tdc)); 20676495Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc); 20686495Sspeer 20696495Sspeer /* Dump TXC registers, if able to. */ 20706495Sspeer if (!isLDOMguest(nxgep)) { 20716495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20726495Sspeer "==> nxge_txdma_regs_dump_channels:" 20736495Sspeer " FZC TDC %d", tdc)); 20746495Sspeer (void) npi_txc_dump_tdc_fzc_regs 20756495Sspeer (handle, tdc); 20766495Sspeer } 20776495Sspeer nxge_txdma_regs_dump(nxgep, tdc); 20786495Sspeer } 20796495Sspeer } 20803859Sml29623 } 20813859Sml29623 20823859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 20833859Sml29623 } 20843859Sml29623 20853859Sml29623 void 20863859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 20873859Sml29623 { 20883859Sml29623 npi_handle_t handle; 20893859Sml29623 tx_ring_hdl_t hdl; 20903859Sml29623 tx_ring_kick_t kick; 20913859Sml29623 tx_cs_t cs; 20923859Sml29623 txc_control_t control; 20933859Sml29623 uint32_t bitmap = 0; 20943859Sml29623 uint32_t burst = 0; 20953859Sml29623 uint32_t bytes = 0; 20963859Sml29623 dma_log_page_t cfg; 20973859Sml29623 20983859Sml29623 printf("\n\tfunc # %d tdc %d ", 20996929Smisaki nxgep->function_num, channel); 21003859Sml29623 cfg.page_num = 0; 21013859Sml29623 handle 
= NXGE_DEV_NPI_HANDLE(nxgep); 21023859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 21033859Sml29623 printf("\n\tlog page func %d valid page 0 %d", 21046929Smisaki cfg.func_num, cfg.valid); 21053859Sml29623 cfg.page_num = 1; 21063859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 21073859Sml29623 printf("\n\tlog page func %d valid page 1 %d", 21086929Smisaki cfg.func_num, cfg.valid); 21093859Sml29623 21103859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 21113859Sml29623 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 21123859Sml29623 printf("\n\thead value is 0x%0llx", 21136929Smisaki (long long)hdl.value); 21143859Sml29623 printf("\n\thead index %d", hdl.bits.ldw.head); 21153859Sml29623 printf("\n\tkick value is 0x%0llx", 21166929Smisaki (long long)kick.value); 21173859Sml29623 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 21183859Sml29623 21193859Sml29623 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 21203859Sml29623 printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 21213859Sml29623 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 21223859Sml29623 21233859Sml29623 (void) npi_txc_control(handle, OP_GET, &control); 21243859Sml29623 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 21253859Sml29623 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 21263859Sml29623 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 21273859Sml29623 21283859Sml29623 printf("\n\tTXC port control 0x%0llx", 21296929Smisaki (long long)control.value); 21303859Sml29623 printf("\n\tTXC port bitmap 0x%x", bitmap); 21313859Sml29623 printf("\n\tTXC max burst %d", burst); 21323859Sml29623 printf("\n\tTXC bytes xmt %d\n", bytes); 21333859Sml29623 21343859Sml29623 { 21353859Sml29623 ipp_status_t status; 21363859Sml29623 21373859Sml29623 (void) npi_ipp_get_status(handle, nxgep->function_num, &status); 21385125Sjoycey #if defined(__i386) 
21395125Sjoycey printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value); 21405125Sjoycey #else 21413859Sml29623 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 21425125Sjoycey #endif 21433859Sml29623 } 21443859Sml29623 } 21453859Sml29623 21463859Sml29623 /* 21476495Sspeer * nxge_tdc_hvio_setup 21486495Sspeer * 21496495Sspeer * I'm not exactly sure what this code does. 21506495Sspeer * 21516495Sspeer * Arguments: 21526495Sspeer * nxgep 21536495Sspeer * channel The channel to map. 21546495Sspeer * 21556495Sspeer * Notes: 21566495Sspeer * 21576495Sspeer * NPI/NXGE function calls: 21586495Sspeer * na 21596495Sspeer * 21606495Sspeer * Context: 21616495Sspeer * Service domain? 21623859Sml29623 */ 21636495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21646495Sspeer static void 21656495Sspeer nxge_tdc_hvio_setup( 21666495Sspeer nxge_t *nxgep, int channel) 21673859Sml29623 { 21686495Sspeer nxge_dma_common_t *data; 21696495Sspeer nxge_dma_common_t *control; 21706495Sspeer tx_ring_t *ring; 21716495Sspeer 21726495Sspeer ring = nxgep->tx_rings->rings[channel]; 21736495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 21746495Sspeer 21756495Sspeer ring->hv_set = B_FALSE; 21766495Sspeer 21776495Sspeer ring->hv_tx_buf_base_ioaddr_pp = 21786495Sspeer (uint64_t)data->orig_ioaddr_pp; 21796495Sspeer ring->hv_tx_buf_ioaddr_size = 21806495Sspeer (uint64_t)data->orig_alength; 21816495Sspeer 21826495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21836929Smisaki "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 21846929Smisaki "orig vatopa base io $%p orig_len 0x%llx (%d)", 21856929Smisaki ring->hv_tx_buf_base_ioaddr_pp, 21866929Smisaki ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 21876929Smisaki data->ioaddr_pp, data->orig_vatopa, 21886929Smisaki data->orig_alength, data->orig_alength)); 21896495Sspeer 21906495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 21916495Sspeer 21926495Sspeer 
ring->hv_tx_cntl_base_ioaddr_pp = 21936495Sspeer (uint64_t)control->orig_ioaddr_pp; 21946495Sspeer ring->hv_tx_cntl_ioaddr_size = 21956495Sspeer (uint64_t)control->orig_alength; 21966495Sspeer 21976495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 21986929Smisaki "hv cntl base io $%p orig ioaddr_pp ($%p) " 21996929Smisaki "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 22006929Smisaki ring->hv_tx_cntl_base_ioaddr_pp, 22016929Smisaki control->orig_ioaddr_pp, control->orig_vatopa, 22026929Smisaki ring->hv_tx_cntl_ioaddr_size, 22036929Smisaki control->orig_alength, control->orig_alength)); 22046495Sspeer } 22053859Sml29623 #endif 22063859Sml29623 22076495Sspeer static nxge_status_t 22086495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel) 22096495Sspeer { 22106495Sspeer nxge_dma_common_t **pData; 22116495Sspeer nxge_dma_common_t **pControl; 22126495Sspeer tx_ring_t **pRing, *ring; 22136495Sspeer tx_mbox_t **mailbox; 22146495Sspeer uint32_t num_chunks; 22156495Sspeer 22166495Sspeer nxge_status_t status = NXGE_OK; 22176495Sspeer 22186495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 22196495Sspeer 22206495Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) { 22216495Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 22226495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 22236495Sspeer "<== nxge_map_txdma: buf not allocated")); 22246495Sspeer return (NXGE_ERROR); 22256495Sspeer } 22263859Sml29623 } 22273859Sml29623 22286495Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 22296495Sspeer return (NXGE_ERROR); 22306495Sspeer 22316495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 22326495Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 22336495Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 22346495Sspeer pRing = &nxgep->tx_rings->rings[channel]; 22356495Sspeer mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 22366495Sspeer 22376495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> 
nxge_map_txdma: " 22386929Smisaki "tx_rings $%p tx_desc_rings $%p", 22396929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings)); 22403859Sml29623 22413859Sml29623 /* 22426495Sspeer * Map descriptors from the buffer pools for <channel>. 22436495Sspeer */ 22446495Sspeer 22456495Sspeer /* 22466495Sspeer * Set up and prepare buffer blocks, descriptors 22476495Sspeer * and mailbox. 22483859Sml29623 */ 22496495Sspeer status = nxge_map_txdma_channel(nxgep, channel, 22506495Sspeer pData, pRing, num_chunks, pControl, mailbox); 22516495Sspeer if (status != NXGE_OK) { 22526495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 22536929Smisaki "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 22546929Smisaki "returned 0x%x", 22556929Smisaki nxgep, channel, status)); 22566495Sspeer return (status); 22576495Sspeer } 22586495Sspeer 22596495Sspeer ring = *pRing; 22606495Sspeer 22616495Sspeer ring->index = (uint16_t)channel; 22626495Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 22636495Sspeer 22646495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 22656495Sspeer if (isLDOMguest(nxgep)) { 22666495Sspeer (void) nxge_tdc_lp_conf(nxgep, channel); 22676495Sspeer } else { 22686495Sspeer nxge_tdc_hvio_setup(nxgep, channel); 22696495Sspeer } 22703859Sml29623 #endif 22716495Sspeer 22726495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 22736495Sspeer "(status 0x%x channel %d)", status, channel)); 22743859Sml29623 22753859Sml29623 return (status); 22763859Sml29623 } 22773859Sml29623 22783859Sml29623 static nxge_status_t 22793859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 22803859Sml29623 p_nxge_dma_common_t *dma_buf_p, 22813859Sml29623 p_tx_ring_t *tx_desc_p, 22823859Sml29623 uint32_t num_chunks, 22833859Sml29623 p_nxge_dma_common_t *dma_cntl_p, 22843859Sml29623 p_tx_mbox_t *tx_mbox_p) 22853859Sml29623 { 22863859Sml29623 int status = NXGE_OK; 22873859Sml29623 22883859Sml29623 /* 22893859Sml29623 * Set up and prepare buffer blocks, descriptors 
22903859Sml29623 * and mailbox. 22913859Sml29623 */ 22926495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 22936929Smisaki "==> nxge_map_txdma_channel (channel %d)", channel)); 22943859Sml29623 /* 22953859Sml29623 * Transmit buffer blocks 22963859Sml29623 */ 22973859Sml29623 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 22986929Smisaki dma_buf_p, tx_desc_p, num_chunks); 22993859Sml29623 if (status != NXGE_OK) { 23003859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 23016929Smisaki "==> nxge_map_txdma_channel (channel %d): " 23026929Smisaki "map buffer failed 0x%x", channel, status)); 23033859Sml29623 goto nxge_map_txdma_channel_exit; 23043859Sml29623 } 23053859Sml29623 23063859Sml29623 /* 23073859Sml29623 * Transmit block ring, and mailbox. 23083859Sml29623 */ 23093859Sml29623 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 23106929Smisaki tx_mbox_p); 23113859Sml29623 23123859Sml29623 goto nxge_map_txdma_channel_exit; 23133859Sml29623 23143859Sml29623 nxge_map_txdma_channel_fail1: 23156495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 23166929Smisaki "==> nxge_map_txdma_channel: unmap buf" 23176929Smisaki "(status 0x%x channel %d)", 23186929Smisaki status, channel)); 23193859Sml29623 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 23203859Sml29623 23213859Sml29623 nxge_map_txdma_channel_exit: 23226495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 23236929Smisaki "<== nxge_map_txdma_channel: " 23246929Smisaki "(status 0x%x channel %d)", 23256929Smisaki status, channel)); 23263859Sml29623 23273859Sml29623 return (status); 23283859Sml29623 } 23293859Sml29623 23303859Sml29623 /*ARGSUSED*/ 23313859Sml29623 static void 23326495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 23333859Sml29623 { 23346495Sspeer tx_ring_t *ring; 23356495Sspeer tx_mbox_t *mailbox; 23366495Sspeer 23373859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 23386929Smisaki "==> nxge_unmap_txdma_channel (channel %d)", channel)); 23393859Sml29623 /* 23403859Sml29623 * 
unmap tx block ring, and mailbox. 23413859Sml29623 */ 23426495Sspeer ring = nxgep->tx_rings->rings[channel]; 23436495Sspeer mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 23446495Sspeer 23456495Sspeer (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 23463859Sml29623 23473859Sml29623 /* unmap buffer blocks */ 23486495Sspeer (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 23496495Sspeer 23506495Sspeer nxge_free_txb(nxgep, channel); 23513859Sml29623 23523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 23533859Sml29623 } 23543859Sml29623 23556495Sspeer /* 23566495Sspeer * nxge_map_txdma_channel_cfg_ring 23576495Sspeer * 23586495Sspeer * Map a TDC into our kernel space. 23596495Sspeer * This function allocates all of the per-channel data structures. 23606495Sspeer * 23616495Sspeer * Arguments: 23626495Sspeer * nxgep 23636495Sspeer * dma_channel The channel to map. 23646495Sspeer * dma_cntl_p 23656495Sspeer * tx_ring_p dma_channel's transmit ring 23666495Sspeer * tx_mbox_p dma_channel's mailbox 23676495Sspeer * 23686495Sspeer * Notes: 23696495Sspeer * 23706495Sspeer * NPI/NXGE function calls: 23716495Sspeer * nxge_setup_dma_common() 23726495Sspeer * 23736495Sspeer * Registers accessed: 23746495Sspeer * none. 
23756495Sspeer * 23766495Sspeer * Context: 23776495Sspeer * Any domain 23786495Sspeer */ 23793859Sml29623 /*ARGSUSED*/ 23803859Sml29623 static void 23813859Sml29623 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 23823859Sml29623 p_nxge_dma_common_t *dma_cntl_p, 23833859Sml29623 p_tx_ring_t tx_ring_p, 23843859Sml29623 p_tx_mbox_t *tx_mbox_p) 23853859Sml29623 { 23863859Sml29623 p_tx_mbox_t mboxp; 23873859Sml29623 p_nxge_dma_common_t cntl_dmap; 23883859Sml29623 p_nxge_dma_common_t dmap; 23893859Sml29623 p_tx_rng_cfig_t tx_ring_cfig_p; 23903859Sml29623 p_tx_ring_kick_t tx_ring_kick_p; 23913859Sml29623 p_tx_cs_t tx_cs_p; 23923859Sml29623 p_tx_dma_ent_msk_t tx_evmask_p; 23933859Sml29623 p_txdma_mbh_t mboxh_p; 23943859Sml29623 p_txdma_mbl_t mboxl_p; 23953859Sml29623 uint64_t tx_desc_len; 23963859Sml29623 23973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 23986929Smisaki "==> nxge_map_txdma_channel_cfg_ring")); 23993859Sml29623 24003859Sml29623 cntl_dmap = *dma_cntl_p; 24013859Sml29623 24023859Sml29623 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 24033859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 24046929Smisaki sizeof (tx_desc_t)); 24053859Sml29623 /* 24063859Sml29623 * Zero out transmit ring descriptors. 
24073859Sml29623 */ 24083859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 24093859Sml29623 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 24103859Sml29623 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 24113859Sml29623 tx_cs_p = &(tx_ring_p->tx_cs); 24123859Sml29623 tx_evmask_p = &(tx_ring_p->tx_evmask); 24133859Sml29623 tx_ring_cfig_p->value = 0; 24143859Sml29623 tx_ring_kick_p->value = 0; 24153859Sml29623 tx_cs_p->value = 0; 24163859Sml29623 tx_evmask_p->value = 0; 24173859Sml29623 24183859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24196929Smisaki "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 24206929Smisaki dma_channel, 24216929Smisaki dmap->dma_cookie.dmac_laddress)); 24223859Sml29623 24233859Sml29623 tx_ring_cfig_p->value = 0; 24243859Sml29623 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 24253859Sml29623 tx_ring_cfig_p->value = 24266929Smisaki (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 24276929Smisaki (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 24283859Sml29623 24293859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24306929Smisaki "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 24316929Smisaki dma_channel, 24326929Smisaki tx_ring_cfig_p->value)); 24333859Sml29623 24343859Sml29623 tx_cs_p->bits.ldw.rst = 1; 24353859Sml29623 24363859Sml29623 /* Map in mailbox */ 24373859Sml29623 mboxp = (p_tx_mbox_t) 24386929Smisaki KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 24393859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 24403859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 24413859Sml29623 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 24423859Sml29623 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 24433859Sml29623 mboxh_p->value = mboxl_p->value = 0; 24443859Sml29623 24453859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24466929Smisaki "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 24476929Smisaki dmap->dma_cookie.dmac_laddress)); 24483859Sml29623 24493859Sml29623 
mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 24506929Smisaki TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 24513859Sml29623 24523859Sml29623 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 24536929Smisaki TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 24543859Sml29623 24553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24566929Smisaki "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 24576929Smisaki dmap->dma_cookie.dmac_laddress)); 24583859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24596929Smisaki "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 24606929Smisaki "mbox $%p", 24616929Smisaki mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 24623859Sml29623 tx_ring_p->page_valid.value = 0; 24633859Sml29623 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 24643859Sml29623 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 24653859Sml29623 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 24663859Sml29623 tx_ring_p->page_hdl.value = 0; 24673859Sml29623 24683859Sml29623 tx_ring_p->page_valid.bits.ldw.page0 = 1; 24693859Sml29623 tx_ring_p->page_valid.bits.ldw.page1 = 1; 24703859Sml29623 24713859Sml29623 tx_ring_p->max_burst.value = 0; 24723859Sml29623 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 24733859Sml29623 24743859Sml29623 *tx_mbox_p = mboxp; 24753859Sml29623 24763859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24776929Smisaki "<== nxge_map_txdma_channel_cfg_ring")); 24783859Sml29623 } 24793859Sml29623 24803859Sml29623 /*ARGSUSED*/ 24813859Sml29623 static void 24823859Sml29623 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 24833859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 24843859Sml29623 { 24853859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24866929Smisaki "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 24876929Smisaki tx_ring_p->tdc)); 24883859Sml29623 24893859Sml29623 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 24903859Sml29623 24913859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 24926929Smisaki "<== nxge_unmap_txdma_channel_cfg_ring")); 24933859Sml29623 } 24943859Sml29623 24956495Sspeer /* 24966495Sspeer * nxge_map_txdma_channel_buf_ring 24976495Sspeer * 24986495Sspeer * 24996495Sspeer * Arguments: 25006495Sspeer * nxgep 25016495Sspeer * channel The channel to map. 25026495Sspeer * dma_buf_p 25036495Sspeer * tx_desc_p channel's descriptor ring 25046495Sspeer * num_chunks 25056495Sspeer * 25066495Sspeer * Notes: 25076495Sspeer * 25086495Sspeer * NPI/NXGE function calls: 25096495Sspeer * nxge_setup_dma_common() 25106495Sspeer * 25116495Sspeer * Registers accessed: 25126495Sspeer * none. 25136495Sspeer * 25146495Sspeer * Context: 25156495Sspeer * Any domain 25166495Sspeer */ 25173859Sml29623 static nxge_status_t 25183859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 25193859Sml29623 p_nxge_dma_common_t *dma_buf_p, 25203859Sml29623 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 25213859Sml29623 { 25223859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 25233859Sml29623 p_nxge_dma_common_t dmap; 25243859Sml29623 nxge_os_dma_handle_t tx_buf_dma_handle; 25253859Sml29623 p_tx_ring_t tx_ring_p; 25263859Sml29623 p_tx_msg_t tx_msg_ring; 25273859Sml29623 nxge_status_t status = NXGE_OK; 25283859Sml29623 int ddi_status = DDI_SUCCESS; 25293859Sml29623 int i, j, index; 25303859Sml29623 uint32_t size, bsize; 25313859Sml29623 uint32_t nblocks, nmsgs; 25328275SEric Cheng char qname[TASKQ_NAMELEN]; 25333859Sml29623 25343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25356929Smisaki "==> nxge_map_txdma_channel_buf_ring")); 25363859Sml29623 25373859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 25383859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25393859Sml29623 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 25403859Sml29623 "chunks bufp $%p", 25416929Smisaki channel, num_chunks, dma_bufp)); 25423859Sml29623 25433859Sml29623 nmsgs = 0; 25443859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 
25453859Sml29623 nmsgs += tmp_bufp->nblocks; 25463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25476929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 25486929Smisaki "bufp $%p nblocks %d nmsgs %d", 25496929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 25503859Sml29623 } 25513859Sml29623 if (!nmsgs) { 25523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25536929Smisaki "<== nxge_map_txdma_channel_buf_ring: channel %d " 25546929Smisaki "no msg blocks", 25556929Smisaki channel)); 25563859Sml29623 status = NXGE_ERROR; 25573859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 25583859Sml29623 } 25593859Sml29623 25603859Sml29623 tx_ring_p = (p_tx_ring_t) 25616929Smisaki KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 25623859Sml29623 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 25636929Smisaki (void *)nxgep->interrupt_cookie); 25643952Sml29623 25656713Sspeer (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 25666886Sspeer tx_ring_p->tx_ring_busy = B_FALSE; 25673952Sml29623 tx_ring_p->nxgep = nxgep; 25688275SEric Cheng tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL; 25698275SEric Cheng (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d", 25708275SEric Cheng nxgep->instance, channel); 25718275SEric Cheng tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1, 25728275SEric Cheng TASKQ_DEFAULTPRI, 0); 25738275SEric Cheng if (tx_ring_p->taskq == NULL) { 25748275SEric Cheng goto nxge_map_txdma_channel_buf_ring_fail1; 25758275SEric Cheng } 25768275SEric Cheng 25773859Sml29623 /* 25783859Sml29623 * Allocate transmit message rings and handles for packets 25793859Sml29623 * not to be copied to premapped buffers. 
25803859Sml29623 */ 25813859Sml29623 size = nmsgs * sizeof (tx_msg_t); 25823859Sml29623 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 25833859Sml29623 for (i = 0; i < nmsgs; i++) { 25843859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 25856929Smisaki DDI_DMA_DONTWAIT, 0, 25866929Smisaki &tx_msg_ring[i].dma_handle); 25873859Sml29623 if (ddi_status != DDI_SUCCESS) { 25883859Sml29623 status |= NXGE_DDI_FAILED; 25893859Sml29623 break; 25903859Sml29623 } 25913859Sml29623 } 25923859Sml29623 if (i < nmsgs) { 25934185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25944185Sspeer "Allocate handles failed.")); 25953859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 25963859Sml29623 } 25973859Sml29623 25983859Sml29623 tx_ring_p->tdc = channel; 25993859Sml29623 tx_ring_p->tx_msg_ring = tx_msg_ring; 26003859Sml29623 tx_ring_p->tx_ring_size = nmsgs; 26013859Sml29623 tx_ring_p->num_chunks = num_chunks; 26023859Sml29623 if (!nxge_tx_intr_thres) { 26033859Sml29623 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 26043859Sml29623 } 26053859Sml29623 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 26063859Sml29623 tx_ring_p->rd_index = 0; 26073859Sml29623 tx_ring_p->wr_index = 0; 26083859Sml29623 tx_ring_p->ring_head.value = 0; 26093859Sml29623 tx_ring_p->ring_kick_tail.value = 0; 26103859Sml29623 tx_ring_p->descs_pending = 0; 26113859Sml29623 26123859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26136929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 26146929Smisaki "actual tx desc max %d nmsgs %d " 26156929Smisaki "(config nxge_tx_ring_size %d)", 26166929Smisaki channel, tx_ring_p->tx_ring_size, nmsgs, 26176929Smisaki nxge_tx_ring_size)); 26183859Sml29623 26193859Sml29623 /* 26203859Sml29623 * Map in buffers from the buffer pool. 
26213859Sml29623 */ 26223859Sml29623 index = 0; 26233859Sml29623 bsize = dma_bufp->block_size; 26243859Sml29623 26253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 26266929Smisaki "dma_bufp $%p tx_rng_p $%p " 26276929Smisaki "tx_msg_rng_p $%p bsize %d", 26286929Smisaki dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 26293859Sml29623 26303859Sml29623 tx_buf_dma_handle = dma_bufp->dma_handle; 26313859Sml29623 for (i = 0; i < num_chunks; i++, dma_bufp++) { 26323859Sml29623 bsize = dma_bufp->block_size; 26333859Sml29623 nblocks = dma_bufp->nblocks; 26343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26356929Smisaki "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 26366929Smisaki "size %d dma_bufp $%p", 26376929Smisaki i, sizeof (nxge_dma_common_t), dma_bufp)); 26383859Sml29623 26393859Sml29623 for (j = 0; j < nblocks; j++) { 26403859Sml29623 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 26413859Sml29623 dmap = &tx_msg_ring[index++].buf_dma; 26423859Sml29623 #ifdef TX_MEM_DEBUG 26433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26446929Smisaki "==> nxge_map_txdma_channel_buf_ring: j %d" 26456929Smisaki "dmap $%p", i, dmap)); 26463859Sml29623 #endif 26473859Sml29623 nxge_setup_dma_common(dmap, dma_bufp, 1, 26486929Smisaki bsize); 26493859Sml29623 } 26503859Sml29623 } 26513859Sml29623 26523859Sml29623 if (i < num_chunks) { 26534185Sspeer status = NXGE_ERROR; 26543859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 26553859Sml29623 } 26563859Sml29623 26573859Sml29623 *tx_desc_p = tx_ring_p; 26583859Sml29623 26593859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 26603859Sml29623 26613859Sml29623 nxge_map_txdma_channel_buf_ring_fail1: 26628275SEric Cheng if (tx_ring_p->taskq) { 26638275SEric Cheng ddi_taskq_destroy(tx_ring_p->taskq); 26648275SEric Cheng tx_ring_p->taskq = NULL; 26653952Sml29623 } 26663952Sml29623 26673859Sml29623 index--; 26683859Sml29623 for (; index >= 0; index--) { 26694185Sspeer if 
(tx_msg_ring[index].dma_handle != NULL) { 26704185Sspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 26713859Sml29623 } 26723859Sml29623 } 26733859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 26744185Sspeer KMEM_FREE(tx_msg_ring, size); 26753859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 26763859Sml29623 26774185Sspeer status = NXGE_ERROR; 26784185Sspeer 26793859Sml29623 nxge_map_txdma_channel_buf_ring_exit: 26803859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26816929Smisaki "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 26823859Sml29623 26833859Sml29623 return (status); 26843859Sml29623 } 26853859Sml29623 26863859Sml29623 /*ARGSUSED*/ 26873859Sml29623 static void 26883859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 26893859Sml29623 { 26903859Sml29623 p_tx_msg_t tx_msg_ring; 26913859Sml29623 p_tx_msg_t tx_msg_p; 26923859Sml29623 int i; 26933859Sml29623 26943859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26956929Smisaki "==> nxge_unmap_txdma_channel_buf_ring")); 26963859Sml29623 if (tx_ring_p == NULL) { 26973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 26986929Smisaki "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 26993859Sml29623 return; 27003859Sml29623 } 27013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27026929Smisaki "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 27036929Smisaki tx_ring_p->tdc)); 27043859Sml29623 27053859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 27066495Sspeer 27076495Sspeer /* 27086495Sspeer * Since the serialization thread, timer thread and 27096495Sspeer * interrupt thread can all call the transmit reclaim, 27106495Sspeer * the unmapping function needs to acquire the lock 27116495Sspeer * to free those buffers which were transmitted 27126495Sspeer * by the hardware already. 
27136495Sspeer */ 27146495Sspeer MUTEX_ENTER(&tx_ring_p->lock); 27156495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 27166495Sspeer "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 27176495Sspeer "channel %d", 27186495Sspeer tx_ring_p->tdc)); 27196495Sspeer (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 27206495Sspeer 27213859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 27223859Sml29623 tx_msg_p = &tx_msg_ring[i]; 27233859Sml29623 if (tx_msg_p->tx_message != NULL) { 27243859Sml29623 freemsg(tx_msg_p->tx_message); 27253859Sml29623 tx_msg_p->tx_message = NULL; 27263859Sml29623 } 27273859Sml29623 } 27283859Sml29623 27293859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 27303859Sml29623 if (tx_msg_ring[i].dma_handle != NULL) { 27313859Sml29623 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 27323859Sml29623 } 27336495Sspeer tx_msg_ring[i].dma_handle = NULL; 27343859Sml29623 } 27353859Sml29623 27366495Sspeer MUTEX_EXIT(&tx_ring_p->lock); 27376495Sspeer 27388275SEric Cheng if (tx_ring_p->taskq) { 27398275SEric Cheng ddi_taskq_destroy(tx_ring_p->taskq); 27408275SEric Cheng tx_ring_p->taskq = NULL; 27413952Sml29623 } 27423952Sml29623 27433859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 27443859Sml29623 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 27453859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 27463859Sml29623 27473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27486929Smisaki "<== nxge_unmap_txdma_channel_buf_ring")); 27493859Sml29623 } 27503859Sml29623 27513859Sml29623 static nxge_status_t 27526495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 27533859Sml29623 { 27543859Sml29623 p_tx_rings_t tx_rings; 27553859Sml29623 p_tx_ring_t *tx_desc_rings; 27563859Sml29623 p_tx_mbox_areas_t tx_mbox_areas_p; 27573859Sml29623 p_tx_mbox_t *tx_mbox_p; 27583859Sml29623 nxge_status_t status = NXGE_OK; 27593859Sml29623 27603859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 27613859Sml29623 27623859Sml29623 
tx_rings = nxgep->tx_rings; 27633859Sml29623 if (tx_rings == NULL) { 27643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27656929Smisaki "<== nxge_txdma_hw_start: NULL ring pointer")); 27663859Sml29623 return (NXGE_ERROR); 27673859Sml29623 } 27683859Sml29623 tx_desc_rings = tx_rings->rings; 27693859Sml29623 if (tx_desc_rings == NULL) { 27703859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27716929Smisaki "<== nxge_txdma_hw_start: NULL ring pointers")); 27723859Sml29623 return (NXGE_ERROR); 27733859Sml29623 } 27743859Sml29623 27756495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27766495Sspeer "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 27773859Sml29623 27783859Sml29623 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 27793859Sml29623 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 27803859Sml29623 27816495Sspeer status = nxge_txdma_start_channel(nxgep, channel, 27826495Sspeer (p_tx_ring_t)tx_desc_rings[channel], 27836495Sspeer (p_tx_mbox_t)tx_mbox_p[channel]); 27846495Sspeer if (status != NXGE_OK) { 27856495Sspeer goto nxge_txdma_hw_start_fail1; 27863859Sml29623 } 27873859Sml29623 27883859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27896929Smisaki "tx_rings $%p rings $%p", 27906929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings)); 27913859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 27926929Smisaki "tx_rings $%p tx_desc_rings $%p", 27936929Smisaki nxgep->tx_rings, tx_desc_rings)); 27943859Sml29623 27953859Sml29623 goto nxge_txdma_hw_start_exit; 27963859Sml29623 27973859Sml29623 nxge_txdma_hw_start_fail1: 27983859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27996929Smisaki "==> nxge_txdma_hw_start: disable " 28006929Smisaki "(status 0x%x channel %d)", status, channel)); 28013859Sml29623 28023859Sml29623 nxge_txdma_hw_start_exit: 28033859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 28046929Smisaki "==> nxge_txdma_hw_start: (status 0x%x)", status)); 28053859Sml29623 28063859Sml29623 return (status); 
28073859Sml29623 } 28083859Sml29623 28096495Sspeer /* 28106495Sspeer * nxge_txdma_start_channel 28116495Sspeer * 28126495Sspeer * Start a TDC. 28136495Sspeer * 28146495Sspeer * Arguments: 28156495Sspeer * nxgep 28166495Sspeer * channel The channel to start. 28176495Sspeer * tx_ring_p channel's transmit descriptor ring. 28186495Sspeer * tx_mbox_p channel' smailbox. 28196495Sspeer * 28206495Sspeer * Notes: 28216495Sspeer * 28226495Sspeer * NPI/NXGE function calls: 28236495Sspeer * nxge_reset_txdma_channel() 28246495Sspeer * nxge_init_txdma_channel_event_mask() 28256495Sspeer * nxge_enable_txdma_channel() 28266495Sspeer * 28276495Sspeer * Registers accessed: 28286495Sspeer * none directly (see functions above). 28296495Sspeer * 28306495Sspeer * Context: 28316495Sspeer * Any domain 28326495Sspeer */ 28333859Sml29623 static nxge_status_t 28343859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 28353859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 28363859Sml29623 28373859Sml29623 { 28383859Sml29623 nxge_status_t status = NXGE_OK; 28393859Sml29623 28403859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 28413859Sml29623 "==> nxge_txdma_start_channel (channel %d)", channel)); 28423859Sml29623 /* 28433859Sml29623 * TXDMA/TXC must be in stopped state. 
28443859Sml29623 */ 28453859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 28463859Sml29623 28473859Sml29623 /* 28483859Sml29623 * Reset TXDMA channel 28493859Sml29623 */ 28503859Sml29623 tx_ring_p->tx_cs.value = 0; 28513859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 28523859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 28533859Sml29623 tx_ring_p->tx_cs.value); 28543859Sml29623 if (status != NXGE_OK) { 28553859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 28563859Sml29623 "==> nxge_txdma_start_channel (channel %d)" 28573859Sml29623 " reset channel failed 0x%x", channel, status)); 28583859Sml29623 goto nxge_txdma_start_channel_exit; 28593859Sml29623 } 28603859Sml29623 28613859Sml29623 /* 28623859Sml29623 * Initialize the TXDMA channel specific FZC control 28633859Sml29623 * configurations. These FZC registers are pertaining 28643859Sml29623 * to each TX channel (i.e. logical pages). 28653859Sml29623 */ 28666495Sspeer if (!isLDOMguest(nxgep)) { 28676495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 28686495Sspeer tx_ring_p, tx_mbox_p); 28696495Sspeer if (status != NXGE_OK) { 28706495Sspeer goto nxge_txdma_start_channel_exit; 28716495Sspeer } 28723859Sml29623 } 28733859Sml29623 28743859Sml29623 /* 28753859Sml29623 * Initialize the event masks. 28763859Sml29623 */ 28773859Sml29623 tx_ring_p->tx_evmask.value = 0; 28783859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 28796495Sspeer channel, &tx_ring_p->tx_evmask); 28803859Sml29623 if (status != NXGE_OK) { 28813859Sml29623 goto nxge_txdma_start_channel_exit; 28823859Sml29623 } 28833859Sml29623 28843859Sml29623 /* 28853859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 28863859Sml29623 * initialise the DMA channels and 28873859Sml29623 * enable each DMA channel. 
28883859Sml29623 */ 28893859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 28903859Sml29623 tx_ring_p, tx_mbox_p); 28913859Sml29623 if (status != NXGE_OK) { 28923859Sml29623 goto nxge_txdma_start_channel_exit; 28933859Sml29623 } 28943859Sml29623 28953859Sml29623 nxge_txdma_start_channel_exit: 28963859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 28973859Sml29623 28983859Sml29623 return (status); 28993859Sml29623 } 29003859Sml29623 29016495Sspeer /* 29026495Sspeer * nxge_txdma_stop_channel 29036495Sspeer * 29046495Sspeer * Stop a TDC. 29056495Sspeer * 29066495Sspeer * Arguments: 29076495Sspeer * nxgep 29086495Sspeer * channel The channel to stop. 29096495Sspeer * tx_ring_p channel's transmit descriptor ring. 29106495Sspeer * tx_mbox_p channel' smailbox. 29116495Sspeer * 29126495Sspeer * Notes: 29136495Sspeer * 29146495Sspeer * NPI/NXGE function calls: 29156495Sspeer * nxge_txdma_stop_inj_err() 29166495Sspeer * nxge_reset_txdma_channel() 29176495Sspeer * nxge_init_txdma_channel_event_mask() 29186495Sspeer * nxge_init_txdma_channel_cntl_stat() 29196495Sspeer * nxge_disable_txdma_channel() 29206495Sspeer * 29216495Sspeer * Registers accessed: 29226495Sspeer * none directly (see functions above). 29236495Sspeer * 29246495Sspeer * Context: 29256495Sspeer * Any domain 29266495Sspeer */ 29273859Sml29623 /*ARGSUSED*/ 29283859Sml29623 static nxge_status_t 29296495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 29303859Sml29623 { 29316495Sspeer p_tx_ring_t tx_ring_p; 29326495Sspeer int status = NXGE_OK; 29333859Sml29623 29343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29356929Smisaki "==> nxge_txdma_stop_channel: channel %d", channel)); 29363859Sml29623 29373859Sml29623 /* 29383859Sml29623 * Stop (disable) TXDMA and TXC (if stop bit is set 29393859Sml29623 * and STOP_N_GO bit not set, the TXDMA reset state will 29403859Sml29623 * not be set if reset TXDMA. 
29413859Sml29623 */ 29423859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel); 29433859Sml29623 29446495Sspeer tx_ring_p = nxgep->tx_rings->rings[channel]; 29456495Sspeer 29463859Sml29623 /* 29473859Sml29623 * Reset TXDMA channel 29483859Sml29623 */ 29493859Sml29623 tx_ring_p->tx_cs.value = 0; 29503859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1; 29513859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel, 29526929Smisaki tx_ring_p->tx_cs.value); 29533859Sml29623 if (status != NXGE_OK) { 29543859Sml29623 goto nxge_txdma_stop_channel_exit; 29553859Sml29623 } 29563859Sml29623 29573859Sml29623 #ifdef HARDWARE_REQUIRED 29583859Sml29623 /* Set up the interrupt event masks. */ 29593859Sml29623 tx_ring_p->tx_evmask.value = 0; 29603859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, 29616929Smisaki channel, &tx_ring_p->tx_evmask); 29623859Sml29623 if (status != NXGE_OK) { 29633859Sml29623 goto nxge_txdma_stop_channel_exit; 29643859Sml29623 } 29653859Sml29623 29663859Sml29623 /* Initialize the DMA control and status register */ 29673859Sml29623 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 29683859Sml29623 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 29696929Smisaki tx_ring_p->tx_cs.value); 29703859Sml29623 if (status != NXGE_OK) { 29713859Sml29623 goto nxge_txdma_stop_channel_exit; 29723859Sml29623 } 29733859Sml29623 29746495Sspeer tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 29756495Sspeer 29763859Sml29623 /* Disable channel */ 29773859Sml29623 status = nxge_disable_txdma_channel(nxgep, channel, 29786495Sspeer tx_ring_p, tx_mbox_p); 29793859Sml29623 if (status != NXGE_OK) { 29803859Sml29623 goto nxge_txdma_start_channel_exit; 29813859Sml29623 } 29823859Sml29623 29833859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 29846929Smisaki "==> nxge_txdma_stop_channel: event done")); 29853859Sml29623 29863859Sml29623 #endif 29873859Sml29623 29883859Sml29623 nxge_txdma_stop_channel_exit: 29893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 
"<== nxge_txdma_stop_channel")); 29903859Sml29623 return (status); 29913859Sml29623 } 29923859Sml29623 29936495Sspeer /* 29946495Sspeer * nxge_txdma_get_ring 29956495Sspeer * 29966495Sspeer * Get the ring for a TDC. 29976495Sspeer * 29986495Sspeer * Arguments: 29996495Sspeer * nxgep 30006495Sspeer * channel 30016495Sspeer * 30026495Sspeer * Notes: 30036495Sspeer * 30046495Sspeer * NPI/NXGE function calls: 30056495Sspeer * 30066495Sspeer * Registers accessed: 30076495Sspeer * 30086495Sspeer * Context: 30096495Sspeer * Any domain 30106495Sspeer */ 30113859Sml29623 static p_tx_ring_t 30123859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 30133859Sml29623 { 30146495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 30156495Sspeer int tdc; 30163859Sml29623 30173859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 30183859Sml29623 30196495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 30203859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 30216495Sspeer "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 30226495Sspeer goto return_null; 30233859Sml29623 } 30243859Sml29623 30256495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 30266495Sspeer if ((1 << tdc) & set->owned.map) { 30276495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 30286495Sspeer if (ring) { 30296495Sspeer if (channel == ring->tdc) { 30306495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30316495Sspeer "<== nxge_txdma_get_ring: " 30326495Sspeer "tdc %d ring $%p", tdc, ring)); 30336495Sspeer return (ring); 30346495Sspeer } 30356495Sspeer } 30363859Sml29623 } 30373859Sml29623 } 30383859Sml29623 30396495Sspeer return_null: 30406495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 30416929Smisaki "ring not found")); 30426495Sspeer 30433859Sml29623 return (NULL); 30443859Sml29623 } 30453859Sml29623 30466495Sspeer /* 30476495Sspeer * nxge_txdma_get_mbox 30486495Sspeer * 30496495Sspeer * Get the mailbox for a TDC. 
30506495Sspeer * 30516495Sspeer * Arguments: 30526495Sspeer * nxgep 30536495Sspeer * channel 30546495Sspeer * 30556495Sspeer * Notes: 30566495Sspeer * 30576495Sspeer * NPI/NXGE function calls: 30586495Sspeer * 30596495Sspeer * Registers accessed: 30606495Sspeer * 30616495Sspeer * Context: 30626495Sspeer * Any domain 30636495Sspeer */ 30643859Sml29623 static p_tx_mbox_t 30653859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 30663859Sml29623 { 30676495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 30686495Sspeer int tdc; 30693859Sml29623 30703859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 30713859Sml29623 30726495Sspeer if (nxgep->tx_mbox_areas_p == 0 || 30736495Sspeer nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 30746495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30756495Sspeer "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 30766495Sspeer goto return_null; 30773859Sml29623 } 30783859Sml29623 30796495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 30806495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30816495Sspeer "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 30826495Sspeer goto return_null; 30833859Sml29623 } 30843859Sml29623 30856495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 30866495Sspeer if ((1 << tdc) & set->owned.map) { 30876495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 30886495Sspeer if (ring) { 30896495Sspeer if (channel == ring->tdc) { 30906495Sspeer tx_mbox_t *mailbox = nxgep-> 30916495Sspeer tx_mbox_areas_p-> 30926495Sspeer txmbox_areas_p[tdc]; 30936495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 30946495Sspeer "<== nxge_txdma_get_mbox: tdc %d " 30956495Sspeer "ring $%p", tdc, mailbox)); 30966495Sspeer return (mailbox); 30976495Sspeer } 30986495Sspeer } 30993859Sml29623 } 31003859Sml29623 } 31013859Sml29623 31026495Sspeer return_null: 31036495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 31046929Smisaki "mailbox not found")); 31056495Sspeer 31063859Sml29623 return 
(NULL); 31073859Sml29623 } 31083859Sml29623 31096495Sspeer /* 31106495Sspeer * nxge_tx_err_evnts 31116495Sspeer * 31126495Sspeer * Recover a TDC. 31136495Sspeer * 31146495Sspeer * Arguments: 31156495Sspeer * nxgep 31166495Sspeer * index The index to the TDC ring. 31176495Sspeer * ldvp Used to get the channel number ONLY. 31186495Sspeer * cs A copy of the bits from TX_CS. 31196495Sspeer * 31206495Sspeer * Notes: 31216495Sspeer * Calling tree: 31226495Sspeer * nxge_tx_intr() 31236495Sspeer * 31246495Sspeer * NPI/NXGE function calls: 31256495Sspeer * npi_txdma_ring_error_get() 31266495Sspeer * npi_txdma_inj_par_error_get() 31276495Sspeer * nxge_txdma_fatal_err_recover() 31286495Sspeer * 31296495Sspeer * Registers accessed: 31306495Sspeer * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 31316495Sspeer * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 31326495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 31336495Sspeer * 31346495Sspeer * Context: 31356495Sspeer * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
31366495Sspeer */ 31373859Sml29623 /*ARGSUSED*/ 31383859Sml29623 static nxge_status_t 31393859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 31403859Sml29623 { 31413859Sml29623 npi_handle_t handle; 31423859Sml29623 npi_status_t rs; 31433859Sml29623 uint8_t channel; 31443859Sml29623 p_tx_ring_t *tx_rings; 31453859Sml29623 p_tx_ring_t tx_ring_p; 31463859Sml29623 p_nxge_tx_ring_stats_t tdc_stats; 31473859Sml29623 boolean_t txchan_fatal = B_FALSE; 31483859Sml29623 nxge_status_t status = NXGE_OK; 31493859Sml29623 tdmc_inj_par_err_t par_err; 31503859Sml29623 uint32_t value; 31513859Sml29623 31526495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 31533859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 31543859Sml29623 channel = ldvp->channel; 31553859Sml29623 31563859Sml29623 tx_rings = nxgep->tx_rings->rings; 31573859Sml29623 tx_ring_p = tx_rings[index]; 31583859Sml29623 tdc_stats = tx_ring_p->tdc_stats; 31593859Sml29623 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 31606929Smisaki (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 31616929Smisaki (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 31623859Sml29623 if ((rs = npi_txdma_ring_error_get(handle, channel, 31636929Smisaki &tdc_stats->errlog)) != NPI_SUCCESS) 31643859Sml29623 return (NXGE_ERROR | rs); 31653859Sml29623 } 31663859Sml29623 31673859Sml29623 if (cs.bits.ldw.mbox_err) { 31683859Sml29623 tdc_stats->mbox_err++; 31693859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31706929Smisaki NXGE_FM_EREPORT_TDMC_MBOX_ERR); 31713859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31726929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31736929Smisaki "fatal error: mailbox", channel)); 31743859Sml29623 txchan_fatal = B_TRUE; 31753859Sml29623 } 31763859Sml29623 if (cs.bits.ldw.pkt_size_err) { 31773859Sml29623 tdc_stats->pkt_size_err++; 31783859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 
31796929Smisaki NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 31803859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31816929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31826929Smisaki "fatal error: pkt_size_err", channel)); 31833859Sml29623 txchan_fatal = B_TRUE; 31843859Sml29623 } 31853859Sml29623 if (cs.bits.ldw.tx_ring_oflow) { 31863859Sml29623 tdc_stats->tx_ring_oflow++; 31873859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31886929Smisaki NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 31893859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31906929Smisaki "==> nxge_tx_err_evnts(channel %d): " 31916929Smisaki "fatal error: tx_ring_oflow", channel)); 31923859Sml29623 txchan_fatal = B_TRUE; 31933859Sml29623 } 31943859Sml29623 if (cs.bits.ldw.pref_buf_par_err) { 31953859Sml29623 tdc_stats->pre_buf_par_err++; 31963859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 31976929Smisaki NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 31983859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31996929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32006929Smisaki "fatal error: pre_buf_par_err", channel)); 32013859Sml29623 /* Clear error injection source for parity error */ 32023859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 32033859Sml29623 par_err.value = value; 32043859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 32053859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 32063859Sml29623 txchan_fatal = B_TRUE; 32073859Sml29623 } 32083859Sml29623 if (cs.bits.ldw.nack_pref) { 32093859Sml29623 tdc_stats->nack_pref++; 32103859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32116929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PREF); 32123859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32136929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32146929Smisaki "fatal error: nack_pref", channel)); 32153859Sml29623 txchan_fatal = B_TRUE; 32163859Sml29623 } 32173859Sml29623 if (cs.bits.ldw.nack_pkt_rd) { 32183859Sml29623 
tdc_stats->nack_pkt_rd++; 32193859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32206929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 32213859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32226929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32236929Smisaki "fatal error: nack_pkt_rd", channel)); 32243859Sml29623 txchan_fatal = B_TRUE; 32253859Sml29623 } 32263859Sml29623 if (cs.bits.ldw.conf_part_err) { 32273859Sml29623 tdc_stats->conf_part_err++; 32283859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32296929Smisaki NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 32303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32316929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32326929Smisaki "fatal error: config_partition_err", channel)); 32333859Sml29623 txchan_fatal = B_TRUE; 32343859Sml29623 } 32353859Sml29623 if (cs.bits.ldw.pkt_prt_err) { 32363859Sml29623 tdc_stats->pkt_part_err++; 32373859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 32386929Smisaki NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 32393859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32406929Smisaki "==> nxge_tx_err_evnts(channel %d): " 32416929Smisaki "fatal error: pkt_prt_err", channel)); 32423859Sml29623 txchan_fatal = B_TRUE; 32433859Sml29623 } 32443859Sml29623 32453859Sml29623 /* Clear error injection source in case this is an injected error */ 32463859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 32473859Sml29623 32483859Sml29623 if (txchan_fatal) { 32493859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32506929Smisaki " nxge_tx_err_evnts: " 32516929Smisaki " fatal error on channel %d cs 0x%llx\n", 32526929Smisaki channel, cs.value)); 32533859Sml29623 status = nxge_txdma_fatal_err_recover(nxgep, channel, 32546929Smisaki tx_ring_p); 32553859Sml29623 if (status == NXGE_OK) { 32563859Sml29623 FM_SERVICE_RESTORED(nxgep); 32573859Sml29623 } 32583859Sml29623 } 32593859Sml29623 32606495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== 
nxge_tx_err_evnts")); 32613859Sml29623 32623859Sml29623 return (status); 32633859Sml29623 } 32643859Sml29623 32653859Sml29623 static nxge_status_t 32666495Sspeer nxge_txdma_fatal_err_recover( 32676495Sspeer p_nxge_t nxgep, 32686495Sspeer uint16_t channel, 32696495Sspeer p_tx_ring_t tx_ring_p) 32703859Sml29623 { 32713859Sml29623 npi_handle_t handle; 32723859Sml29623 npi_status_t rs = NPI_SUCCESS; 32733859Sml29623 p_tx_mbox_t tx_mbox_p; 32743859Sml29623 nxge_status_t status = NXGE_OK; 32753859Sml29623 32763859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 32773859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32786929Smisaki "Recovering from TxDMAChannel#%d error...", channel)); 32793859Sml29623 32803859Sml29623 /* 32813859Sml29623 * Stop the dma channel waits for the stop done. 32823859Sml29623 * If the stop done bit is not set, then create 32833859Sml29623 * an error. 32843859Sml29623 */ 32853859Sml29623 32863859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 32873859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 32883859Sml29623 MUTEX_ENTER(&tx_ring_p->lock); 32893859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 32903859Sml29623 if (rs != NPI_SUCCESS) { 32913859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32926929Smisaki "==> nxge_txdma_fatal_err_recover (channel %d): " 32936929Smisaki "stop failed ", channel)); 32943859Sml29623 goto fail; 32953859Sml29623 } 32963859Sml29623 32973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 32983859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 32993859Sml29623 33003859Sml29623 /* 33013859Sml29623 * Reset TXDMA channel 33023859Sml29623 */ 33033859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 33043859Sml29623 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 33056929Smisaki NPI_SUCCESS) { 33063859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33076929Smisaki "==> 
nxge_txdma_fatal_err_recover (channel %d)" 33086929Smisaki " reset channel failed 0x%x", channel, rs)); 33093859Sml29623 goto fail; 33103859Sml29623 } 33113859Sml29623 33123859Sml29623 /* 33133859Sml29623 * Reset the tail (kick) register to 0. 33143859Sml29623 * (Hardware will not reset it. Tx overflow fatal 33153859Sml29623 * error if tail is not set to 0 after reset! 33163859Sml29623 */ 33173859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 33183859Sml29623 33193859Sml29623 /* Restart TXDMA channel */ 33203859Sml29623 33216495Sspeer if (!isLDOMguest(nxgep)) { 33226495Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 33236495Sspeer 33246495Sspeer // XXX This is a problem in HIO! 33256495Sspeer /* 33266495Sspeer * Initialize the TXDMA channel specific FZC control 33276495Sspeer * configurations. These FZC registers are pertaining 33286495Sspeer * to each TX channel (i.e. logical pages). 33296495Sspeer */ 33306495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 33316495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 33326495Sspeer tx_ring_p, tx_mbox_p); 33336495Sspeer if (status != NXGE_OK) 33346495Sspeer goto fail; 33356495Sspeer } 33363859Sml29623 33373859Sml29623 /* 33383859Sml29623 * Initialize the event masks. 33393859Sml29623 */ 33403859Sml29623 tx_ring_p->tx_evmask.value = 0; 33413859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 33426929Smisaki &tx_ring_p->tx_evmask); 33433859Sml29623 if (status != NXGE_OK) 33443859Sml29623 goto fail; 33453859Sml29623 33463859Sml29623 tx_ring_p->wr_index_wrap = B_FALSE; 33473859Sml29623 tx_ring_p->wr_index = 0; 33483859Sml29623 tx_ring_p->rd_index = 0; 33493859Sml29623 33503859Sml29623 /* 33513859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 33523859Sml29623 * initialise the DMA channels and 33533859Sml29623 * enable each DMA channel. 
33543859Sml29623 */ 33553859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 33563859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 33576929Smisaki tx_ring_p, tx_mbox_p); 33583859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33593859Sml29623 if (status != NXGE_OK) 33603859Sml29623 goto fail; 33613859Sml29623 33623859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33636929Smisaki "Recovery Successful, TxDMAChannel#%d Restored", 33646929Smisaki channel)); 33653859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 33663859Sml29623 33673859Sml29623 return (NXGE_OK); 33683859Sml29623 33693859Sml29623 fail: 33703859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33717906SMichael.Speer@Sun.COM 33723859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 33736929Smisaki "nxge_txdma_fatal_err_recover (channel %d): " 33746929Smisaki "failed to recover this txdma channel", channel)); 33753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 33763859Sml29623 33773859Sml29623 return (status); 33783859Sml29623 } 33793859Sml29623 33806495Sspeer /* 33816495Sspeer * nxge_tx_port_fatal_err_recover 33826495Sspeer * 33836495Sspeer * Attempt to recover from a fatal port error. 33846495Sspeer * 33856495Sspeer * Arguments: 33866495Sspeer * nxgep 33876495Sspeer * 33886495Sspeer * Notes: 33896495Sspeer * How would a guest do this? 
33906495Sspeer * 33916495Sspeer * NPI/NXGE function calls: 33926495Sspeer * 33936495Sspeer * Registers accessed: 33946495Sspeer * 33956495Sspeer * Context: 33966495Sspeer * Service domain 33976495Sspeer */ 33983859Sml29623 nxge_status_t 33993859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 34003859Sml29623 { 34016495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 34026495Sspeer nxge_channel_t tdc; 34036495Sspeer 34046495Sspeer tx_ring_t *ring; 34056495Sspeer tx_mbox_t *mailbox; 34066495Sspeer 34073859Sml29623 npi_handle_t handle; 34086495Sspeer nxge_status_t status; 34096495Sspeer npi_status_t rs; 34103859Sml29623 34113859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 34123859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34136495Sspeer "Recovering from TxPort error...")); 34146495Sspeer 34156495Sspeer if (isLDOMguest(nxgep)) { 34166495Sspeer return (NXGE_OK); 34176495Sspeer } 34186495Sspeer 34196495Sspeer if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 34206495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 34216495Sspeer "<== nxge_tx_port_fatal_err_recover: not initialized")); 34226495Sspeer return (NXGE_ERROR); 34236495Sspeer } 34246495Sspeer 34256495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 34266495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 34276495Sspeer "<== nxge_tx_port_fatal_err_recover: " 34286495Sspeer "NULL ring pointer(s)")); 34296495Sspeer return (NXGE_ERROR); 34306495Sspeer } 34316495Sspeer 34326495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34336495Sspeer if ((1 << tdc) & set->owned.map) { 34346495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34356495Sspeer if (ring) 34366495Sspeer MUTEX_ENTER(&ring->lock); 34376495Sspeer } 34386495Sspeer } 34393859Sml29623 34403859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 34416495Sspeer 34426495Sspeer /* 34436495Sspeer * Stop all the TDCs owned by us. 34446495Sspeer * (The shared TDCs will have been stopped by their owners.) 
34456495Sspeer */ 34466495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34476495Sspeer if ((1 << tdc) & set->owned.map) { 34486495Sspeer ring = nxgep->tx_rings->rings[tdc]; 34496495Sspeer if (ring) { 34506495Sspeer rs = npi_txdma_channel_control 34516495Sspeer (handle, TXDMA_STOP, tdc); 34526495Sspeer if (rs != NPI_SUCCESS) { 34536495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34546495Sspeer "nxge_tx_port_fatal_err_recover " 34556495Sspeer "(channel %d): stop failed ", tdc)); 34566495Sspeer goto fail; 34576495Sspeer } 34586495Sspeer } 34593859Sml29623 } 34603859Sml29623 } 34613859Sml29623 34626495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 34636495Sspeer 34646495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34656495Sspeer if ((1 << tdc) & set->owned.map) { 34666495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34677906SMichael.Speer@Sun.COM if (ring) { 34686495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, 0); 34697906SMichael.Speer@Sun.COM } 34703859Sml29623 } 34713859Sml29623 } 34723859Sml29623 34733859Sml29623 /* 34746495Sspeer * Reset all the TDCs. 34753859Sml29623 */ 34766495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 34776495Sspeer 34786495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 34796495Sspeer if ((1 << tdc) & set->owned.map) { 34806495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 34816495Sspeer if (ring) { 34826495Sspeer if ((rs = npi_txdma_channel_control 34836929Smisaki (handle, TXDMA_RESET, tdc)) 34846495Sspeer != NPI_SUCCESS) { 34856495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34866495Sspeer "nxge_tx_port_fatal_err_recover " 34876495Sspeer "(channel %d) reset channel " 34886495Sspeer "failed 0x%x", tdc, rs)); 34896495Sspeer goto fail; 34906495Sspeer } 34916495Sspeer } 34926495Sspeer /* 34936495Sspeer * Reset the tail (kick) register to 0. 34946495Sspeer * (Hardware will not reset it. Tx overflow fatal 34956495Sspeer * error if tail is not set to 0 after reset! 
34966495Sspeer */ 34976495Sspeer TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 34983859Sml29623 } 34996495Sspeer } 35006495Sspeer 35016495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 35026495Sspeer 35036495Sspeer /* Restart all the TDCs */ 35046495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35056495Sspeer if ((1 << tdc) & set->owned.map) { 35066495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35076495Sspeer if (ring) { 35086495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 35096495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, tdc, 35106495Sspeer ring, mailbox); 35116495Sspeer ring->tx_evmask.value = 0; 35126495Sspeer /* 35136495Sspeer * Initialize the event masks. 35146495Sspeer */ 35156495Sspeer status = nxge_init_txdma_channel_event_mask 35166495Sspeer (nxgep, tdc, &ring->tx_evmask); 35176495Sspeer 35186495Sspeer ring->wr_index_wrap = B_FALSE; 35196495Sspeer ring->wr_index = 0; 35206495Sspeer ring->rd_index = 0; 35216495Sspeer 35226495Sspeer if (status != NXGE_OK) 35236495Sspeer goto fail; 35246495Sspeer if (status != NXGE_OK) 35256495Sspeer goto fail; 35266495Sspeer } 35273859Sml29623 } 35286495Sspeer } 35296495Sspeer 35306495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 35316495Sspeer 35326495Sspeer /* Re-enable all the TDCs */ 35336495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35346495Sspeer if ((1 << tdc) & set->owned.map) { 35356495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35366495Sspeer if (ring) { 35376495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc); 35386495Sspeer status = nxge_enable_txdma_channel(nxgep, tdc, 35396495Sspeer ring, mailbox); 35406495Sspeer if (status != NXGE_OK) 35416495Sspeer goto fail; 35426495Sspeer } 35436495Sspeer } 35443859Sml29623 } 35453859Sml29623 35463859Sml29623 /* 35476495Sspeer * Unlock all the TDCs. 
35483859Sml29623 */ 35496495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35506495Sspeer if ((1 << tdc) & set->owned.map) { 35516495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 35526495Sspeer if (ring) 35536495Sspeer MUTEX_EXIT(&ring->lock); 35543859Sml29623 } 35553859Sml29623 } 35563859Sml29623 35576495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 35583859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 35593859Sml29623 35603859Sml29623 return (NXGE_OK); 35613859Sml29623 35623859Sml29623 fail: 35636495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 35646495Sspeer if ((1 << tdc) & set->owned.map) { 35656495Sspeer ring = nxgep->tx_rings->rings[tdc]; 35666495Sspeer if (ring) 35676495Sspeer MUTEX_EXIT(&ring->lock); 35683859Sml29623 } 35693859Sml29623 } 35703859Sml29623 35716495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 35726495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 35733859Sml29623 35743859Sml29623 return (status); 35753859Sml29623 } 35763859Sml29623 35776495Sspeer /* 35786495Sspeer * nxge_txdma_inject_err 35796495Sspeer * 35806495Sspeer * Inject an error into a TDC. 35816495Sspeer * 35826495Sspeer * Arguments: 35836495Sspeer * nxgep 35846495Sspeer * err_id The error to inject. 35856495Sspeer * chan The channel to inject into. 35866495Sspeer * 35876495Sspeer * Notes: 35886495Sspeer * This is called from nxge_main.c:nxge_err_inject() 35896495Sspeer * Has this ioctl ever been used? 
35906495Sspeer * 35916495Sspeer * NPI/NXGE function calls: 35926495Sspeer * npi_txdma_inj_par_error_get() 35936495Sspeer * npi_txdma_inj_par_error_set() 35946495Sspeer * 35956495Sspeer * Registers accessed: 35966495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 35976495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35986495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 35996495Sspeer * 36006495Sspeer * Context: 36016495Sspeer * Service domain 36026495Sspeer */ 36033859Sml29623 void 36043859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 36053859Sml29623 { 36063859Sml29623 tdmc_intr_dbg_t tdi; 36073859Sml29623 tdmc_inj_par_err_t par_err; 36083859Sml29623 uint32_t value; 36093859Sml29623 npi_handle_t handle; 36103859Sml29623 36113859Sml29623 switch (err_id) { 36123859Sml29623 36133859Sml29623 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 36143859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 36153859Sml29623 /* Clear error injection source for parity error */ 36163859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 36173859Sml29623 par_err.value = value; 36183859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 36193859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 36203859Sml29623 36213859Sml29623 par_err.bits.ldw.inject_parity_error = (1 << chan); 36223859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 36233859Sml29623 par_err.value = value; 36243859Sml29623 par_err.bits.ldw.inject_parity_error |= (1 << chan); 36253859Sml29623 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 36266929Smisaki (unsigned long long)par_err.value); 36273859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 36283859Sml29623 break; 36293859Sml29623 36303859Sml29623 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 36313859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 36323859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 
36333859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 36343859Sml29623 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 36353859Sml29623 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 36363859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 36373859Sml29623 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36386929Smisaki chan, &tdi.value); 36393859Sml29623 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 36403859Sml29623 tdi.bits.ldw.pref_buf_par_err = 1; 36413859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 36423859Sml29623 tdi.bits.ldw.mbox_err = 1; 36433859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 36443859Sml29623 tdi.bits.ldw.nack_pref = 1; 36453859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 36463859Sml29623 tdi.bits.ldw.nack_pkt_rd = 1; 36473859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 36483859Sml29623 tdi.bits.ldw.pkt_size_err = 1; 36493859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 36503859Sml29623 tdi.bits.ldw.tx_ring_oflow = 1; 36513859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 36523859Sml29623 tdi.bits.ldw.conf_part_err = 1; 36533859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 36543859Sml29623 tdi.bits.ldw.pkt_part_err = 1; 36555125Sjoycey #if defined(__i386) 36565125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 36576929Smisaki tdi.value); 36585125Sjoycey #else 36593859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 36606929Smisaki tdi.value); 36615125Sjoycey #endif 36623859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36636929Smisaki chan, tdi.value); 36643859Sml29623 36653859Sml29623 break; 36663859Sml29623 } 36673859Sml29623 } 3668