/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
243859Sml29623 */ 253859Sml29623 263859Sml29623 #include <sys/nxge/nxge_impl.h> 273859Sml29623 #include <sys/nxge/nxge_txdma.h> 286495Sspeer #include <sys/nxge/nxge_hio.h> 296495Sspeer #include <npi_tx_rd64.h> 306495Sspeer #include <npi_tx_wr64.h> 313859Sml29623 #include <sys/llc1.h> 323859Sml29623 333859Sml29623 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 343859Sml29623 uint32_t nxge_tx_minfree = 32; 353859Sml29623 uint32_t nxge_tx_intr_thres = 0; 363859Sml29623 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 373859Sml29623 uint32_t nxge_tx_tiny_pack = 1; 383859Sml29623 uint32_t nxge_tx_use_bcopy = 1; 393859Sml29623 403859Sml29623 extern uint32_t nxge_tx_ring_size; 413859Sml29623 extern uint32_t nxge_bcopy_thresh; 423859Sml29623 extern uint32_t nxge_dvma_thresh; 433859Sml29623 extern uint32_t nxge_dma_stream_thresh; 443859Sml29623 extern dma_method_t nxge_force_dma; 456611Sml29623 extern uint32_t nxge_cksum_offload; 463859Sml29623 473859Sml29623 /* Device register access attributes for PIO. */ 483859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 493859Sml29623 /* Device descriptor access attributes for DMA. */ 503859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 513859Sml29623 /* Device buffer access attributes for DMA. 
*/ 523859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 533859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr; 543859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr; 553859Sml29623 563952Sml29623 extern int nxge_serial_tx(mblk_t *mp, void *arg); 573952Sml29623 58*7906SMichael.Speer@Sun.COM void nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p); 59*7906SMichael.Speer@Sun.COM 606495Sspeer static nxge_status_t nxge_map_txdma(p_nxge_t, int); 616495Sspeer 626495Sspeer static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 633859Sml29623 643859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 653859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *, 663859Sml29623 uint32_t, p_nxge_dma_common_t *, 673859Sml29623 p_tx_mbox_t *); 686495Sspeer static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 693859Sml29623 703859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 713859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 723859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 733859Sml29623 743859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 753859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t, 763859Sml29623 p_tx_mbox_t *); 773859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 783859Sml29623 p_tx_ring_t, p_tx_mbox_t); 793859Sml29623 803859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 813859Sml29623 p_tx_ring_t, p_tx_mbox_t); 826495Sspeer static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 833859Sml29623 843859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 853859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 863859Sml29623 p_nxge_ldv_t, tx_cs_t); 873859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 883859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 893859Sml29623 uint16_t, p_tx_ring_t); 
903859Sml29623 916495Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 926495Sspeer p_tx_ring_t ring_p, uint16_t channel); 936495Sspeer 943859Sml29623 nxge_status_t 953859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep) 963859Sml29623 { 976495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 986495Sspeer int i, count; 996495Sspeer 1006495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 1016495Sspeer 1026495Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 1036495Sspeer if ((1 << i) & set->lg.map) { 1046495Sspeer int tdc; 1056495Sspeer nxge_grp_t *group = set->group[i]; 1066495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1076495Sspeer if ((1 << tdc) & group->map) { 1086495Sspeer if ((nxge_grp_dc_add(nxgep, 1097755SMisaki.Kataoka@Sun.COM group, VP_BOUND_TX, tdc))) 1106495Sspeer return (NXGE_ERROR); 1116495Sspeer } 1126495Sspeer } 1136495Sspeer } 1146495Sspeer if (++count == set->lg.count) 1156495Sspeer break; 1166495Sspeer } 1176495Sspeer 1186495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 1196495Sspeer 1206495Sspeer return (NXGE_OK); 1216495Sspeer } 1226495Sspeer 1236495Sspeer nxge_status_t 1246495Sspeer nxge_init_txdma_channel( 1256495Sspeer p_nxge_t nxge, 1266495Sspeer int channel) 1276495Sspeer { 1286495Sspeer nxge_status_t status; 1296495Sspeer 1306495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 1316495Sspeer 1326495Sspeer status = nxge_map_txdma(nxge, channel); 1333859Sml29623 if (status != NXGE_OK) { 1346495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 1356495Sspeer "<== nxge_init_txdma_channel: status 0x%x", status)); 1366495Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 1373859Sml29623 return (status); 1383859Sml29623 } 1393859Sml29623 1406495Sspeer status = nxge_txdma_hw_start(nxge, channel); 1413859Sml29623 if (status != NXGE_OK) { 1426495Sspeer (void) nxge_unmap_txdma_channel(nxge, channel); 1436495Sspeer (void) 
npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 1443859Sml29623 return (status); 1453859Sml29623 } 1463859Sml29623 1476495Sspeer if (!nxge->statsp->tdc_ksp[channel]) 1486495Sspeer nxge_setup_tdc_kstats(nxge, channel); 1496495Sspeer 1506495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 1516495Sspeer 1526495Sspeer return (status); 1533859Sml29623 } 1543859Sml29623 1553859Sml29623 void 1563859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep) 1573859Sml29623 { 1586495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 1596495Sspeer int tdc; 1606495Sspeer 1616495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 1626495Sspeer 1636495Sspeer if (set->owned.map == 0) { 1646495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1656495Sspeer "nxge_uninit_txdma_channels: no channels")); 1666495Sspeer return; 1676495Sspeer } 1686495Sspeer 1696495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1706495Sspeer if ((1 << tdc) & set->owned.map) { 1716495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 1726495Sspeer } 1736495Sspeer } 1746495Sspeer 1756495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 1766495Sspeer } 1776495Sspeer 1786495Sspeer void 1796495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 1806495Sspeer { 1816495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 1826495Sspeer 1836495Sspeer if (nxgep->statsp->tdc_ksp[channel]) { 1846495Sspeer kstat_delete(nxgep->statsp->tdc_ksp[channel]); 1856495Sspeer nxgep->statsp->tdc_ksp[channel] = 0; 1866495Sspeer } 1876495Sspeer 1886495Sspeer (void) nxge_txdma_stop_channel(nxgep, channel); 1896495Sspeer nxge_unmap_txdma_channel(nxgep, channel); 1903859Sml29623 1913859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1926929Smisaki "<== nxge_uninit_txdma_channel")); 1933859Sml29623 } 1943859Sml29623 1953859Sml29623 void 1963859Sml29623 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 1973859Sml29623 
uint32_t entries, uint32_t size) 1983859Sml29623 { 1993859Sml29623 size_t tsize; 2003859Sml29623 *dest_p = *src_p; 2013859Sml29623 tsize = size * entries; 2023859Sml29623 dest_p->alength = tsize; 2033859Sml29623 dest_p->nblocks = entries; 2043859Sml29623 dest_p->block_size = size; 2053859Sml29623 dest_p->offset += tsize; 2063859Sml29623 2073859Sml29623 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 2083859Sml29623 src_p->alength -= tsize; 2093859Sml29623 src_p->dma_cookie.dmac_laddress += tsize; 2103859Sml29623 src_p->dma_cookie.dmac_size -= tsize; 2113859Sml29623 } 2123859Sml29623 2136495Sspeer /* 2146495Sspeer * nxge_reset_txdma_channel 2156495Sspeer * 2166495Sspeer * Reset a TDC. 2176495Sspeer * 2186495Sspeer * Arguments: 2196495Sspeer * nxgep 2206495Sspeer * channel The channel to reset. 2216495Sspeer * reg_data The current TX_CS. 2226495Sspeer * 2236495Sspeer * Notes: 2246495Sspeer * 2256495Sspeer * NPI/NXGE function calls: 2266495Sspeer * npi_txdma_channel_reset() 2276495Sspeer * npi_txdma_channel_control() 2286495Sspeer * 2296495Sspeer * Registers accessed: 2306495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 2316495Sspeer * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 2326495Sspeer * 2336495Sspeer * Context: 2346495Sspeer * Any domain 2356495Sspeer */ 2363859Sml29623 nxge_status_t 2373859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 2383859Sml29623 { 2393859Sml29623 npi_status_t rs = NPI_SUCCESS; 2403859Sml29623 nxge_status_t status = NXGE_OK; 2413859Sml29623 npi_handle_t handle; 2423859Sml29623 2433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 2443859Sml29623 2453859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2463859Sml29623 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 2473859Sml29623 rs = npi_txdma_channel_reset(handle, channel); 2483859Sml29623 } else { 2493859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 2506929Smisaki channel); 2513859Sml29623 } 
2523859Sml29623 2533859Sml29623 if (rs != NPI_SUCCESS) { 2543859Sml29623 status = NXGE_ERROR | rs; 2553859Sml29623 } 2563859Sml29623 2573859Sml29623 /* 2583859Sml29623 * Reset the tail (kick) register to 0. 2593859Sml29623 * (Hardware will not reset it. Tx overflow fatal 2603859Sml29623 * error if tail is not set to 0 after reset! 2613859Sml29623 */ 2623859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 2633859Sml29623 2643859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 2653859Sml29623 return (status); 2663859Sml29623 } 2673859Sml29623 2686495Sspeer /* 2696495Sspeer * nxge_init_txdma_channel_event_mask 2706495Sspeer * 2716495Sspeer * Enable interrupts for a set of events. 2726495Sspeer * 2736495Sspeer * Arguments: 2746495Sspeer * nxgep 2756495Sspeer * channel The channel to map. 2766495Sspeer * mask_p The events to enable. 2776495Sspeer * 2786495Sspeer * Notes: 2796495Sspeer * 2806495Sspeer * NPI/NXGE function calls: 2816495Sspeer * npi_txdma_event_mask() 2826495Sspeer * 2836495Sspeer * Registers accessed: 2846495Sspeer * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 2856495Sspeer * 2866495Sspeer * Context: 2876495Sspeer * Any domain 2886495Sspeer */ 2893859Sml29623 nxge_status_t 2903859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 2913859Sml29623 p_tx_dma_ent_msk_t mask_p) 2923859Sml29623 { 2933859Sml29623 npi_handle_t handle; 2943859Sml29623 npi_status_t rs = NPI_SUCCESS; 2953859Sml29623 nxge_status_t status = NXGE_OK; 2963859Sml29623 2973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2986929Smisaki "<== nxge_init_txdma_channel_event_mask")); 2993859Sml29623 3003859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3013859Sml29623 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 3023859Sml29623 if (rs != NPI_SUCCESS) { 3033859Sml29623 status = NXGE_ERROR | rs; 3043859Sml29623 } 3053859Sml29623 3063859Sml29623 return (status); 3073859Sml29623 } 3083859Sml29623 3096495Sspeer /* 
3106495Sspeer * nxge_init_txdma_channel_cntl_stat 3116495Sspeer * 3126495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 3136495Sspeer * 3146495Sspeer * Arguments: 3156495Sspeer * nxgep 3166495Sspeer * channel The channel to stop. 3176495Sspeer * 3186495Sspeer * Notes: 3196495Sspeer * 3206495Sspeer * NPI/NXGE function calls: 3216495Sspeer * npi_txdma_control_status() 3226495Sspeer * 3236495Sspeer * Registers accessed: 3246495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3256495Sspeer * 3266495Sspeer * Context: 3276495Sspeer * Any domain 3286495Sspeer */ 3293859Sml29623 nxge_status_t 3303859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3313859Sml29623 uint64_t reg_data) 3323859Sml29623 { 3333859Sml29623 npi_handle_t handle; 3343859Sml29623 npi_status_t rs = NPI_SUCCESS; 3353859Sml29623 nxge_status_t status = NXGE_OK; 3363859Sml29623 3373859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 3386929Smisaki "<== nxge_init_txdma_channel_cntl_stat")); 3393859Sml29623 3403859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3413859Sml29623 rs = npi_txdma_control_status(handle, OP_SET, channel, 3426929Smisaki (p_tx_cs_t)®_data); 3433859Sml29623 3443859Sml29623 if (rs != NPI_SUCCESS) { 3453859Sml29623 status = NXGE_ERROR | rs; 3463859Sml29623 } 3473859Sml29623 3483859Sml29623 return (status); 3493859Sml29623 } 3503859Sml29623 3516495Sspeer /* 3526495Sspeer * nxge_enable_txdma_channel 3536495Sspeer * 3546495Sspeer * Enable a TDC. 3556495Sspeer * 3566495Sspeer * Arguments: 3576495Sspeer * nxgep 3586495Sspeer * channel The channel to enable. 3596495Sspeer * tx_desc_p channel's transmit descriptor ring. 
3606495Sspeer * mbox_p channel's mailbox, 3616495Sspeer * 3626495Sspeer * Notes: 3636495Sspeer * 3646495Sspeer * NPI/NXGE function calls: 3656495Sspeer * npi_txdma_ring_config() 3666495Sspeer * npi_txdma_mbox_config() 3676495Sspeer * npi_txdma_channel_init_enable() 3686495Sspeer * 3696495Sspeer * Registers accessed: 3706495Sspeer * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration 3716495Sspeer * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High 3726495Sspeer * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low 3736495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 3746495Sspeer * 3756495Sspeer * Context: 3766495Sspeer * Any domain 3776495Sspeer */ 3783859Sml29623 nxge_status_t 3793859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep, 3803859Sml29623 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p) 3813859Sml29623 { 3823859Sml29623 npi_handle_t handle; 3833859Sml29623 npi_status_t rs = NPI_SUCCESS; 3843859Sml29623 nxge_status_t status = NXGE_OK; 3853859Sml29623 3863859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel")); 3873859Sml29623 3883859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3893859Sml29623 /* 3903859Sml29623 * Use configuration data composed at init time. 3913859Sml29623 * Write to hardware the transmit ring configurations. 3923859Sml29623 */ 3933859Sml29623 rs = npi_txdma_ring_config(handle, OP_SET, channel, 3946495Sspeer (uint64_t *)&(tx_desc_p->tx_ring_cfig.value)); 3953859Sml29623 3963859Sml29623 if (rs != NPI_SUCCESS) { 3973859Sml29623 return (NXGE_ERROR | rs); 3983859Sml29623 } 3993859Sml29623 4006495Sspeer if (isLDOMguest(nxgep)) { 4016495Sspeer /* Add interrupt handler for this channel. 
*/ 4026495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK) 4036495Sspeer return (NXGE_ERROR); 4046495Sspeer } 4056495Sspeer 4063859Sml29623 /* Write to hardware the mailbox */ 4073859Sml29623 rs = npi_txdma_mbox_config(handle, OP_SET, channel, 4086929Smisaki (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress); 4093859Sml29623 4103859Sml29623 if (rs != NPI_SUCCESS) { 4113859Sml29623 return (NXGE_ERROR | rs); 4123859Sml29623 } 4133859Sml29623 4143859Sml29623 /* Start the DMA engine. */ 4153859Sml29623 rs = npi_txdma_channel_init_enable(handle, channel); 4163859Sml29623 4173859Sml29623 if (rs != NPI_SUCCESS) { 4183859Sml29623 return (NXGE_ERROR | rs); 4193859Sml29623 } 4203859Sml29623 4213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 4223859Sml29623 4233859Sml29623 return (status); 4243859Sml29623 } 4253859Sml29623 4263859Sml29623 void 4273859Sml29623 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 4283859Sml29623 boolean_t l4_cksum, int pkt_len, uint8_t npads, 4296611Sml29623 p_tx_pkt_hdr_all_t pkthdrp, 4306611Sml29623 t_uscalar_t start_offset, 4316611Sml29623 t_uscalar_t stuff_offset) 4323859Sml29623 { 4333859Sml29623 p_tx_pkt_header_t hdrp; 4343859Sml29623 p_mblk_t nmp; 4353859Sml29623 uint64_t tmp; 4363859Sml29623 size_t mblk_len; 4373859Sml29623 size_t iph_len; 4383859Sml29623 size_t hdrs_size; 4393859Sml29623 uint8_t hdrs_buf[sizeof (struct ether_header) + 4406929Smisaki 64 + sizeof (uint32_t)]; 4415505Smisaki uint8_t *cursor; 4423859Sml29623 uint8_t *ip_buf; 4433859Sml29623 uint16_t eth_type; 4443859Sml29623 uint8_t ipproto; 4453859Sml29623 boolean_t is_vlan = B_FALSE; 4463859Sml29623 size_t eth_hdr_size; 4473859Sml29623 4483859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 4493859Sml29623 4503859Sml29623 /* 4513859Sml29623 * Caller should zero out the headers first. 
4523859Sml29623 */ 4533859Sml29623 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 4543859Sml29623 4553859Sml29623 if (fill_len) { 4563859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 4576929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d " 4586929Smisaki "npads %d", pkt_len, npads)); 4593859Sml29623 tmp = (uint64_t)pkt_len; 4603859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 4613859Sml29623 goto fill_tx_header_done; 4623859Sml29623 } 4633859Sml29623 4646611Sml29623 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 4653859Sml29623 4663859Sml29623 /* 4673859Sml29623 * mp is the original data packet (does not include the 4683859Sml29623 * Neptune transmit header). 4693859Sml29623 */ 4703859Sml29623 nmp = mp; 4713859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 4726929Smisaki "mp $%p b_rptr $%p len %d", 4736929Smisaki mp, nmp->b_rptr, MBLKL(nmp))); 4745505Smisaki /* copy ether_header from mblk to hdrs_buf */ 4755505Smisaki cursor = &hdrs_buf[0]; 4765505Smisaki tmp = sizeof (struct ether_vlan_header); 4775505Smisaki while ((nmp != NULL) && (tmp > 0)) { 4785505Smisaki size_t buflen; 4795505Smisaki mblk_len = MBLKL(nmp); 4805512Smisaki buflen = min((size_t)tmp, mblk_len); 4815505Smisaki bcopy(nmp->b_rptr, cursor, buflen); 4825505Smisaki cursor += buflen; 4835505Smisaki tmp -= buflen; 4845505Smisaki nmp = nmp->b_cont; 4855505Smisaki } 4865505Smisaki 4875505Smisaki nmp = mp; 4885505Smisaki mblk_len = MBLKL(nmp); 4893859Sml29623 ip_buf = NULL; 4903859Sml29623 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 4913859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 4926929Smisaki "ether type 0x%x", eth_type, hdrp->value)); 4933859Sml29623 4943859Sml29623 if (eth_type < ETHERMTU) { 4953859Sml29623 tmp = 1ull; 4963859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 4973859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 4986929Smisaki "value 0x%llx", hdrp->value)); 
4993859Sml29623 if (*(hdrs_buf + sizeof (struct ether_header)) 5006929Smisaki == LLC_SNAP_SAP) { 5013859Sml29623 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 5026929Smisaki sizeof (struct ether_header) + 6))); 5033859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 5046929Smisaki "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 5056929Smisaki eth_type)); 5063859Sml29623 } else { 5073859Sml29623 goto fill_tx_header_done; 5083859Sml29623 } 5093859Sml29623 } else if (eth_type == VLAN_ETHERTYPE) { 5103859Sml29623 tmp = 1ull; 5113859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 5123859Sml29623 5133859Sml29623 eth_type = ntohs(((struct ether_vlan_header *) 5146929Smisaki hdrs_buf)->ether_type); 5153859Sml29623 is_vlan = B_TRUE; 5163859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 5176929Smisaki "value 0x%llx", hdrp->value)); 5183859Sml29623 } 5193859Sml29623 5203859Sml29623 if (!is_vlan) { 5213859Sml29623 eth_hdr_size = sizeof (struct ether_header); 5223859Sml29623 } else { 5233859Sml29623 eth_hdr_size = sizeof (struct ether_vlan_header); 5243859Sml29623 } 5253859Sml29623 5263859Sml29623 switch (eth_type) { 5273859Sml29623 case ETHERTYPE_IP: 5283859Sml29623 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 5293859Sml29623 ip_buf = nmp->b_rptr + eth_hdr_size; 5303859Sml29623 mblk_len -= eth_hdr_size; 5313859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5323859Sml29623 if (mblk_len > (iph_len + sizeof (uint32_t))) { 5333859Sml29623 ip_buf = nmp->b_rptr; 5343859Sml29623 ip_buf += eth_hdr_size; 5353859Sml29623 } else { 5363859Sml29623 ip_buf = NULL; 5373859Sml29623 } 5383859Sml29623 5393859Sml29623 } 5403859Sml29623 if (ip_buf == NULL) { 5413859Sml29623 hdrs_size = 0; 5423859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 5433859Sml29623 while ((nmp) && (hdrs_size < 5446929Smisaki sizeof (hdrs_buf))) { 5453859Sml29623 mblk_len = (size_t)nmp->b_wptr - 5466929Smisaki (size_t)nmp->b_rptr; 5473859Sml29623 if (mblk_len >= 5486929Smisaki (sizeof 
(hdrs_buf) - hdrs_size)) 5493859Sml29623 mblk_len = sizeof (hdrs_buf) - 5506929Smisaki hdrs_size; 5513859Sml29623 bcopy(nmp->b_rptr, 5526929Smisaki &hdrs_buf[hdrs_size], mblk_len); 5533859Sml29623 hdrs_size += mblk_len; 5543859Sml29623 nmp = nmp->b_cont; 5553859Sml29623 } 5563859Sml29623 ip_buf = hdrs_buf; 5573859Sml29623 ip_buf += eth_hdr_size; 5583859Sml29623 iph_len = ((*ip_buf) & 0x0f); 5593859Sml29623 } 5603859Sml29623 5613859Sml29623 ipproto = ip_buf[9]; 5623859Sml29623 5633859Sml29623 tmp = (uint64_t)iph_len; 5643859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 5653859Sml29623 tmp = (uint64_t)(eth_hdr_size >> 1); 5663859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 5673859Sml29623 5683859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 5696929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 5706929Smisaki "tmp 0x%x", 5716929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 5726929Smisaki ipproto, tmp)); 5733859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 5746929Smisaki "value 0x%llx", hdrp->value)); 5753859Sml29623 5763859Sml29623 break; 5773859Sml29623 5783859Sml29623 case ETHERTYPE_IPV6: 5793859Sml29623 hdrs_size = 0; 5803859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 5813859Sml29623 while ((nmp) && (hdrs_size < 5826929Smisaki sizeof (hdrs_buf))) { 5833859Sml29623 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 5843859Sml29623 if (mblk_len >= 5856929Smisaki (sizeof (hdrs_buf) - hdrs_size)) 5863859Sml29623 mblk_len = sizeof (hdrs_buf) - 5876929Smisaki hdrs_size; 5883859Sml29623 bcopy(nmp->b_rptr, 5896929Smisaki &hdrs_buf[hdrs_size], mblk_len); 5903859Sml29623 hdrs_size += mblk_len; 5913859Sml29623 nmp = nmp->b_cont; 5923859Sml29623 } 5933859Sml29623 ip_buf = hdrs_buf; 5943859Sml29623 ip_buf += eth_hdr_size; 5953859Sml29623 5963859Sml29623 tmp = 1ull; 5973859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 5983859Sml29623 5993859Sml29623 tmp 
= (eth_hdr_size >> 1); 6003859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 6013859Sml29623 6023859Sml29623 /* byte 6 is the next header protocol */ 6033859Sml29623 ipproto = ip_buf[6]; 6043859Sml29623 6053859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 6066929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 6076929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 6086929Smisaki ipproto)); 6093859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 6106929Smisaki "value 0x%llx", hdrp->value)); 6113859Sml29623 6123859Sml29623 break; 6133859Sml29623 6143859Sml29623 default: 6153859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 6163859Sml29623 goto fill_tx_header_done; 6173859Sml29623 } 6183859Sml29623 6193859Sml29623 switch (ipproto) { 6203859Sml29623 case IPPROTO_TCP: 6213859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6226611Sml29623 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 6233859Sml29623 if (l4_cksum) { 6246611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 6256611Sml29623 hdrp->value |= 6266611Sml29623 (((uint64_t)(start_offset >> 1)) << 6276611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 6286611Sml29623 hdrp->value |= 6296611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 6306611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 6316611Sml29623 6323859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6336611Sml29623 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 6346611Sml29623 "value 0x%llx", hdrp->value)); 6353859Sml29623 } 6363859Sml29623 6373859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 6386611Sml29623 "value 0x%llx", hdrp->value)); 6393859Sml29623 break; 6403859Sml29623 6413859Sml29623 case IPPROTO_UDP: 6423859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 6433859Sml29623 if (l4_cksum) { 6446611Sml29623 if (!nxge_cksum_offload) { 6456611Sml29623 uint16_t *up; 6466611Sml29623 uint16_t cksum; 6476611Sml29623 t_uscalar_t stuff_len; 
6486611Sml29623 6496611Sml29623 /* 6506611Sml29623 * The checksum field has the 6516611Sml29623 * partial checksum. 6526611Sml29623 * IP_CSUM() macro calls ip_cksum() which 6536611Sml29623 * can add in the partial checksum. 6546611Sml29623 */ 6556611Sml29623 cksum = IP_CSUM(mp, start_offset, 0); 6566611Sml29623 stuff_len = stuff_offset; 6576611Sml29623 nmp = mp; 6586611Sml29623 mblk_len = MBLKL(nmp); 6596611Sml29623 while ((nmp != NULL) && 6606611Sml29623 (mblk_len < stuff_len)) { 6616611Sml29623 stuff_len -= mblk_len; 6626611Sml29623 nmp = nmp->b_cont; 6636611Sml29623 } 6646611Sml29623 ASSERT(nmp); 6656611Sml29623 up = (uint16_t *)(nmp->b_rptr + stuff_len); 6666611Sml29623 6676611Sml29623 *up = cksum; 6686611Sml29623 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 6696611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6706611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 6716611Sml29623 "use sw cksum " 6726611Sml29623 "write to $%p cksum 0x%x content up 0x%x", 6736611Sml29623 stuff_len, 6746611Sml29623 up, 6756611Sml29623 cksum, 6766611Sml29623 *up)); 6776611Sml29623 } else { 6786611Sml29623 /* Hardware will compute the full checksum */ 6796611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 6806611Sml29623 hdrp->value |= 6816611Sml29623 (((uint64_t)(start_offset >> 1)) << 6826611Sml29623 TX_PKT_HEADER_L4START_SHIFT); 6836611Sml29623 hdrp->value |= 6846611Sml29623 (((uint64_t)(stuff_offset >> 1)) << 6856611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT); 6866611Sml29623 6876611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6886611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 6896611Sml29623 " use partial checksum " 6906611Sml29623 "cksum 0x%x ", 6916611Sml29623 "value 0x%llx", 6926611Sml29623 stuff_offset, 6936611Sml29623 IP_CSUM(mp, start_offset, 0), 6946611Sml29623 hdrp->value)); 6956611Sml29623 } 6963859Sml29623 } 6976611Sml29623 6983859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 6996929Smisaki "==> nxge_tx_pkt_hdr_init: UDP" 7006929Smisaki "value 0x%llx", hdrp->value)); 7013859Sml29623 
break; 7023859Sml29623 7033859Sml29623 default: 7043859Sml29623 goto fill_tx_header_done; 7053859Sml29623 } 7063859Sml29623 7073859Sml29623 fill_tx_header_done: 7083859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7096929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d " 7106929Smisaki "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 7113859Sml29623 7123859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 7133859Sml29623 } 7143859Sml29623 7153859Sml29623 /*ARGSUSED*/ 7163859Sml29623 p_mblk_t 7173859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 7183859Sml29623 { 7193859Sml29623 p_mblk_t newmp = NULL; 7203859Sml29623 7213859Sml29623 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 7223859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7236929Smisaki "<== nxge_tx_pkt_header_reserve: allocb failed")); 7243859Sml29623 return (NULL); 7253859Sml29623 } 7263859Sml29623 7273859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7286929Smisaki "==> nxge_tx_pkt_header_reserve: get new mp")); 7293859Sml29623 DB_TYPE(newmp) = M_DATA; 7303859Sml29623 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 7313859Sml29623 linkb(newmp, mp); 7323859Sml29623 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 7333859Sml29623 7343859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 7356929Smisaki "b_rptr $%p b_wptr $%p", 7366929Smisaki newmp->b_rptr, newmp->b_wptr)); 7373859Sml29623 7383859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7396929Smisaki "<== nxge_tx_pkt_header_reserve: use new mp")); 7403859Sml29623 7413859Sml29623 return (newmp); 7423859Sml29623 } 7433859Sml29623 7443859Sml29623 int 7453859Sml29623 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 7463859Sml29623 { 7473859Sml29623 uint_t nmblks; 7483859Sml29623 ssize_t len; 7493859Sml29623 uint_t pkt_len; 7503859Sml29623 p_mblk_t nmp, bmp, tmp; 7513859Sml29623 uint8_t *b_wptr; 7523859Sml29623 7533859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7546929Smisaki "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 
7556929Smisaki "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 7563859Sml29623 7573859Sml29623 nmp = mp; 7583859Sml29623 bmp = mp; 7593859Sml29623 nmblks = 0; 7603859Sml29623 pkt_len = 0; 7613859Sml29623 *tot_xfer_len_p = 0; 7623859Sml29623 7633859Sml29623 while (nmp) { 7643859Sml29623 len = MBLKL(nmp); 7653859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7666929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7676929Smisaki len, pkt_len, nmblks, 7686929Smisaki *tot_xfer_len_p)); 7693859Sml29623 7703859Sml29623 if (len <= 0) { 7713859Sml29623 bmp = nmp; 7723859Sml29623 nmp = nmp->b_cont; 7733859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7746929Smisaki "==> nxge_tx_pkt_nmblocks: " 7756929Smisaki "len (0) pkt_len %d nmblks %d", 7766929Smisaki pkt_len, nmblks)); 7773859Sml29623 continue; 7783859Sml29623 } 7793859Sml29623 7803859Sml29623 *tot_xfer_len_p += len; 7813859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 7826929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d", 7836929Smisaki len, pkt_len, nmblks, 7846929Smisaki *tot_xfer_len_p)); 7853859Sml29623 7863859Sml29623 if (len < nxge_bcopy_thresh) { 7873859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 7886929Smisaki "==> nxge_tx_pkt_nmblocks: " 7896929Smisaki "len %d (< thresh) pkt_len %d nmblks %d", 7906929Smisaki len, pkt_len, nmblks)); 7913859Sml29623 if (pkt_len == 0) 7923859Sml29623 nmblks++; 7933859Sml29623 pkt_len += len; 7943859Sml29623 if (pkt_len >= nxge_bcopy_thresh) { 7953859Sml29623 pkt_len = 0; 7963859Sml29623 len = 0; 7973859Sml29623 nmp = bmp; 7983859Sml29623 } 7993859Sml29623 } else { 8003859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8016929Smisaki "==> nxge_tx_pkt_nmblocks: " 8026929Smisaki "len %d (> thresh) pkt_len %d nmblks %d", 8036929Smisaki len, pkt_len, nmblks)); 8043859Sml29623 pkt_len = 0; 8053859Sml29623 nmblks++; 8063859Sml29623 /* 8073859Sml29623 * Hardware limits the transfer length to 4K. 
8083859Sml29623 * If len is more than 4K, we need to break 8093859Sml29623 * it up to at most 2 more blocks. 8103859Sml29623 */ 8113859Sml29623 if (len > TX_MAX_TRANSFER_LENGTH) { 8123859Sml29623 uint32_t nsegs; 8133859Sml29623 8146495Sspeer nsegs = 1; 8153859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8166929Smisaki "==> nxge_tx_pkt_nmblocks: " 8176929Smisaki "len %d pkt_len %d nmblks %d nsegs %d", 8186929Smisaki len, pkt_len, nmblks, nsegs)); 8193859Sml29623 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 8203859Sml29623 ++nsegs; 8213859Sml29623 } 8223859Sml29623 do { 8233859Sml29623 b_wptr = nmp->b_rptr + 8246929Smisaki TX_MAX_TRANSFER_LENGTH; 8253859Sml29623 nmp->b_wptr = b_wptr; 8263859Sml29623 if ((tmp = dupb(nmp)) == NULL) { 8273859Sml29623 return (0); 8283859Sml29623 } 8293859Sml29623 tmp->b_rptr = b_wptr; 8303859Sml29623 tmp->b_wptr = nmp->b_wptr; 8313859Sml29623 tmp->b_cont = nmp->b_cont; 8323859Sml29623 nmp->b_cont = tmp; 8333859Sml29623 nmblks++; 8343859Sml29623 if (--nsegs) { 8353859Sml29623 nmp = tmp; 8363859Sml29623 } 8373859Sml29623 } while (nsegs); 8383859Sml29623 nmp = tmp; 8393859Sml29623 } 8403859Sml29623 } 8413859Sml29623 8423859Sml29623 /* 8433859Sml29623 * Hardware limits the transmit gather pointers to 15. 
8443859Sml29623 */ 8453859Sml29623 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 8466929Smisaki TX_MAX_GATHER_POINTERS) { 8473859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8486929Smisaki "==> nxge_tx_pkt_nmblocks: pull msg - " 8496929Smisaki "len %d pkt_len %d nmblks %d", 8506929Smisaki len, pkt_len, nmblks)); 8513859Sml29623 /* Pull all message blocks from b_cont */ 8523859Sml29623 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 8533859Sml29623 return (0); 8543859Sml29623 } 8553859Sml29623 freemsg(nmp->b_cont); 8563859Sml29623 nmp->b_cont = tmp; 8573859Sml29623 pkt_len = 0; 8583859Sml29623 } 8593859Sml29623 bmp = nmp; 8603859Sml29623 nmp = nmp->b_cont; 8613859Sml29623 } 8623859Sml29623 8633859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, 8646929Smisaki "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 8656929Smisaki "nmblks %d len %d tot_xfer_len %d", 8666929Smisaki mp->b_rptr, mp->b_wptr, nmblks, 8676929Smisaki MBLKL(mp), *tot_xfer_len_p)); 8683859Sml29623 8693859Sml29623 return (nmblks); 8703859Sml29623 } 8713859Sml29623 872*7906SMichael.Speer@Sun.COM static void 873*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_list_add(p_tx_ring_t tx_ring_p, p_tx_msg_t msgp) 874*7906SMichael.Speer@Sun.COM { 875*7906SMichael.Speer@Sun.COM MUTEX_ENTER(&tx_ring_p->freelock); 876*7906SMichael.Speer@Sun.COM if (tx_ring_p->tx_free_list_p != NULL) 877*7906SMichael.Speer@Sun.COM msgp->nextp = tx_ring_p->tx_free_list_p; 878*7906SMichael.Speer@Sun.COM tx_ring_p->tx_free_list_p = msgp; 879*7906SMichael.Speer@Sun.COM MUTEX_EXIT(&tx_ring_p->freelock); 880*7906SMichael.Speer@Sun.COM } 881*7906SMichael.Speer@Sun.COM 882*7906SMichael.Speer@Sun.COM /* 883*7906SMichael.Speer@Sun.COM * void 884*7906SMichael.Speer@Sun.COM * nxge_txdma_freemsg_task() -- walk the list of messages to be 885*7906SMichael.Speer@Sun.COM * freed and free the messages. 
886*7906SMichael.Speer@Sun.COM	 */
887*7906SMichael.Speer@Sun.COM	void
888*7906SMichael.Speer@Sun.COM	nxge_txdma_freemsg_task(p_tx_ring_t tx_ring_p)
889*7906SMichael.Speer@Sun.COM	{
890*7906SMichael.Speer@Sun.COM	p_tx_msg_t msgp, nextp;
891*7906SMichael.Speer@Sun.COM
	/*
	 * Unlocked fast-path emptiness check.  NOTE(review): this read is
	 * racy by design — presumably a message added concurrently is
	 * simply picked up on the next invocation; confirm all producers
	 * go through nxge_txdma_freemsg_list_add() under freelock.
	 */
892*7906SMichael.Speer@Sun.COM	if (tx_ring_p->tx_free_list_p != NULL) {
	/* Detach the whole list under the lock, then free it unlocked. */
893*7906SMichael.Speer@Sun.COM	MUTEX_ENTER(&tx_ring_p->freelock);
894*7906SMichael.Speer@Sun.COM	msgp = tx_ring_p->tx_free_list_p;
895*7906SMichael.Speer@Sun.COM	tx_ring_p->tx_free_list_p = (p_tx_msg_t)NULL;
896*7906SMichael.Speer@Sun.COM	MUTEX_EXIT(&tx_ring_p->freelock);
897*7906SMichael.Speer@Sun.COM
898*7906SMichael.Speer@Sun.COM	while (msgp != NULL) {
899*7906SMichael.Speer@Sun.COM	nextp = msgp->nextp;
900*7906SMichael.Speer@Sun.COM	if (msgp->tx_message != NULL) {
901*7906SMichael.Speer@Sun.COM	freemsg(msgp->tx_message);
902*7906SMichael.Speer@Sun.COM	msgp->tx_message = NULL;
903*7906SMichael.Speer@Sun.COM	}
904*7906SMichael.Speer@Sun.COM	msgp->nextp = NULL;
905*7906SMichael.Speer@Sun.COM	msgp = nextp;
906*7906SMichael.Speer@Sun.COM	}
907*7906SMichael.Speer@Sun.COM	}
908*7906SMichael.Speer@Sun.COM	}
909*7906SMichael.Speer@Sun.COM
	/*
	 * nxge_txdma_reclaim() -- walk completed TX descriptors (up to the
	 * hardware-maintained head), unbind their DMA resources, queue their
	 * mblks for deferred freeing, and report whether the ring has room
	 * for <nmblks> more descriptors.  Caller holds tx_ring_p->lock.
	 */
9103859Sml29623	boolean_t
9113859Sml29623	nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
9123859Sml29623	{
9133859Sml29623	boolean_t status = B_TRUE;
9143859Sml29623	p_nxge_dma_common_t tx_desc_dma_p;
9153859Sml29623	nxge_dma_common_t desc_area;
9163859Sml29623	p_tx_desc_t tx_desc_ring_vp;
9173859Sml29623	p_tx_desc_t tx_desc_p;
9183859Sml29623	p_tx_desc_t tx_desc_pp;
9193859Sml29623	tx_desc_t r_tx_desc;
9203859Sml29623	p_tx_msg_t tx_msg_ring;
9213859Sml29623	p_tx_msg_t tx_msg_p;
9223859Sml29623	npi_handle_t handle;
9233859Sml29623	tx_ring_hdl_t tx_head;
9243859Sml29623	uint32_t pkt_len;
9253859Sml29623	uint_t tx_rd_index;
9263859Sml29623	uint16_t head_index, tail_index;
9273859Sml29623	uint8_t tdc;
9283859Sml29623	boolean_t head_wrap, tail_wrap;
929*7906SMichael.Speer@Sun.COM
p_nxge_tx_ring_stats_t tdc_stats; 9303859Sml29623 int rc; 9313859Sml29623 9323859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 9333859Sml29623 9343859Sml29623 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 9356929Smisaki (nmblks != 0)); 9363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9376929Smisaki "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 9386929Smisaki tx_ring_p->descs_pending, nxge_reclaim_pending, 9396929Smisaki nmblks)); 9403859Sml29623 if (!status) { 9413859Sml29623 tx_desc_dma_p = &tx_ring_p->tdc_desc; 9423859Sml29623 desc_area = tx_ring_p->tdc_desc; 9433859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 9443859Sml29623 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 9453859Sml29623 tx_desc_ring_vp = 9466929Smisaki (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 9473859Sml29623 tx_rd_index = tx_ring_p->rd_index; 9483859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 9493859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 9503859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index]; 9513859Sml29623 tdc = tx_ring_p->tdc; 9523859Sml29623 tdc_stats = tx_ring_p->tdc_stats; 9533859Sml29623 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 9543859Sml29623 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 9553859Sml29623 } 9563859Sml29623 9573859Sml29623 tail_index = tx_ring_p->wr_index; 9583859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap; 9593859Sml29623 9603859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9616929Smisaki "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d " 9626929Smisaki "tail_index %d tail_wrap %d " 9636929Smisaki "tx_desc_p $%p ($%p) ", 9646929Smisaki tdc, tx_rd_index, tail_index, tail_wrap, 9656929Smisaki tx_desc_p, (*(uint64_t *)tx_desc_p))); 9663859Sml29623 /* 9673859Sml29623 * Read the hardware maintained transmit head 9683859Sml29623 * and wrap around bit. 
9693859Sml29623 */ 9703859Sml29623 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 9713859Sml29623 head_index = tx_head.bits.ldw.head; 9723859Sml29623 head_wrap = tx_head.bits.ldw.wrap; 9733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9746929Smisaki "==> nxge_txdma_reclaim: " 9756929Smisaki "tx_rd_index %d tail %d tail_wrap %d " 9766929Smisaki "head %d wrap %d", 9776929Smisaki tx_rd_index, tail_index, tail_wrap, 9786929Smisaki head_index, head_wrap)); 9793859Sml29623 9803859Sml29623 if (head_index == tail_index) { 9813859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap, 9826929Smisaki tail_index, tail_wrap) && 9836929Smisaki (head_index == tx_rd_index)) { 9843859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9856929Smisaki "==> nxge_txdma_reclaim: EMPTY")); 9863859Sml29623 return (B_TRUE); 9873859Sml29623 } 9883859Sml29623 9893859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9906929Smisaki "==> nxge_txdma_reclaim: Checking " 9916929Smisaki "if ring full")); 9923859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 9936929Smisaki tail_wrap)) { 9943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 9956929Smisaki "==> nxge_txdma_reclaim: full")); 9963859Sml29623 return (B_FALSE); 9973859Sml29623 } 9983859Sml29623 } 9993859Sml29623 10003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10016929Smisaki "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 10023859Sml29623 10033859Sml29623 tx_desc_pp = &r_tx_desc; 10043859Sml29623 while ((tx_rd_index != head_index) && 10056929Smisaki (tx_ring_p->descs_pending != 0)) { 10063859Sml29623 10073859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10086929Smisaki "==> nxge_txdma_reclaim: Checking if pending")); 10093859Sml29623 10103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10116929Smisaki "==> nxge_txdma_reclaim: " 10126929Smisaki "descs_pending %d ", 10136929Smisaki tx_ring_p->descs_pending)); 10143859Sml29623 10153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10166929Smisaki "==> nxge_txdma_reclaim: " 10176929Smisaki "(tx_rd_index 
%d head_index %d " 10186929Smisaki "(tx_desc_p $%p)", 10196929Smisaki tx_rd_index, head_index, 10206929Smisaki tx_desc_p)); 10213859Sml29623 10223859Sml29623 tx_desc_pp->value = tx_desc_p->value; 10233859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10246929Smisaki "==> nxge_txdma_reclaim: " 10256929Smisaki "(tx_rd_index %d head_index %d " 10266929Smisaki "tx_desc_p $%p (desc value 0x%llx) ", 10276929Smisaki tx_rd_index, head_index, 10286929Smisaki tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 10293859Sml29623 10303859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10316929Smisaki "==> nxge_txdma_reclaim: dump desc:")); 10323859Sml29623 10333859Sml29623 pkt_len = tx_desc_pp->bits.hdw.tr_len; 10343859Sml29623 tdc_stats->obytes += pkt_len; 10353859Sml29623 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 10363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10376929Smisaki "==> nxge_txdma_reclaim: pkt_len %d " 10386929Smisaki "tdc channel %d opackets %d", 10396929Smisaki pkt_len, 10406929Smisaki tdc, 10416929Smisaki tdc_stats->opackets)); 10423859Sml29623 10433859Sml29623 if (tx_msg_p->flags.dma_type == USE_DVMA) { 10443859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10456929Smisaki "tx_desc_p = $%p " 10466929Smisaki "tx_desc_pp = $%p " 10476929Smisaki "index = %d", 10486929Smisaki tx_desc_p, 10496929Smisaki tx_desc_pp, 10506929Smisaki tx_ring_p->rd_index)); 10513859Sml29623 (void) dvma_unload(tx_msg_p->dvma_handle, 10526929Smisaki 0, -1); 10533859Sml29623 tx_msg_p->dvma_handle = NULL; 10543859Sml29623 if (tx_ring_p->dvma_wr_index == 10556929Smisaki tx_ring_p->dvma_wrap_mask) { 10563859Sml29623 tx_ring_p->dvma_wr_index = 0; 10573859Sml29623 } else { 10583859Sml29623 tx_ring_p->dvma_wr_index++; 10593859Sml29623 } 10603859Sml29623 tx_ring_p->dvma_pending--; 10613859Sml29623 } else if (tx_msg_p->flags.dma_type == 10626929Smisaki USE_DMA) { 10633859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10646929Smisaki "==> nxge_txdma_reclaim: " 10656929Smisaki "USE DMA")); 10663859Sml29623 if (rc = 
ddi_dma_unbind_handle 10676929Smisaki (tx_msg_p->dma_handle)) { 10683859Sml29623 cmn_err(CE_WARN, "!nxge_reclaim: " 10696929Smisaki "ddi_dma_unbind_handle " 10706929Smisaki "failed. status %d", rc); 10713859Sml29623 } 10723859Sml29623 } 10733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 10746929Smisaki "==> nxge_txdma_reclaim: count packets")); 1075*7906SMichael.Speer@Sun.COM 10763859Sml29623 /* 10773859Sml29623 * count a chained packet only once. 10783859Sml29623 */ 10793859Sml29623 if (tx_msg_p->tx_message != NULL) { 1080*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_list_add(tx_ring_p, 1081*7906SMichael.Speer@Sun.COM tx_msg_p); 10823859Sml29623 } 10833859Sml29623 10843859Sml29623 tx_msg_p->flags.dma_type = USE_NONE; 10853859Sml29623 tx_rd_index = tx_ring_p->rd_index; 10863859Sml29623 tx_rd_index = (tx_rd_index + 1) & 10876929Smisaki tx_ring_p->tx_wrap_mask; 10883859Sml29623 tx_ring_p->rd_index = tx_rd_index; 10893859Sml29623 tx_ring_p->descs_pending--; 10903859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 10913859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index]; 10923859Sml29623 } 10933859Sml29623 10943859Sml29623 status = (nmblks <= (tx_ring_p->tx_ring_size - 10956929Smisaki tx_ring_p->descs_pending - 10966929Smisaki TX_FULL_MARK)); 10973859Sml29623 if (status) { 10983859Sml29623 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 10993859Sml29623 } 11003859Sml29623 } else { 11013859Sml29623 status = (nmblks <= 11026929Smisaki (tx_ring_p->tx_ring_size - 11036929Smisaki tx_ring_p->descs_pending - 11046929Smisaki TX_FULL_MARK)); 11053859Sml29623 } 11063859Sml29623 11073859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 11086929Smisaki "<== nxge_txdma_reclaim status = 0x%08x", status)); 11093859Sml29623 11103859Sml29623 return (status); 11113859Sml29623 } 11123859Sml29623 11136495Sspeer /* 11146495Sspeer * nxge_tx_intr 11156495Sspeer * 11166495Sspeer * Process a TDC interrupt 11176495Sspeer * 11186495Sspeer * Arguments: 11196495Sspeer * arg1 A Logical Device state Vector (LSV) 
data structure.
11206495Sspeer	 *	arg2	nxge_t *
11216495Sspeer	 *
11226495Sspeer	 * Notes:
11236495Sspeer	 *
11246495Sspeer	 * NPI/NXGE function calls:
11256495Sspeer	 *	npi_txdma_control_status()
11266495Sspeer	 *	npi_intr_ldg_mgmt_set()
11276495Sspeer	 *
11286495Sspeer	 *	nxge_tx_err_evnts()
11296495Sspeer	 *	nxge_txdma_reclaim()
11306495Sspeer	 *
11316495Sspeer	 * Registers accessed:
11326495Sspeer	 *	TX_CS		DMC+0x40028 Transmit Control And Status
11336495Sspeer	 *	PIO_LDSV
11346495Sspeer	 *
11356495Sspeer	 * Context:
11366495Sspeer	 *	Any domain
11376495Sspeer	 */
11383859Sml29623	uint_t
11393859Sml29623	nxge_tx_intr(void *arg1, void *arg2)
11403859Sml29623	{
11413859Sml29623	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
11423859Sml29623	p_nxge_t nxgep = (p_nxge_t)arg2;
11433859Sml29623	p_nxge_ldg_t ldgp;
11443859Sml29623	uint8_t channel;
11453859Sml29623	uint32_t vindex;
11463859Sml29623	npi_handle_t handle;
11473859Sml29623	tx_cs_t cs;
11483859Sml29623	p_tx_ring_t *tx_rings;
11493859Sml29623	p_tx_ring_t tx_ring_p;
11503859Sml29623	npi_status_t rs = NPI_SUCCESS;
11513859Sml29623	uint_t serviced = DDI_INTR_UNCLAIMED;
11523859Sml29623	nxge_status_t status = NXGE_OK;
11533859Sml29623
11543859Sml29623	if (ldvp == NULL) {
11553859Sml29623	NXGE_DEBUG_MSG((NULL, INT_CTL,
11566929Smisaki	    "<== nxge_tx_intr: nxgep $%p ldvp $%p",
11576929Smisaki	    nxgep, ldvp));
11583859Sml29623	return (DDI_INTR_UNCLAIMED);
11593859Sml29623	}
11603859Sml29623
	/* Trust the LDV's back-pointer when arg2 disagrees with it. */
11613859Sml29623	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
11623859Sml29623	nxgep = ldvp->nxgep;
11633859Sml29623	}
11643859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11656929Smisaki	    "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
11666929Smisaki	    nxgep, ldvp));
11676713Sspeer
11686713Sspeer	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
11696713Sspeer	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
11706713Sspeer	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11716713Sspeer	    "<== nxge_tx_intr: interface not started or intialized"));
11726713Sspeer	return (DDI_INTR_CLAIMED);
11736713Sspeer	}
11746713Sspeer
11753859Sml29623	/*
11763859Sml29623	 * This interrupt handler is for a specific
11773859Sml29623	 * transmit dma channel.
11783859Sml29623	 */
11793859Sml29623	handle = NXGE_DEV_NPI_HANDLE(nxgep);
11803859Sml29623	/* Get the control and status for this channel. */
11813859Sml29623	channel = ldvp->channel;
11823859Sml29623	ldgp = ldvp->ldgp;
11833859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11846929Smisaki	    "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
11856929Smisaki	    "channel %d",
11866929Smisaki	    nxgep, ldvp, channel));
11873859Sml29623
	/*
	 * NOTE(review): if this NPI read fails (rs != 0), <cs> is later
	 * passed to nxge_tx_err_evnts() without having been filled in —
	 * confirm npi_txdma_control_status() zeroes it on failure.
	 */
11883859Sml29623	rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
11893859Sml29623	vindex = ldvp->vdma_index;
11903859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11916929Smisaki	    "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
11926929Smisaki	    channel, vindex, rs));
	/* "mk" (mark) bit set: descriptors completed — reclaim the ring. */
11933859Sml29623	if (!rs && cs.bits.ldw.mk) {
11943859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
11956929Smisaki	    "==> nxge_tx_intr:channel %d ring index %d "
11966929Smisaki	    "status 0x%08x (mk bit set)",
11976929Smisaki	    channel, vindex, rs));
11983859Sml29623	tx_rings = nxgep->tx_rings->rings;
11993859Sml29623	tx_ring_p = tx_rings[vindex];
12003859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
12016929Smisaki	    "==> nxge_tx_intr:channel %d ring index %d "
12026929Smisaki	    "status 0x%08x (mk bit set, calling reclaim)",
12036929Smisaki	    channel, vindex, rs));
12043859Sml29623
12053859Sml29623	MUTEX_ENTER(&tx_ring_p->lock);
12063859Sml29623	(void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0);
12073859Sml29623	MUTEX_EXIT(&tx_ring_p->lock);
1208*7906SMichael.Speer@Sun.COM
	/* Free reclaimed mblks now that the ring lock is dropped. */
1209*7906SMichael.Speer@Sun.COM	nxge_txdma_freemsg_task(tx_ring_p);
1210*7906SMichael.Speer@Sun.COM
12113859Sml29623	mac_tx_update(nxgep->mach);
12123859Sml29623	}
12133859Sml29623
12143859Sml29623	/*
12153859Sml29623	 * Process other transmit control and status.
12163859Sml29623	 * Check the ldv state.
12173859Sml29623	 */
12183859Sml29623	status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
12193859Sml29623	/*
12203859Sml29623	 * Rearm this logical group if this is a single device
12213859Sml29623	 * group.
12223859Sml29623	 */
12233859Sml29623	if (ldgp->nldvs == 1) {
12243859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL,
12256929Smisaki	    "==> nxge_tx_intr: rearm"));
12263859Sml29623	if (status == NXGE_OK) {
12276495Sspeer	if (isLDOMguest(nxgep)) {
12286495Sspeer	nxge_hio_ldgimgn(nxgep, ldgp);
12296495Sspeer	} else {
12306495Sspeer	(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
12316495Sspeer	    B_TRUE, ldgp->ldg_timer);
12326495Sspeer	}
12333859Sml29623	}
12343859Sml29623	}
12353859Sml29623
12363859Sml29623	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
12373859Sml29623	serviced = DDI_INTR_CLAIMED;
12383859Sml29623	return (serviced);
12393859Sml29623	}
12403859Sml29623
	/* Stops link monitoring only; marked dead code by the original author. */
12413859Sml29623	void
12426495Sspeer	nxge_txdma_stop(p_nxge_t nxgep)	/* Dead */
12433859Sml29623	{
12443859Sml29623	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
12453859Sml29623
12463859Sml29623	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
12473859Sml29623
12483859Sml29623	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
12493859Sml29623	}
12503859Sml29623
	/* Stop, then re-initialize and restart TX DMA; also marked dead. */
12513859Sml29623	void
12526495Sspeer	nxge_txdma_stop_start(p_nxge_t nxgep)	/* Dead */
12533859Sml29623	{
12543859Sml29623	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
12553859Sml29623
12563859Sml29623	(void) nxge_txdma_stop(nxgep);
12573859Sml29623
12583859Sml29623	(void) nxge_fixup_txdma_rings(nxgep);
12593859Sml29623	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
12603859Sml29623	(void) nxge_tx_mac_enable(nxgep);
12613859Sml29623	(void) nxge_txdma_hw_kick(nxgep);
12623859Sml29623
12633859Sml29623	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
12643859Sml29623	}
12653859Sml29623
12666495Sspeer	npi_status_t
12676495Sspeer	nxge_txdma_channel_disable(
12686495Sspeer	nxge_t *nxge,
12696495Sspeer int channel) 12706495Sspeer { 12716495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 12726495Sspeer npi_status_t rs; 12736495Sspeer tdmc_intr_dbg_t intr_dbg; 12746495Sspeer 12756495Sspeer /* 12766495Sspeer * Stop the dma channel and wait for the stop-done. 12776495Sspeer * If the stop-done bit is not present, then force 12786495Sspeer * an error so TXC will stop. 12796495Sspeer * All channels bound to this port need to be stopped 12806495Sspeer * and reset after injecting an interrupt error. 12816495Sspeer */ 12826495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12836495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12846929Smisaki "==> nxge_txdma_channel_disable(%d) " 12856929Smisaki "rs 0x%x", channel, rs)); 12866495Sspeer if (rs != NPI_SUCCESS) { 12876495Sspeer /* Inject any error */ 12886495Sspeer intr_dbg.value = 0; 12896495Sspeer intr_dbg.bits.ldw.nack_pref = 1; 12906495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12916929Smisaki "==> nxge_txdma_hw_mode: " 12926929Smisaki "channel %d (stop failed 0x%x) " 12936929Smisaki "(inject err)", rs, channel)); 12946495Sspeer (void) npi_txdma_inj_int_error_set( 12956929Smisaki handle, channel, &intr_dbg); 12966495Sspeer rs = npi_txdma_channel_disable(handle, channel); 12976495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL, 12986929Smisaki "==> nxge_txdma_hw_mode: " 12996929Smisaki "channel %d (stop again 0x%x) " 13006929Smisaki "(after inject err)", 13016929Smisaki rs, channel)); 13026495Sspeer } 13036495Sspeer 13046495Sspeer return (rs); 13056495Sspeer } 13066495Sspeer 13076495Sspeer /* 13086495Sspeer * nxge_txdma_hw_mode 13096495Sspeer * 13106495Sspeer * Toggle all TDCs on (enable) or off (disable). 13116495Sspeer * 13126495Sspeer * Arguments: 13136495Sspeer * nxgep 13146495Sspeer * enable Enable or disable a TDC. 
13156495Sspeer * 13166495Sspeer * Notes: 13176495Sspeer * 13186495Sspeer * NPI/NXGE function calls: 13196495Sspeer * npi_txdma_channel_enable(TX_CS) 13206495Sspeer * npi_txdma_channel_disable(TX_CS) 13216495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 13226495Sspeer * 13236495Sspeer * Registers accessed: 13246495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 13256495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 13266495Sspeer * 13276495Sspeer * Context: 13286495Sspeer * Any domain 13296495Sspeer */ 13303859Sml29623 nxge_status_t 13313859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 13323859Sml29623 { 13336495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 13346495Sspeer 13356495Sspeer npi_handle_t handle; 13366495Sspeer nxge_status_t status; 13376495Sspeer npi_status_t rs; 13386495Sspeer int tdc; 13393859Sml29623 13403859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 13416929Smisaki "==> nxge_txdma_hw_mode: enable mode %d", enable)); 13423859Sml29623 13433859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 13443859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 13456929Smisaki "<== nxge_txdma_mode: not initialized")); 13463859Sml29623 return (NXGE_ERROR); 13473859Sml29623 } 13483859Sml29623 13496495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 13503859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 13516495Sspeer "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 13523859Sml29623 return (NXGE_ERROR); 13533859Sml29623 } 13543859Sml29623 13556495Sspeer /* Enable or disable all of the TDCs owned by us. 
*/
13563859Sml29623	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13576495Sspeer	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
13586495Sspeer	if ((1 << tdc) & set->owned.map) {
13596495Sspeer	tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
13606495Sspeer	if (ring) {
13616495Sspeer	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13626495Sspeer	    "==> nxge_txdma_hw_mode: channel %d", tdc));
13636495Sspeer	if (enable) {
13646495Sspeer	rs = npi_txdma_channel_enable
13656495Sspeer	    (handle, tdc);
13663859Sml29623	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13676495Sspeer	    "==> nxge_txdma_hw_mode: "
13686495Sspeer	    "channel %d (enable) rs 0x%x",
13696495Sspeer	    tdc, rs));
13706495Sspeer	} else {
13716495Sspeer	rs = nxge_txdma_channel_disable
13726495Sspeer	    (nxgep, tdc);
13733859Sml29623	}
13743859Sml29623	}
13753859Sml29623	}
13763859Sml29623	}
13773859Sml29623
	/*
	 * NOTE(review): status reflects only the rs of the LAST channel
	 * processed; an earlier per-channel failure is overwritten by a
	 * later success.  Confirm that is the intended aggregation.
	 */
13783859Sml29623	status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
13793859Sml29623
13803859Sml29623	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13816929Smisaki	    "<== nxge_txdma_hw_mode: status 0x%x", status));
13823859Sml29623
13833859Sml29623	return (status);
13843859Sml29623	}
13853859Sml29623
	/* Enable a single TX DMA channel (thin wrapper over NPI). */
13863859Sml29623	void
13873859Sml29623	nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
13883859Sml29623	{
13893859Sml29623	npi_handle_t handle;
13903859Sml29623
13913859Sml29623	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
13926929Smisaki	    "==> nxge_txdma_enable_channel: channel %d", channel));
13933859Sml29623
13943859Sml29623	handle = NXGE_DEV_NPI_HANDLE(nxgep);
13953859Sml29623	/* enable the transmit dma channels */
13963859Sml29623	(void) npi_txdma_channel_enable(handle, channel);
13973859Sml29623
13983859Sml29623	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
13993859Sml29623	}
14003859Sml29623
	/* Disable a single TX DMA channel (thin wrapper over NPI). */
14013859Sml29623	void
14023859Sml29623	nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
14033859Sml29623	{
14043859Sml29623	npi_handle_t handle;
14053859Sml29623
14063859Sml29623	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
14076929Smisaki	    "==>
nxge_txdma_disable_channel: channel %d", channel)); 14083859Sml29623 14093859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 14103859Sml29623 /* stop the transmit dma channels */ 14113859Sml29623 (void) npi_txdma_channel_disable(handle, channel); 14123859Sml29623 14133859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 14143859Sml29623 } 14153859Sml29623 14166495Sspeer /* 14176495Sspeer * nxge_txdma_stop_inj_err 14186495Sspeer * 14196495Sspeer * Stop a TDC. If at first we don't succeed, inject an error. 14206495Sspeer * 14216495Sspeer * Arguments: 14226495Sspeer * nxgep 14236495Sspeer * channel The channel to stop. 14246495Sspeer * 14256495Sspeer * Notes: 14266495Sspeer * 14276495Sspeer * NPI/NXGE function calls: 14286495Sspeer * npi_txdma_channel_disable() 14296495Sspeer * npi_txdma_inj_int_error_set() 14306495Sspeer * #if defined(NXGE_DEBUG) 14316495Sspeer * nxge_txdma_regs_dump_channels(nxgep); 14326495Sspeer * #endif 14336495Sspeer * 14346495Sspeer * Registers accessed: 14356495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 14366495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 14376495Sspeer * 14386495Sspeer * Context: 14396495Sspeer * Any domain 14406495Sspeer */ 14413859Sml29623 int 14423859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 14433859Sml29623 { 14443859Sml29623 npi_handle_t handle; 14453859Sml29623 tdmc_intr_dbg_t intr_dbg; 14463859Sml29623 int status; 14473859Sml29623 npi_status_t rs = NPI_SUCCESS; 14483859Sml29623 14493859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 14503859Sml29623 /* 14513859Sml29623 * Stop the dma channel waits for the stop done. 14523859Sml29623 * If the stop done bit is not set, then create 14533859Sml29623 * an error. 14543859Sml29623 */ 14553859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 14563859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14573859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 14583859Sml29623 if (status == NXGE_OK) { 14593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14606929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): " 14616929Smisaki "stopped OK", channel)); 14623859Sml29623 return (status); 14633859Sml29623 } 14643859Sml29623 14653859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14666929Smisaki "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 14676929Smisaki "injecting error", channel, rs)); 14683859Sml29623 /* Inject any error */ 14693859Sml29623 intr_dbg.value = 0; 14703859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 14713859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 14723859Sml29623 14733859Sml29623 /* Stop done bit will be set as a result of error injection */ 14743859Sml29623 rs = npi_txdma_channel_disable(handle, channel); 14753859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 14763859Sml29623 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 14773859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 14786929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): " 14796929Smisaki "stopped OK ", channel)); 14803859Sml29623 return (status); 14813859Sml29623 } 14823859Sml29623 14833859Sml29623 #if defined(NXGE_DEBUG) 14843859Sml29623 nxge_txdma_regs_dump_channels(nxgep); 14853859Sml29623 #endif 14863859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 14876929Smisaki "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 14886929Smisaki " (injected error but still not stopped)", channel, rs)); 14893859Sml29623 14903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 14913859Sml29623 return (status); 14923859Sml29623 } 14933859Sml29623 14943859Sml29623 /*ARGSUSED*/ 14953859Sml29623 void 14963859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep) 14973859Sml29623 { 14986495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 14996495Sspeer int tdc; 15003859Sml29623 15013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 
15023859Sml29623 15036495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 15046495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 15056495Sspeer "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 15063859Sml29623 return; 15073859Sml29623 } 15083859Sml29623 15096495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 15106495Sspeer if ((1 << tdc) & set->owned.map) { 15116495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 15126495Sspeer if (ring) { 15136495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 15146495Sspeer "==> nxge_fixup_txdma_rings: channel %d", 15156495Sspeer tdc)); 15166495Sspeer nxge_txdma_fixup_channel(nxgep, ring, tdc); 15176495Sspeer } 15186495Sspeer } 15193859Sml29623 } 15203859Sml29623 15213859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 15223859Sml29623 } 15233859Sml29623 15243859Sml29623 /*ARGSUSED*/ 15253859Sml29623 void 15263859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 15273859Sml29623 { 15283859Sml29623 p_tx_ring_t ring_p; 15293859Sml29623 15303859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 15313859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 15323859Sml29623 if (ring_p == NULL) { 15333859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 15343859Sml29623 return; 15353859Sml29623 } 15363859Sml29623 15373859Sml29623 if (ring_p->tdc != channel) { 15383859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15396929Smisaki "<== nxge_txdma_fix_channel: channel not matched " 15406929Smisaki "ring tdc %d passed channel", 15416929Smisaki ring_p->tdc, channel)); 15423859Sml29623 return; 15433859Sml29623 } 15443859Sml29623 15453859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 15463859Sml29623 15473859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 15483859Sml29623 } 15493859Sml29623 15503859Sml29623 /*ARGSUSED*/ 15513859Sml29623 void 15523859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 
uint16_t channel) 15533859Sml29623 { 15543859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 15553859Sml29623 15563859Sml29623 if (ring_p == NULL) { 15573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15586929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer")); 15593859Sml29623 return; 15603859Sml29623 } 15613859Sml29623 15623859Sml29623 if (ring_p->tdc != channel) { 15633859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15646929Smisaki "<== nxge_txdma_fixup_channel: channel not matched " 15656929Smisaki "ring tdc %d passed channel", 15666929Smisaki ring_p->tdc, channel)); 15673859Sml29623 return; 15683859Sml29623 } 15693859Sml29623 15703859Sml29623 MUTEX_ENTER(&ring_p->lock); 15713859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 15723859Sml29623 ring_p->rd_index = 0; 15733859Sml29623 ring_p->wr_index = 0; 15743859Sml29623 ring_p->ring_head.value = 0; 15753859Sml29623 ring_p->ring_kick_tail.value = 0; 15763859Sml29623 ring_p->descs_pending = 0; 15773859Sml29623 MUTEX_EXIT(&ring_p->lock); 1578*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(ring_p); 15793859Sml29623 15803859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 15813859Sml29623 } 15823859Sml29623 15833859Sml29623 /*ARGSUSED*/ 15843859Sml29623 void 15853859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep) 15863859Sml29623 { 15876495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 15886495Sspeer int tdc; 15893859Sml29623 15903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 15913859Sml29623 15926495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 15933859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 15946495Sspeer "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 15953859Sml29623 return; 15963859Sml29623 } 15973859Sml29623 15986495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 15996495Sspeer if ((1 << tdc) & set->owned.map) { 16006495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 16016495Sspeer if (ring) { 
16026495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 16036495Sspeer "==> nxge_txdma_hw_kick: channel %d", tdc)); 16046495Sspeer nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 16056495Sspeer } 16066495Sspeer } 16073859Sml29623 } 16083859Sml29623 16093859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 16103859Sml29623 } 16113859Sml29623 16123859Sml29623 /*ARGSUSED*/ 16133859Sml29623 void 16143859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 16153859Sml29623 { 16163859Sml29623 p_tx_ring_t ring_p; 16173859Sml29623 16183859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 16193859Sml29623 16203859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 16213859Sml29623 if (ring_p == NULL) { 16223859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 16236929Smisaki " nxge_txdma_kick_channel")); 16243859Sml29623 return; 16253859Sml29623 } 16263859Sml29623 16273859Sml29623 if (ring_p->tdc != channel) { 16283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 16296929Smisaki "<== nxge_txdma_kick_channel: channel not matched " 16306929Smisaki "ring tdc %d passed channel", 16316929Smisaki ring_p->tdc, channel)); 16323859Sml29623 return; 16333859Sml29623 } 16343859Sml29623 16353859Sml29623 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 16363859Sml29623 16373859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 16383859Sml29623 } 16393859Sml29623 16403859Sml29623 /*ARGSUSED*/ 16413859Sml29623 void 16423859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 16433859Sml29623 { 16443859Sml29623 16453859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 16463859Sml29623 16473859Sml29623 if (ring_p == NULL) { 16483859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 16496929Smisaki "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 16503859Sml29623 return; 16513859Sml29623 } 16523859Sml29623 16533859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== 
nxge_txdma_hw_kick_channel")); 16543859Sml29623 } 16553859Sml29623 16566495Sspeer /* 16576495Sspeer * nxge_check_tx_hang 16586495Sspeer * 16596495Sspeer * Check the state of all TDCs belonging to nxgep. 16606495Sspeer * 16616495Sspeer * Arguments: 16626495Sspeer * nxgep 16636495Sspeer * 16646495Sspeer * Notes: 16656495Sspeer * Called by nxge_hw.c:nxge_check_hw_state(). 16666495Sspeer * 16676495Sspeer * NPI/NXGE function calls: 16686495Sspeer * 16696495Sspeer * Registers accessed: 16706495Sspeer * 16716495Sspeer * Context: 16726495Sspeer * Any domain 16736495Sspeer */ 16743859Sml29623 /*ARGSUSED*/ 16753859Sml29623 void 16763859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep) 16773859Sml29623 { 16783859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang")); 16793859Sml29623 16806713Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 16816713Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 16826713Sspeer goto nxge_check_tx_hang_exit; 16836713Sspeer } 16846713Sspeer 16853859Sml29623 /* 16863859Sml29623 * Needs inputs from hardware for regs: 16873859Sml29623 * head index had not moved since last timeout. 16883859Sml29623 * packets not transmitted or stuffed registers. 16893859Sml29623 */ 16903859Sml29623 if (nxge_txdma_hung(nxgep)) { 16913859Sml29623 nxge_fixup_hung_txdma_rings(nxgep); 16923859Sml29623 } 16936713Sspeer 16946713Sspeer nxge_check_tx_hang_exit: 16953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang")); 16963859Sml29623 } 16973859Sml29623 16986495Sspeer /* 16996495Sspeer * nxge_txdma_hung 17006495Sspeer * 17016495Sspeer * Reset a TDC. 17026495Sspeer * 17036495Sspeer * Arguments: 17046495Sspeer * nxgep 17056495Sspeer * channel The channel to reset. 17066495Sspeer * reg_data The current TX_CS. 
17076495Sspeer * 17086495Sspeer * Notes: 17096495Sspeer * Called by nxge_check_tx_hang() 17106495Sspeer * 17116495Sspeer * NPI/NXGE function calls: 17126495Sspeer * nxge_txdma_channel_hung() 17136495Sspeer * 17146495Sspeer * Registers accessed: 17156495Sspeer * 17166495Sspeer * Context: 17176495Sspeer * Any domain 17186495Sspeer */ 17193859Sml29623 int 17203859Sml29623 nxge_txdma_hung(p_nxge_t nxgep) 17213859Sml29623 { 17227812SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->tx_set; 17237812SMichael.Speer@Sun.COM int tdc; 17247812SMichael.Speer@Sun.COM boolean_t shared; 17253859Sml29623 17263859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung")); 17276495Sspeer 17286495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 17293859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 17306495Sspeer "<== nxge_txdma_hung: NULL ring pointer(s)")); 17313859Sml29623 return (B_FALSE); 17323859Sml29623 } 17333859Sml29623 17346495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 17357812SMichael.Speer@Sun.COM /* 17367812SMichael.Speer@Sun.COM * Grab the shared state of the TDC. 17377812SMichael.Speer@Sun.COM */ 17387812SMichael.Speer@Sun.COM if (isLDOMservice(nxgep)) { 17397812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd = 17407812SMichael.Speer@Sun.COM (nxge_hio_data_t *)nxgep->nxge_hw_p->hio; 17417812SMichael.Speer@Sun.COM 17427812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock); 17437812SMichael.Speer@Sun.COM shared = nxgep->tdc_is_shared[tdc]; 17447812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock); 17457812SMichael.Speer@Sun.COM } else { 17467812SMichael.Speer@Sun.COM shared = B_FALSE; 17477812SMichael.Speer@Sun.COM } 17487812SMichael.Speer@Sun.COM 17497812SMichael.Speer@Sun.COM /* 17507812SMichael.Speer@Sun.COM * Now, process continue to process. 
17517812SMichael.Speer@Sun.COM */ 17527812SMichael.Speer@Sun.COM if (((1 << tdc) & set->owned.map) && !shared) { 17536495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 17546495Sspeer if (ring) { 17556495Sspeer if (nxge_txdma_channel_hung(nxgep, ring, tdc)) { 17566495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 17576495Sspeer "==> nxge_txdma_hung: TDC %d hung", 17586495Sspeer tdc)); 17596495Sspeer return (B_TRUE); 17606495Sspeer } 17616495Sspeer } 17623859Sml29623 } 17633859Sml29623 } 17643859Sml29623 17653859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung")); 17663859Sml29623 17673859Sml29623 return (B_FALSE); 17683859Sml29623 } 17693859Sml29623 17706495Sspeer /* 17716495Sspeer * nxge_txdma_channel_hung 17726495Sspeer * 17736495Sspeer * Reset a TDC. 17746495Sspeer * 17756495Sspeer * Arguments: 17766495Sspeer * nxgep 17776495Sspeer * ring <channel>'s ring. 17786495Sspeer * channel The channel to reset. 17796495Sspeer * 17806495Sspeer * Notes: 17816495Sspeer * Called by nxge_txdma.c:nxge_txdma_hung() 17826495Sspeer * 17836495Sspeer * NPI/NXGE function calls: 17846495Sspeer * npi_txdma_ring_head_get() 17856495Sspeer * 17866495Sspeer * Registers accessed: 17876495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 17886495Sspeer * 17896495Sspeer * Context: 17906495Sspeer * Any domain 17916495Sspeer */ 17923859Sml29623 int 17933859Sml29623 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel) 17943859Sml29623 { 17953859Sml29623 uint16_t head_index, tail_index; 17963859Sml29623 boolean_t head_wrap, tail_wrap; 17973859Sml29623 npi_handle_t handle; 17983859Sml29623 tx_ring_hdl_t tx_head; 17993859Sml29623 uint_t tx_rd_index; 18003859Sml29623 18013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung")); 18023859Sml29623 18033859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 18043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18056929Smisaki "==> nxge_txdma_channel_hung: channel %d", channel)); 18063859Sml29623 
MUTEX_ENTER(&tx_ring_p->lock); 18073859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 18083859Sml29623 18093859Sml29623 tail_index = tx_ring_p->wr_index; 18103859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap; 18113859Sml29623 tx_rd_index = tx_ring_p->rd_index; 18123859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 1813*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(tx_ring_p); 18143859Sml29623 18153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18166929Smisaki "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d " 18176929Smisaki "tail_index %d tail_wrap %d ", 18186929Smisaki channel, tx_rd_index, tail_index, tail_wrap)); 18193859Sml29623 /* 18203859Sml29623 * Read the hardware maintained transmit head 18213859Sml29623 * and wrap around bit. 18223859Sml29623 */ 18233859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 18243859Sml29623 head_index = tx_head.bits.ldw.head; 18253859Sml29623 head_wrap = tx_head.bits.ldw.wrap; 18263859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18276929Smisaki "==> nxge_txdma_channel_hung: " 18286929Smisaki "tx_rd_index %d tail %d tail_wrap %d " 18296929Smisaki "head %d wrap %d", 18306929Smisaki tx_rd_index, tail_index, tail_wrap, 18316929Smisaki head_index, head_wrap)); 18323859Sml29623 18333859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap, 18346929Smisaki tail_index, tail_wrap) && 18356929Smisaki (head_index == tx_rd_index)) { 18363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18376929Smisaki "==> nxge_txdma_channel_hung: EMPTY")); 18383859Sml29623 return (B_FALSE); 18393859Sml29623 } 18403859Sml29623 18413859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18426929Smisaki "==> nxge_txdma_channel_hung: Checking if ring full")); 18433859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 18446929Smisaki tail_wrap)) { 18453859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18466929Smisaki "==> nxge_txdma_channel_hung: full")); 18473859Sml29623 return (B_TRUE); 18483859Sml29623 } 18493859Sml29623 18503859Sml29623 
NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 18513859Sml29623 18523859Sml29623 return (B_FALSE); 18533859Sml29623 } 18543859Sml29623 18556495Sspeer /* 18566495Sspeer * nxge_fixup_hung_txdma_rings 18576495Sspeer * 18586495Sspeer * Disable a TDC. 18596495Sspeer * 18606495Sspeer * Arguments: 18616495Sspeer * nxgep 18626495Sspeer * channel The channel to reset. 18636495Sspeer * reg_data The current TX_CS. 18646495Sspeer * 18656495Sspeer * Notes: 18666495Sspeer * Called by nxge_check_tx_hang() 18676495Sspeer * 18686495Sspeer * NPI/NXGE function calls: 18696495Sspeer * npi_txdma_ring_head_get() 18706495Sspeer * 18716495Sspeer * Registers accessed: 18726495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 18736495Sspeer * 18746495Sspeer * Context: 18756495Sspeer * Any domain 18766495Sspeer */ 18773859Sml29623 /*ARGSUSED*/ 18783859Sml29623 void 18793859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 18803859Sml29623 { 18816495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 18826495Sspeer int tdc; 18833859Sml29623 18843859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 18856495Sspeer 18866495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 18873859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 18886495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 18893859Sml29623 return; 18903859Sml29623 } 18913859Sml29623 18926495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 18936495Sspeer if ((1 << tdc) & set->owned.map) { 18946495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 18956495Sspeer if (ring) { 18966495Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 18976495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 18986495Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d", 18996495Sspeer tdc)); 19006495Sspeer } 19016495Sspeer } 19023859Sml29623 } 19033859Sml29623 19043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 19053859Sml29623 } 19063859Sml29623 19076495Sspeer 
/* 19086495Sspeer * nxge_txdma_fixup_hung_channel 19096495Sspeer * 19106495Sspeer * 'Fix' a hung TDC. 19116495Sspeer * 19126495Sspeer * Arguments: 19136495Sspeer * nxgep 19146495Sspeer * channel The channel to fix. 19156495Sspeer * 19166495Sspeer * Notes: 19176495Sspeer * Called by nxge_fixup_hung_txdma_rings() 19186495Sspeer * 19196495Sspeer * 1. Reclaim the TDC. 19206495Sspeer * 2. Disable the TDC. 19216495Sspeer * 19226495Sspeer * NPI/NXGE function calls: 19236495Sspeer * nxge_txdma_reclaim() 19246495Sspeer * npi_txdma_channel_disable(TX_CS) 19256495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 19266495Sspeer * 19276495Sspeer * Registers accessed: 19286495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status 19296495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 19306495Sspeer * 19316495Sspeer * Context: 19326495Sspeer * Any domain 19336495Sspeer */ 19343859Sml29623 /*ARGSUSED*/ 19353859Sml29623 void 19363859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 19373859Sml29623 { 19383859Sml29623 p_tx_ring_t ring_p; 19393859Sml29623 19403859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 19413859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel); 19423859Sml29623 if (ring_p == NULL) { 19433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19446929Smisaki "<== nxge_txdma_fix_hung_channel")); 19453859Sml29623 return; 19463859Sml29623 } 19473859Sml29623 19483859Sml29623 if (ring_p->tdc != channel) { 19493859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19506929Smisaki "<== nxge_txdma_fix_hung_channel: channel not matched " 19516929Smisaki "ring tdc %d passed channel", 19526929Smisaki ring_p->tdc, channel)); 19533859Sml29623 return; 19543859Sml29623 } 19553859Sml29623 19563859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 19573859Sml29623 19583859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 19593859Sml29623 } 19603859Sml29623 19613859Sml29623 /*ARGSUSED*/ 
19623859Sml29623 void 19633859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 19643859Sml29623 uint16_t channel) 19653859Sml29623 { 19663859Sml29623 npi_handle_t handle; 19673859Sml29623 tdmc_intr_dbg_t intr_dbg; 19683859Sml29623 int status = NXGE_OK; 19693859Sml29623 19703859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 19713859Sml29623 19723859Sml29623 if (ring_p == NULL) { 19733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19746929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer")); 19753859Sml29623 return; 19763859Sml29623 } 19773859Sml29623 19783859Sml29623 if (ring_p->tdc != channel) { 19793859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 19806929Smisaki "<== nxge_txdma_fixup_hung_channel: channel " 19816929Smisaki "not matched " 19826929Smisaki "ring tdc %d passed channel", 19836929Smisaki ring_p->tdc, channel)); 19843859Sml29623 return; 19853859Sml29623 } 19863859Sml29623 19873859Sml29623 /* Reclaim descriptors */ 19883859Sml29623 MUTEX_ENTER(&ring_p->lock); 19893859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 19903859Sml29623 MUTEX_EXIT(&ring_p->lock); 19913859Sml29623 1992*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(ring_p); 1993*7906SMichael.Speer@Sun.COM 19943859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 19953859Sml29623 /* 19963859Sml29623 * Stop the dma channel waits for the stop done. 19973859Sml29623 * If the stop done bit is not set, then force 19983859Sml29623 * an error. 
19993859Sml29623 */ 20003859Sml29623 status = npi_txdma_channel_disable(handle, channel); 20013859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 20023859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20036929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped OK " 20046929Smisaki "ring tdc %d passed channel %d", 20056929Smisaki ring_p->tdc, channel)); 20063859Sml29623 return; 20073859Sml29623 } 20083859Sml29623 20093859Sml29623 /* Inject any error */ 20103859Sml29623 intr_dbg.value = 0; 20113859Sml29623 intr_dbg.bits.ldw.nack_pref = 1; 20123859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 20133859Sml29623 20143859Sml29623 /* Stop done bit will be set as a result of error injection */ 20153859Sml29623 status = npi_txdma_channel_disable(handle, channel); 20163859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) { 20173859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20186929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped again" 20196929Smisaki "ring tdc %d passed channel", 20206929Smisaki ring_p->tdc, channel)); 20213859Sml29623 return; 20223859Sml29623 } 20233859Sml29623 20243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20256929Smisaki "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 20266929Smisaki "ring tdc %d passed channel", 20276929Smisaki ring_p->tdc, channel)); 20283859Sml29623 20293859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 20303859Sml29623 } 20313859Sml29623 20323859Sml29623 /*ARGSUSED*/ 20333859Sml29623 void 20343859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep) 20353859Sml29623 { 20366495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 20376495Sspeer int tdc; 20386495Sspeer 20396495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 20406495Sspeer 20416495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20423859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20436495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20443859Sml29623 return; 20453859Sml29623 } 20463859Sml29623 20476495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20486495Sspeer if ((1 << tdc) & set->owned.map) { 20496495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20506495Sspeer if (ring) { 20516495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20526495Sspeer "==> nxge_reclaim_rings: TDC %d", tdc)); 20536495Sspeer MUTEX_ENTER(&ring->lock); 20546495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, tdc); 20556495Sspeer MUTEX_EXIT(&ring->lock); 2056*7906SMichael.Speer@Sun.COM 2057*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(ring); 20586495Sspeer } 20596495Sspeer } 20603859Sml29623 } 20613859Sml29623 20623859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 20633859Sml29623 } 20643859Sml29623 20653859Sml29623 void 20663859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 20673859Sml29623 { 20686495Sspeer nxge_grp_set_t *set = &nxgep->tx_set; 20696495Sspeer npi_handle_t handle; 20706495Sspeer int tdc; 20716495Sspeer 20726495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 20733859Sml29623 20743859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 20756495Sspeer 20766495Sspeer if (!isLDOMguest(nxgep)) { 20776495Sspeer (void) 
npi_txdma_dump_fzc_regs(handle); 20786495Sspeer 20796495Sspeer /* Dump TXC registers. */ 20806495Sspeer (void) npi_txc_dump_fzc_regs(handle); 20816495Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 20823859Sml29623 } 20833859Sml29623 20846495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 20853859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 20866495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 20873859Sml29623 return; 20883859Sml29623 } 20893859Sml29623 20906495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 20916495Sspeer if ((1 << tdc) & set->owned.map) { 20926495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 20936495Sspeer if (ring) { 20946495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 20956495Sspeer "==> nxge_txdma_regs_dump_channels: " 20966495Sspeer "TDC %d", tdc)); 20976495Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc); 20986495Sspeer 20996495Sspeer /* Dump TXC registers, if able to. */ 21006495Sspeer if (!isLDOMguest(nxgep)) { 21016495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 21026495Sspeer "==> nxge_txdma_regs_dump_channels:" 21036495Sspeer " FZC TDC %d", tdc)); 21046495Sspeer (void) npi_txc_dump_tdc_fzc_regs 21056495Sspeer (handle, tdc); 21066495Sspeer } 21076495Sspeer nxge_txdma_regs_dump(nxgep, tdc); 21086495Sspeer } 21096495Sspeer } 21103859Sml29623 } 21113859Sml29623 21123859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump")); 21133859Sml29623 } 21143859Sml29623 21153859Sml29623 void 21163859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel) 21173859Sml29623 { 21183859Sml29623 npi_handle_t handle; 21193859Sml29623 tx_ring_hdl_t hdl; 21203859Sml29623 tx_ring_kick_t kick; 21213859Sml29623 tx_cs_t cs; 21223859Sml29623 txc_control_t control; 21233859Sml29623 uint32_t bitmap = 0; 21243859Sml29623 uint32_t burst = 0; 21253859Sml29623 uint32_t bytes = 0; 21263859Sml29623 dma_log_page_t cfg; 21273859Sml29623 21283859Sml29623 printf("\n\tfunc # %d tdc %d ", 21296929Smisaki 
nxgep->function_num, channel); 21303859Sml29623 cfg.page_num = 0; 21313859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 21323859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 21333859Sml29623 printf("\n\tlog page func %d valid page 0 %d", 21346929Smisaki cfg.func_num, cfg.valid); 21353859Sml29623 cfg.page_num = 1; 21363859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg); 21373859Sml29623 printf("\n\tlog page func %d valid page 1 %d", 21386929Smisaki cfg.func_num, cfg.valid); 21393859Sml29623 21403859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &hdl); 21413859Sml29623 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick); 21423859Sml29623 printf("\n\thead value is 0x%0llx", 21436929Smisaki (long long)hdl.value); 21443859Sml29623 printf("\n\thead index %d", hdl.bits.ldw.head); 21453859Sml29623 printf("\n\tkick value is 0x%0llx", 21466929Smisaki (long long)kick.value); 21473859Sml29623 printf("\n\ttail index %d\n", kick.bits.ldw.tail); 21483859Sml29623 21493859Sml29623 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs); 21503859Sml29623 printf("\n\tControl statue is 0x%0llx", (long long)cs.value); 21513859Sml29623 printf("\n\tControl status RST state %d", cs.bits.ldw.rst); 21523859Sml29623 21533859Sml29623 (void) npi_txc_control(handle, OP_GET, &control); 21543859Sml29623 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap); 21553859Sml29623 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst); 21563859Sml29623 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes); 21573859Sml29623 21583859Sml29623 printf("\n\tTXC port control 0x%0llx", 21596929Smisaki (long long)control.value); 21603859Sml29623 printf("\n\tTXC port bitmap 0x%x", bitmap); 21613859Sml29623 printf("\n\tTXC max burst %d", burst); 21623859Sml29623 printf("\n\tTXC bytes xmt %d\n", bytes); 21633859Sml29623 21643859Sml29623 { 21653859Sml29623 ipp_status_t status; 21663859Sml29623 21673859Sml29623 (void) 
npi_ipp_get_status(handle, nxgep->function_num, &status); 21685125Sjoycey #if defined(__i386) 21695125Sjoycey printf("\n\tIPP status 0x%llux\n", (uint64_t)status.value); 21705125Sjoycey #else 21713859Sml29623 printf("\n\tIPP status 0x%lux\n", (uint64_t)status.value); 21725125Sjoycey #endif 21733859Sml29623 } 21743859Sml29623 } 21753859Sml29623 21763859Sml29623 /* 21776495Sspeer * nxge_tdc_hvio_setup 21786495Sspeer * 21796495Sspeer * I'm not exactly sure what this code does. 21806495Sspeer * 21816495Sspeer * Arguments: 21826495Sspeer * nxgep 21836495Sspeer * channel The channel to map. 21846495Sspeer * 21856495Sspeer * Notes: 21866495Sspeer * 21876495Sspeer * NPI/NXGE function calls: 21886495Sspeer * na 21896495Sspeer * 21906495Sspeer * Context: 21916495Sspeer * Service domain? 21923859Sml29623 */ 21936495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 21946495Sspeer static void 21956495Sspeer nxge_tdc_hvio_setup( 21966495Sspeer nxge_t *nxgep, int channel) 21973859Sml29623 { 21986495Sspeer nxge_dma_common_t *data; 21996495Sspeer nxge_dma_common_t *control; 22006495Sspeer tx_ring_t *ring; 22016495Sspeer 22026495Sspeer ring = nxgep->tx_rings->rings[channel]; 22036495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 22046495Sspeer 22056495Sspeer ring->hv_set = B_FALSE; 22066495Sspeer 22076495Sspeer ring->hv_tx_buf_base_ioaddr_pp = 22086495Sspeer (uint64_t)data->orig_ioaddr_pp; 22096495Sspeer ring->hv_tx_buf_ioaddr_size = 22106495Sspeer (uint64_t)data->orig_alength; 22116495Sspeer 22126495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 22136929Smisaki "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 22146929Smisaki "orig vatopa base io $%p orig_len 0x%llx (%d)", 22156929Smisaki ring->hv_tx_buf_base_ioaddr_pp, 22166929Smisaki ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 22176929Smisaki data->ioaddr_pp, data->orig_vatopa, 22186929Smisaki data->orig_alength, data->orig_alength)); 22196495Sspeer 
22206495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 22216495Sspeer 22226495Sspeer ring->hv_tx_cntl_base_ioaddr_pp = 22236495Sspeer (uint64_t)control->orig_ioaddr_pp; 22246495Sspeer ring->hv_tx_cntl_ioaddr_size = 22256495Sspeer (uint64_t)control->orig_alength; 22266495Sspeer 22276495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 22286929Smisaki "hv cntl base io $%p orig ioaddr_pp ($%p) " 22296929Smisaki "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 22306929Smisaki ring->hv_tx_cntl_base_ioaddr_pp, 22316929Smisaki control->orig_ioaddr_pp, control->orig_vatopa, 22326929Smisaki ring->hv_tx_cntl_ioaddr_size, 22336929Smisaki control->orig_alength, control->orig_alength)); 22346495Sspeer } 22353859Sml29623 #endif 22363859Sml29623 22376495Sspeer static nxge_status_t 22386495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel) 22396495Sspeer { 22406495Sspeer nxge_dma_common_t **pData; 22416495Sspeer nxge_dma_common_t **pControl; 22426495Sspeer tx_ring_t **pRing, *ring; 22436495Sspeer tx_mbox_t **mailbox; 22446495Sspeer uint32_t num_chunks; 22456495Sspeer 22466495Sspeer nxge_status_t status = NXGE_OK; 22476495Sspeer 22486495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 22496495Sspeer 22506495Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) { 22516495Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 22526495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 22536495Sspeer "<== nxge_map_txdma: buf not allocated")); 22546495Sspeer return (NXGE_ERROR); 22556495Sspeer } 22563859Sml29623 } 22573859Sml29623 22586495Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 22596495Sspeer return (NXGE_ERROR); 22606495Sspeer 22616495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 22626495Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 22636495Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 22646495Sspeer pRing = &nxgep->tx_rings->rings[channel]; 22656495Sspeer mailbox = 
&nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 22666495Sspeer 22676495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 22686929Smisaki "tx_rings $%p tx_desc_rings $%p", 22696929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings)); 22703859Sml29623 22713859Sml29623 /* 22726495Sspeer * Map descriptors from the buffer pools for <channel>. 22736495Sspeer */ 22746495Sspeer 22756495Sspeer /* 22766495Sspeer * Set up and prepare buffer blocks, descriptors 22776495Sspeer * and mailbox. 22783859Sml29623 */ 22796495Sspeer status = nxge_map_txdma_channel(nxgep, channel, 22806495Sspeer pData, pRing, num_chunks, pControl, mailbox); 22816495Sspeer if (status != NXGE_OK) { 22826495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 22836929Smisaki "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 22846929Smisaki "returned 0x%x", 22856929Smisaki nxgep, channel, status)); 22866495Sspeer return (status); 22876495Sspeer } 22886495Sspeer 22896495Sspeer ring = *pRing; 22906495Sspeer 22916495Sspeer ring->index = (uint16_t)channel; 22926495Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 22936495Sspeer 22946495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 22956495Sspeer if (isLDOMguest(nxgep)) { 22966495Sspeer (void) nxge_tdc_lp_conf(nxgep, channel); 22976495Sspeer } else { 22986495Sspeer nxge_tdc_hvio_setup(nxgep, channel); 22996495Sspeer } 23003859Sml29623 #endif 23016495Sspeer 23026495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 23036495Sspeer "(status 0x%x channel %d)", status, channel)); 23043859Sml29623 23053859Sml29623 return (status); 23063859Sml29623 } 23073859Sml29623 23083859Sml29623 static nxge_status_t 23093859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 23103859Sml29623 p_nxge_dma_common_t *dma_buf_p, 23113859Sml29623 p_tx_ring_t *tx_desc_p, 23123859Sml29623 uint32_t num_chunks, 23133859Sml29623 p_nxge_dma_common_t *dma_cntl_p, 23143859Sml29623 p_tx_mbox_t *tx_mbox_p) 23153859Sml29623 { 23163859Sml29623 int 
status = NXGE_OK; 23173859Sml29623 23183859Sml29623 /* 23193859Sml29623 * Set up and prepare buffer blocks, descriptors 23203859Sml29623 * and mailbox. 23213859Sml29623 */ 23226495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 23236929Smisaki "==> nxge_map_txdma_channel (channel %d)", channel)); 23243859Sml29623 /* 23253859Sml29623 * Transmit buffer blocks 23263859Sml29623 */ 23273859Sml29623 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 23286929Smisaki dma_buf_p, tx_desc_p, num_chunks); 23293859Sml29623 if (status != NXGE_OK) { 23303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 23316929Smisaki "==> nxge_map_txdma_channel (channel %d): " 23326929Smisaki "map buffer failed 0x%x", channel, status)); 23333859Sml29623 goto nxge_map_txdma_channel_exit; 23343859Sml29623 } 23353859Sml29623 23363859Sml29623 /* 23373859Sml29623 * Transmit block ring, and mailbox. 23383859Sml29623 */ 23393859Sml29623 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 23406929Smisaki tx_mbox_p); 23413859Sml29623 23423859Sml29623 goto nxge_map_txdma_channel_exit; 23433859Sml29623 23443859Sml29623 nxge_map_txdma_channel_fail1: 23456495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 23466929Smisaki "==> nxge_map_txdma_channel: unmap buf" 23476929Smisaki "(status 0x%x channel %d)", 23486929Smisaki status, channel)); 23493859Sml29623 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 23503859Sml29623 23513859Sml29623 nxge_map_txdma_channel_exit: 23526495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, 23536929Smisaki "<== nxge_map_txdma_channel: " 23546929Smisaki "(status 0x%x channel %d)", 23556929Smisaki status, channel)); 23563859Sml29623 23573859Sml29623 return (status); 23583859Sml29623 } 23593859Sml29623 23603859Sml29623 /*ARGSUSED*/ 23613859Sml29623 static void 23626495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 23633859Sml29623 { 23646495Sspeer tx_ring_t *ring; 23656495Sspeer tx_mbox_t *mailbox; 23666495Sspeer 23673859Sml29623 NXGE_DEBUG_MSG((nxgep, 
MEM3_CTL, 23686929Smisaki "==> nxge_unmap_txdma_channel (channel %d)", channel)); 23693859Sml29623 /* 23703859Sml29623 * unmap tx block ring, and mailbox. 23713859Sml29623 */ 23726495Sspeer ring = nxgep->tx_rings->rings[channel]; 23736495Sspeer mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 23746495Sspeer 23756495Sspeer (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 23763859Sml29623 23773859Sml29623 /* unmap buffer blocks */ 23786495Sspeer (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 23796495Sspeer 23806495Sspeer nxge_free_txb(nxgep, channel); 23813859Sml29623 23823859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 23833859Sml29623 } 23843859Sml29623 23856495Sspeer /* 23866495Sspeer * nxge_map_txdma_channel_cfg_ring 23876495Sspeer * 23886495Sspeer * Map a TDC into our kernel space. 23896495Sspeer * This function allocates all of the per-channel data structures. 23906495Sspeer * 23916495Sspeer * Arguments: 23926495Sspeer * nxgep 23936495Sspeer * dma_channel The channel to map. 23946495Sspeer * dma_cntl_p 23956495Sspeer * tx_ring_p dma_channel's transmit ring 23966495Sspeer * tx_mbox_p dma_channel's mailbox 23976495Sspeer * 23986495Sspeer * Notes: 23996495Sspeer * 24006495Sspeer * NPI/NXGE function calls: 24016495Sspeer * nxge_setup_dma_common() 24026495Sspeer * 24036495Sspeer * Registers accessed: 24046495Sspeer * none. 
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static void
nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
	p_nxge_dma_common_t *dma_cntl_p,
	p_tx_ring_t tx_ring_p,
	p_tx_mbox_t *tx_mbox_p)
{
	p_tx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_tx_rng_cfig_t		tx_ring_cfig_p;
	p_tx_ring_kick_t	tx_ring_kick_p;
	p_tx_cs_t		tx_cs_p;
	p_tx_dma_ent_msk_t	tx_evmask_p;
	p_txdma_mbh_t		mboxh_p;
	p_txdma_mbl_t		mboxl_p;
	uint64_t		tx_desc_len;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring"));

	cntl_dmap = *dma_cntl_p;

	/*
	 * Lay out the descriptor ring for this channel inside the
	 * control DMA area and zero every descriptor.
	 */
	dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
	nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
	    sizeof (tx_desc_t));
	/*
	 * Zero out transmit ring descriptors.
	 */
	bzero((caddr_t)dmap->kaddrp, dmap->alength);
	/*
	 * All register images below are software shadows kept in the
	 * ring structure; no device register is written here.
	 */
	tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
	tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
	tx_cs_p = &(tx_ring_p->tx_cs);
	tx_evmask_p = &(tx_ring_p->tx_evmask);
	tx_ring_cfig_p->value = 0;
	tx_ring_kick_p->value = 0;
	tx_cs_p->value = 0;
	tx_evmask_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
	    dma_channel,
	    dmap->dma_cookie.dmac_laddress));

	tx_ring_cfig_p->value = 0;
	/*
	 * The ring-length field is expressed in units of 8 descriptors
	 * (tx_ring_size >> 3); combine it with the ring's DMA base
	 * address to form the TX_RNG_CFIG image.
	 */
	tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
	tx_ring_cfig_p->value =
	    (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
	    (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
	    dma_channel,
	    tx_ring_cfig_p->value));

	/* Stage a channel reset in the TX_CS shadow for later start-up. */
	tx_cs_p->bits.ldw.rst = 1;

	/* Map in mailbox */
	mboxp = (p_tx_mbox_t)
	    KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
	dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
	mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
	mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
	mboxh_p->value = mboxl_p->value = 0;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));

	/*
	 * Split the mailbox DMA address into the high/low register
	 * images (TXDMA_MBH / TXDMA_MBL shadows).
	 */
	mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
	    TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);

	mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
	    TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
	    dmap->dma_cookie.dmac_laddress));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
	    "mbox $%p",
	    mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
	/*
	 * Initialize the logical-page shadows: enable pages 0 and 1,
	 * leave all mask/value/relocation images zeroed.
	 */
	tx_ring_p->page_valid.value = 0;
	tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
	tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
	tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
	tx_ring_p->page_hdl.value = 0;

	tx_ring_p->page_valid.bits.ldw.page0 = 1;
	tx_ring_p->page_valid.bits.ldw.page1 = 1;

	tx_ring_p->max_burst.value = 0;
	tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;

	/* Hand the freshly allocated mailbox back to the caller. */
	*tx_mbox_p = mboxp;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_map_txdma_channel_cfg_ring"));
}

/*
 * nxge_unmap_txdma_channel_cfg_ring
 *
 * Undo nxge_map_txdma_channel_cfg_ring(): free the mailbox structure.
 * The descriptor area itself belongs to the control DMA region and is
 * not released here.
 */
/*ARGSUSED*/
static void
nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
	p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
	    tx_ring_p->tdc));

	KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));

NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25226929Smisaki "<== nxge_unmap_txdma_channel_cfg_ring")); 25233859Sml29623 } 25243859Sml29623 25256495Sspeer /* 25266495Sspeer * nxge_map_txdma_channel_buf_ring 25276495Sspeer * 25286495Sspeer * 25296495Sspeer * Arguments: 25306495Sspeer * nxgep 25316495Sspeer * channel The channel to map. 25326495Sspeer * dma_buf_p 25336495Sspeer * tx_desc_p channel's descriptor ring 25346495Sspeer * num_chunks 25356495Sspeer * 25366495Sspeer * Notes: 25376495Sspeer * 25386495Sspeer * NPI/NXGE function calls: 25396495Sspeer * nxge_setup_dma_common() 25406495Sspeer * 25416495Sspeer * Registers accessed: 25426495Sspeer * none. 25436495Sspeer * 25446495Sspeer * Context: 25456495Sspeer * Any domain 25466495Sspeer */ 25473859Sml29623 static nxge_status_t 25483859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 25493859Sml29623 p_nxge_dma_common_t *dma_buf_p, 25503859Sml29623 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 25513859Sml29623 { 25523859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 25533859Sml29623 p_nxge_dma_common_t dmap; 25543859Sml29623 nxge_os_dma_handle_t tx_buf_dma_handle; 25553859Sml29623 p_tx_ring_t tx_ring_p; 25563859Sml29623 p_tx_msg_t tx_msg_ring; 25573859Sml29623 nxge_status_t status = NXGE_OK; 25583859Sml29623 int ddi_status = DDI_SUCCESS; 25593859Sml29623 int i, j, index; 25603859Sml29623 uint32_t size, bsize; 25613859Sml29623 uint32_t nblocks, nmsgs; 25623859Sml29623 25633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25646929Smisaki "==> nxge_map_txdma_channel_buf_ring")); 25653859Sml29623 25663859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 25673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25683859Sml29623 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 25693859Sml29623 "chunks bufp $%p", 25706929Smisaki channel, num_chunks, dma_bufp)); 25713859Sml29623 25723859Sml29623 nmsgs = 0; 25733859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 25743859Sml29623 nmsgs += tmp_bufp->nblocks; 
25753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25766929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 25776929Smisaki "bufp $%p nblocks %d nmsgs %d", 25786929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 25793859Sml29623 } 25803859Sml29623 if (!nmsgs) { 25813859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 25826929Smisaki "<== nxge_map_txdma_channel_buf_ring: channel %d " 25836929Smisaki "no msg blocks", 25846929Smisaki channel)); 25853859Sml29623 status = NXGE_ERROR; 25863859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 25873859Sml29623 } 25883859Sml29623 25893859Sml29623 tx_ring_p = (p_tx_ring_t) 25906929Smisaki KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 25913859Sml29623 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 25926929Smisaki (void *)nxgep->interrupt_cookie); 2593*7906SMichael.Speer@Sun.COM MUTEX_INIT(&tx_ring_p->freelock, NULL, MUTEX_DRIVER, 2594*7906SMichael.Speer@Sun.COM (void *)nxgep->interrupt_cookie); 25953952Sml29623 25966713Sspeer (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE); 25976886Sspeer tx_ring_p->tx_ring_busy = B_FALSE; 25983952Sml29623 tx_ring_p->nxgep = nxgep; 25993952Sml29623 tx_ring_p->serial = nxge_serialize_create(nmsgs, 26006929Smisaki nxge_serial_tx, tx_ring_p); 26013859Sml29623 /* 26023859Sml29623 * Allocate transmit message rings and handles for packets 26033859Sml29623 * not to be copied to premapped buffers. 
26043859Sml29623 */ 26053859Sml29623 size = nmsgs * sizeof (tx_msg_t); 26063859Sml29623 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 26073859Sml29623 for (i = 0; i < nmsgs; i++) { 26083859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 26096929Smisaki DDI_DMA_DONTWAIT, 0, 26106929Smisaki &tx_msg_ring[i].dma_handle); 26113859Sml29623 if (ddi_status != DDI_SUCCESS) { 26123859Sml29623 status |= NXGE_DDI_FAILED; 26133859Sml29623 break; 26143859Sml29623 } 26153859Sml29623 } 26163859Sml29623 if (i < nmsgs) { 26174185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26184185Sspeer "Allocate handles failed.")); 26193859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 26203859Sml29623 } 26213859Sml29623 26223859Sml29623 tx_ring_p->tdc = channel; 26233859Sml29623 tx_ring_p->tx_msg_ring = tx_msg_ring; 26243859Sml29623 tx_ring_p->tx_ring_size = nmsgs; 26253859Sml29623 tx_ring_p->num_chunks = num_chunks; 26263859Sml29623 if (!nxge_tx_intr_thres) { 26273859Sml29623 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 26283859Sml29623 } 26293859Sml29623 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 26303859Sml29623 tx_ring_p->rd_index = 0; 26313859Sml29623 tx_ring_p->wr_index = 0; 26323859Sml29623 tx_ring_p->ring_head.value = 0; 26333859Sml29623 tx_ring_p->ring_kick_tail.value = 0; 26343859Sml29623 tx_ring_p->descs_pending = 0; 26353859Sml29623 26363859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26376929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d " 26386929Smisaki "actual tx desc max %d nmsgs %d " 26396929Smisaki "(config nxge_tx_ring_size %d)", 26406929Smisaki channel, tx_ring_p->tx_ring_size, nmsgs, 26416929Smisaki nxge_tx_ring_size)); 26423859Sml29623 26433859Sml29623 /* 26443859Sml29623 * Map in buffers from the buffer pool. 
26453859Sml29623 */ 26463859Sml29623 index = 0; 26473859Sml29623 bsize = dma_bufp->block_size; 26483859Sml29623 26493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 26506929Smisaki "dma_bufp $%p tx_rng_p $%p " 26516929Smisaki "tx_msg_rng_p $%p bsize %d", 26526929Smisaki dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 26533859Sml29623 26543859Sml29623 tx_buf_dma_handle = dma_bufp->dma_handle; 26553859Sml29623 for (i = 0; i < num_chunks; i++, dma_bufp++) { 26563859Sml29623 bsize = dma_bufp->block_size; 26573859Sml29623 nblocks = dma_bufp->nblocks; 26583859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26596929Smisaki "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 26606929Smisaki "size %d dma_bufp $%p", 26616929Smisaki i, sizeof (nxge_dma_common_t), dma_bufp)); 26623859Sml29623 26633859Sml29623 for (j = 0; j < nblocks; j++) { 26643859Sml29623 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2665*7906SMichael.Speer@Sun.COM tx_msg_ring[index].nextp = NULL; 26663859Sml29623 dmap = &tx_msg_ring[index++].buf_dma; 26673859Sml29623 #ifdef TX_MEM_DEBUG 26683859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 26696929Smisaki "==> nxge_map_txdma_channel_buf_ring: j %d" 26706929Smisaki "dmap $%p", i, dmap)); 26713859Sml29623 #endif 26723859Sml29623 nxge_setup_dma_common(dmap, dma_bufp, 1, 26736929Smisaki bsize); 26743859Sml29623 } 26753859Sml29623 } 26763859Sml29623 26773859Sml29623 if (i < num_chunks) { 26784185Sspeer status = NXGE_ERROR; 26793859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1; 26803859Sml29623 } 26813859Sml29623 26823859Sml29623 *tx_desc_p = tx_ring_p; 26833859Sml29623 26843859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit; 26853859Sml29623 26863859Sml29623 nxge_map_txdma_channel_buf_ring_fail1: 26873952Sml29623 if (tx_ring_p->serial) { 26883952Sml29623 nxge_serialize_destroy(tx_ring_p->serial); 26893952Sml29623 tx_ring_p->serial = NULL; 26903952Sml29623 } 26913952Sml29623 26923859Sml29623 index--; 26933859Sml29623 for 
(; index >= 0; index--) { 26944185Sspeer if (tx_msg_ring[index].dma_handle != NULL) { 26954185Sspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 26963859Sml29623 } 26973859Sml29623 } 2698*7906SMichael.Speer@Sun.COM 2699*7906SMichael.Speer@Sun.COM MUTEX_DESTROY(&tx_ring_p->freelock); 27003859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock); 27014185Sspeer KMEM_FREE(tx_msg_ring, size); 27023859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 27033859Sml29623 27044185Sspeer status = NXGE_ERROR; 27054185Sspeer 27063859Sml29623 nxge_map_txdma_channel_buf_ring_exit: 27073859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27086929Smisaki "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 27093859Sml29623 27103859Sml29623 return (status); 27113859Sml29623 } 27123859Sml29623 27133859Sml29623 /*ARGSUSED*/ 27143859Sml29623 static void 27153859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 27163859Sml29623 { 27173859Sml29623 p_tx_msg_t tx_msg_ring; 27183859Sml29623 p_tx_msg_t tx_msg_p; 27193859Sml29623 int i; 27203859Sml29623 27213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27226929Smisaki "==> nxge_unmap_txdma_channel_buf_ring")); 27233859Sml29623 if (tx_ring_p == NULL) { 27243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 27256929Smisaki "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp")); 27263859Sml29623 return; 27273859Sml29623 } 27283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 27296929Smisaki "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 27306929Smisaki tx_ring_p->tdc)); 27313859Sml29623 27323859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring; 27336495Sspeer 27346495Sspeer /* 27356495Sspeer * Since the serialization thread, timer thread and 27366495Sspeer * interrupt thread can all call the transmit reclaim, 27376495Sspeer * the unmapping function needs to acquire the lock 27386495Sspeer * to free those buffers which were transmitted 27396495Sspeer * by the hardware already. 
	 */
	MUTEX_ENTER(&tx_ring_p->lock);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
	    "channel %d",
	    tx_ring_p->tdc));
	/* Reclaim descriptors already completed by the hardware. */
	(void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

	/* Free any mblks still attached to ring entries. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		tx_msg_p = &tx_msg_ring[i];
		if (tx_msg_p->tx_message != NULL) {
			freemsg(tx_msg_p->tx_message);
			tx_msg_p->tx_message = NULL;
		}
	}

	/* Release the per-message DMA handles. */
	for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
		if (tx_msg_ring[i].dma_handle != NULL) {
			ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
		}
		tx_msg_ring[i].dma_handle = NULL;
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	if (tx_ring_p->serial) {
		nxge_serialize_destroy(tx_ring_p->serial);
		tx_ring_p->serial = NULL;
	}

	MUTEX_DESTROY(&tx_ring_p->freelock);
	MUTEX_DESTROY(&tx_ring_p->lock);
	KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
	KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "<== nxge_unmap_txdma_channel_buf_ring"));
}

/*
 * nxge_txdma_hw_start
 *
 * Validate the driver's ring/mailbox tables and start the given TDC
 * via nxge_txdma_start_channel().  Returns NXGE_ERROR if the tables
 * are not set up; otherwise returns the start-channel status.
 */
static nxge_status_t
nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
{
	p_tx_rings_t 		tx_rings;
	p_tx_ring_t 		*tx_desc_rings;
	p_tx_mbox_areas_t 	tx_mbox_areas_p;
	p_tx_mbox_t		*tx_mbox_p;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));

	tx_rings = nxgep->tx_rings;
	if (tx_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointer"));
		return (NXGE_ERROR);
	}
	tx_desc_rings = tx_rings->rings;
	if (tx_desc_rings == NULL) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_hw_start: NULL ring pointers"));
		return (NXGE_ERROR);
	}

	/* NOTE(review): trace message emitted via NXGE_ERROR_MSG. */
	NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));

	tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
	tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;

	status = nxge_txdma_start_channel(nxgep, channel,
	    (p_tx_ring_t)tx_desc_rings[channel],
	    (p_tx_mbox_t)tx_mbox_p[channel]);
	if (status != NXGE_OK) {
		goto nxge_txdma_hw_start_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p rings $%p",
	    nxgep->tx_rings, nxgep->tx_rings->rings));
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
	    "tx_rings $%p tx_desc_rings $%p",
	    nxgep->tx_rings, tx_desc_rings));

	goto nxge_txdma_hw_start_exit;

nxge_txdma_hw_start_fail1:
	/* Failure path only logs; it falls through to the exit label. */
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: disable "
	    "(status 0x%x channel %d)", status, channel));

nxge_txdma_hw_start_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_hw_start: (status 0x%x)", status));

	return (status);
}

/*
 * nxge_txdma_start_channel
 *
 * Start a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to start.
 * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_reset_txdma_channel()
 *	nxge_init_txdma_channel_event_mask()
 *	nxge_enable_txdma_channel()
 *
 * Registers accessed:
 *	none directly (see functions above).
 *
 * Context:
 *	Any domain
 */
static nxge_status_t
nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)

{
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_start_channel (channel %d)", channel));
	/*
	 * TXDMA/TXC must be in stopped state.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	/*
	 * Reset TXDMA channel
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_txdma_start_channel (channel %d)"
		    " reset channel failed 0x%x", channel, status));
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Initialize the TXDMA channel specific FZC control
	 * configurations. These FZC registers are pertaining
	 * to each TX channel (i.e. logical pages).
	 * FZC registers are only accessible from the service domain,
	 * hence the isLDOMguest() guard.
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_txdma_channel(nxgep, channel,
		    tx_ring_p, tx_mbox_p);
		if (status != NXGE_OK) {
			goto nxge_txdma_start_channel_exit;
		}
	}

	/*
	 * Initialize the event masks.
	 */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	/*
	 * Load TXDMA descriptors, buffers, mailbox,
	 * initialise the DMA channels and
	 * enable each DMA channel.
	 */
	status = nxge_enable_txdma_channel(nxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

nxge_txdma_start_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));

	return (status);
}

/*
 * nxge_txdma_stop_channel
 *
 * Stop a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to stop.
 * 	tx_ring_p	channel's transmit descriptor ring.
 * 	tx_mbox_p	channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_stop_inj_err()
 *	nxge_reset_txdma_channel()
 *	nxge_init_txdma_channel_event_mask()
 *	nxge_init_txdma_channel_cntl_stat()
 *	nxge_disable_txdma_channel()
 *
 * Registers accessed:
 *	none directly (see functions above).
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
static nxge_status_t
nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	p_tx_ring_t tx_ring_p;
	int status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_stop_channel: channel %d", channel));

	/*
	 * Stop (disable) TXDMA and TXC (if stop bit is set
	 * and STOP_N_GO bit not set, the TXDMA reset state will
	 * not be set if reset TXDMA.
	 */
	(void) nxge_txdma_stop_inj_err(nxgep, channel);

	tx_ring_p = nxgep->tx_rings->rings[channel];

	/*
	 * Reset TXDMA channel
	 */
	tx_ring_p->tx_cs.value = 0;
	tx_ring_p->tx_cs.bits.ldw.rst = 1;
	status = nxge_reset_txdma_channel(nxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		goto nxge_txdma_stop_channel_exit;
	}

#ifdef HARDWARE_REQUIRED
	/*
	 * NOTE(review): this block is compiled out.  If it is ever
	 * enabled it will not build as-is: 'tx_mbox_p' is used without
	 * a declaration, and the failing nxge_disable_txdma_channel()
	 * path jumps to 'nxge_txdma_start_channel_exit', a label that
	 * belongs to a different function.
	 */
	/* Set up the interrupt event masks. */
	tx_ring_p->tx_evmask.value = 0;
	status = nxge_init_txdma_channel_event_mask(nxgep,
	    channel, &tx_ring_p->tx_evmask);
	if (status != NXGE_OK) {
		goto nxge_txdma_stop_channel_exit;
	}

	/* Initialize the DMA control and status register */
	tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
	status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
	    tx_ring_p->tx_cs.value);
	if (status != NXGE_OK) {
		goto nxge_txdma_stop_channel_exit;
	}

	tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];

	/* Disable channel */
	status = nxge_disable_txdma_channel(nxgep, channel,
	    tx_ring_p, tx_mbox_p);
	if (status != NXGE_OK) {
		goto nxge_txdma_start_channel_exit;
	}

	NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
	    "==> nxge_txdma_stop_channel: event done"));

#endif

nxge_txdma_stop_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
	return (status);
}

/*
 * nxge_txdma_get_ring
 *
 * Get the ring for a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *	Scans only the TDCs this instance owns (tx_set.owned.map) and
 *	matches on the ring's own tdc field, not the array index.
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static p_tx_ring_t
nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
		goto return_null;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (channel == ring->tdc) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "<== nxge_txdma_get_ring: "
					    "tdc %d ring $%p", tdc, ring));
					return (ring);
				}
			}
		}
	}

return_null:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
	    "ring not found"));

	return (NULL);
}

/*
 * nxge_txdma_get_mbox
 *
 * Get the mailbox for a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 *	Same owned-map scan as nxge_txdma_get_ring(); the mailbox is
 *	looked up by the matching ring's index.
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
static p_tx_mbox_t
nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
{
	nxge_grp_set_t *set = &nxgep->tx_set;
	int tdc;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));

	if (nxgep->tx_mbox_areas_p == 0 ||
	    nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
		goto return_null;
	}

	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
		goto return_null;
	}

	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
		if ((1 << tdc) & set->owned.map) {
			tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
			if (ring) {
				if (channel == ring->tdc) {
					tx_mbox_t *mailbox = nxgep->
					    tx_mbox_areas_p->
					    txmbox_areas_p[tdc];
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "<== nxge_txdma_get_mbox: tdc %d "
					    "ring $%p", tdc, mailbox));
					return (mailbox);
				}
			}
		}
	}

return_null:
	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
	    "mailbox not found"));

	return
	    (NULL);
}

/*
 * nxge_tx_err_evnts
 *
 * Recover a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	index	The index to the TDC ring.
 * 	ldvp	Used to get the channel number ONLY.
 * 	cs	A copy of the bits from TX_CS.
 *
 * Notes:
 *	Calling tree:
 *	 nxge_tx_intr()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_error_get()
 *	npi_txdma_inj_par_error_get()
 *	nxge_txdma_fatal_err_recover()
 *
 * Registers accessed:
 *	TX_RNG_ERR_LOGH	DMC+0x40048 Transmit Ring Error Log High
 *	TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
 *	TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
 *
 * Context:
 *	Any domain	XXX Remove code which accesses TDMC_INJ_PAR_ERR.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
{
	npi_handle_t		handle;
	npi_status_t		rs;
	uint8_t			channel;
	p_tx_ring_t 		*tx_rings;
	p_tx_ring_t 		tx_ring_p;
	p_nxge_tx_ring_stats_t	tdc_stats;
	boolean_t		txchan_fatal = B_FALSE;
	nxge_status_t		status = NXGE_OK;
	tdmc_inj_par_err_t	par_err;
	uint32_t		value;

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = ldvp->channel;

	tx_rings = nxgep->tx_rings->rings;
	tx_ring_p = tx_rings[index];
	tdc_stats = tx_ring_p->tdc_stats;
	/*
	 * If any ring-error bit is set, latch the error log registers
	 * into the channel statistics once, before handling each bit.
	 */
	if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
	    (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
	    (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
		if ((rs = npi_txdma_ring_error_get(handle, channel,
		    &tdc_stats->errlog)) != NPI_SUCCESS)
			return (NXGE_ERROR | rs);
	}

	/*
	 * Each handler below bumps the matching statistic, posts an FM
	 * ereport, logs the condition, and marks the channel fatal so
	 * that the recovery path at the bottom runs.
	 */
	if (cs.bits.ldw.mbox_err) {
		tdc_stats->mbox_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_MBOX_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: mailbox", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_size_err) {
		tdc_stats->pkt_size_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_size_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.tx_ring_oflow) {
		tdc_stats->tx_ring_oflow++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: tx_ring_oflow", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pref_buf_par_err) {
		tdc_stats->pre_buf_par_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pre_buf_par_err", channel));
		/* Clear error injection source for parity error */
		(void) npi_txdma_inj_par_error_get(handle, &value);
		par_err.value = value;
		par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
		(void) npi_txdma_inj_par_error_set(handle, par_err.value);
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pref) {
		tdc_stats->nack_pref++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PREF);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pref", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.nack_pkt_rd) {
		tdc_stats->nack_pkt_rd++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: nack_pkt_rd", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.conf_part_err) {
		tdc_stats->conf_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: config_partition_err", channel));
		txchan_fatal = B_TRUE;
	}
	if (cs.bits.ldw.pkt_prt_err) {
		tdc_stats->pkt_part_err++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
		    NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_tx_err_evnts(channel %d): "
		    "fatal error: pkt_prt_err", channel));
		txchan_fatal = B_TRUE;
	}

	/* Clear error injection source in case this is an injected error */
	TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);

	if (txchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_tx_err_evnts: "
		    " fatal error on channel %d cs 0x%llx\n",
		    channel, cs.value));
		/* Attempt a full channel reset/restart before giving up. */
		status = nxge_txdma_fatal_err_recover(nxgep, channel,
		    tx_ring_p);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<==
nxge_tx_err_evnts")); 32893859Sml29623 32903859Sml29623 return (status); 32913859Sml29623 } 32923859Sml29623 32933859Sml29623 static nxge_status_t 32946495Sspeer nxge_txdma_fatal_err_recover( 32956495Sspeer p_nxge_t nxgep, 32966495Sspeer uint16_t channel, 32976495Sspeer p_tx_ring_t tx_ring_p) 32983859Sml29623 { 32993859Sml29623 npi_handle_t handle; 33003859Sml29623 npi_status_t rs = NPI_SUCCESS; 33013859Sml29623 p_tx_mbox_t tx_mbox_p; 33023859Sml29623 nxge_status_t status = NXGE_OK; 33033859Sml29623 33043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 33053859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33066929Smisaki "Recovering from TxDMAChannel#%d error...", channel)); 33073859Sml29623 33083859Sml29623 /* 33093859Sml29623 * Stop the dma channel waits for the stop done. 33103859Sml29623 * If the stop done bit is not set, then create 33113859Sml29623 * an error. 33123859Sml29623 */ 33133859Sml29623 33143859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 33153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 33163859Sml29623 MUTEX_ENTER(&tx_ring_p->lock); 33173859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 33183859Sml29623 if (rs != NPI_SUCCESS) { 33193859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33206929Smisaki "==> nxge_txdma_fatal_err_recover (channel %d): " 33216929Smisaki "stop failed ", channel)); 33223859Sml29623 goto fail; 33233859Sml29623 } 33243859Sml29623 33253859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 33263859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 33273859Sml29623 33283859Sml29623 /* 33293859Sml29623 * Reset TXDMA channel 33303859Sml29623 */ 33313859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 33323859Sml29623 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 33336929Smisaki NPI_SUCCESS) { 33343859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33356929Smisaki "==> 
nxge_txdma_fatal_err_recover (channel %d)" 33366929Smisaki " reset channel failed 0x%x", channel, rs)); 33373859Sml29623 goto fail; 33383859Sml29623 } 33393859Sml29623 33403859Sml29623 /* 33413859Sml29623 * Reset the tail (kick) register to 0. 33423859Sml29623 * (Hardware will not reset it. Tx overflow fatal 33433859Sml29623 * error if tail is not set to 0 after reset! 33443859Sml29623 */ 33453859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 33463859Sml29623 33473859Sml29623 /* Restart TXDMA channel */ 33483859Sml29623 33496495Sspeer if (!isLDOMguest(nxgep)) { 33506495Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 33516495Sspeer 33526495Sspeer // XXX This is a problem in HIO! 33536495Sspeer /* 33546495Sspeer * Initialize the TXDMA channel specific FZC control 33556495Sspeer * configurations. These FZC registers are pertaining 33566495Sspeer * to each TX channel (i.e. logical pages). 33576495Sspeer */ 33586495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 33596495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel, 33606495Sspeer tx_ring_p, tx_mbox_p); 33616495Sspeer if (status != NXGE_OK) 33626495Sspeer goto fail; 33636495Sspeer } 33643859Sml29623 33653859Sml29623 /* 33663859Sml29623 * Initialize the event masks. 33673859Sml29623 */ 33683859Sml29623 tx_ring_p->tx_evmask.value = 0; 33693859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 33706929Smisaki &tx_ring_p->tx_evmask); 33713859Sml29623 if (status != NXGE_OK) 33723859Sml29623 goto fail; 33733859Sml29623 33743859Sml29623 tx_ring_p->wr_index_wrap = B_FALSE; 33753859Sml29623 tx_ring_p->wr_index = 0; 33763859Sml29623 tx_ring_p->rd_index = 0; 33773859Sml29623 33783859Sml29623 /* 33793859Sml29623 * Load TXDMA descriptors, buffers, mailbox, 33803859Sml29623 * initialise the DMA channels and 33813859Sml29623 * enable each DMA channel. 
33823859Sml29623 */ 33833859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 33843859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel, 33856929Smisaki tx_ring_p, tx_mbox_p); 33863859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 33873859Sml29623 if (status != NXGE_OK) 33883859Sml29623 goto fail; 33893859Sml29623 3390*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(tx_ring_p); 3391*7906SMichael.Speer@Sun.COM 33923859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33936929Smisaki "Recovery Successful, TxDMAChannel#%d Restored", 33946929Smisaki channel)); 33953859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 33963859Sml29623 33973859Sml29623 return (NXGE_OK); 33983859Sml29623 33993859Sml29623 fail: 34003859Sml29623 MUTEX_EXIT(&tx_ring_p->lock); 3401*7906SMichael.Speer@Sun.COM 3402*7906SMichael.Speer@Sun.COM nxge_txdma_freemsg_task(tx_ring_p); 3403*7906SMichael.Speer@Sun.COM 34043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, 34056929Smisaki "nxge_txdma_fatal_err_recover (channel %d): " 34066929Smisaki "failed to recover this txdma channel", channel)); 34073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 34083859Sml29623 34093859Sml29623 return (status); 34103859Sml29623 } 34113859Sml29623 34126495Sspeer /* 34136495Sspeer * nxge_tx_port_fatal_err_recover 34146495Sspeer * 34156495Sspeer * Attempt to recover from a fatal port error. 34166495Sspeer * 34176495Sspeer * Arguments: 34186495Sspeer * nxgep 34196495Sspeer * 34206495Sspeer * Notes: 34216495Sspeer * How would a guest do this? 
34226495Sspeer  * 
34236495Sspeer  * NPI/NXGE function calls: 
34246495Sspeer  * 
34256495Sspeer  * Registers accessed: 
34266495Sspeer  * 
34276495Sspeer  * Context: 
34286495Sspeer  * 	Service domain 
34296495Sspeer  */ 
34303859Sml29623 nxge_status_t 
34313859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 
34323859Sml29623 { 
34336495Sspeer 	nxge_grp_set_t *set = &nxgep->tx_set; 
34346495Sspeer 	nxge_channel_t tdc; 
34356495Sspeer 
34366495Sspeer 	tx_ring_t	*ring; 
34376495Sspeer 	tx_mbox_t	*mailbox; 
34386495Sspeer 
34393859Sml29623 	npi_handle_t handle; 
/*
 * NOTE(review): `status' is never initialized.  A `goto fail' from the
 * stop or reset loops below (which only set `rs') reaches `return (status)'
 * with an indeterminate value — consider initializing to NXGE_ERROR.
 */
34406495Sspeer 	nxge_status_t status; 
34416495Sspeer 	npi_status_t rs; 
34423859Sml29623 
34433859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 
34443859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 
34456495Sspeer 	    "Recovering from TxPort error...")); 
34466495Sspeer 
	/* Port-level recovery is a service-domain task; guests do nothing. */
34476495Sspeer 	if (isLDOMguest(nxgep)) { 
34486495Sspeer 		return (NXGE_OK); 
34496495Sspeer 	} 
34506495Sspeer 
34516495Sspeer 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 
34526495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL, 
34536495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: not initialized")); 
34546495Sspeer 		return (NXGE_ERROR); 
34556495Sspeer 	} 
34566495Sspeer 
34576495Sspeer 	if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 
34586495Sspeer 		NXGE_DEBUG_MSG((nxgep, TX_CTL, 
34596495Sspeer 		    "<== nxge_tx_port_fatal_err_recover: " 
34606495Sspeer 		    "NULL ring pointer(s)")); 
34616495Sspeer 		return (NXGE_ERROR); 
34626495Sspeer 	} 
34636495Sspeer 
	/*
	 * Lock every TDC ring we own for the duration of the recovery;
	 * the matching unlock loops are at the bottom of this function.
	 */
34646495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
34656495Sspeer 		if ((1 << tdc) & set->owned.map) { 
34666495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 
34676495Sspeer 			if (ring) 
34686495Sspeer 				MUTEX_ENTER(&ring->lock); 
34696495Sspeer 		} 
34706495Sspeer 	} 
34713859Sml29623 
34723859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep); 
34736495Sspeer 
34746495Sspeer 	/* 
34756495Sspeer 	 * Stop all the TDCs owned by us. 
34766495Sspeer 	 * (The shared TDCs will have been stopped by their owners.) 
34776495Sspeer 	 */ 
34786495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
34796495Sspeer 		if ((1 << tdc) & set->owned.map) { 
34806495Sspeer 			ring = nxgep->tx_rings->rings[tdc]; 
34816495Sspeer 			if (ring) { 
34826495Sspeer 				rs = npi_txdma_channel_control 
34836495Sspeer 				    (handle, TXDMA_STOP, tdc); 
34846495Sspeer 				if (rs != NPI_SUCCESS) { 
34856495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 
34866495Sspeer 					    "nxge_tx_port_fatal_err_recover " 
34876495Sspeer 					    "(channel %d): stop failed ", tdc)); 
34886495Sspeer 					goto fail; 
34896495Sspeer 				} 
34906495Sspeer 			} 
34913859Sml29623 		} 
34923859Sml29623 	} 
34933859Sml29623 
34946495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 
34956495Sspeer 
34966495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
34976495Sspeer 		if ((1 << tdc) & set->owned.map) { 
34986495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 
3499*7906SMichael.Speer@Sun.COM 			if (ring) { 
/*
 * NOTE(review): this MUTEX_ENTER re-acquires a lock already taken by the
 * locking loop above — a recursive mutex_enter on a non-recursive kernel
 * mutex.  Either this inner ENTER/EXIT pair or the outer lock/unlock loops
 * should be removed; which one depends on nxge_txdma_freemsg_task()'s
 * locking contract — verify against its definition before changing.
 */
3500*7906SMichael.Speer@Sun.COM 				MUTEX_ENTER(&ring->lock); 
35016495Sspeer 				(void) nxge_txdma_reclaim(nxgep, ring, 0); 
3502*7906SMichael.Speer@Sun.COM 				MUTEX_EXIT(&ring->lock); 
3503*7906SMichael.Speer@Sun.COM 
3504*7906SMichael.Speer@Sun.COM 				nxge_txdma_freemsg_task(ring); 
3505*7906SMichael.Speer@Sun.COM 			} 
35063859Sml29623 		} 
35073859Sml29623 	} 
35083859Sml29623 
35093859Sml29623 	/* 
35106495Sspeer 	 * Reset all the TDCs. 
35113859Sml29623 	 */ 
35126495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 
35136495Sspeer 
35146495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
35156495Sspeer 		if ((1 << tdc) & set->owned.map) { 
35166495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 
35176495Sspeer 			if (ring) { 
35186495Sspeer 				if ((rs = npi_txdma_channel_control 
35196929Smisaki 				    (handle, TXDMA_RESET, tdc)) 
35206495Sspeer 				    != NPI_SUCCESS) { 
35216495Sspeer 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 
35226495Sspeer 					    "nxge_tx_port_fatal_err_recover " 
35236495Sspeer 					    "(channel %d) reset channel " 
35246495Sspeer 					    "failed 0x%x", tdc, rs)); 
35256495Sspeer 					goto fail; 
35266495Sspeer 				} 
35276495Sspeer 			} 
35286495Sspeer 			/* 
35296495Sspeer 			 * Reset the tail (kick) register to 0. 
35306495Sspeer 			 * (Hardware will not reset it. Tx overflow fatal 
35316495Sspeer 			 * error if tail is not set to 0 after reset! 
35326495Sspeer 			 */ 
/*
 * NOTE(review): this write is outside the `if (ring)' guard above, so it
 * is issued even for owned channels with a NULL ring — confirm intended.
 */
35336495Sspeer 			TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 
35343859Sml29623 		} 
35356495Sspeer 	} 
35366495Sspeer 
35376495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 
35386495Sspeer 
35396495Sspeer 	/* Restart all the TDCs */ 
35406495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
35416495Sspeer 		if ((1 << tdc) & set->owned.map) { 
35426495Sspeer 			ring = nxgep->tx_rings->rings[tdc]; 
35436495Sspeer 			if (ring) { 
35446495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc); 
/*
 * NOTE(review): this status is overwritten by the event-mask call below
 * before it is ever checked; a failure from
 * nxge_init_fzc_txdma_channel() is silently lost.
 */
35456495Sspeer 				status = nxge_init_fzc_txdma_channel(nxgep, tdc, 
35466495Sspeer 				    ring, mailbox); 
35476495Sspeer 				ring->tx_evmask.value = 0; 
35486495Sspeer 				/* 
35496495Sspeer 				 * Initialize the event masks. 
35506495Sspeer 				 */ 
35516495Sspeer 				status = nxge_init_txdma_channel_event_mask 
35526495Sspeer 				    (nxgep, tdc, &ring->tx_evmask); 
35536495Sspeer 
35546495Sspeer 				ring->wr_index_wrap = B_FALSE; 
35556495Sspeer 				ring->wr_index = 0; 
35566495Sspeer 				ring->rd_index = 0; 
35576495Sspeer 
35586495Sspeer 				if (status != NXGE_OK) 
35596495Sspeer 					goto fail; 
/* NOTE(review): duplicated check — identical to the two lines above. */
35606495Sspeer 				if (status != NXGE_OK) 
35616495Sspeer 					goto fail; 
35626495Sspeer 			} 
35633859Sml29623 		} 
35646495Sspeer 	} 
35656495Sspeer 
35666495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 
35676495Sspeer 
35686495Sspeer 	/* Re-enable all the TDCs */ 
35696495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
35706495Sspeer 		if ((1 << tdc) & set->owned.map) { 
35716495Sspeer 			ring = nxgep->tx_rings->rings[tdc]; 
35726495Sspeer 			if (ring) { 
35736495Sspeer 				mailbox = nxge_txdma_get_mbox(nxgep, tdc); 
35746495Sspeer 				status = nxge_enable_txdma_channel(nxgep, tdc, 
35756495Sspeer 				    ring, mailbox); 
35766495Sspeer 				if (status != NXGE_OK) 
35776495Sspeer 					goto fail; 
35786495Sspeer 			} 
35796495Sspeer 		} 
35803859Sml29623 	} 
35813859Sml29623 
35823859Sml29623 	/* 
35836495Sspeer 	 * Unlock all the TDCs. 
35843859Sml29623 	 */ 
35856495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
35866495Sspeer 		if ((1 << tdc) & set->owned.map) { 
35876495Sspeer 			tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 
35886495Sspeer 			if (ring) 
35896495Sspeer 				MUTEX_EXIT(&ring->lock); 
35903859Sml29623 		} 
35913859Sml29623 	} 
35923859Sml29623 
35936495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 
35943859Sml29623 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 
35953859Sml29623 
35963859Sml29623 	return (NXGE_OK); 
35973859Sml29623 
35983859Sml29623 fail: 
	/* Error path: release every ring lock taken at the top. */
35996495Sspeer 	for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 
36006495Sspeer 		if ((1 << tdc) & set->owned.map) { 
36016495Sspeer 			ring = nxgep->tx_rings->rings[tdc]; 
36026495Sspeer 			if (ring) 
36036495Sspeer 				MUTEX_EXIT(&ring->lock); 
36043859Sml29623 		} 
36053859Sml29623 	} 
36063859Sml29623 
36076495Sspeer 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 
36086495Sspeer 	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 
36093859Sml29623 
36103859Sml29623 	return (status); 
36113859Sml29623 } 
36123859Sml29623 
36136495Sspeer /* 
36146495Sspeer  * nxge_txdma_inject_err 
36156495Sspeer  * 
36166495Sspeer  * Inject an error into a TDC. 
36176495Sspeer  * 
36186495Sspeer  * Arguments: 
36196495Sspeer  * 	nxgep 
36206495Sspeer  * 	err_id	The error to inject. 
36216495Sspeer  * 	chan	The channel to inject into. 
36226495Sspeer  * 
36236495Sspeer  * Notes: 
36246495Sspeer  *	This is called from nxge_main.c:nxge_err_inject() 
36256495Sspeer  *	Has this ioctl ever been used? 
36266495Sspeer * 36276495Sspeer * NPI/NXGE function calls: 36286495Sspeer * npi_txdma_inj_par_error_get() 36296495Sspeer * npi_txdma_inj_par_error_set() 36306495Sspeer * 36316495Sspeer * Registers accessed: 36326495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 36336495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 36346495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 36356495Sspeer * 36366495Sspeer * Context: 36376495Sspeer * Service domain 36386495Sspeer */ 36393859Sml29623 void 36403859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 36413859Sml29623 { 36423859Sml29623 tdmc_intr_dbg_t tdi; 36433859Sml29623 tdmc_inj_par_err_t par_err; 36443859Sml29623 uint32_t value; 36453859Sml29623 npi_handle_t handle; 36463859Sml29623 36473859Sml29623 switch (err_id) { 36483859Sml29623 36493859Sml29623 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 36503859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 36513859Sml29623 /* Clear error injection source for parity error */ 36523859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 36533859Sml29623 par_err.value = value; 36543859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 36553859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 36563859Sml29623 36573859Sml29623 par_err.bits.ldw.inject_parity_error = (1 << chan); 36583859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value); 36593859Sml29623 par_err.value = value; 36603859Sml29623 par_err.bits.ldw.inject_parity_error |= (1 << chan); 36613859Sml29623 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 36626929Smisaki (unsigned long long)par_err.value); 36633859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 36643859Sml29623 break; 36653859Sml29623 36663859Sml29623 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 36673859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 36683859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 
36693859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 36703859Sml29623 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 36713859Sml29623 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 36723859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 36733859Sml29623 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36746929Smisaki chan, &tdi.value); 36753859Sml29623 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 36763859Sml29623 tdi.bits.ldw.pref_buf_par_err = 1; 36773859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 36783859Sml29623 tdi.bits.ldw.mbox_err = 1; 36793859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 36803859Sml29623 tdi.bits.ldw.nack_pref = 1; 36813859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 36823859Sml29623 tdi.bits.ldw.nack_pkt_rd = 1; 36833859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 36843859Sml29623 tdi.bits.ldw.pkt_size_err = 1; 36853859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 36863859Sml29623 tdi.bits.ldw.tx_ring_oflow = 1; 36873859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 36883859Sml29623 tdi.bits.ldw.conf_part_err = 1; 36893859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 36903859Sml29623 tdi.bits.ldw.pkt_part_err = 1; 36915125Sjoycey #if defined(__i386) 36925125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 36936929Smisaki tdi.value); 36945125Sjoycey #else 36953859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 36966929Smisaki tdi.value); 36975125Sjoycey #endif 36983859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 36996929Smisaki chan, tdi.value); 37003859Sml29623 37013859Sml29623 break; 37023859Sml29623 } 37033859Sml29623 } 3704