13859Sml29623 /*
23859Sml29623 * CDDL HEADER START
33859Sml29623 *
43859Sml29623 * The contents of this file are subject to the terms of the
53859Sml29623 * Common Development and Distribution License (the "License").
63859Sml29623 * You may not use this file except in compliance with the License.
73859Sml29623 *
83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623 * or http://www.opensolaris.org/os/licensing.
103859Sml29623 * See the License for the specific language governing permissions
113859Sml29623 * and limitations under the License.
123859Sml29623 *
133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623 *
193859Sml29623 * CDDL HEADER END
203859Sml29623 */
219015SMichael.Speer@Sun.COM
223859Sml29623 /*
23*11878SVenu.Iyer@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
243859Sml29623 * Use is subject to license terms.
253859Sml29623 */
263859Sml29623
273859Sml29623 #include <sys/nxge/nxge_impl.h>
283859Sml29623 #include <sys/nxge/nxge_txdma.h>
296495Sspeer #include <sys/nxge/nxge_hio.h>
306495Sspeer #include <npi_tx_rd64.h>
316495Sspeer #include <npi_tx_wr64.h>
323859Sml29623 #include <sys/llc1.h>
333859Sml29623
343859Sml29623 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT;
358275SEric Cheng uint32_t nxge_tx_minfree = 64;
363859Sml29623 uint32_t nxge_tx_intr_thres = 0;
373859Sml29623 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS;
383859Sml29623 uint32_t nxge_tx_tiny_pack = 1;
393859Sml29623 uint32_t nxge_tx_use_bcopy = 1;
403859Sml29623
413859Sml29623 extern uint32_t nxge_tx_ring_size;
423859Sml29623 extern uint32_t nxge_bcopy_thresh;
433859Sml29623 extern uint32_t nxge_dvma_thresh;
443859Sml29623 extern uint32_t nxge_dma_stream_thresh;
453859Sml29623 extern dma_method_t nxge_force_dma;
466611Sml29623 extern uint32_t nxge_cksum_offload;
473859Sml29623
483859Sml29623 /* Device register access attributes for PIO. */
493859Sml29623 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr;
503859Sml29623 /* Device descriptor access attributes for DMA. */
513859Sml29623 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr;
523859Sml29623 /* Device buffer access attributes for DMA. */
533859Sml29623 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr;
543859Sml29623 extern ddi_dma_attr_t nxge_desc_dma_attr;
553859Sml29623 extern ddi_dma_attr_t nxge_tx_dma_attr;
563859Sml29623
578275SEric Cheng extern void nxge_tx_ring_task(void *arg);
587906SMichael.Speer@Sun.COM
596495Sspeer static nxge_status_t nxge_map_txdma(p_nxge_t, int);
606495Sspeer
616495Sspeer static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int);
623859Sml29623
633859Sml29623 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t,
643859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *,
653859Sml29623 uint32_t, p_nxge_dma_common_t *,
663859Sml29623 p_tx_mbox_t *);
676495Sspeer static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t);
683859Sml29623
693859Sml29623 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t,
703859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t);
713859Sml29623 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t);
723859Sml29623
733859Sml29623 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t,
743859Sml29623 p_nxge_dma_common_t *, p_tx_ring_t,
753859Sml29623 p_tx_mbox_t *);
763859Sml29623 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t,
773859Sml29623 p_tx_ring_t, p_tx_mbox_t);
783859Sml29623
793859Sml29623 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t,
803859Sml29623 p_tx_ring_t, p_tx_mbox_t);
816495Sspeer static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t);
823859Sml29623
833859Sml29623 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t);
843859Sml29623 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t,
853859Sml29623 p_nxge_ldv_t, tx_cs_t);
863859Sml29623 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t);
873859Sml29623 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t,
883859Sml29623 uint16_t, p_tx_ring_t);
893859Sml29623
906495Sspeer static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep,
916495Sspeer p_tx_ring_t ring_p, uint16_t channel);
926495Sspeer
933859Sml29623 nxge_status_t
943859Sml29623 nxge_init_txdma_channels(p_nxge_t nxgep)
953859Sml29623 {
967950SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->tx_set;
977950SMichael.Speer@Sun.COM int i, tdc, count;
987950SMichael.Speer@Sun.COM nxge_grp_t *group;
998275SEric Cheng dc_map_t map;
1008275SEric Cheng int dev_gindex;
1016495Sspeer
1026495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels"));
1036495Sspeer
1046495Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
1056495Sspeer if ((1 << i) & set->lg.map) {
1067950SMichael.Speer@Sun.COM group = set->group[i];
1078275SEric Cheng dev_gindex =
1088275SEric Cheng nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
1098275SEric Cheng map = nxgep->pt_config.tdc_grps[dev_gindex].map;
1106495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1118275SEric Cheng if ((1 << tdc) & map) {
1128275SEric Cheng if ((nxge_grp_dc_add(nxgep,
1138275SEric Cheng group, VP_BOUND_TX, tdc)))
1147950SMichael.Speer@Sun.COM goto init_txdma_channels_exit;
1156495Sspeer }
1166495Sspeer }
1176495Sspeer }
1186495Sspeer if (++count == set->lg.count)
1196495Sspeer break;
1206495Sspeer }
1216495Sspeer
1226495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels"));
1236495Sspeer return (NXGE_OK);
1247950SMichael.Speer@Sun.COM
1257950SMichael.Speer@Sun.COM init_txdma_channels_exit:
1267950SMichael.Speer@Sun.COM for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
1277950SMichael.Speer@Sun.COM if ((1 << i) & set->lg.map) {
1287950SMichael.Speer@Sun.COM group = set->group[i];
1298275SEric Cheng dev_gindex =
1308275SEric Cheng nxgep->pt_config.hw_config.def_mac_txdma_grpid + i;
1318275SEric Cheng map = nxgep->pt_config.tdc_grps[dev_gindex].map;
1327950SMichael.Speer@Sun.COM for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1338275SEric Cheng if ((1 << tdc) & map) {
1347950SMichael.Speer@Sun.COM nxge_grp_dc_remove(nxgep,
1357950SMichael.Speer@Sun.COM VP_BOUND_TX, tdc);
1367950SMichael.Speer@Sun.COM }
1377950SMichael.Speer@Sun.COM }
1387950SMichael.Speer@Sun.COM }
1397950SMichael.Speer@Sun.COM if (++count == set->lg.count)
1407950SMichael.Speer@Sun.COM break;
1417950SMichael.Speer@Sun.COM }
1427950SMichael.Speer@Sun.COM
1437950SMichael.Speer@Sun.COM return (NXGE_ERROR);
1448275SEric Cheng
1456495Sspeer }
1466495Sspeer
1476495Sspeer nxge_status_t
1486495Sspeer nxge_init_txdma_channel(
1496495Sspeer p_nxge_t nxge,
1506495Sspeer int channel)
1516495Sspeer {
1526495Sspeer nxge_status_t status;
1536495Sspeer
1546495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel"));
1556495Sspeer
1566495Sspeer status = nxge_map_txdma(nxge, channel);
1573859Sml29623 if (status != NXGE_OK) {
1586495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
1596495Sspeer "<== nxge_init_txdma_channel: status 0x%x", status));
1606495Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1613859Sml29623 return (status);
1623859Sml29623 }
1633859Sml29623
1646495Sspeer status = nxge_txdma_hw_start(nxge, channel);
1653859Sml29623 if (status != NXGE_OK) {
1666495Sspeer (void) nxge_unmap_txdma_channel(nxge, channel);
1676495Sspeer (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel);
1683859Sml29623 return (status);
1693859Sml29623 }
1703859Sml29623
1716495Sspeer if (!nxge->statsp->tdc_ksp[channel])
1726495Sspeer nxge_setup_tdc_kstats(nxge, channel);
1736495Sspeer
1746495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel"));
1756495Sspeer
1766495Sspeer return (status);
1773859Sml29623 }
1783859Sml29623
1793859Sml29623 void
1803859Sml29623 nxge_uninit_txdma_channels(p_nxge_t nxgep)
1813859Sml29623 {
1826495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
1836495Sspeer int tdc;
1846495Sspeer
1856495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels"));
1866495Sspeer
1876495Sspeer if (set->owned.map == 0) {
1886495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
1896495Sspeer "nxge_uninit_txdma_channels: no channels"));
1906495Sspeer return;
1916495Sspeer }
1926495Sspeer
1936495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
1946495Sspeer if ((1 << tdc) & set->owned.map) {
1956495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc);
1966495Sspeer }
1976495Sspeer }
1986495Sspeer
1996495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels"));
2006495Sspeer }
2016495Sspeer
2026495Sspeer void
2036495Sspeer nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel)
2046495Sspeer {
2056495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel"));
2066495Sspeer
2076495Sspeer if (nxgep->statsp->tdc_ksp[channel]) {
2086495Sspeer kstat_delete(nxgep->statsp->tdc_ksp[channel]);
2096495Sspeer nxgep->statsp->tdc_ksp[channel] = 0;
2106495Sspeer }
2116495Sspeer
21210577SMichael.Speer@Sun.COM if (nxge_txdma_stop_channel(nxgep, channel) != NXGE_OK)
21310577SMichael.Speer@Sun.COM goto nxge_uninit_txdma_channel_exit;
21410577SMichael.Speer@Sun.COM
2156495Sspeer nxge_unmap_txdma_channel(nxgep, channel);
2163859Sml29623
21710577SMichael.Speer@Sun.COM nxge_uninit_txdma_channel_exit:
21810577SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_uninit_txdma_channel"));
2193859Sml29623 }
2203859Sml29623
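/*
 * nxge_setup_dma_common
 *
 *	Carve a sub-region out of a larger, pre-allocated DMA area:
 *	*dest_p becomes a copy of *src_p describing the next
 *	(entries * size) bytes, and src_p is then advanced past the
 *	region just handed out (kernel address, DMA cookie and
 *	remaining length).
 */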
2213859Sml29623 void
2223859Sml29623 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p,
2233859Sml29623 uint32_t entries, uint32_t size)
2243859Sml29623 {
2253859Sml29623 size_t tsize;
2263859Sml29623 *dest_p = *src_p;
2273859Sml29623 tsize = size * entries;
2283859Sml29623 dest_p->alength = tsize;
2293859Sml29623 dest_p->nblocks = entries;
2303859Sml29623 dest_p->block_size = size;
2313859Sml29623 dest_p->offset += tsize;
2323859Sml29623
2333859Sml29623 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize;
2343859Sml29623 src_p->alength -= tsize;
2353859Sml29623 src_p->dma_cookie.dmac_laddress += tsize;
2363859Sml29623 src_p->dma_cookie.dmac_size -= tsize;
2373859Sml29623 }
2383859Sml29623
2396495Sspeer /*
2406495Sspeer * nxge_reset_txdma_channel
2416495Sspeer *
2426495Sspeer * Reset a TDC.
2436495Sspeer *
2446495Sspeer * Arguments:
2456495Sspeer * nxgep
2466495Sspeer * channel The channel to reset.
2476495Sspeer * reg_data The current TX_CS.
2486495Sspeer *
2496495Sspeer * Notes:
2506495Sspeer *
2516495Sspeer * NPI/NXGE function calls:
2526495Sspeer * npi_txdma_channel_reset()
2536495Sspeer * npi_txdma_channel_control()
2546495Sspeer *
2556495Sspeer * Registers accessed:
2566495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
2576495Sspeer * TX_RING_KICK DMC+0x40018 Transmit Ring Kick
2586495Sspeer *
2596495Sspeer * Context:
2606495Sspeer * Any domain
2616495Sspeer */
2623859Sml29623 nxge_status_t
2633859Sml29623 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data)
2643859Sml29623 {
2653859Sml29623 npi_status_t rs = NPI_SUCCESS;
2663859Sml29623 nxge_status_t status = NXGE_OK;
2673859Sml29623 npi_handle_t handle;
2683859Sml29623
2693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel"));
2703859Sml29623
2713859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2723859Sml29623 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) {
2733859Sml29623 rs = npi_txdma_channel_reset(handle, channel);
2743859Sml29623 } else {
2753859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_RESET,
2766929Smisaki channel);
2773859Sml29623 }
2783859Sml29623
2793859Sml29623 if (rs != NPI_SUCCESS) {
2803859Sml29623 status = NXGE_ERROR | rs;
2813859Sml29623 }
2823859Sml29623
2833859Sml29623 /*
2843859Sml29623 * Reset the tail (kick) register to 0.
2853859Sml29623 * (Hardware will not reset it; a Tx overflow fatal
2863859Sml29623 * error results if the tail is not 0 after reset.)
2873859Sml29623 */
2883859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
2893859Sml29623
2903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel"));
2913859Sml29623 return (status);
2923859Sml29623 }
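
/*
 * A minimal usage sketch (not lifted from any caller; TX_CS_REG is assumed
 * to be the register define for TX_CS, DMC+0x40028, listed above): read the
 * current control/status value and pass it in, and the routine picks between
 * npi_txdma_channel_reset() and npi_txdma_channel_control(TXDMA_RESET, ...):
 *
 *	tx_cs_t cs;
 *
 *	TXDMA_REG_READ64(handle, TX_CS_REG, channel, &cs.value);
 *	(void) nxge_reset_txdma_channel(nxgep, channel, cs.value);
 */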
2933859Sml29623
2946495Sspeer /*
2956495Sspeer * nxge_init_txdma_channel_event_mask
2966495Sspeer *
2976495Sspeer * Enable interrupts for a set of events.
2986495Sspeer *
2996495Sspeer * Arguments:
3006495Sspeer * nxgep
3016495Sspeer * channel The channel whose event mask to set.
3026495Sspeer * mask_p The events to enable.
3036495Sspeer *
3046495Sspeer * Notes:
3056495Sspeer *
3066495Sspeer * NPI/NXGE function calls:
3076495Sspeer * npi_txdma_event_mask()
3086495Sspeer *
3096495Sspeer * Registers accessed:
3106495Sspeer * TX_ENT_MSK DMC+0x40020 Transmit Event Mask
3116495Sspeer *
3126495Sspeer * Context:
3136495Sspeer * Any domain
3146495Sspeer */
3153859Sml29623 nxge_status_t
3163859Sml29623 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
3173859Sml29623 p_tx_dma_ent_msk_t mask_p)
3183859Sml29623 {
3193859Sml29623 npi_handle_t handle;
3203859Sml29623 npi_status_t rs = NPI_SUCCESS;
3213859Sml29623 nxge_status_t status = NXGE_OK;
3223859Sml29623
3233859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3246929Smisaki "<== nxge_init_txdma_channel_event_mask"));
3253859Sml29623
3263859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3273859Sml29623 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p);
3283859Sml29623 if (rs != NPI_SUCCESS) {
3293859Sml29623 status = NXGE_ERROR | rs;
3303859Sml29623 }
3313859Sml29623
3323859Sml29623 return (status);
3333859Sml29623 }
3343859Sml29623
3356495Sspeer /*
3366495Sspeer * nxge_init_txdma_channel_cntl_stat
3376495Sspeer *
3386495Sspeer * Set a TDC's Transmit Control and Status (TX_CS) register.
3396495Sspeer *
3406495Sspeer * Arguments:
3416495Sspeer * nxgep
3426495Sspeer * channel The channel whose TX_CS register to write.
3436495Sspeer *
3446495Sspeer * Notes:
3456495Sspeer *
3466495Sspeer * NPI/NXGE function calls:
3476495Sspeer * npi_txdma_control_status()
3486495Sspeer *
3496495Sspeer * Registers accessed:
3506495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
3516495Sspeer *
3526495Sspeer * Context:
3536495Sspeer * Any domain
3546495Sspeer */
3553859Sml29623 nxge_status_t
3563859Sml29623 nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
3573859Sml29623 uint64_t reg_data)
3583859Sml29623 {
3593859Sml29623 npi_handle_t handle;
3603859Sml29623 npi_status_t rs = NPI_SUCCESS;
3613859Sml29623 nxge_status_t status = NXGE_OK;
3623859Sml29623
3633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
3646929Smisaki "<== nxge_init_txdma_channel_cntl_stat"));
3653859Sml29623
3663859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3673859Sml29623 rs = npi_txdma_control_status(handle, OP_SET, channel,
3686929Smisaki (p_tx_cs_t)&reg_data);
3693859Sml29623
3703859Sml29623 if (rs != NPI_SUCCESS) {
3713859Sml29623 status = NXGE_ERROR | rs;
3723859Sml29623 }
3733859Sml29623
3743859Sml29623 return (status);
3753859Sml29623 }
3763859Sml29623
3776495Sspeer /*
3786495Sspeer * nxge_enable_txdma_channel
3796495Sspeer *
3806495Sspeer * Enable a TDC.
3816495Sspeer *
3826495Sspeer * Arguments:
3836495Sspeer * nxgep
3846495Sspeer * channel The channel to enable.
3856495Sspeer * tx_desc_p channel's transmit descriptor ring.
3866495Sspeer * mbox_p The channel's mailbox.
3876495Sspeer *
3886495Sspeer * Notes:
3896495Sspeer *
3906495Sspeer * NPI/NXGE function calls:
3916495Sspeer * npi_txdma_ring_config()
3926495Sspeer * npi_txdma_mbox_config()
3936495Sspeer * npi_txdma_channel_init_enable()
3946495Sspeer *
3956495Sspeer * Registers accessed:
3966495Sspeer * TX_RNG_CFIG DMC+0x40000 Transmit Ring Configuration
3976495Sspeer * TXDMA_MBH DMC+0x40030 TXDMA Mailbox High
3986495Sspeer * TXDMA_MBL DMC+0x40038 TXDMA Mailbox Low
3996495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
4006495Sspeer *
4016495Sspeer * Context:
4026495Sspeer * Any domain
4036495Sspeer */
4043859Sml29623 nxge_status_t
4053859Sml29623 nxge_enable_txdma_channel(p_nxge_t nxgep,
4063859Sml29623 uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
4073859Sml29623 {
4083859Sml29623 npi_handle_t handle;
4093859Sml29623 npi_status_t rs = NPI_SUCCESS;
4103859Sml29623 nxge_status_t status = NXGE_OK;
4113859Sml29623
4123859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));
4133859Sml29623
4143859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4153859Sml29623 /*
4163859Sml29623 * Use configuration data composed at init time.
4173859Sml29623 * Write to hardware the transmit ring configurations.
4183859Sml29623 */
4193859Sml29623 rs = npi_txdma_ring_config(handle, OP_SET, channel,
4206495Sspeer (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));
4213859Sml29623
4223859Sml29623 if (rs != NPI_SUCCESS) {
4233859Sml29623 return (NXGE_ERROR | rs);
4243859Sml29623 }
4253859Sml29623
4266495Sspeer if (isLDOMguest(nxgep)) {
4276495Sspeer /* Add interrupt handler for this channel. */
4286495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
4296495Sspeer return (NXGE_ERROR);
4306495Sspeer }
4316495Sspeer
4323859Sml29623 /* Write to hardware the mailbox */
4333859Sml29623 rs = npi_txdma_mbox_config(handle, OP_SET, channel,
4346929Smisaki (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);
4353859Sml29623
4363859Sml29623 if (rs != NPI_SUCCESS) {
4373859Sml29623 return (NXGE_ERROR | rs);
4383859Sml29623 }
4393859Sml29623
4403859Sml29623 /* Start the DMA engine. */
4413859Sml29623 rs = npi_txdma_channel_init_enable(handle, channel);
4423859Sml29623
4433859Sml29623 if (rs != NPI_SUCCESS) {
4443859Sml29623 return (NXGE_ERROR | rs);
4453859Sml29623 }
4463859Sml29623
4473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel"));
4483859Sml29623
4493859Sml29623 return (status);
4503859Sml29623 }
4513859Sml29623
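/*
 * nxge_fill_tx_hdr
 *
 *	Fill in the Neptune-internal transmit packet header for one frame.
 *	When fill_len is set, only the total transfer length is recorded.
 *	Otherwise the L2/L3/L4 headers of mp are parsed to set the pad,
 *	LLC, VLAN, IP version, IHL and L3 start fields and, when l4_cksum
 *	is set, the L4 start/stuff offsets and checksum-enable bits.  For
 *	UDP with hardware checksum offload disabled (nxge_cksum_offload
 *	== 0), the checksum is completed in software and written into
 *	the packet instead.
 */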
4523859Sml29623 void
4533859Sml29623 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len,
4543859Sml29623 boolean_t l4_cksum, int pkt_len, uint8_t npads,
4556611Sml29623 p_tx_pkt_hdr_all_t pkthdrp,
4566611Sml29623 t_uscalar_t start_offset,
4576611Sml29623 t_uscalar_t stuff_offset)
4583859Sml29623 {
4593859Sml29623 p_tx_pkt_header_t hdrp;
4603859Sml29623 p_mblk_t nmp;
4613859Sml29623 uint64_t tmp;
4623859Sml29623 size_t mblk_len;
4633859Sml29623 size_t iph_len;
4643859Sml29623 size_t hdrs_size;
4653859Sml29623 uint8_t hdrs_buf[sizeof (struct ether_header) +
4666929Smisaki 64 + sizeof (uint32_t)];
4675505Smisaki uint8_t *cursor;
4683859Sml29623 uint8_t *ip_buf;
4693859Sml29623 uint16_t eth_type;
4703859Sml29623 uint8_t ipproto;
4713859Sml29623 boolean_t is_vlan = B_FALSE;
4723859Sml29623 size_t eth_hdr_size;
4733859Sml29623
4743859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp));
4753859Sml29623
4763859Sml29623 /*
4773859Sml29623 * Caller should zero out the headers first.
4783859Sml29623 */
4793859Sml29623 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr;
4803859Sml29623
4813859Sml29623 if (fill_len) {
4823859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
4836929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d "
4846929Smisaki "npads %d", pkt_len, npads));
4853859Sml29623 tmp = (uint64_t)pkt_len;
4863859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);
4873859Sml29623 goto fill_tx_header_done;
4883859Sml29623 }
4893859Sml29623
4906611Sml29623 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT);
4913859Sml29623
4923859Sml29623 /*
4933859Sml29623 * mp is the original data packet (does not include the
4943859Sml29623 * Neptune transmit header).
4953859Sml29623 */
4963859Sml29623 nmp = mp;
4973859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: "
4986929Smisaki "mp $%p b_rptr $%p len %d",
4996929Smisaki mp, nmp->b_rptr, MBLKL(nmp)));
5005505Smisaki /* copy ether_header from mblk to hdrs_buf */
5015505Smisaki cursor = &hdrs_buf[0];
5025505Smisaki tmp = sizeof (struct ether_vlan_header);
5035505Smisaki while ((nmp != NULL) && (tmp > 0)) {
5045505Smisaki size_t buflen;
5055505Smisaki mblk_len = MBLKL(nmp);
5065512Smisaki buflen = min((size_t)tmp, mblk_len);
5075505Smisaki bcopy(nmp->b_rptr, cursor, buflen);
5085505Smisaki cursor += buflen;
5095505Smisaki tmp -= buflen;
5105505Smisaki nmp = nmp->b_cont;
5115505Smisaki }
5125505Smisaki
5135505Smisaki nmp = mp;
5145505Smisaki mblk_len = MBLKL(nmp);
5153859Sml29623 ip_buf = NULL;
5163859Sml29623 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type);
5173859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) "
5186929Smisaki "ether type 0x%x", eth_type, hdrp->value));
5193859Sml29623
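	/*
	 * A type/length field below ETHERMTU is an 802.3 length, i.e.
	 * the frame is LLC encapsulated; for LLC/SNAP the real Ethertype
	 * follows the LLC/SNAP header and is re-read below.
	 */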
5203859Sml29623 if (eth_type < ETHERMTU) {
5213859Sml29623 tmp = 1ull;
5223859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT);
5233859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC "
5246929Smisaki "value 0x%llx", hdrp->value));
5253859Sml29623 if (*(hdrs_buf + sizeof (struct ether_header))
5266929Smisaki == LLC_SNAP_SAP) {
5273859Sml29623 eth_type = ntohs(*((uint16_t *)(hdrs_buf +
5286929Smisaki sizeof (struct ether_header) + 6)));
5293859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
5306929Smisaki "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x",
5316929Smisaki eth_type));
5323859Sml29623 } else {
5333859Sml29623 goto fill_tx_header_done;
5343859Sml29623 }
5353859Sml29623 } else if (eth_type == VLAN_ETHERTYPE) {
5363859Sml29623 tmp = 1ull;
5373859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT);
5383859Sml29623
5393859Sml29623 eth_type = ntohs(((struct ether_vlan_header *)
5406929Smisaki hdrs_buf)->ether_type);
5413859Sml29623 is_vlan = B_TRUE;
5423859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN "
5436929Smisaki "value 0x%llx", hdrp->value));
5443859Sml29623 }
5453859Sml29623
5463859Sml29623 if (!is_vlan) {
5473859Sml29623 eth_hdr_size = sizeof (struct ether_header);
5483859Sml29623 } else {
5493859Sml29623 eth_hdr_size = sizeof (struct ether_vlan_header);
5503859Sml29623 }
5513859Sml29623
5523859Sml29623 switch (eth_type) {
5533859Sml29623 case ETHERTYPE_IP:
5543859Sml29623 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) {
5553859Sml29623 ip_buf = nmp->b_rptr + eth_hdr_size;
5563859Sml29623 mblk_len -= eth_hdr_size;
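			/*
			 * The low nibble of the IP version/IHL byte is the
			 * header length in 32-bit words, which is what the
			 * IHL field of the transmit header expects.
			 */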
5573859Sml29623 iph_len = ((*ip_buf) & 0x0f);
5583859Sml29623 if (mblk_len > (iph_len + sizeof (uint32_t))) {
5593859Sml29623 ip_buf = nmp->b_rptr;
5603859Sml29623 ip_buf += eth_hdr_size;
5613859Sml29623 } else {
5623859Sml29623 ip_buf = NULL;
5633859Sml29623 }
5643859Sml29623
5653859Sml29623 }
5663859Sml29623 if (ip_buf == NULL) {
5673859Sml29623 hdrs_size = 0;
5683859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
5693859Sml29623 while ((nmp) && (hdrs_size <
5706929Smisaki sizeof (hdrs_buf))) {
5713859Sml29623 mblk_len = (size_t)nmp->b_wptr -
5726929Smisaki (size_t)nmp->b_rptr;
5733859Sml29623 if (mblk_len >=
5746929Smisaki (sizeof (hdrs_buf) - hdrs_size))
5753859Sml29623 mblk_len = sizeof (hdrs_buf) -
5766929Smisaki hdrs_size;
5773859Sml29623 bcopy(nmp->b_rptr,
5786929Smisaki &hdrs_buf[hdrs_size], mblk_len);
5793859Sml29623 hdrs_size += mblk_len;
5803859Sml29623 nmp = nmp->b_cont;
5813859Sml29623 }
5823859Sml29623 ip_buf = hdrs_buf;
5833859Sml29623 ip_buf += eth_hdr_size;
5843859Sml29623 iph_len = ((*ip_buf) & 0x0f);
5853859Sml29623 }
5863859Sml29623
5873859Sml29623 ipproto = ip_buf[9];
5883859Sml29623
5893859Sml29623 tmp = (uint64_t)iph_len;
5903859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT);
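		/*
		 * The l3start field (like the l4start/l4stuff offsets used
		 * further down) is expressed in 2-byte units, hence the
		 * shift right by one.
		 */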
5913859Sml29623 tmp = (uint64_t)(eth_hdr_size >> 1);
5923859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
5933859Sml29623
5943859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 "
5956929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x"
5966929Smisaki "tmp 0x%x",
5976929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
5986929Smisaki ipproto, tmp));
5993859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP "
6006929Smisaki "value 0x%llx", hdrp->value));
6013859Sml29623
6023859Sml29623 break;
6033859Sml29623
6043859Sml29623 case ETHERTYPE_IPV6:
6053859Sml29623 hdrs_size = 0;
6063859Sml29623 ((p_ether_header_t)hdrs_buf)->ether_type = 0;
6073859Sml29623 while ((nmp) && (hdrs_size <
6086929Smisaki sizeof (hdrs_buf))) {
6093859Sml29623 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr;
6103859Sml29623 if (mblk_len >=
6116929Smisaki (sizeof (hdrs_buf) - hdrs_size))
6123859Sml29623 mblk_len = sizeof (hdrs_buf) -
6136929Smisaki hdrs_size;
6143859Sml29623 bcopy(nmp->b_rptr,
6156929Smisaki &hdrs_buf[hdrs_size], mblk_len);
6163859Sml29623 hdrs_size += mblk_len;
6173859Sml29623 nmp = nmp->b_cont;
6183859Sml29623 }
6193859Sml29623 ip_buf = hdrs_buf;
6203859Sml29623 ip_buf += eth_hdr_size;
6213859Sml29623
6223859Sml29623 tmp = 1ull;
6233859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT);
6243859Sml29623
6253859Sml29623 tmp = (eth_hdr_size >> 1);
6263859Sml29623 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT);
6273859Sml29623
6283859Sml29623 /* byte 6 is the next header protocol */
6293859Sml29623 ipproto = ip_buf[6];
6303859Sml29623
6313859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 "
6326929Smisaki " iph_len %d l3start %d eth_hdr_size %d proto 0x%x",
6336929Smisaki iph_len, hdrp->bits.hdw.l3start, eth_hdr_size,
6346929Smisaki ipproto));
6353859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 "
6366929Smisaki "value 0x%llx", hdrp->value));
6373859Sml29623
6383859Sml29623 break;
6393859Sml29623
6403859Sml29623 default:
6413859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP"));
6423859Sml29623 goto fill_tx_header_done;
6433859Sml29623 }
6443859Sml29623
6453859Sml29623 switch (ipproto) {
6463859Sml29623 case IPPROTO_TCP:
6473859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
6486611Sml29623 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum));
6493859Sml29623 if (l4_cksum) {
6506611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP;
6516611Sml29623 hdrp->value |=
6526611Sml29623 (((uint64_t)(start_offset >> 1)) <<
6536611Sml29623 TX_PKT_HEADER_L4START_SHIFT);
6546611Sml29623 hdrp->value |=
6556611Sml29623 (((uint64_t)(stuff_offset >> 1)) <<
6566611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT);
6576611Sml29623
6583859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
6596611Sml29623 "==> nxge_tx_pkt_hdr_init: TCP CKSUM "
6606611Sml29623 "value 0x%llx", hdrp->value));
6613859Sml29623 }
6623859Sml29623
6633859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP "
6646611Sml29623 "value 0x%llx", hdrp->value));
6653859Sml29623 break;
6663859Sml29623
6673859Sml29623 case IPPROTO_UDP:
6683859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP"));
6693859Sml29623 if (l4_cksum) {
6706611Sml29623 if (!nxge_cksum_offload) {
6716611Sml29623 uint16_t *up;
6726611Sml29623 uint16_t cksum;
6736611Sml29623 t_uscalar_t stuff_len;
6746611Sml29623
6756611Sml29623 /*
6766611Sml29623 * The checksum field has the
6776611Sml29623 * partial checksum.
6786611Sml29623 * IP_CSUM() macro calls ip_cksum() which
6796611Sml29623 * can add in the partial checksum.
6806611Sml29623 */
6816611Sml29623 cksum = IP_CSUM(mp, start_offset, 0);
6826611Sml29623 stuff_len = stuff_offset;
6836611Sml29623 nmp = mp;
6846611Sml29623 mblk_len = MBLKL(nmp);
6856611Sml29623 while ((nmp != NULL) &&
6866611Sml29623 (mblk_len < stuff_len)) {
6876611Sml29623 stuff_len -= mblk_len;
6886611Sml29623 nmp = nmp->b_cont;
68910577SMichael.Speer@Sun.COM if (nmp)
69010577SMichael.Speer@Sun.COM mblk_len = MBLKL(nmp);
6916611Sml29623 }
6926611Sml29623 ASSERT(nmp);
6936611Sml29623 up = (uint16_t *)(nmp->b_rptr + stuff_len);
6946611Sml29623
6956611Sml29623 *up = cksum;
6966611Sml29623 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP;
6976611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
6986611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
6996611Sml29623 "use sw cksum "
7006611Sml29623 "write to $%p cksum 0x%x content up 0x%x",
7016611Sml29623 stuff_len,
7026611Sml29623 up,
7036611Sml29623 cksum,
7046611Sml29623 *up));
7056611Sml29623 } else {
7066611Sml29623 /* Hardware will compute the full checksum */
7076611Sml29623 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP;
7086611Sml29623 hdrp->value |=
7096611Sml29623 (((uint64_t)(start_offset >> 1)) <<
7106611Sml29623 TX_PKT_HEADER_L4START_SHIFT);
7116611Sml29623 hdrp->value |=
7126611Sml29623 (((uint64_t)(stuff_offset >> 1)) <<
7136611Sml29623 TX_PKT_HEADER_L4STUFF_SHIFT);
7146611Sml29623
7156611Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7166611Sml29623 "==> nxge_tx_pkt_hdr_init: UDP offset %d "
7176611Sml29623 " use partial checksum "
7186611Sml29623 "cksum 0x%x ",
7196611Sml29623 "value 0x%llx",
7206611Sml29623 stuff_offset,
7216611Sml29623 IP_CSUM(mp, start_offset, 0),
7226611Sml29623 hdrp->value));
7236611Sml29623 }
7243859Sml29623 }
7256611Sml29623
7263859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7276929Smisaki "==> nxge_tx_pkt_hdr_init: UDP"
7286929Smisaki "value 0x%llx", hdrp->value));
7293859Sml29623 break;
7303859Sml29623
7313859Sml29623 default:
7323859Sml29623 goto fill_tx_header_done;
7333859Sml29623 }
7343859Sml29623
7353859Sml29623 fill_tx_header_done:
7363859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7376929Smisaki "==> nxge_fill_tx_hdr: pkt_len %d "
7386929Smisaki "npads %d value 0x%llx", pkt_len, npads, hdrp->value));
7393859Sml29623
7403859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr"));
7413859Sml29623 }
7423859Sml29623
7433859Sml29623 /*ARGSUSED*/
7443859Sml29623 p_mblk_t
7453859Sml29623 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads)
7463859Sml29623 {
7473859Sml29623 p_mblk_t newmp = NULL;
7483859Sml29623
7493859Sml29623 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) {
7503859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7516929Smisaki "<== nxge_tx_pkt_header_reserve: allocb failed"));
7523859Sml29623 return (NULL);
7533859Sml29623 }
7543859Sml29623
7553859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7566929Smisaki "==> nxge_tx_pkt_header_reserve: get new mp"));
7573859Sml29623 DB_TYPE(newmp) = M_DATA;
7583859Sml29623 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp);
7593859Sml29623 linkb(newmp, mp);
7603859Sml29623 newmp->b_rptr -= TX_PKT_HEADER_SIZE;
7613859Sml29623
7623859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: "
7636929Smisaki "b_rptr $%p b_wptr $%p",
7646929Smisaki newmp->b_rptr, newmp->b_wptr));
7653859Sml29623
7663859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7676929Smisaki "<== nxge_tx_pkt_header_reserve: use new mp"));
7683859Sml29623
7693859Sml29623 return (newmp);
7703859Sml29623 }
7713859Sml29623
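/*
 * nxge_tx_pkt_nmblocks
 *
 *	Walk a packet and estimate how many transmit descriptors (gather
 *	pointers) it will need: blocks larger than the 4K hardware transfer
 *	limit are split with dupb(), and the message is pulled up with
 *	msgpullup() when it would exceed the 15 gather-pointer limit.  The
 *	total transfer length is returned through tot_xfer_len_p; 0 is
 *	returned if dupb()/msgpullup() fails.
 */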
7723859Sml29623 int
7733859Sml29623 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p)
7743859Sml29623 {
7753859Sml29623 uint_t nmblks;
7763859Sml29623 ssize_t len;
7773859Sml29623 uint_t pkt_len;
7783859Sml29623 p_mblk_t nmp, bmp, tmp;
7793859Sml29623 uint8_t *b_wptr;
7803859Sml29623
7813859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
7826929Smisaki "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p "
7836929Smisaki "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp)));
7843859Sml29623
7853859Sml29623 nmp = mp;
7863859Sml29623 bmp = mp;
7873859Sml29623 nmblks = 0;
7883859Sml29623 pkt_len = 0;
7893859Sml29623 *tot_xfer_len_p = 0;
7903859Sml29623
7913859Sml29623 while (nmp) {
7923859Sml29623 len = MBLKL(nmp);
7933859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
7946929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d",
7956929Smisaki len, pkt_len, nmblks,
7966929Smisaki *tot_xfer_len_p));
7973859Sml29623
7983859Sml29623 if (len <= 0) {
7993859Sml29623 bmp = nmp;
8003859Sml29623 nmp = nmp->b_cont;
8013859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8026929Smisaki "==> nxge_tx_pkt_nmblocks: "
8036929Smisaki "len (0) pkt_len %d nmblks %d",
8046929Smisaki pkt_len, nmblks));
8053859Sml29623 continue;
8063859Sml29623 }
8073859Sml29623
8083859Sml29623 *tot_xfer_len_p += len;
8093859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: "
8106929Smisaki "len %d pkt_len %d nmblks %d tot_xfer_len %d",
8116929Smisaki len, pkt_len, nmblks,
8126929Smisaki *tot_xfer_len_p));
8133859Sml29623
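		/*
		 * Fragments shorter than nxge_bcopy_thresh are meant to be
		 * bcopied into a single transmit buffer, so a run of small
		 * fragments is counted as one block: nmblks is bumped only
		 * when a new run starts, and the run is closed once
		 * nxge_bcopy_thresh bytes have accumulated.
		 */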
8143859Sml29623 if (len < nxge_bcopy_thresh) {
8153859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8166929Smisaki "==> nxge_tx_pkt_nmblocks: "
8176929Smisaki "len %d (< thresh) pkt_len %d nmblks %d",
8186929Smisaki len, pkt_len, nmblks));
8193859Sml29623 if (pkt_len == 0)
8203859Sml29623 nmblks++;
8213859Sml29623 pkt_len += len;
8223859Sml29623 if (pkt_len >= nxge_bcopy_thresh) {
8233859Sml29623 pkt_len = 0;
8243859Sml29623 len = 0;
8253859Sml29623 nmp = bmp;
8263859Sml29623 }
8273859Sml29623 } else {
8283859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8296929Smisaki "==> nxge_tx_pkt_nmblocks: "
8306929Smisaki "len %d (> thresh) pkt_len %d nmblks %d",
8316929Smisaki len, pkt_len, nmblks));
8323859Sml29623 pkt_len = 0;
8333859Sml29623 nmblks++;
8343859Sml29623 /*
8353859Sml29623 * Hardware limits the transfer length to 4K.
8363859Sml29623 * If len is more than 4K, we need to break
8373859Sml29623 * it up to at most 2 more blocks.
8383859Sml29623 */
8393859Sml29623 if (len > TX_MAX_TRANSFER_LENGTH) {
8403859Sml29623 uint32_t nsegs;
8413859Sml29623
8426495Sspeer nsegs = 1;
8433859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8446929Smisaki "==> nxge_tx_pkt_nmblocks: "
8456929Smisaki "len %d pkt_len %d nmblks %d nsegs %d",
8466929Smisaki len, pkt_len, nmblks, nsegs));
8473859Sml29623 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) {
8483859Sml29623 ++nsegs;
8493859Sml29623 }
8503859Sml29623 do {
8513859Sml29623 b_wptr = nmp->b_rptr +
8526929Smisaki TX_MAX_TRANSFER_LENGTH;
8533859Sml29623 nmp->b_wptr = b_wptr;
8543859Sml29623 if ((tmp = dupb(nmp)) == NULL) {
8553859Sml29623 return (0);
8563859Sml29623 }
8573859Sml29623 tmp->b_rptr = b_wptr;
8583859Sml29623 tmp->b_wptr = nmp->b_wptr;
8593859Sml29623 tmp->b_cont = nmp->b_cont;
8603859Sml29623 nmp->b_cont = tmp;
8613859Sml29623 nmblks++;
8623859Sml29623 if (--nsegs) {
8633859Sml29623 nmp = tmp;
8643859Sml29623 }
8653859Sml29623 } while (nsegs);
8663859Sml29623 nmp = tmp;
8673859Sml29623 }
8683859Sml29623 }
8693859Sml29623
8703859Sml29623 /*
8713859Sml29623 * Hardware limits the transmit gather pointers to 15.
8723859Sml29623 */
8733859Sml29623 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) >
8746929Smisaki TX_MAX_GATHER_POINTERS) {
8753859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8766929Smisaki "==> nxge_tx_pkt_nmblocks: pull msg - "
8776929Smisaki "len %d pkt_len %d nmblks %d",
8786929Smisaki len, pkt_len, nmblks));
8793859Sml29623 /* Pull all message blocks from b_cont */
8803859Sml29623 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) {
8813859Sml29623 return (0);
8823859Sml29623 }
8833859Sml29623 freemsg(nmp->b_cont);
8843859Sml29623 nmp->b_cont = tmp;
8853859Sml29623 pkt_len = 0;
8863859Sml29623 }
8873859Sml29623 bmp = nmp;
8883859Sml29623 nmp = nmp->b_cont;
8893859Sml29623 }
8903859Sml29623
8913859Sml29623 NXGE_DEBUG_MSG((NULL, TX_CTL,
8926929Smisaki "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p "
8936929Smisaki "nmblks %d len %d tot_xfer_len %d",
8946929Smisaki mp->b_rptr, mp->b_wptr, nmblks,
8956929Smisaki MBLKL(mp), *tot_xfer_len_p));
8963859Sml29623
8973859Sml29623 return (nmblks);
8983859Sml29623 }
8993859Sml29623
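/*
 * nxge_txdma_reclaim
 *
 *	Free up transmit descriptors that the hardware has completed.  If
 *	only a few descriptors are outstanding (fewer than
 *	nxge_reclaim_pending), the reclaim is skipped and B_TRUE returned.
 *	Otherwise descriptors up to the hardware head pointer (TX_RING_HDL)
 *	are reclaimed: DMA resources are unbound/unloaded, saved mblks
 *	freed, and per-channel statistics updated.  The return value tells
 *	the caller whether the ring now has room for nmblks more
 *	descriptors (keeping a TX_FULL_MARK reserve).
 */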
9003859Sml29623 boolean_t
9013859Sml29623 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks)
9023859Sml29623 {
9033859Sml29623 boolean_t status = B_TRUE;
9043859Sml29623 p_nxge_dma_common_t tx_desc_dma_p;
9053859Sml29623 nxge_dma_common_t desc_area;
9063859Sml29623 p_tx_desc_t tx_desc_ring_vp;
9073859Sml29623 p_tx_desc_t tx_desc_p;
9083859Sml29623 p_tx_desc_t tx_desc_pp;
9093859Sml29623 tx_desc_t r_tx_desc;
9103859Sml29623 p_tx_msg_t tx_msg_ring;
9113859Sml29623 p_tx_msg_t tx_msg_p;
9123859Sml29623 npi_handle_t handle;
9133859Sml29623 tx_ring_hdl_t tx_head;
9143859Sml29623 uint32_t pkt_len;
9153859Sml29623 uint_t tx_rd_index;
9163859Sml29623 uint16_t head_index, tail_index;
9173859Sml29623 uint8_t tdc;
9183859Sml29623 boolean_t head_wrap, tail_wrap;
9198275SEric Cheng p_nxge_tx_ring_stats_t tdc_stats;
9203859Sml29623 int rc;
9213859Sml29623
9223859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim"));
9233859Sml29623
9243859Sml29623 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) &&
9256929Smisaki (nmblks != 0));
9263859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9276929Smisaki "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d",
9286929Smisaki tx_ring_p->descs_pending, nxge_reclaim_pending,
9296929Smisaki nmblks));
9303859Sml29623 if (!status) {
9313859Sml29623 tx_desc_dma_p = &tx_ring_p->tdc_desc;
9323859Sml29623 desc_area = tx_ring_p->tdc_desc;
9333859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
9343859Sml29623 tx_desc_ring_vp = tx_desc_dma_p->kaddrp;
9353859Sml29623 tx_desc_ring_vp =
9366929Smisaki (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
9373859Sml29623 tx_rd_index = tx_ring_p->rd_index;
9383859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
9393859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring;
9403859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index];
9413859Sml29623 tdc = tx_ring_p->tdc;
9423859Sml29623 tdc_stats = tx_ring_p->tdc_stats;
9433859Sml29623 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) {
9443859Sml29623 tdc_stats->tx_max_pend = tx_ring_p->descs_pending;
9453859Sml29623 }
9463859Sml29623
9473859Sml29623 tail_index = tx_ring_p->wr_index;
9483859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap;
9493859Sml29623
9503859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9516929Smisaki "==> nxge_txdma_reclaim: tdc %d tx_rd_index %d "
9526929Smisaki "tail_index %d tail_wrap %d "
9536929Smisaki "tx_desc_p $%p ($%p) ",
9546929Smisaki tdc, tx_rd_index, tail_index, tail_wrap,
9556929Smisaki tx_desc_p, (*(uint64_t *)tx_desc_p)));
9563859Sml29623 /*
9573859Sml29623 * Read the hardware maintained transmit head
9583859Sml29623 * and wrap around bit.
9593859Sml29623 */
9603859Sml29623 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value);
9613859Sml29623 head_index = tx_head.bits.ldw.head;
9623859Sml29623 head_wrap = tx_head.bits.ldw.wrap;
9633859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9646929Smisaki "==> nxge_txdma_reclaim: "
9656929Smisaki "tx_rd_index %d tail %d tail_wrap %d "
9666929Smisaki "head %d wrap %d",
9676929Smisaki tx_rd_index, tail_index, tail_wrap,
9686929Smisaki head_index, head_wrap));
9693859Sml29623
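		/*
		 * head == tail is ambiguous on its own: with matching wrap
		 * bits the ring is empty, with differing wrap bits it is
		 * completely full.
		 */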
9703859Sml29623 if (head_index == tail_index) {
9713859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap,
9726929Smisaki tail_index, tail_wrap) &&
9736929Smisaki (head_index == tx_rd_index)) {
9743859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9756929Smisaki "==> nxge_txdma_reclaim: EMPTY"));
9763859Sml29623 return (B_TRUE);
9773859Sml29623 }
9783859Sml29623
9793859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9806929Smisaki "==> nxge_txdma_reclaim: Checking "
9816929Smisaki "if ring full"));
9823859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
9836929Smisaki tail_wrap)) {
9843859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9856929Smisaki "==> nxge_txdma_reclaim: full"));
9863859Sml29623 return (B_FALSE);
9873859Sml29623 }
9883859Sml29623 }
9893859Sml29623
9903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9916929Smisaki "==> nxge_txdma_reclaim: tx_rd_index and head_index"));
9923859Sml29623
9933859Sml29623 tx_desc_pp = &r_tx_desc;
9943859Sml29623 while ((tx_rd_index != head_index) &&
9956929Smisaki (tx_ring_p->descs_pending != 0)) {
9963859Sml29623
9973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
9986929Smisaki "==> nxge_txdma_reclaim: Checking if pending"));
9993859Sml29623
10003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10016929Smisaki "==> nxge_txdma_reclaim: "
10026929Smisaki "descs_pending %d ",
10036929Smisaki tx_ring_p->descs_pending));
10043859Sml29623
10053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10066929Smisaki "==> nxge_txdma_reclaim: "
10076929Smisaki "(tx_rd_index %d head_index %d "
10086929Smisaki "(tx_desc_p $%p)",
10096929Smisaki tx_rd_index, head_index,
10106929Smisaki tx_desc_p));
10113859Sml29623
10123859Sml29623 tx_desc_pp->value = tx_desc_p->value;
10133859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10146929Smisaki "==> nxge_txdma_reclaim: "
10156929Smisaki "(tx_rd_index %d head_index %d "
10166929Smisaki "tx_desc_p $%p (desc value 0x%llx) ",
10176929Smisaki tx_rd_index, head_index,
10186929Smisaki tx_desc_pp, (*(uint64_t *)tx_desc_pp)));
10193859Sml29623
10203859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10216929Smisaki "==> nxge_txdma_reclaim: dump desc:"));
10223859Sml29623
10233859Sml29623 pkt_len = tx_desc_pp->bits.hdw.tr_len;
1024*11878SVenu.Iyer@Sun.COM tdc_stats->obytes += (pkt_len - TX_PKT_HEADER_SIZE);
10253859Sml29623 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop;
10263859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10276929Smisaki "==> nxge_txdma_reclaim: pkt_len %d "
10286929Smisaki "tdc channel %d opackets %d",
10296929Smisaki pkt_len,
10306929Smisaki tdc,
10316929Smisaki tdc_stats->opackets));
10323859Sml29623
10333859Sml29623 if (tx_msg_p->flags.dma_type == USE_DVMA) {
10343859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10356929Smisaki "tx_desc_p = $%p "
10366929Smisaki "tx_desc_pp = $%p "
10376929Smisaki "index = %d",
10386929Smisaki tx_desc_p,
10396929Smisaki tx_desc_pp,
10406929Smisaki tx_ring_p->rd_index));
10413859Sml29623 (void) dvma_unload(tx_msg_p->dvma_handle,
10426929Smisaki 0, -1);
10433859Sml29623 tx_msg_p->dvma_handle = NULL;
10443859Sml29623 if (tx_ring_p->dvma_wr_index ==
10456929Smisaki tx_ring_p->dvma_wrap_mask) {
10463859Sml29623 tx_ring_p->dvma_wr_index = 0;
10473859Sml29623 } else {
10483859Sml29623 tx_ring_p->dvma_wr_index++;
10493859Sml29623 }
10503859Sml29623 tx_ring_p->dvma_pending--;
10513859Sml29623 } else if (tx_msg_p->flags.dma_type ==
10526929Smisaki USE_DMA) {
10533859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10546929Smisaki "==> nxge_txdma_reclaim: "
10556929Smisaki "USE DMA"));
10563859Sml29623 if (rc = ddi_dma_unbind_handle
10576929Smisaki (tx_msg_p->dma_handle)) {
10583859Sml29623 cmn_err(CE_WARN, "!nxge_reclaim: "
10596929Smisaki "ddi_dma_unbind_handle "
10606929Smisaki "failed. status %d", rc);
10613859Sml29623 }
10623859Sml29623 }
10633859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10646929Smisaki "==> nxge_txdma_reclaim: count packets"));
10653859Sml29623 /*
10663859Sml29623 * count a chained packet only once.
10673859Sml29623 */
10683859Sml29623 if (tx_msg_p->tx_message != NULL) {
10698275SEric Cheng freemsg(tx_msg_p->tx_message);
10708275SEric Cheng tx_msg_p->tx_message = NULL;
10713859Sml29623 }
10723859Sml29623
10733859Sml29623 tx_msg_p->flags.dma_type = USE_NONE;
10743859Sml29623 tx_rd_index = tx_ring_p->rd_index;
10753859Sml29623 tx_rd_index = (tx_rd_index + 1) &
10766929Smisaki tx_ring_p->tx_wrap_mask;
10773859Sml29623 tx_ring_p->rd_index = tx_rd_index;
10783859Sml29623 tx_ring_p->descs_pending--;
10793859Sml29623 tx_desc_p = &tx_desc_ring_vp[tx_rd_index];
10803859Sml29623 tx_msg_p = &tx_msg_ring[tx_rd_index];
10813859Sml29623 }
10823859Sml29623
10838948SMichael.Speer@Sun.COM status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
10848948SMichael.Speer@Sun.COM (int)tx_ring_p->descs_pending - TX_FULL_MARK));
10853859Sml29623 if (status) {
10869015SMichael.Speer@Sun.COM (void) cas32((uint32_t *)&tx_ring_p->queueing, 1, 0);
10873859Sml29623 }
10883859Sml29623 } else {
10898948SMichael.Speer@Sun.COM status = (nmblks <= ((int)tx_ring_p->tx_ring_size -
10908948SMichael.Speer@Sun.COM (int)tx_ring_p->descs_pending - TX_FULL_MARK));
10913859Sml29623 }
10923859Sml29623
10933859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
10946929Smisaki "<== nxge_txdma_reclaim status = 0x%08x", status));
10953859Sml29623
10963859Sml29623 return (status);
10973859Sml29623 }
10983859Sml29623
10996495Sspeer /*
11006495Sspeer * nxge_tx_intr
11016495Sspeer *
11026495Sspeer * Process a TDC interrupt
11036495Sspeer *
11046495Sspeer * Arguments:
11056495Sspeer * arg1 A Logical Device state Vector (LSV) data structure.
11066495Sspeer * arg2 nxge_t *
11076495Sspeer *
11086495Sspeer * Notes:
11096495Sspeer *
11106495Sspeer * NPI/NXGE function calls:
11116495Sspeer * npi_txdma_control_status()
11126495Sspeer * npi_intr_ldg_mgmt_set()
11136495Sspeer *
11146495Sspeer * nxge_tx_err_evnts()
11156495Sspeer * nxge_txdma_reclaim()
11166495Sspeer *
11176495Sspeer * Registers accessed:
11186495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
11196495Sspeer * PIO_LDSV
11206495Sspeer *
11216495Sspeer * Context:
11226495Sspeer * Any domain
11236495Sspeer */
11243859Sml29623 uint_t
11253859Sml29623 nxge_tx_intr(void *arg1, void *arg2)
11263859Sml29623 {
11273859Sml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
11283859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg2;
11293859Sml29623 p_nxge_ldg_t ldgp;
11303859Sml29623 uint8_t channel;
11313859Sml29623 uint32_t vindex;
11323859Sml29623 npi_handle_t handle;
11333859Sml29623 tx_cs_t cs;
11343859Sml29623 p_tx_ring_t *tx_rings;
11353859Sml29623 p_tx_ring_t tx_ring_p;
11363859Sml29623 npi_status_t rs = NPI_SUCCESS;
11373859Sml29623 uint_t serviced = DDI_INTR_UNCLAIMED;
11383859Sml29623 nxge_status_t status = NXGE_OK;
11393859Sml29623
11403859Sml29623 if (ldvp == NULL) {
11413859Sml29623 NXGE_DEBUG_MSG((NULL, INT_CTL,
11426929Smisaki "<== nxge_tx_intr: nxgep $%p ldvp $%p",
11436929Smisaki nxgep, ldvp));
11443859Sml29623 return (DDI_INTR_UNCLAIMED);
11453859Sml29623 }
11463859Sml29623
11473859Sml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
11483859Sml29623 nxgep = ldvp->nxgep;
11493859Sml29623 }
11503859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
11516929Smisaki "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p",
11526929Smisaki nxgep, ldvp));
11536713Sspeer
11546713Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
11556713Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
11566713Sspeer NXGE_DEBUG_MSG((nxgep, INT_CTL,
11576713Sspeer "<== nxge_tx_intr: interface not started or intialized"));
11586713Sspeer return (DDI_INTR_CLAIMED);
11596713Sspeer }
11606713Sspeer
11613859Sml29623 /*
11623859Sml29623 * This interrupt handler is for a specific
11633859Sml29623 * transmit dma channel.
11643859Sml29623 */
11653859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
11663859Sml29623 /* Get the control and status for this channel. */
11673859Sml29623 channel = ldvp->channel;
11683859Sml29623 ldgp = ldvp->ldgp;
11693859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
11706929Smisaki "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p "
11716929Smisaki "channel %d",
11726929Smisaki nxgep, ldvp, channel));
11733859Sml29623
11743859Sml29623 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs);
11753859Sml29623 vindex = ldvp->vdma_index;
11763859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
11776929Smisaki "==> nxge_tx_intr:channel %d ring index %d status 0x%08x",
11786929Smisaki channel, vindex, rs));
11793859Sml29623 if (!rs && cs.bits.ldw.mk) {
11803859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
11816929Smisaki "==> nxge_tx_intr:channel %d ring index %d "
11826929Smisaki "status 0x%08x (mk bit set)",
11836929Smisaki channel, vindex, rs));
11843859Sml29623 tx_rings = nxgep->tx_rings->rings;
11853859Sml29623 tx_ring_p = tx_rings[vindex];
11863859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
11876929Smisaki "==> nxge_tx_intr:channel %d ring index %d "
11886929Smisaki "status 0x%08x (mk bit set, calling reclaim)",
11896929Smisaki channel, vindex, rs));
11903859Sml29623
11918275SEric Cheng nxge_tx_ring_task((void *)tx_ring_p);
11923859Sml29623 }
11933859Sml29623
11943859Sml29623 /*
11953859Sml29623 * Process other transmit control and status.
11963859Sml29623 * Check the ldv state.
11973859Sml29623 */
11983859Sml29623 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
11993859Sml29623 /*
12003859Sml29623 * Rearm this logical group if this is a single device
12013859Sml29623 * group.
12023859Sml29623 */
12033859Sml29623 if (ldgp->nldvs == 1) {
12043859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL,
12056929Smisaki "==> nxge_tx_intr: rearm"));
12063859Sml29623 if (status == NXGE_OK) {
12076495Sspeer if (isLDOMguest(nxgep)) {
12086495Sspeer nxge_hio_ldgimgn(nxgep, ldgp);
12096495Sspeer } else {
12106495Sspeer (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
12116495Sspeer B_TRUE, ldgp->ldg_timer);
12126495Sspeer }
12133859Sml29623 }
12143859Sml29623 }
12153859Sml29623
12163859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr"));
12173859Sml29623 serviced = DDI_INTR_CLAIMED;
12183859Sml29623 return (serviced);
12193859Sml29623 }
12203859Sml29623
12213859Sml29623 void
12226495Sspeer nxge_txdma_stop(p_nxge_t nxgep) /* Dead */
12233859Sml29623 {
12243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop"));
12253859Sml29623
12263859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
12273859Sml29623
12283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop"));
12293859Sml29623 }
12303859Sml29623
12313859Sml29623 void
12326495Sspeer nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */
12333859Sml29623 {
12343859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start"));
12353859Sml29623
12363859Sml29623 (void) nxge_txdma_stop(nxgep);
12373859Sml29623
12383859Sml29623 (void) nxge_fixup_txdma_rings(nxgep);
12393859Sml29623 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START);
12403859Sml29623 (void) nxge_tx_mac_enable(nxgep);
12413859Sml29623 (void) nxge_txdma_hw_kick(nxgep);
12423859Sml29623
12433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start"));
12443859Sml29623 }
12453859Sml29623
12466495Sspeer npi_status_t
12476495Sspeer nxge_txdma_channel_disable(
12486495Sspeer nxge_t *nxge,
12496495Sspeer int channel)
12506495Sspeer {
12516495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge);
12526495Sspeer npi_status_t rs;
12536495Sspeer tdmc_intr_dbg_t intr_dbg;
12546495Sspeer
12556495Sspeer /*
12566495Sspeer * Stop the dma channel and wait for the stop-done.
12576495Sspeer * If the stop-done bit is not present, then force
12586495Sspeer * an error so TXC will stop.
12596495Sspeer * All channels bound to this port need to be stopped
12606495Sspeer * and reset after injecting an interrupt error.
12616495Sspeer */
12626495Sspeer rs = npi_txdma_channel_disable(handle, channel);
12636495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12646929Smisaki "==> nxge_txdma_channel_disable(%d) "
12656929Smisaki "rs 0x%x", channel, rs));
12666495Sspeer if (rs != NPI_SUCCESS) {
12676495Sspeer /* Inject any error */
12686495Sspeer intr_dbg.value = 0;
12696495Sspeer intr_dbg.bits.ldw.nack_pref = 1;
12706495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12716929Smisaki "==> nxge_txdma_hw_mode: "
12726929Smisaki "channel %d (stop failed 0x%x) "
12736929Smisaki "(inject err)", rs, channel));
12746495Sspeer (void) npi_txdma_inj_int_error_set(
12756929Smisaki handle, channel, &intr_dbg);
12766495Sspeer rs = npi_txdma_channel_disable(handle, channel);
12776495Sspeer NXGE_DEBUG_MSG((nxge, MEM3_CTL,
12786929Smisaki "==> nxge_txdma_hw_mode: "
12796929Smisaki "channel %d (stop again 0x%x) "
12806929Smisaki "(after inject err)",
12816929Smisaki channel, rs));
12826495Sspeer }
12836495Sspeer
12846495Sspeer return (rs);
12856495Sspeer }
12866495Sspeer
12876495Sspeer /*
12886495Sspeer * nxge_txdma_hw_mode
12896495Sspeer *
12906495Sspeer * Toggle all TDCs on (enable) or off (disable).
12916495Sspeer *
12926495Sspeer * Arguments:
12936495Sspeer * nxgep
12946495Sspeer * enable Enable or disable a TDC.
12956495Sspeer *
12966495Sspeer * Notes:
12976495Sspeer *
12986495Sspeer * NPI/NXGE function calls:
12996495Sspeer * npi_txdma_channel_enable(TX_CS)
13006495Sspeer * npi_txdma_channel_disable(TX_CS)
13016495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
13026495Sspeer *
13036495Sspeer * Registers accessed:
13046495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
13056495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
13066495Sspeer *
13076495Sspeer * Context:
13086495Sspeer * Any domain
13096495Sspeer */
13103859Sml29623 nxge_status_t
13113859Sml29623 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
13123859Sml29623 {
13136495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
13146495Sspeer
13156495Sspeer npi_handle_t handle;
13166495Sspeer nxge_status_t status;
13176495Sspeer npi_status_t rs;
13186495Sspeer int tdc;
13193859Sml29623
13203859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13216929Smisaki "==> nxge_txdma_hw_mode: enable mode %d", enable));
13223859Sml29623
13233859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
13243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
13256929Smisaki "<== nxge_txdma_mode: not initialized"));
13263859Sml29623 return (NXGE_ERROR);
13273859Sml29623 }
13283859Sml29623
13296495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
13303859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
13316495Sspeer "<== nxge_txdma_hw_mode: NULL ring pointer(s)"));
13323859Sml29623 return (NXGE_ERROR);
13333859Sml29623 }
13343859Sml29623
13356495Sspeer /* Enable or disable all of the TDCs owned by us. */
13363859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
13376495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
13386495Sspeer if ((1 << tdc) & set->owned.map) {
13396495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
13406495Sspeer if (ring) {
13416495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13426495Sspeer "==> nxge_txdma_hw_mode: channel %d", tdc));
13436495Sspeer if (enable) {
13446495Sspeer rs = npi_txdma_channel_enable
13456495Sspeer (handle, tdc);
13463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13476495Sspeer "==> nxge_txdma_hw_mode: "
13486495Sspeer "channel %d (enable) rs 0x%x",
13496495Sspeer tdc, rs));
13506495Sspeer } else {
13516495Sspeer rs = nxge_txdma_channel_disable
13526495Sspeer (nxgep, tdc);
13533859Sml29623 }
13543859Sml29623 }
13553859Sml29623 }
13563859Sml29623 }
13573859Sml29623
13583859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
13593859Sml29623
13603859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
13616929Smisaki "<== nxge_txdma_hw_mode: status 0x%x", status));
13623859Sml29623
13633859Sml29623 return (status);
13643859Sml29623 }
13653859Sml29623
13663859Sml29623 void
13673859Sml29623 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
13683859Sml29623 {
13693859Sml29623 npi_handle_t handle;
13703859Sml29623
13713859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
13726929Smisaki "==> nxge_txdma_enable_channel: channel %d", channel));
13733859Sml29623
13743859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
13753859Sml29623 /* enable the transmit dma channels */
13763859Sml29623 (void) npi_txdma_channel_enable(handle, channel);
13773859Sml29623
13783859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel"));
13793859Sml29623 }
13803859Sml29623
13813859Sml29623 void
13823859Sml29623 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
13833859Sml29623 {
13843859Sml29623 npi_handle_t handle;
13853859Sml29623
13863859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
13876929Smisaki "==> nxge_txdma_disable_channel: channel %d", channel));
13883859Sml29623
13893859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
13903859Sml29623 /* stop the transmit dma channels */
13913859Sml29623 (void) npi_txdma_channel_disable(handle, channel);
13923859Sml29623
13933859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel"));
13943859Sml29623 }
13953859Sml29623
13966495Sspeer /*
13976495Sspeer * nxge_txdma_stop_inj_err
13986495Sspeer *
13996495Sspeer * Stop a TDC. If at first we don't succeed, inject an error.
14006495Sspeer *
14016495Sspeer * Arguments:
14026495Sspeer * nxgep
14036495Sspeer * channel The channel to stop.
14046495Sspeer *
14056495Sspeer * Notes:
14066495Sspeer *
14076495Sspeer * NPI/NXGE function calls:
14086495Sspeer * npi_txdma_channel_disable()
14096495Sspeer * npi_txdma_inj_int_error_set()
14106495Sspeer * #if defined(NXGE_DEBUG)
14116495Sspeer * nxge_txdma_regs_dump_channels(nxgep);
14126495Sspeer * #endif
14136495Sspeer *
14146495Sspeer * Registers accessed:
14156495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
14166495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
14176495Sspeer *
14186495Sspeer * Context:
14196495Sspeer * Any domain
14206495Sspeer */
14213859Sml29623 int
14223859Sml29623 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel)
14233859Sml29623 {
14243859Sml29623 npi_handle_t handle;
14253859Sml29623 tdmc_intr_dbg_t intr_dbg;
14263859Sml29623 int status;
14273859Sml29623 npi_status_t rs = NPI_SUCCESS;
14283859Sml29623
14293859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err"));
14303859Sml29623 /*
14313859Sml29623 * Stop the DMA channel and wait for the stop-done bit.
14323859Sml29623 * If the stop-done bit does not get set, inject an
14333859Sml29623 * error to force it.
14343859Sml29623 */
14353859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
14363859Sml29623 rs = npi_txdma_channel_disable(handle, channel);
14373859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14383859Sml29623 if (status == NXGE_OK) {
14393859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
14406929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): "
14416929Smisaki "stopped OK", channel));
14423859Sml29623 return (status);
14433859Sml29623 }
14443859Sml29623
14453859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14466929Smisaki "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
14476929Smisaki "injecting error", channel, rs));
14483859Sml29623 /* Inject any error */
14493859Sml29623 intr_dbg.value = 0;
14503859Sml29623 intr_dbg.bits.ldw.nack_pref = 1;
14513859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
14523859Sml29623
14533859Sml29623 /* Stop done bit will be set as a result of error injection */
14543859Sml29623 rs = npi_txdma_channel_disable(handle, channel);
14553859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
14563859Sml29623 if (!(rs & NPI_TXDMA_STOP_FAILED)) {
14573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
14586929Smisaki "<== nxge_txdma_stop_inj_err (channel %d): "
14596929Smisaki "stopped OK ", channel));
14603859Sml29623 return (status);
14613859Sml29623 }
14623859Sml29623
14633859Sml29623 #if defined(NXGE_DEBUG)
14643859Sml29623 nxge_txdma_regs_dump_channels(nxgep);
14653859Sml29623 #endif
14663859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
14676929Smisaki "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) "
14686929Smisaki " (injected error but still not stopped)", channel, rs));
14693859Sml29623
14703859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err"));
14713859Sml29623 return (status);
14723859Sml29623 }
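/*
 * A self-contained sketch of the "stop, inject a debug error, stop again"
 * recovery pattern implemented by nxge_txdma_stop_inj_err() above.  The
 * functions stop_channel() and inject_error() are hypothetical stand-ins
 * for npi_txdma_channel_disable() and npi_txdma_inj_int_error_set(); they
 * only model success and failure.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
stop_channel(int channel, int attempt)
{
	/* Pretend the first stop times out and the retry succeeds. */
	(void) channel;
	return (attempt > 0);
}

static void
inject_error(int channel)
{
	(void) printf("injecting debug error on channel %d\n", channel);
}

/* Stop a channel; if the first attempt fails, force an error and retry. */
static int
stop_with_error_injection(int channel)
{
	if (stop_channel(channel, 0))
		return (0);		/* stopped cleanly */

	inject_error(channel);		/* the error sets the stop-done bit */

	if (stop_channel(channel, 1))
		return (0);		/* stopped after the injection */

	return (-1);			/* still not stopped */
}

int
main(void)
{
	return (stop_with_error_injection(3) == 0 ? 0 : 1);
}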
14733859Sml29623
14743859Sml29623 /*ARGSUSED*/
14753859Sml29623 void
14763859Sml29623 nxge_fixup_txdma_rings(p_nxge_t nxgep)
14773859Sml29623 {
14786495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
14796495Sspeer int tdc;
14803859Sml29623
14813859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings"));
14823859Sml29623
14836495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
14846495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
14856495Sspeer "<== nxge_fixup_txdma_rings: NULL ring pointer(s)"));
14863859Sml29623 return;
14873859Sml29623 }
14883859Sml29623
14896495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
14906495Sspeer if ((1 << tdc) & set->owned.map) {
14916495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
14926495Sspeer if (ring) {
14936495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
14946495Sspeer "==> nxge_fixup_txdma_rings: channel %d",
14956495Sspeer tdc));
14966495Sspeer nxge_txdma_fixup_channel(nxgep, ring, tdc);
14976495Sspeer }
14986495Sspeer }
14993859Sml29623 }
15003859Sml29623
15013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings"));
15023859Sml29623 }
15033859Sml29623
15043859Sml29623 /*ARGSUSED*/
15053859Sml29623 void
15063859Sml29623 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
15073859Sml29623 {
15083859Sml29623 p_tx_ring_t ring_p;
15093859Sml29623
15103859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel"));
15113859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel);
15123859Sml29623 if (ring_p == NULL) {
15133859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
15143859Sml29623 return;
15153859Sml29623 }
15163859Sml29623
15173859Sml29623 if (ring_p->tdc != channel) {
15183859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
15196929Smisaki "<== nxge_txdma_fix_channel: channel not matched "
15206929Smisaki "ring tdc %d passed channel %d",
15216929Smisaki ring_p->tdc, channel));
15223859Sml29623 return;
15233859Sml29623 }
15243859Sml29623
15253859Sml29623 nxge_txdma_fixup_channel(nxgep, ring_p, channel);
15263859Sml29623
15273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel"));
15283859Sml29623 }
15293859Sml29623
15303859Sml29623 /*ARGSUSED*/
15313859Sml29623 void
15323859Sml29623 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
15333859Sml29623 {
15343859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel"));
15353859Sml29623
15363859Sml29623 if (ring_p == NULL) {
15373859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
15386929Smisaki "<== nxge_txdma_fixup_channel: NULL ring pointer"));
15393859Sml29623 return;
15403859Sml29623 }
15413859Sml29623
15423859Sml29623 if (ring_p->tdc != channel) {
15433859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
15446929Smisaki "<== nxge_txdma_fixup_channel: channel not matched "
15456929Smisaki "ring tdc %d passed channel %d",
15466929Smisaki ring_p->tdc, channel));
15473859Sml29623 return;
15483859Sml29623 }
15493859Sml29623
15503859Sml29623 MUTEX_ENTER(&ring_p->lock);
15513859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
15523859Sml29623 ring_p->rd_index = 0;
15533859Sml29623 ring_p->wr_index = 0;
15543859Sml29623 ring_p->ring_head.value = 0;
15553859Sml29623 ring_p->ring_kick_tail.value = 0;
15563859Sml29623 ring_p->descs_pending = 0;
15573859Sml29623 MUTEX_EXIT(&ring_p->lock);
15583859Sml29623
15593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel"));
15603859Sml29623 }
15613859Sml29623
15623859Sml29623 /*ARGSUSED*/
15633859Sml29623 void
15643859Sml29623 nxge_txdma_hw_kick(p_nxge_t nxgep)
15653859Sml29623 {
15666495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
15676495Sspeer int tdc;
15683859Sml29623
15693859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick"));
15703859Sml29623
15716495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
15723859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
15736495Sspeer "<== nxge_txdma_hw_kick: NULL ring pointer(s)"));
15743859Sml29623 return;
15753859Sml29623 }
15763859Sml29623
15776495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
15786495Sspeer if ((1 << tdc) & set->owned.map) {
15796495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
15806495Sspeer if (ring) {
15816495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
15826495Sspeer "==> nxge_txdma_hw_kick: channel %d", tdc));
15836495Sspeer nxge_txdma_hw_kick_channel(nxgep, ring, tdc);
15846495Sspeer }
15856495Sspeer }
15863859Sml29623 }
15873859Sml29623
15883859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick"));
15893859Sml29623 }
15903859Sml29623
15913859Sml29623 /*ARGSUSED*/
15923859Sml29623 void
15933859Sml29623 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel)
15943859Sml29623 {
15953859Sml29623 p_tx_ring_t ring_p;
15963859Sml29623
15973859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel"));
15983859Sml29623
15993859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel);
16003859Sml29623 if (ring_p == NULL) {
16013859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
16026929Smisaki "<== nxge_txdma_kick_channel: NULL ring pointer"));
16033859Sml29623 return;
16043859Sml29623 }
16053859Sml29623
16063859Sml29623 if (ring_p->tdc != channel) {
16073859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
16086929Smisaki "<== nxge_txdma_kick_channel: channel not matched "
16096929Smisaki "ring tdc %d passed channel %d",
16106929Smisaki ring_p->tdc, channel));
16113859Sml29623 return;
16123859Sml29623 }
16133859Sml29623
16143859Sml29623 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel);
16153859Sml29623
16163859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel"));
16173859Sml29623 }
16183859Sml29623
16193859Sml29623 /*ARGSUSED*/
16203859Sml29623 void
16213859Sml29623 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel)
16223859Sml29623 {
16233859Sml29623
16243859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel"));
16253859Sml29623
16263859Sml29623 if (ring_p == NULL) {
16273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
16286929Smisaki "<== nxge_txdma_hw_kick_channel: NULL ring pointer"));
16293859Sml29623 return;
16303859Sml29623 }
16313859Sml29623
16323859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel"));
16333859Sml29623 }
16343859Sml29623
16356495Sspeer /*
16366495Sspeer * nxge_check_tx_hang
16376495Sspeer *
16386495Sspeer * Check the state of all TDCs belonging to nxgep.
16396495Sspeer *
16406495Sspeer * Arguments:
16416495Sspeer * nxgep
16426495Sspeer *
16436495Sspeer * Notes:
16446495Sspeer * Called by nxge_hw.c:nxge_check_hw_state().
16456495Sspeer *
16466495Sspeer * NPI/NXGE function calls:
16476495Sspeer *
16486495Sspeer * Registers accessed:
16496495Sspeer *
16506495Sspeer * Context:
16516495Sspeer * Any domain
16526495Sspeer */
16533859Sml29623 /*ARGSUSED*/
16543859Sml29623 void
16553859Sml29623 nxge_check_tx_hang(p_nxge_t nxgep)
16563859Sml29623 {
16573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));
16583859Sml29623
16596713Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
16606713Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
16616713Sspeer goto nxge_check_tx_hang_exit;
16626713Sspeer }
16636713Sspeer
16643859Sml29623 /*
16653859Sml29623 * Needs inputs from hardware registers:
16663859Sml29623 * whether the head index has moved since the last timeout;
16673859Sml29623 * the packets-transmitted and packets-stuffed counters.
16683859Sml29623 */
16693859Sml29623 if (nxge_txdma_hung(nxgep)) {
16703859Sml29623 nxge_fixup_hung_txdma_rings(nxgep);
16713859Sml29623 }
16726713Sspeer
16736713Sspeer nxge_check_tx_hang_exit:
16743859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
16753859Sml29623 }
16763859Sml29623
16776495Sspeer /*
16786495Sspeer * nxge_txdma_hung
16796495Sspeer *
16806495Sspeer * Determine whether any TDC owned by this instance appears to be hung.
16816495Sspeer *
16826495Sspeer * Arguments:
16836495Sspeer * nxgep
16846495Sspeer *
16856495Sspeer * Returns B_TRUE as soon as one hung, unshared TDC is found.
16866495Sspeer *
16876495Sspeer * Notes:
16886495Sspeer * Called by nxge_check_tx_hang()
16896495Sspeer *
16906495Sspeer * NPI/NXGE function calls:
16916495Sspeer * nxge_txdma_channel_hung()
16926495Sspeer *
16936495Sspeer * Registers accessed:
16946495Sspeer *
16956495Sspeer * Context:
16966495Sspeer * Any domain
16976495Sspeer */
16983859Sml29623 int
16993859Sml29623 nxge_txdma_hung(p_nxge_t nxgep)
17003859Sml29623 {
17017812SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->tx_set;
17027812SMichael.Speer@Sun.COM int tdc;
17037812SMichael.Speer@Sun.COM boolean_t shared;
17043859Sml29623
17053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));
17066495Sspeer
17076495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
17083859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
17096495Sspeer "<== nxge_txdma_hung: NULL ring pointer(s)"));
17103859Sml29623 return (B_FALSE);
17113859Sml29623 }
17123859Sml29623
17136495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
17147812SMichael.Speer@Sun.COM /*
17157812SMichael.Speer@Sun.COM * Grab the shared state of the TDC.
17167812SMichael.Speer@Sun.COM */
17177812SMichael.Speer@Sun.COM if (isLDOMservice(nxgep)) {
17187812SMichael.Speer@Sun.COM nxge_hio_data_t *nhd =
17197812SMichael.Speer@Sun.COM (nxge_hio_data_t *)nxgep->nxge_hw_p->hio;
17207812SMichael.Speer@Sun.COM
17217812SMichael.Speer@Sun.COM MUTEX_ENTER(&nhd->lock);
17227812SMichael.Speer@Sun.COM shared = nxgep->tdc_is_shared[tdc];
17237812SMichael.Speer@Sun.COM MUTEX_EXIT(&nhd->lock);
17247812SMichael.Speer@Sun.COM } else {
17257812SMichael.Speer@Sun.COM shared = B_FALSE;
17267812SMichael.Speer@Sun.COM }
17277812SMichael.Speer@Sun.COM
17287812SMichael.Speer@Sun.COM /*
17297812SMichael.Speer@Sun.COM * Only process the TDCs that we own and that are not shared.
17307812SMichael.Speer@Sun.COM */
17317812SMichael.Speer@Sun.COM if (((1 << tdc) & set->owned.map) && !shared) {
17326495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
17336495Sspeer if (ring) {
17346495Sspeer if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
17356495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
17366495Sspeer "==> nxge_txdma_hung: TDC %d hung",
17376495Sspeer tdc));
17386495Sspeer return (B_TRUE);
17396495Sspeer }
17406495Sspeer }
17413859Sml29623 }
17423859Sml29623 }
17433859Sml29623
17443859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));
17453859Sml29623
17463859Sml29623 return (B_FALSE);
17473859Sml29623 }
17483859Sml29623
17496495Sspeer /*
17506495Sspeer * nxge_txdma_channel_hung
17516495Sspeer *
17526495Sspeer * Determine whether a single TDC appears to be hung.
17536495Sspeer *
17546495Sspeer * Arguments:
17556495Sspeer * nxgep
17566495Sspeer * ring <channel>'s ring.
17576495Sspeer * channel The channel to check.
17586495Sspeer *
17596495Sspeer * Notes:
17606495Sspeer * Called by nxge_txdma.c:nxge_txdma_hung()
17616495Sspeer *
17626495Sspeer * NPI/NXGE function calls:
17636495Sspeer * npi_txdma_ring_head_get()
17646495Sspeer *
17656495Sspeer * Registers accessed:
17666495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
17676495Sspeer *
17686495Sspeer * Context:
17696495Sspeer * Any domain
17706495Sspeer */
17713859Sml29623 int
17723859Sml29623 nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
17733859Sml29623 {
17743859Sml29623 uint16_t head_index, tail_index;
17753859Sml29623 boolean_t head_wrap, tail_wrap;
17763859Sml29623 npi_handle_t handle;
17773859Sml29623 tx_ring_hdl_t tx_head;
17783859Sml29623 uint_t tx_rd_index;
17793859Sml29623
17803859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));
17813859Sml29623
17823859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
17833859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
17846929Smisaki "==> nxge_txdma_channel_hung: channel %d", channel));
17853859Sml29623 MUTEX_ENTER(&tx_ring_p->lock);
17863859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
17873859Sml29623
17883859Sml29623 tail_index = tx_ring_p->wr_index;
17893859Sml29623 tail_wrap = tx_ring_p->wr_index_wrap;
17903859Sml29623 tx_rd_index = tx_ring_p->rd_index;
17913859Sml29623 MUTEX_EXIT(&tx_ring_p->lock);
17923859Sml29623
17933859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
17946929Smisaki "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
17956929Smisaki "tail_index %d tail_wrap %d ",
17966929Smisaki channel, tx_rd_index, tail_index, tail_wrap));
17973859Sml29623 /*
17983859Sml29623 * Read the hardware maintained transmit head
17993859Sml29623 * and wrap around bit.
18003859Sml29623 */
18013859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &tx_head);
18023859Sml29623 head_index = tx_head.bits.ldw.head;
18033859Sml29623 head_wrap = tx_head.bits.ldw.wrap;
18043859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
18056929Smisaki "==> nxge_txdma_channel_hung: "
18066929Smisaki "tx_rd_index %d tail %d tail_wrap %d "
18076929Smisaki "head %d wrap %d",
18086929Smisaki tx_rd_index, tail_index, tail_wrap,
18096929Smisaki head_index, head_wrap));
18103859Sml29623
18113859Sml29623 if (TXDMA_RING_EMPTY(head_index, head_wrap,
18126929Smisaki tail_index, tail_wrap) &&
18136929Smisaki (head_index == tx_rd_index)) {
18143859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
18156929Smisaki "==> nxge_txdma_channel_hung: EMPTY"));
18163859Sml29623 return (B_FALSE);
18173859Sml29623 }
18183859Sml29623
18193859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
18206929Smisaki "==> nxge_txdma_channel_hung: Checking if ring full"));
18213859Sml29623 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index,
18226929Smisaki tail_wrap)) {
18233859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
18246929Smisaki "==> nxge_txdma_channel_hung: full"));
18253859Sml29623 return (B_TRUE);
18263859Sml29623 }
18273859Sml29623
18283859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung"));
18293859Sml29623
18303859Sml29623 return (B_FALSE);
18313859Sml29623 }
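/*
 * The hang test above distinguishes "empty" from "full" even though both
 * states have head == tail, by comparing the wrap bits that toggle each
 * time an index passes the end of the ring.  A self-contained sketch of
 * that comparison follows; ring_empty() and ring_full() are hypothetical
 * names that mirror the TXDMA_RING_EMPTY/TXDMA_RING_FULL style of check.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
ring_empty(uint16_t head, bool head_wrap, uint16_t tail, bool tail_wrap)
{
	/* Same index and same wrap bit: nothing is outstanding. */
	return (head == tail && head_wrap == tail_wrap);
}

static bool
ring_full(uint16_t head, bool head_wrap, uint16_t tail, bool tail_wrap)
{
	/* Same index but the wrap bits differ: the producer lapped us. */
	return (head == tail && head_wrap != tail_wrap);
}

int
main(void)
{
	(void) printf("empty: %d\n", ring_empty(5, false, 5, false));
	(void) printf("full:  %d\n", ring_full(5, true, 5, false));
	return (0);
}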
18323859Sml29623
18336495Sspeer /*
18346495Sspeer * nxge_fixup_hung_txdma_rings
18356495Sspeer *
18366495Sspeer * 'Fix up' every TDC owned by this instance: reclaim each ring's
18376495Sspeer * completed descriptors and then disable the channel.
18386495Sspeer *
18396495Sspeer * Arguments:
18406495Sspeer * nxgep
18426495Sspeer *
18436495Sspeer * Notes:
18446495Sspeer * Called by nxge_check_tx_hang()
18456495Sspeer *
18466495Sspeer * NPI/NXGE function calls:
18476495Sspeer * npi_txdma_ring_head_get()
18486495Sspeer *
18496495Sspeer * Registers accessed:
18506495Sspeer * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low
18516495Sspeer *
18526495Sspeer * Context:
18536495Sspeer * Any domain
18546495Sspeer */
18553859Sml29623 /*ARGSUSED*/
18563859Sml29623 void
18573859Sml29623 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep)
18583859Sml29623 {
18596495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
18606495Sspeer int tdc;
18613859Sml29623
18623859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings"));
18636495Sspeer
18646495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
18653859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
18666495Sspeer "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)"));
18673859Sml29623 return;
18683859Sml29623 }
18693859Sml29623
18706495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
18716495Sspeer if ((1 << tdc) & set->owned.map) {
18726495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
18736495Sspeer if (ring) {
18746495Sspeer nxge_txdma_fixup_hung_channel(nxgep, ring, tdc);
18756495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
18766495Sspeer "==> nxge_fixup_hung_txdma_rings: TDC %d",
18776495Sspeer tdc));
18786495Sspeer }
18796495Sspeer }
18803859Sml29623 }
18813859Sml29623
18823859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings"));
18833859Sml29623 }
18843859Sml29623
18856495Sspeer /*
18866495Sspeer * nxge_txdma_fixup_hung_channel
18876495Sspeer *
18886495Sspeer * 'Fix' a hung TDC.
18896495Sspeer *
18906495Sspeer * Arguments:
18916495Sspeer * nxgep
18926495Sspeer * channel The channel to fix.
18936495Sspeer *
18946495Sspeer * Notes:
18956495Sspeer * Called by nxge_fixup_hung_txdma_rings()
18966495Sspeer *
18976495Sspeer * 1. Reclaim the TDC.
18986495Sspeer * 2. Disable the TDC.
18996495Sspeer *
19006495Sspeer * NPI/NXGE function calls:
19016495Sspeer * nxge_txdma_reclaim()
19026495Sspeer * npi_txdma_channel_disable(TX_CS)
19036495Sspeer * npi_txdma_inj_int_error_set(TDMC_INTR_DBG)
19046495Sspeer *
19056495Sspeer * Registers accessed:
19066495Sspeer * TX_CS DMC+0x40028 Transmit Control And Status
19076495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
19086495Sspeer *
19096495Sspeer * Context:
19106495Sspeer * Any domain
19116495Sspeer */
19123859Sml29623 /*ARGSUSED*/
19133859Sml29623 void
19143859Sml29623 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel)
19153859Sml29623 {
19163859Sml29623 p_tx_ring_t ring_p;
19173859Sml29623
19183859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel"));
19193859Sml29623 ring_p = nxge_txdma_get_ring(nxgep, channel);
19203859Sml29623 if (ring_p == NULL) {
19213859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19226929Smisaki "<== nxge_txdma_fix_hung_channel"));
19233859Sml29623 return;
19243859Sml29623 }
19253859Sml29623
19263859Sml29623 if (ring_p->tdc != channel) {
19273859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19286929Smisaki "<== nxge_txdma_fix_hung_channel: channel not matched "
19296929Smisaki "ring tdc %d passed channel %d",
19306929Smisaki ring_p->tdc, channel));
19313859Sml29623 return;
19323859Sml29623 }
19333859Sml29623
19343859Sml29623 nxge_txdma_fixup_hung_channel(nxgep, ring_p, channel);
19353859Sml29623
19363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel"));
19373859Sml29623 }
19383859Sml29623
19393859Sml29623 /*ARGSUSED*/
19403859Sml29623 void
19413859Sml29623 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p,
19423859Sml29623 uint16_t channel)
19433859Sml29623 {
19443859Sml29623 npi_handle_t handle;
19453859Sml29623 tdmc_intr_dbg_t intr_dbg;
19463859Sml29623 int status = NXGE_OK;
19473859Sml29623
19483859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel"));
19493859Sml29623
19503859Sml29623 if (ring_p == NULL) {
19513859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19526929Smisaki "<== nxge_txdma_fixup_hung_channel: NULL ring pointer"));
19533859Sml29623 return;
19543859Sml29623 }
19553859Sml29623
19563859Sml29623 if (ring_p->tdc != channel) {
19573859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19586929Smisaki "<== nxge_txdma_fixup_hung_channel: channel "
19596929Smisaki "not matched "
19606929Smisaki "ring tdc %d passed channel %d",
19616929Smisaki ring_p->tdc, channel));
19623859Sml29623 return;
19633859Sml29623 }
19643859Sml29623
19653859Sml29623 /* Reclaim descriptors */
19663859Sml29623 MUTEX_ENTER(&ring_p->lock);
19673859Sml29623 (void) nxge_txdma_reclaim(nxgep, ring_p, 0);
19683859Sml29623 MUTEX_EXIT(&ring_p->lock);
19693859Sml29623
19703859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
19713859Sml29623 /*
19723859Sml29623 * Stop the DMA channel and wait for the stop-done bit.
19733859Sml29623 * If the stop-done bit does not get set, inject an
19743859Sml29623 * error to force it.
19753859Sml29623 */
19763859Sml29623 status = npi_txdma_channel_disable(handle, channel);
19773859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) {
19783859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19796929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped OK "
19806929Smisaki "ring tdc %d passed channel %d",
19816929Smisaki ring_p->tdc, channel));
19823859Sml29623 return;
19833859Sml29623 }
19843859Sml29623
19853859Sml29623 /* Inject any error */
19863859Sml29623 intr_dbg.value = 0;
19873859Sml29623 intr_dbg.bits.ldw.nack_pref = 1;
19883859Sml29623 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg);
19893859Sml29623
19903859Sml29623 /* Stop done bit will be set as a result of error injection */
19913859Sml29623 status = npi_txdma_channel_disable(handle, channel);
19923859Sml29623 if (!(status & NPI_TXDMA_STOP_FAILED)) {
19933859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
19946929Smisaki "<== nxge_txdma_fixup_hung_channel: stopped again "
19956929Smisaki "ring tdc %d passed channel %d",
19966929Smisaki ring_p->tdc, channel));
19973859Sml29623 return;
19983859Sml29623 }
19993859Sml29623
20003859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
20016929Smisaki "<== nxge_txdma_fixup_hung_channel: stop done still not set!! "
20026929Smisaki "ring tdc %d passed channel %d",
20036929Smisaki ring_p->tdc, channel));
20043859Sml29623
20053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel"));
20063859Sml29623 }
20073859Sml29623
20083859Sml29623 /*ARGSUSED*/
20093859Sml29623 void
20103859Sml29623 nxge_reclaim_rings(p_nxge_t nxgep)
20113859Sml29623 {
20126495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
20136495Sspeer int tdc;
20146495Sspeer
20156495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings"));
20166495Sspeer
20176495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
20183859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
20196495Sspeer "<== nxge_reclaim_rings: NULL ring pointer(s)"));
20203859Sml29623 return;
20213859Sml29623 }
20223859Sml29623
20236495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
20246495Sspeer if ((1 << tdc) & set->owned.map) {
20256495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
20266495Sspeer if (ring) {
20276495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
20286495Sspeer "==> nxge_reclaim_rings: TDC %d", tdc));
20296495Sspeer MUTEX_ENTER(&ring->lock);
20308275SEric Cheng (void) nxge_txdma_reclaim(nxgep, ring, 0);
20316495Sspeer MUTEX_EXIT(&ring->lock);
20326495Sspeer }
20336495Sspeer }
20343859Sml29623 }
20353859Sml29623
20363859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings"));
20373859Sml29623 }
20383859Sml29623
20393859Sml29623 void
20403859Sml29623 nxge_txdma_regs_dump_channels(p_nxge_t nxgep)
20413859Sml29623 {
20426495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
20436495Sspeer npi_handle_t handle;
20446495Sspeer int tdc;
20456495Sspeer
20466495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels"));
20473859Sml29623
20483859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
20496495Sspeer
20506495Sspeer if (!isLDOMguest(nxgep)) {
20516495Sspeer (void) npi_txdma_dump_fzc_regs(handle);
20526495Sspeer
20536495Sspeer /* Dump TXC registers. */
20546495Sspeer (void) npi_txc_dump_fzc_regs(handle);
20556495Sspeer (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num);
20563859Sml29623 }
20573859Sml29623
20586495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
20593859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
20606495Sspeer "<== nxge_txdma_regs_dump_channels: NULL ring pointer(s)"));
20613859Sml29623 return;
20623859Sml29623 }
20633859Sml29623
20646495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
20656495Sspeer if ((1 << tdc) & set->owned.map) {
20666495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
20676495Sspeer if (ring) {
20686495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
20696495Sspeer "==> nxge_txdma_regs_dump_channels: "
20706495Sspeer "TDC %d", tdc));
20716495Sspeer (void) npi_txdma_dump_tdc_regs(handle, tdc);
20726495Sspeer
20736495Sspeer /* Dump TXC registers, if able to. */
20746495Sspeer if (!isLDOMguest(nxgep)) {
20756495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
20766495Sspeer "==> nxge_txdma_regs_dump_channels:"
20776495Sspeer " FZC TDC %d", tdc));
20786495Sspeer (void) npi_txc_dump_tdc_fzc_regs
20796495Sspeer (handle, tdc);
20806495Sspeer }
20816495Sspeer nxge_txdma_regs_dump(nxgep, tdc);
20826495Sspeer }
20836495Sspeer }
20843859Sml29623 }
20853859Sml29623
20863859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
20873859Sml29623 }
20883859Sml29623
20893859Sml29623 void
20903859Sml29623 nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
20913859Sml29623 {
20923859Sml29623 npi_handle_t handle;
20933859Sml29623 tx_ring_hdl_t hdl;
20943859Sml29623 tx_ring_kick_t kick;
20953859Sml29623 tx_cs_t cs;
20963859Sml29623 txc_control_t control;
20973859Sml29623 uint32_t bitmap = 0;
20983859Sml29623 uint32_t burst = 0;
20993859Sml29623 uint32_t bytes = 0;
21003859Sml29623 dma_log_page_t cfg;
21013859Sml29623
21023859Sml29623 printf("\n\tfunc # %d tdc %d ",
21036929Smisaki nxgep->function_num, channel);
21043859Sml29623 cfg.page_num = 0;
21053859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
21063859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg);
21073859Sml29623 printf("\n\tlog page func %d valid page 0 %d",
21086929Smisaki cfg.func_num, cfg.valid);
21093859Sml29623 cfg.page_num = 1;
21103859Sml29623 (void) npi_txdma_log_page_get(handle, channel, &cfg);
21113859Sml29623 printf("\n\tlog page func %d valid page 1 %d",
21126929Smisaki cfg.func_num, cfg.valid);
21133859Sml29623
21143859Sml29623 (void) npi_txdma_ring_head_get(handle, channel, &hdl);
21153859Sml29623 (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
21163859Sml29623 printf("\n\thead value is 0x%0llx",
21176929Smisaki (long long)hdl.value);
21183859Sml29623 printf("\n\thead index %d", hdl.bits.ldw.head);
21193859Sml29623 printf("\n\tkick value is 0x%0llx",
21206929Smisaki (long long)kick.value);
21213859Sml29623 printf("\n\ttail index %d\n", kick.bits.ldw.tail);
21223859Sml29623
21233859Sml29623 (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
21243859Sml29623 printf("\n\tControl status is 0x%0llx", (long long)cs.value);
21253859Sml29623 printf("\n\tControl status RST state %d", cs.bits.ldw.rst);
21263859Sml29623
21273859Sml29623 (void) npi_txc_control(handle, OP_GET, &control);
21283859Sml29623 (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
21293859Sml29623 (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
21303859Sml29623 (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);
21313859Sml29623
21323859Sml29623 printf("\n\tTXC port control 0x%0llx",
21336929Smisaki (long long)control.value);
21343859Sml29623 printf("\n\tTXC port bitmap 0x%x", bitmap);
21353859Sml29623 printf("\n\tTXC max burst %d", burst);
21363859Sml29623 printf("\n\tTXC bytes xmt %d\n", bytes);
21373859Sml29623
21383859Sml29623 {
21393859Sml29623 ipp_status_t status;
21403859Sml29623
21413859Sml29623 (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
21425125Sjoycey #if defined(__i386)
21435125Sjoycey printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
21445125Sjoycey #else
21453859Sml29623 printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
21465125Sjoycey #endif
21473859Sml29623 }
21483859Sml29623 }
21493859Sml29623
21503859Sml29623 /*
21516495Sspeer * nxge_tdc_hvio_setup
21526495Sspeer *
21536495Sspeer * Record the hypervisor I/O addresses and sizes of the channel's transmit buffer and control areas in its tx_ring_t (sun4v NIU workaround).
21546495Sspeer *
21556495Sspeer * Arguments:
21566495Sspeer * nxgep
21576495Sspeer * channel The channel to map.
21586495Sspeer *
21596495Sspeer * Notes:
21606495Sspeer *
21616495Sspeer * NPI/NXGE function calls:
21626495Sspeer * na
21636495Sspeer *
21646495Sspeer * Context:
21656495Sspeer * Service domain only (guest domains use nxge_tdc_lp_conf() instead).
21663859Sml29623 */
21676495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
21686495Sspeer static void
21696495Sspeer nxge_tdc_hvio_setup(
21706495Sspeer nxge_t *nxgep, int channel)
21713859Sml29623 {
21726495Sspeer nxge_dma_common_t *data;
21736495Sspeer nxge_dma_common_t *control;
21746495Sspeer tx_ring_t *ring;
21756495Sspeer
21766495Sspeer ring = nxgep->tx_rings->rings[channel];
21776495Sspeer data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
21786495Sspeer
21796495Sspeer ring->hv_set = B_FALSE;
21806495Sspeer
21816495Sspeer ring->hv_tx_buf_base_ioaddr_pp =
21826495Sspeer (uint64_t)data->orig_ioaddr_pp;
21836495Sspeer ring->hv_tx_buf_ioaddr_size =
21846495Sspeer (uint64_t)data->orig_alength;
21856495Sspeer
21866495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
21876929Smisaki "hv data buf base io $%p size 0x%llx (%d) buf base io $%p "
21886929Smisaki "orig vatopa base io $%p orig_len 0x%llx (%d)",
21896929Smisaki ring->hv_tx_buf_base_ioaddr_pp,
21906929Smisaki ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size,
21916929Smisaki data->ioaddr_pp, data->orig_vatopa,
21926929Smisaki data->orig_alength, data->orig_alength));
21936495Sspeer
21946495Sspeer control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
21956495Sspeer
21966495Sspeer ring->hv_tx_cntl_base_ioaddr_pp =
21976495Sspeer (uint64_t)control->orig_ioaddr_pp;
21986495Sspeer ring->hv_tx_cntl_ioaddr_size =
21996495Sspeer (uint64_t)control->orig_alength;
22006495Sspeer
22016495Sspeer NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: "
22026929Smisaki "hv cntl base io $%p orig ioaddr_pp ($%p) "
22036929Smisaki "orig vatopa ($%p) size 0x%llx (%d 0x%x)",
22046929Smisaki ring->hv_tx_cntl_base_ioaddr_pp,
22056929Smisaki control->orig_ioaddr_pp, control->orig_vatopa,
22066929Smisaki ring->hv_tx_cntl_ioaddr_size,
22076929Smisaki control->orig_alength, control->orig_alength));
22086495Sspeer }
22093859Sml29623 #endif
22103859Sml29623
22116495Sspeer static nxge_status_t
22126495Sspeer nxge_map_txdma(p_nxge_t nxgep, int channel)
22136495Sspeer {
22146495Sspeer nxge_dma_common_t **pData;
22156495Sspeer nxge_dma_common_t **pControl;
22166495Sspeer tx_ring_t **pRing, *ring;
22176495Sspeer tx_mbox_t **mailbox;
22186495Sspeer uint32_t num_chunks;
22196495Sspeer
22206495Sspeer nxge_status_t status = NXGE_OK;
22216495Sspeer
22226495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma"));
22236495Sspeer
22246495Sspeer if (!nxgep->tx_cntl_pool_p->buf_allocated) {
22256495Sspeer if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) {
22266495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
22276495Sspeer "<== nxge_map_txdma: buf not allocated"));
22286495Sspeer return (NXGE_ERROR);
22296495Sspeer }
22303859Sml29623 }
22313859Sml29623
22326495Sspeer if (nxge_alloc_txb(nxgep, channel) != NXGE_OK)
22336495Sspeer return (NXGE_ERROR);
22346495Sspeer
22356495Sspeer num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel];
22366495Sspeer pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel];
22376495Sspeer pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel];
22386495Sspeer pRing = &nxgep->tx_rings->rings[channel];
22396495Sspeer mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
22406495Sspeer
22416495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
22426929Smisaki "tx_rings $%p tx_desc_rings $%p",
22436929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings));
22443859Sml29623
22453859Sml29623 /*
22466495Sspeer * Map descriptors from the buffer pools for <channel>.
22476495Sspeer */
22486495Sspeer
22496495Sspeer /*
22506495Sspeer * Set up and prepare buffer blocks, descriptors
22516495Sspeer * and mailbox.
22523859Sml29623 */
22536495Sspeer status = nxge_map_txdma_channel(nxgep, channel,
22546495Sspeer pData, pRing, num_chunks, pControl, mailbox);
22556495Sspeer if (status != NXGE_OK) {
22566495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22576929Smisaki "==> nxge_map_txdma(%d): nxge_map_txdma_channel() "
22586929Smisaki "returned 0x%x",
22596929Smisaki channel, status));
22606495Sspeer return (status);
22616495Sspeer }
22626495Sspeer
22636495Sspeer ring = *pRing;
22646495Sspeer
22656495Sspeer ring->index = (uint16_t)channel;
22666495Sspeer ring->tdc_stats = &nxgep->statsp->tdc_stats[channel];
22676495Sspeer
22686495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
22696495Sspeer if (isLDOMguest(nxgep)) {
22706495Sspeer (void) nxge_tdc_lp_conf(nxgep, channel);
22716495Sspeer } else {
22726495Sspeer nxge_tdc_hvio_setup(nxgep, channel);
22736495Sspeer }
22743859Sml29623 #endif
22756495Sspeer
22766495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: "
22776495Sspeer "(status 0x%x channel %d)", status, channel));
22783859Sml29623
22793859Sml29623 return (status);
22803859Sml29623 }
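/*
 * A hedged, self-contained sketch of the per-channel setup order used by
 * nxge_map_txdma() above: make sure the shared control pool exists,
 * allocate the channel's buffers, then map its descriptor ring and
 * mailbox, bailing out early on any failure.  alloc_pool_once(),
 * alloc_buffers() and map_channel() are hypothetical stand-ins, not
 * driver functions.
 */
#include <stdio.h>

typedef enum { SETUP_OK = 0, SETUP_ERR = -1 } setup_status_t;

static setup_status_t alloc_pool_once(void)	{ return (SETUP_OK); }
static setup_status_t alloc_buffers(int ch)	{ (void) ch; return (SETUP_OK); }
static setup_status_t map_channel(int ch)	{ (void) ch; return (SETUP_OK); }

static setup_status_t
setup_tx_channel(int channel)
{
	if (alloc_pool_once() != SETUP_OK)
		return (SETUP_ERR);		/* shared control pool */
	if (alloc_buffers(channel) != SETUP_OK)
		return (SETUP_ERR);		/* per-channel buffers */
	if (map_channel(channel) != SETUP_OK)
		return (SETUP_ERR);		/* ring, mailbox, stats */
	return (SETUP_OK);
}

int
main(void)
{
	if (setup_tx_channel(0) != SETUP_OK) {
		(void) printf("channel setup failed\n");
		return (1);
	}
	return (0);
}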
22813859Sml29623
22823859Sml29623 static nxge_status_t
22833859Sml29623 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel,
22843859Sml29623 p_nxge_dma_common_t *dma_buf_p,
22853859Sml29623 p_tx_ring_t *tx_desc_p,
22863859Sml29623 uint32_t num_chunks,
22873859Sml29623 p_nxge_dma_common_t *dma_cntl_p,
22883859Sml29623 p_tx_mbox_t *tx_mbox_p)
22893859Sml29623 {
22903859Sml29623 int status = NXGE_OK;
22913859Sml29623
22923859Sml29623 /*
22933859Sml29623 * Set up and prepare buffer blocks, descriptors
22943859Sml29623 * and mailbox.
22953859Sml29623 */
22966495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL,
22976929Smisaki "==> nxge_map_txdma_channel (channel %d)", channel));
22983859Sml29623 /*
22993859Sml29623 * Transmit buffer blocks
23003859Sml29623 */
23013859Sml29623 status = nxge_map_txdma_channel_buf_ring(nxgep, channel,
23026929Smisaki dma_buf_p, tx_desc_p, num_chunks);
23033859Sml29623 if (status != NXGE_OK) {
23043859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
23056929Smisaki "==> nxge_map_txdma_channel (channel %d): "
23066929Smisaki "map buffer failed 0x%x", channel, status));
23073859Sml29623 goto nxge_map_txdma_channel_exit;
23083859Sml29623 }
23093859Sml29623
23103859Sml29623 /*
23113859Sml29623 * Transmit block ring, and mailbox.
23123859Sml29623 */
23133859Sml29623 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p,
23146929Smisaki tx_mbox_p);
23153859Sml29623
23163859Sml29623 goto nxge_map_txdma_channel_exit;
23173859Sml29623
23183859Sml29623 nxge_map_txdma_channel_fail1:
23196495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23206929Smisaki "==> nxge_map_txdma_channel: unmap buf"
23216929Smisaki "(status 0x%x channel %d)",
23226929Smisaki status, channel));
23233859Sml29623 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p);
23243859Sml29623
23253859Sml29623 nxge_map_txdma_channel_exit:
23266495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL,
23276929Smisaki "<== nxge_map_txdma_channel: "
23286929Smisaki "(status 0x%x channel %d)",
23296929Smisaki status, channel));
23303859Sml29623
23313859Sml29623 return (status);
23323859Sml29623 }
23333859Sml29623
23343859Sml29623 /*ARGSUSED*/
23353859Sml29623 static void
23366495Sspeer nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel)
23373859Sml29623 {
23386495Sspeer tx_ring_t *ring;
23396495Sspeer tx_mbox_t *mailbox;
23406495Sspeer
23413859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
23426929Smisaki "==> nxge_unmap_txdma_channel (channel %d)", channel));
23433859Sml29623 /*
23443859Sml29623 * unmap tx block ring, and mailbox.
23453859Sml29623 */
23466495Sspeer ring = nxgep->tx_rings->rings[channel];
23476495Sspeer mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
23486495Sspeer
23496495Sspeer (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox);
23503859Sml29623
23513859Sml29623 /* unmap buffer blocks */
23526495Sspeer (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring);
23536495Sspeer
23546495Sspeer nxge_free_txb(nxgep, channel);
23553859Sml29623
23569730SMichael.Speer@Sun.COM /*
23579730SMichael.Speer@Sun.COM * Clean up the reference to the ring now that it no longer exists.
23589730SMichael.Speer@Sun.COM */
23599730SMichael.Speer@Sun.COM nxgep->tx_rings->rings[channel] = NULL;
23609730SMichael.Speer@Sun.COM
23613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel"));
23623859Sml29623 }
23633859Sml29623
23646495Sspeer /*
23656495Sspeer * nxge_map_txdma_channel_cfg_ring
23666495Sspeer *
23676495Sspeer * Initialize the software copies of this TDC's configuration
23686495Sspeer * registers, zero its descriptor ring, and allocate and map its mailbox.
23696495Sspeer *
23706495Sspeer * Arguments:
23716495Sspeer * nxgep
23726495Sspeer * dma_channel The channel to map.
23736495Sspeer * dma_cntl_p
23746495Sspeer * tx_ring_p dma_channel's transmit ring
23756495Sspeer * tx_mbox_p dma_channel's mailbox
23766495Sspeer *
23776495Sspeer * Notes:
23786495Sspeer *
23796495Sspeer * NPI/NXGE function calls:
23806495Sspeer * nxge_setup_dma_common()
23816495Sspeer *
23826495Sspeer * Registers accessed:
23836495Sspeer * none.
23846495Sspeer *
23856495Sspeer * Context:
23866495Sspeer * Any domain
23876495Sspeer */
23883859Sml29623 /*ARGSUSED*/
23893859Sml29623 static void
23903859Sml29623 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
23913859Sml29623 p_nxge_dma_common_t *dma_cntl_p,
23923859Sml29623 p_tx_ring_t tx_ring_p,
23933859Sml29623 p_tx_mbox_t *tx_mbox_p)
23943859Sml29623 {
23953859Sml29623 p_tx_mbox_t mboxp;
23963859Sml29623 p_nxge_dma_common_t cntl_dmap;
23973859Sml29623 p_nxge_dma_common_t dmap;
23983859Sml29623 p_tx_rng_cfig_t tx_ring_cfig_p;
23993859Sml29623 p_tx_ring_kick_t tx_ring_kick_p;
24003859Sml29623 p_tx_cs_t tx_cs_p;
24013859Sml29623 p_tx_dma_ent_msk_t tx_evmask_p;
24023859Sml29623 p_txdma_mbh_t mboxh_p;
24033859Sml29623 p_txdma_mbl_t mboxl_p;
24043859Sml29623 uint64_t tx_desc_len;
24053859Sml29623
24063859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24076929Smisaki "==> nxge_map_txdma_channel_cfg_ring"));
24083859Sml29623
24093859Sml29623 cntl_dmap = *dma_cntl_p;
24103859Sml29623
24113859Sml29623 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc;
24123859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size,
24136929Smisaki sizeof (tx_desc_t));
24143859Sml29623 /*
24153859Sml29623 * Zero out transmit ring descriptors.
24163859Sml29623 */
24173859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength);
24183859Sml29623 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig);
24193859Sml29623 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick);
24203859Sml29623 tx_cs_p = &(tx_ring_p->tx_cs);
24213859Sml29623 tx_evmask_p = &(tx_ring_p->tx_evmask);
24223859Sml29623 tx_ring_cfig_p->value = 0;
24233859Sml29623 tx_ring_kick_p->value = 0;
24243859Sml29623 tx_cs_p->value = 0;
24253859Sml29623 tx_evmask_p->value = 0;
24263859Sml29623
24273859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24286929Smisaki "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p",
24296929Smisaki dma_channel,
24306929Smisaki dmap->dma_cookie.dmac_laddress));
24313859Sml29623
24323859Sml29623 tx_ring_cfig_p->value = 0;
24333859Sml29623 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3);
24343859Sml29623 tx_ring_cfig_p->value =
24356929Smisaki (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) |
24366929Smisaki (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT);
24373859Sml29623
24383859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24396929Smisaki "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx",
24406929Smisaki dma_channel,
24416929Smisaki tx_ring_cfig_p->value));
24423859Sml29623
24433859Sml29623 tx_cs_p->bits.ldw.rst = 1;
24443859Sml29623
24453859Sml29623 /* Map in mailbox */
24463859Sml29623 mboxp = (p_tx_mbox_t)
24476929Smisaki KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP);
24483859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox;
24493859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t));
24503859Sml29623 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh;
24513859Sml29623 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl;
24523859Sml29623 mboxh_p->value = mboxl_p->value = 0;
24533859Sml29623
24543859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24556929Smisaki "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
24566929Smisaki dmap->dma_cookie.dmac_laddress));
24573859Sml29623
24583859Sml29623 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >>
24596929Smisaki TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK);
24603859Sml29623
24613859Sml29623 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress &
24626929Smisaki TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT);
24633859Sml29623
24643859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24656929Smisaki "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx",
24666929Smisaki dmap->dma_cookie.dmac_laddress));
24673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24686929Smisaki "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p "
24696929Smisaki "mbox $%p",
24706929Smisaki mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr));
24713859Sml29623 tx_ring_p->page_valid.value = 0;
24723859Sml29623 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0;
24733859Sml29623 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0;
24743859Sml29623 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0;
24753859Sml29623 tx_ring_p->page_hdl.value = 0;
24763859Sml29623
24773859Sml29623 tx_ring_p->page_valid.bits.ldw.page0 = 1;
24783859Sml29623 tx_ring_p->page_valid.bits.ldw.page1 = 1;
24793859Sml29623
24803859Sml29623 tx_ring_p->max_burst.value = 0;
24813859Sml29623 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT;
24823859Sml29623
24833859Sml29623 *tx_mbox_p = mboxp;
24843859Sml29623
24853859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24866929Smisaki "<== nxge_map_txdma_channel_cfg_ring"));
24873859Sml29623 }
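/*
 * The ring configuration value built above packs the descriptor ring's
 * DMA base address together with its length, which the driver first
 * scales down by 8 (tx_ring_size >> 3).  A self-contained sketch of that
 * style of mask-and-shift register packing follows; the mask and shift
 * values below are invented for illustration and are not the real
 * TX_RNG_CFIG layout.
 */
#include <stdint.h>
#include <stdio.h>

#define	CFG_ADDR_MASK	0x00000ffffffff000ULL	/* illustrative base mask */
#define	CFG_LEN_SHIFT	48			/* illustrative length field */

static uint64_t
pack_ring_cfg(uint64_t ring_base, uint32_t ring_size)
{
	uint64_t len = ring_size >> 3;	/* scaled length, as in the driver */

	return ((ring_base & CFG_ADDR_MASK) | (len << CFG_LEN_SHIFT));
}

int
main(void)
{
	uint64_t cfg = pack_ring_cfg(0x123456789000ULL, 1024);

	(void) printf("cfg = 0x%016llx\n", (unsigned long long)cfg);
	return (0);
}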
24883859Sml29623
24893859Sml29623 /*ARGSUSED*/
24903859Sml29623 static void
24913859Sml29623 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep,
24923859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
24933859Sml29623 {
24943859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
24956929Smisaki "==> nxge_unmap_txdma_channel_cfg_ring: channel %d",
24966929Smisaki tx_ring_p->tdc));
24973859Sml29623
24983859Sml29623 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t));
24993859Sml29623
25003859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25016929Smisaki "<== nxge_unmap_txdma_channel_cfg_ring"));
25023859Sml29623 }
25033859Sml29623
25046495Sspeer /*
25056495Sspeer * nxge_map_txdma_channel_buf_ring
25066495Sspeer *
25076495Sspeer * Map <channel>'s transmit buffer chunks into a ring of tx_msg_t entries.
25086495Sspeer * Arguments:
25096495Sspeer * nxgep
25106495Sspeer * channel The channel to map.
25116495Sspeer * dma_buf_p
25126495Sspeer * tx_desc_p channel's descriptor ring
25136495Sspeer * num_chunks
25146495Sspeer *
25156495Sspeer * Notes:
25166495Sspeer *
25176495Sspeer * NPI/NXGE function calls:
25186495Sspeer * nxge_setup_dma_common()
25196495Sspeer *
25206495Sspeer * Registers accessed:
25216495Sspeer * none.
25226495Sspeer *
25236495Sspeer * Context:
25246495Sspeer * Any domain
25256495Sspeer */
25263859Sml29623 static nxge_status_t
25273859Sml29623 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
25283859Sml29623 p_nxge_dma_common_t *dma_buf_p,
25293859Sml29623 p_tx_ring_t *tx_desc_p, uint32_t num_chunks)
25303859Sml29623 {
25313859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp;
25323859Sml29623 p_nxge_dma_common_t dmap;
25333859Sml29623 nxge_os_dma_handle_t tx_buf_dma_handle;
25343859Sml29623 p_tx_ring_t tx_ring_p;
25353859Sml29623 p_tx_msg_t tx_msg_ring;
25363859Sml29623 nxge_status_t status = NXGE_OK;
25373859Sml29623 int ddi_status = DDI_SUCCESS;
25383859Sml29623 int i, j, index;
25393859Sml29623 uint32_t size, bsize;
25403859Sml29623 uint32_t nblocks, nmsgs;
25418275SEric Cheng char qname[TASKQ_NAMELEN];
25423859Sml29623
25433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25446929Smisaki "==> nxge_map_txdma_channel_buf_ring"));
25453859Sml29623
25463859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p;
25473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25483859Sml29623 " nxge_map_txdma_channel_buf_ring: channel %d to map %d "
25493859Sml29623 "chunks bufp $%p",
25506929Smisaki channel, num_chunks, dma_bufp));
25513859Sml29623
25523859Sml29623 nmsgs = 0;
25533859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) {
25543859Sml29623 nmsgs += tmp_bufp->nblocks;
25553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25566929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d "
25576929Smisaki "bufp $%p nblocks %d nmsgs %d",
25586929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
25593859Sml29623 }
25603859Sml29623 if (!nmsgs) {
25613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
25626929Smisaki "<== nxge_map_txdma_channel_buf_ring: channel %d "
25636929Smisaki "no msg blocks",
25646929Smisaki channel));
25653859Sml29623 status = NXGE_ERROR;
25663859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit;
25673859Sml29623 }
25683859Sml29623
25693859Sml29623 tx_ring_p = (p_tx_ring_t)
25706929Smisaki KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP);
25713859Sml29623 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER,
25726929Smisaki (void *)nxgep->interrupt_cookie);
25733952Sml29623
25746713Sspeer (void) atomic_swap_32(&tx_ring_p->tx_ring_offline, NXGE_TX_RING_ONLINE);
25756886Sspeer tx_ring_p->tx_ring_busy = B_FALSE;
25763952Sml29623 tx_ring_p->nxgep = nxgep;
25778275SEric Cheng tx_ring_p->tx_ring_handle = (mac_ring_handle_t)NULL;
25788275SEric Cheng (void) snprintf(qname, TASKQ_NAMELEN, "tx_%d_%d",
25798275SEric Cheng nxgep->instance, channel);
25808275SEric Cheng tx_ring_p->taskq = ddi_taskq_create(nxgep->dip, qname, 1,
25818275SEric Cheng TASKQ_DEFAULTPRI, 0);
25828275SEric Cheng if (tx_ring_p->taskq == NULL) {
25838275SEric Cheng goto nxge_map_txdma_channel_buf_ring_fail1;
25848275SEric Cheng }
25858275SEric Cheng
25863859Sml29623 /*
25873859Sml29623 * Allocate transmit message rings and handles for packets
25883859Sml29623 * not to be copied to premapped buffers.
25893859Sml29623 */
25903859Sml29623 size = nmsgs * sizeof (tx_msg_t);
25913859Sml29623 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
25923859Sml29623 for (i = 0; i < nmsgs; i++) {
25933859Sml29623 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr,
25946929Smisaki DDI_DMA_DONTWAIT, 0,
25956929Smisaki &tx_msg_ring[i].dma_handle);
25963859Sml29623 if (ddi_status != DDI_SUCCESS) {
25973859Sml29623 status |= NXGE_DDI_FAILED;
25983859Sml29623 break;
25993859Sml29623 }
26003859Sml29623 }
26013859Sml29623 if (i < nmsgs) {
26024185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26034185Sspeer "Allocate handles failed."));
26043859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1;
26053859Sml29623 }
26063859Sml29623
26073859Sml29623 tx_ring_p->tdc = channel;
26083859Sml29623 tx_ring_p->tx_msg_ring = tx_msg_ring;
26093859Sml29623 tx_ring_p->tx_ring_size = nmsgs;
26103859Sml29623 tx_ring_p->num_chunks = num_chunks;
26113859Sml29623 if (!nxge_tx_intr_thres) {
26123859Sml29623 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4;
26133859Sml29623 }
26143859Sml29623 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1;
26153859Sml29623 tx_ring_p->rd_index = 0;
26163859Sml29623 tx_ring_p->wr_index = 0;
26173859Sml29623 tx_ring_p->ring_head.value = 0;
26183859Sml29623 tx_ring_p->ring_kick_tail.value = 0;
26193859Sml29623 tx_ring_p->descs_pending = 0;
26203859Sml29623
26213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26226929Smisaki "==> nxge_map_txdma_channel_buf_ring: channel %d "
26236929Smisaki "actual tx desc max %d nmsgs %d "
26246929Smisaki "(config nxge_tx_ring_size %d)",
26256929Smisaki channel, tx_ring_p->tx_ring_size, nmsgs,
26266929Smisaki nxge_tx_ring_size));
26273859Sml29623
26283859Sml29623 /*
26293859Sml29623 * Map in buffers from the buffer pool.
26303859Sml29623 */
26313859Sml29623 index = 0;
26323859Sml29623 bsize = dma_bufp->block_size;
26333859Sml29623
26343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: "
26356929Smisaki "dma_bufp $%p tx_rng_p $%p "
26366929Smisaki "tx_msg_rng_p $%p bsize %d",
26376929Smisaki dma_bufp, tx_ring_p, tx_msg_ring, bsize));
26383859Sml29623
26393859Sml29623 tx_buf_dma_handle = dma_bufp->dma_handle;
26403859Sml29623 for (i = 0; i < num_chunks; i++, dma_bufp++) {
26413859Sml29623 bsize = dma_bufp->block_size;
26423859Sml29623 nblocks = dma_bufp->nblocks;
26433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26446929Smisaki "==> nxge_map_txdma_channel_buf_ring: dma chunk %d "
26456929Smisaki "size %d dma_bufp $%p",
26466929Smisaki i, sizeof (nxge_dma_common_t), dma_bufp));
26473859Sml29623
26483859Sml29623 for (j = 0; j < nblocks; j++) {
26493859Sml29623 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle;
26503859Sml29623 dmap = &tx_msg_ring[index++].buf_dma;
26513859Sml29623 #ifdef TX_MEM_DEBUG
26523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26536929Smisaki "==> nxge_map_txdma_channel_buf_ring: j %d "
26546929Smisaki "dmap $%p", j, dmap));
26553859Sml29623 #endif
26563859Sml29623 nxge_setup_dma_common(dmap, dma_bufp, 1,
26576929Smisaki bsize);
26583859Sml29623 }
26593859Sml29623 }
26603859Sml29623
26613859Sml29623 if (i < num_chunks) {
26624185Sspeer status = NXGE_ERROR;
26633859Sml29623 goto nxge_map_txdma_channel_buf_ring_fail1;
26643859Sml29623 }
26653859Sml29623
26663859Sml29623 *tx_desc_p = tx_ring_p;
26673859Sml29623
26683859Sml29623 goto nxge_map_txdma_channel_buf_ring_exit;
26693859Sml29623
26703859Sml29623 nxge_map_txdma_channel_buf_ring_fail1:
26718275SEric Cheng if (tx_ring_p->taskq) {
26728275SEric Cheng ddi_taskq_destroy(tx_ring_p->taskq);
26738275SEric Cheng tx_ring_p->taskq = NULL;
26743952Sml29623 }
26753952Sml29623
26763859Sml29623 index--;
26773859Sml29623 for (; index >= 0; index--) {
26784185Sspeer if (tx_msg_ring[index].dma_handle != NULL) {
26794185Sspeer ddi_dma_free_handle(&tx_msg_ring[index].dma_handle);
26803859Sml29623 }
26813859Sml29623 }
26823859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock);
26834185Sspeer KMEM_FREE(tx_msg_ring, size);
26843859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
26853859Sml29623
26864185Sspeer status = NXGE_ERROR;
26874185Sspeer
26883859Sml29623 nxge_map_txdma_channel_buf_ring_exit:
26893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
26906929Smisaki "<== nxge_map_txdma_channel_buf_ring status 0x%x", status));
26913859Sml29623
26923859Sml29623 return (status);
26933859Sml29623 }
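/*
 * nxge_map_txdma_channel_buf_ring() above first sums the block counts of
 * all of the channel's DMA chunks to size the message ring, then derives
 * tx_wrap_mask as (ring size - 1), which only works because the ring size
 * is a power of two.  A self-contained sketch of those two calculations;
 * chunk_t and the example sizes are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint32_t nblocks;		/* blocks carried by this DMA chunk */
} chunk_t;

static uint32_t
count_blocks(const chunk_t *chunks, int nchunks)
{
	uint32_t total = 0;
	int i;

	for (i = 0; i < nchunks; i++)
		total += chunks[i].nblocks;
	return (total);
}

int
main(void)
{
	chunk_t chunks[] = { { 256 }, { 256 }, { 512 } };
	uint32_t size = count_blocks(chunks, 3);	/* 1024 */
	uint32_t wrap_mask = size - 1;			/* 0x3ff: size is 2^n */

	(void) printf("ring size %u, wrap mask 0x%x\n", size, wrap_mask);
	(void) printf("index 1025 wraps to %u\n", 1025u & wrap_mask);
	return (0);
}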
26943859Sml29623
26953859Sml29623 /*ARGSUSED*/
26963859Sml29623 static void
26973859Sml29623 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p)
26983859Sml29623 {
26993859Sml29623 p_tx_msg_t tx_msg_ring;
27003859Sml29623 p_tx_msg_t tx_msg_p;
27013859Sml29623 int i;
27023859Sml29623
27033859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27046929Smisaki "==> nxge_unmap_txdma_channel_buf_ring"));
27053859Sml29623 if (tx_ring_p == NULL) {
27063859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
27076929Smisaki "<== nxge_unmap_txdma_channel_buf_ring: NULL ringp"));
27083859Sml29623 return;
27093859Sml29623 }
27103859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27116929Smisaki "==> nxge_unmap_txdma_channel_buf_ring: channel %d",
27126929Smisaki tx_ring_p->tdc));
27133859Sml29623
27143859Sml29623 tx_msg_ring = tx_ring_p->tx_msg_ring;
27156495Sspeer
27166495Sspeer /*
27176495Sspeer * Since the serialization thread, timer thread and
27186495Sspeer * interrupt thread can all call the transmit reclaim,
27196495Sspeer * the unmapping function needs to acquire the lock
27206495Sspeer * to free those buffers which were transmitted
27216495Sspeer * by the hardware already.
27226495Sspeer */
27236495Sspeer MUTEX_ENTER(&tx_ring_p->lock);
27246495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
27256495Sspeer "==> nxge_unmap_txdma_channel_buf_ring (reclaim): "
27266495Sspeer "channel %d",
27276495Sspeer tx_ring_p->tdc));
27286495Sspeer (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
27296495Sspeer
27303859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
27313859Sml29623 tx_msg_p = &tx_msg_ring[i];
27323859Sml29623 if (tx_msg_p->tx_message != NULL) {
27333859Sml29623 freemsg(tx_msg_p->tx_message);
27343859Sml29623 tx_msg_p->tx_message = NULL;
27353859Sml29623 }
27363859Sml29623 }
27373859Sml29623
27383859Sml29623 for (i = 0; i < tx_ring_p->tx_ring_size; i++) {
27393859Sml29623 if (tx_msg_ring[i].dma_handle != NULL) {
27403859Sml29623 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle);
27413859Sml29623 }
27426495Sspeer tx_msg_ring[i].dma_handle = NULL;
27433859Sml29623 }
27443859Sml29623
27456495Sspeer MUTEX_EXIT(&tx_ring_p->lock);
27466495Sspeer
27478275SEric Cheng if (tx_ring_p->taskq) {
27488275SEric Cheng ddi_taskq_destroy(tx_ring_p->taskq);
27498275SEric Cheng tx_ring_p->taskq = NULL;
27503952Sml29623 }
27513952Sml29623
27523859Sml29623 MUTEX_DESTROY(&tx_ring_p->lock);
27533859Sml29623 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size);
27543859Sml29623 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t));
27553859Sml29623
27563859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
27576929Smisaki "<== nxge_unmap_txdma_channel_buf_ring"));
27583859Sml29623 }
27593859Sml29623
27603859Sml29623 static nxge_status_t
27616495Sspeer nxge_txdma_hw_start(p_nxge_t nxgep, int channel)
27623859Sml29623 {
27633859Sml29623 p_tx_rings_t tx_rings;
27643859Sml29623 p_tx_ring_t *tx_desc_rings;
27653859Sml29623 p_tx_mbox_areas_t tx_mbox_areas_p;
27663859Sml29623 p_tx_mbox_t *tx_mbox_p;
27673859Sml29623 nxge_status_t status = NXGE_OK;
27683859Sml29623
27693859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start"));
27703859Sml29623
27713859Sml29623 tx_rings = nxgep->tx_rings;
27723859Sml29623 if (tx_rings == NULL) {
27733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
27746929Smisaki "<== nxge_txdma_hw_start: NULL ring pointer"));
27753859Sml29623 return (NXGE_ERROR);
27763859Sml29623 }
27773859Sml29623 tx_desc_rings = tx_rings->rings;
27783859Sml29623 if (tx_desc_rings == NULL) {
27793859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
27806929Smisaki "<== nxge_txdma_hw_start: NULL ring pointers"));
27813859Sml29623 return (NXGE_ERROR);
27823859Sml29623 }
27833859Sml29623
27846495Sspeer NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
27856495Sspeer "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings));
27863859Sml29623
27873859Sml29623 tx_mbox_areas_p = nxgep->tx_mbox_areas_p;
27883859Sml29623 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p;
27893859Sml29623
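	/*
	 * Start (reset, configure and enable) only the channel that
	 * was requested; other channels are left untouched.
	 */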
27906495Sspeer status = nxge_txdma_start_channel(nxgep, channel,
27916495Sspeer (p_tx_ring_t)tx_desc_rings[channel],
27926495Sspeer (p_tx_mbox_t)tx_mbox_p[channel]);
27936495Sspeer if (status != NXGE_OK) {
27946495Sspeer goto nxge_txdma_hw_start_fail1;
27953859Sml29623 }
27963859Sml29623
27973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
27986929Smisaki "tx_rings $%p rings $%p",
27996929Smisaki nxgep->tx_rings, nxgep->tx_rings->rings));
28003859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: "
28016929Smisaki "tx_rings $%p tx_desc_rings $%p",
28026929Smisaki nxgep->tx_rings, tx_desc_rings));
28033859Sml29623
28043859Sml29623 goto nxge_txdma_hw_start_exit;
28053859Sml29623
28063859Sml29623 nxge_txdma_hw_start_fail1:
28073859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28086929Smisaki "==> nxge_txdma_hw_start: disable "
28096929Smisaki "(status 0x%x channel %d)", status, channel));
28103859Sml29623
28113859Sml29623 nxge_txdma_hw_start_exit:
28123859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28136929Smisaki "==> nxge_txdma_hw_start: (status 0x%x)", status));
28143859Sml29623
28153859Sml29623 return (status);
28163859Sml29623 }
28173859Sml29623
28186495Sspeer /*
28196495Sspeer * nxge_txdma_start_channel
28206495Sspeer *
28216495Sspeer * Start a TDC.
28226495Sspeer *
28236495Sspeer * Arguments:
28246495Sspeer * nxgep
28256495Sspeer * channel The channel to start.
28266495Sspeer * tx_ring_p channel's transmit descriptor ring.
28276495Sspeer * tx_mbox_p channel's mailbox.
28286495Sspeer *
28296495Sspeer * Notes:
28306495Sspeer *
28316495Sspeer * NPI/NXGE function calls:
28326495Sspeer * nxge_reset_txdma_channel()
28336495Sspeer * nxge_init_txdma_channel_event_mask()
28346495Sspeer * nxge_enable_txdma_channel()
28356495Sspeer *
28366495Sspeer * Registers accessed:
28376495Sspeer * none directly (see functions above).
28386495Sspeer *
28396495Sspeer * Context:
28406495Sspeer * Any domain
28416495Sspeer */
28423859Sml29623 static nxge_status_t
28433859Sml29623 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel,
28443859Sml29623 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p)
28453859Sml29623
28463859Sml29623 {
28473859Sml29623 nxge_status_t status = NXGE_OK;
28483859Sml29623
28493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
28503859Sml29623 "==> nxge_txdma_start_channel (channel %d)", channel));
28513859Sml29623 /*
28523859Sml29623 * TXDMA/TXC must be in stopped state.
28533859Sml29623 */
28543859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel);
28553859Sml29623
28563859Sml29623 /*
28573859Sml29623 * Reset TXDMA channel
28583859Sml29623 */
28593859Sml29623 tx_ring_p->tx_cs.value = 0;
28603859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1;
28613859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel,
28623859Sml29623 tx_ring_p->tx_cs.value);
28633859Sml29623 if (status != NXGE_OK) {
28643859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
28653859Sml29623 "==> nxge_txdma_start_channel (channel %d)"
28663859Sml29623 " reset channel failed 0x%x", channel, status));
28673859Sml29623 goto nxge_txdma_start_channel_exit;
28683859Sml29623 }
28693859Sml29623
28703859Sml29623 /*
28713859Sml29623 * Initialize the TXDMA channel specific FZC control
28723859Sml29623 * configurations. These FZC registers are pertaining
28733859Sml29623 * to each TX channel (i.e. logical pages).
28743859Sml29623 */
28756495Sspeer if (!isLDOMguest(nxgep)) {
28766495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel,
28776495Sspeer tx_ring_p, tx_mbox_p);
28786495Sspeer if (status != NXGE_OK) {
28796495Sspeer goto nxge_txdma_start_channel_exit;
28806495Sspeer }
28813859Sml29623 }
28823859Sml29623
28833859Sml29623 /*
28843859Sml29623 * Initialize the event masks.
28853859Sml29623 */
28863859Sml29623 tx_ring_p->tx_evmask.value = 0;
28873859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep,
28886495Sspeer channel, &tx_ring_p->tx_evmask);
28893859Sml29623 if (status != NXGE_OK) {
28903859Sml29623 goto nxge_txdma_start_channel_exit;
28913859Sml29623 }
28923859Sml29623
28933859Sml29623 /*
28943859Sml29623 * Load the TXDMA descriptors, buffers and mailbox,
28953859Sml29623 * then initialize the DMA channel and
28963859Sml29623 * enable it.
28973859Sml29623 */
28983859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel,
28993859Sml29623 tx_ring_p, tx_mbox_p);
29003859Sml29623 if (status != NXGE_OK) {
29013859Sml29623 goto nxge_txdma_start_channel_exit;
29023859Sml29623 }
29033859Sml29623
29043859Sml29623 nxge_txdma_start_channel_exit:
29053859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel"));
29063859Sml29623
29073859Sml29623 return (status);
29083859Sml29623 }
29093859Sml29623
29106495Sspeer /*
29116495Sspeer * nxge_txdma_stop_channel
29126495Sspeer *
29136495Sspeer * Stop a TDC.
29146495Sspeer *
29156495Sspeer * Arguments:
29166495Sspeer * nxgep
29176495Sspeer * channel The channel to stop.
29186495Sspeer * tx_ring_p channel's transmit descriptor ring.
29196495Sspeer * tx_mbox_p channel's mailbox.
29206495Sspeer *
29216495Sspeer * Notes:
29226495Sspeer *
29236495Sspeer * NPI/NXGE function calls:
29246495Sspeer * nxge_txdma_stop_inj_err()
29256495Sspeer * nxge_reset_txdma_channel()
29266495Sspeer * nxge_init_txdma_channel_event_mask()
29276495Sspeer * nxge_init_txdma_channel_cntl_stat()
29286495Sspeer * nxge_disable_txdma_channel()
29296495Sspeer *
29306495Sspeer * Registers accessed:
29316495Sspeer * none directly (see functions above).
29326495Sspeer *
29336495Sspeer * Context:
29346495Sspeer * Any domain
29356495Sspeer */
29363859Sml29623 /*ARGSUSED*/
29373859Sml29623 static nxge_status_t
29386495Sspeer nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
29393859Sml29623 {
29406495Sspeer p_tx_ring_t tx_ring_p;
#ifdef HARDWARE_REQUIRED
	p_tx_mbox_t tx_mbox_p;
#endif
29416495Sspeer nxge_status_t status = NXGE_OK;
29423859Sml29623
29433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
29446929Smisaki "==> nxge_txdma_stop_channel: channel %d", channel));
29453859Sml29623
29463859Sml29623 /*
29473859Sml29623 * Stop (disable) TXDMA and TXC. (If the stop bit is set
29483859Sml29623 * and the STOP_N_GO bit is not set, the TXDMA reset state
29493859Sml29623 * will not be set when the channel is reset.)
29503859Sml29623 */
29513859Sml29623 (void) nxge_txdma_stop_inj_err(nxgep, channel);
29523859Sml29623
295310577SMichael.Speer@Sun.COM if (nxgep->tx_rings == NULL) {
295410577SMichael.Speer@Sun.COM status = NXGE_ERROR;
295510577SMichael.Speer@Sun.COM goto nxge_txdma_stop_channel_exit;
295610577SMichael.Speer@Sun.COM }
295710577SMichael.Speer@Sun.COM
29586495Sspeer tx_ring_p = nxgep->tx_rings->rings[channel];
295910577SMichael.Speer@Sun.COM if (tx_ring_p == NULL) {
296010577SMichael.Speer@Sun.COM status = NXGE_ERROR;
296110577SMichael.Speer@Sun.COM goto nxge_txdma_stop_channel_exit;
296210577SMichael.Speer@Sun.COM }
29636495Sspeer
29643859Sml29623 /*
29653859Sml29623 * Reset TXDMA channel
29663859Sml29623 */
29673859Sml29623 tx_ring_p->tx_cs.value = 0;
29683859Sml29623 tx_ring_p->tx_cs.bits.ldw.rst = 1;
29693859Sml29623 status = nxge_reset_txdma_channel(nxgep, channel,
29706929Smisaki tx_ring_p->tx_cs.value);
29713859Sml29623 if (status != NXGE_OK) {
29723859Sml29623 goto nxge_txdma_stop_channel_exit;
29733859Sml29623 }
29743859Sml29623
29753859Sml29623 #ifdef HARDWARE_REQUIRED
29763859Sml29623 /* Set up the interrupt event masks. */
29773859Sml29623 tx_ring_p->tx_evmask.value = 0;
29783859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep,
29796929Smisaki channel, &tx_ring_p->tx_evmask);
29803859Sml29623 if (status != NXGE_OK) {
29813859Sml29623 goto nxge_txdma_stop_channel_exit;
29823859Sml29623 }
29833859Sml29623
29843859Sml29623 /* Initialize the DMA control and status register */
29853859Sml29623 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL;
29863859Sml29623 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel,
29876929Smisaki tx_ring_p->tx_cs.value);
29883859Sml29623 if (status != NXGE_OK) {
29893859Sml29623 goto nxge_txdma_stop_channel_exit;
29903859Sml29623 }
29913859Sml29623
29926495Sspeer tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel];
29936495Sspeer
29943859Sml29623 /* Disable channel */
29953859Sml29623 status = nxge_disable_txdma_channel(nxgep, channel,
29966495Sspeer tx_ring_p, tx_mbox_p);
29973859Sml29623 if (status != NXGE_OK) {
29983859Sml29623 goto nxge_txdma_stop_channel_exit;
29993859Sml29623 }
30003859Sml29623
30013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
30026929Smisaki "==> nxge_txdma_stop_channel: event done"));
30033859Sml29623
30043859Sml29623 #endif
30053859Sml29623
30063859Sml29623 nxge_txdma_stop_channel_exit:
30073859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel"));
30083859Sml29623 return (status);
30093859Sml29623 }
30103859Sml29623
30116495Sspeer /*
30126495Sspeer * nxge_txdma_get_ring
30136495Sspeer *
30146495Sspeer * Get the ring for a TDC.
30156495Sspeer *
30166495Sspeer * Arguments:
30176495Sspeer * nxgep
30186495Sspeer * channel
30196495Sspeer *
30206495Sspeer * Notes:
30216495Sspeer *
30226495Sspeer * NPI/NXGE function calls:
30236495Sspeer *
30246495Sspeer * Registers accessed:
30256495Sspeer *
30266495Sspeer * Context:
30276495Sspeer * Any domain
30286495Sspeer */
30293859Sml29623 static p_tx_ring_t
30303859Sml29623 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel)
30313859Sml29623 {
30326495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
30336495Sspeer int tdc;
30343859Sml29623
30353859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring"));
30363859Sml29623
30376495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
30383859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
30396495Sspeer "<== nxge_txdma_get_ring: NULL ring pointer(s)"));
30406495Sspeer goto return_null;
30413859Sml29623 }
30423859Sml29623
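	/*
	 * Scan only the TDCs owned by this instance and return the
	 * ring whose hardware channel number matches.
	 */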
30436495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
30446495Sspeer if ((1 << tdc) & set->owned.map) {
30456495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
30466495Sspeer if (ring) {
30476495Sspeer if (channel == ring->tdc) {
30486495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
30496495Sspeer "<== nxge_txdma_get_ring: "
30506495Sspeer "tdc %d ring $%p", tdc, ring));
30516495Sspeer return (ring);
30526495Sspeer }
30536495Sspeer }
30543859Sml29623 }
30553859Sml29623 }
30563859Sml29623
30576495Sspeer return_null:
30586495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: "
30596929Smisaki "ring not found"));
30606495Sspeer
30613859Sml29623 return (NULL);
30623859Sml29623 }
30633859Sml29623
30646495Sspeer /*
30656495Sspeer * nxge_txdma_get_mbox
30666495Sspeer *
30676495Sspeer * Get the mailbox for a TDC.
30686495Sspeer *
30696495Sspeer * Arguments:
30706495Sspeer * nxgep
30716495Sspeer * channel
30726495Sspeer *
30736495Sspeer * Notes:
30746495Sspeer *
30756495Sspeer * NPI/NXGE function calls:
30766495Sspeer *
30776495Sspeer * Registers accessed:
30786495Sspeer *
30796495Sspeer * Context:
30806495Sspeer * Any domain
30816495Sspeer */
30823859Sml29623 static p_tx_mbox_t
30833859Sml29623 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel)
30843859Sml29623 {
30856495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
30866495Sspeer int tdc;
30873859Sml29623
30883859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox"));
30893859Sml29623
30906495Sspeer if (nxgep->tx_mbox_areas_p == 0 ||
30916495Sspeer nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) {
30926495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
30936495Sspeer "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)"));
30946495Sspeer goto return_null;
30953859Sml29623 }
30963859Sml29623
30976495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
30986495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
30996495Sspeer "<== nxge_txdma_get_mbox: NULL ring pointer(s)"));
31006495Sspeer goto return_null;
31013859Sml29623 }
31023859Sml29623
31036495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
31046495Sspeer if ((1 << tdc) & set->owned.map) {
31056495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
31066495Sspeer if (ring) {
31076495Sspeer if (channel == ring->tdc) {
31086495Sspeer tx_mbox_t *mailbox = nxgep->
31096495Sspeer tx_mbox_areas_p->
31106495Sspeer txmbox_areas_p[tdc];
31116495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
31126495Sspeer "<== nxge_txdma_get_mbox: tdc %d "
31136495Sspeer "mailbox $%p", tdc, mailbox));
31146495Sspeer return (mailbox);
31156495Sspeer }
31166495Sspeer }
31173859Sml29623 }
31183859Sml29623 }
31193859Sml29623
31206495Sspeer return_null:
31216495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: "
31226929Smisaki "mailbox not found"));
31236495Sspeer
31243859Sml29623 return (NULL);
31253859Sml29623 }
31263859Sml29623
31276495Sspeer /*
31286495Sspeer * nxge_tx_err_evnts
31296495Sspeer *
31306495Sspeer * Process the error events for a TDC; recover the channel on fatal errors.
31316495Sspeer *
31326495Sspeer * Arguments:
31336495Sspeer * nxgep
31346495Sspeer * index The index to the TDC ring.
31356495Sspeer * ldvp Used to get the channel number ONLY.
31366495Sspeer * cs A copy of the bits from TX_CS.
31376495Sspeer *
31386495Sspeer * Notes:
31396495Sspeer * Calling tree:
31406495Sspeer * nxge_tx_intr()
31416495Sspeer *
31426495Sspeer * NPI/NXGE function calls:
31436495Sspeer * npi_txdma_ring_error_get()
31446495Sspeer * npi_txdma_inj_par_error_get()
31456495Sspeer * nxge_txdma_fatal_err_recover()
31466495Sspeer *
31476495Sspeer * Registers accessed:
31486495Sspeer * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High
31496495Sspeer * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low
31506495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
31516495Sspeer *
31526495Sspeer * Context:
31536495Sspeer * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR.
31546495Sspeer */
31553859Sml29623 /*ARGSUSED*/
31563859Sml29623 static nxge_status_t
31573859Sml29623 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs)
31583859Sml29623 {
31593859Sml29623 npi_handle_t handle;
31603859Sml29623 npi_status_t rs;
31613859Sml29623 uint8_t channel;
31623859Sml29623 p_tx_ring_t *tx_rings;
31633859Sml29623 p_tx_ring_t tx_ring_p;
31643859Sml29623 p_nxge_tx_ring_stats_t tdc_stats;
31653859Sml29623 boolean_t txchan_fatal = B_FALSE;
31663859Sml29623 nxge_status_t status = NXGE_OK;
31673859Sml29623 tdmc_inj_par_err_t par_err;
31683859Sml29623 uint32_t value;
31693859Sml29623
31706495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts"));
31713859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
31723859Sml29623 channel = ldvp->channel;
31733859Sml29623
31743859Sml29623 tx_rings = nxgep->tx_rings->rings;
31753859Sml29623 tx_ring_p = tx_rings[index];
31763859Sml29623 tdc_stats = tx_ring_p->tdc_stats;
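	/*
	 * If any ring-level error is reported, capture the transmit
	 * ring error log registers into the channel statistics first.
	 */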
31773859Sml29623 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) ||
31786929Smisaki (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) ||
31796929Smisaki (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) {
31803859Sml29623 if ((rs = npi_txdma_ring_error_get(handle, channel,
31816929Smisaki &tdc_stats->errlog)) != NPI_SUCCESS)
31823859Sml29623 return (NXGE_ERROR | rs);
31833859Sml29623 }
31843859Sml29623
31853859Sml29623 if (cs.bits.ldw.mbox_err) {
31863859Sml29623 tdc_stats->mbox_err++;
31873859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
31886929Smisaki NXGE_FM_EREPORT_TDMC_MBOX_ERR);
31893859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31906929Smisaki "==> nxge_tx_err_evnts(channel %d): "
31916929Smisaki "fatal error: mailbox", channel));
31923859Sml29623 txchan_fatal = B_TRUE;
31933859Sml29623 }
31943859Sml29623 if (cs.bits.ldw.pkt_size_err) {
31953859Sml29623 tdc_stats->pkt_size_err++;
31963859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
31976929Smisaki NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR);
31983859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31996929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32006929Smisaki "fatal error: pkt_size_err", channel));
32013859Sml29623 txchan_fatal = B_TRUE;
32023859Sml29623 }
32033859Sml29623 if (cs.bits.ldw.tx_ring_oflow) {
32043859Sml29623 tdc_stats->tx_ring_oflow++;
32053859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32066929Smisaki NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW);
32073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32086929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32096929Smisaki "fatal error: tx_ring_oflow", channel));
32103859Sml29623 txchan_fatal = B_TRUE;
32113859Sml29623 }
32123859Sml29623 if (cs.bits.ldw.pref_buf_par_err) {
32133859Sml29623 tdc_stats->pre_buf_par_err++;
32143859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32156929Smisaki NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR);
32163859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32176929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32186929Smisaki "fatal error: pre_buf_par_err", channel));
32193859Sml29623 /* Clear error injection source for parity error */
32203859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value);
32213859Sml29623 par_err.value = value;
32223859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << channel);
32233859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
32243859Sml29623 txchan_fatal = B_TRUE;
32253859Sml29623 }
32263859Sml29623 if (cs.bits.ldw.nack_pref) {
32273859Sml29623 tdc_stats->nack_pref++;
32283859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32296929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PREF);
32303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32316929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32326929Smisaki "fatal error: nack_pref", channel));
32333859Sml29623 txchan_fatal = B_TRUE;
32343859Sml29623 }
32353859Sml29623 if (cs.bits.ldw.nack_pkt_rd) {
32363859Sml29623 tdc_stats->nack_pkt_rd++;
32373859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32386929Smisaki NXGE_FM_EREPORT_TDMC_NACK_PKT_RD);
32393859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32406929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32416929Smisaki "fatal error: nack_pkt_rd", channel));
32423859Sml29623 txchan_fatal = B_TRUE;
32433859Sml29623 }
32443859Sml29623 if (cs.bits.ldw.conf_part_err) {
32453859Sml29623 tdc_stats->conf_part_err++;
32463859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32476929Smisaki NXGE_FM_EREPORT_TDMC_CONF_PART_ERR);
32483859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32496929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32506929Smisaki "fatal error: config_partition_err", channel));
32513859Sml29623 txchan_fatal = B_TRUE;
32523859Sml29623 }
32533859Sml29623 if (cs.bits.ldw.pkt_prt_err) {
32543859Sml29623 tdc_stats->pkt_part_err++;
32553859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel,
32566929Smisaki NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR);
32573859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32586929Smisaki "==> nxge_tx_err_evnts(channel %d): "
32596929Smisaki "fatal error: pkt_prt_err", channel));
32603859Sml29623 txchan_fatal = B_TRUE;
32613859Sml29623 }
32623859Sml29623
32633859Sml29623 /* Clear error injection source in case this is an injected error */
32643859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0);
32653859Sml29623
32663859Sml29623 if (txchan_fatal) {
32673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32686929Smisaki " nxge_tx_err_evnts: "
32696929Smisaki " fatal error on channel %d cs 0x%llx\n",
32706929Smisaki channel, cs.value));
32713859Sml29623 status = nxge_txdma_fatal_err_recover(nxgep, channel,
32726929Smisaki tx_ring_p);
32733859Sml29623 if (status == NXGE_OK) {
32743859Sml29623 FM_SERVICE_RESTORED(nxgep);
32753859Sml29623 }
32763859Sml29623 }
32773859Sml29623
32786495Sspeer NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts"));
32793859Sml29623
32803859Sml29623 return (status);
32813859Sml29623 }
32823859Sml29623
32833859Sml29623 static nxge_status_t
32846495Sspeer nxge_txdma_fatal_err_recover(
32856495Sspeer p_nxge_t nxgep,
32866495Sspeer uint16_t channel,
32876495Sspeer p_tx_ring_t tx_ring_p)
32883859Sml29623 {
32893859Sml29623 npi_handle_t handle;
32903859Sml29623 npi_status_t rs = NPI_SUCCESS;
32913859Sml29623 p_tx_mbox_t tx_mbox_p = NULL;
32923859Sml29623 nxge_status_t status = NXGE_OK;
32933859Sml29623
32943859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover"));
32953859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32966929Smisaki "Recovering from TxDMAChannel#%d error...", channel));
32973859Sml29623
32983859Sml29623 /*
32993859Sml29623 * Stop the DMA channel and wait for the stop to complete.
33003859Sml29623 * If the stop-done bit is not set, flag an error.
33023859Sml29623 */
33033859Sml29623
33043859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
33053859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop..."));
33063859Sml29623 MUTEX_ENTER(&tx_ring_p->lock);
33073859Sml29623 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel);
33083859Sml29623 if (rs != NPI_SUCCESS) {
33093859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33106929Smisaki "==> nxge_txdma_fatal_err_recover (channel %d): "
33116929Smisaki "stop failed ", channel));
33123859Sml29623 goto fail;
33133859Sml29623 }
33143859Sml29623
33153859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim..."));
33163859Sml29623 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);
33173859Sml29623
33183859Sml29623 /*
33193859Sml29623 * Reset TXDMA channel
33203859Sml29623 */
33213859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset..."));
33223859Sml29623 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) !=
33236929Smisaki NPI_SUCCESS) {
33243859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33256929Smisaki "==> nxge_txdma_fatal_err_recover (channel %d)"
33266929Smisaki " reset channel failed 0x%x", channel, rs));
33273859Sml29623 goto fail;
33283859Sml29623 }
33293859Sml29623
33303859Sml29623 /*
33313859Sml29623 * Reset the tail (kick) register to 0.
33323859Sml29623 * (Hardware will not reset it; a Tx overflow fatal
33333859Sml29623 * error results if the tail is not 0 after reset.)
33343859Sml29623 */
33353859Sml29623 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0);
33363859Sml29623
33373859Sml29623 /* Restart TXDMA channel */
33383859Sml29623
33396495Sspeer if (!isLDOMguest(nxgep)) {
33406495Sspeer tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel);
33416495Sspeer
33426495Sspeer // XXX This is a problem in HIO!
33436495Sspeer /*
33446495Sspeer * Initialize the TXDMA channel specific FZC control
33456495Sspeer * configurations. These FZC registers are pertaining
33466495Sspeer * to each TX channel (i.e. logical pages).
33476495Sspeer */
33486495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart..."));
33496495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, channel,
33506495Sspeer tx_ring_p, tx_mbox_p);
33516495Sspeer if (status != NXGE_OK)
33526495Sspeer goto fail;
33536495Sspeer }
33543859Sml29623
33553859Sml29623 /*
33563859Sml29623 * Initialize the event masks.
33573859Sml29623 */
33583859Sml29623 tx_ring_p->tx_evmask.value = 0;
33593859Sml29623 status = nxge_init_txdma_channel_event_mask(nxgep, channel,
33606929Smisaki &tx_ring_p->tx_evmask);
33613859Sml29623 if (status != NXGE_OK)
33623859Sml29623 goto fail;
33633859Sml29623
33643859Sml29623 tx_ring_p->wr_index_wrap = B_FALSE;
33653859Sml29623 tx_ring_p->wr_index = 0;
33663859Sml29623 tx_ring_p->rd_index = 0;
33673859Sml29623
33683859Sml29623 /*
33693859Sml29623 * Load the TXDMA descriptors, buffers and mailbox,
33703859Sml29623 * then initialize the DMA channel and
33713859Sml29623 * enable it.
33723859Sml29623 */
33733859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable..."));
33743859Sml29623 status = nxge_enable_txdma_channel(nxgep, channel,
33756929Smisaki tx_ring_p, tx_mbox_p);
33763859Sml29623 MUTEX_EXIT(&tx_ring_p->lock);
33773859Sml29623 if (status != NXGE_OK)
33783859Sml29623 goto fail;
33793859Sml29623
33803859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33816929Smisaki "Recovery Successful, TxDMAChannel#%d Restored",
33826929Smisaki channel));
33833859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover"));
33843859Sml29623
33853859Sml29623 return (NXGE_OK);
33863859Sml29623
33873859Sml29623 fail:
33883859Sml29623 MUTEX_EXIT(&tx_ring_p->lock);
33897906SMichael.Speer@Sun.COM
33903859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL,
33916929Smisaki "nxge_txdma_fatal_err_recover (channel %d): "
33926929Smisaki "failed to recover this txdma channel", channel));
33933859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
33943859Sml29623
33953859Sml29623 return (status);
33963859Sml29623 }
33973859Sml29623
33986495Sspeer /*
33996495Sspeer * nxge_tx_port_fatal_err_recover
34006495Sspeer *
34016495Sspeer * Attempt to recover from a fatal port error.
34026495Sspeer *
34036495Sspeer * Arguments:
34046495Sspeer * nxgep
34056495Sspeer *
34066495Sspeer * Notes:
34076495Sspeer * How would a guest do this?
34086495Sspeer *
34096495Sspeer * NPI/NXGE function calls:
34106495Sspeer *
34116495Sspeer * Registers accessed:
34126495Sspeer *
34136495Sspeer * Context:
34146495Sspeer * Service domain
34156495Sspeer */
34163859Sml29623 nxge_status_t
34173859Sml29623 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep)
34183859Sml29623 {
34196495Sspeer nxge_grp_set_t *set = &nxgep->tx_set;
34206495Sspeer nxge_channel_t tdc;
34216495Sspeer
34226495Sspeer tx_ring_t *ring;
34236495Sspeer tx_mbox_t *mailbox;
34246495Sspeer
34253859Sml29623 npi_handle_t handle;
34266495Sspeer nxge_status_t status = NXGE_ERROR;
34276495Sspeer npi_status_t rs;
34283859Sml29623
34293859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover"));
34303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34316495Sspeer "Recovering from TxPort error..."));
34326495Sspeer
34336495Sspeer if (isLDOMguest(nxgep)) {
34346495Sspeer return (NXGE_OK);
34356495Sspeer }
34366495Sspeer
34376495Sspeer if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
34386495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
34396495Sspeer "<== nxge_tx_port_fatal_err_recover: not initialized"));
34406495Sspeer return (NXGE_ERROR);
34416495Sspeer }
34426495Sspeer
34436495Sspeer if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
34446495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
34456495Sspeer "<== nxge_tx_port_fatal_err_recover: "
34466495Sspeer "NULL ring pointer(s)"));
34476495Sspeer return (NXGE_ERROR);
34486495Sspeer }
34496495Sspeer
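	/*
	 * Lock every TDC owned by this instance so that transmit and
	 * reclaim activity is quiesced while the port is recovered.
	 */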
34506495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34516495Sspeer if ((1 << tdc) & set->owned.map) {
34526495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34536495Sspeer if (ring)
34546495Sspeer MUTEX_ENTER(&ring->lock);
34556495Sspeer }
34566495Sspeer }
34573859Sml29623
34583859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
34596495Sspeer
34606495Sspeer /*
34616495Sspeer * Stop all the TDCs owned by us.
34626495Sspeer * (The shared TDCs will have been stopped by their owners.)
34636495Sspeer */
34646495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34656495Sspeer if ((1 << tdc) & set->owned.map) {
34666495Sspeer ring = nxgep->tx_rings->rings[tdc];
34676495Sspeer if (ring) {
34686495Sspeer rs = npi_txdma_channel_control
34696495Sspeer (handle, TXDMA_STOP, tdc);
34706495Sspeer if (rs != NPI_SUCCESS) {
34716495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34726495Sspeer "nxge_tx_port_fatal_err_recover "
34736495Sspeer "(channel %d): stop failed ", tdc));
34746495Sspeer goto fail;
34756495Sspeer }
34766495Sspeer }
34773859Sml29623 }
34783859Sml29623 }
34793859Sml29623
34806495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs..."));
34816495Sspeer
34826495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34836495Sspeer if ((1 << tdc) & set->owned.map) {
34846495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34857906SMichael.Speer@Sun.COM if (ring) {
34866495Sspeer (void) nxge_txdma_reclaim(nxgep, ring, 0);
34877906SMichael.Speer@Sun.COM }
34883859Sml29623 }
34893859Sml29623 }
34903859Sml29623
34913859Sml29623 /*
34926495Sspeer * Reset all the TDCs.
34933859Sml29623 */
34946495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs..."));
34956495Sspeer
34966495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
34976495Sspeer if ((1 << tdc) & set->owned.map) {
34986495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
34996495Sspeer if (ring) {
35006495Sspeer if ((rs = npi_txdma_channel_control
35016929Smisaki (handle, TXDMA_RESET, tdc))
35026495Sspeer != NPI_SUCCESS) {
35036495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
35046495Sspeer "nxge_tx_port_fatal_err_recover "
35056495Sspeer "(channel %d) reset channel "
35066495Sspeer "failed 0x%x", tdc, rs));
35076495Sspeer goto fail;
35086495Sspeer }
35096495Sspeer }
35106495Sspeer /*
35116495Sspeer * Reset the tail (kick) register to 0.
35126495Sspeer * (Hardware will not reset it; a Tx overflow fatal
35136495Sspeer * error results if the tail is not 0 after reset.)
35146495Sspeer */
35156495Sspeer TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0);
35163859Sml29623 }
35176495Sspeer }
35186495Sspeer
35196495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs..."));
35206495Sspeer
35216495Sspeer /* Restart all the TDCs */
35226495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35236495Sspeer if ((1 << tdc) & set->owned.map) {
35246495Sspeer ring = nxgep->tx_rings->rings[tdc];
35256495Sspeer if (ring) {
35266495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc);
35276495Sspeer status = nxge_init_fzc_txdma_channel(nxgep, tdc,
35286495Sspeer ring, mailbox);
35296495Sspeer ring->tx_evmask.value = 0;
35306495Sspeer /*
35316495Sspeer * Initialize the event masks.
35326495Sspeer */
35336495Sspeer status = nxge_init_txdma_channel_event_mask
35346495Sspeer (nxgep, tdc, &ring->tx_evmask);
35356495Sspeer
35366495Sspeer ring->wr_index_wrap = B_FALSE;
35376495Sspeer ring->wr_index = 0;
35386495Sspeer ring->rd_index = 0;
35396495Sspeer
35406495Sspeer if (status != NXGE_OK)
35416495Sspeer goto fail;
35446495Sspeer }
35453859Sml29623 }
35466495Sspeer }
35476495Sspeer
35486495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs..."));
35496495Sspeer
35506495Sspeer /* Re-enable all the TDCs */
35516495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35526495Sspeer if ((1 << tdc) & set->owned.map) {
35536495Sspeer ring = nxgep->tx_rings->rings[tdc];
35546495Sspeer if (ring) {
35556495Sspeer mailbox = nxge_txdma_get_mbox(nxgep, tdc);
35566495Sspeer status = nxge_enable_txdma_channel(nxgep, tdc,
35576495Sspeer ring, mailbox);
35586495Sspeer if (status != NXGE_OK)
35596495Sspeer goto fail;
35606495Sspeer }
35616495Sspeer }
35623859Sml29623 }
35633859Sml29623
35643859Sml29623 /*
35656495Sspeer * Unlock all the TDCs.
35663859Sml29623 */
35676495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35686495Sspeer if ((1 << tdc) & set->owned.map) {
35696495Sspeer tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
35706495Sspeer if (ring)
35716495Sspeer MUTEX_EXIT(&ring->lock);
35723859Sml29623 }
35733859Sml29623 }
35743859Sml29623
35756495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded"));
35763859Sml29623 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
35773859Sml29623
35783859Sml29623 return (NXGE_OK);
35793859Sml29623
35803859Sml29623 fail:
35816495Sspeer for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
35826495Sspeer if ((1 << tdc) & set->owned.map) {
35836495Sspeer ring = nxgep->tx_rings->rings[tdc];
35846495Sspeer if (ring)
35856495Sspeer MUTEX_EXIT(&ring->lock);
35863859Sml29623 }
35873859Sml29623 }
35883859Sml29623
35896495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed"));
35906495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover"));
35913859Sml29623
35923859Sml29623 return (status);
35933859Sml29623 }
35943859Sml29623
35956495Sspeer /*
35966495Sspeer * nxge_txdma_inject_err
35976495Sspeer *
35986495Sspeer * Inject an error into a TDC.
35996495Sspeer *
36006495Sspeer * Arguments:
36016495Sspeer * nxgep
36026495Sspeer * err_id The error to inject.
36036495Sspeer * chan The channel to inject into.
36046495Sspeer *
36056495Sspeer * Notes:
36066495Sspeer * This is called from nxge_main.c:nxge_err_inject()
36076495Sspeer * Has this ioctl ever been used?
36086495Sspeer *
36096495Sspeer * NPI/NXGE function calls:
36106495Sspeer * npi_txdma_inj_par_error_get()
36116495Sspeer * npi_txdma_inj_par_error_set()
36126495Sspeer *
36136495Sspeer * Registers accessed:
36146495Sspeer * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error
36156495Sspeer * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug
36176495Sspeer *
36186495Sspeer * Context:
36196495Sspeer * Service domain
36206495Sspeer */
36213859Sml29623 void
36223859Sml29623 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
36233859Sml29623 {
36243859Sml29623 tdmc_intr_dbg_t tdi;
36253859Sml29623 tdmc_inj_par_err_t par_err;
36263859Sml29623 uint32_t value;
36273859Sml29623 npi_handle_t handle;
36283859Sml29623
36293859Sml29623 switch (err_id) {
36303859Sml29623
36313859Sml29623 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR:
36323859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
36333859Sml29623 /* Clear error injection source for parity error */
36343859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value);
36353859Sml29623 par_err.value = value;
36363859Sml29623 par_err.bits.ldw.inject_parity_error &= ~(1 << chan);
36373859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
36383859Sml29623
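		/*
		 * Re-read the register and set this channel's injection
		 * bit to force a prefetch buffer parity error.
		 */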
36393859Sml29623 par_err.bits.ldw.inject_parity_error = (1 << chan);
36403859Sml29623 (void) npi_txdma_inj_par_error_get(handle, &value);
36413859Sml29623 par_err.value = value;
36423859Sml29623 par_err.bits.ldw.inject_parity_error |= (1 << chan);
36433859Sml29623 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n",
36446929Smisaki (unsigned long long)par_err.value);
36453859Sml29623 (void) npi_txdma_inj_par_error_set(handle, par_err.value);
36463859Sml29623 break;
36473859Sml29623
36483859Sml29623 case NXGE_FM_EREPORT_TDMC_MBOX_ERR:
36493859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PREF:
36503859Sml29623 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD:
36513859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR:
36523859Sml29623 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW:
36533859Sml29623 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR:
36543859Sml29623 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR:
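		/*
		 * For these error types, set the matching bit in the
		 * TDMC interrupt debug register to force the error
		 * interrupt on the given channel.
		 */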
36553859Sml29623 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
36566929Smisaki chan, &tdi.value);
36573859Sml29623 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR)
36583859Sml29623 tdi.bits.ldw.pref_buf_par_err = 1;
36593859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR)
36603859Sml29623 tdi.bits.ldw.mbox_err = 1;
36613859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF)
36623859Sml29623 tdi.bits.ldw.nack_pref = 1;
36633859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD)
36643859Sml29623 tdi.bits.ldw.nack_pkt_rd = 1;
36653859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR)
36663859Sml29623 tdi.bits.ldw.pkt_size_err = 1;
36673859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW)
36683859Sml29623 tdi.bits.ldw.tx_ring_oflow = 1;
36693859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR)
36703859Sml29623 tdi.bits.ldw.conf_part_err = 1;
36713859Sml29623 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR)
36723859Sml29623 tdi.bits.ldw.pkt_part_err = 1;
36735125Sjoycey #if defined(__i386)
36745125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n",
36756929Smisaki tdi.value);
36765125Sjoycey #else
36773859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n",
36786929Smisaki tdi.value);
36795125Sjoycey #endif
36803859Sml29623 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG,
36816929Smisaki chan, tdi.value);
36823859Sml29623
36833859Sml29623 break;
36843859Sml29623 }
36853859Sml29623 }