13859Sml29623 /*
23859Sml29623 * CDDL HEADER START
33859Sml29623 *
43859Sml29623 * The contents of this file are subject to the terms of the
53859Sml29623 * Common Development and Distribution License (the "License").
63859Sml29623 * You may not use this file except in compliance with the License.
73859Sml29623 *
83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93859Sml29623 * or http://www.opensolaris.org/os/licensing.
103859Sml29623 * See the License for the specific language governing permissions
113859Sml29623 * and limitations under the License.
123859Sml29623 *
133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each
143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the
163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying
173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner]
183859Sml29623 *
193859Sml29623 * CDDL HEADER END
203859Sml29623 */
2110577SMichael.Speer@Sun.COM
223859Sml29623 /*
23*11878SVenu.Iyer@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
243859Sml29623 * Use is subject to license terms.
253859Sml29623 */
263859Sml29623
273859Sml29623 #include <sys/nxge/nxge_impl.h>
283859Sml29623 #include <sys/nxge/nxge_rxdma.h>
296495Sspeer #include <sys/nxge/nxge_hio.h>
306495Sspeer
316495Sspeer #if !defined(_BIG_ENDIAN)
326495Sspeer #include <npi_rx_rd32.h>
336495Sspeer #endif
346495Sspeer #include <npi_rx_rd64.h>
356495Sspeer #include <npi_rx_wr64.h>
363859Sml29623
373859Sml29623 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \
386495Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
393859Sml29623 #define NXGE_ACTUAL_RDC(nxgep, rdc) \
403859Sml29623 (rdc + nxgep->pt_config.hw_config.start_rdc)
413859Sml29623
423859Sml29623 /*
433859Sml29623 * Globals: tunable parameters (/etc/system or adb)
443859Sml29623 *
453859Sml29623 */
463859Sml29623 extern uint32_t nxge_rbr_size;
473859Sml29623 extern uint32_t nxge_rcr_size;
483859Sml29623 extern uint32_t nxge_rbr_spare_size;
4911304SJanie.Lu@Sun.COM extern uint16_t nxge_rdc_buf_offset;
503859Sml29623
513859Sml29623 extern uint32_t nxge_mblks_pending;
523859Sml29623
533859Sml29623 /*
543859Sml29623 * Tunable to reduce the amount of time spent in the
553859Sml29623 * ISR doing Rx Processing.
563859Sml29623 */
573859Sml29623 extern uint32_t nxge_max_rx_pkts;
583859Sml29623
593859Sml29623 /*
603859Sml29623 * Tunables to manage the receive buffer blocks.
613859Sml29623 *
623859Sml29623 * nxge_rx_threshold_hi: copy all buffers.
633859Sml29623 * nxge_rx_bcopy_size_type: receive buffer block size type.
643859Sml29623 * nxge_rx_threshold_lo: copy only up to tunable block size type.
653859Sml29623 */
663859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
673859Sml29623 extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
683859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;
693859Sml29623
706611Sml29623 extern uint32_t nxge_cksum_offload;
716495Sspeer
726495Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
736495Sspeer static void nxge_unmap_rxdma(p_nxge_t, int);
743859Sml29623
753859Sml29623 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);
766495Sspeer
776495Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
786495Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int);
793859Sml29623
803859Sml29623 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
813859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
823859Sml29623 uint32_t,
833859Sml29623 p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
843859Sml29623 p_rx_mbox_t *);
853859Sml29623 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
863859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
873859Sml29623
883859Sml29623 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
893859Sml29623 uint16_t,
903859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
913859Sml29623 p_rx_rcr_ring_t *, p_rx_mbox_t *);
923859Sml29623 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
933859Sml29623 p_rx_rcr_ring_t, p_rx_mbox_t);
943859Sml29623
953859Sml29623 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
963859Sml29623 uint16_t,
973859Sml29623 p_nxge_dma_common_t *,
983859Sml29623 p_rx_rbr_ring_t *, uint32_t);
993859Sml29623 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
1003859Sml29623 p_rx_rbr_ring_t);
1013859Sml29623
1023859Sml29623 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
1033859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
1043859Sml29623 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);
1053859Sml29623
1066495Sspeer static mblk_t *
1076495Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);
1083859Sml29623
1093859Sml29623 static void nxge_receive_packet(p_nxge_t,
1103859Sml29623 p_rx_rcr_ring_t,
1113859Sml29623 p_rcr_entry_t,
1123859Sml29623 boolean_t *,
1133859Sml29623 mblk_t **, mblk_t **);
1143859Sml29623
1153859Sml29623 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);
1163859Sml29623
1173859Sml29623 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
1183859Sml29623 static void nxge_freeb(p_rx_msg_t);
1196495Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);
1203859Sml29623
1213859Sml29623 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
1223859Sml29623 uint32_t, uint32_t);
1233859Sml29623
1243859Sml29623 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
1253859Sml29623 p_rx_rbr_ring_t);
1263859Sml29623
1273859Sml29623
1283859Sml29623 static nxge_status_t
1293859Sml29623 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);
1303859Sml29623
1313859Sml29623 nxge_status_t
1323859Sml29623 nxge_rx_port_fatal_err_recover(p_nxge_t);
1333859Sml29623
1346495Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);
1356495Sspeer
1363859Sml29623 nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)1373859Sml29623 nxge_init_rxdma_channels(p_nxge_t nxgep)
1383859Sml29623 {
1397950SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->rx_set;
1408275SEric Cheng int i, count, channel;
1417950SMichael.Speer@Sun.COM nxge_grp_t *group;
1428275SEric Cheng dc_map_t map;
1438275SEric Cheng int dev_gindex;
1443859Sml29623
1453859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));
1463859Sml29623
1476495Sspeer if (!isLDOMguest(nxgep)) {
1486495Sspeer if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
1496495Sspeer cmn_err(CE_NOTE, "hw_start_common");
1506495Sspeer return (NXGE_ERROR);
1516495Sspeer }
1526495Sspeer }
1536495Sspeer
1546495Sspeer /*
1556495Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
1566495Sspeer * We only have 8 hardware RDC tables, but we may have
1576495Sspeer * up to 16 logical (software-defined) groups of RDCS,
1586495Sspeer * if we make use of layer 3 & 4 hardware classification.
1596495Sspeer */
1606495Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
1616495Sspeer if ((1 << i) & set->lg.map) {
1627950SMichael.Speer@Sun.COM group = set->group[i];
1638275SEric Cheng dev_gindex =
1648275SEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
1658275SEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map;
1666495Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
1678275SEric Cheng if ((1 << channel) & map) {
1686495Sspeer if ((nxge_grp_dc_add(nxgep,
1697755SMisaki.Kataoka@Sun.COM group, VP_BOUND_RX, channel)))
1707950SMichael.Speer@Sun.COM goto init_rxdma_channels_exit;
1716495Sspeer }
1726495Sspeer }
1736495Sspeer }
1746495Sspeer if (++count == set->lg.count)
1756495Sspeer break;
1766495Sspeer }
1776495Sspeer
1786495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
1796495Sspeer return (NXGE_OK);
1807950SMichael.Speer@Sun.COM
1817950SMichael.Speer@Sun.COM init_rxdma_channels_exit:
1827950SMichael.Speer@Sun.COM for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
1837950SMichael.Speer@Sun.COM if ((1 << i) & set->lg.map) {
1847950SMichael.Speer@Sun.COM group = set->group[i];
1858275SEric Cheng dev_gindex =
1868275SEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
1878275SEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map;
1888275SEric Cheng for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
1898275SEric Cheng if ((1 << channel) & map) {
1907950SMichael.Speer@Sun.COM nxge_grp_dc_remove(nxgep,
1918275SEric Cheng VP_BOUND_RX, channel);
1927950SMichael.Speer@Sun.COM }
1937950SMichael.Speer@Sun.COM }
1947950SMichael.Speer@Sun.COM }
1957950SMichael.Speer@Sun.COM if (++count == set->lg.count)
1967950SMichael.Speer@Sun.COM break;
1977950SMichael.Speer@Sun.COM }
1987950SMichael.Speer@Sun.COM
1997950SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
2007950SMichael.Speer@Sun.COM return (NXGE_ERROR);
2016495Sspeer }
2026495Sspeer
2036495Sspeer nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge,int channel)2046495Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
2056495Sspeer {
2068400SNicolas.Droux@Sun.COM nxge_status_t status;
2076495Sspeer
2086495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));
2096495Sspeer
2106495Sspeer status = nxge_map_rxdma(nxge, channel);
2113859Sml29623 if (status != NXGE_OK) {
2126495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
2136495Sspeer "<== nxge_init_rxdma: status 0x%x", status));
2143859Sml29623 return (status);
2153859Sml29623 }
2163859Sml29623
2178400SNicolas.Droux@Sun.COM #if defined(sun4v)
2188400SNicolas.Droux@Sun.COM if (isLDOMguest(nxge)) {
2198400SNicolas.Droux@Sun.COM /* set rcr_ring */
2208400SNicolas.Droux@Sun.COM p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];
2218400SNicolas.Droux@Sun.COM
2228400SNicolas.Droux@Sun.COM status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
2238400SNicolas.Droux@Sun.COM if (status != NXGE_OK) {
2248400SNicolas.Droux@Sun.COM nxge_unmap_rxdma(nxge, channel);
2258400SNicolas.Droux@Sun.COM return (status);
2268400SNicolas.Droux@Sun.COM }
2278400SNicolas.Droux@Sun.COM }
2288400SNicolas.Droux@Sun.COM #endif
2298400SNicolas.Droux@Sun.COM
2306495Sspeer status = nxge_rxdma_hw_start(nxge, channel);
2313859Sml29623 if (status != NXGE_OK) {
2326495Sspeer nxge_unmap_rxdma(nxge, channel);
2333859Sml29623 }
2343859Sml29623
2356495Sspeer if (!nxge->statsp->rdc_ksp[channel])
2366495Sspeer nxge_setup_rdc_kstats(nxge, channel);
2376495Sspeer
2386495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL,
2396495Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status));
2403859Sml29623
2413859Sml29623 return (status);
2423859Sml29623 }
2433859Sml29623
2443859Sml29623 void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)2453859Sml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep)
2463859Sml29623 {
2476495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
2486495Sspeer int rdc;
2496495Sspeer
2503859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));
2513859Sml29623
2526495Sspeer if (set->owned.map == 0) {
2536495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
2546495Sspeer "nxge_uninit_rxdma_channels: no channels"));
2556495Sspeer return;
2566495Sspeer }
2576495Sspeer
2586495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
2596495Sspeer if ((1 << rdc) & set->owned.map) {
2606495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
2616495Sspeer }
2626495Sspeer }
2636495Sspeer
2646495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
2656495Sspeer }
2666495Sspeer
2676495Sspeer void
nxge_uninit_rxdma_channel(p_nxge_t nxgep,int channel)2686495Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
2696495Sspeer {
2706495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));
2716495Sspeer
2726495Sspeer if (nxgep->statsp->rdc_ksp[channel]) {
2736495Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]);
2746495Sspeer nxgep->statsp->rdc_ksp[channel] = 0;
2756495Sspeer }
2766495Sspeer
2776495Sspeer nxge_rxdma_hw_stop(nxgep, channel);
2786495Sspeer nxge_unmap_rxdma(nxgep, channel);
2796495Sspeer
2806495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel"));
2813859Sml29623 }
2823859Sml29623
2833859Sml29623 nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep,uint16_t channel)2843859Sml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
2853859Sml29623 {
2863859Sml29623 npi_handle_t handle;
2873859Sml29623 npi_status_t rs = NPI_SUCCESS;
2883859Sml29623 nxge_status_t status = NXGE_OK;
2893859Sml29623
2907812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));
2913859Sml29623
2923859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
2933859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel);
2943859Sml29623
2953859Sml29623 if (rs != NPI_SUCCESS) {
2963859Sml29623 status = NXGE_ERROR | rs;
2973859Sml29623 }
2983859Sml29623
2997812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));
3007812SMichael.Speer@Sun.COM
3013859Sml29623 return (status);
3023859Sml29623 }
3033859Sml29623
3043859Sml29623 void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)3053859Sml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
3063859Sml29623 {
3076495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
3086495Sspeer int rdc;
3093859Sml29623
3103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));
3113859Sml29623
3126495Sspeer if (!isLDOMguest(nxgep)) {
3136495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
3146495Sspeer (void) npi_rxdma_dump_fzc_regs(handle);
3156495Sspeer }
3166495Sspeer
3176495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
3186495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
3196495Sspeer "nxge_rxdma_regs_dump_channels: "
3206495Sspeer "NULL ring pointer(s)"));
3213859Sml29623 return;
3223859Sml29623 }
3236495Sspeer
3246495Sspeer if (set->owned.map == 0) {
3253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
3266495Sspeer "nxge_rxdma_regs_dump_channels: no channels"));
3273859Sml29623 return;
3283859Sml29623 }
3293859Sml29623
3306495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
3316495Sspeer if ((1 << rdc) & set->owned.map) {
3326495Sspeer rx_rbr_ring_t *ring =
3336495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc];
3346495Sspeer if (ring) {
3356495Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc);
3366495Sspeer }
3373859Sml29623 }
3383859Sml29623 }
3393859Sml29623
3403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
3413859Sml29623 }
3423859Sml29623
3433859Sml29623 nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep,uint8_t channel)3443859Sml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
3453859Sml29623 {
3463859Sml29623 npi_handle_t handle;
3473859Sml29623 npi_status_t rs = NPI_SUCCESS;
3483859Sml29623 nxge_status_t status = NXGE_OK;
3493859Sml29623
3503859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));
3513859Sml29623
3523859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3533859Sml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel);
3543859Sml29623
3553859Sml29623 if (rs != NPI_SUCCESS) {
3563859Sml29623 status = NXGE_ERROR | rs;
3573859Sml29623 }
3583859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
3593859Sml29623 return (status);
3603859Sml29623 }
3613859Sml29623
3623859Sml29623 nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep,uint16_t channel,p_rx_dma_ent_msk_t mask_p)3633859Sml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
3643859Sml29623 p_rx_dma_ent_msk_t mask_p)
3653859Sml29623 {
3663859Sml29623 npi_handle_t handle;
3673859Sml29623 npi_status_t rs = NPI_SUCCESS;
3683859Sml29623 nxge_status_t status = NXGE_OK;
3693859Sml29623
3703859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3716929Smisaki "<== nxge_init_rxdma_channel_event_mask"));
3723859Sml29623
3733859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3743859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
3753859Sml29623 if (rs != NPI_SUCCESS) {
3763859Sml29623 status = NXGE_ERROR | rs;
3773859Sml29623 }
3783859Sml29623
3793859Sml29623 return (status);
3803859Sml29623 }
3813859Sml29623
3823859Sml29623 nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep,uint16_t channel,p_rx_dma_ctl_stat_t cs_p)3833859Sml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
3843859Sml29623 p_rx_dma_ctl_stat_t cs_p)
3853859Sml29623 {
3863859Sml29623 npi_handle_t handle;
3873859Sml29623 npi_status_t rs = NPI_SUCCESS;
3883859Sml29623 nxge_status_t status = NXGE_OK;
3893859Sml29623
3903859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
3916929Smisaki "<== nxge_init_rxdma_channel_cntl_stat"));
3923859Sml29623
3933859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
3943859Sml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);
3953859Sml29623
3963859Sml29623 if (rs != NPI_SUCCESS) {
3973859Sml29623 status = NXGE_ERROR | rs;
3983859Sml29623 }
3993859Sml29623
4003859Sml29623 return (status);
4013859Sml29623 }
4023859Sml29623
4036495Sspeer /*
4046495Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc
4056495Sspeer *
4066495Sspeer * Set the default RDC for an RDC Group (Table)
4076495Sspeer *
4086495Sspeer * Arguments:
4096495Sspeer * nxgep
4106495Sspeer * rdcgrp The group to modify
4116495Sspeer * rdc The new default RDC.
4126495Sspeer *
4136495Sspeer * Notes:
4146495Sspeer *
4156495Sspeer * NPI/NXGE function calls:
4166495Sspeer * npi_rxdma_cfg_rdc_table_default_rdc()
4176495Sspeer *
4186495Sspeer * Registers accessed:
4196495Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000
4206495Sspeer *
4216495Sspeer * Context:
4226495Sspeer * Service domain
4236495Sspeer */
4243859Sml29623 nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep,uint8_t rdcgrp,uint8_t rdc)4256495Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc(
4266495Sspeer p_nxge_t nxgep,
4276495Sspeer uint8_t rdcgrp,
4286495Sspeer uint8_t rdc)
4293859Sml29623 {
4303859Sml29623 npi_handle_t handle;
4313859Sml29623 npi_status_t rs = NPI_SUCCESS;
4323859Sml29623 p_nxge_dma_pt_cfg_t p_dma_cfgp;
4333859Sml29623 p_nxge_rdc_grp_t rdc_grp_p;
4343859Sml29623 uint8_t actual_rdcgrp, actual_rdc;
4353859Sml29623
4363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
4376929Smisaki " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
4383859Sml29623 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
4393859Sml29623
4403859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4413859Sml29623
4426495Sspeer /*
4436495Sspeer * This has to be rewritten. Do we even allow this anymore?
4446495Sspeer */
4453859Sml29623 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
4466495Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc);
4476495Sspeer rdc_grp_p->def_rdc = rdc;
4483859Sml29623
4493859Sml29623 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
4503859Sml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);
4513859Sml29623
4526495Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc(
4536929Smisaki handle, actual_rdcgrp, actual_rdc);
4543859Sml29623
4553859Sml29623 if (rs != NPI_SUCCESS) {
4563859Sml29623 return (NXGE_ERROR | rs);
4573859Sml29623 }
4583859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
4596929Smisaki " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
4603859Sml29623 return (NXGE_OK);
4613859Sml29623 }
4623859Sml29623
4633859Sml29623 nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep,uint8_t port,uint8_t rdc)4643859Sml29623 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
4653859Sml29623 {
4663859Sml29623 npi_handle_t handle;
4673859Sml29623
4683859Sml29623 uint8_t actual_rdc;
4693859Sml29623 npi_status_t rs = NPI_SUCCESS;
4703859Sml29623
4713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
4726929Smisaki " ==> nxge_rxdma_cfg_port_default_rdc"));
4733859Sml29623
4743859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4756495Sspeer actual_rdc = rdc; /* XXX Hack! */
4763859Sml29623 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);
4773859Sml29623
4783859Sml29623
4793859Sml29623 if (rs != NPI_SUCCESS) {
4803859Sml29623 return (NXGE_ERROR | rs);
4813859Sml29623 }
4823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
4836929Smisaki " <== nxge_rxdma_cfg_port_default_rdc"));
4843859Sml29623
4853859Sml29623 return (NXGE_OK);
4863859Sml29623 }
4873859Sml29623
4883859Sml29623 nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep,uint8_t channel,uint16_t pkts)4893859Sml29623 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
4903859Sml29623 uint16_t pkts)
4913859Sml29623 {
4923859Sml29623 npi_status_t rs = NPI_SUCCESS;
4933859Sml29623 npi_handle_t handle;
4943859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
4956929Smisaki " ==> nxge_rxdma_cfg_rcr_threshold"));
4963859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
4973859Sml29623
4983859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);
4993859Sml29623
5003859Sml29623 if (rs != NPI_SUCCESS) {
5013859Sml29623 return (NXGE_ERROR | rs);
5023859Sml29623 }
5033859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
5043859Sml29623 return (NXGE_OK);
5053859Sml29623 }
5063859Sml29623
5073859Sml29623 nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep,uint8_t channel,uint16_t tout,uint8_t enable)5083859Sml29623 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
5093859Sml29623 uint16_t tout, uint8_t enable)
5103859Sml29623 {
5113859Sml29623 npi_status_t rs = NPI_SUCCESS;
5123859Sml29623 npi_handle_t handle;
5133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
5143859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
5153859Sml29623 if (enable == 0) {
5163859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
5173859Sml29623 } else {
5183859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
5196929Smisaki tout);
5203859Sml29623 }
5213859Sml29623
5223859Sml29623 if (rs != NPI_SUCCESS) {
5233859Sml29623 return (NXGE_ERROR | rs);
5243859Sml29623 }
5253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
5263859Sml29623 return (NXGE_OK);
5273859Sml29623 }
5283859Sml29623
5293859Sml29623 nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep,uint16_t channel,p_rx_rbr_ring_t rbr_p,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t mbox_p)5303859Sml29623 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
5313859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
5323859Sml29623 {
5333859Sml29623 npi_handle_t handle;
5343859Sml29623 rdc_desc_cfg_t rdc_desc;
5353859Sml29623 p_rcrcfig_b_t cfgb_p;
5363859Sml29623 npi_status_t rs = NPI_SUCCESS;
5373859Sml29623
5383859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
5393859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
5403859Sml29623 /*
5413859Sml29623 * Use configuration data composed at init time.
5423859Sml29623 * Write to hardware the receive ring configurations.
5433859Sml29623 */
5443859Sml29623 rdc_desc.mbox_enable = 1;
5453859Sml29623 rdc_desc.mbox_addr = mbox_p->mbox_addr;
5463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
5476929Smisaki "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
5486929Smisaki mbox_p->mbox_addr, rdc_desc.mbox_addr));
5493859Sml29623
5503859Sml29623 rdc_desc.rbr_len = rbr_p->rbb_max;
5513859Sml29623 rdc_desc.rbr_addr = rbr_p->rbr_addr;
5523859Sml29623
5533859Sml29623 switch (nxgep->rx_bksize_code) {
5543859Sml29623 case RBR_BKSIZE_4K:
5553859Sml29623 rdc_desc.page_size = SIZE_4KB;
5563859Sml29623 break;
5573859Sml29623 case RBR_BKSIZE_8K:
5583859Sml29623 rdc_desc.page_size = SIZE_8KB;
5593859Sml29623 break;
5603859Sml29623 case RBR_BKSIZE_16K:
5613859Sml29623 rdc_desc.page_size = SIZE_16KB;
5623859Sml29623 break;
5633859Sml29623 case RBR_BKSIZE_32K:
5643859Sml29623 rdc_desc.page_size = SIZE_32KB;
5653859Sml29623 break;
5663859Sml29623 }
5673859Sml29623
5683859Sml29623 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
5693859Sml29623 rdc_desc.valid0 = 1;
5703859Sml29623
5713859Sml29623 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
5723859Sml29623 rdc_desc.valid1 = 1;
5733859Sml29623
5743859Sml29623 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
5753859Sml29623 rdc_desc.valid2 = 1;
5763859Sml29623
5773859Sml29623 rdc_desc.full_hdr = rcr_p->full_hdr_flag;
5783859Sml29623 rdc_desc.offset = rcr_p->sw_priv_hdr_len;
5793859Sml29623
5803859Sml29623 rdc_desc.rcr_len = rcr_p->comp_size;
5813859Sml29623 rdc_desc.rcr_addr = rcr_p->rcr_addr;
5823859Sml29623
5833859Sml29623 cfgb_p = &(rcr_p->rcr_cfgb);
5843859Sml29623 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
5856495Sspeer /* For now, disable this timeout in a guest domain. */
5866495Sspeer if (isLDOMguest(nxgep)) {
5876495Sspeer rdc_desc.rcr_timeout = 0;
5886495Sspeer rdc_desc.rcr_timeout_enable = 0;
5896495Sspeer } else {
5906495Sspeer rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
5916495Sspeer rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
5926495Sspeer }
5933859Sml29623
5943859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
5956929Smisaki "rbr_len qlen %d pagesize code %d rcr_len %d",
5966929Smisaki rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
5973859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
5986929Smisaki "size 0 %d size 1 %d size 2 %d",
5996929Smisaki rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
6006929Smisaki rbr_p->npi_pkt_buf_size2));
6013859Sml29623
60211304SJanie.Lu@Sun.COM if (nxgep->niu_hw_type == NIU_HW_TYPE_RF)
60311304SJanie.Lu@Sun.COM rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
60411304SJanie.Lu@Sun.COM &rdc_desc, B_TRUE);
60511304SJanie.Lu@Sun.COM else
60611304SJanie.Lu@Sun.COM rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc,
60711304SJanie.Lu@Sun.COM &rdc_desc, B_FALSE);
6083859Sml29623 if (rs != NPI_SUCCESS) {
6093859Sml29623 return (NXGE_ERROR | rs);
6103859Sml29623 }
6113859Sml29623
6123859Sml29623 /*
6133859Sml29623 * Enable the timeout and threshold.
6143859Sml29623 */
6153859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
6166929Smisaki rdc_desc.rcr_threshold);
6173859Sml29623 if (rs != NPI_SUCCESS) {
6183859Sml29623 return (NXGE_ERROR | rs);
6193859Sml29623 }
6203859Sml29623
6213859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
6226929Smisaki rdc_desc.rcr_timeout);
6233859Sml29623 if (rs != NPI_SUCCESS) {
6243859Sml29623 return (NXGE_ERROR | rs);
6253859Sml29623 }
6263859Sml29623
6279232SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) {
6289232SMichael.Speer@Sun.COM /* Enable the DMA */
6299232SMichael.Speer@Sun.COM rs = npi_rxdma_cfg_rdc_enable(handle, channel);
6309232SMichael.Speer@Sun.COM if (rs != NPI_SUCCESS) {
6319232SMichael.Speer@Sun.COM return (NXGE_ERROR | rs);
6329232SMichael.Speer@Sun.COM }
6333859Sml29623 }
6343859Sml29623
6353859Sml29623 /* Kick the DMA engine. */
6363859Sml29623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
6379232SMichael.Speer@Sun.COM
6389232SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) {
6399232SMichael.Speer@Sun.COM /* Clear the rbr empty bit */
6409232SMichael.Speer@Sun.COM (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
6419232SMichael.Speer@Sun.COM }
6423859Sml29623
6433859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));
6443859Sml29623
6453859Sml29623 return (NXGE_OK);
6463859Sml29623 }
6473859Sml29623
6483859Sml29623 nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep,uint16_t channel)6493859Sml29623 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
6503859Sml29623 {
6513859Sml29623 npi_handle_t handle;
6523859Sml29623 npi_status_t rs = NPI_SUCCESS;
6533859Sml29623
6543859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
6553859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
6563859Sml29623
6573859Sml29623 /* disable the DMA */
6583859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel);
6593859Sml29623 if (rs != NPI_SUCCESS) {
6603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
6616929Smisaki "<== nxge_disable_rxdma_channel:failed (0x%x)",
6626929Smisaki rs));
6633859Sml29623 return (NXGE_ERROR | rs);
6643859Sml29623 }
6653859Sml29623
6663859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
6673859Sml29623 return (NXGE_OK);
6683859Sml29623 }
6693859Sml29623
6703859Sml29623 nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep,uint8_t channel)6713859Sml29623 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
6723859Sml29623 {
6733859Sml29623 npi_handle_t handle;
6743859Sml29623 nxge_status_t status = NXGE_OK;
6753859Sml29623
6763859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
6776929Smisaki "<== nxge_init_rxdma_channel_rcrflush"));
6783859Sml29623
6793859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
6803859Sml29623 npi_rxdma_rdc_rcr_flush(handle, channel);
6813859Sml29623
6823859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
6836929Smisaki "<== nxge_init_rxdma_channel_rcrflsh"));
6843859Sml29623 return (status);
6853859Sml29623
6863859Sml29623 }
6873859Sml29623
6883859Sml29623 #define MID_INDEX(l, r) ((r + l + 1) >> 1)
6893859Sml29623
6903859Sml29623 #define TO_LEFT -1
6913859Sml29623 #define TO_RIGHT 1
6923859Sml29623 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
6933859Sml29623 #define BOTH_LEFT (TO_LEFT + TO_LEFT)
6943859Sml29623 #define IN_MIDDLE (TO_RIGHT + TO_LEFT)
6953859Sml29623 #define NO_HINT 0xffffffff
6963859Sml29623
6973859Sml29623 /*ARGSUSED*/
6983859Sml29623 nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep,p_rx_rbr_ring_t rbr_p,uint8_t pktbufsz_type,uint64_t * pkt_buf_addr_pp,uint64_t ** pkt_buf_addr_p,uint32_t * bufoffset,uint32_t * msg_index)6993859Sml29623 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
7003859Sml29623 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
7013859Sml29623 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
7023859Sml29623 {
7033859Sml29623 int bufsize;
7043859Sml29623 uint64_t pktbuf_pp;
7053859Sml29623 uint64_t dvma_addr;
7063859Sml29623 rxring_info_t *ring_info;
7073859Sml29623 int base_side, end_side;
7083859Sml29623 int r_index, l_index, anchor_index;
7093859Sml29623 int found, search_done;
7103859Sml29623 uint32_t offset, chunk_size, block_size, page_size_mask;
7113859Sml29623 uint32_t chunk_index, block_index, total_index;
7123859Sml29623 int max_iterations, iteration;
7133859Sml29623 rxbuf_index_info_t *bufinfo;
7143859Sml29623
7153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));
7163859Sml29623
7173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
7186929Smisaki "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
7196929Smisaki pkt_buf_addr_pp,
7206929Smisaki pktbufsz_type));
7215125Sjoycey #if defined(__i386)
7225125Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
7235125Sjoycey #else
7243859Sml29623 pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
7255125Sjoycey #endif
7263859Sml29623
7273859Sml29623 switch (pktbufsz_type) {
7283859Sml29623 case 0:
7293859Sml29623 bufsize = rbr_p->pkt_buf_size0;
7303859Sml29623 break;
7313859Sml29623 case 1:
7323859Sml29623 bufsize = rbr_p->pkt_buf_size1;
7333859Sml29623 break;
7343859Sml29623 case 2:
7353859Sml29623 bufsize = rbr_p->pkt_buf_size2;
7363859Sml29623 break;
7373859Sml29623 case RCR_SINGLE_BLOCK:
7383859Sml29623 bufsize = 0;
7393859Sml29623 anchor_index = 0;
7403859Sml29623 break;
7413859Sml29623 default:
7423859Sml29623 return (NXGE_ERROR);
7433859Sml29623 }
7443859Sml29623
7453859Sml29623 if (rbr_p->num_blocks == 1) {
7463859Sml29623 anchor_index = 0;
7473859Sml29623 ring_info = rbr_p->ring_info;
7483859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
7493859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
7506929Smisaki "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
7516929Smisaki "buf_pp $%p btype %d anchor_index %d "
7526929Smisaki "bufinfo $%p",
7536929Smisaki pkt_buf_addr_pp,
7546929Smisaki pktbufsz_type,
7556929Smisaki anchor_index,
7566929Smisaki bufinfo));
7573859Sml29623
7583859Sml29623 goto found_index;
7593859Sml29623 }
7603859Sml29623
7613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
7626929Smisaki "==> nxge_rxbuf_pp_to_vp: "
7636929Smisaki "buf_pp $%p btype %d anchor_index %d",
7646929Smisaki pkt_buf_addr_pp,
7656929Smisaki pktbufsz_type,
7666929Smisaki anchor_index));
7673859Sml29623
7683859Sml29623 ring_info = rbr_p->ring_info;
7693859Sml29623 found = B_FALSE;
7703859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
7713859Sml29623 iteration = 0;
7723859Sml29623 max_iterations = ring_info->max_iterations;
7733859Sml29623 /*
7743859Sml29623 * First check if this block has been seen
7753859Sml29623 * recently. This is indicated by a hint which
7763859Sml29623 * is initialized when the first buffer of the block
7773859Sml29623 * is seen. The hint is reset when the last buffer of
7783859Sml29623 * the block has been processed.
7793859Sml29623 * As three block sizes are supported, three hints
7803859Sml29623 * are kept. The idea behind the hints is that once
7813859Sml29623 * the hardware uses a block for a buffer of that
7823859Sml29623 * size, it will use it exclusively for that size
7833859Sml29623 * and will use it until it is exhausted. It is assumed
7843859Sml29623 * that there would a single block being used for the same
7853859Sml29623 * buffer sizes at any given time.
7863859Sml29623 */
7873859Sml29623 if (ring_info->hint[pktbufsz_type] != NO_HINT) {
7883859Sml29623 anchor_index = ring_info->hint[pktbufsz_type];
7893859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr;
7903859Sml29623 chunk_size = bufinfo[anchor_index].buf_size;
7913859Sml29623 if ((pktbuf_pp >= dvma_addr) &&
7926929Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) {
7933859Sml29623 found = B_TRUE;
7943859Sml29623 /*
7953859Sml29623 * check if this is the last buffer in the block
7963859Sml29623 * If so, then reset the hint for the size;
7973859Sml29623 */
7983859Sml29623
7993859Sml29623 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
8003859Sml29623 ring_info->hint[pktbufsz_type] = NO_HINT;
8013859Sml29623 }
8023859Sml29623 }
8033859Sml29623
8043859Sml29623 if (found == B_FALSE) {
8053859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
8066929Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)"
8076929Smisaki "buf_pp $%p btype %d anchor_index %d",
8086929Smisaki pkt_buf_addr_pp,
8096929Smisaki pktbufsz_type,
8106929Smisaki anchor_index));
8113859Sml29623
8123859Sml29623 /*
8133859Sml29623 * This is the first buffer of the block of this
8143859Sml29623 * size. Need to search the whole information
8153859Sml29623 * array.
8163859Sml29623 * the search algorithm uses a binary tree search
8173859Sml29623 * algorithm. It assumes that the information is
8183859Sml29623 * already sorted with increasing order
8193859Sml29623 * info[0] < info[1] < info[2] .... < info[n-1]
8203859Sml29623 * where n is the size of the information array
8213859Sml29623 */
8223859Sml29623 r_index = rbr_p->num_blocks - 1;
8233859Sml29623 l_index = 0;
8243859Sml29623 search_done = B_FALSE;
8253859Sml29623 anchor_index = MID_INDEX(r_index, l_index);
8263859Sml29623 while (search_done == B_FALSE) {
8273859Sml29623 if ((r_index == l_index) ||
8286929Smisaki (iteration >= max_iterations))
8293859Sml29623 search_done = B_TRUE;
8303859Sml29623 end_side = TO_RIGHT; /* to the right */
8313859Sml29623 base_side = TO_LEFT; /* to the left */
8323859Sml29623 /* read the DVMA address information and sort it */
8333859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr;
8343859Sml29623 chunk_size = bufinfo[anchor_index].buf_size;
8353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
8366929Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)"
8376929Smisaki "buf_pp $%p btype %d "
8386929Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p",
8396929Smisaki pkt_buf_addr_pp,
8406929Smisaki pktbufsz_type,
8416929Smisaki anchor_index,
8426929Smisaki chunk_size,
8436929Smisaki dvma_addr));
8443859Sml29623
8453859Sml29623 if (pktbuf_pp >= dvma_addr)
8463859Sml29623 base_side = TO_RIGHT; /* to the right */
8473859Sml29623 if (pktbuf_pp < (dvma_addr + chunk_size))
8483859Sml29623 end_side = TO_LEFT; /* to the left */
8493859Sml29623
8503859Sml29623 switch (base_side + end_side) {
8516929Smisaki case IN_MIDDLE:
8526929Smisaki /* found */
8536929Smisaki found = B_TRUE;
8546929Smisaki search_done = B_TRUE;
8556929Smisaki if ((pktbuf_pp + bufsize) <
8566929Smisaki (dvma_addr + chunk_size))
8576929Smisaki ring_info->hint[pktbufsz_type] =
8586929Smisaki bufinfo[anchor_index].buf_index;
8596929Smisaki break;
8606929Smisaki case BOTH_RIGHT:
8616929Smisaki /* not found: go to the right */
8626929Smisaki l_index = anchor_index + 1;
8636929Smisaki anchor_index = MID_INDEX(r_index, l_index);
8646929Smisaki break;
8656929Smisaki
8666929Smisaki case BOTH_LEFT:
8676929Smisaki /* not found: go to the left */
8686929Smisaki r_index = anchor_index - 1;
8696929Smisaki anchor_index = MID_INDEX(r_index, l_index);
8706929Smisaki break;
8716929Smisaki default: /* should not come here */
8726929Smisaki return (NXGE_ERROR);
8733859Sml29623 }
8743859Sml29623 iteration++;
8753859Sml29623 }
8763859Sml29623
8773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
8786929Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)"
8796929Smisaki "buf_pp $%p btype %d anchor_index %d",
8806929Smisaki pkt_buf_addr_pp,
8816929Smisaki pktbufsz_type,
8826929Smisaki anchor_index));
8833859Sml29623 }
8843859Sml29623
8853859Sml29623 if (found == B_FALSE) {
8863859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
8876929Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)"
8886929Smisaki "buf_pp $%p btype %d anchor_index %d",
8896929Smisaki pkt_buf_addr_pp,
8906929Smisaki pktbufsz_type,
8916929Smisaki anchor_index));
8923859Sml29623 return (NXGE_ERROR);
8933859Sml29623 }
8943859Sml29623
8953859Sml29623 found_index:
8963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
8976929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
8986929Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d",
8996929Smisaki pkt_buf_addr_pp,
9006929Smisaki pktbufsz_type,
9016929Smisaki bufsize,
9026929Smisaki anchor_index));
9033859Sml29623
9043859Sml29623 /* index of the first block in this chunk */
9053859Sml29623 chunk_index = bufinfo[anchor_index].start_index;
9063859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr;
9073859Sml29623 page_size_mask = ring_info->block_size_mask;
9083859Sml29623
9093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
9106929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
9116929Smisaki "buf_pp $%p btype %d bufsize %d "
9126929Smisaki "anchor_index %d chunk_index %d dvma $%p",
9136929Smisaki pkt_buf_addr_pp,
9146929Smisaki pktbufsz_type,
9156929Smisaki bufsize,
9166929Smisaki anchor_index,
9176929Smisaki chunk_index,
9186929Smisaki dvma_addr));
9193859Sml29623
9203859Sml29623 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
9213859Sml29623 block_size = rbr_p->block_size; /* System block(page) size */
9223859Sml29623
9233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
9246929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
9256929Smisaki "buf_pp $%p btype %d bufsize %d "
9266929Smisaki "anchor_index %d chunk_index %d dvma $%p "
9276929Smisaki "offset %d block_size %d",
9286929Smisaki pkt_buf_addr_pp,
9296929Smisaki pktbufsz_type,
9306929Smisaki bufsize,
9316929Smisaki anchor_index,
9326929Smisaki chunk_index,
9336929Smisaki dvma_addr,
9346929Smisaki offset,
9356929Smisaki block_size));
9363859Sml29623
9373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));
9383859Sml29623
9393859Sml29623 block_index = (offset / block_size); /* index within chunk */
9403859Sml29623 total_index = chunk_index + block_index;
9413859Sml29623
9423859Sml29623
9433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
9446929Smisaki "==> nxge_rxbuf_pp_to_vp: "
9456929Smisaki "total_index %d dvma_addr $%p "
9466929Smisaki "offset %d block_size %d "
9476929Smisaki "block_index %d ",
9486929Smisaki total_index, dvma_addr,
9496929Smisaki offset, block_size,
9506929Smisaki block_index));
9515125Sjoycey #if defined(__i386)
9525125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
9536929Smisaki (uint32_t)offset);
9545125Sjoycey #else
9555125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
9566929Smisaki (uint64_t)offset);
9575125Sjoycey #endif
9583859Sml29623
9593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
9606929Smisaki "==> nxge_rxbuf_pp_to_vp: "
9616929Smisaki "total_index %d dvma_addr $%p "
9626929Smisaki "offset %d block_size %d "
9636929Smisaki "block_index %d "
9646929Smisaki "*pkt_buf_addr_p $%p",
9656929Smisaki total_index, dvma_addr,
9666929Smisaki offset, block_size,
9676929Smisaki block_index,
9686929Smisaki *pkt_buf_addr_p));
9693859Sml29623
9703859Sml29623
9713859Sml29623 *msg_index = total_index;
9723859Sml29623 *bufoffset = (offset & page_size_mask);
9733859Sml29623
9743859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
9756929Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: "
9766929Smisaki "msg_index %d bufoffset_index %d",
9776929Smisaki *msg_index,
9786929Smisaki *bufoffset));
9793859Sml29623
9803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp"));
9813859Sml29623
9823859Sml29623 return (NXGE_OK);
9833859Sml29623 }
9843859Sml29623
9853859Sml29623 /*
9863859Sml29623 * used by quick sort (qsort) function
9873859Sml29623 * to perform comparison
9883859Sml29623 */
9893859Sml29623 static int
nxge_sort_compare(const void * p1,const void * p2)9903859Sml29623 nxge_sort_compare(const void *p1, const void *p2)
9913859Sml29623 {
9923859Sml29623
9933859Sml29623 rxbuf_index_info_t *a, *b;
9943859Sml29623
9953859Sml29623 a = (rxbuf_index_info_t *)p1;
9963859Sml29623 b = (rxbuf_index_info_t *)p2;
9973859Sml29623
9983859Sml29623 if (a->dvma_addr > b->dvma_addr)
9993859Sml29623 return (1);
10003859Sml29623 if (a->dvma_addr < b->dvma_addr)
10013859Sml29623 return (-1);
10023859Sml29623 return (0);
10033859Sml29623 }
10043859Sml29623
10053859Sml29623
10063859Sml29623
10073859Sml29623 /*
10083859Sml29623 * grabbed this sort implementation from common/syscall/avl.c
10093859Sml29623 *
10103859Sml29623 */
10113859Sml29623 /*
10123859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
10133859Sml29623 * v = Ptr to array/vector of objs
10143859Sml29623 * n = # objs in the array
10153859Sml29623 * s = size of each obj (must be multiples of a word size)
10163859Sml29623 * f = ptr to function to compare two objs
 * returns -1 = less than, 0 = equal, 1 = greater than
10183859Sml29623 */
10193859Sml29623 void
nxge_ksort(caddr_t v,int n,int s,int (* f)())10203859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)())
10213859Sml29623 {
10223859Sml29623 int g, i, j, ii;
10233859Sml29623 unsigned int *p1, *p2;
10243859Sml29623 unsigned int tmp;
10253859Sml29623
10263859Sml29623 /* No work to do */
10273859Sml29623 if (v == NULL || n <= 1)
10283859Sml29623 return;
10293859Sml29623 /* Sanity check on arguments */
10303859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
10313859Sml29623 ASSERT(s > 0);
10323859Sml29623
10333859Sml29623 for (g = n / 2; g > 0; g /= 2) {
10343859Sml29623 for (i = g; i < n; i++) {
10353859Sml29623 for (j = i - g; j >= 0 &&
10366929Smisaki (*f)(v + j * s, v + (j + g) * s) == 1;
10376929Smisaki j -= g) {
10383859Sml29623 p1 = (unsigned *)(v + j * s);
10393859Sml29623 p2 = (unsigned *)(v + (j + g) * s);
10403859Sml29623 for (ii = 0; ii < s / 4; ii++) {
10413859Sml29623 tmp = *p1;
10423859Sml29623 *p1++ = *p2;
10433859Sml29623 *p2++ = tmp;
10443859Sml29623 }
10453859Sml29623 }
10463859Sml29623 }
10473859Sml29623 }
10483859Sml29623 }
10493859Sml29623
10503859Sml29623 /*
10513859Sml29623 * Initialize data structures required for rxdma
10523859Sml29623 * buffer dvma->vmem address lookup
10533859Sml29623 */
10543859Sml29623 /*ARGSUSED*/
10553859Sml29623 static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep,p_rx_rbr_ring_t rbrp)10563859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
10573859Sml29623 {
10583859Sml29623
10593859Sml29623 int index;
10603859Sml29623 rxring_info_t *ring_info;
10613859Sml29623 int max_iteration = 0, max_index = 0;
10623859Sml29623
10633859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));
10643859Sml29623
10653859Sml29623 ring_info = rbrp->ring_info;
10663859Sml29623 ring_info->hint[0] = NO_HINT;
10673859Sml29623 ring_info->hint[1] = NO_HINT;
10683859Sml29623 ring_info->hint[2] = NO_HINT;
10693859Sml29623 max_index = rbrp->num_blocks;
10703859Sml29623
10713859Sml29623 /* read the DVMA address information and sort it */
10723859Sml29623 /* do init of the information array */
10733859Sml29623
10743859Sml29623
10753859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
10766929Smisaki " nxge_rxbuf_index_info_init Sort ptrs"));
10773859Sml29623
10783859Sml29623 /* sort the array */
10793859Sml29623 nxge_ksort((void *)ring_info->buffer, max_index,
10806929Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare);
10813859Sml29623
10823859Sml29623
10833859Sml29623
10843859Sml29623 for (index = 0; index < max_index; index++) {
10853859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
10866929Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d "
10876929Smisaki " ioaddr $%p kaddr $%p size %x",
10886929Smisaki index, ring_info->buffer[index].dvma_addr,
10896929Smisaki ring_info->buffer[index].kaddr,
10906929Smisaki ring_info->buffer[index].buf_size));
10913859Sml29623 }
10923859Sml29623
10933859Sml29623 max_iteration = 0;
10943859Sml29623 while (max_index >= (1ULL << max_iteration))
10953859Sml29623 max_iteration++;
10963859Sml29623 ring_info->max_iterations = max_iteration + 1;
10973859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
10986929Smisaki " nxge_rxbuf_index_info_init Find max iter %d",
10996929Smisaki ring_info->max_iterations));
11003859Sml29623
11013859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
11023859Sml29623 return (NXGE_OK);
11033859Sml29623 }
11043859Sml29623
11053859Sml29623 /* ARGSUSED */
/*
 * Dump one receive completion ring (RCR) entry for debugging.
 * Compiled to an empty body unless NXGE_DEBUG is defined.
 */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef NXGE_DEBUG

	uint32_t bptr;
	uint64_t pp;

	/* Raw (unshifted) packet buffer address field of the entry. */
	bptr = entry_p->bits.hdw.pkt_buf_addr;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "\trcr entry $%p "
	    "\trcr entry 0x%0llx "
	    "\trcr entry 0x%08x "
	    "\trcr entry 0x%08x "
	    "\tvalue 0x%0llx\n"
	    "\tmulti = %d\n"
	    "\tpkt_type = 0x%x\n"
	    "\tzero_copy = %d\n"
	    "\tnoport = %d\n"
	    "\tpromis = %d\n"
	    "\terror = 0x%04x\n"
	    "\tdcf_err = 0x%01x\n"
	    "\tl2_len = %d\n"
	    "\tpktbufsize = %d\n"
	    "\tpkt_buf_addr = $%p\n"
	    "\tpkt_buf_addr (<< 6) = $%p\n",
	    entry_p,
	    *(int64_t *)entry_p,
	    *(int32_t *)entry_p,
	    /*
	     * NOTE(review): this adds a 32-BYTE offset, which looks
	     * like it was meant to print the second 32-bit word of the
	     * 8-byte entry (offset 4) — confirm before relying on it.
	     */
	    *(int32_t *)((char *)entry_p + 32),
	    entry_p->value,
	    entry_p->bits.hdw.multi,
	    entry_p->bits.hdw.pkt_type,
	    entry_p->bits.hdw.zero_copy,
	    entry_p->bits.hdw.noport,
	    entry_p->bits.hdw.promis,
	    entry_p->bits.hdw.error,
	    entry_p->bits.hdw.dcf_err,
	    entry_p->bits.hdw.l2_len,
	    entry_p->bits.hdw.pktbufsz,
	    bptr,
	    entry_p->bits.ldw.pkt_buf_addr));

	/* Reconstruct the full DMA address from the masked/shifted field. */
	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}
11573859Sml29623
/*
 * Dump the key RX DMA registers for channel <rdc>: RBR head pointer,
 * RBR queue length, RCR tail pointer, and RCR queue length.
 * Output goes to the console via printf(); debugging aid only.
 */
void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
	npi_handle_t		handle;
	rbr_stat_t		rbr_stat;
	addr44_t		hd_addr;
	addr44_t		tail_addr;
	uint16_t		qlen;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* RBR head */
	hd_addr.addr = 0;
	(void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
	/* 32-bit kernels must narrow the 44-bit address before %p. */
#if defined(__i386)
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)(uint32_t)hd_addr.addr);
#else
	printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
	    (void *)hd_addr.addr);
#endif

	/* RBR stats */
	(void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
	printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

	/* RCR tail */
	tail_addr.addr = 0;
	(void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
#if defined(__i386)
	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
	    (void *)(uint32_t)tail_addr.addr);
#else
	printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
	    (void *)tail_addr.addr);
#endif

	/* RCR qlen */
	(void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
	printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}
12053859Sml29623
12063859Sml29623 nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep,boolean_t enable)12073859Sml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
12083859Sml29623 {
12096495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
12106495Sspeer nxge_status_t status;
12116495Sspeer npi_status_t rs;
12126495Sspeer int rdc;
12133859Sml29623
12143859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
12156929Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable));
12163859Sml29623
12173859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
12183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
12196495Sspeer "<== nxge_rxdma_mode: not initialized"));
12203859Sml29623 return (NXGE_ERROR);
12213859Sml29623 }
12226495Sspeer
12236495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
12246495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
12256495Sspeer "<== nxge_tx_port_fatal_err_recover: "
12266495Sspeer "NULL ring pointer(s)"));
12273859Sml29623 return (NXGE_ERROR);
12283859Sml29623 }
12293859Sml29623
12306495Sspeer if (set->owned.map == 0) {
12316495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL,
12326495Sspeer "nxge_rxdma_regs_dump_channels: no channels"));
12336495Sspeer return (NULL);
12346495Sspeer }
12356495Sspeer
12366495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
12376495Sspeer if ((1 << rdc) & set->owned.map) {
12386495Sspeer rx_rbr_ring_t *ring =
12396495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc];
12406495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
12416495Sspeer if (ring) {
12426495Sspeer if (enable) {
12436495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
12446495Sspeer "==> nxge_rxdma_hw_mode: "
12456495Sspeer "channel %d (enable)", rdc));
12466495Sspeer rs = npi_rxdma_cfg_rdc_enable
12476495Sspeer (handle, rdc);
12486495Sspeer } else {
12496495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
12506495Sspeer "==> nxge_rxdma_hw_mode: "
12516495Sspeer "channel %d disable)", rdc));
12526495Sspeer rs = npi_rxdma_cfg_rdc_disable
12536495Sspeer (handle, rdc);
12546495Sspeer }
12556495Sspeer }
12563859Sml29623 }
12573859Sml29623 }
12583859Sml29623
12593859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);
12603859Sml29623
12613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
12626929Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status));
12633859Sml29623
12643859Sml29623 return (status);
12653859Sml29623 }
12663859Sml29623
12673859Sml29623 void
nxge_rxdma_enable_channel(p_nxge_t nxgep,uint16_t channel)12683859Sml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
12693859Sml29623 {
12703859Sml29623 npi_handle_t handle;
12713859Sml29623
12723859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
12736929Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel));
12743859Sml29623
12753859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
12763859Sml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel);
12773859Sml29623
12783859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
12793859Sml29623 }
12803859Sml29623
12813859Sml29623 void
nxge_rxdma_disable_channel(p_nxge_t nxgep,uint16_t channel)12823859Sml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
12833859Sml29623 {
12843859Sml29623 npi_handle_t handle;
12853859Sml29623
12863859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL,
12876929Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel));
12883859Sml29623
12893859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
12903859Sml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel);
12913859Sml29623
12923859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
12933859Sml29623 }
12943859Sml29623
/*
 * Bring up the receive side of the device.
 * Order matters: start the RX DMA channels first, then enable the
 * RX MAC so traffic only flows once the DMA engines are ready.
 */
void
nxge_hw_start_rx(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
	(void) nxge_rx_mac_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}
13053859Sml29623
13063859Sml29623 /*ARGSUSED*/
13073859Sml29623 void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)13083859Sml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep)
13093859Sml29623 {
13106495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
13116495Sspeer int rdc;
13123859Sml29623
13133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));
13143859Sml29623
13156495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
13166495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
13176495Sspeer "<== nxge_tx_port_fatal_err_recover: "
13186495Sspeer "NULL ring pointer(s)"));
13193859Sml29623 return;
13203859Sml29623 }
13213859Sml29623
13226495Sspeer if (set->owned.map == 0) {
13233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
13246495Sspeer "nxge_rxdma_regs_dump_channels: no channels"));
13253859Sml29623 return;
13263859Sml29623 }
13276495Sspeer
13286495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
13296495Sspeer if ((1 << rdc) & set->owned.map) {
13306495Sspeer rx_rbr_ring_t *ring =
13316495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc];
13326495Sspeer if (ring) {
13336495Sspeer nxge_rxdma_hw_stop(nxgep, rdc);
13346495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL,
13356929Smisaki "==> nxge_fixup_rxdma_rings: "
13366929Smisaki "channel %d ring $%px",
13376929Smisaki rdc, ring));
133810218SMichael.Speer@Sun.COM (void) nxge_rxdma_fix_channel(nxgep, rdc);
13396495Sspeer }
13406495Sspeer }
13413859Sml29623 }
13423859Sml29623
13433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
13443859Sml29623 }
13453859Sml29623
13463859Sml29623 void
nxge_rxdma_fix_channel(p_nxge_t nxgep,uint16_t channel)13473859Sml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
13483859Sml29623 {
13493859Sml29623 int ndmas;
13503859Sml29623 p_rx_rbr_rings_t rx_rbr_rings;
13513859Sml29623 p_rx_rbr_ring_t *rbr_rings;
13523859Sml29623 p_rx_rcr_rings_t rx_rcr_rings;
13533859Sml29623 p_rx_rcr_ring_t *rcr_rings;
13543859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p;
13553859Sml29623 p_rx_mbox_t *rx_mbox_p;
13563859Sml29623 p_nxge_dma_pool_t dma_buf_poolp;
13573859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp;
13583859Sml29623 p_rx_rbr_ring_t rbrp;
13593859Sml29623 p_rx_rcr_ring_t rcrp;
13603859Sml29623 p_rx_mbox_t mboxp;
13613859Sml29623 p_nxge_dma_common_t dmap;
13623859Sml29623 nxge_status_t status = NXGE_OK;
13633859Sml29623
136410218SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
13653859Sml29623
13663859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel);
13673859Sml29623
13683859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p;
13693859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p;
13703859Sml29623
13713859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
13723859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
137310218SMichael.Speer@Sun.COM "<== nxge_rxdma_fix_channel: buf not allocated"));
13743859Sml29623 return;
13753859Sml29623 }
13763859Sml29623
13773859Sml29623 ndmas = dma_buf_poolp->ndmas;
13783859Sml29623 if (!ndmas) {
13793859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
138010218SMichael.Speer@Sun.COM "<== nxge_rxdma_fix_channel: no dma allocated"));
13813859Sml29623 return;
13823859Sml29623 }
13833859Sml29623
13843859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings;
13853859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings;
13863859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings;
13873859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings;
13883859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
13893859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
13903859Sml29623
13913859Sml29623 /* Reinitialize the receive block and completion rings */
139210218SMichael.Speer@Sun.COM rbrp = (p_rx_rbr_ring_t)rbr_rings[channel],
139310218SMichael.Speer@Sun.COM rcrp = (p_rx_rcr_ring_t)rcr_rings[channel],
139410218SMichael.Speer@Sun.COM mboxp = (p_rx_mbox_t)rx_mbox_p[channel];
13953859Sml29623
13963859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
13973859Sml29623 rbrp->rbr_rd_index = 0;
13983859Sml29623 rcrp->comp_rd_index = 0;
13993859Sml29623 rcrp->comp_wt_index = 0;
14003859Sml29623
14013859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
14023859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength);
14033859Sml29623
14043859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel,
14056929Smisaki rbrp, rcrp, mboxp);
14063859Sml29623 if (status != NXGE_OK) {
140710218SMichael.Speer@Sun.COM goto nxge_rxdma_fix_channel_fail;
14088275SEric Cheng }
14098275SEric Cheng
14108275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL,
141110218SMichael.Speer@Sun.COM "<== nxge_rxdma_fix_channel: success (0x%08x)", status));
141210218SMichael.Speer@Sun.COM return;
141310218SMichael.Speer@Sun.COM
141410218SMichael.Speer@Sun.COM nxge_rxdma_fix_channel_fail:
14158275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL,
141610218SMichael.Speer@Sun.COM "<== nxge_rxdma_fix_channel: failed (0x%08x)", status));
14173859Sml29623 }
14183859Sml29623
14193859Sml29623 p_rx_rbr_ring_t
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep,uint16_t channel)14203859Sml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
14213859Sml29623 {
14226495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
14236495Sspeer nxge_channel_t rdc;
14243859Sml29623
14253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
14266929Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
14273859Sml29623
14286495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
14296495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
14306495Sspeer "<== nxge_rxdma_get_rbr_ring: "
14316495Sspeer "NULL ring pointer(s)"));
14323859Sml29623 return (NULL);
14333859Sml29623 }
14346495Sspeer
14356495Sspeer if (set->owned.map == 0) {
14363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
14376495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels"));
14383859Sml29623 return (NULL);
14393859Sml29623 }
14403859Sml29623
14416495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
14426495Sspeer if ((1 << rdc) & set->owned.map) {
14436495Sspeer rx_rbr_ring_t *ring =
14446495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc];
14456495Sspeer if (ring) {
14466495Sspeer if (channel == ring->rdc) {
14476495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL,
14486495Sspeer "==> nxge_rxdma_get_rbr_ring: "
14496495Sspeer "channel %d ring $%p", rdc, ring));
14506495Sspeer return (ring);
14516495Sspeer }
14526495Sspeer }
14533859Sml29623 }
14543859Sml29623 }
14553859Sml29623
14563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
14576929Smisaki "<== nxge_rxdma_get_rbr_ring: not found"));
14583859Sml29623
14593859Sml29623 return (NULL);
14603859Sml29623 }
14613859Sml29623
14623859Sml29623 p_rx_rcr_ring_t
nxge_rxdma_get_rcr_ring(p_nxge_t nxgep,uint16_t channel)14633859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
14643859Sml29623 {
14656495Sspeer nxge_grp_set_t *set = &nxgep->rx_set;
14666495Sspeer nxge_channel_t rdc;
14673859Sml29623
14683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
14696929Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
14703859Sml29623
14716495Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
14726495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL,
14736495Sspeer "<== nxge_rxdma_get_rcr_ring: "
14746495Sspeer "NULL ring pointer(s)"));
14753859Sml29623 return (NULL);
14763859Sml29623 }
14776495Sspeer
14786495Sspeer if (set->owned.map == 0) {
14793859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
14806495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels"));
14813859Sml29623 return (NULL);
14823859Sml29623 }
14833859Sml29623
14846495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
14856495Sspeer if ((1 << rdc) & set->owned.map) {
14866495Sspeer rx_rcr_ring_t *ring =
14876495Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc];
14886495Sspeer if (ring) {
14896495Sspeer if (channel == ring->rdc) {
14906495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL,
14916495Sspeer "==> nxge_rxdma_get_rcr_ring: "
14926495Sspeer "channel %d ring $%p", rdc, ring));
14936495Sspeer return (ring);
14946495Sspeer }
14956495Sspeer }
14963859Sml29623 }
14973859Sml29623 }
14983859Sml29623
14993859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
15006929Smisaki "<== nxge_rxdma_get_rcr_ring: not found"));
15013859Sml29623
15023859Sml29623 return (NULL);
15033859Sml29623 }
15043859Sml29623
15053859Sml29623 /*
15063859Sml29623 * Static functions start here.
15073859Sml29623 */
/*
 * Allocate one rx_msg_t and its backing buffer, and wrap the buffer
 * in an mblk via desballoc().
 *
 * If dmabuf_p is non-NULL the buffer is carved from the front of that
 * DMA chunk and dmabuf_p is advanced past it (kaddrp, ioaddr_pp,
 * alength, offset and the DMA cookie are all updated); otherwise an
 * ordinary kmem buffer of <size> bytes is allocated.
 *
 * Returns the new rx_msg_t (ref_cnt 1, free_func set to nxge_freeb),
 * or NULL on allocation failure.
 *
 * NOTE(review): on the dmabuf_p path, the pool pointers are advanced
 * before desballoc() can fail; that consumed slice is not given back
 * on the failure path — presumably the whole pool is torn down by the
 * caller in that case. Confirm against callers.
 */
static p_rx_msg_t
nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p)
{
	p_rx_msg_t nxge_mp = NULL;
	p_nxge_dma_common_t dmamsg_p;
	uchar_t *buffer;

	nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
	if (nxge_mp == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "Allocation of a rx msg failed."));
		goto nxge_allocb_exit;
	}

	nxge_mp->use_buf_pool = B_FALSE;
	if (dmabuf_p) {
		/* Carve the buffer out of the supplied DMA chunk. */
		nxge_mp->use_buf_pool = B_TRUE;
		dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma;
		*dmamsg_p = *dmabuf_p;
		dmamsg_p->nblocks = 1;
		dmamsg_p->block_size = size;
		dmamsg_p->alength = size;
		buffer = (uchar_t *)dmabuf_p->kaddrp;

		/* Advance the pool descriptor past the consumed slice. */
		dmabuf_p->kaddrp = (void *)
		    ((char *)dmabuf_p->kaddrp + size);
		dmabuf_p->ioaddr_pp = (void *)
		    ((char *)dmabuf_p->ioaddr_pp + size);
		dmabuf_p->alength -= size;
		dmabuf_p->offset += size;
		dmabuf_p->dma_cookie.dmac_laddress += size;
		dmabuf_p->dma_cookie.dmac_size -= size;

	} else {
		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
		if (buffer == NULL) {
			NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
			    "Allocation of a receive page failed."));
			goto nxge_allocb_fail1;
		}
	}

	nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb);
	if (nxge_mp->rx_mblk_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed."));
		goto nxge_allocb_fail2;
	}

	nxge_mp->buffer = buffer;
	nxge_mp->block_size = size;
	/* nxge_freeb() runs when the last mblk reference is freed. */
	nxge_mp->freeb.free_func = (void (*)())nxge_freeb;
	nxge_mp->freeb.free_arg = (caddr_t)nxge_mp;
	nxge_mp->ref_cnt = 1;
	nxge_mp->free = B_TRUE;
	nxge_mp->rx_use_bcopy = B_FALSE;

	atomic_inc_32(&nxge_mblks_pending);

	goto nxge_allocb_exit;

nxge_allocb_fail2:
	/* Pool-backed buffers belong to the pool; only free kmem buffers. */
	if (!nxge_mp->use_buf_pool) {
		KMEM_FREE(buffer, size);
	}

nxge_allocb_fail1:
	KMEM_FREE(nxge_mp, sizeof (rx_msg_t));
	nxge_mp = NULL;

nxge_allocb_exit:
	return (nxge_mp);
}
15803859Sml29623
15813859Sml29623 p_mblk_t
nxge_dupb(p_rx_msg_t nxge_mp,uint_t offset,size_t size)15823859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
15833859Sml29623 {
15843859Sml29623 p_mblk_t mp;
15853859Sml29623
15863859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb"));
15873859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p "
15886929Smisaki "offset = 0x%08X "
15896929Smisaki "size = 0x%08X",
15906929Smisaki nxge_mp, offset, size));
15913859Sml29623
15923859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size,
15936929Smisaki 0, &nxge_mp->freeb);
15943859Sml29623 if (mp == NULL) {
15953859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
15963859Sml29623 goto nxge_dupb_exit;
15973859Sml29623 }
15983859Sml29623 atomic_inc_32(&nxge_mp->ref_cnt);
15993859Sml29623
16003859Sml29623
16013859Sml29623 nxge_dupb_exit:
16023859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
16036929Smisaki nxge_mp));
16043859Sml29623 return (mp);
16053859Sml29623 }
16063859Sml29623
16073859Sml29623 p_mblk_t
nxge_dupb_bcopy(p_rx_msg_t nxge_mp,uint_t offset,size_t size)16083859Sml29623 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
16093859Sml29623 {
16103859Sml29623 p_mblk_t mp;
16113859Sml29623 uchar_t *dp;
16123859Sml29623
16133859Sml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
16143859Sml29623 if (mp == NULL) {
16153859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
16163859Sml29623 goto nxge_dupb_bcopy_exit;
16173859Sml29623 }
16183859Sml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
16193859Sml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size);
16203859Sml29623 mp->b_wptr = dp + size;
16213859Sml29623
16223859Sml29623 nxge_dupb_bcopy_exit:
16233859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
16246929Smisaki nxge_mp));
16253859Sml29623 return (mp);
16263859Sml29623 }
16273859Sml29623
void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
	p_rx_msg_t rx_msg_p);

/*
 * nxge_post_page: Return a receive buffer to the hardware by writing its
 * (shifted) DMA address into the next RBR descriptor slot and kicking the
 * RBR so the chip sees one new buffer.
 */
void
nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));

	/* Reuse this buffer: reset per-use accounting on the message. */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		/* Buffer no longer counts against the bcopy threshold. */
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}

	/*
	 * Advance the RBR write index (with wrap) and store the buffer's
	 * shifted DMA address into that descriptor slot. post_lock
	 * serializes concurrent posters on this ring.
	 */
	MUTEX_ENTER(&rx_rbr_p->post_lock);
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
	MUTEX_EXIT(&rx_rbr_p->post_lock);
	/* Tell the hardware one more buffer is available on this RDC. */
	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
	    rx_rbr_p->rdc, 1);

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "<== nxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
}
16643859Sml29623
/*
 * nxge_freeb: Free routine installed in each rx_msg_t's frtn_t (see
 * nxge_allocb). Called by STREAMS when an mblk referencing the buffer is
 * freed, and directly by the driver. Drops one reference; on the last
 * reference the buffer (and possibly the whole RBR ring) is destroyed,
 * otherwise the buffer may be reposted to the hardware.
 */
void
nxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t size;
	uchar_t *buffer = NULL;
	int ref_cnt;
	boolean_t free_state = B_FALSE;

	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference: tear the buffer down. */
		atomic_dec_32(&nxge_mblks_pending);
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/* Pool-carved buffers are freed with the pool, not here. */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));

		if (ring) {
			/*
			 * Decrement the receive buffer ring's reference
			 * count, too.
			 */
			atomic_dec_32(&ring->rbr_ref_cnt);

			/*
			 * Free the receive buffer ring, if
			 * 1. all the receive buffers have been freed, and
			 * 2. the ring has already been unmapped
			 *    (rbr_state == RBR_UNMAPPED), i.e. teardown
			 *    is only waiting on loaned-out buffers.
			 */
			if (ring->rbr_ref_cnt == 0 &&
			    ring->rbr_state == RBR_UNMAPPED) {
				/*
				 * Free receive data buffers,
				 * buffer index information
				 * (rxring_info) and
				 * the message block ring.
				 * NOTE(review): this debug message prints
				 * rx_msg_p after it was KMEM_FREEd above —
				 * pointer value only, but worth confirming
				 * it is intended.
				 */
				NXGE_DEBUG_MSG((NULL, RX_CTL,
				    "nxge_freeb:rx_msg_p = $%p "
				    "(block pending %d) free buffers",
				    rx_msg_p, nxge_mblks_pending));
				nxge_rxdma_databuf_free(ring);
				if (ring->ring_info) {
					KMEM_FREE(ring->ring_info,
					    sizeof (rxring_info_t));
				}

				if (ring->rx_msg_ring) {
					KMEM_FREE(ring->rx_msg_ring,
					    ring->tnblocks *
					    sizeof (p_rx_msg_t));
				}
				KMEM_FREE(ring, sizeof (*ring));
			}
		}
		return;
	}

	/*
	 * Repost buffer: we released the upstream loan (ref back to 1)
	 * and the buffer was marked free, so hand it back to the
	 * hardware if the ring is still accepting posts.
	 */
	if (free_state && (ref_cnt == 1) && ring) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}
17563859Sml29623
/*
 * nxge_rx_intr: Receive DMA interrupt service routine.
 *
 * arg1		The logical device (ldv) that raised the interrupt.
 * arg2		The nxge soft state (may disagree with ldvp->nxgep; the
 *		ldv's pointer wins).
 *
 * Processes packets from the channel's RCR ring (unless the ring is in
 * polling mode), handles error events, acks the W1C status bits, and
 * arms or disarms the logical device group as appropriate. Any packet
 * chain collected is delivered to the MAC layer after the ring lock is
 * dropped. Always returns DDI_INTR_CLAIMED.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp;
	uint8_t channel;
	npi_handle_t handle;
	rx_dma_ctl_stat_t cs;
	p_rx_rcr_ring_t rcrp;
	mblk_t *mp = NULL;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));
		return (DDI_INTR_CLAIMED);
	}

	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}

	/* Nothing to do until the hardware and MAC are up. */
	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: interface not started or intialized"));
		return (DDI_INTR_CLAIMED);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * Get the PIO handle.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * Get the ring to enable us to process packets.
	 */
	rcrp = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];

	/*
	 * The RCR ring lock must be held when packets
	 * are being processed and the hardware registers are
	 * being read or written to prevent race condition
	 * among the interrupt thread, the polling thread
	 * (will cause fatal errors such as rcrincon bit set)
	 * and the setting of the poll_flag.
	 */
	MUTEX_ENTER(&rcrp->lock);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;

	if (!isLDOMguest(nxgep) && (!rcrp->started)) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_intr: channel is not started"));

		/*
		 * We received an interrupt before the ring is started:
		 * ack the status bits and re-enable the mailbox update
		 * interrupt, but process nothing.
		 */
		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
		    &cs.value);
		cs.value &= RX_DMA_CTL_STAT_WR1C;
		cs.bits.hdw.mex = 1;
		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
		    cs.value);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
		MUTEX_EXIT(&rcrp->lock);
		return (DDI_INTR_CLAIMED);
	}

	ASSERT(rcrp->ldgp == ldgp);
	ASSERT(rcrp->ldvp == ldvp);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	/* In polling mode the poll thread pulls packets, not the ISR. */
	if (!rcrp->poll_flag) {
		mp = nxge_rx_pkts(nxgep, rcrp, cs, -1);
	}

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, channel, cs);
	}

	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */
	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = rcrp->poll_flag ? 0 : 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * If the polling mode is enabled, disable the interrupt.
	 */
	if (rcrp->poll_flag) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p "
		    "(disabling interrupts)", channel, ldgp, ldvp));

		/*
		 * Disarm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				ldgp->arm = B_FALSE;
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;
				mgm.value = 0;
				mgm.bits.ldw.arm = 0;
				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}
	} else {
		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		if (ldgp->nldvs == 1) {
			if (isLDOMguest(nxgep)) {
				nxge_hio_ldgimgn(nxgep, ldgp);
			} else {
				ldgimgm_t mgm;

				mgm.value = 0;
				mgm.bits.ldw.arm = 1;
				mgm.bits.ldw.timer = ldgp->ldg_timer;

				NXGE_REG_WR64(handle,
				    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
				    mgm.value);
			}
		}

		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_intr: rdc %d ldgp $%p "
		    "exiting ISR (and call mac_rx_ring)", channel, ldgp));
	}
	MUTEX_EXIT(&rcrp->lock);

	/* Deliver outside the ring lock to avoid lock-order issues. */
	if (mp != NULL) {
		mac_rx_ring(nxgep->mach, rcrp->rcr_mac_handle, mp,
		    rcrp->rcr_gen_num);
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED"));
	return (DDI_INTR_CLAIMED);
}
19463859Sml29623
/*
 * nxge_rx_pkts: Main packet receive processing function.
 *
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count (nxge_max_rx_pkts).
 *
 * A chain of message blocks will be created as result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets were removed from the hardware queue.
 *
 * bytes_to_pickup limits the total bytes gathered per call; -1 means
 * no byte limit (interrupt path).
 *
 * The RCR ring lock is held when entering this function.
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t handle;
	uint8_t channel;
	uint32_t comp_rd_index;
	p_rcr_entry_t rcr_desc_rd_head_p;
	p_rcr_entry_t rcr_desc_rd_head_pp;
	p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
	uint16_t qlen, nrcr_read, npkt_read;
	uint32_t qlen_hw;
	boolean_t multi;
	rcrcfig_b_t rcr_cfg_b;
	int totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


	/* Read the hardware's count of queued completion entries. */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (The jumbo or multi packet will be counted as only one
	 * packets and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes: chain whole frames via b_next,
		 * and the segments of a multi-entry frame via b_cont.
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(nmp);
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
				totallen += MBLKL(nmp);
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
				totallen += MBLKL(mp_cont);
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* A completed (non-multi) entry finishes one packet. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* Stop early once the poll-mode byte budget is met. */
		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup)) {
			break;
		}
	}

	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
	/*
	 * Re-program the RCR interrupt timeout/threshold if the global
	 * tunables changed, clamping to the supported minimums.
	 */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {

		rcr_p->intr_timeout = (nxgep->intr_timeout <
		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
		    nxgep->intr_timeout;

		rcr_p->intr_threshold = (nxgep->intr_threshold <
		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
		    nxgep->intr_threshold;

		rcr_cfg_b.value = 0x0ULL;
		rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
	 */

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return"
	    "channel %d", rcr_p->rdc));

	return (head_mp);
}
21683859Sml29623
21693859Sml29623 void
nxge_receive_packet(p_nxge_t nxgep,p_rx_rcr_ring_t rcr_p,p_rcr_entry_t rcr_desc_rd_head_p,boolean_t * multi_p,mblk_t ** mp,mblk_t ** mp_cont)21703859Sml29623 nxge_receive_packet(p_nxge_t nxgep,
21713859Sml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
21723859Sml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
21733859Sml29623 {
21743859Sml29623 p_mblk_t nmp = NULL;
21753859Sml29623 uint64_t multi;
21763859Sml29623 uint64_t dcf_err;
21773859Sml29623 uint8_t channel;
21783859Sml29623
21793859Sml29623 boolean_t first_entry = B_TRUE;
21803859Sml29623 boolean_t is_tcp_udp = B_FALSE;
21813859Sml29623 boolean_t buffer_free = B_FALSE;
21823859Sml29623 boolean_t error_send_up = B_FALSE;
21833859Sml29623 uint8_t error_type;
21843859Sml29623 uint16_t l2_len;
21853859Sml29623 uint16_t skip_len;
21863859Sml29623 uint8_t pktbufsz_type;
21873859Sml29623 uint64_t rcr_entry;
21883859Sml29623 uint64_t *pkt_buf_addr_pp;
21893859Sml29623 uint64_t *pkt_buf_addr_p;
21903859Sml29623 uint32_t buf_offset;
21913859Sml29623 uint32_t bsize;
21923859Sml29623 uint32_t error_disp_cnt;
21933859Sml29623 uint32_t msg_index;
21943859Sml29623 p_rx_rbr_ring_t rx_rbr_p;
21953859Sml29623 p_rx_msg_t *rx_msg_ring_p;
21963859Sml29623 p_rx_msg_t rx_msg_p;
21973859Sml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0;
21983859Sml29623 nxge_status_t status = NXGE_OK;
21993859Sml29623 boolean_t is_valid = B_FALSE;
22003859Sml29623 p_nxge_rx_ring_stats_t rdc_stats;
22013859Sml29623 uint32_t bytes_read;
22023859Sml29623 uint64_t pkt_type;
22033859Sml29623 uint64_t frag;
22046028Ssbehera boolean_t pkt_too_long_err = B_FALSE;
22053859Sml29623 #ifdef NXGE_DEBUG
22063859Sml29623 int dump_len;
22073859Sml29623 #endif
22083859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
22093859Sml29623 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
22103859Sml29623
22113859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
22123859Sml29623
22133859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK);
22143859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK);
22153859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
22163859Sml29623
22173859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
22183859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK);
22193859Sml29623
22203859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
22213859Sml29623
22223859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
22236929Smisaki RCR_PKTBUFSZ_SHIFT);
22245125Sjoycey #if defined(__i386)
22255125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
22266929Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
22275125Sjoycey #else
22283859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
22296929Smisaki RCR_PKT_BUF_ADDR_SHIFT);
22305125Sjoycey #endif
22313859Sml29623
22323859Sml29623 channel = rcr_p->rdc;
22333859Sml29623
22343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
22356929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
22366929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
22376929Smisaki "error_type 0x%x pkt_type 0x%x "
22386929Smisaki "pktbufsz_type %d ",
22396929Smisaki rcr_desc_rd_head_p,
22406929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len,
22416929Smisaki multi,
22426929Smisaki error_type,
22436929Smisaki pkt_type,
22446929Smisaki pktbufsz_type));
22453859Sml29623
22463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
22476929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx "
22486929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx "
22496929Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p,
22506929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len,
22516929Smisaki multi,
22526929Smisaki error_type,
22536929Smisaki pkt_type));
22543859Sml29623
22553859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
22566929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx "
22576929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
22586929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len));
22593859Sml29623
22603859Sml29623 /* get the stats ptr */
22613859Sml29623 rdc_stats = rcr_p->rdc_stats;
22623859Sml29623
22633859Sml29623 if (!l2_len) {
22643859Sml29623
22653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
22666929Smisaki "<== nxge_receive_packet: failed: l2 length is 0."));
22673859Sml29623 return;
22683859Sml29623 }
22693859Sml29623
22706028Ssbehera /*
22718275SEric Cheng * Software workaround for BMAC hardware limitation that allows
22726028Ssbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406
22736028Ssbehera * instead of 0x2400 for jumbo.
22746028Ssbehera */
22756028Ssbehera if (l2_len > nxgep->mac.maxframesize) {
22766028Ssbehera pkt_too_long_err = B_TRUE;
22776028Ssbehera }
22786028Ssbehera
22794185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. */
22804185Sspeer l2_len -= ETHERFCSL;
22814185Sspeer
22823859Sml29623 /* shift 6 bits to get the full io address */
22835125Sjoycey #if defined(__i386)
22845125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
22856929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL);
22865125Sjoycey #else
22873859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
22886929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL);
22895125Sjoycey #endif
22903859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
22916929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx "
22926929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
22936929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len));
22943859Sml29623
22953859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p;
22963859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
22973859Sml29623
22983859Sml29623 if (first_entry) {
22993859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
23006929Smisaki RXDMA_HDR_SIZE_DEFAULT);
23013859Sml29623
23023859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
23036929Smisaki "==> nxge_receive_packet: first entry 0x%016llx "
23046929Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d",
23056929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len,
23066929Smisaki hdr_size));
23073859Sml29623 }
23083859Sml29623
23093859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock);
23103859Sml29623
23113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
23126929Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx "
23136929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
23146929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len));
23153859Sml29623
23163859Sml29623 /*
23173859Sml29623 * Packet buffer address in the completion entry points
23183859Sml29623 * to the starting buffer address (offset 0).
23193859Sml29623 * Use the starting buffer address to locate the corresponding
23203859Sml29623 * kernel address.
23213859Sml29623 */
23223859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p,
23236929Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
23246929Smisaki &buf_offset,
23256929Smisaki &msg_index);
23263859Sml29623
23273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
23286929Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx "
23296929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
23306929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len));
23313859Sml29623
23323859Sml29623 if (status != NXGE_OK) {
23333859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock);
23343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
23356929Smisaki "<== nxge_receive_packet: found vaddr failed %d",
23366929Smisaki status));
23373859Sml29623 return;
23383859Sml29623 }
23393859Sml29623
23403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23416929Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx "
23426929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
23436929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len));
23443859Sml29623
23453859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23466929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
23476929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
23486929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
23493859Sml29623
23503859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index];
23513859Sml29623
23523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23536929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx "
23546929Smisaki "full pkt_buf_addr_pp $%p l2_len %d",
23556929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
23563859Sml29623
23573859Sml29623 switch (pktbufsz_type) {
23583859Sml29623 case RCR_PKTBUFSZ_0:
23593859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes;
23603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23616929Smisaki "==> nxge_receive_packet: 0 buf %d", bsize));
23623859Sml29623 break;
23633859Sml29623 case RCR_PKTBUFSZ_1:
23643859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes;
23653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23666929Smisaki "==> nxge_receive_packet: 1 buf %d", bsize));
23673859Sml29623 break;
23683859Sml29623 case RCR_PKTBUFSZ_2:
23693859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes;
23703859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
23716929Smisaki "==> nxge_receive_packet: 2 buf %d", bsize));
23723859Sml29623 break;
23733859Sml29623 case RCR_SINGLE_BLOCK:
23743859Sml29623 bsize = rx_msg_p->block_size;
23753859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
23766929Smisaki "==> nxge_receive_packet: single %d", bsize));
23773859Sml29623
23783859Sml29623 break;
23793859Sml29623 default:
23803859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock);
23813859Sml29623 return;
23823859Sml29623 }
23833859Sml29623
238411304SJanie.Lu@Sun.COM switch (nxge_rdc_buf_offset) {
238511304SJanie.Lu@Sun.COM case SW_OFFSET_NO_OFFSET:
238611304SJanie.Lu@Sun.COM sw_offset_bytes = 0;
238711304SJanie.Lu@Sun.COM break;
238811304SJanie.Lu@Sun.COM case SW_OFFSET_64:
238911304SJanie.Lu@Sun.COM sw_offset_bytes = 64;
239011304SJanie.Lu@Sun.COM break;
239111304SJanie.Lu@Sun.COM case SW_OFFSET_128:
239211304SJanie.Lu@Sun.COM sw_offset_bytes = 128;
239311304SJanie.Lu@Sun.COM break;
239411304SJanie.Lu@Sun.COM case SW_OFFSET_192:
239511304SJanie.Lu@Sun.COM sw_offset_bytes = 192;
239611304SJanie.Lu@Sun.COM break;
239711304SJanie.Lu@Sun.COM case SW_OFFSET_256:
239811304SJanie.Lu@Sun.COM sw_offset_bytes = 256;
239911304SJanie.Lu@Sun.COM break;
240011304SJanie.Lu@Sun.COM case SW_OFFSET_320:
240111304SJanie.Lu@Sun.COM sw_offset_bytes = 320;
240211304SJanie.Lu@Sun.COM break;
240311304SJanie.Lu@Sun.COM case SW_OFFSET_384:
240411304SJanie.Lu@Sun.COM sw_offset_bytes = 384;
240511304SJanie.Lu@Sun.COM break;
240611304SJanie.Lu@Sun.COM case SW_OFFSET_448:
240711304SJanie.Lu@Sun.COM sw_offset_bytes = 448;
240811304SJanie.Lu@Sun.COM break;
240911304SJanie.Lu@Sun.COM default:
241011304SJanie.Lu@Sun.COM sw_offset_bytes = 0;
241111304SJanie.Lu@Sun.COM break;
241211304SJanie.Lu@Sun.COM }
241311304SJanie.Lu@Sun.COM
24143859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
24156929Smisaki (buf_offset + sw_offset_bytes),
24166929Smisaki (hdr_size + l2_len),
24176929Smisaki DDI_DMA_SYNC_FORCPU);
24183859Sml29623
24193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
24206929Smisaki "==> nxge_receive_packet: after first dump:usage count"));
24213859Sml29623
24223859Sml29623 if (rx_msg_p->cur_usage_cnt == 0) {
24233859Sml29623 if (rx_rbr_p->rbr_use_bcopy) {
24243859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed);
24253859Sml29623 if (rx_rbr_p->rbr_consumed <
24266929Smisaki rx_rbr_p->rbr_threshold_hi) {
24273859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 ||
24286929Smisaki ((rx_rbr_p->rbr_consumed >=
24296929Smisaki rx_rbr_p->rbr_threshold_lo) &&
24306929Smisaki (rx_rbr_p->rbr_bufsize_type >=
24316929Smisaki pktbufsz_type))) {
24323859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE;
24333859Sml29623 }
24343859Sml29623 } else {
24353859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE;
24363859Sml29623 }
24373859Sml29623 }
24383859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
24396929Smisaki "==> nxge_receive_packet: buf %d (new block) ",
24406929Smisaki bsize));
24413859Sml29623
24423859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type;
24433859Sml29623 rx_msg_p->pkt_buf_size = bsize;
24443859Sml29623 rx_msg_p->cur_usage_cnt = 1;
24453859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) {
24463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
24476929Smisaki "==> nxge_receive_packet: buf %d "
24486929Smisaki "(single block) ",
24496929Smisaki bsize));
24503859Sml29623 /*
24513859Sml29623 * Buffer can be reused once the free function
24523859Sml29623 * is called.
24533859Sml29623 */
24543859Sml29623 rx_msg_p->max_usage_cnt = 1;
24553859Sml29623 buffer_free = B_TRUE;
24563859Sml29623 } else {
24573859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize;
24583859Sml29623 if (rx_msg_p->max_usage_cnt == 1) {
24593859Sml29623 buffer_free = B_TRUE;
24603859Sml29623 }
24613859Sml29623 }
24623859Sml29623 } else {
24633859Sml29623 rx_msg_p->cur_usage_cnt++;
24643859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
24653859Sml29623 buffer_free = B_TRUE;
24663859Sml29623 }
24673859Sml29623 }
24683859Sml29623
24693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
24703859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
24716929Smisaki msg_index, l2_len,
24726929Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
24733859Sml29623
24746028Ssbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) {
24753859Sml29623 rdc_stats->ierrors++;
24763859Sml29623 if (dcf_err) {
24773859Sml29623 rdc_stats->dcf_err++;
24783859Sml29623 #ifdef NXGE_DEBUG
24793859Sml29623 if (!rdc_stats->dcf_err) {
24803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
24813859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr"
24823859Sml29623 " 0x%llx", channel, rcr_entry));
24833859Sml29623 }
24843859Sml29623 #endif
24853859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
24866929Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR);
24876028Ssbehera } else if (pkt_too_long_err) {
24886028Ssbehera rdc_stats->pkt_too_long_err++;
24896028Ssbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:"
24906028Ssbehera " channel %d packet length [%d] > "
24916028Ssbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL,
24926028Ssbehera nxgep->mac.maxframesize));
24933859Sml29623 } else {
24943859Sml29623 /* Update error stats */
24953859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX;
24963859Sml29623 rdc_stats->errlog.compl_err_type = error_type;
24973859Sml29623
24983859Sml29623 switch (error_type) {
24995523Syc148097 /*
25005523Syc148097 * Do not send FMA ereport for RCR_L2_ERROR and
25015523Syc148097 * RCR_L4_CSUM_ERROR because most likely they indicate
25025523Syc148097 * back pressure rather than HW failures.
25035523Syc148097 */
25045165Syc148097 case RCR_L2_ERROR:
25055165Syc148097 rdc_stats->l2_err++;
25065165Syc148097 if (rdc_stats->l2_err <
25075165Syc148097 error_disp_cnt) {
25085165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25095165Syc148097 " nxge_receive_packet:"
25105165Syc148097 " channel %d RCR L2_ERROR",
25115165Syc148097 channel));
25125165Syc148097 }
25135165Syc148097 break;
25145165Syc148097 case RCR_L4_CSUM_ERROR:
25155165Syc148097 error_send_up = B_TRUE;
25165165Syc148097 rdc_stats->l4_cksum_err++;
25175165Syc148097 if (rdc_stats->l4_cksum_err <
25185165Syc148097 error_disp_cnt) {
25193859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25205165Syc148097 " nxge_receive_packet:"
25215165Syc148097 " channel %d"
25225165Syc148097 " RCR L4_CSUM_ERROR", channel));
25235165Syc148097 }
25245165Syc148097 break;
25255523Syc148097 /*
25265523Syc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
25275523Syc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same
25285523Syc148097 * FFLP and ZCP errors that have been reported by
25295523Syc148097 * nxge_fflp.c and nxge_zcp.c.
25305523Syc148097 */
25315165Syc148097 case RCR_FFLP_SOFT_ERROR:
25325165Syc148097 error_send_up = B_TRUE;
25335165Syc148097 rdc_stats->fflp_soft_err++;
25345165Syc148097 if (rdc_stats->fflp_soft_err <
25355165Syc148097 error_disp_cnt) {
25365165Syc148097 NXGE_ERROR_MSG((nxgep,
25375165Syc148097 NXGE_ERR_CTL,
25385165Syc148097 " nxge_receive_packet:"
25395165Syc148097 " channel %d"
25405165Syc148097 " RCR FFLP_SOFT_ERROR", channel));
25415165Syc148097 }
25425165Syc148097 break;
25435165Syc148097 case RCR_ZCP_SOFT_ERROR:
25445165Syc148097 error_send_up = B_TRUE;
25455165Syc148097 rdc_stats->fflp_soft_err++;
25465165Syc148097 if (rdc_stats->zcp_soft_err <
25475165Syc148097 error_disp_cnt)
25485165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25495165Syc148097 " nxge_receive_packet: Channel %d"
25505165Syc148097 " RCR ZCP_SOFT_ERROR", channel));
25515165Syc148097 break;
25525165Syc148097 default:
25535165Syc148097 rdc_stats->rcr_unknown_err++;
25545165Syc148097 if (rdc_stats->rcr_unknown_err
25555165Syc148097 < error_disp_cnt) {
25565165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25575165Syc148097 " nxge_receive_packet: Channel %d"
25585165Syc148097 " RCR entry 0x%llx error 0x%x",
25595165Syc148097 rcr_entry, channel, error_type));
25605165Syc148097 }
25615165Syc148097 break;
25623859Sml29623 }
25633859Sml29623 }
25643859Sml29623
25653859Sml29623 /*
25663859Sml29623 * Update and repost buffer block if max usage
25673859Sml29623 * count is reached.
25683859Sml29623 */
25693859Sml29623 if (error_send_up == B_FALSE) {
25704874Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt);
25713859Sml29623 if (buffer_free == B_TRUE) {
25723859Sml29623 rx_msg_p->free = B_TRUE;
25733859Sml29623 }
25743859Sml29623
25753859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock);
25763859Sml29623 nxge_freeb(rx_msg_p);
25773859Sml29623 return;
25783859Sml29623 }
25793859Sml29623 }
25803859Sml29623
25813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
25826929Smisaki "==> nxge_receive_packet: DMA sync second "));
25833859Sml29623
25845165Syc148097 bytes_read = rcr_p->rcvd_pkt_bytes;
25853859Sml29623 skip_len = sw_offset_bytes + hdr_size;
25863859Sml29623 if (!rx_msg_p->rx_use_bcopy) {
25874874Sml29623 /*
25884874Sml29623 * For loaned up buffers, the driver reference count
25894874Sml29623 * will be incremented first and then the free state.
25904874Sml29623 */
25915165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
25925165Syc148097 if (first_entry) {
25935165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len];
25945165Syc148097 if (l2_len < bsize - skip_len) {
25955165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len];
25965165Syc148097 } else {
25975165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize
25985165Syc148097 - skip_len];
25995165Syc148097 }
26005165Syc148097 } else {
26015165Syc148097 if (l2_len - bytes_read < bsize) {
26025165Syc148097 nmp->b_wptr =
26035165Syc148097 &nmp->b_rptr[l2_len - bytes_read];
26045165Syc148097 } else {
26055165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize];
26065165Syc148097 }
26075165Syc148097 }
26085165Syc148097 }
26093859Sml29623 } else {
26105165Syc148097 if (first_entry) {
26115165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
26125165Syc148097 l2_len < bsize - skip_len ?
26135165Syc148097 l2_len : bsize - skip_len);
26145165Syc148097 } else {
26155165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset,
26165165Syc148097 l2_len - bytes_read < bsize ?
26175165Syc148097 l2_len - bytes_read : bsize);
26185165Syc148097 }
26193859Sml29623 }
26203859Sml29623 if (nmp != NULL) {
26217145Syc148097 if (first_entry) {
26227145Syc148097 /*
26237145Syc148097 * Jumbo packets may be received with more than one
26247145Syc148097 * buffer, increment ipackets for the first entry only.
26257145Syc148097 */
26267145Syc148097 rdc_stats->ipackets++;
26277145Syc148097
26287145Syc148097 /* Update ibytes for kstat. */
26297145Syc148097 rdc_stats->ibytes += skip_len
26307145Syc148097 + l2_len < bsize ? l2_len : bsize;
26317145Syc148097 /*
26327145Syc148097 * Update the number of bytes read so far for the
26337145Syc148097 * current frame.
26347145Syc148097 */
26355165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr;
26367145Syc148097 } else {
26377145Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ?
26387145Syc148097 l2_len - bytes_read : bsize;
26393859Sml29623 bytes_read += nmp->b_wptr - nmp->b_rptr;
26407145Syc148097 }
26415165Syc148097
26425165Syc148097 NXGE_DEBUG_MSG((nxgep, RX_CTL,
26435165Syc148097 "==> nxge_receive_packet after dupb: "
26445165Syc148097 "rbr consumed %d "
26455165Syc148097 "pktbufsz_type %d "
26465165Syc148097 "nmp $%p rptr $%p wptr $%p "
26475165Syc148097 "buf_offset %d bzise %d l2_len %d skip_len %d",
26485165Syc148097 rx_rbr_p->rbr_consumed,
26495165Syc148097 pktbufsz_type,
26505165Syc148097 nmp, nmp->b_rptr, nmp->b_wptr,
26515165Syc148097 buf_offset, bsize, l2_len, skip_len));
26523859Sml29623 } else {
26533859Sml29623 cmn_err(CE_WARN, "!nxge_receive_packet: "
26546929Smisaki "update stats (error)");
26554977Sraghus atomic_inc_32(&rx_msg_p->ref_cnt);
26564977Sraghus if (buffer_free == B_TRUE) {
26574977Sraghus rx_msg_p->free = B_TRUE;
26584977Sraghus }
26594977Sraghus MUTEX_EXIT(&rx_rbr_p->lock);
26604977Sraghus nxge_freeb(rx_msg_p);
26614977Sraghus return;
26623859Sml29623 }
26635060Syc148097
26643859Sml29623 if (buffer_free == B_TRUE) {
26653859Sml29623 rx_msg_p->free = B_TRUE;
26663859Sml29623 }
26677145Syc148097
26683859Sml29623 is_valid = (nmp != NULL);
26695165Syc148097
26705165Syc148097 rcr_p->rcvd_pkt_bytes = bytes_read;
26715165Syc148097
26723859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock);
26733859Sml29623
26743859Sml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
26753859Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt);
26763859Sml29623 nxge_freeb(rx_msg_p);
26773859Sml29623 }
26783859Sml29623
26793859Sml29623 if (is_valid) {
26803859Sml29623 nmp->b_cont = NULL;
26813859Sml29623 if (first_entry) {
26823859Sml29623 *mp = nmp;
26833859Sml29623 *mp_cont = NULL;
26845165Syc148097 } else {
26853859Sml29623 *mp_cont = nmp;
26865165Syc148097 }
26873859Sml29623 }
26883859Sml29623
26893859Sml29623 /*
26907145Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
26917145Syc148097 * If a packet is not fragmented and no error bit is set, then
26927145Syc148097 * L4 checksum is OK.
26933859Sml29623 */
26947145Syc148097
26953859Sml29623 if (is_valid && !multi) {
26966495Sspeer /*
26976611Sml29623 * If the checksum flag nxge_chksum_offload
26986611Sml29623 * is 1, TCP and UDP packets can be sent
26996495Sspeer * up with good checksum. If the checksum flag
27006611Sml29623 * is set to 0, checksum reporting will apply to
27016495Sspeer * TCP packets only (workaround for a hardware bug).
27026611Sml29623 * If the checksum flag nxge_cksum_offload is
27036611Sml29623 * greater than 1, both TCP and UDP packets
27046611Sml29623 * will not be reported its hardware checksum results.
27056495Sspeer */
27066611Sml29623 if (nxge_cksum_offload == 1) {
27076495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
27086929Smisaki pkt_type == RCR_PKT_IS_UDP) ?
27096929Smisaki B_TRUE: B_FALSE);
27106611Sml29623 } else if (!nxge_cksum_offload) {
27116495Sspeer /* TCP checksum only. */
27126495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
27136929Smisaki B_TRUE: B_FALSE);
27146495Sspeer }
27153859Sml29623
27163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
27176929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
27186929Smisaki is_valid, multi, is_tcp_udp, frag, error_type));
27193859Sml29623
27203859Sml29623 if (is_tcp_udp && !frag && !error_type) {
2721*11878SVenu.Iyer@Sun.COM mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
27223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
27236929Smisaki "==> nxge_receive_packet: Full tcp/udp cksum "
27246929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d "
27256929Smisaki "error %d",
27266929Smisaki is_valid, multi, is_tcp_udp, frag, error_type));
27273859Sml29623 }
27283859Sml29623 }
27293859Sml29623
27303859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL,
27316929Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp));
27323859Sml29623
27333859Sml29623 *multi_p = (multi == RCR_MULTI_MASK);
27343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
27356929Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
27366929Smisaki *multi_p, nmp, *mp, *mp_cont));
27373859Sml29623 }
27383859Sml29623
27398275SEric Cheng /*
27408275SEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when
27418275SEric Cheng * the nxge interrupt comes (see nxge_rx_intr).
27428275SEric Cheng */
27438275SEric Cheng int
nxge_enable_poll(void * arg)27448275SEric Cheng nxge_enable_poll(void *arg)
27458275SEric Cheng {
27468275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
27478275SEric Cheng p_rx_rcr_ring_t ringp;
27488275SEric Cheng p_nxge_t nxgep;
27498275SEric Cheng p_nxge_ldg_t ldgp;
27508275SEric Cheng uint32_t channel;
27518275SEric Cheng
27528275SEric Cheng if (ring_handle == NULL) {
275310309SSriharsha.Basavapatna@Sun.COM ASSERT(ring_handle != NULL);
27548275SEric Cheng return (0);
27558275SEric Cheng }
27568275SEric Cheng
27578275SEric Cheng nxgep = ring_handle->nxgep;
27588275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
27598275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
27608275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
27618275SEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc));
27628275SEric Cheng ldgp = ringp->ldgp;
27638275SEric Cheng if (ldgp == NULL) {
27648275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
27658275SEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change",
27668275SEric Cheng ringp->rdc));
27678275SEric Cheng return (0);
27688275SEric Cheng }
27698275SEric Cheng
27708275SEric Cheng MUTEX_ENTER(&ringp->lock);
27718275SEric Cheng /* enable polling */
27728275SEric Cheng if (ringp->poll_flag == 0) {
27738275SEric Cheng ringp->poll_flag = 1;
27748275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
27758275SEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1",
27768275SEric Cheng ringp->rdc));
27778275SEric Cheng }
27788275SEric Cheng
27798275SEric Cheng MUTEX_EXIT(&ringp->lock);
27808275SEric Cheng return (0);
27818275SEric Cheng }
27828275SEric Cheng /*
27838275SEric Cheng * Disable polling for a ring and enable its interrupt.
27848275SEric Cheng */
27858275SEric Cheng int
nxge_disable_poll(void * arg)27868275SEric Cheng nxge_disable_poll(void *arg)
27878275SEric Cheng {
27888275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
27898275SEric Cheng p_rx_rcr_ring_t ringp;
27908275SEric Cheng p_nxge_t nxgep;
27918275SEric Cheng uint32_t channel;
27928275SEric Cheng
27938275SEric Cheng if (ring_handle == NULL) {
279410309SSriharsha.Basavapatna@Sun.COM ASSERT(ring_handle != NULL);
27958275SEric Cheng return (0);
27968275SEric Cheng }
27978275SEric Cheng
27988275SEric Cheng nxgep = ring_handle->nxgep;
27998275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
28008275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
28018275SEric Cheng
28028275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
28038275SEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc));
28048275SEric Cheng
28058275SEric Cheng MUTEX_ENTER(&ringp->lock);
28068275SEric Cheng
28078275SEric Cheng /* disable polling: enable interrupt */
28088275SEric Cheng if (ringp->poll_flag) {
28098275SEric Cheng npi_handle_t handle;
28108275SEric Cheng rx_dma_ctl_stat_t cs;
28118275SEric Cheng uint8_t channel;
28128275SEric Cheng p_nxge_ldg_t ldgp;
28138275SEric Cheng
28148275SEric Cheng /*
28158275SEric Cheng * Get the control and status for this channel.
28168275SEric Cheng */
28178275SEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep);
28188275SEric Cheng channel = ringp->rdc;
28198275SEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
28208275SEric Cheng channel, &cs.value);
28218275SEric Cheng
28228275SEric Cheng /*
28238275SEric Cheng * Enable mailbox update
28248275SEric Cheng * Since packets were not read and the hardware uses
28258275SEric Cheng * bits pktread and ptrread to update the queue
28268275SEric Cheng * length, we need to set both bits to 0.
28278275SEric Cheng */
28288275SEric Cheng cs.bits.ldw.pktread = 0;
28298275SEric Cheng cs.bits.ldw.ptrread = 0;
28308275SEric Cheng cs.bits.hdw.mex = 1;
28318275SEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
28328275SEric Cheng cs.value);
28338275SEric Cheng
28348275SEric Cheng /*
28358275SEric Cheng * Rearm this logical group if this is a single device
28368275SEric Cheng * group.
28378275SEric Cheng */
28388275SEric Cheng ldgp = ringp->ldgp;
28398275SEric Cheng if (ldgp == NULL) {
28408275SEric Cheng ringp->poll_flag = 0;
28418275SEric Cheng MUTEX_EXIT(&ringp->lock);
28428275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
28438275SEric Cheng "==> nxge_disable_poll: no ldgp rdc %d "
28448275SEric Cheng "(still set poll to 0", ringp->rdc));
28458275SEric Cheng return (0);
28468275SEric Cheng }
28478275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
28488275SEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
28498275SEric Cheng ringp->rdc, ldgp));
28508275SEric Cheng if (ldgp->nldvs == 1) {
285110309SSriharsha.Basavapatna@Sun.COM if (isLDOMguest(nxgep)) {
285210309SSriharsha.Basavapatna@Sun.COM ldgp->arm = B_TRUE;
285310309SSriharsha.Basavapatna@Sun.COM nxge_hio_ldgimgn(nxgep, ldgp);
285410309SSriharsha.Basavapatna@Sun.COM } else {
285510309SSriharsha.Basavapatna@Sun.COM ldgimgm_t mgm;
285610309SSriharsha.Basavapatna@Sun.COM mgm.value = 0;
285710309SSriharsha.Basavapatna@Sun.COM mgm.bits.ldw.arm = 1;
285810309SSriharsha.Basavapatna@Sun.COM mgm.bits.ldw.timer = ldgp->ldg_timer;
285910309SSriharsha.Basavapatna@Sun.COM NXGE_REG_WR64(handle,
286010309SSriharsha.Basavapatna@Sun.COM LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
286110309SSriharsha.Basavapatna@Sun.COM mgm.value);
286210309SSriharsha.Basavapatna@Sun.COM }
28638275SEric Cheng }
28648275SEric Cheng ringp->poll_flag = 0;
28658275SEric Cheng }
28668275SEric Cheng
28678275SEric Cheng MUTEX_EXIT(&ringp->lock);
28688275SEric Cheng return (0);
28698275SEric Cheng }
28708275SEric Cheng
/*
 * Poll 'bytes_to_pickup' bytes of message from the rx ring.
 *
 * MAC-layer polling entry point, used while the ring's poll_flag is
 * set (see nxge_enable_poll/nxge_disable_poll).  Reads the channel's
 * control/status register, harvests completed packets via
 * nxge_rx_pkts() under the ring lock, and dispatches any error events
 * seen in the CS value.  Returns the mblk chain (may be NULL).
 */
mblk_t *
nxge_rx_poll(void *arg, int bytes_to_pickup)
{
	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		rcr_p;
	p_nxge_t		nxgep;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;
	mblk_t			*mblk;
	p_nxge_ldv_t		ldvp;
	uint32_t		channel;

	nxgep = ring_handle->nxgep;

	/*
	 * Get the control and status for this channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/* Map the MAC-layer ring index to the hardware RDC channel. */
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
	MUTEX_ENTER(&rcr_p->lock);
	/* Only legal to poll a ring that nxge_enable_poll() switched over. */
	ASSERT(rcr_p->poll_flag == 1);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
	    rcr_p->rdc, rcr_p->poll_flag));
	/* Harvest up to bytes_to_pickup bytes of completed packets. */
	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);

	ldvp = rcr_p->ldvp;
	/* error events. */
	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
	}

	MUTEX_EXIT(&rcr_p->lock);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
	return (mblk);
}
29168275SEric Cheng
29178275SEric Cheng
29183859Sml29623 /*ARGSUSED*/
29193859Sml29623 static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep,int channel,rx_dma_ctl_stat_t cs)29206495Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
29213859Sml29623 {
29223859Sml29623 p_nxge_rx_ring_stats_t rdc_stats;
29233859Sml29623 npi_handle_t handle;
29243859Sml29623 npi_status_t rs;
29253859Sml29623 boolean_t rxchan_fatal = B_FALSE;
29263859Sml29623 boolean_t rxport_fatal = B_FALSE;
29273859Sml29623 uint8_t portn;
29283859Sml29623 nxge_status_t status = NXGE_OK;
29293859Sml29623 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX;
29303859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
29313859Sml29623
29323859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
29333859Sml29623 portn = nxgep->mac.portnum;
29346495Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel];
29353859Sml29623
29363859Sml29623 if (cs.bits.hdw.rbr_tmout) {
29373859Sml29623 rdc_stats->rx_rbr_tmout++;
29383859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29396929Smisaki NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
29403859Sml29623 rxchan_fatal = B_TRUE;
29413859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29426929Smisaki "==> nxge_rx_err_evnts: rx_rbr_timeout"));
29433859Sml29623 }
29443859Sml29623 if (cs.bits.hdw.rsp_cnt_err) {
29453859Sml29623 rdc_stats->rsp_cnt_err++;
29463859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29476929Smisaki NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
29483859Sml29623 rxchan_fatal = B_TRUE;
29493859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29506929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29516929Smisaki "rsp_cnt_err", channel));
29523859Sml29623 }
29533859Sml29623 if (cs.bits.hdw.byte_en_bus) {
29543859Sml29623 rdc_stats->byte_en_bus++;
29553859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29566929Smisaki NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
29573859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29586929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29596929Smisaki "fatal error: byte_en_bus", channel));
29603859Sml29623 rxchan_fatal = B_TRUE;
29613859Sml29623 }
29623859Sml29623 if (cs.bits.hdw.rsp_dat_err) {
29633859Sml29623 rdc_stats->rsp_dat_err++;
29643859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29656929Smisaki NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
29663859Sml29623 rxchan_fatal = B_TRUE;
29673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29686929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29696929Smisaki "fatal error: rsp_dat_err", channel));
29703859Sml29623 }
29713859Sml29623 if (cs.bits.hdw.rcr_ack_err) {
29723859Sml29623 rdc_stats->rcr_ack_err++;
29733859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29746929Smisaki NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
29753859Sml29623 rxchan_fatal = B_TRUE;
29763859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29776929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29786929Smisaki "fatal error: rcr_ack_err", channel));
29793859Sml29623 }
29803859Sml29623 if (cs.bits.hdw.dc_fifo_err) {
29813859Sml29623 rdc_stats->dc_fifo_err++;
29823859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
29836929Smisaki NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
29843859Sml29623 /* This is not a fatal error! */
29853859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29866929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29876929Smisaki "dc_fifo_err", channel));
29883859Sml29623 rxport_fatal = B_TRUE;
29893859Sml29623 }
29903859Sml29623 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
29913859Sml29623 if ((rs = npi_rxdma_ring_perr_stat_get(handle,
29926929Smisaki &rdc_stats->errlog.pre_par,
29936929Smisaki &rdc_stats->errlog.sha_par))
29946929Smisaki != NPI_SUCCESS) {
29953859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
29966929Smisaki "==> nxge_rx_err_evnts(channel %d): "
29976929Smisaki "rcr_sha_par: get perr", channel));
29983859Sml29623 return (NXGE_ERROR | rs);
29993859Sml29623 }
30003859Sml29623 if (cs.bits.hdw.rcr_sha_par) {
30013859Sml29623 rdc_stats->rcr_sha_par++;
30023859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30036929Smisaki NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
30043859Sml29623 rxchan_fatal = B_TRUE;
30053859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30066929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30076929Smisaki "fatal error: rcr_sha_par", channel));
30083859Sml29623 }
30093859Sml29623 if (cs.bits.hdw.rbr_pre_par) {
30103859Sml29623 rdc_stats->rbr_pre_par++;
30113859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30126929Smisaki NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
30133859Sml29623 rxchan_fatal = B_TRUE;
30143859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30156929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30166929Smisaki "fatal error: rbr_pre_par", channel));
30173859Sml29623 }
30183859Sml29623 }
30196172Syc148097 /*
30206172Syc148097 * The Following 4 status bits are for information, the system
30216172Syc148097 * is running fine. There is no need to send FMA ereports or
30226172Syc148097 * log messages.
30236172Syc148097 */
30243859Sml29623 if (cs.bits.hdw.port_drop_pkt) {
30253859Sml29623 rdc_stats->port_drop_pkt++;
30263859Sml29623 }
30273859Sml29623 if (cs.bits.hdw.wred_drop) {
30283859Sml29623 rdc_stats->wred_drop++;
30293859Sml29623 }
30303859Sml29623 if (cs.bits.hdw.rbr_pre_empty) {
30313859Sml29623 rdc_stats->rbr_pre_empty++;
30323859Sml29623 }
30333859Sml29623 if (cs.bits.hdw.rcr_shadow_full) {
30343859Sml29623 rdc_stats->rcr_shadow_full++;
30353859Sml29623 }
30363859Sml29623 if (cs.bits.hdw.config_err) {
30373859Sml29623 rdc_stats->config_err++;
30383859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30396929Smisaki NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
30403859Sml29623 rxchan_fatal = B_TRUE;
30413859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30426929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30436929Smisaki "config error", channel));
30443859Sml29623 }
30453859Sml29623 if (cs.bits.hdw.rcrincon) {
30463859Sml29623 rdc_stats->rcrincon++;
30473859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30486929Smisaki NXGE_FM_EREPORT_RDMC_RCRINCON);
30493859Sml29623 rxchan_fatal = B_TRUE;
30503859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30516929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30526929Smisaki "fatal error: rcrincon error", channel));
30533859Sml29623 }
30543859Sml29623 if (cs.bits.hdw.rcrfull) {
30553859Sml29623 rdc_stats->rcrfull++;
30563859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30576929Smisaki NXGE_FM_EREPORT_RDMC_RCRFULL);
30583859Sml29623 rxchan_fatal = B_TRUE;
30593859Sml29623 if (rdc_stats->rcrfull < error_disp_cnt)
30603859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30616929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30626929Smisaki "fatal error: rcrfull error", channel));
30633859Sml29623 }
30643859Sml29623 if (cs.bits.hdw.rbr_empty) {
30656172Syc148097 /*
30666172Syc148097 * This bit is for information, there is no need
30676172Syc148097 * send FMA ereport or log a message.
30686172Syc148097 */
30693859Sml29623 rdc_stats->rbr_empty++;
30703859Sml29623 }
30713859Sml29623 if (cs.bits.hdw.rbrfull) {
30723859Sml29623 rdc_stats->rbrfull++;
30733859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30746929Smisaki NXGE_FM_EREPORT_RDMC_RBRFULL);
30753859Sml29623 rxchan_fatal = B_TRUE;
30763859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30776929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30786929Smisaki "fatal error: rbr_full error", channel));
30793859Sml29623 }
30803859Sml29623 if (cs.bits.hdw.rbrlogpage) {
30813859Sml29623 rdc_stats->rbrlogpage++;
30823859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30836929Smisaki NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
30843859Sml29623 rxchan_fatal = B_TRUE;
30853859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30866929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30876929Smisaki "fatal error: rbr logical page error", channel));
30883859Sml29623 }
30893859Sml29623 if (cs.bits.hdw.cfiglogpage) {
30903859Sml29623 rdc_stats->cfiglogpage++;
30913859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30926929Smisaki NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
30933859Sml29623 rxchan_fatal = B_TRUE;
30943859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30956929Smisaki "==> nxge_rx_err_evnts(channel %d): "
30966929Smisaki "fatal error: cfig logical page error", channel));
30973859Sml29623 }
30983859Sml29623
30993859Sml29623 if (rxport_fatal) {
31003859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31016495Sspeer " nxge_rx_err_evnts: fatal error on Port #%d\n",
31026495Sspeer portn));
31036495Sspeer if (isLDOMguest(nxgep)) {
31046495Sspeer status = NXGE_ERROR;
31056495Sspeer } else {
31066495Sspeer status = nxge_ipp_fatal_err_recover(nxgep);
31076495Sspeer if (status == NXGE_OK) {
31086495Sspeer FM_SERVICE_RESTORED(nxgep);
31096495Sspeer }
31103859Sml29623 }
31113859Sml29623 }
31123859Sml29623
31133859Sml29623 if (rxchan_fatal) {
31143859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31156495Sspeer " nxge_rx_err_evnts: fatal error on Channel #%d\n",
31166495Sspeer channel));
31176495Sspeer if (isLDOMguest(nxgep)) {
31186495Sspeer status = NXGE_ERROR;
31196495Sspeer } else {
31206495Sspeer status = nxge_rxdma_fatal_err_recover(nxgep, channel);
31216495Sspeer if (status == NXGE_OK) {
31226495Sspeer FM_SERVICE_RESTORED(nxgep);
31236495Sspeer }
31243859Sml29623 }
31253859Sml29623 }
31263859Sml29623
31273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
31283859Sml29623
31293859Sml29623 return (status);
31303859Sml29623 }
31313859Sml29623
31326495Sspeer /*
31336495Sspeer * nxge_rdc_hvio_setup
31346495Sspeer *
31356495Sspeer * This code appears to setup some Hypervisor variables.
31366495Sspeer *
31376495Sspeer * Arguments:
31386495Sspeer * nxgep
31396495Sspeer * channel
31406495Sspeer *
31416495Sspeer * Notes:
31426495Sspeer * What does NIU_LP_WORKAROUND mean?
31436495Sspeer *
31446495Sspeer * NPI/NXGE function calls:
31456495Sspeer * na
31466495Sspeer *
31476495Sspeer * Context:
31486495Sspeer * Any domain
31496495Sspeer */
31506495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
31516495Sspeer static void
nxge_rdc_hvio_setup(nxge_t * nxgep,int channel)31526495Sspeer nxge_rdc_hvio_setup(
31536495Sspeer nxge_t *nxgep, int channel)
31543859Sml29623 {
31556495Sspeer nxge_dma_common_t *dma_common;
31566495Sspeer nxge_dma_common_t *dma_control;
31576495Sspeer rx_rbr_ring_t *ring;
31586495Sspeer
31596495Sspeer ring = nxgep->rx_rbr_rings->rbr_rings[channel];
31606495Sspeer dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
31616495Sspeer
31626495Sspeer ring->hv_set = B_FALSE;
31636495Sspeer
31646495Sspeer ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
31656495Sspeer dma_common->orig_ioaddr_pp;
31666495Sspeer ring->hv_rx_buf_ioaddr_size = (uint64_t)
31676495Sspeer dma_common->orig_alength;
31686495Sspeer
31696495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
31706495Sspeer "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
31716495Sspeer channel, ring->hv_rx_buf_base_ioaddr_pp,
31726495Sspeer dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
31736495Sspeer dma_common->orig_alength, dma_common->orig_alength));
31746495Sspeer
31756495Sspeer dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
31766495Sspeer
31776495Sspeer ring->hv_rx_cntl_base_ioaddr_pp =
31786495Sspeer (uint64_t)dma_control->orig_ioaddr_pp;
31796495Sspeer ring->hv_rx_cntl_ioaddr_size =
31806495Sspeer (uint64_t)dma_control->orig_alength;
31816495Sspeer
31826495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
31836495Sspeer "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
31846495Sspeer channel, ring->hv_rx_cntl_base_ioaddr_pp,
31856495Sspeer dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
31866495Sspeer dma_control->orig_alength, dma_control->orig_alength));
31876495Sspeer }
31883859Sml29623 #endif
31893859Sml29623
31906495Sspeer /*
31916495Sspeer * nxge_map_rxdma
31926495Sspeer *
31936495Sspeer * Map an RDC into our kernel space.
31946495Sspeer *
31956495Sspeer * Arguments:
31966495Sspeer * nxgep
31976495Sspeer * channel The channel to map.
31986495Sspeer *
31996495Sspeer * Notes:
32006495Sspeer * 1. Allocate & initialise a memory pool, if necessary.
32016495Sspeer * 2. Allocate however many receive buffers are required.
32026495Sspeer * 3. Setup buffers, descriptors, and mailbox.
32036495Sspeer *
32046495Sspeer * NPI/NXGE function calls:
32056495Sspeer * nxge_alloc_rx_mem_pool()
32066495Sspeer * nxge_alloc_rbb()
32076495Sspeer * nxge_map_rxdma_channel()
32086495Sspeer *
32096495Sspeer * Registers accessed:
32106495Sspeer *
32116495Sspeer * Context:
32126495Sspeer * Any domain
32136495Sspeer */
32146495Sspeer static nxge_status_t
nxge_map_rxdma(p_nxge_t nxgep,int channel)32156495Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel)
32166495Sspeer {
32176495Sspeer nxge_dma_common_t **data;
32186495Sspeer nxge_dma_common_t **control;
32196495Sspeer rx_rbr_ring_t **rbr_ring;
32206495Sspeer rx_rcr_ring_t **rcr_ring;
32216495Sspeer rx_mbox_t **mailbox;
32226495Sspeer uint32_t chunks;
32236495Sspeer
32246495Sspeer nxge_status_t status;
32256495Sspeer
32263859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
32273859Sml29623
32286495Sspeer if (!nxgep->rx_buf_pool_p) {
32296495Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
32306495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32316495Sspeer "<== nxge_map_rxdma: buf not allocated"));
32326495Sspeer return (NXGE_ERROR);
32336495Sspeer }
32343859Sml29623 }
32353859Sml29623
32366495Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
32376495Sspeer return (NXGE_ERROR);
32383859Sml29623
32393859Sml29623 /*
32406495Sspeer * Map descriptors from the buffer polls for each dma channel.
32416495Sspeer */
32426495Sspeer
32436495Sspeer /*
32446495Sspeer * Set up and prepare buffer blocks, descriptors
32456495Sspeer * and mailbox.
32463859Sml29623 */
32476495Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
32486495Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
32496495Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
32506495Sspeer
32516495Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
32526495Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
32536495Sspeer
32546495Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
32556495Sspeer
32566495Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
32576495Sspeer chunks, control, rcr_ring, mailbox);
32586495Sspeer if (status != NXGE_OK) {
32596495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
32606929Smisaki "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
32616929Smisaki "returned 0x%x",
32626929Smisaki channel, status));
32636495Sspeer return (status);
32646495Sspeer }
32656495Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
32666495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
32676495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
32686495Sspeer &nxgep->statsp->rdc_stats[channel];
32693859Sml29623
32703859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
32716495Sspeer if (!isLDOMguest(nxgep))
32726495Sspeer nxge_rdc_hvio_setup(nxgep, channel);
32736495Sspeer #endif
32746495Sspeer
32753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
32766495Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
32773859Sml29623
32783859Sml29623 return (status);
32793859Sml29623 }
32803859Sml29623
32813859Sml29623 static void
nxge_unmap_rxdma(p_nxge_t nxgep,int channel)32826495Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
32833859Sml29623 {
32846495Sspeer rx_rbr_ring_t *rbr_ring;
32856495Sspeer rx_rcr_ring_t *rcr_ring;
32866495Sspeer rx_mbox_t *mailbox;
32876495Sspeer
32886495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
32896495Sspeer
32906495Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
32916495Sspeer !nxgep->rx_mbox_areas_p)
32923859Sml29623 return;
32936495Sspeer
32946495Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
32956495Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
32966495Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
32976495Sspeer
32986495Sspeer if (!rbr_ring || !rcr_ring || !mailbox)
32993859Sml29623 return;
33006495Sspeer
33016495Sspeer (void) nxge_unmap_rxdma_channel(
33026929Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox);
33036495Sspeer
33046495Sspeer nxge_free_rxb(nxgep, channel);
33056495Sspeer
33066495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
33073859Sml29623 }
33083859Sml29623
33093859Sml29623 nxge_status_t
nxge_map_rxdma_channel(p_nxge_t nxgep,uint16_t channel,p_nxge_dma_common_t * dma_buf_p,p_rx_rbr_ring_t * rbr_p,uint32_t num_chunks,p_nxge_dma_common_t * dma_cntl_p,p_rx_rcr_ring_t * rcr_p,p_rx_mbox_t * rx_mbox_p)33103859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
33113859Sml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
33123859Sml29623 uint32_t num_chunks,
33133859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
33143859Sml29623 p_rx_mbox_t *rx_mbox_p)
33153859Sml29623 {
33163859Sml29623 int status = NXGE_OK;
33173859Sml29623
33183859Sml29623 /*
33193859Sml29623 * Set up and prepare buffer blocks, descriptors
33203859Sml29623 * and mailbox.
33213859Sml29623 */
33223859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
33236929Smisaki "==> nxge_map_rxdma_channel (channel %d)", channel));
33243859Sml29623 /*
33253859Sml29623 * Receive buffer blocks
33263859Sml29623 */
33273859Sml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
33286929Smisaki dma_buf_p, rbr_p, num_chunks);
33293859Sml29623 if (status != NXGE_OK) {
33303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33316929Smisaki "==> nxge_map_rxdma_channel (channel %d): "
33326929Smisaki "map buffer failed 0x%x", channel, status));
33333859Sml29623 goto nxge_map_rxdma_channel_exit;
33343859Sml29623 }
33353859Sml29623
33363859Sml29623 /*
33373859Sml29623 * Receive block ring, completion ring and mailbox.
33383859Sml29623 */
33393859Sml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
33406929Smisaki dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
33413859Sml29623 if (status != NXGE_OK) {
33423859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33436929Smisaki "==> nxge_map_rxdma_channel (channel %d): "
33446929Smisaki "map config failed 0x%x", channel, status));
33453859Sml29623 goto nxge_map_rxdma_channel_fail2;
33463859Sml29623 }
33473859Sml29623
33483859Sml29623 goto nxge_map_rxdma_channel_exit;
33493859Sml29623
33503859Sml29623 nxge_map_rxdma_channel_fail3:
33513859Sml29623 /* Free rbr, rcr */
33523859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33536929Smisaki "==> nxge_map_rxdma_channel: free rbr/rcr "
33546929Smisaki "(status 0x%x channel %d)",
33556929Smisaki status, channel));
33563859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep,
33576929Smisaki *rcr_p, *rx_mbox_p);
33583859Sml29623
33593859Sml29623 nxge_map_rxdma_channel_fail2:
33603859Sml29623 /* Free buffer blocks */
33613859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33626929Smisaki "==> nxge_map_rxdma_channel: free rx buffers"
33636929Smisaki "(nxgep 0x%x status 0x%x channel %d)",
33646929Smisaki nxgep, status, channel));
33653859Sml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
33663859Sml29623
33674185Sspeer status = NXGE_ERROR;
33684185Sspeer
33693859Sml29623 nxge_map_rxdma_channel_exit:
33703859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
33716929Smisaki "<== nxge_map_rxdma_channel: "
33726929Smisaki "(nxgep 0x%x status 0x%x channel %d)",
33736929Smisaki nxgep, status, channel));
33743859Sml29623
33753859Sml29623 return (status);
33763859Sml29623 }
33773859Sml29623
33783859Sml29623 /*ARGSUSED*/
33793859Sml29623 static void
nxge_unmap_rxdma_channel(p_nxge_t nxgep,uint16_t channel,p_rx_rbr_ring_t rbr_p,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t rx_mbox_p)33803859Sml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
33813859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
33823859Sml29623 {
33833859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
33846929Smisaki "==> nxge_unmap_rxdma_channel (channel %d)", channel));
33853859Sml29623
33863859Sml29623 /*
33873859Sml29623 * unmap receive block ring, completion ring and mailbox.
33883859Sml29623 */
33893859Sml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
33906929Smisaki rcr_p, rx_mbox_p);
33913859Sml29623
33923859Sml29623 /* unmap buffer blocks */
33933859Sml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
33943859Sml29623
33953859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
33963859Sml29623 }
33973859Sml29623
33983859Sml29623 /*ARGSUSED*/
33993859Sml29623 static nxge_status_t
nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep,uint16_t dma_channel,p_nxge_dma_common_t * dma_cntl_p,p_rx_rbr_ring_t * rbr_p,p_rx_rcr_ring_t * rcr_p,p_rx_mbox_t * rx_mbox_p)34003859Sml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
34013859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
34023859Sml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
34033859Sml29623 {
34043859Sml29623 p_rx_rbr_ring_t rbrp;
34053859Sml29623 p_rx_rcr_ring_t rcrp;
34063859Sml29623 p_rx_mbox_t mboxp;
34073859Sml29623 p_nxge_dma_common_t cntl_dmap;
34083859Sml29623 p_nxge_dma_common_t dmap;
34093859Sml29623 p_rx_msg_t *rx_msg_ring;
34103859Sml29623 p_rx_msg_t rx_msg_p;
34113859Sml29623 p_rbr_cfig_a_t rcfga_p;
34123859Sml29623 p_rbr_cfig_b_t rcfgb_p;
34133859Sml29623 p_rcrcfig_a_t cfga_p;
34143859Sml29623 p_rcrcfig_b_t cfgb_p;
34153859Sml29623 p_rxdma_cfig1_t cfig1_p;
34163859Sml29623 p_rxdma_cfig2_t cfig2_p;
34173859Sml29623 p_rbr_kick_t kick_p;
34183859Sml29623 uint32_t dmaaddrp;
34193859Sml29623 uint32_t *rbr_vaddrp;
34203859Sml29623 uint32_t bkaddr;
34213859Sml29623 nxge_status_t status = NXGE_OK;
34223859Sml29623 int i;
34233859Sml29623 uint32_t nxge_port_rcr_size;
34243859Sml29623
34253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
34266929Smisaki "==> nxge_map_rxdma_channel_cfg_ring"));
34273859Sml29623
34283859Sml29623 cntl_dmap = *dma_cntl_p;
34293859Sml29623
34303859Sml29623 /* Map in the receive block ring */
34313859Sml29623 rbrp = *rbr_p;
34323859Sml29623 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
34333859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
34343859Sml29623 /*
34353859Sml29623 * Zero out buffer block ring descriptors.
34363859Sml29623 */
34373859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength);
34383859Sml29623
34393859Sml29623 rcfga_p = &(rbrp->rbr_cfga);
34403859Sml29623 rcfgb_p = &(rbrp->rbr_cfgb);
34413859Sml29623 kick_p = &(rbrp->rbr_kick);
34423859Sml29623 rcfga_p->value = 0;
34433859Sml29623 rcfgb_p->value = 0;
34443859Sml29623 kick_p->value = 0;
34453859Sml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
34463859Sml29623 rcfga_p->value = (rbrp->rbr_addr &
34476929Smisaki (RBR_CFIG_A_STDADDR_MASK |
34486929Smisaki RBR_CFIG_A_STDADDR_BASE_MASK));
34493859Sml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
34503859Sml29623
34513859Sml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
34523859Sml29623 rcfgb_p->bits.ldw.vld0 = 1;
34533859Sml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
34543859Sml29623 rcfgb_p->bits.ldw.vld1 = 1;
34553859Sml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
34563859Sml29623 rcfgb_p->bits.ldw.vld2 = 1;
34573859Sml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
34583859Sml29623
34593859Sml29623 /*
34603859Sml29623 * For each buffer block, enter receive block address to the ring.
34613859Sml29623 */
34623859Sml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp;
34633859Sml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
34643859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
34656929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
34666929Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
34673859Sml29623
34683859Sml29623 rx_msg_ring = rbrp->rx_msg_ring;
34693859Sml29623 for (i = 0; i < rbrp->tnblocks; i++) {
34703859Sml29623 rx_msg_p = rx_msg_ring[i];
34713859Sml29623 rx_msg_p->nxgep = nxgep;
34723859Sml29623 rx_msg_p->rx_rbr_p = rbrp;
34733859Sml29623 bkaddr = (uint32_t)
34746929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
34756929Smisaki >> RBR_BKADDR_SHIFT));
34763859Sml29623 rx_msg_p->free = B_FALSE;
34773859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe;
34783859Sml29623
34793859Sml29623 *rbr_vaddrp++ = bkaddr;
34803859Sml29623 }
34813859Sml29623
34823859Sml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max;
34833859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
34843859Sml29623
34853859Sml29623 rbrp->rbr_rd_index = 0;
34863859Sml29623
34873859Sml29623 rbrp->rbr_consumed = 0;
34883859Sml29623 rbrp->rbr_use_bcopy = B_TRUE;
34893859Sml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
34903859Sml29623 /*
34913859Sml29623 * Do bcopy on packets greater than bcopy size once
34923859Sml29623 * the lo threshold is reached.
34933859Sml29623 * This lo threshold should be less than the hi threshold.
34943859Sml29623 *
34953859Sml29623 * Do bcopy on every packet once the hi threshold is reached.
34963859Sml29623 */
34973859Sml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
34983859Sml29623 /* default it to use hi */
34993859Sml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi;
35003859Sml29623 }
35013859Sml29623
35023859Sml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
35033859Sml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
35043859Sml29623 }
35053859Sml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
35063859Sml29623
35073859Sml29623 switch (nxge_rx_threshold_hi) {
35083859Sml29623 default:
35093859Sml29623 case NXGE_RX_COPY_NONE:
35103859Sml29623 /* Do not do bcopy at all */
35113859Sml29623 rbrp->rbr_use_bcopy = B_FALSE;
35123859Sml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max;
35133859Sml29623 break;
35143859Sml29623
35153859Sml29623 case NXGE_RX_COPY_1:
35163859Sml29623 case NXGE_RX_COPY_2:
35173859Sml29623 case NXGE_RX_COPY_3:
35183859Sml29623 case NXGE_RX_COPY_4:
35193859Sml29623 case NXGE_RX_COPY_5:
35203859Sml29623 case NXGE_RX_COPY_6:
35213859Sml29623 case NXGE_RX_COPY_7:
35223859Sml29623 rbrp->rbr_threshold_hi =
35236929Smisaki rbrp->rbb_max *
35246929Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
35253859Sml29623 break;
35263859Sml29623
35273859Sml29623 case NXGE_RX_COPY_ALL:
35283859Sml29623 rbrp->rbr_threshold_hi = 0;
35293859Sml29623 break;
35303859Sml29623 }
35313859Sml29623
35323859Sml29623 switch (nxge_rx_threshold_lo) {
35333859Sml29623 default:
35343859Sml29623 case NXGE_RX_COPY_NONE:
35353859Sml29623 /* Do not do bcopy at all */
35363859Sml29623 if (rbrp->rbr_use_bcopy) {
35373859Sml29623 rbrp->rbr_use_bcopy = B_FALSE;
35383859Sml29623 }
35393859Sml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max;
35403859Sml29623 break;
35413859Sml29623
35423859Sml29623 case NXGE_RX_COPY_1:
35433859Sml29623 case NXGE_RX_COPY_2:
35443859Sml29623 case NXGE_RX_COPY_3:
35453859Sml29623 case NXGE_RX_COPY_4:
35463859Sml29623 case NXGE_RX_COPY_5:
35473859Sml29623 case NXGE_RX_COPY_6:
35483859Sml29623 case NXGE_RX_COPY_7:
35493859Sml29623 rbrp->rbr_threshold_lo =
35506929Smisaki rbrp->rbb_max *
35516929Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
35523859Sml29623 break;
35533859Sml29623
35543859Sml29623 case NXGE_RX_COPY_ALL:
35553859Sml29623 rbrp->rbr_threshold_lo = 0;
35563859Sml29623 break;
35573859Sml29623 }
35583859Sml29623
35593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
35606929Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d "
35616929Smisaki "rbb_max %d "
35626929Smisaki "rbrp->rbr_bufsize_type %d "
35636929Smisaki "rbb_threshold_hi %d "
35646929Smisaki "rbb_threshold_lo %d",
35656929Smisaki dma_channel,
35666929Smisaki rbrp->rbb_max,
35676929Smisaki rbrp->rbr_bufsize_type,
35686929Smisaki rbrp->rbr_threshold_hi,
35696929Smisaki rbrp->rbr_threshold_lo));
35703859Sml29623
35713859Sml29623 rbrp->page_valid.value = 0;
35723859Sml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
35733859Sml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
35743859Sml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
35753859Sml29623 rbrp->page_hdl.value = 0;
35763859Sml29623
35773859Sml29623 rbrp->page_valid.bits.ldw.page0 = 1;
35783859Sml29623 rbrp->page_valid.bits.ldw.page1 = 1;
35793859Sml29623
35803859Sml29623 /* Map in the receive completion ring */
35813859Sml29623 rcrp = (p_rx_rcr_ring_t)
35826929Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
35833859Sml29623 rcrp->rdc = dma_channel;
35843859Sml29623
35853859Sml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
35863859Sml29623 rcrp->comp_size = nxge_port_rcr_size;
35873859Sml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
35883859Sml29623
35893859Sml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts;
35903859Sml29623
35913859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
35923859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
35936929Smisaki sizeof (rcr_entry_t));
35943859Sml29623 rcrp->comp_rd_index = 0;
35953859Sml29623 rcrp->comp_wt_index = 0;
35963859Sml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
35976929Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
35985125Sjoycey #if defined(__i386)
35996929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
36006929Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
36015125Sjoycey #else
36026929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
36036929Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
36045125Sjoycey #endif
36053859Sml29623
36063859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
36076929Smisaki (nxge_port_rcr_size - 1);
36083859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
36096929Smisaki (nxge_port_rcr_size - 1);
36103859Sml29623
36113859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
36126929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: "
36136929Smisaki "channel %d "
36146929Smisaki "rbr_vaddrp $%p "
36156929Smisaki "rcr_desc_rd_head_p $%p "
36166929Smisaki "rcr_desc_rd_head_pp $%p "
36176929Smisaki "rcr_desc_rd_last_p $%p "
36186929Smisaki "rcr_desc_rd_last_pp $%p ",
36196929Smisaki dma_channel,
36206929Smisaki rbr_vaddrp,
36216929Smisaki rcrp->rcr_desc_rd_head_p,
36226929Smisaki rcrp->rcr_desc_rd_head_pp,
36236929Smisaki rcrp->rcr_desc_last_p,
36246929Smisaki rcrp->rcr_desc_last_pp));
36253859Sml29623
36263859Sml29623 /*
36273859Sml29623 * Zero out buffer block ring descriptors.
36283859Sml29623 */
36293859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength);
36308661SSantwona.Behera@Sun.COM
36318661SSantwona.Behera@Sun.COM rcrp->intr_timeout = (nxgep->intr_timeout <
36328661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
36338661SSantwona.Behera@Sun.COM nxgep->intr_timeout;
36348661SSantwona.Behera@Sun.COM
36358661SSantwona.Behera@Sun.COM rcrp->intr_threshold = (nxgep->intr_threshold <
36368661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
36378661SSantwona.Behera@Sun.COM nxgep->intr_threshold;
36388661SSantwona.Behera@Sun.COM
36393859Sml29623 rcrp->full_hdr_flag = B_FALSE;
364011304SJanie.Lu@Sun.COM
364111304SJanie.Lu@Sun.COM rcrp->sw_priv_hdr_len = nxge_rdc_buf_offset;
364211304SJanie.Lu@Sun.COM
36433859Sml29623
36443859Sml29623 cfga_p = &(rcrp->rcr_cfga);
36453859Sml29623 cfgb_p = &(rcrp->rcr_cfgb);
36463859Sml29623 cfga_p->value = 0;
36473859Sml29623 cfgb_p->value = 0;
36483859Sml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
36493859Sml29623 cfga_p->value = (rcrp->rcr_addr &
36506929Smisaki (RCRCFIG_A_STADDR_MASK |
36516929Smisaki RCRCFIG_A_STADDR_BASE_MASK));
36523859Sml29623
36533859Sml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size <<
36546929Smisaki RCRCFIG_A_LEN_SHIF);
36553859Sml29623
36563859Sml29623 /*
36573859Sml29623 * Timeout should be set based on the system clock divider.
36588661SSantwona.Behera@Sun.COM * A timeout value of 1 assumes that the
36593859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz.
36603859Sml29623 */
36613859Sml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
36623859Sml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
36633859Sml29623 cfgb_p->bits.ldw.entout = 1;
36643859Sml29623
36653859Sml29623 /* Map in the mailbox */
36663859Sml29623 mboxp = (p_rx_mbox_t)
36676929Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
36683859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
36693859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
36703859Sml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
36713859Sml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
36723859Sml29623 cfig1_p->value = cfig2_p->value = 0;
36733859Sml29623
36743859Sml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
36753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
36766929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: "
36776929Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
36786929Smisaki dma_channel, cfig1_p->value, cfig2_p->value,
36796929Smisaki mboxp->mbox_addr));
36803859Sml29623
36813859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
36826929Smisaki & 0xfff);
36833859Sml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
36843859Sml29623
36853859Sml29623
36863859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
36873859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
36886929Smisaki RXDMA_CFIG2_MBADDR_L_MASK);
36893859Sml29623
36903859Sml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
36913859Sml29623
36923859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
36936929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: "
36946929Smisaki "channel %d damaddrp $%p "
36956929Smisaki "cfg1 0x%016llx cfig2 0x%016llx",
36966929Smisaki dma_channel, dmaaddrp,
36976929Smisaki cfig1_p->value, cfig2_p->value));
36983859Sml29623
36993859Sml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
370011304SJanie.Lu@Sun.COM if (nxgep->niu_hw_type == NIU_HW_TYPE_RF) {
370111304SJanie.Lu@Sun.COM switch (rcrp->sw_priv_hdr_len) {
370211304SJanie.Lu@Sun.COM case SW_OFFSET_NO_OFFSET:
370311304SJanie.Lu@Sun.COM case SW_OFFSET_64:
370411304SJanie.Lu@Sun.COM case SW_OFFSET_128:
370511304SJanie.Lu@Sun.COM case SW_OFFSET_192:
370611304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset =
370711304SJanie.Lu@Sun.COM rcrp->sw_priv_hdr_len;
370811304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset256 = 0;
370911304SJanie.Lu@Sun.COM break;
371011304SJanie.Lu@Sun.COM case SW_OFFSET_256:
371111304SJanie.Lu@Sun.COM case SW_OFFSET_320:
371211304SJanie.Lu@Sun.COM case SW_OFFSET_384:
371311304SJanie.Lu@Sun.COM case SW_OFFSET_448:
371411304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset =
371511304SJanie.Lu@Sun.COM rcrp->sw_priv_hdr_len & 0x3;
371611304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset256 = 1;
371711304SJanie.Lu@Sun.COM break;
371811304SJanie.Lu@Sun.COM default:
371911304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset = SW_OFFSET_NO_OFFSET;
372011304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset256 = 0;
372111304SJanie.Lu@Sun.COM }
372211304SJanie.Lu@Sun.COM } else {
372311304SJanie.Lu@Sun.COM cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
372411304SJanie.Lu@Sun.COM }
37253859Sml29623
37263859Sml29623 rbrp->rx_rcr_p = rcrp;
37273859Sml29623 rcrp->rx_rbr_p = rbrp;
37283859Sml29623 *rcr_p = rcrp;
37293859Sml29623 *rx_mbox_p = mboxp;
37303859Sml29623
37313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37326929Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
37333859Sml29623
37343859Sml29623 return (status);
37353859Sml29623 }
37363859Sml29623
37373859Sml29623 /*ARGSUSED*/
37383859Sml29623 static void
nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t rx_mbox_p)37393859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
37403859Sml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
37413859Sml29623 {
37423859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37436929Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
37446929Smisaki rcr_p->rdc));
37453859Sml29623
37463859Sml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
37473859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
37483859Sml29623
37493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37506929Smisaki "<== nxge_unmap_rxdma_channel_cfg_ring"));
37513859Sml29623 }
37523859Sml29623
/*
 * nxge_map_rxdma_channel_buf_ring
 *
 *	Build the receive block ring (RBR) software state for one RXDMA
 *	channel from a set of pre-allocated DMA buffer chunks: allocate
 *	the RBR structure, the rx_msg pointer ring and the ring_info
 *	table, then carve every chunk into message blocks via
 *	nxge_allocb().
 *
 * Arguments:
 *	nxgep
 *	channel		The RXDMA channel being mapped.
 *	dma_buf_p	Array of <num_chunks> DMA buffer chunks.
 *	rbr_p		(out) Receives the constructed RBR ring.
 *	num_chunks	Number of entries in <dma_buf_p>.
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR if the chunks contain no blocks
 *	or if any nxge_allocb() fails (all partial state is unwound).
 */
static nxge_status_t
nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
    p_nxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
{
	p_rx_rbr_ring_t		rbrp;
	p_nxge_dma_common_t	dma_bufp, tmp_bufp;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_mblk_t		mblk_p;

	rxring_info_t *ring_info;
	nxge_status_t status = NXGE_OK;
	int i, j, index;
	uint32_t size, bsize, nblocks, nmsgs;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
	    channel));

	dma_bufp = tmp_bufp = *dma_buf_p;
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
	    "chunks bufp 0x%016llx",
	    channel, num_chunks, dma_bufp));

	/* Count the total number of message blocks across all chunks. */
	nmsgs = 0;
	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
		    "bufp 0x%016llx nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
		nmsgs += tmp_bufp->nblocks;
	}
	if (!nmsgs) {
		/* No buffers at all: nothing has been allocated yet. */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
		    "no msg blocks",
		    channel));
		status = NXGE_ERROR;
		goto nxge_map_rxdma_channel_buf_ring_exit;
	}

	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);

	/* One rx_msg pointer slot per message block. */
	size = nmsgs * sizeof (p_rx_msg_t);
	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
	    KM_SLEEP);

	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
	    (void *)nxgep->interrupt_cookie);
	rbrp->rdc = channel;
	rbrp->num_blocks = num_chunks;
	rbrp->tnblocks = nmsgs;
	rbrp->rbb_max = nmsgs;
	rbrp->rbr_max_size = nmsgs;
	/* Power-of-two wrap mask; assumes nmsgs is a power of two. */
	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);

	/*
	 * Buffer sizes suggested by NIU architect.
	 * 256, 512 and 2K.
	 */

	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
	rbrp->npi_pkt_buf_size0 = SIZE_256B;

	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
	rbrp->npi_pkt_buf_size1 = SIZE_1KB;

	rbrp->block_size = nxgep->rx_default_block_size;

	/*
	 * The third buffer size depends on the MTU configuration:
	 * 2K for standard frames, 4K/8K for jumbo depending on the
	 * default block size.
	 */
	if (!nxgep->mac.is_jumbo) {
		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
	} else {
		if (rbrp->block_size >= 0x2000) {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
		} else {
			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
		}
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
	    "actual rbr max %d rbb_max %d nmsgs %d "
	    "rbrp->block_size %d default_block_size %d "
	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
	    rbrp->block_size, nxgep->rx_default_block_size,
	    nxge_rbr_size, nxge_rbr_spare_size));

	/* Map in buffers from the buffer pool. */
	index = 0;
	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
		/* Record per-chunk address/size info for block lookups. */
#if defined(__i386)
		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
#else
		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
#endif
		ring_info->buffer[i].buf_index = i;
		ring_info->buffer[i].buf_size = dma_bufp->alength;
		ring_info->buffer[i].start_index = index;
#if defined(__i386)
		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
#else
		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
#endif

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p", channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));

		/* Carve this chunk into <nblocks> rx message blocks. */
		for (j = 0; j < nblocks; j++) {
			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
			    dma_bufp)) == NULL) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "allocb failed (index %d i %d j %d)",
				    index, i, j));
				goto nxge_map_rxdma_channel_buf_ring_fail1;
			}
			rx_msg_ring[index] = rx_msg_p;
			rx_msg_p->block_index = index;
			/* Hardware RBR entries hold the shifted DMA address. */
			rx_msg_p->shifted_addr = (uint32_t)
			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
			    RBR_BKADDR_SHIFT));

			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
			    "index %d j %d rx_msg_p $%p mblk %p",
			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));

			mblk_p = rx_msg_p->rx_mblk_p;
			mblk_p->b_wptr = mblk_p->b_rptr + bsize;

			/* One reference per message block; nxge_freeb()
			 * drops these as buffers are returned. */
			rbrp->rbr_ref_cnt++;
			index++;
			rx_msg_p->buf_dma.dma_channel = channel;
		}

		/* Remember how the chunk memory was allocated so teardown
		 * frees it the same way. */
		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
		if (dma_bufp->contig_alloc_type) {
			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
		}

		if (dma_bufp->kmem_alloc_type) {
			rbrp->rbr_alloc_type = KMEM_ALLOC;
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d"
		    " nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p",
		    channel, i,
		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
		    dma_bufp));
	}
	/* NOTE(review): the loop above only exits with i == num_blocks,
	 * so this check appears unreachable; failures jump to fail1. */
	if (i < rbrp->num_blocks) {
		goto nxge_map_rxdma_channel_buf_ring_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "nxge_map_rxdma_channel_buf_ring: done buf init "
	    "channel %d msg block entries %d",
	    channel, index));
	ring_info->block_size_mask = bsize - 1;
	rbrp->rx_msg_ring = rx_msg_ring;
	rbrp->dma_bufp = dma_buf_p;
	rbrp->ring_info = ring_info;

	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: "
	    "channel %d done buf info init", channel));

	/*
	 * Finally, permit nxge_freeb() to call nxge_post_page().
	 */
	rbrp->rbr_state = RBR_POSTING;

	*rbr_p = rbrp;
	goto nxge_map_rxdma_channel_buf_ring_exit;

nxge_map_rxdma_channel_buf_ring_fail1:
	/* NOTE(review): <status> is still NXGE_OK when this message
	 * prints; it is only set to NXGE_ERROR below. */
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
	    channel, status));

	/* Free every message block allocated before the failure. */
	index--;
	for (; index >= 0; index--) {
		rx_msg_p = rx_msg_ring[index];
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[index] = NULL;
		}
	}
nxge_map_rxdma_channel_buf_ring_fail:
	MUTEX_DESTROY(&rbrp->post_lock);
	MUTEX_DESTROY(&rbrp->lock);
	KMEM_FREE(ring_info, sizeof (rxring_info_t));
	KMEM_FREE(rx_msg_ring, size);
	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

	status = NXGE_ERROR;

nxge_map_rxdma_channel_buf_ring_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));

	return (status);
}
39793859Sml29623
39803859Sml29623 /*ARGSUSED*/
39813859Sml29623 static void
nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,p_rx_rbr_ring_t rbr_p)39823859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
39833859Sml29623 p_rx_rbr_ring_t rbr_p)
39843859Sml29623 {
39853859Sml29623 p_rx_msg_t *rx_msg_ring;
39863859Sml29623 p_rx_msg_t rx_msg_p;
39873859Sml29623 rxring_info_t *ring_info;
39883859Sml29623 int i;
39893859Sml29623 uint32_t size;
39903859Sml29623 #ifdef NXGE_DEBUG
39913859Sml29623 int num_chunks;
39923859Sml29623 #endif
39933859Sml29623
39943859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39956929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring"));
39963859Sml29623 if (rbr_p == NULL) {
39973859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
39986929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
39993859Sml29623 return;
40003859Sml29623 }
40013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40026929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
40036929Smisaki rbr_p->rdc));
40043859Sml29623
40053859Sml29623 rx_msg_ring = rbr_p->rx_msg_ring;
40063859Sml29623 ring_info = rbr_p->ring_info;
40073859Sml29623
40083859Sml29623 if (rx_msg_ring == NULL || ring_info == NULL) {
40096929Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40106929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: "
40116929Smisaki "rx_msg_ring $%p ring_info $%p",
40126929Smisaki rx_msg_p, ring_info));
40133859Sml29623 return;
40143859Sml29623 }
40153859Sml29623
40163859Sml29623 #ifdef NXGE_DEBUG
40173859Sml29623 num_chunks = rbr_p->num_blocks;
40183859Sml29623 #endif
40193859Sml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
40203859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40216929Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
40226929Smisaki "tnblocks %d (max %d) size ptrs %d ",
40236929Smisaki rbr_p->rdc, num_chunks,
40246929Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size));
40253859Sml29623
40263859Sml29623 for (i = 0; i < rbr_p->tnblocks; i++) {
40273859Sml29623 rx_msg_p = rx_msg_ring[i];
40283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40296929Smisaki " nxge_unmap_rxdma_channel_buf_ring: "
40306929Smisaki "rx_msg_p $%p",
40316929Smisaki rx_msg_p));
40323859Sml29623 if (rx_msg_p != NULL) {
40333859Sml29623 freeb(rx_msg_p->rx_mblk_p);
40343859Sml29623 rx_msg_ring[i] = NULL;
40353859Sml29623 }
40363859Sml29623 }
40373859Sml29623
40385170Stm144005 /*
40395170Stm144005 * We no longer may use the mutex <post_lock>. By setting
40405170Stm144005 * <rbr_state> to anything but POSTING, we prevent
40415170Stm144005 * nxge_post_page() from accessing a dead mutex.
40425170Stm144005 */
40435170Stm144005 rbr_p->rbr_state = RBR_UNMAPPING;
40443859Sml29623 MUTEX_DESTROY(&rbr_p->post_lock);
40455170Stm144005
40463859Sml29623 MUTEX_DESTROY(&rbr_p->lock);
40475170Stm144005
40485170Stm144005 if (rbr_p->rbr_ref_cnt == 0) {
40496495Sspeer /*
40506495Sspeer * This is the normal state of affairs.
40516495Sspeer * Need to free the following buffers:
40526495Sspeer * - data buffers
40536495Sspeer * - rx_msg ring
40546495Sspeer * - ring_info
40556495Sspeer * - rbr ring
40566495Sspeer */
40576495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL,
40586495Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing "));
40596495Sspeer nxge_rxdma_databuf_free(rbr_p);
40606495Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t));
40616495Sspeer KMEM_FREE(rx_msg_ring, size);
40625170Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p));
40635170Stm144005 } else {
40645170Stm144005 /*
40655170Stm144005 * Some of our buffers are still being used.
40665170Stm144005 * Therefore, tell nxge_freeb() this ring is
40675170Stm144005 * unmapped, so it may free <rbr_p> for us.
40685170Stm144005 */
40695170Stm144005 rbr_p->rbr_state = RBR_UNMAPPED;
40705170Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
40715170Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.",
40725170Stm144005 rbr_p->rbr_ref_cnt,
40735170Stm144005 rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
40745170Stm144005 }
40753859Sml29623
40763859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40776929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring"));
40783859Sml29623 }
40793859Sml29623
40806495Sspeer /*
40816495Sspeer * nxge_rxdma_hw_start_common
40826495Sspeer *
40836495Sspeer * Arguments:
40846495Sspeer * nxgep
40856495Sspeer *
40866495Sspeer * Notes:
40876495Sspeer *
40886495Sspeer * NPI/NXGE function calls:
40896495Sspeer * nxge_init_fzc_rx_common();
40906495Sspeer * nxge_init_fzc_rxdma_port();
40916495Sspeer *
40926495Sspeer * Registers accessed:
40936495Sspeer *
40946495Sspeer * Context:
40956495Sspeer * Service domain
40966495Sspeer */
40973859Sml29623 static nxge_status_t
nxge_rxdma_hw_start_common(p_nxge_t nxgep)40983859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
40993859Sml29623 {
41003859Sml29623 nxge_status_t status = NXGE_OK;
41013859Sml29623
41023859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
41033859Sml29623
41043859Sml29623 /*
41053859Sml29623 * Load the sharable parameters by writing to the
41063859Sml29623 * function zero control registers. These FZC registers
41073859Sml29623 * should be initialized only once for the entire chip.
41083859Sml29623 */
41093859Sml29623 (void) nxge_init_fzc_rx_common(nxgep);
41103859Sml29623
41113859Sml29623 /*
41123859Sml29623 * Initialize the RXDMA port specific FZC control configurations.
41133859Sml29623 * These FZC registers are pertaining to each port.
41143859Sml29623 */
41153859Sml29623 (void) nxge_init_fzc_rxdma_port(nxgep);
41163859Sml29623
41173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
41183859Sml29623
41193859Sml29623 return (status);
41203859Sml29623 }
41213859Sml29623
41223859Sml29623 static nxge_status_t
nxge_rxdma_hw_start(p_nxge_t nxgep,int channel)41236495Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
41243859Sml29623 {
41253859Sml29623 int i, ndmas;
41263859Sml29623 p_rx_rbr_rings_t rx_rbr_rings;
41273859Sml29623 p_rx_rbr_ring_t *rbr_rings;
41283859Sml29623 p_rx_rcr_rings_t rx_rcr_rings;
41293859Sml29623 p_rx_rcr_ring_t *rcr_rings;
41303859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p;
41313859Sml29623 p_rx_mbox_t *rx_mbox_p;
41323859Sml29623 nxge_status_t status = NXGE_OK;
41333859Sml29623
41343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
41353859Sml29623
41363859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings;
41373859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings;
41383859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
41393859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
41406929Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers"));
41413859Sml29623 return (NXGE_ERROR);
41423859Sml29623 }
41433859Sml29623 ndmas = rx_rbr_rings->ndmas;
41443859Sml29623 if (ndmas == 0) {
41453859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
41466929Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated"));
41473859Sml29623 return (NXGE_ERROR);
41483859Sml29623 }
41493859Sml29623
41503859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
41516929Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
41523859Sml29623
41533859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings;
41543859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings;
41553859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
41563859Sml29623 if (rx_mbox_areas_p) {
41573859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
41583859Sml29623 }
41593859Sml29623
41606495Sspeer i = channel;
41616495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
41626929Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
41636929Smisaki ndmas, channel));
41646495Sspeer status = nxge_rxdma_start_channel(nxgep, channel,
41656495Sspeer (p_rx_rbr_ring_t)rbr_rings[i],
41666495Sspeer (p_rx_rcr_ring_t)rcr_rings[i],
41676495Sspeer (p_rx_mbox_t)rx_mbox_p[i]);
41686495Sspeer if (status != NXGE_OK) {
41696495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
41706495Sspeer "==> nxge_rxdma_hw_start: disable "
41716495Sspeer "(status 0x%x channel %d)", status, channel));
41726495Sspeer return (status);
41733859Sml29623 }
41743859Sml29623
41753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
41766929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx",
41776929Smisaki rx_rbr_rings, rx_rcr_rings));
41783859Sml29623
41793859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
41806929Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status));
41813859Sml29623
41823859Sml29623 return (status);
41833859Sml29623 }
41843859Sml29623
41853859Sml29623 static void
nxge_rxdma_hw_stop(p_nxge_t nxgep,int channel)41866495Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
41873859Sml29623 {
41883859Sml29623 p_rx_rbr_rings_t rx_rbr_rings;
41893859Sml29623 p_rx_rcr_rings_t rx_rcr_rings;
41903859Sml29623
41913859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
41923859Sml29623
41933859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings;
41943859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings;
41953859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
41963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL,
41976929Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers"));
41983859Sml29623 return;
41993859Sml29623 }
42003859Sml29623
42013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42026929Smisaki "==> nxge_rxdma_hw_stop(channel %d)",
42036929Smisaki channel));
42046495Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel);
42053859Sml29623
42063859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
42076929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx",
42086929Smisaki rx_rbr_rings, rx_rcr_rings));
42093859Sml29623
42103859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
42113859Sml29623 }
42123859Sml29623
42133859Sml29623
42143859Sml29623 static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep,uint16_t channel,p_rx_rbr_ring_t rbr_p,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t mbox_p)42153859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
42163859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
42173859Sml29623
42183859Sml29623 {
42193859Sml29623 npi_handle_t handle;
42203859Sml29623 npi_status_t rs = NPI_SUCCESS;
42213859Sml29623 rx_dma_ctl_stat_t cs;
42223859Sml29623 rx_dma_ent_msk_t ent_mask;
42233859Sml29623 nxge_status_t status = NXGE_OK;
42243859Sml29623
42253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
42263859Sml29623
42273859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep);
42283859Sml29623
42293859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
42303859Sml29623 "npi handle addr $%p acc $%p",
42313859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh));
42323859Sml29623
42336495Sspeer /* Reset RXDMA channel, but not if you're a guest. */
42346495Sspeer if (!isLDOMguest(nxgep)) {
42356495Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel);
42366495Sspeer if (rs != NPI_SUCCESS) {
42376495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42386495Sspeer "==> nxge_init_fzc_rdc: "
42396495Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
42406495Sspeer channel, rs));
42416495Sspeer return (NXGE_ERROR | rs);
42426495Sspeer }
42436495Sspeer
42446495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42456495Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d",
42466495Sspeer channel));
42473859Sml29623 }
42483859Sml29623
42496495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
42506495Sspeer if (isLDOMguest(nxgep))
42516495Sspeer (void) nxge_rdc_lp_conf(nxgep, channel);
42526495Sspeer #endif
42533859Sml29623
42543859Sml29623 /*
42553859Sml29623 * Initialize the RXDMA channel specific FZC control
42563859Sml29623 * configurations. These FZC registers are pertaining
42573859Sml29623 * to each RX channel (logical pages).
42583859Sml29623 */
42596495Sspeer if (!isLDOMguest(nxgep)) {
42606495Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel);
42616495Sspeer if (status != NXGE_OK) {
42626495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42636495Sspeer "==> nxge_rxdma_start_channel: "
42646495Sspeer "init fzc rxdma failed (0x%08x channel %d)",
42656495Sspeer status, channel));
42666495Sspeer return (status);
42676495Sspeer }
42686495Sspeer
42696495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42706495Sspeer "==> nxge_rxdma_start_channel: fzc done"));
42713859Sml29623 }
42723859Sml29623
42733859Sml29623 /* Set up the interrupt event masks. */
42743859Sml29623 ent_mask.value = 0;
42753859Sml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
42763859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
42776495Sspeer &ent_mask);
42783859Sml29623 if (rs != NPI_SUCCESS) {
42793859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42803859Sml29623 "==> nxge_rxdma_start_channel: "
42816495Sspeer "init rxdma event masks failed "
42826495Sspeer "(0x%08x channel %d)",
42833859Sml29623 status, channel));
42843859Sml29623 return (NXGE_ERROR | rs);
42853859Sml29623 }
42863859Sml29623
42876495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42886495Sspeer "==> nxge_rxdma_start_channel: "
42893859Sml29623 "event done: channel %d (mask 0x%016llx)",
42903859Sml29623 channel, ent_mask.value));
42913859Sml29623
42923859Sml29623 /* Initialize the receive DMA control and status register */
42933859Sml29623 cs.value = 0;
42943859Sml29623 cs.bits.hdw.mex = 1;
42953859Sml29623 cs.bits.hdw.rcrthres = 1;
42963859Sml29623 cs.bits.hdw.rcrto = 1;
42973859Sml29623 cs.bits.hdw.rbr_empty = 1;
42983859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
42993859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
43003859Sml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
43013859Sml29623 if (status != NXGE_OK) {
43023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
43033859Sml29623 "==> nxge_rxdma_start_channel: "
43043859Sml29623 "init rxdma control register failed (0x%08x channel %d",
43053859Sml29623 status, channel));
43063859Sml29623 return (status);
43073859Sml29623 }
43083859Sml29623
43093859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
43103859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value));
43113859Sml29623
43123859Sml29623 /*
43133859Sml29623 * Load RXDMA descriptors, buffers, mailbox,
43143859Sml29623 * initialise the receive DMA channels and
43153859Sml29623 * enable each DMA channel.
43163859Sml29623 */
43173859Sml29623 status = nxge_enable_rxdma_channel(nxgep,
43186495Sspeer channel, rbr_p, rcr_p, mbox_p);
43193859Sml29623
43203859Sml29623 if (status != NXGE_OK) {
43213859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
43226495Sspeer " nxge_rxdma_start_channel: "
43236495Sspeer " enable rxdma failed (0x%08x channel %d)",
43246495Sspeer status, channel));
43256495Sspeer return (status);
43266495Sspeer }
43276495Sspeer
43286495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
43296495Sspeer "==> nxge_rxdma_start_channel: enabled channel %d"));
43306495Sspeer
43316495Sspeer if (isLDOMguest(nxgep)) {
43326495Sspeer /* Add interrupt handler for this channel. */
433310577SMichael.Speer@Sun.COM status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
433410577SMichael.Speer@Sun.COM if (status != NXGE_OK) {
43356495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
43363859Sml29623 " nxge_rxdma_start_channel: "
43376495Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)",
433810577SMichael.Speer@Sun.COM status, channel));
433910577SMichael.Speer@Sun.COM return (status);
43406495Sspeer }
43413859Sml29623 }
43423859Sml29623
43433859Sml29623 ent_mask.value = 0;
43443859Sml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
43453859Sml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
43463859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel,
43473859Sml29623 &ent_mask);
43483859Sml29623 if (rs != NPI_SUCCESS) {
43493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
43503859Sml29623 "==> nxge_rxdma_start_channel: "
43513859Sml29623 "init rxdma event masks failed (0x%08x channel %d)",
43523859Sml29623 status, channel));
43533859Sml29623 return (NXGE_ERROR | rs);
43543859Sml29623 }
43553859Sml29623
43563859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
43573859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value));
43583859Sml29623
43593859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));
43603859Sml29623
43613859Sml29623 return (NXGE_OK);
43623859Sml29623 }
43633859Sml29623
/*
 * nxge_rxdma_stop_channel
 *
 *	Quiesce and disable one RXDMA channel, following the chip's
 *	documented shutdown sequence: disable the RxMAC and drain the
 *	IPP port (service domain only), reset the channel, mask all
 *	channel events, clear the control/status register, disable the
 *	channel, and finally re-enable the RxMAC.
 *
 * Arguments:
 *	nxgep
 *	channel		The channel to stop.
 *
 * Returns:
 *	NXGE_OK, or an NXGE/NPI error status from the first failing step.
 */
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Guests have no access to the MAC or IPP; skip those steps. */
	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			/* Best-effort: log and continue the shutdown. */
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks: mask every channel event. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;	/* all bits clear (quiescent state) */
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		/* NOTE(review): message text says "enable" but this is
		 * the disable step — misleading wording. */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}
44733859Sml29623
44743859Sml29623 nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)44753859Sml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
44763859Sml29623 {
44773859Sml29623 npi_handle_t handle;
44783859Sml29623 p_nxge_rdc_sys_stats_t statsp;
44793859Sml29623 rx_ctl_dat_fifo_stat_t stat;
44803859Sml29623 uint32_t zcp_err_status;
44813859Sml29623 uint32_t ipp_err_status;
44823859Sml29623 nxge_status_t status = NXGE_OK;
44833859Sml29623 npi_status_t rs = NPI_SUCCESS;
44843859Sml29623 boolean_t my_err = B_FALSE;
44853859Sml29623
44863859Sml29623 handle = nxgep->npi_handle;
44873859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
44883859Sml29623
44893859Sml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
44903859Sml29623
44913859Sml29623 if (rs != NPI_SUCCESS)
44923859Sml29623 return (NXGE_ERROR | rs);
44933859Sml29623
44943859Sml29623 if (stat.bits.ldw.id_mismatch) {
44953859Sml29623 statsp->id_mismatch++;
44963859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
44976929Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
44983859Sml29623 /* Global fatal error encountered */
44993859Sml29623 }
45003859Sml29623
45013859Sml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
45023859Sml29623 switch (nxgep->mac.portnum) {
45033859Sml29623 case 0:
45043859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
45056929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
45063859Sml29623 my_err = B_TRUE;
45073859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err;
45083859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err;
45093859Sml29623 }
45103859Sml29623 break;
45113859Sml29623 case 1:
45123859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
45136929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
45143859Sml29623 my_err = B_TRUE;
45153859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err;
45163859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err;
45173859Sml29623 }
45183859Sml29623 break;
45193859Sml29623 case 2:
45203859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
45216929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
45223859Sml29623 my_err = B_TRUE;
45233859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err;
45243859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err;
45253859Sml29623 }
45263859Sml29623 break;
45273859Sml29623 case 3:
45283859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
45296929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
45303859Sml29623 my_err = B_TRUE;
45313859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err;
45323859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err;
45333859Sml29623 }
45343859Sml29623 break;
45353859Sml29623 default:
45363859Sml29623 return (NXGE_ERROR);
45373859Sml29623 }
45383859Sml29623 }
45393859Sml29623
45403859Sml29623 if (my_err) {
45413859Sml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
45426929Smisaki zcp_err_status);
45433859Sml29623 if (status != NXGE_OK)
45443859Sml29623 return (status);
45453859Sml29623 }
45463859Sml29623
45473859Sml29623 return (NXGE_OK);
45483859Sml29623 }
45493859Sml29623
45503859Sml29623 static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep,uint32_t ipp_status,uint32_t zcp_status)45513859Sml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
45523859Sml29623 uint32_t zcp_status)
45533859Sml29623 {
45543859Sml29623 boolean_t rxport_fatal = B_FALSE;
45553859Sml29623 p_nxge_rdc_sys_stats_t statsp;
45563859Sml29623 nxge_status_t status = NXGE_OK;
45573859Sml29623 uint8_t portn;
45583859Sml29623
45593859Sml29623 portn = nxgep->mac.portnum;
45603859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;
45613859Sml29623
45623859Sml29623 if (ipp_status & (0x1 << portn)) {
45633859Sml29623 statsp->ipp_eop_err++;
45643859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
45656929Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
45663859Sml29623 rxport_fatal = B_TRUE;
45673859Sml29623 }
45683859Sml29623
45693859Sml29623 if (zcp_status & (0x1 << portn)) {
45703859Sml29623 statsp->zcp_eop_err++;
45713859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
45726929Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
45733859Sml29623 rxport_fatal = B_TRUE;
45743859Sml29623 }
45753859Sml29623
45763859Sml29623 if (rxport_fatal) {
45773859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
45786929Smisaki " nxge_rxdma_handle_port_error: "
45796929Smisaki " fatal error on Port #%d\n",
45806929Smisaki portn));
45813859Sml29623 status = nxge_rx_port_fatal_err_recover(nxgep);
45823859Sml29623 if (status == NXGE_OK) {
45833859Sml29623 FM_SERVICE_RESTORED(nxgep);
45843859Sml29623 }
45853859Sml29623 }
45863859Sml29623
45873859Sml29623 return (status);
45883859Sml29623 }
45893859Sml29623
/*
 * nxge_rxdma_fatal_err_recover
 *
 *	Recover a single RXDMA channel after a fatal channel error:
 *	disable and reset the RDC, re-initialize the RCR/RBR software
 *	state, reclaim any receive buffers whose use has completed, and
 *	restart the channel.  The sequence is strictly ordered; the RBR
 *	ring lock and post lock are held across the whole operation.
 *
 *	NOTE(review): the caller (nxge_rx_port_fatal_err_recover) holds
 *	the channel's RCR lock around this call — confirm before
 *	adding any path that acquires it here.
 *
 * Arguments:
 *	nxgep	per-device soft state
 *	channel	RXDMA channel to recover
 *
 * Returns:
 *	NXGE_OK on success; (NXGE_ERROR | rs) if a disable/reset/
 *	restart step fails.
 */
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[channel];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[channel];

	/* Hold both RBR locks for the entire reset/re-init sequence. */
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	/* A mask failure is logged but does not abort the recovery. */
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp = (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	/* Rewind the RBR software indices to their post-init positions. */
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	/* Reset the RCR software view to the start of the descriptor area. */
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	/* 32-bit kernel: IO address must be narrowed before the cast. */
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	/* Clear the RCR descriptor memory before restart. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	/*
	 * Walk the receive buffer ring: any buffer still referenced but
	 * whose usage count has reached its maximum can be marked free
	 * for re-posting; buffers mid-use are reported and left alone.
	 */
	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				/* Poison value marks a reclaimed buffer. */
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	return (NXGE_OK);

fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (NXGE_ERROR | rs);
}
47293859Sml29623
/*
 * nxge_rx_port_fatal_err_recover
 *
 *	Recover the entire RX port after a fatal port-level error.
 *	The steps are order-dependent: disable the RxMAC, run the
 *	per-channel fatal-error recovery on every RDC this instance
 *	owns, reset then re-initialize the IPP and RxMAC, and finally
 *	re-enable the RxMAC.  Any failing step aborts the sequence.
 *
 * Arguments:
 *	nxgep	per-device soft state
 *
 * Returns:
 *	NXGE_OK if every step succeeded; otherwise the last recorded
 *	failure status (NXGE_OK if an early, status-less step failed —
 *	see the fail label).
 */
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	p_rx_rcr_ring_t rcrp;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Give in-flight traffic time to drain after the MAC is disabled. */
	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

	/*
	 * Recover each RDC owned by this instance (per the owned.map
	 * bitmap), holding that channel's RCR lock across the recovery.
	 * A per-channel failure is logged but does not stop the loop.
	 */
	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			rcrp = nxgep->rx_rcr_rings->rcr_rings[rdc];
			if (rcrp != NULL) {
				MUTEX_ENTER(&rcrp->lock);
				if (nxge_rxdma_fatal_err_recover(nxgep,
				    rdc) != NXGE_OK) {
					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
					    "Could not recover "
					    "channel %d", rdc));
				}
				MUTEX_EXIT(&rcrp->lock);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);
fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}
48243859Sml29623
48253859Sml29623 void
nxge_rxdma_inject_err(p_nxge_t nxgep,uint32_t err_id,uint8_t chan)48263859Sml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
48273859Sml29623 {
48283859Sml29623 rx_dma_ctl_stat_t cs;
48293859Sml29623 rx_ctl_dat_fifo_stat_t cdfs;
48303859Sml29623
48313859Sml29623 switch (err_id) {
48323859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
48333859Sml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
48343859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
48353859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
48363859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
48373859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
48383859Sml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
48393859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
48403859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON:
48413859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL:
48423859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL:
48433859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
48443859Sml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
48453859Sml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
48463859Sml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
48476929Smisaki chan, &cs.value);
48483859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
48493859Sml29623 cs.bits.hdw.rcr_ack_err = 1;
48503859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
48513859Sml29623 cs.bits.hdw.dc_fifo_err = 1;
48523859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
48533859Sml29623 cs.bits.hdw.rcr_sha_par = 1;
48543859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
48553859Sml29623 cs.bits.hdw.rbr_pre_par = 1;
48563859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
48573859Sml29623 cs.bits.hdw.rbr_tmout = 1;
48583859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
48593859Sml29623 cs.bits.hdw.rsp_cnt_err = 1;
48603859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
48613859Sml29623 cs.bits.hdw.byte_en_bus = 1;
48623859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
48633859Sml29623 cs.bits.hdw.rsp_dat_err = 1;
48643859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
48653859Sml29623 cs.bits.hdw.config_err = 1;
48663859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
48673859Sml29623 cs.bits.hdw.rcrincon = 1;
48683859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
48693859Sml29623 cs.bits.hdw.rcrfull = 1;
48703859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
48713859Sml29623 cs.bits.hdw.rbrfull = 1;
48723859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
48733859Sml29623 cs.bits.hdw.rbrlogpage = 1;
48743859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
48753859Sml29623 cs.bits.hdw.cfiglogpage = 1;
48765125Sjoycey #if defined(__i386)
48775125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
48786929Smisaki cs.value);
48795125Sjoycey #else
48803859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
48816929Smisaki cs.value);
48825125Sjoycey #endif
48833859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
48846929Smisaki chan, cs.value);
48853859Sml29623 break;
48863859Sml29623 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
48873859Sml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
48883859Sml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
48893859Sml29623 cdfs.value = 0;
48903859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
48913859Sml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
48923859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
48933859Sml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
48943859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
48953859Sml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
48965125Sjoycey #if defined(__i386)
48975125Sjoycey cmn_err(CE_NOTE,
48986929Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
48996929Smisaki cdfs.value);
49005125Sjoycey #else
49013859Sml29623 cmn_err(CE_NOTE,
49026929Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
49036929Smisaki cdfs.value);
49045125Sjoycey #endif
49056495Sspeer NXGE_REG_WR64(nxgep->npi_handle,
49066495Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
49073859Sml29623 break;
49083859Sml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR:
49093859Sml29623 break;
49105165Syc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR:
49113859Sml29623 break;
49123859Sml29623 }
49133859Sml29623 }
49146495Sspeer
49156495Sspeer static void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)49166495Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
49176495Sspeer {
49186495Sspeer rxring_info_t *ring_info;
49196495Sspeer int index;
49206495Sspeer uint32_t chunk_size;
49216495Sspeer uint64_t kaddr;
49226495Sspeer uint_t num_blocks;
49236495Sspeer
49246495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));
49256495Sspeer
49266495Sspeer if (rbr_p == NULL) {
49276495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
49286495Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
49296495Sspeer return;
49306495Sspeer }
49316495Sspeer
49326495Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
49339232SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((NULL, DMA_CTL,
49349232SMichael.Speer@Sun.COM "<== nxge_rxdma_databuf_free: DDI"));
49356495Sspeer return;
49366495Sspeer }
49376495Sspeer
49386495Sspeer ring_info = rbr_p->ring_info;
49396495Sspeer if (ring_info == NULL) {
49406495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
49416495Sspeer "==> nxge_rxdma_databuf_free: NULL ring info"));
49426495Sspeer return;
49436495Sspeer }
49446495Sspeer num_blocks = rbr_p->num_blocks;
49456495Sspeer for (index = 0; index < num_blocks; index++) {
49466495Sspeer kaddr = ring_info->buffer[index].kaddr;
49476495Sspeer chunk_size = ring_info->buffer[index].buf_size;
49486495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL,
49496495Sspeer "==> nxge_rxdma_databuf_free: free chunk %d "
49506495Sspeer "kaddrp $%p chunk size %d",
49516495Sspeer index, kaddr, chunk_size));
49526495Sspeer if (kaddr == NULL) continue;
49536495Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
49546495Sspeer ring_info->buffer[index].kaddr = NULL;
49556495Sspeer }
49566495Sspeer
49576495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
49586495Sspeer }
49596495Sspeer
49606495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
49616495Sspeer extern void contig_mem_free(void *, size_t);
49626495Sspeer #endif
49636495Sspeer
49646495Sspeer void
nxge_free_buf(buf_alloc_type_t alloc_type,uint64_t kaddr,uint32_t buf_size)49656495Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
49666495Sspeer {
49676495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));
49686495Sspeer
49696495Sspeer if (kaddr == NULL || !buf_size) {
49706495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
49716495Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d",
49726495Sspeer kaddr, buf_size));
49736495Sspeer return;
49746495Sspeer }
49756495Sspeer
49766495Sspeer switch (alloc_type) {
49776495Sspeer case KMEM_ALLOC:
49786495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL,
49796495Sspeer "==> nxge_free_buf: freeing kmem $%p size %d",
49806495Sspeer kaddr, buf_size));
49816495Sspeer #if defined(__i386)
49826495Sspeer KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
49836495Sspeer #else
49846495Sspeer KMEM_FREE((void *)kaddr, buf_size);
49856495Sspeer #endif
49866495Sspeer break;
49876495Sspeer
49886495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
49896495Sspeer case CONTIG_MEM_ALLOC:
49906495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL,
49916495Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d",
49926495Sspeer kaddr, buf_size));
49936495Sspeer contig_mem_free((void *)kaddr, buf_size);
49946495Sspeer break;
49956495Sspeer #endif
49966495Sspeer
49976495Sspeer default:
49986495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
49996495Sspeer "<== nxge_free_buf: unsupported alloc type %d",
50006495Sspeer alloc_type));
50016495Sspeer return;
50026495Sspeer }
50036495Sspeer
50046495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
50056495Sspeer }
5006