13859Sml29623 /* 23859Sml29623 * CDDL HEADER START 33859Sml29623 * 43859Sml29623 * The contents of this file are subject to the terms of the 53859Sml29623 * Common Development and Distribution License (the "License"). 63859Sml29623 * You may not use this file except in compliance with the License. 73859Sml29623 * 83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93859Sml29623 * or http://www.opensolaris.org/os/licensing. 103859Sml29623 * See the License for the specific language governing permissions 113859Sml29623 * and limitations under the License. 123859Sml29623 * 133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each 143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the 163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying 173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner] 183859Sml29623 * 193859Sml29623 * CDDL HEADER END 203859Sml29623 */ 213859Sml29623 /* 22*8661SSantwona.Behera@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 233859Sml29623 * Use is subject to license terms. 
243859Sml29623 */ 253859Sml29623 263859Sml29623 #include <sys/nxge/nxge_impl.h> 273859Sml29623 #include <sys/nxge/nxge_rxdma.h> 286495Sspeer #include <sys/nxge/nxge_hio.h> 296495Sspeer 306495Sspeer #if !defined(_BIG_ENDIAN) 316495Sspeer #include <npi_rx_rd32.h> 326495Sspeer #endif 336495Sspeer #include <npi_rx_rd64.h> 346495Sspeer #include <npi_rx_wr64.h> 353859Sml29623 363859Sml29623 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 376495Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 383859Sml29623 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 393859Sml29623 (rdc + nxgep->pt_config.hw_config.start_rdc) 403859Sml29623 413859Sml29623 /* 428275SEric Cheng * XXX: This is a tunable to limit the number of packets each interrupt 438275SEric Cheng * handles. 0 (default) means that each interrupt takes as much packets 448275SEric Cheng * as it finds. 458275SEric Cheng */ 468275SEric Cheng extern int nxge_max_intr_pkts; 478275SEric Cheng 488275SEric Cheng /* 493859Sml29623 * Globals: tunable parameters (/etc/system or adb) 503859Sml29623 * 513859Sml29623 */ 523859Sml29623 extern uint32_t nxge_rbr_size; 533859Sml29623 extern uint32_t nxge_rcr_size; 543859Sml29623 extern uint32_t nxge_rbr_spare_size; 553859Sml29623 563859Sml29623 extern uint32_t nxge_mblks_pending; 573859Sml29623 583859Sml29623 /* 593859Sml29623 * Tunable to reduce the amount of time spent in the 603859Sml29623 * ISR doing Rx Processing. 613859Sml29623 */ 623859Sml29623 extern uint32_t nxge_max_rx_pkts; 633859Sml29623 boolean_t nxge_jumbo_enable; 643859Sml29623 65*8661SSantwona.Behera@Sun.COM extern uint16_t nxge_rcr_timeout; 66*8661SSantwona.Behera@Sun.COM extern uint16_t nxge_rcr_threshold; 67*8661SSantwona.Behera@Sun.COM 683859Sml29623 /* 693859Sml29623 * Tunables to manage the receive buffer blocks. 703859Sml29623 * 713859Sml29623 * nxge_rx_threshold_hi: copy all buffers. 723859Sml29623 * nxge_rx_bcopy_size_type: receive buffer block size type. 
733859Sml29623 * nxge_rx_threshold_lo: copy only up to tunable block size type. 743859Sml29623 */ 753859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 763859Sml29623 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 773859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 783859Sml29623 796611Sml29623 extern uint32_t nxge_cksum_offload; 806495Sspeer 816495Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 826495Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 833859Sml29623 843859Sml29623 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 856495Sspeer 866495Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 876495Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 883859Sml29623 893859Sml29623 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 903859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 913859Sml29623 uint32_t, 923859Sml29623 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 933859Sml29623 p_rx_mbox_t *); 943859Sml29623 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 953859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 963859Sml29623 973859Sml29623 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 983859Sml29623 uint16_t, 993859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 1003859Sml29623 p_rx_rcr_ring_t *, p_rx_mbox_t *); 1013859Sml29623 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 1023859Sml29623 p_rx_rcr_ring_t, p_rx_mbox_t); 1033859Sml29623 1043859Sml29623 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 1053859Sml29623 uint16_t, 1063859Sml29623 p_nxge_dma_common_t *, 1073859Sml29623 p_rx_rbr_ring_t *, uint32_t); 1083859Sml29623 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 1093859Sml29623 p_rx_rbr_ring_t); 1103859Sml29623 1113859Sml29623 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 1123859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 1133859Sml29623 static nxge_status_t 
nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 1143859Sml29623 1156495Sspeer static mblk_t * 1166495Sspeer nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int); 1173859Sml29623 1183859Sml29623 static void nxge_receive_packet(p_nxge_t, 1193859Sml29623 p_rx_rcr_ring_t, 1203859Sml29623 p_rcr_entry_t, 1213859Sml29623 boolean_t *, 1223859Sml29623 mblk_t **, mblk_t **); 1233859Sml29623 1243859Sml29623 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 1253859Sml29623 1263859Sml29623 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 1273859Sml29623 static void nxge_freeb(p_rx_msg_t); 1288275SEric Cheng static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t); 1296495Sspeer static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t); 1303859Sml29623 1313859Sml29623 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 1323859Sml29623 uint32_t, uint32_t); 1333859Sml29623 1343859Sml29623 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 1353859Sml29623 p_rx_rbr_ring_t); 1363859Sml29623 1373859Sml29623 1383859Sml29623 static nxge_status_t 1393859Sml29623 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 1403859Sml29623 1413859Sml29623 nxge_status_t 1423859Sml29623 nxge_rx_port_fatal_err_recover(p_nxge_t); 1433859Sml29623 1446495Sspeer static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t); 1456495Sspeer 1463859Sml29623 nxge_status_t 1473859Sml29623 nxge_init_rxdma_channels(p_nxge_t nxgep) 1483859Sml29623 { 1497950SMichael.Speer@Sun.COM nxge_grp_set_t *set = &nxgep->rx_set; 1508275SEric Cheng int i, count, channel; 1517950SMichael.Speer@Sun.COM nxge_grp_t *group; 1528275SEric Cheng dc_map_t map; 1538275SEric Cheng int dev_gindex; 1543859Sml29623 1553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 1563859Sml29623 1576495Sspeer if (!isLDOMguest(nxgep)) { 1586495Sspeer if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) { 1596495Sspeer cmn_err(CE_NOTE, "hw_start_common"); 
1606495Sspeer return (NXGE_ERROR); 1616495Sspeer } 1626495Sspeer } 1636495Sspeer 1646495Sspeer /* 1656495Sspeer * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8) 1666495Sspeer * We only have 8 hardware RDC tables, but we may have 1676495Sspeer * up to 16 logical (software-defined) groups of RDCS, 1686495Sspeer * if we make use of layer 3 & 4 hardware classification. 1696495Sspeer */ 1706495Sspeer for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 1716495Sspeer if ((1 << i) & set->lg.map) { 1727950SMichael.Speer@Sun.COM group = set->group[i]; 1738275SEric Cheng dev_gindex = 1748275SEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 1758275SEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 1766495Sspeer for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 1778275SEric Cheng if ((1 << channel) & map) { 1786495Sspeer if ((nxge_grp_dc_add(nxgep, 1797755SMisaki.Kataoka@Sun.COM group, VP_BOUND_RX, channel))) 1807950SMichael.Speer@Sun.COM goto init_rxdma_channels_exit; 1816495Sspeer } 1826495Sspeer } 1836495Sspeer } 1846495Sspeer if (++count == set->lg.count) 1856495Sspeer break; 1866495Sspeer } 1876495Sspeer 1886495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 1896495Sspeer return (NXGE_OK); 1907950SMichael.Speer@Sun.COM 1917950SMichael.Speer@Sun.COM init_rxdma_channels_exit: 1927950SMichael.Speer@Sun.COM for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 1937950SMichael.Speer@Sun.COM if ((1 << i) & set->lg.map) { 1947950SMichael.Speer@Sun.COM group = set->group[i]; 1958275SEric Cheng dev_gindex = 1968275SEric Cheng nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i; 1978275SEric Cheng map = nxgep->pt_config.rdc_grps[dev_gindex].map; 1988275SEric Cheng for (channel = 0; channel < NXGE_MAX_RDCS; channel++) { 1998275SEric Cheng if ((1 << channel) & map) { 2007950SMichael.Speer@Sun.COM nxge_grp_dc_remove(nxgep, 2018275SEric Cheng VP_BOUND_RX, channel); 2027950SMichael.Speer@Sun.COM } 
2037950SMichael.Speer@Sun.COM } 2047950SMichael.Speer@Sun.COM } 2057950SMichael.Speer@Sun.COM if (++count == set->lg.count) 2067950SMichael.Speer@Sun.COM break; 2077950SMichael.Speer@Sun.COM } 2087950SMichael.Speer@Sun.COM 2097950SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels")); 2107950SMichael.Speer@Sun.COM return (NXGE_ERROR); 2116495Sspeer } 2126495Sspeer 2136495Sspeer nxge_status_t 2146495Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 2156495Sspeer { 2168400SNicolas.Droux@Sun.COM nxge_status_t status; 2176495Sspeer 2186495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 2196495Sspeer 2206495Sspeer status = nxge_map_rxdma(nxge, channel); 2213859Sml29623 if (status != NXGE_OK) { 2226495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2236495Sspeer "<== nxge_init_rxdma: status 0x%x", status)); 2243859Sml29623 return (status); 2253859Sml29623 } 2263859Sml29623 2278400SNicolas.Droux@Sun.COM #if defined(sun4v) 2288400SNicolas.Droux@Sun.COM if (isLDOMguest(nxge)) { 2298400SNicolas.Droux@Sun.COM /* set rcr_ring */ 2308400SNicolas.Droux@Sun.COM p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel]; 2318400SNicolas.Droux@Sun.COM 2328400SNicolas.Droux@Sun.COM status = nxge_hio_rxdma_bind_intr(nxge, ring, channel); 2338400SNicolas.Droux@Sun.COM if (status != NXGE_OK) { 2348400SNicolas.Droux@Sun.COM nxge_unmap_rxdma(nxge, channel); 2358400SNicolas.Droux@Sun.COM return (status); 2368400SNicolas.Droux@Sun.COM } 2378400SNicolas.Droux@Sun.COM } 2388400SNicolas.Droux@Sun.COM #endif 2398400SNicolas.Droux@Sun.COM 2406495Sspeer status = nxge_rxdma_hw_start(nxge, channel); 2413859Sml29623 if (status != NXGE_OK) { 2426495Sspeer nxge_unmap_rxdma(nxge, channel); 2433859Sml29623 } 2443859Sml29623 2456495Sspeer if (!nxge->statsp->rdc_ksp[channel]) 2466495Sspeer nxge_setup_rdc_kstats(nxge, channel); 2476495Sspeer 2486495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 2496495Sspeer "<== nxge_init_rxdma_channel: status 
0x%x", status)); 2503859Sml29623 2513859Sml29623 return (status); 2523859Sml29623 } 2533859Sml29623 2543859Sml29623 void 2553859Sml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 2563859Sml29623 { 2576495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 2586495Sspeer int rdc; 2596495Sspeer 2603859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 2613859Sml29623 2626495Sspeer if (set->owned.map == 0) { 2636495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2646495Sspeer "nxge_uninit_rxdma_channels: no channels")); 2656495Sspeer return; 2666495Sspeer } 2676495Sspeer 2686495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 2696495Sspeer if ((1 << rdc) & set->owned.map) { 2706495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 2716495Sspeer } 2726495Sspeer } 2736495Sspeer 2746495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 2756495Sspeer } 2766495Sspeer 2776495Sspeer void 2786495Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 2796495Sspeer { 2806495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 2816495Sspeer 2826495Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 2836495Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 2846495Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 2856495Sspeer } 2866495Sspeer 2876495Sspeer nxge_rxdma_hw_stop(nxgep, channel); 2886495Sspeer nxge_unmap_rxdma(nxgep, channel); 2896495Sspeer 2906495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 2913859Sml29623 } 2923859Sml29623 2933859Sml29623 nxge_status_t 2943859Sml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 2953859Sml29623 { 2963859Sml29623 npi_handle_t handle; 2973859Sml29623 npi_status_t rs = NPI_SUCCESS; 2983859Sml29623 nxge_status_t status = NXGE_OK; 2993859Sml29623 3007812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 3013859Sml29623 3023859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3033859Sml29623 rs = 
npi_rxdma_cfg_rdc_reset(handle, channel); 3043859Sml29623 3053859Sml29623 if (rs != NPI_SUCCESS) { 3063859Sml29623 status = NXGE_ERROR | rs; 3073859Sml29623 } 3083859Sml29623 3097812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 3107812SMichael.Speer@Sun.COM 3113859Sml29623 return (status); 3123859Sml29623 } 3133859Sml29623 3143859Sml29623 void 3153859Sml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 3163859Sml29623 { 3176495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 3186495Sspeer int rdc; 3193859Sml29623 3203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 3213859Sml29623 3226495Sspeer if (!isLDOMguest(nxgep)) { 3236495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 3246495Sspeer (void) npi_rxdma_dump_fzc_regs(handle); 3256495Sspeer } 3266495Sspeer 3276495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 3286495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 3296495Sspeer "nxge_rxdma_regs_dump_channels: " 3306495Sspeer "NULL ring pointer(s)")); 3313859Sml29623 return; 3323859Sml29623 } 3336495Sspeer 3346495Sspeer if (set->owned.map == 0) { 3353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3366495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 3373859Sml29623 return; 3383859Sml29623 } 3393859Sml29623 3406495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3416495Sspeer if ((1 << rdc) & set->owned.map) { 3426495Sspeer rx_rbr_ring_t *ring = 3436495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 3446495Sspeer if (ring) { 3456495Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 3466495Sspeer } 3473859Sml29623 } 3483859Sml29623 } 3493859Sml29623 3503859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 3513859Sml29623 } 3523859Sml29623 3533859Sml29623 nxge_status_t 3543859Sml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 3553859Sml29623 { 3563859Sml29623 npi_handle_t handle; 3573859Sml29623 npi_status_t rs = NPI_SUCCESS; 
3583859Sml29623 nxge_status_t status = NXGE_OK; 3593859Sml29623 3603859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 3613859Sml29623 3623859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3633859Sml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel); 3643859Sml29623 3653859Sml29623 if (rs != NPI_SUCCESS) { 3663859Sml29623 status = NXGE_ERROR | rs; 3673859Sml29623 } 3683859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 3693859Sml29623 return (status); 3703859Sml29623 } 3713859Sml29623 3723859Sml29623 nxge_status_t 3733859Sml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 3743859Sml29623 p_rx_dma_ent_msk_t mask_p) 3753859Sml29623 { 3763859Sml29623 npi_handle_t handle; 3773859Sml29623 npi_status_t rs = NPI_SUCCESS; 3783859Sml29623 nxge_status_t status = NXGE_OK; 3793859Sml29623 3803859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3816929Smisaki "<== nxge_init_rxdma_channel_event_mask")); 3823859Sml29623 3833859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3843859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 3853859Sml29623 if (rs != NPI_SUCCESS) { 3863859Sml29623 status = NXGE_ERROR | rs; 3873859Sml29623 } 3883859Sml29623 3893859Sml29623 return (status); 3903859Sml29623 } 3913859Sml29623 3923859Sml29623 nxge_status_t 3933859Sml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3943859Sml29623 p_rx_dma_ctl_stat_t cs_p) 3953859Sml29623 { 3963859Sml29623 npi_handle_t handle; 3973859Sml29623 npi_status_t rs = NPI_SUCCESS; 3983859Sml29623 nxge_status_t status = NXGE_OK; 3993859Sml29623 4003859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 4016929Smisaki "<== nxge_init_rxdma_channel_cntl_stat")); 4023859Sml29623 4033859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4043859Sml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 4053859Sml29623 4063859Sml29623 if (rs != NPI_SUCCESS) { 4073859Sml29623 status = NXGE_ERROR | rs; 4083859Sml29623 } 
4093859Sml29623 4103859Sml29623 return (status); 4113859Sml29623 } 4123859Sml29623 4136495Sspeer /* 4146495Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc 4156495Sspeer * 4166495Sspeer * Set the default RDC for an RDC Group (Table) 4176495Sspeer * 4186495Sspeer * Arguments: 4196495Sspeer * nxgep 4206495Sspeer * rdcgrp The group to modify 4216495Sspeer * rdc The new default RDC. 4226495Sspeer * 4236495Sspeer * Notes: 4246495Sspeer * 4256495Sspeer * NPI/NXGE function calls: 4266495Sspeer * npi_rxdma_cfg_rdc_table_default_rdc() 4276495Sspeer * 4286495Sspeer * Registers accessed: 4296495Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000 4306495Sspeer * 4316495Sspeer * Context: 4326495Sspeer * Service domain 4336495Sspeer */ 4343859Sml29623 nxge_status_t 4356495Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc( 4366495Sspeer p_nxge_t nxgep, 4376495Sspeer uint8_t rdcgrp, 4386495Sspeer uint8_t rdc) 4393859Sml29623 { 4403859Sml29623 npi_handle_t handle; 4413859Sml29623 npi_status_t rs = NPI_SUCCESS; 4423859Sml29623 p_nxge_dma_pt_cfg_t p_dma_cfgp; 4433859Sml29623 p_nxge_rdc_grp_t rdc_grp_p; 4443859Sml29623 uint8_t actual_rdcgrp, actual_rdc; 4453859Sml29623 4463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4476929Smisaki " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 4483859Sml29623 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 4493859Sml29623 4503859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4513859Sml29623 4526495Sspeer /* 4536495Sspeer * This has to be rewritten. Do we even allow this anymore? 
4546495Sspeer */ 4553859Sml29623 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 4566495Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc); 4576495Sspeer rdc_grp_p->def_rdc = rdc; 4583859Sml29623 4593859Sml29623 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 4603859Sml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 4613859Sml29623 4626495Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc( 4636929Smisaki handle, actual_rdcgrp, actual_rdc); 4643859Sml29623 4653859Sml29623 if (rs != NPI_SUCCESS) { 4663859Sml29623 return (NXGE_ERROR | rs); 4673859Sml29623 } 4683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4696929Smisaki " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 4703859Sml29623 return (NXGE_OK); 4713859Sml29623 } 4723859Sml29623 4733859Sml29623 nxge_status_t 4743859Sml29623 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 4753859Sml29623 { 4763859Sml29623 npi_handle_t handle; 4773859Sml29623 4783859Sml29623 uint8_t actual_rdc; 4793859Sml29623 npi_status_t rs = NPI_SUCCESS; 4803859Sml29623 4813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4826929Smisaki " ==> nxge_rxdma_cfg_port_default_rdc")); 4833859Sml29623 4843859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4856495Sspeer actual_rdc = rdc; /* XXX Hack! 
*/ 4863859Sml29623 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 4873859Sml29623 4883859Sml29623 4893859Sml29623 if (rs != NPI_SUCCESS) { 4903859Sml29623 return (NXGE_ERROR | rs); 4913859Sml29623 } 4923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4936929Smisaki " <== nxge_rxdma_cfg_port_default_rdc")); 4943859Sml29623 4953859Sml29623 return (NXGE_OK); 4963859Sml29623 } 4973859Sml29623 4983859Sml29623 nxge_status_t 4993859Sml29623 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 5003859Sml29623 uint16_t pkts) 5013859Sml29623 { 5023859Sml29623 npi_status_t rs = NPI_SUCCESS; 5033859Sml29623 npi_handle_t handle; 5043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 5056929Smisaki " ==> nxge_rxdma_cfg_rcr_threshold")); 5063859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5073859Sml29623 5083859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 5093859Sml29623 5103859Sml29623 if (rs != NPI_SUCCESS) { 5113859Sml29623 return (NXGE_ERROR | rs); 5123859Sml29623 } 5133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 5143859Sml29623 return (NXGE_OK); 5153859Sml29623 } 5163859Sml29623 5173859Sml29623 nxge_status_t 5183859Sml29623 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 5193859Sml29623 uint16_t tout, uint8_t enable) 5203859Sml29623 { 5213859Sml29623 npi_status_t rs = NPI_SUCCESS; 5223859Sml29623 npi_handle_t handle; 5233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 5243859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5253859Sml29623 if (enable == 0) { 5263859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 5273859Sml29623 } else { 5283859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 5296929Smisaki tout); 5303859Sml29623 } 5313859Sml29623 5323859Sml29623 if (rs != NPI_SUCCESS) { 5333859Sml29623 return (NXGE_ERROR | rs); 5343859Sml29623 } 5353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== 
nxge_rxdma_cfg_rcr_timeout")); 5363859Sml29623 return (NXGE_OK); 5373859Sml29623 } 5383859Sml29623 5393859Sml29623 nxge_status_t 5403859Sml29623 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 5413859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 5423859Sml29623 { 5433859Sml29623 npi_handle_t handle; 5443859Sml29623 rdc_desc_cfg_t rdc_desc; 5453859Sml29623 p_rcrcfig_b_t cfgb_p; 5463859Sml29623 npi_status_t rs = NPI_SUCCESS; 5473859Sml29623 5483859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 5493859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5503859Sml29623 /* 5513859Sml29623 * Use configuration data composed at init time. 5523859Sml29623 * Write to hardware the receive ring configurations. 5533859Sml29623 */ 5543859Sml29623 rdc_desc.mbox_enable = 1; 5553859Sml29623 rdc_desc.mbox_addr = mbox_p->mbox_addr; 5563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5576929Smisaki "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 5586929Smisaki mbox_p->mbox_addr, rdc_desc.mbox_addr)); 5593859Sml29623 5603859Sml29623 rdc_desc.rbr_len = rbr_p->rbb_max; 5613859Sml29623 rdc_desc.rbr_addr = rbr_p->rbr_addr; 5623859Sml29623 5633859Sml29623 switch (nxgep->rx_bksize_code) { 5643859Sml29623 case RBR_BKSIZE_4K: 5653859Sml29623 rdc_desc.page_size = SIZE_4KB; 5663859Sml29623 break; 5673859Sml29623 case RBR_BKSIZE_8K: 5683859Sml29623 rdc_desc.page_size = SIZE_8KB; 5693859Sml29623 break; 5703859Sml29623 case RBR_BKSIZE_16K: 5713859Sml29623 rdc_desc.page_size = SIZE_16KB; 5723859Sml29623 break; 5733859Sml29623 case RBR_BKSIZE_32K: 5743859Sml29623 rdc_desc.page_size = SIZE_32KB; 5753859Sml29623 break; 5763859Sml29623 } 5773859Sml29623 5783859Sml29623 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 5793859Sml29623 rdc_desc.valid0 = 1; 5803859Sml29623 5813859Sml29623 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 5823859Sml29623 rdc_desc.valid1 = 1; 5833859Sml29623 5843859Sml29623 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 
5853859Sml29623 rdc_desc.valid2 = 1; 5863859Sml29623 5873859Sml29623 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 5883859Sml29623 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 5893859Sml29623 5903859Sml29623 rdc_desc.rcr_len = rcr_p->comp_size; 5913859Sml29623 rdc_desc.rcr_addr = rcr_p->rcr_addr; 5923859Sml29623 5933859Sml29623 cfgb_p = &(rcr_p->rcr_cfgb); 5943859Sml29623 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 5956495Sspeer /* For now, disable this timeout in a guest domain. */ 5966495Sspeer if (isLDOMguest(nxgep)) { 5976495Sspeer rdc_desc.rcr_timeout = 0; 5986495Sspeer rdc_desc.rcr_timeout_enable = 0; 5996495Sspeer } else { 6006495Sspeer rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 6016495Sspeer rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 6026495Sspeer } 6033859Sml29623 6043859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 6056929Smisaki "rbr_len qlen %d pagesize code %d rcr_len %d", 6066929Smisaki rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 6073859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 6086929Smisaki "size 0 %d size 1 %d size 2 %d", 6096929Smisaki rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 6106929Smisaki rbr_p->npi_pkt_buf_size2)); 6113859Sml29623 6123859Sml29623 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 6133859Sml29623 if (rs != NPI_SUCCESS) { 6143859Sml29623 return (NXGE_ERROR | rs); 6153859Sml29623 } 6163859Sml29623 6173859Sml29623 /* 6183859Sml29623 * Enable the timeout and threshold. 
6193859Sml29623 */ 6203859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 6216929Smisaki rdc_desc.rcr_threshold); 6223859Sml29623 if (rs != NPI_SUCCESS) { 6233859Sml29623 return (NXGE_ERROR | rs); 6243859Sml29623 } 6253859Sml29623 6263859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 6276929Smisaki rdc_desc.rcr_timeout); 6283859Sml29623 if (rs != NPI_SUCCESS) { 6293859Sml29623 return (NXGE_ERROR | rs); 6303859Sml29623 } 6313859Sml29623 6323859Sml29623 /* Enable the DMA */ 6333859Sml29623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 6343859Sml29623 if (rs != NPI_SUCCESS) { 6353859Sml29623 return (NXGE_ERROR | rs); 6363859Sml29623 } 6373859Sml29623 6383859Sml29623 /* Kick the DMA engine. */ 6393859Sml29623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 6403859Sml29623 /* Clear the rbr empty bit */ 6413859Sml29623 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 6423859Sml29623 6433859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 6443859Sml29623 6453859Sml29623 return (NXGE_OK); 6463859Sml29623 } 6473859Sml29623 6483859Sml29623 nxge_status_t 6493859Sml29623 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 6503859Sml29623 { 6513859Sml29623 npi_handle_t handle; 6523859Sml29623 npi_status_t rs = NPI_SUCCESS; 6533859Sml29623 6543859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 6553859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 6563859Sml29623 6573859Sml29623 /* disable the DMA */ 6583859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 6593859Sml29623 if (rs != NPI_SUCCESS) { 6603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 6616929Smisaki "<== nxge_disable_rxdma_channel:failed (0x%x)", 6626929Smisaki rs)); 6633859Sml29623 return (NXGE_ERROR | rs); 6643859Sml29623 } 6653859Sml29623 6663859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 6673859Sml29623 return (NXGE_OK); 6683859Sml29623 } 6693859Sml29623 
6703859Sml29623 nxge_status_t 6713859Sml29623 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 6723859Sml29623 { 6733859Sml29623 npi_handle_t handle; 6743859Sml29623 nxge_status_t status = NXGE_OK; 6753859Sml29623 6763859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 6776929Smisaki "<== nxge_init_rxdma_channel_rcrflush")); 6783859Sml29623 6793859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 6803859Sml29623 npi_rxdma_rdc_rcr_flush(handle, channel); 6813859Sml29623 6823859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 6836929Smisaki "<== nxge_init_rxdma_channel_rcrflsh")); 6843859Sml29623 return (status); 6853859Sml29623 6863859Sml29623 } 6873859Sml29623 6883859Sml29623 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 6893859Sml29623 6903859Sml29623 #define TO_LEFT -1 6913859Sml29623 #define TO_RIGHT 1 6923859Sml29623 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 6933859Sml29623 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 6943859Sml29623 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 6953859Sml29623 #define NO_HINT 0xffffffff 6963859Sml29623 6973859Sml29623 /*ARGSUSED*/ 6983859Sml29623 nxge_status_t 6993859Sml29623 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 7003859Sml29623 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 7013859Sml29623 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 7023859Sml29623 { 7033859Sml29623 int bufsize; 7043859Sml29623 uint64_t pktbuf_pp; 7053859Sml29623 uint64_t dvma_addr; 7063859Sml29623 rxring_info_t *ring_info; 7073859Sml29623 int base_side, end_side; 7083859Sml29623 int r_index, l_index, anchor_index; 7093859Sml29623 int found, search_done; 7103859Sml29623 uint32_t offset, chunk_size, block_size, page_size_mask; 7113859Sml29623 uint32_t chunk_index, block_index, total_index; 7123859Sml29623 int max_iterations, iteration; 7133859Sml29623 rxbuf_index_info_t *bufinfo; 7143859Sml29623 7153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 7163859Sml29623 7173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 
7186929Smisaki "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 7196929Smisaki pkt_buf_addr_pp, 7206929Smisaki pktbufsz_type)); 7215125Sjoycey #if defined(__i386) 7225125Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 7235125Sjoycey #else 7243859Sml29623 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 7255125Sjoycey #endif 7263859Sml29623 7273859Sml29623 switch (pktbufsz_type) { 7283859Sml29623 case 0: 7293859Sml29623 bufsize = rbr_p->pkt_buf_size0; 7303859Sml29623 break; 7313859Sml29623 case 1: 7323859Sml29623 bufsize = rbr_p->pkt_buf_size1; 7333859Sml29623 break; 7343859Sml29623 case 2: 7353859Sml29623 bufsize = rbr_p->pkt_buf_size2; 7363859Sml29623 break; 7373859Sml29623 case RCR_SINGLE_BLOCK: 7383859Sml29623 bufsize = 0; 7393859Sml29623 anchor_index = 0; 7403859Sml29623 break; 7413859Sml29623 default: 7423859Sml29623 return (NXGE_ERROR); 7433859Sml29623 } 7443859Sml29623 7453859Sml29623 if (rbr_p->num_blocks == 1) { 7463859Sml29623 anchor_index = 0; 7473859Sml29623 ring_info = rbr_p->ring_info; 7483859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 7493859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7506929Smisaki "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 7516929Smisaki "buf_pp $%p btype %d anchor_index %d " 7526929Smisaki "bufinfo $%p", 7536929Smisaki pkt_buf_addr_pp, 7546929Smisaki pktbufsz_type, 7556929Smisaki anchor_index, 7566929Smisaki bufinfo)); 7573859Sml29623 7583859Sml29623 goto found_index; 7593859Sml29623 } 7603859Sml29623 7613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7626929Smisaki "==> nxge_rxbuf_pp_to_vp: " 7636929Smisaki "buf_pp $%p btype %d anchor_index %d", 7646929Smisaki pkt_buf_addr_pp, 7656929Smisaki pktbufsz_type, 7666929Smisaki anchor_index)); 7673859Sml29623 7683859Sml29623 ring_info = rbr_p->ring_info; 7693859Sml29623 found = B_FALSE; 7703859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 7713859Sml29623 iteration = 0; 7723859Sml29623 max_iterations = ring_info->max_iterations; 7733859Sml29623 /* 7743859Sml29623 
* First check if this block has been seen 7753859Sml29623 * recently. This is indicated by a hint which 7763859Sml29623 * is initialized when the first buffer of the block 7773859Sml29623 * is seen. The hint is reset when the last buffer of 7783859Sml29623 * the block has been processed. 7793859Sml29623 * As three block sizes are supported, three hints 7803859Sml29623 * are kept. The idea behind the hints is that once 7813859Sml29623 * the hardware uses a block for a buffer of that 7823859Sml29623 * size, it will use it exclusively for that size 7833859Sml29623 * and will use it until it is exhausted. It is assumed 7843859Sml29623 * that there would a single block being used for the same 7853859Sml29623 * buffer sizes at any given time. 7863859Sml29623 */ 7873859Sml29623 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 7883859Sml29623 anchor_index = ring_info->hint[pktbufsz_type]; 7893859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 7903859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 7913859Sml29623 if ((pktbuf_pp >= dvma_addr) && 7926929Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) { 7933859Sml29623 found = B_TRUE; 7943859Sml29623 /* 7953859Sml29623 * check if this is the last buffer in the block 7963859Sml29623 * If so, then reset the hint for the size; 7973859Sml29623 */ 7983859Sml29623 7993859Sml29623 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 8003859Sml29623 ring_info->hint[pktbufsz_type] = NO_HINT; 8013859Sml29623 } 8023859Sml29623 } 8033859Sml29623 8043859Sml29623 if (found == B_FALSE) { 8053859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8066929Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)" 8076929Smisaki "buf_pp $%p btype %d anchor_index %d", 8086929Smisaki pkt_buf_addr_pp, 8096929Smisaki pktbufsz_type, 8106929Smisaki anchor_index)); 8113859Sml29623 8123859Sml29623 /* 8133859Sml29623 * This is the first buffer of the block of this 8143859Sml29623 * size. Need to search the whole information 8153859Sml29623 * array. 
8163859Sml29623 * the search algorithm uses a binary tree search 8173859Sml29623 * algorithm. It assumes that the information is 8183859Sml29623 * already sorted with increasing order 8193859Sml29623 * info[0] < info[1] < info[2] .... < info[n-1] 8203859Sml29623 * where n is the size of the information array 8213859Sml29623 */ 8223859Sml29623 r_index = rbr_p->num_blocks - 1; 8233859Sml29623 l_index = 0; 8243859Sml29623 search_done = B_FALSE; 8253859Sml29623 anchor_index = MID_INDEX(r_index, l_index); 8263859Sml29623 while (search_done == B_FALSE) { 8273859Sml29623 if ((r_index == l_index) || 8286929Smisaki (iteration >= max_iterations)) 8293859Sml29623 search_done = B_TRUE; 8303859Sml29623 end_side = TO_RIGHT; /* to the right */ 8313859Sml29623 base_side = TO_LEFT; /* to the left */ 8323859Sml29623 /* read the DVMA address information and sort it */ 8333859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 8343859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 8353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8366929Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)" 8376929Smisaki "buf_pp $%p btype %d " 8386929Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p", 8396929Smisaki pkt_buf_addr_pp, 8406929Smisaki pktbufsz_type, 8416929Smisaki anchor_index, 8426929Smisaki chunk_size, 8436929Smisaki dvma_addr)); 8443859Sml29623 8453859Sml29623 if (pktbuf_pp >= dvma_addr) 8463859Sml29623 base_side = TO_RIGHT; /* to the right */ 8473859Sml29623 if (pktbuf_pp < (dvma_addr + chunk_size)) 8483859Sml29623 end_side = TO_LEFT; /* to the left */ 8493859Sml29623 8503859Sml29623 switch (base_side + end_side) { 8516929Smisaki case IN_MIDDLE: 8526929Smisaki /* found */ 8536929Smisaki found = B_TRUE; 8546929Smisaki search_done = B_TRUE; 8556929Smisaki if ((pktbuf_pp + bufsize) < 8566929Smisaki (dvma_addr + chunk_size)) 8576929Smisaki ring_info->hint[pktbufsz_type] = 8586929Smisaki bufinfo[anchor_index].buf_index; 8596929Smisaki break; 8606929Smisaki case BOTH_RIGHT: 8616929Smisaki 
/* not found: go to the right */ 8626929Smisaki l_index = anchor_index + 1; 8636929Smisaki anchor_index = MID_INDEX(r_index, l_index); 8646929Smisaki break; 8656929Smisaki 8666929Smisaki case BOTH_LEFT: 8676929Smisaki /* not found: go to the left */ 8686929Smisaki r_index = anchor_index - 1; 8696929Smisaki anchor_index = MID_INDEX(r_index, l_index); 8706929Smisaki break; 8716929Smisaki default: /* should not come here */ 8726929Smisaki return (NXGE_ERROR); 8733859Sml29623 } 8743859Sml29623 iteration++; 8753859Sml29623 } 8763859Sml29623 8773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8786929Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)" 8796929Smisaki "buf_pp $%p btype %d anchor_index %d", 8806929Smisaki pkt_buf_addr_pp, 8816929Smisaki pktbufsz_type, 8826929Smisaki anchor_index)); 8833859Sml29623 } 8843859Sml29623 8853859Sml29623 if (found == B_FALSE) { 8863859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8876929Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)" 8886929Smisaki "buf_pp $%p btype %d anchor_index %d", 8896929Smisaki pkt_buf_addr_pp, 8906929Smisaki pktbufsz_type, 8916929Smisaki anchor_index)); 8923859Sml29623 return (NXGE_ERROR); 8933859Sml29623 } 8943859Sml29623 8953859Sml29623 found_index: 8963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8976929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 8986929Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d", 8996929Smisaki pkt_buf_addr_pp, 9006929Smisaki pktbufsz_type, 9016929Smisaki bufsize, 9026929Smisaki anchor_index)); 9033859Sml29623 9043859Sml29623 /* index of the first block in this chunk */ 9053859Sml29623 chunk_index = bufinfo[anchor_index].start_index; 9063859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 9073859Sml29623 page_size_mask = ring_info->block_size_mask; 9083859Sml29623 9093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9106929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 9116929Smisaki "buf_pp $%p btype %d bufsize %d " 9126929Smisaki "anchor_index %d chunk_index %d dvma $%p", 
9136929Smisaki pkt_buf_addr_pp, 9146929Smisaki pktbufsz_type, 9156929Smisaki bufsize, 9166929Smisaki anchor_index, 9176929Smisaki chunk_index, 9186929Smisaki dvma_addr)); 9193859Sml29623 9203859Sml29623 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 9213859Sml29623 block_size = rbr_p->block_size; /* System block(page) size */ 9223859Sml29623 9233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9246929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 9256929Smisaki "buf_pp $%p btype %d bufsize %d " 9266929Smisaki "anchor_index %d chunk_index %d dvma $%p " 9276929Smisaki "offset %d block_size %d", 9286929Smisaki pkt_buf_addr_pp, 9296929Smisaki pktbufsz_type, 9306929Smisaki bufsize, 9316929Smisaki anchor_index, 9326929Smisaki chunk_index, 9336929Smisaki dvma_addr, 9346929Smisaki offset, 9356929Smisaki block_size)); 9363859Sml29623 9373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 9383859Sml29623 9393859Sml29623 block_index = (offset / block_size); /* index within chunk */ 9403859Sml29623 total_index = chunk_index + block_index; 9413859Sml29623 9423859Sml29623 9433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9446929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9456929Smisaki "total_index %d dvma_addr $%p " 9466929Smisaki "offset %d block_size %d " 9476929Smisaki "block_index %d ", 9486929Smisaki total_index, dvma_addr, 9496929Smisaki offset, block_size, 9506929Smisaki block_index)); 9515125Sjoycey #if defined(__i386) 9525125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 9536929Smisaki (uint32_t)offset); 9545125Sjoycey #else 9555125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 9566929Smisaki (uint64_t)offset); 9575125Sjoycey #endif 9583859Sml29623 9593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9606929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9616929Smisaki "total_index %d dvma_addr $%p " 9626929Smisaki "offset %d block_size %d " 9636929Smisaki "block_index %d " 9646929Smisaki 
"*pkt_buf_addr_p $%p", 9656929Smisaki total_index, dvma_addr, 9666929Smisaki offset, block_size, 9676929Smisaki block_index, 9686929Smisaki *pkt_buf_addr_p)); 9693859Sml29623 9703859Sml29623 9713859Sml29623 *msg_index = total_index; 9723859Sml29623 *bufoffset = (offset & page_size_mask); 9733859Sml29623 9743859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9756929Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 9766929Smisaki "msg_index %d bufoffset_index %d", 9776929Smisaki *msg_index, 9786929Smisaki *bufoffset)); 9793859Sml29623 9803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 9813859Sml29623 9823859Sml29623 return (NXGE_OK); 9833859Sml29623 } 9843859Sml29623 9853859Sml29623 /* 9863859Sml29623 * used by quick sort (qsort) function 9873859Sml29623 * to perform comparison 9883859Sml29623 */ 9893859Sml29623 static int 9903859Sml29623 nxge_sort_compare(const void *p1, const void *p2) 9913859Sml29623 { 9923859Sml29623 9933859Sml29623 rxbuf_index_info_t *a, *b; 9943859Sml29623 9953859Sml29623 a = (rxbuf_index_info_t *)p1; 9963859Sml29623 b = (rxbuf_index_info_t *)p2; 9973859Sml29623 9983859Sml29623 if (a->dvma_addr > b->dvma_addr) 9993859Sml29623 return (1); 10003859Sml29623 if (a->dvma_addr < b->dvma_addr) 10013859Sml29623 return (-1); 10023859Sml29623 return (0); 10033859Sml29623 } 10043859Sml29623 10053859Sml29623 10063859Sml29623 10073859Sml29623 /* 10083859Sml29623 * grabbed this sort implementation from common/syscall/avl.c 10093859Sml29623 * 10103859Sml29623 */ 10113859Sml29623 /* 10123859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
10133859Sml29623 * v = Ptr to array/vector of objs 10143859Sml29623 * n = # objs in the array 10153859Sml29623 * s = size of each obj (must be multiples of a word size) 10163859Sml29623 * f = ptr to function to compare two objs 10173859Sml29623 * returns (-1 = less than, 0 = equal, 1 = greater than 10183859Sml29623 */ 10193859Sml29623 void 10203859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 10213859Sml29623 { 10223859Sml29623 int g, i, j, ii; 10233859Sml29623 unsigned int *p1, *p2; 10243859Sml29623 unsigned int tmp; 10253859Sml29623 10263859Sml29623 /* No work to do */ 10273859Sml29623 if (v == NULL || n <= 1) 10283859Sml29623 return; 10293859Sml29623 /* Sanity check on arguments */ 10303859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 10313859Sml29623 ASSERT(s > 0); 10323859Sml29623 10333859Sml29623 for (g = n / 2; g > 0; g /= 2) { 10343859Sml29623 for (i = g; i < n; i++) { 10353859Sml29623 for (j = i - g; j >= 0 && 10366929Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 10376929Smisaki j -= g) { 10383859Sml29623 p1 = (unsigned *)(v + j * s); 10393859Sml29623 p2 = (unsigned *)(v + (j + g) * s); 10403859Sml29623 for (ii = 0; ii < s / 4; ii++) { 10413859Sml29623 tmp = *p1; 10423859Sml29623 *p1++ = *p2; 10433859Sml29623 *p2++ = tmp; 10443859Sml29623 } 10453859Sml29623 } 10463859Sml29623 } 10473859Sml29623 } 10483859Sml29623 } 10493859Sml29623 10503859Sml29623 /* 10513859Sml29623 * Initialize data structures required for rxdma 10523859Sml29623 * buffer dvma->vmem address lookup 10533859Sml29623 */ 10543859Sml29623 /*ARGSUSED*/ 10553859Sml29623 static nxge_status_t 10563859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 10573859Sml29623 { 10583859Sml29623 10593859Sml29623 int index; 10603859Sml29623 rxring_info_t *ring_info; 10613859Sml29623 int max_iteration = 0, max_index = 0; 10623859Sml29623 10633859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 10643859Sml29623 10653859Sml29623 
ring_info = rbrp->ring_info; 10663859Sml29623 ring_info->hint[0] = NO_HINT; 10673859Sml29623 ring_info->hint[1] = NO_HINT; 10683859Sml29623 ring_info->hint[2] = NO_HINT; 10693859Sml29623 max_index = rbrp->num_blocks; 10703859Sml29623 10713859Sml29623 /* read the DVMA address information and sort it */ 10723859Sml29623 /* do init of the information array */ 10733859Sml29623 10743859Sml29623 10753859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10766929Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 10773859Sml29623 10783859Sml29623 /* sort the array */ 10793859Sml29623 nxge_ksort((void *)ring_info->buffer, max_index, 10806929Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 10813859Sml29623 10823859Sml29623 10833859Sml29623 10843859Sml29623 for (index = 0; index < max_index; index++) { 10853859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10866929Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 10876929Smisaki " ioaddr $%p kaddr $%p size %x", 10886929Smisaki index, ring_info->buffer[index].dvma_addr, 10896929Smisaki ring_info->buffer[index].kaddr, 10906929Smisaki ring_info->buffer[index].buf_size)); 10913859Sml29623 } 10923859Sml29623 10933859Sml29623 max_iteration = 0; 10943859Sml29623 while (max_index >= (1ULL << max_iteration)) 10953859Sml29623 max_iteration++; 10963859Sml29623 ring_info->max_iterations = max_iteration + 1; 10973859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10986929Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 10996929Smisaki ring_info->max_iterations)); 11003859Sml29623 11013859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 11023859Sml29623 return (NXGE_OK); 11033859Sml29623 } 11043859Sml29623 11053859Sml29623 /* ARGSUSED */ 11063859Sml29623 void 11073859Sml29623 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 11083859Sml29623 { 11093859Sml29623 #ifdef NXGE_DEBUG 11103859Sml29623 11113859Sml29623 uint32_t bptr; 11123859Sml29623 uint64_t pp; 11133859Sml29623 11143859Sml29623 bptr = 
entry_p->bits.hdw.pkt_buf_addr; 11153859Sml29623 11163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11176929Smisaki "\trcr entry $%p " 11186929Smisaki "\trcr entry 0x%0llx " 11196929Smisaki "\trcr entry 0x%08x " 11206929Smisaki "\trcr entry 0x%08x " 11216929Smisaki "\tvalue 0x%0llx\n" 11226929Smisaki "\tmulti = %d\n" 11236929Smisaki "\tpkt_type = 0x%x\n" 11246929Smisaki "\tzero_copy = %d\n" 11256929Smisaki "\tnoport = %d\n" 11266929Smisaki "\tpromis = %d\n" 11276929Smisaki "\terror = 0x%04x\n" 11286929Smisaki "\tdcf_err = 0x%01x\n" 11296929Smisaki "\tl2_len = %d\n" 11306929Smisaki "\tpktbufsize = %d\n" 11316929Smisaki "\tpkt_buf_addr = $%p\n" 11326929Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 11336929Smisaki entry_p, 11346929Smisaki *(int64_t *)entry_p, 11356929Smisaki *(int32_t *)entry_p, 11366929Smisaki *(int32_t *)((char *)entry_p + 32), 11376929Smisaki entry_p->value, 11386929Smisaki entry_p->bits.hdw.multi, 11396929Smisaki entry_p->bits.hdw.pkt_type, 11406929Smisaki entry_p->bits.hdw.zero_copy, 11416929Smisaki entry_p->bits.hdw.noport, 11426929Smisaki entry_p->bits.hdw.promis, 11436929Smisaki entry_p->bits.hdw.error, 11446929Smisaki entry_p->bits.hdw.dcf_err, 11456929Smisaki entry_p->bits.hdw.l2_len, 11466929Smisaki entry_p->bits.hdw.pktbufsz, 11476929Smisaki bptr, 11486929Smisaki entry_p->bits.ldw.pkt_buf_addr)); 11493859Sml29623 11503859Sml29623 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 11516929Smisaki RCR_PKT_BUF_ADDR_SHIFT; 11523859Sml29623 11533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 11546929Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 11553859Sml29623 #endif 11563859Sml29623 } 11573859Sml29623 11583859Sml29623 void 11593859Sml29623 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 11603859Sml29623 { 11613859Sml29623 npi_handle_t handle; 11623859Sml29623 rbr_stat_t rbr_stat; 11633859Sml29623 addr44_t hd_addr; 11643859Sml29623 addr44_t tail_addr; 11653859Sml29623 uint16_t qlen; 11663859Sml29623 11673859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 11686929Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 11693859Sml29623 11703859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11713859Sml29623 11723859Sml29623 /* RBR head */ 11733859Sml29623 hd_addr.addr = 0; 11743859Sml29623 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 11755165Syc148097 #if defined(__i386) 11763859Sml29623 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11776929Smisaki (void *)(uint32_t)hd_addr.addr); 11785125Sjoycey #else 11795165Syc148097 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11806929Smisaki (void *)hd_addr.addr); 11815125Sjoycey #endif 11823859Sml29623 11833859Sml29623 /* RBR stats */ 11843859Sml29623 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 11853859Sml29623 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 11863859Sml29623 11873859Sml29623 /* RCR tail */ 11883859Sml29623 tail_addr.addr = 0; 11893859Sml29623 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 11905165Syc148097 #if defined(__i386) 11913859Sml29623 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11926929Smisaki (void *)(uint32_t)tail_addr.addr); 11935125Sjoycey #else 11945165Syc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11956929Smisaki (void *)tail_addr.addr); 11965125Sjoycey #endif 11973859Sml29623 11983859Sml29623 /* RCR qlen */ 11993859Sml29623 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 12003859Sml29623 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 12013859Sml29623 12023859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12036929Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 12043859Sml29623 } 12053859Sml29623 12063859Sml29623 nxge_status_t 12073859Sml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12083859Sml29623 { 12096495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 12106495Sspeer nxge_status_t status; 12116495Sspeer npi_status_t rs; 12126495Sspeer int rdc; 12133859Sml29623 12143859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12156929Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 12163859Sml29623 12173859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 12183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12196495Sspeer "<== nxge_rxdma_mode: not initialized")); 12203859Sml29623 return (NXGE_ERROR); 12213859Sml29623 } 12226495Sspeer 12236495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 12246495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 12256495Sspeer "<== nxge_tx_port_fatal_err_recover: " 12266495Sspeer "NULL ring pointer(s)")); 12273859Sml29623 return (NXGE_ERROR); 12283859Sml29623 } 12293859Sml29623 12306495Sspeer if (set->owned.map == 0) { 12316495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 12326495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 12336495Sspeer return (NULL); 12346495Sspeer } 12356495Sspeer 12366495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 12376495Sspeer if ((1 << rdc) & set->owned.map) { 12386495Sspeer rx_rbr_ring_t *ring = 12396495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 12406495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 12416495Sspeer if (ring) { 12426495Sspeer if (enable) { 12436495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12446495Sspeer "==> nxge_rxdma_hw_mode: " 12456495Sspeer "channel %d (enable)", rdc)); 12466495Sspeer rs = npi_rxdma_cfg_rdc_enable 12476495Sspeer (handle, rdc); 12486495Sspeer } else { 12496495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12506495Sspeer "==> nxge_rxdma_hw_mode: " 12516495Sspeer "channel %d disable)", rdc)); 12526495Sspeer rs = npi_rxdma_cfg_rdc_disable 12536495Sspeer (handle, rdc); 12546495Sspeer } 12556495Sspeer } 12563859Sml29623 } 12573859Sml29623 } 12583859Sml29623 12593859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 12603859Sml29623 12613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12626929Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 12633859Sml29623 12643859Sml29623 return (status); 12653859Sml29623 } 12663859Sml29623 12673859Sml29623 void 12683859Sml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 12693859Sml29623 { 12703859Sml29623 npi_handle_t handle; 12713859Sml29623 12723859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12736929Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 12743859Sml29623 12753859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12763859Sml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 12773859Sml29623 12783859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 12793859Sml29623 } 12803859Sml29623 12813859Sml29623 void 12823859Sml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 12833859Sml29623 { 12843859Sml29623 npi_handle_t handle; 12853859Sml29623 12863859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12876929Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 12883859Sml29623 12893859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12903859Sml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 12913859Sml29623 12923859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 12933859Sml29623 } 12943859Sml29623 12953859Sml29623 void 12963859Sml29623 nxge_hw_start_rx(p_nxge_t nxgep) 12973859Sml29623 { 12983859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 12993859Sml29623 13003859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 13013859Sml29623 (void) nxge_rx_mac_enable(nxgep); 13023859Sml29623 13033859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 13043859Sml29623 } 13053859Sml29623 13063859Sml29623 /*ARGSUSED*/ 13073859Sml29623 void 13083859Sml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 13093859Sml29623 { 13106495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 
13116495Sspeer int rdc; 13123859Sml29623 13133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 13143859Sml29623 13156495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 13166495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 13176495Sspeer "<== nxge_tx_port_fatal_err_recover: " 13186495Sspeer "NULL ring pointer(s)")); 13193859Sml29623 return; 13203859Sml29623 } 13213859Sml29623 13226495Sspeer if (set->owned.map == 0) { 13233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13246495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 13253859Sml29623 return; 13263859Sml29623 } 13276495Sspeer 13286495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 13296495Sspeer if ((1 << rdc) & set->owned.map) { 13306495Sspeer rx_rbr_ring_t *ring = 13316495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 13326495Sspeer if (ring) { 13336495Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 13346495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 13356929Smisaki "==> nxge_fixup_rxdma_rings: " 13366929Smisaki "channel %d ring $%px", 13376929Smisaki rdc, ring)); 13386495Sspeer (void) nxge_rxdma_fixup_channel 13396495Sspeer (nxgep, rdc, rdc); 13406495Sspeer } 13416495Sspeer } 13423859Sml29623 } 13433859Sml29623 13443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 13453859Sml29623 } 13463859Sml29623 13473859Sml29623 void 13483859Sml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 13493859Sml29623 { 13503859Sml29623 int i; 13513859Sml29623 13523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 13533859Sml29623 i = nxge_rxdma_get_ring_index(nxgep, channel); 13543859Sml29623 if (i < 0) { 13553859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13566929Smisaki "<== nxge_rxdma_fix_channel: no entry found")); 13573859Sml29623 return; 13583859Sml29623 } 13593859Sml29623 13603859Sml29623 nxge_rxdma_fixup_channel(nxgep, channel, i); 13613859Sml29623 13626495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 
13633859Sml29623 } 13643859Sml29623 13653859Sml29623 void 13663859Sml29623 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 13673859Sml29623 { 13683859Sml29623 int ndmas; 13693859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 13703859Sml29623 p_rx_rbr_ring_t *rbr_rings; 13713859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 13723859Sml29623 p_rx_rcr_ring_t *rcr_rings; 13733859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 13743859Sml29623 p_rx_mbox_t *rx_mbox_p; 13753859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 13763859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 13773859Sml29623 p_rx_rbr_ring_t rbrp; 13783859Sml29623 p_rx_rcr_ring_t rcrp; 13793859Sml29623 p_rx_mbox_t mboxp; 13803859Sml29623 p_nxge_dma_common_t dmap; 13813859Sml29623 nxge_status_t status = NXGE_OK; 13823859Sml29623 13833859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 13843859Sml29623 13853859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 13863859Sml29623 13873859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 13883859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 13893859Sml29623 13903859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13913859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13926929Smisaki "<== nxge_rxdma_fixup_channel: buf not allocated")); 13933859Sml29623 return; 13943859Sml29623 } 13953859Sml29623 13963859Sml29623 ndmas = dma_buf_poolp->ndmas; 13973859Sml29623 if (!ndmas) { 13983859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13996929Smisaki "<== nxge_rxdma_fixup_channel: no dma allocated")); 14003859Sml29623 return; 14013859Sml29623 } 14023859Sml29623 14033859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 14043859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 14053859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 14063859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 14073859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 14083859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 14093859Sml29623 14103859Sml29623 /* 
Reinitialize the receive block and completion rings */ 14113859Sml29623 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 14126929Smisaki rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 14136929Smisaki mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 14143859Sml29623 14153859Sml29623 14163859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 14173859Sml29623 rbrp->rbr_rd_index = 0; 14183859Sml29623 rcrp->comp_rd_index = 0; 14193859Sml29623 rcrp->comp_wt_index = 0; 14203859Sml29623 14213859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14223859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14233859Sml29623 14243859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14256929Smisaki rbrp, rcrp, mboxp); 14263859Sml29623 if (status != NXGE_OK) { 14273859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14283859Sml29623 } 14293859Sml29623 if (status != NXGE_OK) { 14303859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14313859Sml29623 } 14323859Sml29623 14333859Sml29623 nxge_rxdma_fixup_channel_fail: 14343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14356929Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 14363859Sml29623 14373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 14383859Sml29623 } 14393859Sml29623 14408275SEric Cheng /* 14418275SEric Cheng * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 14428275SEric Cheng * map <channel> to an index into nxgep->rx_rbr_rings. 
14438275SEric Cheng * (device ring index -> port ring index) 14448275SEric Cheng */ 14453859Sml29623 int 14463859Sml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 14473859Sml29623 { 14488275SEric Cheng int i, ndmas; 14498275SEric Cheng uint16_t rdc; 14508275SEric Cheng p_rx_rbr_rings_t rx_rbr_rings; 14518275SEric Cheng p_rx_rbr_ring_t *rbr_rings; 14528275SEric Cheng 14538275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14548275SEric Cheng "==> nxge_rxdma_get_ring_index: channel %d", channel)); 14558275SEric Cheng 14568275SEric Cheng rx_rbr_rings = nxgep->rx_rbr_rings; 14578275SEric Cheng if (rx_rbr_rings == NULL) { 14588275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14598275SEric Cheng "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 14608275SEric Cheng return (-1); 14618275SEric Cheng } 14628275SEric Cheng ndmas = rx_rbr_rings->ndmas; 14638275SEric Cheng if (!ndmas) { 14648275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14658275SEric Cheng "<== nxge_rxdma_get_ring_index: no channel")); 14668275SEric Cheng return (-1); 14678275SEric Cheng } 14688275SEric Cheng 14698275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14708275SEric Cheng "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 14718275SEric Cheng 14728275SEric Cheng rbr_rings = rx_rbr_rings->rbr_rings; 14738275SEric Cheng for (i = 0; i < ndmas; i++) { 14748275SEric Cheng rdc = rbr_rings[i]->rdc; 14758275SEric Cheng if (channel == rdc) { 14768275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14778275SEric Cheng "==> nxge_rxdma_get_rbr_ring: channel %d " 14788275SEric Cheng "(index %d) ring %d", channel, i, rbr_rings[i])); 14798275SEric Cheng return (i); 14808275SEric Cheng } 14818275SEric Cheng } 14828275SEric Cheng 14838275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14848275SEric Cheng "<== nxge_rxdma_get_rbr_ring_index: not found")); 14858275SEric Cheng 14868275SEric Cheng return (-1); 14873859Sml29623 } 14883859Sml29623 14893859Sml29623 p_rx_rbr_ring_t 14903859Sml29623 
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14913859Sml29623 { 14926495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 14936495Sspeer nxge_channel_t rdc; 14943859Sml29623 14953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14966929Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14973859Sml29623 14986495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 14996495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 15006495Sspeer "<== nxge_rxdma_get_rbr_ring: " 15016495Sspeer "NULL ring pointer(s)")); 15023859Sml29623 return (NULL); 15033859Sml29623 } 15046495Sspeer 15056495Sspeer if (set->owned.map == 0) { 15063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15076495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 15083859Sml29623 return (NULL); 15093859Sml29623 } 15103859Sml29623 15116495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 15126495Sspeer if ((1 << rdc) & set->owned.map) { 15136495Sspeer rx_rbr_ring_t *ring = 15146495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 15156495Sspeer if (ring) { 15166495Sspeer if (channel == ring->rdc) { 15176495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 15186495Sspeer "==> nxge_rxdma_get_rbr_ring: " 15196495Sspeer "channel %d ring $%p", rdc, ring)); 15206495Sspeer return (ring); 15216495Sspeer } 15226495Sspeer } 15233859Sml29623 } 15243859Sml29623 } 15253859Sml29623 15263859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15276929Smisaki "<== nxge_rxdma_get_rbr_ring: not found")); 15283859Sml29623 15293859Sml29623 return (NULL); 15303859Sml29623 } 15313859Sml29623 15323859Sml29623 p_rx_rcr_ring_t 15333859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 15343859Sml29623 { 15356495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 15366495Sspeer nxge_channel_t rdc; 15373859Sml29623 15383859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15396929Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 15403859Sml29623 15416495Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 
15426495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 15436495Sspeer "<== nxge_rxdma_get_rcr_ring: " 15446495Sspeer "NULL ring pointer(s)")); 15453859Sml29623 return (NULL); 15463859Sml29623 } 15476495Sspeer 15486495Sspeer if (set->owned.map == 0) { 15493859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15506495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 15513859Sml29623 return (NULL); 15523859Sml29623 } 15533859Sml29623 15546495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 15556495Sspeer if ((1 << rdc) & set->owned.map) { 15566495Sspeer rx_rcr_ring_t *ring = 15576495Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 15586495Sspeer if (ring) { 15596495Sspeer if (channel == ring->rdc) { 15606495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 15616495Sspeer "==> nxge_rxdma_get_rcr_ring: " 15626495Sspeer "channel %d ring $%p", rdc, ring)); 15636495Sspeer return (ring); 15646495Sspeer } 15656495Sspeer } 15663859Sml29623 } 15673859Sml29623 } 15683859Sml29623 15693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15706929Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 15713859Sml29623 15723859Sml29623 return (NULL); 15733859Sml29623 } 15743859Sml29623 15753859Sml29623 /* 15763859Sml29623 * Static functions start here. 
15773859Sml29623 */ 15783859Sml29623 static p_rx_msg_t 15793859Sml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 15803859Sml29623 { 15813859Sml29623 p_rx_msg_t nxge_mp = NULL; 15823859Sml29623 p_nxge_dma_common_t dmamsg_p; 15833859Sml29623 uchar_t *buffer; 15843859Sml29623 15853859Sml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 15863859Sml29623 if (nxge_mp == NULL) { 15874185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15886929Smisaki "Allocation of a rx msg failed.")); 15893859Sml29623 goto nxge_allocb_exit; 15903859Sml29623 } 15913859Sml29623 15923859Sml29623 nxge_mp->use_buf_pool = B_FALSE; 15933859Sml29623 if (dmabuf_p) { 15943859Sml29623 nxge_mp->use_buf_pool = B_TRUE; 15953859Sml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15963859Sml29623 *dmamsg_p = *dmabuf_p; 15973859Sml29623 dmamsg_p->nblocks = 1; 15983859Sml29623 dmamsg_p->block_size = size; 15993859Sml29623 dmamsg_p->alength = size; 16003859Sml29623 buffer = (uchar_t *)dmabuf_p->kaddrp; 16013859Sml29623 16023859Sml29623 dmabuf_p->kaddrp = (void *) 16036929Smisaki ((char *)dmabuf_p->kaddrp + size); 16043859Sml29623 dmabuf_p->ioaddr_pp = (void *) 16056929Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 16063859Sml29623 dmabuf_p->alength -= size; 16073859Sml29623 dmabuf_p->offset += size; 16083859Sml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 16093859Sml29623 dmabuf_p->dma_cookie.dmac_size -= size; 16103859Sml29623 16113859Sml29623 } else { 16123859Sml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 16133859Sml29623 if (buffer == NULL) { 16144185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 16156929Smisaki "Allocation of a receive page failed.")); 16163859Sml29623 goto nxge_allocb_fail1; 16173859Sml29623 } 16183859Sml29623 } 16193859Sml29623 16203859Sml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 16213859Sml29623 if (nxge_mp->rx_mblk_p == NULL) { 16224185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 
16233859Sml29623 goto nxge_allocb_fail2; 16243859Sml29623 } 16253859Sml29623 16263859Sml29623 nxge_mp->buffer = buffer; 16273859Sml29623 nxge_mp->block_size = size; 16283859Sml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 16293859Sml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 16303859Sml29623 nxge_mp->ref_cnt = 1; 16313859Sml29623 nxge_mp->free = B_TRUE; 16323859Sml29623 nxge_mp->rx_use_bcopy = B_FALSE; 16333859Sml29623 16343859Sml29623 atomic_inc_32(&nxge_mblks_pending); 16353859Sml29623 16363859Sml29623 goto nxge_allocb_exit; 16373859Sml29623 16383859Sml29623 nxge_allocb_fail2: 16393859Sml29623 if (!nxge_mp->use_buf_pool) { 16403859Sml29623 KMEM_FREE(buffer, size); 16413859Sml29623 } 16423859Sml29623 16433859Sml29623 nxge_allocb_fail1: 16443859Sml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 16453859Sml29623 nxge_mp = NULL; 16463859Sml29623 16473859Sml29623 nxge_allocb_exit: 16483859Sml29623 return (nxge_mp); 16493859Sml29623 } 16503859Sml29623 16513859Sml29623 p_mblk_t 16523859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16533859Sml29623 { 16543859Sml29623 p_mblk_t mp; 16553859Sml29623 16563859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 16573859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 16586929Smisaki "offset = 0x%08X " 16596929Smisaki "size = 0x%08X", 16606929Smisaki nxge_mp, offset, size)); 16613859Sml29623 16623859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 16636929Smisaki 0, &nxge_mp->freeb); 16643859Sml29623 if (mp == NULL) { 16653859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16663859Sml29623 goto nxge_dupb_exit; 16673859Sml29623 } 16683859Sml29623 atomic_inc_32(&nxge_mp->ref_cnt); 16693859Sml29623 16703859Sml29623 16713859Sml29623 nxge_dupb_exit: 16723859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16736929Smisaki nxge_mp)); 16743859Sml29623 return (mp); 16753859Sml29623 } 16763859Sml29623 16773859Sml29623 p_mblk_t 16783859Sml29623 
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16793859Sml29623 { 16803859Sml29623 p_mblk_t mp; 16813859Sml29623 uchar_t *dp; 16823859Sml29623 16833859Sml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 16843859Sml29623 if (mp == NULL) { 16853859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16863859Sml29623 goto nxge_dupb_bcopy_exit; 16873859Sml29623 } 16883859Sml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 16893859Sml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 16903859Sml29623 mp->b_wptr = dp + size; 16913859Sml29623 16923859Sml29623 nxge_dupb_bcopy_exit: 16933859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16946929Smisaki nxge_mp)); 16953859Sml29623 return (mp); 16963859Sml29623 } 16973859Sml29623 16983859Sml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 16993859Sml29623 p_rx_msg_t rx_msg_p); 17003859Sml29623 17013859Sml29623 void 17023859Sml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 17033859Sml29623 { 17043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 17053859Sml29623 17063859Sml29623 /* Reuse this buffer */ 17073859Sml29623 rx_msg_p->free = B_FALSE; 17083859Sml29623 rx_msg_p->cur_usage_cnt = 0; 17093859Sml29623 rx_msg_p->max_usage_cnt = 0; 17103859Sml29623 rx_msg_p->pkt_buf_size = 0; 17113859Sml29623 17123859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 17133859Sml29623 rx_msg_p->rx_use_bcopy = B_FALSE; 17143859Sml29623 atomic_dec_32(&rx_rbr_p->rbr_consumed); 17153859Sml29623 } 17163859Sml29623 17173859Sml29623 /* 17183859Sml29623 * Get the rbr header pointer and its offset index. 
17193859Sml29623 */ 17203859Sml29623 MUTEX_ENTER(&rx_rbr_p->post_lock); 17213859Sml29623 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 17226929Smisaki rx_rbr_p->rbr_wrap_mask); 17233859Sml29623 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 17243859Sml29623 MUTEX_EXIT(&rx_rbr_p->post_lock); 17255770Sml29623 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 17265770Sml29623 rx_rbr_p->rdc, 1); 17273859Sml29623 17283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 17296929Smisaki "<== nxge_post_page (channel %d post_next_index %d)", 17306929Smisaki rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 17313859Sml29623 17323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 17333859Sml29623 } 17343859Sml29623 17353859Sml29623 void 17363859Sml29623 nxge_freeb(p_rx_msg_t rx_msg_p) 17373859Sml29623 { 17383859Sml29623 size_t size; 17393859Sml29623 uchar_t *buffer = NULL; 17403859Sml29623 int ref_cnt; 17414874Sml29623 boolean_t free_state = B_FALSE; 17423859Sml29623 17435170Stm144005 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 17445170Stm144005 17453859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 17463859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 17476929Smisaki "nxge_freeb:rx_msg_p = $%p (block pending %d)", 17486929Smisaki rx_msg_p, nxge_mblks_pending)); 17493859Sml29623 17504874Sml29623 /* 17514874Sml29623 * First we need to get the free state, then 17524874Sml29623 * atomic decrement the reference count to prevent 17534874Sml29623 * the race condition with the interrupt thread that 17544874Sml29623 * is processing a loaned up buffer block. 
17554874Sml29623 */ 17564874Sml29623 free_state = rx_msg_p->free; 17573859Sml29623 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 17583859Sml29623 if (!ref_cnt) { 17595770Sml29623 atomic_dec_32(&nxge_mblks_pending); 17603859Sml29623 buffer = rx_msg_p->buffer; 17613859Sml29623 size = rx_msg_p->block_size; 17623859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 17636929Smisaki "will free: rx_msg_p = $%p (block pending %d)", 17646929Smisaki rx_msg_p, nxge_mblks_pending)); 17653859Sml29623 17663859Sml29623 if (!rx_msg_p->use_buf_pool) { 17673859Sml29623 KMEM_FREE(buffer, size); 17683859Sml29623 } 17693859Sml29623 17703859Sml29623 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 17715170Stm144005 17725759Smisaki if (ring) { 17735759Smisaki /* 17745759Smisaki * Decrement the receive buffer ring's reference 17755759Smisaki * count, too. 17765759Smisaki */ 17775759Smisaki atomic_dec_32(&ring->rbr_ref_cnt); 17785759Smisaki 17795759Smisaki /* 17806495Sspeer * Free the receive buffer ring, if 17815759Smisaki * 1. all the receive buffers have been freed 17825759Smisaki * 2. and we are in the proper state (that is, 17835759Smisaki * we are not UNMAPPING). 17845759Smisaki */ 17855759Smisaki if (ring->rbr_ref_cnt == 0 && 17865759Smisaki ring->rbr_state == RBR_UNMAPPED) { 17876495Sspeer /* 17886495Sspeer * Free receive data buffers, 17896495Sspeer * buffer index information 17906495Sspeer * (rxring_info) and 17916495Sspeer * the message block ring. 
17926495Sspeer */ 17936495Sspeer NXGE_DEBUG_MSG((NULL, RX_CTL, 17946495Sspeer "nxge_freeb:rx_msg_p = $%p " 17956495Sspeer "(block pending %d) free buffers", 17966495Sspeer rx_msg_p, nxge_mblks_pending)); 17976495Sspeer nxge_rxdma_databuf_free(ring); 17986495Sspeer if (ring->ring_info) { 17996495Sspeer KMEM_FREE(ring->ring_info, 18006495Sspeer sizeof (rxring_info_t)); 18016495Sspeer } 18026495Sspeer 18036495Sspeer if (ring->rx_msg_ring) { 18046495Sspeer KMEM_FREE(ring->rx_msg_ring, 18056495Sspeer ring->tnblocks * 18066495Sspeer sizeof (p_rx_msg_t)); 18076495Sspeer } 18085759Smisaki KMEM_FREE(ring, sizeof (*ring)); 18095759Smisaki } 18105170Stm144005 } 18113859Sml29623 return; 18123859Sml29623 } 18133859Sml29623 18143859Sml29623 /* 18153859Sml29623 * Repost buffer. 18163859Sml29623 */ 18175759Smisaki if (free_state && (ref_cnt == 1) && ring) { 18183859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, 18193859Sml29623 "nxge_freeb: post page $%p:", rx_msg_p)); 18205170Stm144005 if (ring->rbr_state == RBR_POSTING) 18215170Stm144005 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 18223859Sml29623 } 18233859Sml29623 18243859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 18253859Sml29623 } 18263859Sml29623 18273859Sml29623 uint_t 18283859Sml29623 nxge_rx_intr(void *arg1, void *arg2) 18293859Sml29623 { 18303859Sml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 18313859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg2; 18323859Sml29623 p_nxge_ldg_t ldgp; 18333859Sml29623 uint8_t channel; 18343859Sml29623 npi_handle_t handle; 18353859Sml29623 rx_dma_ctl_stat_t cs; 18368275SEric Cheng p_rx_rcr_ring_t rcr_ring; 18378275SEric Cheng mblk_t *mp; 18383859Sml29623 18393859Sml29623 #ifdef NXGE_DEBUG 18403859Sml29623 rxdma_cfig1_t cfg; 18413859Sml29623 #endif 18423859Sml29623 18433859Sml29623 if (ldvp == NULL) { 18443859Sml29623 NXGE_DEBUG_MSG((NULL, INT_CTL, 18456929Smisaki "<== nxge_rx_intr: arg2 $%p arg1 $%p", 18466929Smisaki nxgep, ldvp)); 18473859Sml29623 18483859Sml29623 return 
(DDI_INTR_CLAIMED); 18493859Sml29623 } 18503859Sml29623 18513859Sml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 18523859Sml29623 nxgep = ldvp->nxgep; 18533859Sml29623 } 18546602Sspeer 18556602Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 18566602Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 18576602Sspeer NXGE_DEBUG_MSG((nxgep, INT_CTL, 18586602Sspeer "<== nxge_rx_intr: interface not started or intialized")); 18596602Sspeer return (DDI_INTR_CLAIMED); 18606602Sspeer } 18616602Sspeer 18623859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 18636929Smisaki "==> nxge_rx_intr: arg2 $%p arg1 $%p", 18646929Smisaki nxgep, ldvp)); 18653859Sml29623 18663859Sml29623 /* 18673859Sml29623 * This interrupt handler is for a specific 18683859Sml29623 * receive dma channel. 18693859Sml29623 */ 18703859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 18718275SEric Cheng 18728275SEric Cheng rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 18738275SEric Cheng 18748275SEric Cheng /* 18758275SEric Cheng * The RCR ring lock must be held when packets 18768275SEric Cheng * are being processed and the hardware registers are 18778275SEric Cheng * being read or written to prevent race condition 18788275SEric Cheng * among the interrupt thread, the polling thread 18798275SEric Cheng * (will cause fatal errors such as rcrincon bit set) 18808275SEric Cheng * and the setting of the poll_flag. 18818275SEric Cheng */ 18828275SEric Cheng MUTEX_ENTER(&rcr_ring->lock); 18838275SEric Cheng 18843859Sml29623 /* 18853859Sml29623 * Get the control and status for this channel. 
18863859Sml29623 */ 18873859Sml29623 channel = ldvp->channel; 18883859Sml29623 ldgp = ldvp->ldgp; 18898275SEric Cheng 18908275SEric Cheng if (!isLDOMguest(nxgep)) { 18918275SEric Cheng if (!nxgep->rx_channel_started[channel]) { 18928275SEric Cheng NXGE_DEBUG_MSG((nxgep, INT_CTL, 18938275SEric Cheng "<== nxge_rx_intr: channel is not started")); 18948275SEric Cheng MUTEX_EXIT(&rcr_ring->lock); 18958275SEric Cheng return (DDI_INTR_CLAIMED); 18968275SEric Cheng } 18978275SEric Cheng } 18988275SEric Cheng 18998275SEric Cheng ASSERT(rcr_ring->ldgp == ldgp); 19008275SEric Cheng ASSERT(rcr_ring->ldvp == ldvp); 19018275SEric Cheng 19023859Sml29623 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 19033859Sml29623 19043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 19056929Smisaki "cs 0x%016llx rcrto 0x%x rcrthres %x", 19066929Smisaki channel, 19076929Smisaki cs.value, 19086929Smisaki cs.bits.hdw.rcrto, 19096929Smisaki cs.bits.hdw.rcrthres)); 19103859Sml29623 19118275SEric Cheng mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 19123859Sml29623 19133859Sml29623 /* error events. */ 19143859Sml29623 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 19156495Sspeer (void) nxge_rx_err_evnts(nxgep, channel, cs); 19163859Sml29623 } 19173859Sml29623 19183859Sml29623 /* 19193859Sml29623 * Enable the mailbox update interrupt if we want 19203859Sml29623 * to use mailbox. We probably don't need to use 19213859Sml29623 * mailbox as it only saves us one pio read. 19223859Sml29623 * Also write 1 to rcrthres and rcrto to clear 19233859Sml29623 * these two edge triggered bits. 19243859Sml29623 */ 19253859Sml29623 cs.value &= RX_DMA_CTL_STAT_WR1C; 19268275SEric Cheng cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 19273859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 19286929Smisaki cs.value); 19293859Sml29623 19303859Sml29623 /* 19318275SEric Cheng * If the polling mode is enabled, disable the interrupt. 
19323859Sml29623 */ 19338275SEric Cheng if (rcr_ring->poll_flag) { 19348275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19358275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 19368275SEric Cheng "(disabling interrupts)", channel, ldgp, ldvp)); 19378275SEric Cheng /* 19388275SEric Cheng * Disarm this logical group if this is a single device 19398275SEric Cheng * group. 19408275SEric Cheng */ 19418275SEric Cheng if (ldgp->nldvs == 1) { 19428275SEric Cheng ldgimgm_t mgm; 19438275SEric Cheng mgm.value = 0; 19448275SEric Cheng mgm.bits.ldw.arm = 0; 19456495Sspeer NXGE_REG_WR64(handle, 19468275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 19476495Sspeer } 19488275SEric Cheng } else { 19498275SEric Cheng /* 19508400SNicolas.Droux@Sun.COM * Rearm this logical group if this is a single device 19518400SNicolas.Droux@Sun.COM * group. 19528275SEric Cheng */ 19538275SEric Cheng if (ldgp->nldvs == 1) { 19548275SEric Cheng if (isLDOMguest(nxgep)) { 19558275SEric Cheng nxge_hio_ldgimgn(nxgep, ldgp); 19568275SEric Cheng } else { 19578275SEric Cheng ldgimgm_t mgm; 19588275SEric Cheng 19598275SEric Cheng mgm.value = 0; 19608275SEric Cheng mgm.bits.ldw.arm = 1; 19618275SEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 19628275SEric Cheng 19638275SEric Cheng NXGE_REG_WR64(handle, 19648275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 19658275SEric Cheng mgm.value); 19668275SEric Cheng } 19678275SEric Cheng } 19688275SEric Cheng 19698275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19708275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p " 19718275SEric Cheng "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 19723859Sml29623 } 19738275SEric Cheng MUTEX_EXIT(&rcr_ring->lock); 19748275SEric Cheng 19758275SEric Cheng if (mp) { 19768275SEric Cheng if (!isLDOMguest(nxgep)) 19778275SEric Cheng mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 19788275SEric Cheng rcr_ring->rcr_gen_num); 19798275SEric Cheng #if defined(sun4v) 19808275SEric Cheng else 
{ /* isLDOMguest(nxgep) */ 19818275SEric Cheng nxge_hio_data_t *nhd = (nxge_hio_data_t *) 19828275SEric Cheng nxgep->nxge_hw_p->hio; 19838275SEric Cheng nx_vio_fp_t *vio = &nhd->hio.vio; 19848275SEric Cheng 19858275SEric Cheng if (vio->cb.vio_net_rx_cb) { 19868275SEric Cheng (*vio->cb.vio_net_rx_cb) 19878275SEric Cheng (nxgep->hio_vr->vhp, mp); 19888275SEric Cheng } 19898275SEric Cheng } 19908275SEric Cheng #endif 19918275SEric Cheng } 19928275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 19938275SEric Cheng return (DDI_INTR_CLAIMED); 19943859Sml29623 } 19953859Sml29623 19963859Sml29623 /* 19973859Sml29623 * Process the packets received in the specified logical device 19983859Sml29623 * and pass up a chain of message blocks to the upper layer. 19998275SEric Cheng * The RCR ring lock must be held before calling this function. 20003859Sml29623 */ 20018275SEric Cheng static mblk_t * 20026495Sspeer nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 20033859Sml29623 { 20043859Sml29623 p_mblk_t mp; 20053859Sml29623 p_rx_rcr_ring_t rcrp; 20063859Sml29623 20073859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 20086495Sspeer rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 20098275SEric Cheng 20108275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 20118275SEric Cheng "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 20128275SEric Cheng "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 20136495Sspeer if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 20143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20156929Smisaki "<== nxge_rx_pkts_vring: no mp")); 20168275SEric Cheng return (NULL); 20173859Sml29623 } 20183859Sml29623 20193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 20206929Smisaki mp)); 20213859Sml29623 20223859Sml29623 #ifdef NXGE_DEBUG 20233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20246929Smisaki "==> nxge_rx_pkts_vring:calling mac_rx " 
20256929Smisaki "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 20266929Smisaki "mac_handle $%p", 20276929Smisaki mp->b_wptr - mp->b_rptr, 20286929Smisaki mp, mp->b_cont, mp->b_next, 20296929Smisaki rcrp, rcrp->rcr_mac_handle)); 20303859Sml29623 20313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20326929Smisaki "==> nxge_rx_pkts_vring: dump packets " 20336929Smisaki "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 20346929Smisaki mp, 20356929Smisaki mp->b_rptr, 20366929Smisaki mp->b_wptr, 20376929Smisaki nxge_dump_packet((char *)mp->b_rptr, 20386929Smisaki mp->b_wptr - mp->b_rptr))); 20393859Sml29623 if (mp->b_cont) { 20403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20416929Smisaki "==> nxge_rx_pkts_vring: dump b_cont packets " 20426929Smisaki "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 20436929Smisaki mp->b_cont, 20446929Smisaki mp->b_cont->b_rptr, 20456929Smisaki mp->b_cont->b_wptr, 20466929Smisaki nxge_dump_packet((char *)mp->b_cont->b_rptr, 20476929Smisaki mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 20483859Sml29623 } 20493859Sml29623 if (mp->b_next) { 20503859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20516929Smisaki "==> nxge_rx_pkts_vring: dump next packets " 20526929Smisaki "(b_rptr $%p): %s", 20536929Smisaki mp->b_next->b_rptr, 20546929Smisaki nxge_dump_packet((char *)mp->b_next->b_rptr, 20556929Smisaki mp->b_next->b_wptr - mp->b_next->b_rptr))); 20563859Sml29623 } 20573859Sml29623 #endif 20588275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 20598275SEric Cheng "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 20608275SEric Cheng rcrp->rdc, rcrp->rcr_mac_handle)); 20618275SEric Cheng 20628275SEric Cheng return (mp); 20633859Sml29623 } 20643859Sml29623 20653859Sml29623 20663859Sml29623 /* 20673859Sml29623 * This routine is the main packet receive processing function. 20683859Sml29623 * It gets the packet type, error code, and buffer related 20693859Sml29623 * information from the receive completion entry. 
20703859Sml29623 * How many completion entries to process is based on the number of packets 20713859Sml29623 * queued by the hardware, a hardware maintained tail pointer 20723859Sml29623 * and a configurable receive packet count. 20733859Sml29623 * 20743859Sml29623 * A chain of message blocks will be created as result of processing 20753859Sml29623 * the completion entries. This chain of message blocks will be returned and 20763859Sml29623 * a hardware control status register will be updated with the number of 20773859Sml29623 * packets were removed from the hardware queue. 20783859Sml29623 * 20798275SEric Cheng * The RCR ring lock is held when entering this function. 20803859Sml29623 */ 20816495Sspeer static mblk_t * 20826495Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 20836495Sspeer int bytes_to_pickup) 20843859Sml29623 { 20853859Sml29623 npi_handle_t handle; 20863859Sml29623 uint8_t channel; 20873859Sml29623 uint32_t comp_rd_index; 20883859Sml29623 p_rcr_entry_t rcr_desc_rd_head_p; 20893859Sml29623 p_rcr_entry_t rcr_desc_rd_head_pp; 20903859Sml29623 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 20913859Sml29623 uint16_t qlen, nrcr_read, npkt_read; 20926495Sspeer uint32_t qlen_hw; 20933859Sml29623 boolean_t multi; 20946495Sspeer rcrcfig_b_t rcr_cfg_b; 20956495Sspeer int totallen = 0; 20963859Sml29623 #if defined(_BIG_ENDIAN) 20973859Sml29623 npi_status_t rs = NPI_SUCCESS; 20983859Sml29623 #endif 20993859Sml29623 21008275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: " 21016929Smisaki "channel %d", rcr_p->rdc)); 21023859Sml29623 21033859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 21043859Sml29623 return (NULL); 21053859Sml29623 } 21063859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 21073859Sml29623 channel = rcr_p->rdc; 21083859Sml29623 21093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21106929Smisaki "==> nxge_rx_pkts: START: rcr channel %d " 21116929Smisaki "head_p $%p head_pp $%p index %d ", 
21126929Smisaki channel, rcr_p->rcr_desc_rd_head_p, 21136929Smisaki rcr_p->rcr_desc_rd_head_pp, 21146929Smisaki rcr_p->comp_rd_index)); 21153859Sml29623 21163859Sml29623 21173859Sml29623 #if !defined(_BIG_ENDIAN) 21183859Sml29623 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 21193859Sml29623 #else 21203859Sml29623 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 21213859Sml29623 if (rs != NPI_SUCCESS) { 21226495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 21233859Sml29623 "channel %d, get qlen failed 0x%08x", 21246929Smisaki channel, rs)); 21253859Sml29623 return (NULL); 21263859Sml29623 } 21273859Sml29623 #endif 21283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 21296929Smisaki "qlen %d", channel, qlen)); 21303859Sml29623 21313859Sml29623 21323859Sml29623 21333859Sml29623 if (!qlen) { 21348275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 21356929Smisaki "==> nxge_rx_pkts:rcr channel %d " 21366929Smisaki "qlen %d (no pkts)", channel, qlen)); 21373859Sml29623 21383859Sml29623 return (NULL); 21393859Sml29623 } 21403859Sml29623 21413859Sml29623 comp_rd_index = rcr_p->comp_rd_index; 21423859Sml29623 21433859Sml29623 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 21443859Sml29623 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 21453859Sml29623 nrcr_read = npkt_read = 0; 21463859Sml29623 21473859Sml29623 /* 21483859Sml29623 * Number of packets queued 21493859Sml29623 * (The jumbo or multi packet will be counted as only one 21503859Sml29623 * packets and it may take up more than one completion entry). 21513859Sml29623 */ 21523859Sml29623 qlen_hw = (qlen < nxge_max_rx_pkts) ? 
21536929Smisaki qlen : nxge_max_rx_pkts; 21543859Sml29623 head_mp = NULL; 21553859Sml29623 tail_mp = &head_mp; 21563859Sml29623 nmp = mp_cont = NULL; 21573859Sml29623 multi = B_FALSE; 21583859Sml29623 21593859Sml29623 while (qlen_hw) { 21603859Sml29623 21613859Sml29623 #ifdef NXGE_DEBUG 21623859Sml29623 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 21633859Sml29623 #endif 21643859Sml29623 /* 21653859Sml29623 * Process one completion ring entry. 21663859Sml29623 */ 21673859Sml29623 nxge_receive_packet(nxgep, 21686929Smisaki rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 21693859Sml29623 21703859Sml29623 /* 21713859Sml29623 * message chaining modes 21723859Sml29623 */ 21733859Sml29623 if (nmp) { 21743859Sml29623 nmp->b_next = NULL; 21753859Sml29623 if (!multi && !mp_cont) { /* frame fits a partition */ 21763859Sml29623 *tail_mp = nmp; 21773859Sml29623 tail_mp = &nmp->b_next; 21786495Sspeer totallen += MBLKL(nmp); 21793859Sml29623 nmp = NULL; 21803859Sml29623 } else if (multi && !mp_cont) { /* first segment */ 21813859Sml29623 *tail_mp = nmp; 21823859Sml29623 tail_mp = &nmp->b_cont; 21836495Sspeer totallen += MBLKL(nmp); 21843859Sml29623 } else if (multi && mp_cont) { /* mid of multi segs */ 21853859Sml29623 *tail_mp = mp_cont; 21863859Sml29623 tail_mp = &mp_cont->b_cont; 21876495Sspeer totallen += MBLKL(mp_cont); 21883859Sml29623 } else if (!multi && mp_cont) { /* last segment */ 21893859Sml29623 *tail_mp = mp_cont; 21903859Sml29623 tail_mp = &nmp->b_next; 21916495Sspeer totallen += MBLKL(mp_cont); 21923859Sml29623 nmp = NULL; 21933859Sml29623 } 21943859Sml29623 } 21953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21966929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 21976929Smisaki "before updating: multi %d " 21986929Smisaki "nrcr_read %d " 21996929Smisaki "npk read %d " 22006929Smisaki "head_pp $%p index %d ", 22016929Smisaki channel, 22026929Smisaki multi, 22036929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22046929Smisaki comp_rd_index)); 
22053859Sml29623 22063859Sml29623 if (!multi) { 22073859Sml29623 qlen_hw--; 22083859Sml29623 npkt_read++; 22093859Sml29623 } 22103859Sml29623 22113859Sml29623 /* 22123859Sml29623 * Update the next read entry. 22133859Sml29623 */ 22143859Sml29623 comp_rd_index = NEXT_ENTRY(comp_rd_index, 22156929Smisaki rcr_p->comp_wrap_mask); 22163859Sml29623 22173859Sml29623 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 22186929Smisaki rcr_p->rcr_desc_first_p, 22196929Smisaki rcr_p->rcr_desc_last_p); 22203859Sml29623 22213859Sml29623 nrcr_read++; 22223859Sml29623 22233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22246929Smisaki "<== nxge_rx_pkts: (SAM, process one packet) " 22256929Smisaki "nrcr_read %d", 22266929Smisaki nrcr_read)); 22273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22286929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 22296929Smisaki "multi %d " 22306929Smisaki "nrcr_read %d " 22316929Smisaki "npk read %d " 22326929Smisaki "head_pp $%p index %d ", 22336929Smisaki channel, 22346929Smisaki multi, 22356929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22366929Smisaki comp_rd_index)); 22373859Sml29623 22386495Sspeer if ((bytes_to_pickup != -1) && 22396495Sspeer (totallen >= bytes_to_pickup)) { 22406495Sspeer break; 22416495Sspeer } 22428275SEric Cheng 22438275SEric Cheng /* limit the number of packets for interrupt */ 22448275SEric Cheng if (!(rcr_p->poll_flag)) { 22458275SEric Cheng if (npkt_read == nxge_max_intr_pkts) { 22468275SEric Cheng break; 22478275SEric Cheng } 22488275SEric Cheng } 22493859Sml29623 } 22503859Sml29623 22513859Sml29623 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 22523859Sml29623 rcr_p->comp_rd_index = comp_rd_index; 22533859Sml29623 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 22543859Sml29623 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 22556929Smisaki (nxgep->intr_threshold != rcr_p->intr_threshold)) { 2256*8661SSantwona.Behera@Sun.COM 2257*8661SSantwona.Behera@Sun.COM rcr_p->intr_timeout = (nxgep->intr_timeout 
< 2258*8661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 2259*8661SSantwona.Behera@Sun.COM nxgep->intr_timeout; 2260*8661SSantwona.Behera@Sun.COM 2261*8661SSantwona.Behera@Sun.COM rcr_p->intr_threshold = (nxgep->intr_threshold < 2262*8661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN : 2263*8661SSantwona.Behera@Sun.COM nxgep->intr_threshold; 2264*8661SSantwona.Behera@Sun.COM 22653859Sml29623 rcr_cfg_b.value = 0x0ULL; 2266*8661SSantwona.Behera@Sun.COM rcr_cfg_b.bits.ldw.entout = 1; 22673859Sml29623 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 22683859Sml29623 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 2269*8661SSantwona.Behera@Sun.COM 22703859Sml29623 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 22716929Smisaki channel, rcr_cfg_b.value); 22723859Sml29623 } 22733859Sml29623 22743859Sml29623 cs.bits.ldw.pktread = npkt_read; 22753859Sml29623 cs.bits.ldw.ptrread = nrcr_read; 22763859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 22776929Smisaki channel, cs.value); 22783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22796929Smisaki "==> nxge_rx_pkts: EXIT: rcr channel %d " 22806929Smisaki "head_pp $%p index %016llx ", 22816929Smisaki channel, 22826929Smisaki rcr_p->rcr_desc_rd_head_pp, 22836929Smisaki rcr_p->comp_rd_index)); 22843859Sml29623 /* 22853859Sml29623 * Update RCR buffer pointer read and number of packets 22863859Sml29623 * read. 
22873859Sml29623 */ 22883859Sml29623 22898275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 22908275SEric Cheng "channel %d", rcr_p->rdc)); 22918275SEric Cheng 22923859Sml29623 return (head_mp); 22933859Sml29623 } 22943859Sml29623 22953859Sml29623 void 22963859Sml29623 nxge_receive_packet(p_nxge_t nxgep, 22973859Sml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 22983859Sml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 22993859Sml29623 { 23003859Sml29623 p_mblk_t nmp = NULL; 23013859Sml29623 uint64_t multi; 23023859Sml29623 uint64_t dcf_err; 23033859Sml29623 uint8_t channel; 23043859Sml29623 23053859Sml29623 boolean_t first_entry = B_TRUE; 23063859Sml29623 boolean_t is_tcp_udp = B_FALSE; 23073859Sml29623 boolean_t buffer_free = B_FALSE; 23083859Sml29623 boolean_t error_send_up = B_FALSE; 23093859Sml29623 uint8_t error_type; 23103859Sml29623 uint16_t l2_len; 23113859Sml29623 uint16_t skip_len; 23123859Sml29623 uint8_t pktbufsz_type; 23133859Sml29623 uint64_t rcr_entry; 23143859Sml29623 uint64_t *pkt_buf_addr_pp; 23153859Sml29623 uint64_t *pkt_buf_addr_p; 23163859Sml29623 uint32_t buf_offset; 23173859Sml29623 uint32_t bsize; 23183859Sml29623 uint32_t error_disp_cnt; 23193859Sml29623 uint32_t msg_index; 23203859Sml29623 p_rx_rbr_ring_t rx_rbr_p; 23213859Sml29623 p_rx_msg_t *rx_msg_ring_p; 23223859Sml29623 p_rx_msg_t rx_msg_p; 23233859Sml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0; 23243859Sml29623 nxge_status_t status = NXGE_OK; 23253859Sml29623 boolean_t is_valid = B_FALSE; 23263859Sml29623 p_nxge_rx_ring_stats_t rdc_stats; 23273859Sml29623 uint32_t bytes_read; 23283859Sml29623 uint64_t pkt_type; 23293859Sml29623 uint64_t frag; 23306028Ssbehera boolean_t pkt_too_long_err = B_FALSE; 23313859Sml29623 #ifdef NXGE_DEBUG 23323859Sml29623 int dump_len; 23333859Sml29623 #endif 23343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 23353859Sml29623 first_entry = (*mp == NULL) ? 
B_TRUE : B_FALSE; 23363859Sml29623 23373859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 23383859Sml29623 23393859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK); 23403859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 23413859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 23423859Sml29623 23433859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 23443859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK); 23453859Sml29623 23463859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 23473859Sml29623 23483859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 23496929Smisaki RCR_PKTBUFSZ_SHIFT); 23505125Sjoycey #if defined(__i386) 23515125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 23526929Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 23535125Sjoycey #else 23543859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 23556929Smisaki RCR_PKT_BUF_ADDR_SHIFT); 23565125Sjoycey #endif 23573859Sml29623 23583859Sml29623 channel = rcr_p->rdc; 23593859Sml29623 23603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23616929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23626929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23636929Smisaki "error_type 0x%x pkt_type 0x%x " 23646929Smisaki "pktbufsz_type %d ", 23656929Smisaki rcr_desc_rd_head_p, 23666929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23676929Smisaki multi, 23686929Smisaki error_type, 23696929Smisaki pkt_type, 23706929Smisaki pktbufsz_type)); 23713859Sml29623 23723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23736929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23746929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23756929Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 23766929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23776929Smisaki multi, 23786929Smisaki error_type, 23796929Smisaki pkt_type)); 23803859Sml29623 23813859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23826929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 23836929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23846929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23853859Sml29623 23863859Sml29623 /* get the stats ptr */ 23873859Sml29623 rdc_stats = rcr_p->rdc_stats; 23883859Sml29623 23893859Sml29623 if (!l2_len) { 23903859Sml29623 23913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23926929Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 23933859Sml29623 return; 23943859Sml29623 } 23953859Sml29623 23966028Ssbehera /* 23978275SEric Cheng * Software workaround for BMAC hardware limitation that allows 23986028Ssbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 23996028Ssbehera * instead of 0x2400 for jumbo. 24006028Ssbehera */ 24016028Ssbehera if (l2_len > nxgep->mac.maxframesize) { 24026028Ssbehera pkt_too_long_err = B_TRUE; 24036028Ssbehera } 24046028Ssbehera 24054185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 24064185Sspeer l2_len -= ETHERFCSL; 24074185Sspeer 24083859Sml29623 /* shift 6 bits to get the full io address */ 24095125Sjoycey #if defined(__i386) 24105125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 24116929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24125125Sjoycey #else 24133859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 24146929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24155125Sjoycey #endif 24163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24176929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 24186929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24196929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24203859Sml29623 24213859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p; 24223859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 24233859Sml29623 24243859Sml29623 if (first_entry) { 24253859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? 
RXDMA_HDR_SIZE_FULL : 24266929Smisaki RXDMA_HDR_SIZE_DEFAULT); 24273859Sml29623 24283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24296929Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 24306929Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 24316929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 24326929Smisaki hdr_size)); 24333859Sml29623 } 24343859Sml29623 24353859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock); 24363859Sml29623 24373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24386929Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 24396929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24406929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24413859Sml29623 24423859Sml29623 /* 24433859Sml29623 * Packet buffer address in the completion entry points 24443859Sml29623 * to the starting buffer address (offset 0). 24453859Sml29623 * Use the starting buffer address to locate the corresponding 24463859Sml29623 * kernel address. 24473859Sml29623 */ 24483859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 24496929Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 24506929Smisaki &buf_offset, 24516929Smisaki &msg_index); 24523859Sml29623 24533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24546929Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 24556929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24566929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24573859Sml29623 24583859Sml29623 if (status != NXGE_OK) { 24593859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24616929Smisaki "<== nxge_receive_packet: found vaddr failed %d", 24626929Smisaki status)); 24633859Sml29623 return; 24643859Sml29623 } 24653859Sml29623 24663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24676929Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 24686929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24696929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24703859Sml29623 24713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24726929Smisaki "==> 
(rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24736929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24746929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24753859Sml29623 24763859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 24773859Sml29623 24783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24796929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24806929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24816929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24823859Sml29623 24833859Sml29623 switch (pktbufsz_type) { 24843859Sml29623 case RCR_PKTBUFSZ_0: 24853859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 24863859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24876929Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 24883859Sml29623 break; 24893859Sml29623 case RCR_PKTBUFSZ_1: 24903859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 24913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24926929Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 24933859Sml29623 break; 24943859Sml29623 case RCR_PKTBUFSZ_2: 24953859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 24963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24976929Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 24983859Sml29623 break; 24993859Sml29623 case RCR_SINGLE_BLOCK: 25003859Sml29623 bsize = rx_msg_p->block_size; 25013859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25026929Smisaki "==> nxge_receive_packet: single %d", bsize)); 25033859Sml29623 25043859Sml29623 break; 25053859Sml29623 default: 25063859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25073859Sml29623 return; 25083859Sml29623 } 25093859Sml29623 25103859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 25116929Smisaki (buf_offset + sw_offset_bytes), 25126929Smisaki (hdr_size + l2_len), 25136929Smisaki DDI_DMA_SYNC_FORCPU); 25143859Sml29623 25153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25166929Smisaki "==> nxge_receive_packet: after first dump:usage count")); 25173859Sml29623 25183859Sml29623 if 
(rx_msg_p->cur_usage_cnt == 0) { 25193859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 25203859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 25213859Sml29623 if (rx_rbr_p->rbr_consumed < 25226929Smisaki rx_rbr_p->rbr_threshold_hi) { 25233859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 25246929Smisaki ((rx_rbr_p->rbr_consumed >= 25256929Smisaki rx_rbr_p->rbr_threshold_lo) && 25266929Smisaki (rx_rbr_p->rbr_bufsize_type >= 25276929Smisaki pktbufsz_type))) { 25283859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25293859Sml29623 } 25303859Sml29623 } else { 25313859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25323859Sml29623 } 25333859Sml29623 } 25343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25356929Smisaki "==> nxge_receive_packet: buf %d (new block) ", 25366929Smisaki bsize)); 25373859Sml29623 25383859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 25393859Sml29623 rx_msg_p->pkt_buf_size = bsize; 25403859Sml29623 rx_msg_p->cur_usage_cnt = 1; 25413859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 25423859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25436929Smisaki "==> nxge_receive_packet: buf %d " 25446929Smisaki "(single block) ", 25456929Smisaki bsize)); 25463859Sml29623 /* 25473859Sml29623 * Buffer can be reused once the free function 25483859Sml29623 * is called. 
25493859Sml29623 */ 25503859Sml29623 rx_msg_p->max_usage_cnt = 1; 25513859Sml29623 buffer_free = B_TRUE; 25523859Sml29623 } else { 25533859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 25543859Sml29623 if (rx_msg_p->max_usage_cnt == 1) { 25553859Sml29623 buffer_free = B_TRUE; 25563859Sml29623 } 25573859Sml29623 } 25583859Sml29623 } else { 25593859Sml29623 rx_msg_p->cur_usage_cnt++; 25603859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 25613859Sml29623 buffer_free = B_TRUE; 25623859Sml29623 } 25633859Sml29623 } 25643859Sml29623 25653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25663859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 25676929Smisaki msg_index, l2_len, 25686929Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 25693859Sml29623 25706028Ssbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 25713859Sml29623 rdc_stats->ierrors++; 25723859Sml29623 if (dcf_err) { 25733859Sml29623 rdc_stats->dcf_err++; 25743859Sml29623 #ifdef NXGE_DEBUG 25753859Sml29623 if (!rdc_stats->dcf_err) { 25763859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25773859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr" 25783859Sml29623 " 0x%llx", channel, rcr_entry)); 25793859Sml29623 } 25803859Sml29623 #endif 25813859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 25826929Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 25836028Ssbehera } else if (pkt_too_long_err) { 25846028Ssbehera rdc_stats->pkt_too_long_err++; 25856028Ssbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 25866028Ssbehera " channel %d packet length [%d] > " 25876028Ssbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 25886028Ssbehera nxgep->mac.maxframesize)); 25893859Sml29623 } else { 25903859Sml29623 /* Update error stats */ 25913859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 25923859Sml29623 rdc_stats->errlog.compl_err_type = error_type; 25933859Sml29623 25943859Sml29623 switch (error_type) { 25955523Syc148097 /* 
25965523Syc148097 * Do not send FMA ereport for RCR_L2_ERROR and 25975523Syc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 25985523Syc148097 * back pressure rather than HW failures. 25995523Syc148097 */ 26005165Syc148097 case RCR_L2_ERROR: 26015165Syc148097 rdc_stats->l2_err++; 26025165Syc148097 if (rdc_stats->l2_err < 26035165Syc148097 error_disp_cnt) { 26045165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26055165Syc148097 " nxge_receive_packet:" 26065165Syc148097 " channel %d RCR L2_ERROR", 26075165Syc148097 channel)); 26085165Syc148097 } 26095165Syc148097 break; 26105165Syc148097 case RCR_L4_CSUM_ERROR: 26115165Syc148097 error_send_up = B_TRUE; 26125165Syc148097 rdc_stats->l4_cksum_err++; 26135165Syc148097 if (rdc_stats->l4_cksum_err < 26145165Syc148097 error_disp_cnt) { 26153859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26165165Syc148097 " nxge_receive_packet:" 26175165Syc148097 " channel %d" 26185165Syc148097 " RCR L4_CSUM_ERROR", channel)); 26195165Syc148097 } 26205165Syc148097 break; 26215523Syc148097 /* 26225523Syc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 26235523Syc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same 26245523Syc148097 * FFLP and ZCP errors that have been reported by 26255523Syc148097 * nxge_fflp.c and nxge_zcp.c. 
26265523Syc148097 */ 26275165Syc148097 case RCR_FFLP_SOFT_ERROR: 26285165Syc148097 error_send_up = B_TRUE; 26295165Syc148097 rdc_stats->fflp_soft_err++; 26305165Syc148097 if (rdc_stats->fflp_soft_err < 26315165Syc148097 error_disp_cnt) { 26325165Syc148097 NXGE_ERROR_MSG((nxgep, 26335165Syc148097 NXGE_ERR_CTL, 26345165Syc148097 " nxge_receive_packet:" 26355165Syc148097 " channel %d" 26365165Syc148097 " RCR FFLP_SOFT_ERROR", channel)); 26375165Syc148097 } 26385165Syc148097 break; 26395165Syc148097 case RCR_ZCP_SOFT_ERROR: 26405165Syc148097 error_send_up = B_TRUE; 26415165Syc148097 rdc_stats->fflp_soft_err++; 26425165Syc148097 if (rdc_stats->zcp_soft_err < 26435165Syc148097 error_disp_cnt) 26445165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26455165Syc148097 " nxge_receive_packet: Channel %d" 26465165Syc148097 " RCR ZCP_SOFT_ERROR", channel)); 26475165Syc148097 break; 26485165Syc148097 default: 26495165Syc148097 rdc_stats->rcr_unknown_err++; 26505165Syc148097 if (rdc_stats->rcr_unknown_err 26515165Syc148097 < error_disp_cnt) { 26525165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26535165Syc148097 " nxge_receive_packet: Channel %d" 26545165Syc148097 " RCR entry 0x%llx error 0x%x", 26555165Syc148097 rcr_entry, channel, error_type)); 26565165Syc148097 } 26575165Syc148097 break; 26583859Sml29623 } 26593859Sml29623 } 26603859Sml29623 26613859Sml29623 /* 26623859Sml29623 * Update and repost buffer block if max usage 26633859Sml29623 * count is reached. 
26643859Sml29623 */ 26653859Sml29623 if (error_send_up == B_FALSE) { 26664874Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 26673859Sml29623 if (buffer_free == B_TRUE) { 26683859Sml29623 rx_msg_p->free = B_TRUE; 26693859Sml29623 } 26703859Sml29623 26713859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 26723859Sml29623 nxge_freeb(rx_msg_p); 26733859Sml29623 return; 26743859Sml29623 } 26753859Sml29623 } 26763859Sml29623 26773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 26786929Smisaki "==> nxge_receive_packet: DMA sync second ")); 26793859Sml29623 26805165Syc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 26813859Sml29623 skip_len = sw_offset_bytes + hdr_size; 26823859Sml29623 if (!rx_msg_p->rx_use_bcopy) { 26834874Sml29623 /* 26844874Sml29623 * For loaned up buffers, the driver reference count 26854874Sml29623 * will be incremented first and then the free state. 26864874Sml29623 */ 26875165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 26885165Syc148097 if (first_entry) { 26895165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len]; 26905165Syc148097 if (l2_len < bsize - skip_len) { 26915165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len]; 26925165Syc148097 } else { 26935165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize 26945165Syc148097 - skip_len]; 26955165Syc148097 } 26965165Syc148097 } else { 26975165Syc148097 if (l2_len - bytes_read < bsize) { 26985165Syc148097 nmp->b_wptr = 26995165Syc148097 &nmp->b_rptr[l2_len - bytes_read]; 27005165Syc148097 } else { 27015165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 27025165Syc148097 } 27035165Syc148097 } 27045165Syc148097 } 27053859Sml29623 } else { 27065165Syc148097 if (first_entry) { 27075165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 27085165Syc148097 l2_len < bsize - skip_len ? 27095165Syc148097 l2_len : bsize - skip_len); 27105165Syc148097 } else { 27115165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 27125165Syc148097 l2_len - bytes_read < bsize ? 
27135165Syc148097 l2_len - bytes_read : bsize); 27145165Syc148097 } 27153859Sml29623 } 27163859Sml29623 if (nmp != NULL) { 27177145Syc148097 if (first_entry) { 27187145Syc148097 /* 27197145Syc148097 * Jumbo packets may be received with more than one 27207145Syc148097 * buffer, increment ipackets for the first entry only. 27217145Syc148097 */ 27227145Syc148097 rdc_stats->ipackets++; 27237145Syc148097 27247145Syc148097 /* Update ibytes for kstat. */ 27257145Syc148097 rdc_stats->ibytes += skip_len 27267145Syc148097 + l2_len < bsize ? l2_len : bsize; 27277145Syc148097 /* 27287145Syc148097 * Update the number of bytes read so far for the 27297145Syc148097 * current frame. 27307145Syc148097 */ 27315165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 27327145Syc148097 } else { 27337145Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 27347145Syc148097 l2_len - bytes_read : bsize; 27353859Sml29623 bytes_read += nmp->b_wptr - nmp->b_rptr; 27367145Syc148097 } 27375165Syc148097 27385165Syc148097 NXGE_DEBUG_MSG((nxgep, RX_CTL, 27395165Syc148097 "==> nxge_receive_packet after dupb: " 27405165Syc148097 "rbr consumed %d " 27415165Syc148097 "pktbufsz_type %d " 27425165Syc148097 "nmp $%p rptr $%p wptr $%p " 27435165Syc148097 "buf_offset %d bzise %d l2_len %d skip_len %d", 27445165Syc148097 rx_rbr_p->rbr_consumed, 27455165Syc148097 pktbufsz_type, 27465165Syc148097 nmp, nmp->b_rptr, nmp->b_wptr, 27475165Syc148097 buf_offset, bsize, l2_len, skip_len)); 27483859Sml29623 } else { 27493859Sml29623 cmn_err(CE_WARN, "!nxge_receive_packet: " 27506929Smisaki "update stats (error)"); 27514977Sraghus atomic_inc_32(&rx_msg_p->ref_cnt); 27524977Sraghus if (buffer_free == B_TRUE) { 27534977Sraghus rx_msg_p->free = B_TRUE; 27544977Sraghus } 27554977Sraghus MUTEX_EXIT(&rx_rbr_p->lock); 27564977Sraghus nxge_freeb(rx_msg_p); 27574977Sraghus return; 27583859Sml29623 } 27595060Syc148097 27603859Sml29623 if (buffer_free == B_TRUE) { 27613859Sml29623 rx_msg_p->free = B_TRUE; 27623859Sml29623 
} 27637145Syc148097 27643859Sml29623 is_valid = (nmp != NULL); 27655165Syc148097 27665165Syc148097 rcr_p->rcvd_pkt_bytes = bytes_read; 27675165Syc148097 27683859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 27693859Sml29623 27703859Sml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 27713859Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 27723859Sml29623 nxge_freeb(rx_msg_p); 27733859Sml29623 } 27743859Sml29623 27753859Sml29623 if (is_valid) { 27763859Sml29623 nmp->b_cont = NULL; 27773859Sml29623 if (first_entry) { 27783859Sml29623 *mp = nmp; 27793859Sml29623 *mp_cont = NULL; 27805165Syc148097 } else { 27813859Sml29623 *mp_cont = nmp; 27825165Syc148097 } 27833859Sml29623 } 27843859Sml29623 27853859Sml29623 /* 27867145Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 27877145Syc148097 * If a packet is not fragmented and no error bit is set, then 27887145Syc148097 * L4 checksum is OK. 27893859Sml29623 */ 27907145Syc148097 27913859Sml29623 if (is_valid && !multi) { 27926495Sspeer /* 27936611Sml29623 * If the checksum flag nxge_chksum_offload 27946611Sml29623 * is 1, TCP and UDP packets can be sent 27956495Sspeer * up with good checksum. If the checksum flag 27966611Sml29623 * is set to 0, checksum reporting will apply to 27976495Sspeer * TCP packets only (workaround for a hardware bug). 27986611Sml29623 * If the checksum flag nxge_cksum_offload is 27996611Sml29623 * greater than 1, both TCP and UDP packets 28006611Sml29623 * will not be reported its hardware checksum results. 28016495Sspeer */ 28026611Sml29623 if (nxge_cksum_offload == 1) { 28036495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 28046929Smisaki pkt_type == RCR_PKT_IS_UDP) ? 28056929Smisaki B_TRUE: B_FALSE); 28066611Sml29623 } else if (!nxge_cksum_offload) { 28076495Sspeer /* TCP checksum only. */ 28086495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
28096929Smisaki B_TRUE: B_FALSE); 28106495Sspeer } 28113859Sml29623 28123859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 28136929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 28146929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28153859Sml29623 28163859Sml29623 if (is_tcp_udp && !frag && !error_type) { 28173859Sml29623 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 28186929Smisaki HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 28193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 28206929Smisaki "==> nxge_receive_packet: Full tcp/udp cksum " 28216929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d " 28226929Smisaki "error %d", 28236929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28243859Sml29623 } 28253859Sml29623 } 28263859Sml29623 28273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 28286929Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 28293859Sml29623 28303859Sml29623 *multi_p = (multi == RCR_MULTI_MASK); 28313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 28326929Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 28336929Smisaki *multi_p, nmp, *mp, *mp_cont)); 28343859Sml29623 } 28353859Sml29623 28368275SEric Cheng /* 28378275SEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 28388275SEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 
28398275SEric Cheng */ 28408275SEric Cheng int 28418275SEric Cheng nxge_enable_poll(void *arg) 28428275SEric Cheng { 28438275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 28448275SEric Cheng p_rx_rcr_ring_t ringp; 28458275SEric Cheng p_nxge_t nxgep; 28468275SEric Cheng p_nxge_ldg_t ldgp; 28478275SEric Cheng uint32_t channel; 28488275SEric Cheng 28498275SEric Cheng if (ring_handle == NULL) { 28508275SEric Cheng return (0); 28518275SEric Cheng } 28528275SEric Cheng 28538275SEric Cheng nxgep = ring_handle->nxgep; 28548275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 28558275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 28568275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28578275SEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 28588275SEric Cheng ldgp = ringp->ldgp; 28598275SEric Cheng if (ldgp == NULL) { 28608275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28618275SEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 28628275SEric Cheng ringp->rdc)); 28638275SEric Cheng return (0); 28648275SEric Cheng } 28658275SEric Cheng 28668275SEric Cheng MUTEX_ENTER(&ringp->lock); 28678275SEric Cheng /* enable polling */ 28688275SEric Cheng if (ringp->poll_flag == 0) { 28698275SEric Cheng ringp->poll_flag = 1; 28708275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28718275SEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 28728275SEric Cheng ringp->rdc)); 28738275SEric Cheng } 28748275SEric Cheng 28758275SEric Cheng MUTEX_EXIT(&ringp->lock); 28768275SEric Cheng return (0); 28778275SEric Cheng } 28788275SEric Cheng /* 28798275SEric Cheng * Disable polling for a ring and enable its interrupt. 
28808275SEric Cheng */ 28818275SEric Cheng int 28828275SEric Cheng nxge_disable_poll(void *arg) 28838275SEric Cheng { 28848275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 28858275SEric Cheng p_rx_rcr_ring_t ringp; 28868275SEric Cheng p_nxge_t nxgep; 28878275SEric Cheng uint32_t channel; 28888275SEric Cheng 28898275SEric Cheng if (ring_handle == NULL) { 28908275SEric Cheng return (0); 28918275SEric Cheng } 28928275SEric Cheng 28938275SEric Cheng nxgep = ring_handle->nxgep; 28948275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 28958275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 28968275SEric Cheng 28978275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28988275SEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 28998275SEric Cheng 29008275SEric Cheng MUTEX_ENTER(&ringp->lock); 29018275SEric Cheng 29028275SEric Cheng /* disable polling: enable interrupt */ 29038275SEric Cheng if (ringp->poll_flag) { 29048275SEric Cheng npi_handle_t handle; 29058275SEric Cheng rx_dma_ctl_stat_t cs; 29068275SEric Cheng uint8_t channel; 29078275SEric Cheng p_nxge_ldg_t ldgp; 29088275SEric Cheng 29098275SEric Cheng /* 29108275SEric Cheng * Get the control and status for this channel. 29118275SEric Cheng */ 29128275SEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 29138275SEric Cheng channel = ringp->rdc; 29148275SEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 29158275SEric Cheng channel, &cs.value); 29168275SEric Cheng 29178275SEric Cheng /* 29188275SEric Cheng * Enable mailbox update 29198275SEric Cheng * Since packets were not read and the hardware uses 29208275SEric Cheng * bits pktread and ptrread to update the queue 29218275SEric Cheng * length, we need to set both bits to 0. 
29228275SEric Cheng */ 29238275SEric Cheng cs.bits.ldw.pktread = 0; 29248275SEric Cheng cs.bits.ldw.ptrread = 0; 29258275SEric Cheng cs.bits.hdw.mex = 1; 29268275SEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 29278275SEric Cheng cs.value); 29288275SEric Cheng 29298275SEric Cheng /* 29308275SEric Cheng * Rearm this logical group if this is a single device 29318275SEric Cheng * group. 29328275SEric Cheng */ 29338275SEric Cheng ldgp = ringp->ldgp; 29348275SEric Cheng if (ldgp == NULL) { 29358275SEric Cheng ringp->poll_flag = 0; 29368275SEric Cheng MUTEX_EXIT(&ringp->lock); 29378275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 29388275SEric Cheng "==> nxge_disable_poll: no ldgp rdc %d " 29398275SEric Cheng "(still set poll to 0", ringp->rdc)); 29408275SEric Cheng return (0); 29418275SEric Cheng } 29428275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 29438275SEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 29448275SEric Cheng ringp->rdc, ldgp)); 29458275SEric Cheng if (ldgp->nldvs == 1) { 29468275SEric Cheng ldgimgm_t mgm; 29478275SEric Cheng mgm.value = 0; 29488275SEric Cheng mgm.bits.ldw.arm = 1; 29498275SEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 29508275SEric Cheng NXGE_REG_WR64(handle, 29518275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 29528275SEric Cheng } 29538275SEric Cheng ringp->poll_flag = 0; 29548275SEric Cheng } 29558275SEric Cheng 29568275SEric Cheng MUTEX_EXIT(&ringp->lock); 29578275SEric Cheng return (0); 29588275SEric Cheng } 29598275SEric Cheng 29608275SEric Cheng /* 29618275SEric Cheng * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
 */
mblk_t *
nxge_rx_poll(void *arg, int bytes_to_pickup)
{
	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		rcr_p;
	p_nxge_t		nxgep;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;
	mblk_t			*mblk;
	p_nxge_ldv_t		ldvp;
	uint32_t		channel;

	nxgep = ring_handle->nxgep;

	/*
	 * Get the control and status for this channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
	MUTEX_ENTER(&rcr_p->lock);
	/* Caller must have switched this ring to poll mode first. */
	ASSERT(rcr_p->poll_flag == 1);

	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
	    rcr_p->rdc, rcr_p->poll_flag));
	/* Pull up to bytes_to_pickup bytes of packets off the RCR. */
	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);

	ldvp = rcr_p->ldvp;
	/* error events. */
	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
	}

	MUTEX_EXIT(&rcr_p->lock);

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
	/* May be NULL if nothing was available. */
	return (mblk);
}


/*
 * nxge_rx_err_evnts
 *
 * Decode the RDC control/status error bits for one channel: bump the
 * matching per-channel kstat counters, post FMA ereports for hardware
 * faults, and trigger channel-level (nxge_rxdma_fatal_err_recover) or
 * port-level (nxge_ipp_fatal_err_recover) recovery when a fatal bit is
 * set.  In an LDOMs guest, recovery is not attempted and NXGE_ERROR is
 * returned instead.  Returns NXGE_OK if nothing fatal, or if recovery
 * succeeded.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
{
	p_nxge_rx_ring_stats_t	rdc_stats;
	npi_handle_t		handle;
	npi_status_t		rs;
	boolean_t		rxchan_fatal = B_FALSE;
	boolean_t		rxport_fatal = B_FALSE;
	uint8_t			portn;
	nxge_status_t		status = NXGE_OK;
	/* Cap on how many times each error type is logged. */
	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	portn = nxgep->mac.portnum;
	rdc_stats = &nxgep->statsp->rdc_stats[channel];

	if (cs.bits.hdw.rbr_tmout) {
		rdc_stats->rx_rbr_tmout++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
	}
	if (cs.bits.hdw.rsp_cnt_err) {
		rdc_stats->rsp_cnt_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/*
		 * This is not a fatal error (for the channel)!
		 * It escalates to port-level recovery instead.
		 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		/* Both parity bits share one NPI error-log read. */
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The Following 4 status bits are for information, the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		/* Rate-limit the log message, unlike the other fatal bits. */
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information, there is no need
		 * send FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		/* Guests cannot drive hardware recovery. */
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		/* Guests cannot drive hardware recovery. */
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}

/*
 * nxge_rdc_hvio_setup
 *
 * This code appears to setup some Hypervisor variables.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 * 	What does NIU_LP_WORKAROUND mean?
 *
 * 	Records the original (hypervisor-visible) I/O addresses and sizes
 * 	of both the data-buffer pool and the control (RBR/RCR/mailbox)
 * 	pool on the channel's RBR ring, and clears ring->hv_set.
 *
 * NPI/NXGE function calls:
 * 	na
 *
 * Context:
 * 	Any domain
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_rdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*dma_common;
	nxge_dma_common_t	*dma_control;
	rx_rbr_ring_t		*ring;

	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];

	ring->hv_set = B_FALSE;

	/* Data buffer pool: original IO address and total length. */
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
	    dma_common->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)
	    dma_common->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
	    channel, ring->hv_rx_buf_base_ioaddr_pp,
	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
	    dma_common->orig_alength, dma_common->orig_alength));

	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	/* Control (descriptor/mailbox) pool: same bookkeeping. */
	ring->hv_rx_cntl_base_ioaddr_pp =
	    (uint64_t)dma_control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size =
	    (uint64_t)dma_control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: "
	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
	    dma_control->orig_alength, dma_control->orig_alength));
}
#endif

/*
 * nxge_map_rxdma
 *
 * Map an RDC into our kernel space.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel to map.
 *
 * Notes:
 * 	1. Allocate & initialise a memory pool, if necessary.
 * 	2. Allocate however many receive buffers are required.
 * 	3. Setup buffers, descriptors, and mailbox.
32926495Sspeer * 32936495Sspeer * NPI/NXGE function calls: 32946495Sspeer * nxge_alloc_rx_mem_pool() 32956495Sspeer * nxge_alloc_rbb() 32966495Sspeer * nxge_map_rxdma_channel() 32976495Sspeer * 32986495Sspeer * Registers accessed: 32996495Sspeer * 33006495Sspeer * Context: 33016495Sspeer * Any domain 33026495Sspeer */ 33036495Sspeer static nxge_status_t 33046495Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel) 33056495Sspeer { 33066495Sspeer nxge_dma_common_t **data; 33076495Sspeer nxge_dma_common_t **control; 33086495Sspeer rx_rbr_ring_t **rbr_ring; 33096495Sspeer rx_rcr_ring_t **rcr_ring; 33106495Sspeer rx_mbox_t **mailbox; 33116495Sspeer uint32_t chunks; 33126495Sspeer 33136495Sspeer nxge_status_t status; 33146495Sspeer 33153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 33163859Sml29623 33176495Sspeer if (!nxgep->rx_buf_pool_p) { 33186495Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 33196495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33206495Sspeer "<== nxge_map_rxdma: buf not allocated")); 33216495Sspeer return (NXGE_ERROR); 33226495Sspeer } 33233859Sml29623 } 33243859Sml29623 33256495Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 33266495Sspeer return (NXGE_ERROR); 33273859Sml29623 33283859Sml29623 /* 33293859Sml29623 * Timeout should be set based on the system clock divider. 3330*8661SSantwona.Behera@Sun.COM * A timeout value of 1 assumes that the 33313859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 33323859Sml29623 */ 33333859Sml29623 3334*8661SSantwona.Behera@Sun.COM nxgep->intr_threshold = nxge_rcr_threshold; 3335*8661SSantwona.Behera@Sun.COM nxgep->intr_timeout = nxge_rcr_timeout; 33363859Sml29623 33373859Sml29623 /* 33386495Sspeer * Map descriptors from the buffer polls for each dma channel. 33396495Sspeer */ 33406495Sspeer 33416495Sspeer /* 33426495Sspeer * Set up and prepare buffer blocks, descriptors 33436495Sspeer * and mailbox. 
33443859Sml29623 */ 33456495Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 33466495Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 33476495Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 33486495Sspeer 33496495Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 33506495Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 33516495Sspeer 33526495Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 33536495Sspeer 33546495Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 33556495Sspeer chunks, control, rcr_ring, mailbox); 33566495Sspeer if (status != NXGE_OK) { 33576495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33586929Smisaki "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 33596929Smisaki "returned 0x%x", 33606929Smisaki channel, status)); 33616495Sspeer return (status); 33626495Sspeer } 33636495Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 33646495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 33656495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 33666495Sspeer &nxgep->statsp->rdc_stats[channel]; 33673859Sml29623 33683859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 33696495Sspeer if (!isLDOMguest(nxgep)) 33706495Sspeer nxge_rdc_hvio_setup(nxgep, channel); 33716495Sspeer #endif 33726495Sspeer 33733859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33746495Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 33753859Sml29623 33763859Sml29623 return (status); 33773859Sml29623 } 33783859Sml29623 33793859Sml29623 static void 33806495Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 33813859Sml29623 { 33826495Sspeer rx_rbr_ring_t *rbr_ring; 33836495Sspeer rx_rcr_ring_t *rcr_ring; 33846495Sspeer rx_mbox_t *mailbox; 33856495Sspeer 33866495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 33876495Sspeer 33886495Sspeer if (!nxgep->rx_rbr_rings || 
!nxgep->rx_rcr_rings || 33896495Sspeer !nxgep->rx_mbox_areas_p) 33903859Sml29623 return; 33916495Sspeer 33926495Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 33936495Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 33946495Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 33956495Sspeer 33966495Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 33973859Sml29623 return; 33986495Sspeer 33996495Sspeer (void) nxge_unmap_rxdma_channel( 34006929Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox); 34016495Sspeer 34026495Sspeer nxge_free_rxb(nxgep, channel); 34036495Sspeer 34046495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 34053859Sml29623 } 34063859Sml29623 34073859Sml29623 nxge_status_t 34083859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 34093859Sml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 34103859Sml29623 uint32_t num_chunks, 34113859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 34123859Sml29623 p_rx_mbox_t *rx_mbox_p) 34133859Sml29623 { 34143859Sml29623 int status = NXGE_OK; 34153859Sml29623 34163859Sml29623 /* 34173859Sml29623 * Set up and prepare buffer blocks, descriptors 34183859Sml29623 * and mailbox. 34193859Sml29623 */ 34203859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34216929Smisaki "==> nxge_map_rxdma_channel (channel %d)", channel)); 34223859Sml29623 /* 34233859Sml29623 * Receive buffer blocks 34243859Sml29623 */ 34253859Sml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 34266929Smisaki dma_buf_p, rbr_p, num_chunks); 34273859Sml29623 if (status != NXGE_OK) { 34283859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34296929Smisaki "==> nxge_map_rxdma_channel (channel %d): " 34306929Smisaki "map buffer failed 0x%x", channel, status)); 34313859Sml29623 goto nxge_map_rxdma_channel_exit; 34323859Sml29623 } 34333859Sml29623 34343859Sml29623 /* 34353859Sml29623 * Receive block ring, completion ring and mailbox. 
34363859Sml29623 */ 34373859Sml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 34386929Smisaki dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 34393859Sml29623 if (status != NXGE_OK) { 34403859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34416929Smisaki "==> nxge_map_rxdma_channel (channel %d): " 34426929Smisaki "map config failed 0x%x", channel, status)); 34433859Sml29623 goto nxge_map_rxdma_channel_fail2; 34443859Sml29623 } 34453859Sml29623 34463859Sml29623 goto nxge_map_rxdma_channel_exit; 34473859Sml29623 34483859Sml29623 nxge_map_rxdma_channel_fail3: 34493859Sml29623 /* Free rbr, rcr */ 34503859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34516929Smisaki "==> nxge_map_rxdma_channel: free rbr/rcr " 34526929Smisaki "(status 0x%x channel %d)", 34536929Smisaki status, channel)); 34543859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34556929Smisaki *rcr_p, *rx_mbox_p); 34563859Sml29623 34573859Sml29623 nxge_map_rxdma_channel_fail2: 34583859Sml29623 /* Free buffer blocks */ 34593859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34606929Smisaki "==> nxge_map_rxdma_channel: free rx buffers" 34616929Smisaki "(nxgep 0x%x status 0x%x channel %d)", 34626929Smisaki nxgep, status, channel)); 34633859Sml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 34643859Sml29623 34654185Sspeer status = NXGE_ERROR; 34664185Sspeer 34673859Sml29623 nxge_map_rxdma_channel_exit: 34683859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34696929Smisaki "<== nxge_map_rxdma_channel: " 34706929Smisaki "(nxgep 0x%x status 0x%x channel %d)", 34716929Smisaki nxgep, status, channel)); 34723859Sml29623 34733859Sml29623 return (status); 34743859Sml29623 } 34753859Sml29623 34763859Sml29623 /*ARGSUSED*/ 34773859Sml29623 static void 34783859Sml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 34793859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 34803859Sml29623 { 34813859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34826929Smisaki "==> 
nxge_unmap_rxdma_channel (channel %d)", channel)); 34833859Sml29623 34843859Sml29623 /* 34853859Sml29623 * unmap receive block ring, completion ring and mailbox. 34863859Sml29623 */ 34873859Sml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34886929Smisaki rcr_p, rx_mbox_p); 34893859Sml29623 34903859Sml29623 /* unmap buffer blocks */ 34913859Sml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 34923859Sml29623 34933859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 34943859Sml29623 } 34953859Sml29623 34963859Sml29623 /*ARGSUSED*/ 34973859Sml29623 static nxge_status_t 34983859Sml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 34993859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 35003859Sml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 35013859Sml29623 { 35023859Sml29623 p_rx_rbr_ring_t rbrp; 35033859Sml29623 p_rx_rcr_ring_t rcrp; 35043859Sml29623 p_rx_mbox_t mboxp; 35053859Sml29623 p_nxge_dma_common_t cntl_dmap; 35063859Sml29623 p_nxge_dma_common_t dmap; 35073859Sml29623 p_rx_msg_t *rx_msg_ring; 35083859Sml29623 p_rx_msg_t rx_msg_p; 35093859Sml29623 p_rbr_cfig_a_t rcfga_p; 35103859Sml29623 p_rbr_cfig_b_t rcfgb_p; 35113859Sml29623 p_rcrcfig_a_t cfga_p; 35123859Sml29623 p_rcrcfig_b_t cfgb_p; 35133859Sml29623 p_rxdma_cfig1_t cfig1_p; 35143859Sml29623 p_rxdma_cfig2_t cfig2_p; 35153859Sml29623 p_rbr_kick_t kick_p; 35163859Sml29623 uint32_t dmaaddrp; 35173859Sml29623 uint32_t *rbr_vaddrp; 35183859Sml29623 uint32_t bkaddr; 35193859Sml29623 nxge_status_t status = NXGE_OK; 35203859Sml29623 int i; 35213859Sml29623 uint32_t nxge_port_rcr_size; 35223859Sml29623 35233859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35246929Smisaki "==> nxge_map_rxdma_channel_cfg_ring")); 35253859Sml29623 35263859Sml29623 cntl_dmap = *dma_cntl_p; 35273859Sml29623 35283859Sml29623 /* Map in the receive block ring */ 35293859Sml29623 rbrp = *rbr_p; 35303859Sml29623 dmap = 
(p_nxge_dma_common_t)&rbrp->rbr_desc; 35313859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 35323859Sml29623 /* 35333859Sml29623 * Zero out buffer block ring descriptors. 35343859Sml29623 */ 35353859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 35363859Sml29623 35373859Sml29623 rcfga_p = &(rbrp->rbr_cfga); 35383859Sml29623 rcfgb_p = &(rbrp->rbr_cfgb); 35393859Sml29623 kick_p = &(rbrp->rbr_kick); 35403859Sml29623 rcfga_p->value = 0; 35413859Sml29623 rcfgb_p->value = 0; 35423859Sml29623 kick_p->value = 0; 35433859Sml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 35443859Sml29623 rcfga_p->value = (rbrp->rbr_addr & 35456929Smisaki (RBR_CFIG_A_STDADDR_MASK | 35466929Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 35473859Sml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 35483859Sml29623 35493859Sml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 35503859Sml29623 rcfgb_p->bits.ldw.vld0 = 1; 35513859Sml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 35523859Sml29623 rcfgb_p->bits.ldw.vld1 = 1; 35533859Sml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 35543859Sml29623 rcfgb_p->bits.ldw.vld2 = 1; 35553859Sml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 35563859Sml29623 35573859Sml29623 /* 35583859Sml29623 * For each buffer block, enter receive block address to the ring. 
35593859Sml29623 */ 35603859Sml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 35613859Sml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 35623859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35636929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 35646929Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 35653859Sml29623 35663859Sml29623 rx_msg_ring = rbrp->rx_msg_ring; 35673859Sml29623 for (i = 0; i < rbrp->tnblocks; i++) { 35683859Sml29623 rx_msg_p = rx_msg_ring[i]; 35693859Sml29623 rx_msg_p->nxgep = nxgep; 35703859Sml29623 rx_msg_p->rx_rbr_p = rbrp; 35713859Sml29623 bkaddr = (uint32_t) 35726929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 35736929Smisaki >> RBR_BKADDR_SHIFT)); 35743859Sml29623 rx_msg_p->free = B_FALSE; 35753859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 35763859Sml29623 35773859Sml29623 *rbr_vaddrp++ = bkaddr; 35783859Sml29623 } 35793859Sml29623 35803859Sml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 35813859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 35823859Sml29623 35833859Sml29623 rbrp->rbr_rd_index = 0; 35843859Sml29623 35853859Sml29623 rbrp->rbr_consumed = 0; 35863859Sml29623 rbrp->rbr_use_bcopy = B_TRUE; 35873859Sml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 35883859Sml29623 /* 35893859Sml29623 * Do bcopy on packets greater than bcopy size once 35903859Sml29623 * the lo threshold is reached. 35913859Sml29623 * This lo threshold should be less than the hi threshold. 35923859Sml29623 * 35933859Sml29623 * Do bcopy on every packet once the hi threshold is reached. 
35943859Sml29623 */ 35953859Sml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 35963859Sml29623 /* default it to use hi */ 35973859Sml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 35983859Sml29623 } 35993859Sml29623 36003859Sml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 36013859Sml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 36023859Sml29623 } 36033859Sml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 36043859Sml29623 36053859Sml29623 switch (nxge_rx_threshold_hi) { 36063859Sml29623 default: 36073859Sml29623 case NXGE_RX_COPY_NONE: 36083859Sml29623 /* Do not do bcopy at all */ 36093859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 36103859Sml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 36113859Sml29623 break; 36123859Sml29623 36133859Sml29623 case NXGE_RX_COPY_1: 36143859Sml29623 case NXGE_RX_COPY_2: 36153859Sml29623 case NXGE_RX_COPY_3: 36163859Sml29623 case NXGE_RX_COPY_4: 36173859Sml29623 case NXGE_RX_COPY_5: 36183859Sml29623 case NXGE_RX_COPY_6: 36193859Sml29623 case NXGE_RX_COPY_7: 36203859Sml29623 rbrp->rbr_threshold_hi = 36216929Smisaki rbrp->rbb_max * 36226929Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 36233859Sml29623 break; 36243859Sml29623 36253859Sml29623 case NXGE_RX_COPY_ALL: 36263859Sml29623 rbrp->rbr_threshold_hi = 0; 36273859Sml29623 break; 36283859Sml29623 } 36293859Sml29623 36303859Sml29623 switch (nxge_rx_threshold_lo) { 36313859Sml29623 default: 36323859Sml29623 case NXGE_RX_COPY_NONE: 36333859Sml29623 /* Do not do bcopy at all */ 36343859Sml29623 if (rbrp->rbr_use_bcopy) { 36353859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 36363859Sml29623 } 36373859Sml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 36383859Sml29623 break; 36393859Sml29623 36403859Sml29623 case NXGE_RX_COPY_1: 36413859Sml29623 case NXGE_RX_COPY_2: 36423859Sml29623 case NXGE_RX_COPY_3: 36433859Sml29623 case NXGE_RX_COPY_4: 36443859Sml29623 case NXGE_RX_COPY_5: 36453859Sml29623 case NXGE_RX_COPY_6: 36463859Sml29623 case NXGE_RX_COPY_7: 36473859Sml29623 
rbrp->rbr_threshold_lo = 36486929Smisaki rbrp->rbb_max * 36496929Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 36503859Sml29623 break; 36513859Sml29623 36523859Sml29623 case NXGE_RX_COPY_ALL: 36533859Sml29623 rbrp->rbr_threshold_lo = 0; 36543859Sml29623 break; 36553859Sml29623 } 36563859Sml29623 36573859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 36586929Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 36596929Smisaki "rbb_max %d " 36606929Smisaki "rbrp->rbr_bufsize_type %d " 36616929Smisaki "rbb_threshold_hi %d " 36626929Smisaki "rbb_threshold_lo %d", 36636929Smisaki dma_channel, 36646929Smisaki rbrp->rbb_max, 36656929Smisaki rbrp->rbr_bufsize_type, 36666929Smisaki rbrp->rbr_threshold_hi, 36676929Smisaki rbrp->rbr_threshold_lo)); 36683859Sml29623 36693859Sml29623 rbrp->page_valid.value = 0; 36703859Sml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 36713859Sml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 36723859Sml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 36733859Sml29623 rbrp->page_hdl.value = 0; 36743859Sml29623 36753859Sml29623 rbrp->page_valid.bits.ldw.page0 = 1; 36763859Sml29623 rbrp->page_valid.bits.ldw.page1 = 1; 36773859Sml29623 36783859Sml29623 /* Map in the receive completion ring */ 36793859Sml29623 rcrp = (p_rx_rcr_ring_t) 36806929Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 36813859Sml29623 rcrp->rdc = dma_channel; 36823859Sml29623 36833859Sml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 36843859Sml29623 rcrp->comp_size = nxge_port_rcr_size; 36853859Sml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 36863859Sml29623 36873859Sml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 36883859Sml29623 36893859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 36903859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 36916929Smisaki sizeof (rcr_entry_t)); 36923859Sml29623 rcrp->comp_rd_index = 0; 36933859Sml29623 rcrp->comp_wt_index = 0; 36943859Sml29623 
rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 36956929Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 36965125Sjoycey #if defined(__i386) 36976929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 36986929Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 36995125Sjoycey #else 37006929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 37016929Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 37025125Sjoycey #endif 37033859Sml29623 37043859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 37056929Smisaki (nxge_port_rcr_size - 1); 37063859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 37076929Smisaki (nxge_port_rcr_size - 1); 37083859Sml29623 37093859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37106929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37116929Smisaki "channel %d " 37126929Smisaki "rbr_vaddrp $%p " 37136929Smisaki "rcr_desc_rd_head_p $%p " 37146929Smisaki "rcr_desc_rd_head_pp $%p " 37156929Smisaki "rcr_desc_rd_last_p $%p " 37166929Smisaki "rcr_desc_rd_last_pp $%p ", 37176929Smisaki dma_channel, 37186929Smisaki rbr_vaddrp, 37196929Smisaki rcrp->rcr_desc_rd_head_p, 37206929Smisaki rcrp->rcr_desc_rd_head_pp, 37216929Smisaki rcrp->rcr_desc_last_p, 37226929Smisaki rcrp->rcr_desc_last_pp)); 37233859Sml29623 37243859Sml29623 /* 37253859Sml29623 * Zero out buffer block ring descriptors. 37263859Sml29623 */ 37273859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 3728*8661SSantwona.Behera@Sun.COM 3729*8661SSantwona.Behera@Sun.COM rcrp->intr_timeout = (nxgep->intr_timeout < 3730*8661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 3731*8661SSantwona.Behera@Sun.COM nxgep->intr_timeout; 3732*8661SSantwona.Behera@Sun.COM 3733*8661SSantwona.Behera@Sun.COM rcrp->intr_threshold = (nxgep->intr_threshold < 3734*8661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_THRESHOLD_MIN) ? 
NXGE_RDC_RCR_THRESHOLD_MIN : 3735*8661SSantwona.Behera@Sun.COM nxgep->intr_threshold; 3736*8661SSantwona.Behera@Sun.COM 37373859Sml29623 rcrp->full_hdr_flag = B_FALSE; 37383859Sml29623 rcrp->sw_priv_hdr_len = 0; 37393859Sml29623 37403859Sml29623 cfga_p = &(rcrp->rcr_cfga); 37413859Sml29623 cfgb_p = &(rcrp->rcr_cfgb); 37423859Sml29623 cfga_p->value = 0; 37433859Sml29623 cfgb_p->value = 0; 37443859Sml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 37453859Sml29623 cfga_p->value = (rcrp->rcr_addr & 37466929Smisaki (RCRCFIG_A_STADDR_MASK | 37476929Smisaki RCRCFIG_A_STADDR_BASE_MASK)); 37483859Sml29623 37493859Sml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 37506929Smisaki RCRCFIG_A_LEN_SHIF); 37513859Sml29623 37523859Sml29623 /* 37533859Sml29623 * Timeout should be set based on the system clock divider. 3754*8661SSantwona.Behera@Sun.COM * A timeout value of 1 assumes that the 37553859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 37563859Sml29623 */ 37573859Sml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 37583859Sml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 37593859Sml29623 cfgb_p->bits.ldw.entout = 1; 37603859Sml29623 37613859Sml29623 /* Map in the mailbox */ 37623859Sml29623 mboxp = (p_rx_mbox_t) 37636929Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 37643859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 37653859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 37663859Sml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 37673859Sml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 37683859Sml29623 cfig1_p->value = cfig2_p->value = 0; 37693859Sml29623 37703859Sml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 37713859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37726929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37736929Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 37746929Smisaki dma_channel, cfig1_p->value, cfig2_p->value, 37756929Smisaki 
mboxp->mbox_addr)); 37763859Sml29623 37773859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 37786929Smisaki & 0xfff); 37793859Sml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 37803859Sml29623 37813859Sml29623 37823859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 37833859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 37846929Smisaki RXDMA_CFIG2_MBADDR_L_MASK); 37853859Sml29623 37863859Sml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 37873859Sml29623 37883859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37896929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37906929Smisaki "channel %d damaddrp $%p " 37916929Smisaki "cfg1 0x%016llx cfig2 0x%016llx", 37926929Smisaki dma_channel, dmaaddrp, 37936929Smisaki cfig1_p->value, cfig2_p->value)); 37943859Sml29623 37953859Sml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 37963859Sml29623 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 37973859Sml29623 37983859Sml29623 rbrp->rx_rcr_p = rcrp; 37993859Sml29623 rcrp->rx_rbr_p = rbrp; 38003859Sml29623 *rcr_p = rcrp; 38013859Sml29623 *rx_mbox_p = mboxp; 38023859Sml29623 38033859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38046929Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 38053859Sml29623 38063859Sml29623 return (status); 38073859Sml29623 } 38083859Sml29623 38093859Sml29623 /*ARGSUSED*/ 38103859Sml29623 static void 38113859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 38123859Sml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 38133859Sml29623 { 38143859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38156929Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 38166929Smisaki rcr_p->rdc)); 38173859Sml29623 38183859Sml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 38193859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 38203859Sml29623 38213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38226929Smisaki "<== 
nxge_unmap_rxdma_channel_cfg_ring")); 38233859Sml29623 } 38243859Sml29623 38253859Sml29623 static nxge_status_t 38263859Sml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 38273859Sml29623 p_nxge_dma_common_t *dma_buf_p, 38283859Sml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 38293859Sml29623 { 38303859Sml29623 p_rx_rbr_ring_t rbrp; 38313859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 38323859Sml29623 p_rx_msg_t *rx_msg_ring; 38333859Sml29623 p_rx_msg_t rx_msg_p; 38343859Sml29623 p_mblk_t mblk_p; 38353859Sml29623 38363859Sml29623 rxring_info_t *ring_info; 38373859Sml29623 nxge_status_t status = NXGE_OK; 38383859Sml29623 int i, j, index; 38393859Sml29623 uint32_t size, bsize, nblocks, nmsgs; 38403859Sml29623 38413859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38426929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d", 38436929Smisaki channel)); 38443859Sml29623 38453859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 38463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38476929Smisaki " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 38486929Smisaki "chunks bufp 0x%016llx", 38496929Smisaki channel, num_chunks, dma_bufp)); 38503859Sml29623 38513859Sml29623 nmsgs = 0; 38523859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 38533859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38546929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 38556929Smisaki "bufp 0x%016llx nblocks %d nmsgs %d", 38566929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 38573859Sml29623 nmsgs += tmp_bufp->nblocks; 38583859Sml29623 } 38593859Sml29623 if (!nmsgs) { 38604185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 38616929Smisaki "<== nxge_map_rxdma_channel_buf_ring: channel %d " 38626929Smisaki "no msg blocks", 38636929Smisaki channel)); 38643859Sml29623 status = NXGE_ERROR; 38653859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 38663859Sml29623 } 38673859Sml29623 38685170Stm144005 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), 
KM_SLEEP); 38693859Sml29623 38703859Sml29623 size = nmsgs * sizeof (p_rx_msg_t); 38713859Sml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 38723859Sml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 38736929Smisaki KM_SLEEP); 38743859Sml29623 38753859Sml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 38766929Smisaki (void *)nxgep->interrupt_cookie); 38773859Sml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 38786929Smisaki (void *)nxgep->interrupt_cookie); 38793859Sml29623 rbrp->rdc = channel; 38803859Sml29623 rbrp->num_blocks = num_chunks; 38813859Sml29623 rbrp->tnblocks = nmsgs; 38823859Sml29623 rbrp->rbb_max = nmsgs; 38833859Sml29623 rbrp->rbr_max_size = nmsgs; 38843859Sml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 38853859Sml29623 38863859Sml29623 /* 38873859Sml29623 * Buffer sizes suggested by NIU architect. 38883859Sml29623 * 256, 512 and 2K. 38893859Sml29623 */ 38903859Sml29623 38913859Sml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 38923859Sml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 38933859Sml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 38943859Sml29623 38953859Sml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 38963859Sml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 38973859Sml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 38983859Sml29623 38993859Sml29623 rbrp->block_size = nxgep->rx_default_block_size; 39003859Sml29623 39013859Sml29623 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 39023859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 39033859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 39043859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 39053859Sml29623 } else { 39063859Sml29623 if (rbrp->block_size >= 0x2000) { 39073859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 39083859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 39093859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 39103859Sml29623 } else { 39113859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 
39123859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 39133859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 39143859Sml29623 } 39153859Sml29623 } 39163859Sml29623 39173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39186929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 39196929Smisaki "actual rbr max %d rbb_max %d nmsgs %d " 39206929Smisaki "rbrp->block_size %d default_block_size %d " 39216929Smisaki "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 39226929Smisaki channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 39236929Smisaki rbrp->block_size, nxgep->rx_default_block_size, 39246929Smisaki nxge_rbr_size, nxge_rbr_spare_size)); 39253859Sml29623 39263859Sml29623 /* Map in buffers from the buffer pool. */ 39273859Sml29623 index = 0; 39283859Sml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 39293859Sml29623 bsize = dma_bufp->block_size; 39303859Sml29623 nblocks = dma_bufp->nblocks; 39315125Sjoycey #if defined(__i386) 39325125Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 39335125Sjoycey #else 39343859Sml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 39355125Sjoycey #endif 39363859Sml29623 ring_info->buffer[i].buf_index = i; 39373859Sml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 39383859Sml29623 ring_info->buffer[i].start_index = index; 39395125Sjoycey #if defined(__i386) 39405125Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 39415125Sjoycey #else 39423859Sml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 39435125Sjoycey #endif 39443859Sml29623 39453859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39466929Smisaki " nxge_map_rxdma_channel_buf_ring: map channel %d " 39476929Smisaki "chunk %d" 39486929Smisaki " nblocks %d chunk_size %x block_size 0x%x " 39496929Smisaki "dma_bufp $%p", channel, i, 39506929Smisaki dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 39516929Smisaki dma_bufp)); 39523859Sml29623 39533859Sml29623 for (j = 0; j < 
nblocks; j++) { 39543859Sml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 39556929Smisaki dma_bufp)) == NULL) { 39564185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39576929Smisaki "allocb failed (index %d i %d j %d)", 39586929Smisaki index, i, j)); 39594185Sspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 39603859Sml29623 } 39613859Sml29623 rx_msg_ring[index] = rx_msg_p; 39623859Sml29623 rx_msg_p->block_index = index; 39633859Sml29623 rx_msg_p->shifted_addr = (uint32_t) 39646929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 39656929Smisaki RBR_BKADDR_SHIFT)); 39663859Sml29623 39673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39686929Smisaki "index %d j %d rx_msg_p $%p mblk %p", 39696929Smisaki index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 39703859Sml29623 39713859Sml29623 mblk_p = rx_msg_p->rx_mblk_p; 39723859Sml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 39735170Stm144005 39745170Stm144005 rbrp->rbr_ref_cnt++; 39753859Sml29623 index++; 39763859Sml29623 rx_msg_p->buf_dma.dma_channel = channel; 39773859Sml29623 } 39786495Sspeer 39796495Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 39806495Sspeer if (dma_bufp->contig_alloc_type) { 39816495Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 39826495Sspeer } 39836495Sspeer 39846495Sspeer if (dma_bufp->kmem_alloc_type) { 39856495Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 39866495Sspeer } 39876495Sspeer 39886495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39896495Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 39906495Sspeer "chunk %d" 39916495Sspeer " nblocks %d chunk_size %x block_size 0x%x " 39926495Sspeer "dma_bufp $%p", 39936495Sspeer channel, i, 39946495Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 39956495Sspeer dma_bufp)); 39963859Sml29623 } 39973859Sml29623 if (i < rbrp->num_blocks) { 39983859Sml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 39993859Sml29623 } 40003859Sml29623 40013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40026929Smisaki "nxge_map_rxdma_channel_buf_ring: 
done buf init " 40036929Smisaki "channel %d msg block entries %d", 40046929Smisaki channel, index)); 40053859Sml29623 ring_info->block_size_mask = bsize - 1; 40063859Sml29623 rbrp->rx_msg_ring = rx_msg_ring; 40073859Sml29623 rbrp->dma_bufp = dma_buf_p; 40083859Sml29623 rbrp->ring_info = ring_info; 40093859Sml29623 40103859Sml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 40113859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40126929Smisaki " nxge_map_rxdma_channel_buf_ring: " 40136929Smisaki "channel %d done buf info init", channel)); 40143859Sml29623 40155170Stm144005 /* 40165170Stm144005 * Finally, permit nxge_freeb() to call nxge_post_page(). 40175170Stm144005 */ 40185170Stm144005 rbrp->rbr_state = RBR_POSTING; 40195170Stm144005 40203859Sml29623 *rbr_p = rbrp; 40213859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 40223859Sml29623 40233859Sml29623 nxge_map_rxdma_channel_buf_ring_fail1: 40243859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40256929Smisaki " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 40266929Smisaki channel, status)); 40273859Sml29623 40283859Sml29623 index--; 40293859Sml29623 for (; index >= 0; index--) { 40303859Sml29623 rx_msg_p = rx_msg_ring[index]; 40313859Sml29623 if (rx_msg_p != NULL) { 40323859Sml29623 freeb(rx_msg_p->rx_mblk_p); 40333859Sml29623 rx_msg_ring[index] = NULL; 40343859Sml29623 } 40353859Sml29623 } 40363859Sml29623 nxge_map_rxdma_channel_buf_ring_fail: 40373859Sml29623 MUTEX_DESTROY(&rbrp->post_lock); 40383859Sml29623 MUTEX_DESTROY(&rbrp->lock); 40393859Sml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 40403859Sml29623 KMEM_FREE(rx_msg_ring, size); 40413859Sml29623 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 40423859Sml29623 40434185Sspeer status = NXGE_ERROR; 40444185Sspeer 40453859Sml29623 nxge_map_rxdma_channel_buf_ring_exit: 40463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40476929Smisaki "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 40483859Sml29623 40493859Sml29623 return (status); 
40503859Sml29623 } 40513859Sml29623 40523859Sml29623 /*ARGSUSED*/ 40533859Sml29623 static void 40543859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 40553859Sml29623 p_rx_rbr_ring_t rbr_p) 40563859Sml29623 { 40573859Sml29623 p_rx_msg_t *rx_msg_ring; 40583859Sml29623 p_rx_msg_t rx_msg_p; 40593859Sml29623 rxring_info_t *ring_info; 40603859Sml29623 int i; 40613859Sml29623 uint32_t size; 40623859Sml29623 #ifdef NXGE_DEBUG 40633859Sml29623 int num_chunks; 40643859Sml29623 #endif 40653859Sml29623 40663859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40676929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring")); 40683859Sml29623 if (rbr_p == NULL) { 40693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40706929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 40713859Sml29623 return; 40723859Sml29623 } 40733859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40746929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 40756929Smisaki rbr_p->rdc)); 40763859Sml29623 40773859Sml29623 rx_msg_ring = rbr_p->rx_msg_ring; 40783859Sml29623 ring_info = rbr_p->ring_info; 40793859Sml29623 40803859Sml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 40816929Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40826929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: " 40836929Smisaki "rx_msg_ring $%p ring_info $%p", 40846929Smisaki rx_msg_p, ring_info)); 40853859Sml29623 return; 40863859Sml29623 } 40873859Sml29623 40883859Sml29623 #ifdef NXGE_DEBUG 40893859Sml29623 num_chunks = rbr_p->num_blocks; 40903859Sml29623 #endif 40913859Sml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 40923859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40936929Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 40946929Smisaki "tnblocks %d (max %d) size ptrs %d ", 40956929Smisaki rbr_p->rdc, num_chunks, 40966929Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 40973859Sml29623 40983859Sml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 40993859Sml29623 rx_msg_p = rx_msg_ring[i]; 
41003859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41016929Smisaki " nxge_unmap_rxdma_channel_buf_ring: " 41026929Smisaki "rx_msg_p $%p", 41036929Smisaki rx_msg_p)); 41043859Sml29623 if (rx_msg_p != NULL) { 41053859Sml29623 freeb(rx_msg_p->rx_mblk_p); 41063859Sml29623 rx_msg_ring[i] = NULL; 41073859Sml29623 } 41083859Sml29623 } 41093859Sml29623 41105170Stm144005 /* 41115170Stm144005 * We no longer may use the mutex <post_lock>. By setting 41125170Stm144005 * <rbr_state> to anything but POSTING, we prevent 41135170Stm144005 * nxge_post_page() from accessing a dead mutex. 41145170Stm144005 */ 41155170Stm144005 rbr_p->rbr_state = RBR_UNMAPPING; 41163859Sml29623 MUTEX_DESTROY(&rbr_p->post_lock); 41175170Stm144005 41183859Sml29623 MUTEX_DESTROY(&rbr_p->lock); 41195170Stm144005 41205170Stm144005 if (rbr_p->rbr_ref_cnt == 0) { 41216495Sspeer /* 41226495Sspeer * This is the normal state of affairs. 41236495Sspeer * Need to free the following buffers: 41246495Sspeer * - data buffers 41256495Sspeer * - rx_msg ring 41266495Sspeer * - ring_info 41276495Sspeer * - rbr ring 41286495Sspeer */ 41296495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 41306495Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 41316495Sspeer nxge_rxdma_databuf_free(rbr_p); 41326495Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 41336495Sspeer KMEM_FREE(rx_msg_ring, size); 41345170Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 41355170Stm144005 } else { 41365170Stm144005 /* 41375170Stm144005 * Some of our buffers are still being used. 41385170Stm144005 * Therefore, tell nxge_freeb() this ring is 41395170Stm144005 * unmapped, so it may free <rbr_p> for us. 41405170Stm144005 */ 41415170Stm144005 rbr_p->rbr_state = RBR_UNMAPPED; 41425170Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41435170Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.", 41445170Stm144005 rbr_p->rbr_ref_cnt, 41455170Stm144005 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 41465170Stm144005 } 41473859Sml29623 41483859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41496929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring")); 41503859Sml29623 } 41513859Sml29623 41526495Sspeer /* 41536495Sspeer * nxge_rxdma_hw_start_common 41546495Sspeer * 41556495Sspeer * Arguments: 41566495Sspeer * nxgep 41576495Sspeer * 41586495Sspeer * Notes: 41596495Sspeer * 41606495Sspeer * NPI/NXGE function calls: 41616495Sspeer * nxge_init_fzc_rx_common(); 41626495Sspeer * nxge_init_fzc_rxdma_port(); 41636495Sspeer * 41646495Sspeer * Registers accessed: 41656495Sspeer * 41666495Sspeer * Context: 41676495Sspeer * Service domain 41686495Sspeer */ 41693859Sml29623 static nxge_status_t 41703859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 41713859Sml29623 { 41723859Sml29623 nxge_status_t status = NXGE_OK; 41733859Sml29623 41743859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41753859Sml29623 41763859Sml29623 /* 41773859Sml29623 * Load the sharable parameters by writing to the 41783859Sml29623 * function zero control registers. These FZC registers 41793859Sml29623 * should be initialized only once for the entire chip. 41803859Sml29623 */ 41813859Sml29623 (void) nxge_init_fzc_rx_common(nxgep); 41823859Sml29623 41833859Sml29623 /* 41843859Sml29623 * Initialize the RXDMA port specific FZC control configurations. 41853859Sml29623 * These FZC registers are pertaining to each port. 
41863859Sml29623 */ 41873859Sml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 41883859Sml29623 41893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41903859Sml29623 41913859Sml29623 return (status); 41923859Sml29623 } 41933859Sml29623 41943859Sml29623 static nxge_status_t 41956495Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 41963859Sml29623 { 41973859Sml29623 int i, ndmas; 41983859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 41993859Sml29623 p_rx_rbr_ring_t *rbr_rings; 42003859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 42013859Sml29623 p_rx_rcr_ring_t *rcr_rings; 42023859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 42033859Sml29623 p_rx_mbox_t *rx_mbox_p; 42043859Sml29623 nxge_status_t status = NXGE_OK; 42053859Sml29623 42063859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 42073859Sml29623 42083859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42093859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42103859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42126929Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers")); 42133859Sml29623 return (NXGE_ERROR); 42143859Sml29623 } 42153859Sml29623 ndmas = rx_rbr_rings->ndmas; 42163859Sml29623 if (ndmas == 0) { 42173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42186929Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated")); 42193859Sml29623 return (NXGE_ERROR); 42203859Sml29623 } 42213859Sml29623 42223859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42236929Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 42243859Sml29623 42253859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 42263859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 42273859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 42283859Sml29623 if (rx_mbox_areas_p) { 42293859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 42303859Sml29623 } 42313859Sml29623 42326495Sspeer i = channel; 42336495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
42346929Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 42356929Smisaki ndmas, channel)); 42366495Sspeer status = nxge_rxdma_start_channel(nxgep, channel, 42376495Sspeer (p_rx_rbr_ring_t)rbr_rings[i], 42386495Sspeer (p_rx_rcr_ring_t)rcr_rings[i], 42396495Sspeer (p_rx_mbox_t)rx_mbox_p[i]); 42406495Sspeer if (status != NXGE_OK) { 42416495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42426495Sspeer "==> nxge_rxdma_hw_start: disable " 42436495Sspeer "(status 0x%x channel %d)", status, channel)); 42446495Sspeer return (status); 42453859Sml29623 } 42463859Sml29623 42473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 42486929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 42496929Smisaki rx_rbr_rings, rx_rcr_rings)); 42503859Sml29623 42513859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42526929Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 42533859Sml29623 42543859Sml29623 return (status); 42553859Sml29623 } 42563859Sml29623 42573859Sml29623 static void 42586495Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 42593859Sml29623 { 42603859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 42613859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 42623859Sml29623 42633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 42643859Sml29623 42653859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42663859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42673859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42696929Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers")); 42703859Sml29623 return; 42713859Sml29623 } 42723859Sml29623 42733859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42746929Smisaki "==> nxge_rxdma_hw_stop(channel %d)", 42756929Smisaki channel)); 42766495Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel); 42773859Sml29623 42783859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 42796929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 
42806929Smisaki rx_rbr_rings, rx_rcr_rings)); 42813859Sml29623 42823859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 42833859Sml29623 } 42843859Sml29623 42853859Sml29623 42863859Sml29623 static nxge_status_t 42873859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 42883859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 42893859Sml29623 42903859Sml29623 { 42913859Sml29623 npi_handle_t handle; 42923859Sml29623 npi_status_t rs = NPI_SUCCESS; 42933859Sml29623 rx_dma_ctl_stat_t cs; 42943859Sml29623 rx_dma_ent_msk_t ent_mask; 42953859Sml29623 nxge_status_t status = NXGE_OK; 42963859Sml29623 42973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 42983859Sml29623 42993859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 43003859Sml29623 43013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 43023859Sml29623 "npi handle addr $%p acc $%p", 43033859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 43043859Sml29623 43056495Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 43066495Sspeer if (!isLDOMguest(nxgep)) { 43076495Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel); 43086495Sspeer if (rs != NPI_SUCCESS) { 43096495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43106495Sspeer "==> nxge_init_fzc_rdc: " 43116495Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 43126495Sspeer channel, rs)); 43136495Sspeer return (NXGE_ERROR | rs); 43146495Sspeer } 43156495Sspeer 43166495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43176495Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d", 43186495Sspeer channel)); 43193859Sml29623 } 43203859Sml29623 43216495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 43226495Sspeer if (isLDOMguest(nxgep)) 43236495Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 43246495Sspeer #endif 43253859Sml29623 43263859Sml29623 /* 43273859Sml29623 * Initialize the RXDMA channel specific FZC control 43283859Sml29623 * configurations. These FZC registers are pertaining 43293859Sml29623 * to each RX channel (logical pages). 43303859Sml29623 */ 43316495Sspeer if (!isLDOMguest(nxgep)) { 43326495Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 43336495Sspeer if (status != NXGE_OK) { 43346495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43356495Sspeer "==> nxge_rxdma_start_channel: " 43366495Sspeer "init fzc rxdma failed (0x%08x channel %d)", 43376495Sspeer status, channel)); 43386495Sspeer return (status); 43396495Sspeer } 43406495Sspeer 43416495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43426495Sspeer "==> nxge_rxdma_start_channel: fzc done")); 43433859Sml29623 } 43443859Sml29623 43453859Sml29623 /* Set up the interrupt event masks. 
*/ 43463859Sml29623 ent_mask.value = 0; 43473859Sml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 43483859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 43496495Sspeer &ent_mask); 43503859Sml29623 if (rs != NPI_SUCCESS) { 43513859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43523859Sml29623 "==> nxge_rxdma_start_channel: " 43536495Sspeer "init rxdma event masks failed " 43546495Sspeer "(0x%08x channel %d)", 43553859Sml29623 status, channel)); 43563859Sml29623 return (NXGE_ERROR | rs); 43573859Sml29623 } 43583859Sml29623 43596495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43606495Sspeer "==> nxge_rxdma_start_channel: " 43613859Sml29623 "event done: channel %d (mask 0x%016llx)", 43623859Sml29623 channel, ent_mask.value)); 43633859Sml29623 43643859Sml29623 /* Initialize the receive DMA control and status register */ 43653859Sml29623 cs.value = 0; 43663859Sml29623 cs.bits.hdw.mex = 1; 43673859Sml29623 cs.bits.hdw.rcrthres = 1; 43683859Sml29623 cs.bits.hdw.rcrto = 1; 43693859Sml29623 cs.bits.hdw.rbr_empty = 1; 43703859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 43713859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43723859Sml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 43733859Sml29623 if (status != NXGE_OK) { 43743859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43753859Sml29623 "==> nxge_rxdma_start_channel: " 43763859Sml29623 "init rxdma control register failed (0x%08x channel %d", 43773859Sml29623 status, channel)); 43783859Sml29623 return (status); 43793859Sml29623 } 43803859Sml29623 43813859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43823859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 43833859Sml29623 43843859Sml29623 /* 43853859Sml29623 * Load RXDMA descriptors, buffers, mailbox, 43863859Sml29623 * initialise the receive DMA channels and 43873859Sml29623 * enable each DMA channel. 
43883859Sml29623 */ 43893859Sml29623 status = nxge_enable_rxdma_channel(nxgep, 43906495Sspeer channel, rbr_p, rcr_p, mbox_p); 43913859Sml29623 43923859Sml29623 if (status != NXGE_OK) { 43933859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43946495Sspeer " nxge_rxdma_start_channel: " 43956495Sspeer " enable rxdma failed (0x%08x channel %d)", 43966495Sspeer status, channel)); 43976495Sspeer return (status); 43986495Sspeer } 43996495Sspeer 44006495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 44016495Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 44026495Sspeer 44036495Sspeer if (isLDOMguest(nxgep)) { 44046495Sspeer /* Add interrupt handler for this channel. */ 44056495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 44066495Sspeer != NXGE_OK) { 44076495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44083859Sml29623 " nxge_rxdma_start_channel: " 44096495Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 44106495Sspeer status, channel)); 44116495Sspeer } 44123859Sml29623 } 44133859Sml29623 44143859Sml29623 ent_mask.value = 0; 44153859Sml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 44163859Sml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 44173859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44183859Sml29623 &ent_mask); 44193859Sml29623 if (rs != NPI_SUCCESS) { 44203859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 44213859Sml29623 "==> nxge_rxdma_start_channel: " 44223859Sml29623 "init rxdma event masks failed (0x%08x channel %d)", 44233859Sml29623 status, channel)); 44243859Sml29623 return (NXGE_ERROR | rs); 44253859Sml29623 } 44263859Sml29623 44273859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 44283859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 44293859Sml29623 44303859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 44313859Sml29623 44323859Sml29623 return (NXGE_OK); 44333859Sml29623 } 44343859Sml29623 44353859Sml29623 static nxge_status_t 
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	/*
	 * Quiesce one RXDMA channel: (service domain only) stop the
	 * RxMAC and drain the IPP port, then reset the channel, mask
	 * all of its events, clear its control/status register, and
	 * disable it.  The RxMAC is re-enabled before returning so the
	 * port's other channels keep receiving.  The "A.9.x" comments
	 * below refer to steps in the hardware reset sequence.
	 */
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Guests have no direct MAC/IPP access; skip those steps. */
	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks. */
	/* Mask every event so the stopped channel cannot interrupt. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);

	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}

/*
 * nxge_rxdma_handle_sys_errors
 *
 *	Read the RX control/data FIFO error interrupt status and, when
 *	an EOP (end-of-packet) error is attributable to this port,
 *	dispatch to nxge_rxdma_handle_port_errors().
 *
 * Returns:
 *	NXGE_OK, or an error status from the NPI read or the port
 *	error handler.
 */
nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);

	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}

	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
		/*
		 * Claim the error only if the FIFO_EOP_PORTn bit for
		 * our own MAC port is set in either EOP error field.
		 */
		switch (nxgep->mac.portnum) {
		case 0:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 1:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 2:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 3:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		default:
			return (NXGE_ERROR);
		}
	}

	/* zcp/ipp_err_status are only set (and read) when my_err is set. */
	if (my_err) {
		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
		    zcp_err_status);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}

/*
 * nxge_rxdma_handle_port_errors
 *
 *	Account IPP/ZCP EOP errors for this port, post FM ereports,
 *	and run full RX port recovery when a fatal error is seen.
 *
 * Arguments:
 * 	nxgep
 * 	ipp_status	IPP EOP error bits; bit n corresponds to port n
 *			(see the (0x1 << portn) tests below).
 * 	zcp_status	ZCP EOP error bits, same layout.
 */
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_error: "
		    " fatal error on Port #%d\n",
		    portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	/*
	 * Recover one RXDMA channel from a fatal error: disable and
	 * reset the channel under all three ring locks, re-initialize
	 * the RBR/RCR indices and descriptors, re-post any buffers
	 * whose use counts show they can be reclaimed, then restart
	 * the channel via nxge_rxdma_start_channel().
	 *
	 * Lock order (acquire): rcrp->lock, rbrp->lock, rbrp->post_lock;
	 * released in the reverse order on both success and failure.
	 */
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	/* A mask failure is logged but not fatal; recovery continues. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp =
	    (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	/* Rewind the RBR/RCR ring indices to their post-reset state. */
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	/* Zero the completion ring descriptors. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	/*
	 * Scan the buffer ring: buffers whose usage counters show they
	 * are fully consumed are marked free so they can be re-posted;
	 * others are reported (still referenced by the stack).
	 */
	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				/* 0xbaddcafe: poison marker for debug */
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);
fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}

/*
 * nxge_rx_port_fatal_err_recover
 *
 *	Whole-port RX recovery: disable the RxMAC, recover every RXDMA
 *	channel this instance owns, then reset and re-initialize the
 *	IPP and RxMAC.
 */
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	/* Let in-flight frames settle before touching the channels. */
	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep,
RX_CTL, "Stopping all RxDMA channels...")); 48266495Sspeer 48276495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 48286495Sspeer if ((1 << rdc) & set->owned.map) { 48296495Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 48306495Sspeer != NXGE_OK) { 48316495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48326495Sspeer "Could not recover channel %d", rdc)); 48336495Sspeer } 48343859Sml29623 } 48353859Sml29623 } 48363859Sml29623 48376495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 48383859Sml29623 48393859Sml29623 /* Reset IPP */ 48403859Sml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 48413859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48426929Smisaki "nxge_rx_port_fatal_err_recover: " 48436929Smisaki "Failed to reset IPP")); 48443859Sml29623 goto fail; 48453859Sml29623 } 48463859Sml29623 48473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 48483859Sml29623 48493859Sml29623 /* Reset RxMAC */ 48503859Sml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 48513859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48526929Smisaki "nxge_rx_port_fatal_err_recover: " 48536929Smisaki "Failed to reset RxMAC")); 48543859Sml29623 goto fail; 48553859Sml29623 } 48563859Sml29623 48573859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 48583859Sml29623 48593859Sml29623 /* Re-Initialize IPP */ 48603859Sml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 48613859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48626929Smisaki "nxge_rx_port_fatal_err_recover: " 48636929Smisaki "Failed to init IPP")); 48643859Sml29623 goto fail; 48653859Sml29623 } 48663859Sml29623 48673859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 48683859Sml29623 48693859Sml29623 /* Re-Initialize RxMAC */ 48703859Sml29623 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 48713859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48726929Smisaki "nxge_rx_port_fatal_err_recover: " 48736929Smisaki "Failed to reset RxMAC")); 48743859Sml29623 goto fail; 
48753859Sml29623 } 48763859Sml29623 48773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 48783859Sml29623 48793859Sml29623 /* Re-enable RxMAC */ 48803859Sml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 48813859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48826929Smisaki "nxge_rx_port_fatal_err_recover: " 48836929Smisaki "Failed to enable RxMAC")); 48843859Sml29623 goto fail; 48853859Sml29623 } 48863859Sml29623 48873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48886929Smisaki "Recovery Successful, RxPort Restored")); 48893859Sml29623 48903859Sml29623 return (NXGE_OK); 48913859Sml29623 fail: 48923859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 48933859Sml29623 return (status); 48943859Sml29623 } 48953859Sml29623 48963859Sml29623 void 48973859Sml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 48983859Sml29623 { 48993859Sml29623 rx_dma_ctl_stat_t cs; 49003859Sml29623 rx_ctl_dat_fifo_stat_t cdfs; 49013859Sml29623 49023859Sml29623 switch (err_id) { 49033859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 49043859Sml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 49053859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 49063859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 49073859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 49083859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 49093859Sml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 49103859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 49113859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 49123859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 49133859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 49143859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 49153859Sml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 49163859Sml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 49173859Sml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49186929Smisaki chan, &cs.value); 49193859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 
49203859Sml29623 cs.bits.hdw.rcr_ack_err = 1; 49213859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 49223859Sml29623 cs.bits.hdw.dc_fifo_err = 1; 49233859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 49243859Sml29623 cs.bits.hdw.rcr_sha_par = 1; 49253859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 49263859Sml29623 cs.bits.hdw.rbr_pre_par = 1; 49273859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 49283859Sml29623 cs.bits.hdw.rbr_tmout = 1; 49293859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 49303859Sml29623 cs.bits.hdw.rsp_cnt_err = 1; 49313859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 49323859Sml29623 cs.bits.hdw.byte_en_bus = 1; 49333859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 49343859Sml29623 cs.bits.hdw.rsp_dat_err = 1; 49353859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 49363859Sml29623 cs.bits.hdw.config_err = 1; 49373859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 49383859Sml29623 cs.bits.hdw.rcrincon = 1; 49393859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 49403859Sml29623 cs.bits.hdw.rcrfull = 1; 49413859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 49423859Sml29623 cs.bits.hdw.rbrfull = 1; 49433859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 49443859Sml29623 cs.bits.hdw.rbrlogpage = 1; 49453859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 49463859Sml29623 cs.bits.hdw.cfiglogpage = 1; 49475125Sjoycey #if defined(__i386) 49485125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 49496929Smisaki cs.value); 49505125Sjoycey #else 49513859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 49526929Smisaki cs.value); 49535125Sjoycey #endif 49543859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49556929Smisaki chan, cs.value); 49563859Sml29623 break; 49573859Sml29623 case 
NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 49583859Sml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 49593859Sml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 49603859Sml29623 cdfs.value = 0; 49613859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 49623859Sml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 49633859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 49643859Sml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 49653859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 49663859Sml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 49675125Sjoycey #if defined(__i386) 49685125Sjoycey cmn_err(CE_NOTE, 49696929Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49706929Smisaki cdfs.value); 49715125Sjoycey #else 49723859Sml29623 cmn_err(CE_NOTE, 49736929Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49746929Smisaki cdfs.value); 49755125Sjoycey #endif 49766495Sspeer NXGE_REG_WR64(nxgep->npi_handle, 49776495Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 49783859Sml29623 break; 49793859Sml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 49803859Sml29623 break; 49815165Syc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 49823859Sml29623 break; 49833859Sml29623 } 49843859Sml29623 } 49856495Sspeer 49866495Sspeer static void 49876495Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 49886495Sspeer { 49896495Sspeer rxring_info_t *ring_info; 49906495Sspeer int index; 49916495Sspeer uint32_t chunk_size; 49926495Sspeer uint64_t kaddr; 49936495Sspeer uint_t num_blocks; 49946495Sspeer 49956495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 49966495Sspeer 49976495Sspeer if (rbr_p == NULL) { 49986495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 49996495Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 50006495Sspeer return; 50016495Sspeer } 50026495Sspeer 50036495Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 50046495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50056495Sspeer "==> 
nxge_rxdma_databuf_free: DDI")); 50066495Sspeer return; 50076495Sspeer } 50086495Sspeer 50096495Sspeer ring_info = rbr_p->ring_info; 50106495Sspeer if (ring_info == NULL) { 50116495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50126495Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 50136495Sspeer return; 50146495Sspeer } 50156495Sspeer num_blocks = rbr_p->num_blocks; 50166495Sspeer for (index = 0; index < num_blocks; index++) { 50176495Sspeer kaddr = ring_info->buffer[index].kaddr; 50186495Sspeer chunk_size = ring_info->buffer[index].buf_size; 50196495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50206495Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 50216495Sspeer "kaddrp $%p chunk size %d", 50226495Sspeer index, kaddr, chunk_size)); 50236495Sspeer if (kaddr == NULL) continue; 50246495Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 50256495Sspeer ring_info->buffer[index].kaddr = NULL; 50266495Sspeer } 50276495Sspeer 50286495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 50296495Sspeer } 50306495Sspeer 50316495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 50326495Sspeer extern void contig_mem_free(void *, size_t); 50336495Sspeer #endif 50346495Sspeer 50356495Sspeer void 50366495Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 50376495Sspeer { 50386495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 50396495Sspeer 50406495Sspeer if (kaddr == NULL || !buf_size) { 50416495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50426495Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 50436495Sspeer kaddr, buf_size)); 50446495Sspeer return; 50456495Sspeer } 50466495Sspeer 50476495Sspeer switch (alloc_type) { 50486495Sspeer case KMEM_ALLOC: 50496495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50506495Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 50516495Sspeer kaddr, buf_size)); 50526495Sspeer #if defined(__i386) 50536495Sspeer KMEM_FREE((void *)(uint32_t)kaddr, 
buf_size); 50546495Sspeer #else 50556495Sspeer KMEM_FREE((void *)kaddr, buf_size); 50566495Sspeer #endif 50576495Sspeer break; 50586495Sspeer 50596495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 50606495Sspeer case CONTIG_MEM_ALLOC: 50616495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50626495Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 50636495Sspeer kaddr, buf_size)); 50646495Sspeer contig_mem_free((void *)kaddr, buf_size); 50656495Sspeer break; 50666495Sspeer #endif 50676495Sspeer 50686495Sspeer default: 50696495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50706495Sspeer "<== nxge_free_buf: unsupported alloc type %d", 50716495Sspeer alloc_type)); 50726495Sspeer return; 50736495Sspeer } 50746495Sspeer 50756495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 50766495Sspeer } 5077