13859Sml29623 /* 23859Sml29623 * CDDL HEADER START 33859Sml29623 * 43859Sml29623 * The contents of this file are subject to the terms of the 53859Sml29623 * Common Development and Distribution License (the "License"). 63859Sml29623 * You may not use this file except in compliance with the License. 73859Sml29623 * 83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93859Sml29623 * or http://www.opensolaris.org/os/licensing. 103859Sml29623 * See the License for the specific language governing permissions 113859Sml29623 * and limitations under the License. 123859Sml29623 * 133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each 143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the 163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying 173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner] 183859Sml29623 * 193859Sml29623 * CDDL HEADER END 203859Sml29623 */ 213859Sml29623 /* 228661SSantwona.Behera@Sun.COM * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 233859Sml29623 * Use is subject to license terms. 
243859Sml29623 */ 253859Sml29623 263859Sml29623 #include <sys/nxge/nxge_impl.h> 273859Sml29623 #include <sys/nxge/nxge_rxdma.h> 286495Sspeer #include <sys/nxge/nxge_hio.h> 296495Sspeer 306495Sspeer #if !defined(_BIG_ENDIAN) 316495Sspeer #include <npi_rx_rd32.h> 326495Sspeer #endif 336495Sspeer #include <npi_rx_rd64.h> 346495Sspeer #include <npi_rx_wr64.h> 353859Sml29623 363859Sml29623 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 376495Sspeer (rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid) 383859Sml29623 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 393859Sml29623 (rdc + nxgep->pt_config.hw_config.start_rdc) 403859Sml29623 413859Sml29623 /* 423859Sml29623 * Globals: tunable parameters (/etc/system or adb) 433859Sml29623 * 443859Sml29623 */ 453859Sml29623 extern uint32_t nxge_rbr_size; 463859Sml29623 extern uint32_t nxge_rcr_size; 473859Sml29623 extern uint32_t nxge_rbr_spare_size; 483859Sml29623 493859Sml29623 extern uint32_t nxge_mblks_pending; 503859Sml29623 513859Sml29623 /* 523859Sml29623 * Tunable to reduce the amount of time spent in the 533859Sml29623 * ISR doing Rx Processing. 543859Sml29623 */ 553859Sml29623 extern uint32_t nxge_max_rx_pkts; 563859Sml29623 boolean_t nxge_jumbo_enable; 573859Sml29623 583859Sml29623 /* 593859Sml29623 * Tunables to manage the receive buffer blocks. 603859Sml29623 * 613859Sml29623 * nxge_rx_threshold_hi: copy all buffers. 623859Sml29623 * nxge_rx_bcopy_size_type: receive buffer block size type. 633859Sml29623 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
643859Sml29623 */ 653859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 663859Sml29623 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 673859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 683859Sml29623 696611Sml29623 extern uint32_t nxge_cksum_offload; 706495Sspeer 716495Sspeer static nxge_status_t nxge_map_rxdma(p_nxge_t, int); 726495Sspeer static void nxge_unmap_rxdma(p_nxge_t, int); 733859Sml29623 743859Sml29623 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 756495Sspeer 766495Sspeer static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int); 776495Sspeer static void nxge_rxdma_hw_stop(p_nxge_t, int); 783859Sml29623 793859Sml29623 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 803859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 813859Sml29623 uint32_t, 823859Sml29623 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 833859Sml29623 p_rx_mbox_t *); 843859Sml29623 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 853859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 863859Sml29623 873859Sml29623 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 883859Sml29623 uint16_t, 893859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 903859Sml29623 p_rx_rcr_ring_t *, p_rx_mbox_t *); 913859Sml29623 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 923859Sml29623 p_rx_rcr_ring_t, p_rx_mbox_t); 933859Sml29623 943859Sml29623 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 953859Sml29623 uint16_t, 963859Sml29623 p_nxge_dma_common_t *, 973859Sml29623 p_rx_rbr_ring_t *, uint32_t); 983859Sml29623 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 993859Sml29623 p_rx_rbr_ring_t); 1003859Sml29623 1013859Sml29623 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 1023859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 1033859Sml29623 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 1043859Sml29623 1056495Sspeer static mblk_t * 1066495Sspeer 
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
	p_rx_rcr_ring_t,
	p_rcr_entry_t,
	boolean_t *,
	mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
	uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
	p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);

/*
 * nxge_init_rxdma_channels
 *
 *	Bring up all receive DMA channels owned by this instance.
 *	In a service domain, first start the hardware that is common
 *	to all channels; then walk each logical receive group in
 *	rx_set.lg.map and add every RDC in that group's channel map.
 *
 * Arguments:
 * 	nxgep
 *
 * Returns:
 *	NXGE_OK on success.
 *	NXGE_ERROR if the common start fails or any nxge_grp_dc_add()
 *	fails; in the latter case every channel added so far is removed
 *	again before returning.
 */
nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int i, count, channel;
	nxge_grp_t *group;
	dc_map_t map;
	int dev_gindex;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	/* Common (FZC) hardware may only be started by a service domain. */
	if (!isLDOMguest(nxgep)) {
		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
			cmn_err(CE_NOTE, "hw_start_common");
			return (NXGE_ERROR);
		}
	}

	/*
	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
	 * We only have 8 hardware RDC tables, but we may have
	 * up to 16 logical (software-defined) groups of RDCS,
	 * if we make use of layer 3 & 4 hardware classification.
	 */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			/* Translate the logical group to a device group. */
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
				if ((1 << channel) & map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_RX, channel)))
						goto init_rxdma_channels_exit;
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_OK);

init_rxdma_channels_exit:
	/*
	 * Unwind: walk the same group/channel maps again and remove
	 * every channel that was (or may have been) added above.
	 */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];
			dev_gindex =
			    nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
			map = nxgep->pt_config.rdc_grps[dev_gindex].map;
			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
				if ((1 << channel) & map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_RX, channel);
				}
			}
		}
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_ERROR);
}

/*
 * nxge_init_rxdma_channel
 *
 *	Initialize a single receive DMA channel: map its descriptor
 *	rings and buffers, (in a sun4v guest) bind its RCR interrupt,
 *	start the channel hardware, and create its kstats.
 *
 * Arguments:
 * 	nxge
 * 	channel		The channel to initialize.
 *
 * Returns:
 *	NXGE_OK, or the failing step's status (the channel is unmapped
 *	again on interrupt-bind or hw-start failure).
 *
 * NOTE(review): when nxge_rxdma_hw_start() fails, the channel is
 * unmapped but the kstat setup below still runs before the error
 * status is returned — confirm this is intended.
 */
nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
{
	nxge_status_t status;

	NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));

	status = nxge_map_rxdma(nxge, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
		    "<== nxge_init_rxdma: status 0x%x", status));
		return (status);
	}

#if defined(sun4v)
	if (isLDOMguest(nxge)) {
		/* set rcr_ring */
		p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];

		status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
		if (status != NXGE_OK) {
			nxge_unmap_rxdma(nxge, channel);
			return (status);
		}
	}
#endif

	status = nxge_rxdma_hw_start(nxge, channel);
	if (status != NXGE_OK) {
		nxge_unmap_rxdma(nxge, channel);
	}

	/* Create this channel's kstats only once. */
	if (!nxge->statsp->rdc_ksp[channel])
		nxge_setup_rdc_kstats(nxge, channel);

	NXGE_DEBUG_MSG((nxge, MEM2_CTL,
	    "<== nxge_init_rxdma_channel: status 0x%x", status));

	return (status);
}

2443859Sml29623 void 2453859Sml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 2463859Sml29623 { 2476495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 2486495Sspeer int rdc; 2496495Sspeer 2503859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 2513859Sml29623 2526495Sspeer if (set->owned.map == 0) { 2536495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2546495Sspeer "nxge_uninit_rxdma_channels: no channels")); 2556495Sspeer return; 2566495Sspeer } 2576495Sspeer 2586495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 2596495Sspeer if ((1 << rdc) & set->owned.map) { 2606495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 2616495Sspeer } 2626495Sspeer } 2636495Sspeer 2646495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 2656495Sspeer } 2666495Sspeer 2676495Sspeer void 2686495Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 2696495Sspeer { 2706495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel")); 2716495Sspeer 2726495Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 2736495Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 2746495Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 2756495Sspeer } 2766495Sspeer 2776495Sspeer nxge_rxdma_hw_stop(nxgep, channel); 2786495Sspeer nxge_unmap_rxdma(nxgep, channel); 2796495Sspeer 2806495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 2813859Sml29623 } 2823859Sml29623 2833859Sml29623 nxge_status_t 2843859Sml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 2853859Sml29623 { 2863859Sml29623 npi_handle_t handle; 2873859Sml29623 npi_status_t rs = NPI_SUCCESS; 2883859Sml29623 nxge_status_t status = NXGE_OK; 2893859Sml29623 2907812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 2913859Sml29623 2923859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2933859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 2943859Sml29623 2953859Sml29623 if (rs != NPI_SUCCESS) { 
2963859Sml29623 status = NXGE_ERROR | rs; 2973859Sml29623 } 2983859Sml29623 2997812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 3007812SMichael.Speer@Sun.COM 3013859Sml29623 return (status); 3023859Sml29623 } 3033859Sml29623 3043859Sml29623 void 3053859Sml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 3063859Sml29623 { 3076495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 3086495Sspeer int rdc; 3093859Sml29623 3103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 3113859Sml29623 3126495Sspeer if (!isLDOMguest(nxgep)) { 3136495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 3146495Sspeer (void) npi_rxdma_dump_fzc_regs(handle); 3156495Sspeer } 3166495Sspeer 3176495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 3186495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 3196495Sspeer "nxge_rxdma_regs_dump_channels: " 3206495Sspeer "NULL ring pointer(s)")); 3213859Sml29623 return; 3223859Sml29623 } 3236495Sspeer 3246495Sspeer if (set->owned.map == 0) { 3253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3266495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 3273859Sml29623 return; 3283859Sml29623 } 3293859Sml29623 3306495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3316495Sspeer if ((1 << rdc) & set->owned.map) { 3326495Sspeer rx_rbr_ring_t *ring = 3336495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 3346495Sspeer if (ring) { 3356495Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 3366495Sspeer } 3373859Sml29623 } 3383859Sml29623 } 3393859Sml29623 3403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 3413859Sml29623 } 3423859Sml29623 3433859Sml29623 nxge_status_t 3443859Sml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 3453859Sml29623 { 3463859Sml29623 npi_handle_t handle; 3473859Sml29623 npi_status_t rs = NPI_SUCCESS; 3483859Sml29623 nxge_status_t status = NXGE_OK; 3493859Sml29623 3503859Sml29623 NXGE_DEBUG_MSG((nxgep, 
DMA_CTL, "==> nxge_dump_rxdma_channel")); 3513859Sml29623 3523859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3533859Sml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel); 3543859Sml29623 3553859Sml29623 if (rs != NPI_SUCCESS) { 3563859Sml29623 status = NXGE_ERROR | rs; 3573859Sml29623 } 3583859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 3593859Sml29623 return (status); 3603859Sml29623 } 3613859Sml29623 3623859Sml29623 nxge_status_t 3633859Sml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 3643859Sml29623 p_rx_dma_ent_msk_t mask_p) 3653859Sml29623 { 3663859Sml29623 npi_handle_t handle; 3673859Sml29623 npi_status_t rs = NPI_SUCCESS; 3683859Sml29623 nxge_status_t status = NXGE_OK; 3693859Sml29623 3703859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3716929Smisaki "<== nxge_init_rxdma_channel_event_mask")); 3723859Sml29623 3733859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3743859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 3753859Sml29623 if (rs != NPI_SUCCESS) { 3763859Sml29623 status = NXGE_ERROR | rs; 3773859Sml29623 } 3783859Sml29623 3793859Sml29623 return (status); 3803859Sml29623 } 3813859Sml29623 3823859Sml29623 nxge_status_t 3833859Sml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3843859Sml29623 p_rx_dma_ctl_stat_t cs_p) 3853859Sml29623 { 3863859Sml29623 npi_handle_t handle; 3873859Sml29623 npi_status_t rs = NPI_SUCCESS; 3883859Sml29623 nxge_status_t status = NXGE_OK; 3893859Sml29623 3903859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3916929Smisaki "<== nxge_init_rxdma_channel_cntl_stat")); 3923859Sml29623 3933859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3943859Sml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 3953859Sml29623 3963859Sml29623 if (rs != NPI_SUCCESS) { 3973859Sml29623 status = NXGE_ERROR | rs; 3983859Sml29623 } 3993859Sml29623 4003859Sml29623 return (status); 4013859Sml29623 } 4023859Sml29623 4036495Sspeer /* 
 * nxge_rxdma_cfg_rdcgrp_default_rdc
 *
 *	Set the default RDC for an RDC Group (Table)
 *
 * Arguments:
 * 	nxgep
 * 	rdcgrp	The group to modify
 * 	rdc	The new default RDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_table_default_rdc()
 *
 * Registers accessed:
 *	RDC_TBL_REG: FZC_ZCP + 0x10000
 *
 * Context:
 *	Service domain
 */
nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(
	p_nxge_t nxgep,
	uint8_t rdcgrp,
	uint8_t rdc)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;
	p_nxge_dma_pt_cfg_t p_dma_cfgp;
	p_nxge_rdc_grp_t rdc_grp_p;
	uint8_t actual_rdcgrp, actual_rdc;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/*
	 * This has to be rewritten.  Do we even allow this anymore?
	 */
	/* Record the new default in the software copy of the group. */
	rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
	RDC_MAP_IN(rdc_grp_p->map, rdc);
	rdc_grp_p->def_rdc = rdc;

	/* Translate the logical group/RDC to the hardware numbers. */
	actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
	actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

	rs = npi_rxdma_cfg_rdc_table_default_rdc(
	    handle, actual_rdcgrp, actual_rdc);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_port_default_rdc
 *
 *	Set the port's default RDC (the channel that receives traffic
 *	matching no other classification).
 */
nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
	npi_handle_t handle;

	uint8_t actual_rdc;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_port_default_rdc"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	actual_rdc = rdc;	/* XXX Hack! — rdc is NOT translated via
				 * NXGE_ACTUAL_RDC() here, unlike the group
				 * variant above. */
	rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);


	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " <== nxge_rxdma_cfg_port_default_rdc"));

	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_rcr_threshold
 *
 *	Set an RDC's RCR packet threshold (packets received before an
 *	interrupt is posted).
 */
nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
	npi_status_t rs = NPI_SUCCESS;
	npi_handle_t handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    " ==> nxge_rxdma_cfg_rcr_threshold"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_cfg_rcr_timeout
 *
 *	Set (enable != 0) or disable (enable == 0) an RDC's RCR timeout.
 */
nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
	npi_status_t rs = NPI_SUCCESS;
	npi_handle_t handle;
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	if (enable == 0) {
		rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
	} else {
		rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
		    tout);
	}

	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}
	NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
	return (NXGE_OK);
}

/*
 * nxge_enable_rxdma_channel
 *
 *	Program one RDC from the configuration composed at init time
 *	(mailbox, RBR, buffer sizes, RCR, threshold/timeout), enable the
 *	DMA (service domain only), kick the RBR, and clear the RBR-empty
 *	state.  The register-write ordering below is deliberate; do not
 *	reorder.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	rbr_p		The channel's receive block ring.
 * 	rcr_p		The channel's receive completion ring.
 * 	mbox_p		The channel's mailbox.
 *
 * Returns:
 *	NXGE_OK, or NXGE_ERROR | <npi status> on the first failing
 *	NPI call.
 */
nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t handle;
	rdc_desc_cfg_t rdc_desc;
	p_rcrcfig_b_t cfgb_p;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Use configuration data composed at init time.
	 * Write to hardware the receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	/*
	 * NOTE(review): no default case — rdc_desc.page_size is left
	 * uninitialized if rx_bksize_code is not one of these four
	 * values; presumably the code is validated earlier — confirm.
	 */
	switch (nxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	case RBR_BKSIZE_16K:
		rdc_desc.page_size = SIZE_16KB;
		break;
	case RBR_BKSIZE_32K:
		rdc_desc.page_size = SIZE_32KB;
		break;
	}

	/* The three packet buffer sizes this ring supports. */
	rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
	/* For now, disable this timeout in a guest domain. */
	if (isLDOMguest(nxgep)) {
		rdc_desc.rcr_timeout = 0;
		rdc_desc.rcr_timeout_enable = 0;
	} else {
		rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
		rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
	    rbr_p->npi_pkt_buf_size2));

	/* Write the assembled descriptor configuration to the RDC. */
	rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != NPI_SUCCESS) {
		return (NXGE_ERROR | rs);
	}

	if (!isLDOMguest(nxgep)) {
		/* Enable the DMA */
		rs = npi_rxdma_cfg_rdc_enable(handle, channel);
		if (rs != NPI_SUCCESS) {
			return (NXGE_ERROR | rs);
		}
	}

	/* Kick the DMA engine. */
	npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);

	if (!isLDOMguest(nxgep)) {
		/* Clear the rbr empty bit */
		(void) npi_rxdma_channel_rbr_empty_clear(handle, channel);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

	return (NXGE_OK);
}

/*
 * nxge_disable_rxdma_channel
 *
 *	Disable one RDC's DMA engine.
 */
nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t handle;
	npi_status_t rs = NPI_SUCCESS;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	/* disable the DMA */
	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_disable_rxdma_channel:failed (0x%x)",
		    rs));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
	return (NXGE_OK);
}

/*
 * nxge_rxdma_channel_rcrflush
 *
 *	Flush an RDC's RCR so the hardware's view is written back.
 *
 * NOTE(review): both debug messages use the "<==" (exit) tag; the
 * first should presumably be "==>" — cosmetic only.
 */
nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t handle;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflush"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	npi_rxdma_rdc_rcr_flush(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflsh"));
	return (status);

}

/* Midpoint of a closed index interval [l, r] (rounded up). */
#define	MID_INDEX(l, r)	((r + l + 1) >> 1)

/*
 * Direction encodings for the binary search in nxge_rxbuf_pp_to_vp():
 * base_side/end_side each take TO_LEFT or TO_RIGHT, and their sum
 * selects the next action (IN_MIDDLE == address within this chunk).
 */
#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff

/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate a packet buffer's DMA (physical-side) address, as
 *	reported in an RCR entry, back to its driver virtual address,
 *	buffer offset and message index, by searching the ring's
 *	buffer-chunk index table (binary search with per-buffer-size
 *	hints).
 */
/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
	uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
	uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int bufsize;
	uint64_t pktbuf_pp;
	uint64_t dvma_addr;
	rxring_info_t *ring_info;
	int base_side, end_side;
	int r_index, l_index, anchor_index;
	int found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t
chunk_index, block_index, total_index; 7073859Sml29623 int max_iterations, iteration; 7083859Sml29623 rxbuf_index_info_t *bufinfo; 7093859Sml29623 7103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 7113859Sml29623 7123859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7136929Smisaki "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 7146929Smisaki pkt_buf_addr_pp, 7156929Smisaki pktbufsz_type)); 7165125Sjoycey #if defined(__i386) 7175125Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 7185125Sjoycey #else 7193859Sml29623 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 7205125Sjoycey #endif 7213859Sml29623 7223859Sml29623 switch (pktbufsz_type) { 7233859Sml29623 case 0: 7243859Sml29623 bufsize = rbr_p->pkt_buf_size0; 7253859Sml29623 break; 7263859Sml29623 case 1: 7273859Sml29623 bufsize = rbr_p->pkt_buf_size1; 7283859Sml29623 break; 7293859Sml29623 case 2: 7303859Sml29623 bufsize = rbr_p->pkt_buf_size2; 7313859Sml29623 break; 7323859Sml29623 case RCR_SINGLE_BLOCK: 7333859Sml29623 bufsize = 0; 7343859Sml29623 anchor_index = 0; 7353859Sml29623 break; 7363859Sml29623 default: 7373859Sml29623 return (NXGE_ERROR); 7383859Sml29623 } 7393859Sml29623 7403859Sml29623 if (rbr_p->num_blocks == 1) { 7413859Sml29623 anchor_index = 0; 7423859Sml29623 ring_info = rbr_p->ring_info; 7433859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 7443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7456929Smisaki "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 7466929Smisaki "buf_pp $%p btype %d anchor_index %d " 7476929Smisaki "bufinfo $%p", 7486929Smisaki pkt_buf_addr_pp, 7496929Smisaki pktbufsz_type, 7506929Smisaki anchor_index, 7516929Smisaki bufinfo)); 7523859Sml29623 7533859Sml29623 goto found_index; 7543859Sml29623 } 7553859Sml29623 7563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7576929Smisaki "==> nxge_rxbuf_pp_to_vp: " 7586929Smisaki "buf_pp $%p btype %d anchor_index %d", 7596929Smisaki pkt_buf_addr_pp, 7606929Smisaki pktbufsz_type, 7616929Smisaki anchor_index)); 
7623859Sml29623 7633859Sml29623 ring_info = rbr_p->ring_info; 7643859Sml29623 found = B_FALSE; 7653859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 7663859Sml29623 iteration = 0; 7673859Sml29623 max_iterations = ring_info->max_iterations; 7683859Sml29623 /* 7693859Sml29623 * First check if this block has been seen 7703859Sml29623 * recently. This is indicated by a hint which 7713859Sml29623 * is initialized when the first buffer of the block 7723859Sml29623 * is seen. The hint is reset when the last buffer of 7733859Sml29623 * the block has been processed. 7743859Sml29623 * As three block sizes are supported, three hints 7753859Sml29623 * are kept. The idea behind the hints is that once 7763859Sml29623 * the hardware uses a block for a buffer of that 7773859Sml29623 * size, it will use it exclusively for that size 7783859Sml29623 * and will use it until it is exhausted. It is assumed 7793859Sml29623 * that there would a single block being used for the same 7803859Sml29623 * buffer sizes at any given time. 
7813859Sml29623 */ 7823859Sml29623 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 7833859Sml29623 anchor_index = ring_info->hint[pktbufsz_type]; 7843859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 7853859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 7863859Sml29623 if ((pktbuf_pp >= dvma_addr) && 7876929Smisaki (pktbuf_pp < (dvma_addr + chunk_size))) { 7883859Sml29623 found = B_TRUE; 7893859Sml29623 /* 7903859Sml29623 * check if this is the last buffer in the block 7913859Sml29623 * If so, then reset the hint for the size; 7923859Sml29623 */ 7933859Sml29623 7943859Sml29623 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 7953859Sml29623 ring_info->hint[pktbufsz_type] = NO_HINT; 7963859Sml29623 } 7973859Sml29623 } 7983859Sml29623 7993859Sml29623 if (found == B_FALSE) { 8003859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8016929Smisaki "==> nxge_rxbuf_pp_to_vp: (!found)" 8026929Smisaki "buf_pp $%p btype %d anchor_index %d", 8036929Smisaki pkt_buf_addr_pp, 8046929Smisaki pktbufsz_type, 8056929Smisaki anchor_index)); 8063859Sml29623 8073859Sml29623 /* 8083859Sml29623 * This is the first buffer of the block of this 8093859Sml29623 * size. Need to search the whole information 8103859Sml29623 * array. 8113859Sml29623 * the search algorithm uses a binary tree search 8123859Sml29623 * algorithm. It assumes that the information is 8133859Sml29623 * already sorted with increasing order 8143859Sml29623 * info[0] < info[1] < info[2] .... 
< info[n-1] 8153859Sml29623 * where n is the size of the information array 8163859Sml29623 */ 8173859Sml29623 r_index = rbr_p->num_blocks - 1; 8183859Sml29623 l_index = 0; 8193859Sml29623 search_done = B_FALSE; 8203859Sml29623 anchor_index = MID_INDEX(r_index, l_index); 8213859Sml29623 while (search_done == B_FALSE) { 8223859Sml29623 if ((r_index == l_index) || 8236929Smisaki (iteration >= max_iterations)) 8243859Sml29623 search_done = B_TRUE; 8253859Sml29623 end_side = TO_RIGHT; /* to the right */ 8263859Sml29623 base_side = TO_LEFT; /* to the left */ 8273859Sml29623 /* read the DVMA address information and sort it */ 8283859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 8293859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 8303859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8316929Smisaki "==> nxge_rxbuf_pp_to_vp: (searching)" 8326929Smisaki "buf_pp $%p btype %d " 8336929Smisaki "anchor_index %d chunk_size %d dvmaaddr $%p", 8346929Smisaki pkt_buf_addr_pp, 8356929Smisaki pktbufsz_type, 8366929Smisaki anchor_index, 8376929Smisaki chunk_size, 8386929Smisaki dvma_addr)); 8393859Sml29623 8403859Sml29623 if (pktbuf_pp >= dvma_addr) 8413859Sml29623 base_side = TO_RIGHT; /* to the right */ 8423859Sml29623 if (pktbuf_pp < (dvma_addr + chunk_size)) 8433859Sml29623 end_side = TO_LEFT; /* to the left */ 8443859Sml29623 8453859Sml29623 switch (base_side + end_side) { 8466929Smisaki case IN_MIDDLE: 8476929Smisaki /* found */ 8486929Smisaki found = B_TRUE; 8496929Smisaki search_done = B_TRUE; 8506929Smisaki if ((pktbuf_pp + bufsize) < 8516929Smisaki (dvma_addr + chunk_size)) 8526929Smisaki ring_info->hint[pktbufsz_type] = 8536929Smisaki bufinfo[anchor_index].buf_index; 8546929Smisaki break; 8556929Smisaki case BOTH_RIGHT: 8566929Smisaki /* not found: go to the right */ 8576929Smisaki l_index = anchor_index + 1; 8586929Smisaki anchor_index = MID_INDEX(r_index, l_index); 8596929Smisaki break; 8606929Smisaki 8616929Smisaki case BOTH_LEFT: 8626929Smisaki /* not found: go 
to the left */ 8636929Smisaki r_index = anchor_index - 1; 8646929Smisaki anchor_index = MID_INDEX(r_index, l_index); 8656929Smisaki break; 8666929Smisaki default: /* should not come here */ 8676929Smisaki return (NXGE_ERROR); 8683859Sml29623 } 8693859Sml29623 iteration++; 8703859Sml29623 } 8713859Sml29623 8723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8736929Smisaki "==> nxge_rxbuf_pp_to_vp: (search done)" 8746929Smisaki "buf_pp $%p btype %d anchor_index %d", 8756929Smisaki pkt_buf_addr_pp, 8766929Smisaki pktbufsz_type, 8776929Smisaki anchor_index)); 8783859Sml29623 } 8793859Sml29623 8803859Sml29623 if (found == B_FALSE) { 8813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8826929Smisaki "==> nxge_rxbuf_pp_to_vp: (search failed)" 8836929Smisaki "buf_pp $%p btype %d anchor_index %d", 8846929Smisaki pkt_buf_addr_pp, 8856929Smisaki pktbufsz_type, 8866929Smisaki anchor_index)); 8873859Sml29623 return (NXGE_ERROR); 8883859Sml29623 } 8893859Sml29623 8903859Sml29623 found_index: 8913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8926929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 8936929Smisaki "buf_pp $%p btype %d bufsize %d anchor_index %d", 8946929Smisaki pkt_buf_addr_pp, 8956929Smisaki pktbufsz_type, 8966929Smisaki bufsize, 8976929Smisaki anchor_index)); 8983859Sml29623 8993859Sml29623 /* index of the first block in this chunk */ 9003859Sml29623 chunk_index = bufinfo[anchor_index].start_index; 9013859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 9023859Sml29623 page_size_mask = ring_info->block_size_mask; 9033859Sml29623 9043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9056929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 9066929Smisaki "buf_pp $%p btype %d bufsize %d " 9076929Smisaki "anchor_index %d chunk_index %d dvma $%p", 9086929Smisaki pkt_buf_addr_pp, 9096929Smisaki pktbufsz_type, 9106929Smisaki bufsize, 9116929Smisaki anchor_index, 9126929Smisaki chunk_index, 9136929Smisaki dvma_addr)); 9143859Sml29623 9153859Sml29623 offset = pktbuf_pp - dvma_addr; 
/* offset within the chunk */ 9163859Sml29623 block_size = rbr_p->block_size; /* System block(page) size */ 9173859Sml29623 9183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9196929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 9206929Smisaki "buf_pp $%p btype %d bufsize %d " 9216929Smisaki "anchor_index %d chunk_index %d dvma $%p " 9226929Smisaki "offset %d block_size %d", 9236929Smisaki pkt_buf_addr_pp, 9246929Smisaki pktbufsz_type, 9256929Smisaki bufsize, 9266929Smisaki anchor_index, 9276929Smisaki chunk_index, 9286929Smisaki dvma_addr, 9296929Smisaki offset, 9306929Smisaki block_size)); 9313859Sml29623 9323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 9333859Sml29623 9343859Sml29623 block_index = (offset / block_size); /* index within chunk */ 9353859Sml29623 total_index = chunk_index + block_index; 9363859Sml29623 9373859Sml29623 9383859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9396929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9406929Smisaki "total_index %d dvma_addr $%p " 9416929Smisaki "offset %d block_size %d " 9426929Smisaki "block_index %d ", 9436929Smisaki total_index, dvma_addr, 9446929Smisaki offset, block_size, 9456929Smisaki block_index)); 9465125Sjoycey #if defined(__i386) 9475125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 9486929Smisaki (uint32_t)offset); 9495125Sjoycey #else 9505125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 9516929Smisaki (uint64_t)offset); 9525125Sjoycey #endif 9533859Sml29623 9543859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9556929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9566929Smisaki "total_index %d dvma_addr $%p " 9576929Smisaki "offset %d block_size %d " 9586929Smisaki "block_index %d " 9596929Smisaki "*pkt_buf_addr_p $%p", 9606929Smisaki total_index, dvma_addr, 9616929Smisaki offset, block_size, 9626929Smisaki block_index, 9636929Smisaki *pkt_buf_addr_p)); 9643859Sml29623 9653859Sml29623 9663859Sml29623 *msg_index = total_index; 
9673859Sml29623 *bufoffset = (offset & page_size_mask); 9683859Sml29623 9693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9706929Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 9716929Smisaki "msg_index %d bufoffset_index %d", 9726929Smisaki *msg_index, 9736929Smisaki *bufoffset)); 9743859Sml29623 9753859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 9763859Sml29623 9773859Sml29623 return (NXGE_OK); 9783859Sml29623 } 9793859Sml29623 9803859Sml29623 /* 9813859Sml29623 * used by quick sort (qsort) function 9823859Sml29623 * to perform comparison 9833859Sml29623 */ 9843859Sml29623 static int 9853859Sml29623 nxge_sort_compare(const void *p1, const void *p2) 9863859Sml29623 { 9873859Sml29623 9883859Sml29623 rxbuf_index_info_t *a, *b; 9893859Sml29623 9903859Sml29623 a = (rxbuf_index_info_t *)p1; 9913859Sml29623 b = (rxbuf_index_info_t *)p2; 9923859Sml29623 9933859Sml29623 if (a->dvma_addr > b->dvma_addr) 9943859Sml29623 return (1); 9953859Sml29623 if (a->dvma_addr < b->dvma_addr) 9963859Sml29623 return (-1); 9973859Sml29623 return (0); 9983859Sml29623 } 9993859Sml29623 10003859Sml29623 10013859Sml29623 10023859Sml29623 /* 10033859Sml29623 * grabbed this sort implementation from common/syscall/avl.c 10043859Sml29623 * 10053859Sml29623 */ 10063859Sml29623 /* 10073859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
10083859Sml29623 * v = Ptr to array/vector of objs 10093859Sml29623 * n = # objs in the array 10103859Sml29623 * s = size of each obj (must be multiples of a word size) 10113859Sml29623 * f = ptr to function to compare two objs 10123859Sml29623 * returns (-1 = less than, 0 = equal, 1 = greater than 10133859Sml29623 */ 10143859Sml29623 void 10153859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 10163859Sml29623 { 10173859Sml29623 int g, i, j, ii; 10183859Sml29623 unsigned int *p1, *p2; 10193859Sml29623 unsigned int tmp; 10203859Sml29623 10213859Sml29623 /* No work to do */ 10223859Sml29623 if (v == NULL || n <= 1) 10233859Sml29623 return; 10243859Sml29623 /* Sanity check on arguments */ 10253859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 10263859Sml29623 ASSERT(s > 0); 10273859Sml29623 10283859Sml29623 for (g = n / 2; g > 0; g /= 2) { 10293859Sml29623 for (i = g; i < n; i++) { 10303859Sml29623 for (j = i - g; j >= 0 && 10316929Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 10326929Smisaki j -= g) { 10333859Sml29623 p1 = (unsigned *)(v + j * s); 10343859Sml29623 p2 = (unsigned *)(v + (j + g) * s); 10353859Sml29623 for (ii = 0; ii < s / 4; ii++) { 10363859Sml29623 tmp = *p1; 10373859Sml29623 *p1++ = *p2; 10383859Sml29623 *p2++ = tmp; 10393859Sml29623 } 10403859Sml29623 } 10413859Sml29623 } 10423859Sml29623 } 10433859Sml29623 } 10443859Sml29623 10453859Sml29623 /* 10463859Sml29623 * Initialize data structures required for rxdma 10473859Sml29623 * buffer dvma->vmem address lookup 10483859Sml29623 */ 10493859Sml29623 /*ARGSUSED*/ 10503859Sml29623 static nxge_status_t 10513859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 10523859Sml29623 { 10533859Sml29623 10543859Sml29623 int index; 10553859Sml29623 rxring_info_t *ring_info; 10563859Sml29623 int max_iteration = 0, max_index = 0; 10573859Sml29623 10583859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 10593859Sml29623 10603859Sml29623 
ring_info = rbrp->ring_info; 10613859Sml29623 ring_info->hint[0] = NO_HINT; 10623859Sml29623 ring_info->hint[1] = NO_HINT; 10633859Sml29623 ring_info->hint[2] = NO_HINT; 10643859Sml29623 max_index = rbrp->num_blocks; 10653859Sml29623 10663859Sml29623 /* read the DVMA address information and sort it */ 10673859Sml29623 /* do init of the information array */ 10683859Sml29623 10693859Sml29623 10703859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10716929Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 10723859Sml29623 10733859Sml29623 /* sort the array */ 10743859Sml29623 nxge_ksort((void *)ring_info->buffer, max_index, 10756929Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 10763859Sml29623 10773859Sml29623 10783859Sml29623 10793859Sml29623 for (index = 0; index < max_index; index++) { 10803859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10816929Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 10826929Smisaki " ioaddr $%p kaddr $%p size %x", 10836929Smisaki index, ring_info->buffer[index].dvma_addr, 10846929Smisaki ring_info->buffer[index].kaddr, 10856929Smisaki ring_info->buffer[index].buf_size)); 10863859Sml29623 } 10873859Sml29623 10883859Sml29623 max_iteration = 0; 10893859Sml29623 while (max_index >= (1ULL << max_iteration)) 10903859Sml29623 max_iteration++; 10913859Sml29623 ring_info->max_iterations = max_iteration + 1; 10923859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10936929Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 10946929Smisaki ring_info->max_iterations)); 10953859Sml29623 10963859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 10973859Sml29623 return (NXGE_OK); 10983859Sml29623 } 10993859Sml29623 11003859Sml29623 /* ARGSUSED */ 11013859Sml29623 void 11023859Sml29623 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 11033859Sml29623 { 11043859Sml29623 #ifdef NXGE_DEBUG 11053859Sml29623 11063859Sml29623 uint32_t bptr; 11073859Sml29623 uint64_t pp; 11083859Sml29623 11093859Sml29623 bptr = 
entry_p->bits.hdw.pkt_buf_addr; 11103859Sml29623 11113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11126929Smisaki "\trcr entry $%p " 11136929Smisaki "\trcr entry 0x%0llx " 11146929Smisaki "\trcr entry 0x%08x " 11156929Smisaki "\trcr entry 0x%08x " 11166929Smisaki "\tvalue 0x%0llx\n" 11176929Smisaki "\tmulti = %d\n" 11186929Smisaki "\tpkt_type = 0x%x\n" 11196929Smisaki "\tzero_copy = %d\n" 11206929Smisaki "\tnoport = %d\n" 11216929Smisaki "\tpromis = %d\n" 11226929Smisaki "\terror = 0x%04x\n" 11236929Smisaki "\tdcf_err = 0x%01x\n" 11246929Smisaki "\tl2_len = %d\n" 11256929Smisaki "\tpktbufsize = %d\n" 11266929Smisaki "\tpkt_buf_addr = $%p\n" 11276929Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 11286929Smisaki entry_p, 11296929Smisaki *(int64_t *)entry_p, 11306929Smisaki *(int32_t *)entry_p, 11316929Smisaki *(int32_t *)((char *)entry_p + 32), 11326929Smisaki entry_p->value, 11336929Smisaki entry_p->bits.hdw.multi, 11346929Smisaki entry_p->bits.hdw.pkt_type, 11356929Smisaki entry_p->bits.hdw.zero_copy, 11366929Smisaki entry_p->bits.hdw.noport, 11376929Smisaki entry_p->bits.hdw.promis, 11386929Smisaki entry_p->bits.hdw.error, 11396929Smisaki entry_p->bits.hdw.dcf_err, 11406929Smisaki entry_p->bits.hdw.l2_len, 11416929Smisaki entry_p->bits.hdw.pktbufsz, 11426929Smisaki bptr, 11436929Smisaki entry_p->bits.ldw.pkt_buf_addr)); 11443859Sml29623 11453859Sml29623 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 11466929Smisaki RCR_PKT_BUF_ADDR_SHIFT; 11473859Sml29623 11483859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 11496929Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 11503859Sml29623 #endif 11513859Sml29623 } 11523859Sml29623 11533859Sml29623 void 11543859Sml29623 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 11553859Sml29623 { 11563859Sml29623 npi_handle_t handle; 11573859Sml29623 rbr_stat_t rbr_stat; 11583859Sml29623 addr44_t hd_addr; 11593859Sml29623 addr44_t tail_addr; 11603859Sml29623 uint16_t qlen; 11613859Sml29623 11623859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 11636929Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 11643859Sml29623 11653859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11663859Sml29623 11673859Sml29623 /* RBR head */ 11683859Sml29623 hd_addr.addr = 0; 11693859Sml29623 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 11705165Syc148097 #if defined(__i386) 11713859Sml29623 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11726929Smisaki (void *)(uint32_t)hd_addr.addr); 11735125Sjoycey #else 11745165Syc148097 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11756929Smisaki (void *)hd_addr.addr); 11765125Sjoycey #endif 11773859Sml29623 11783859Sml29623 /* RBR stats */ 11793859Sml29623 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 11803859Sml29623 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 11813859Sml29623 11823859Sml29623 /* RCR tail */ 11833859Sml29623 tail_addr.addr = 0; 11843859Sml29623 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 11855165Syc148097 #if defined(__i386) 11863859Sml29623 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11876929Smisaki (void *)(uint32_t)tail_addr.addr); 11885125Sjoycey #else 11895165Syc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11906929Smisaki (void *)tail_addr.addr); 11915125Sjoycey #endif 11923859Sml29623 11933859Sml29623 /* RCR qlen */ 11943859Sml29623 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 11953859Sml29623 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 11963859Sml29623 11973859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11986929Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 11993859Sml29623 } 12003859Sml29623 12013859Sml29623 nxge_status_t 12023859Sml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12033859Sml29623 { 12046495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 12056495Sspeer nxge_status_t status; 12066495Sspeer npi_status_t rs; 12076495Sspeer int rdc; 12083859Sml29623 12093859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12106929Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 12113859Sml29623 12123859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 12133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12146495Sspeer "<== nxge_rxdma_mode: not initialized")); 12153859Sml29623 return (NXGE_ERROR); 12163859Sml29623 } 12176495Sspeer 12186495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 12196495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 12206495Sspeer "<== nxge_tx_port_fatal_err_recover: " 12216495Sspeer "NULL ring pointer(s)")); 12223859Sml29623 return (NXGE_ERROR); 12233859Sml29623 } 12243859Sml29623 12256495Sspeer if (set->owned.map == 0) { 12266495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 12276495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 12286495Sspeer return (NULL); 12296495Sspeer } 12306495Sspeer 12316495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 12326495Sspeer if ((1 << rdc) & set->owned.map) { 12336495Sspeer rx_rbr_ring_t *ring = 12346495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 12356495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 12366495Sspeer if (ring) { 12376495Sspeer if (enable) { 12386495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12396495Sspeer "==> nxge_rxdma_hw_mode: " 12406495Sspeer "channel %d (enable)", rdc)); 12416495Sspeer rs = npi_rxdma_cfg_rdc_enable 12426495Sspeer (handle, rdc); 12436495Sspeer } else { 12446495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12456495Sspeer "==> nxge_rxdma_hw_mode: " 12466495Sspeer "channel %d disable)", rdc)); 12476495Sspeer rs = npi_rxdma_cfg_rdc_disable 12486495Sspeer (handle, rdc); 12496495Sspeer } 12506495Sspeer } 12513859Sml29623 } 12523859Sml29623 } 12533859Sml29623 12543859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 12553859Sml29623 12563859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12576929Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 12583859Sml29623 12593859Sml29623 return (status); 12603859Sml29623 } 12613859Sml29623 12623859Sml29623 void 12633859Sml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 12643859Sml29623 { 12653859Sml29623 npi_handle_t handle; 12663859Sml29623 12673859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12686929Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 12693859Sml29623 12703859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12713859Sml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 12723859Sml29623 12733859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 12743859Sml29623 } 12753859Sml29623 12763859Sml29623 void 12773859Sml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 12783859Sml29623 { 12793859Sml29623 npi_handle_t handle; 12803859Sml29623 12813859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12826929Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 12833859Sml29623 12843859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12853859Sml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 12863859Sml29623 12873859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 12883859Sml29623 } 12893859Sml29623 12903859Sml29623 void 12913859Sml29623 nxge_hw_start_rx(p_nxge_t nxgep) 12923859Sml29623 { 12933859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 12943859Sml29623 12953859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 12963859Sml29623 (void) nxge_rx_mac_enable(nxgep); 12973859Sml29623 12983859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 12993859Sml29623 } 13003859Sml29623 13013859Sml29623 /*ARGSUSED*/ 13023859Sml29623 void 13033859Sml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 13043859Sml29623 { 13056495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 
13066495Sspeer int rdc; 13073859Sml29623 13083859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 13093859Sml29623 13106495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 13116495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 13126495Sspeer "<== nxge_tx_port_fatal_err_recover: " 13136495Sspeer "NULL ring pointer(s)")); 13143859Sml29623 return; 13153859Sml29623 } 13163859Sml29623 13176495Sspeer if (set->owned.map == 0) { 13183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13196495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 13203859Sml29623 return; 13213859Sml29623 } 13226495Sspeer 13236495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 13246495Sspeer if ((1 << rdc) & set->owned.map) { 13256495Sspeer rx_rbr_ring_t *ring = 13266495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 13276495Sspeer if (ring) { 13286495Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 13296495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 13306929Smisaki "==> nxge_fixup_rxdma_rings: " 13316929Smisaki "channel %d ring $%px", 13326929Smisaki rdc, ring)); 13336495Sspeer (void) nxge_rxdma_fixup_channel 13346495Sspeer (nxgep, rdc, rdc); 13356495Sspeer } 13366495Sspeer } 13373859Sml29623 } 13383859Sml29623 13393859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 13403859Sml29623 } 13413859Sml29623 13423859Sml29623 void 13433859Sml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 13443859Sml29623 { 13453859Sml29623 int i; 13463859Sml29623 13473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 13483859Sml29623 i = nxge_rxdma_get_ring_index(nxgep, channel); 13493859Sml29623 if (i < 0) { 13503859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13516929Smisaki "<== nxge_rxdma_fix_channel: no entry found")); 13523859Sml29623 return; 13533859Sml29623 } 13543859Sml29623 13553859Sml29623 nxge_rxdma_fixup_channel(nxgep, channel, i); 13563859Sml29623 13576495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 
13583859Sml29623 } 13593859Sml29623 13603859Sml29623 void 13613859Sml29623 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 13623859Sml29623 { 13633859Sml29623 int ndmas; 13643859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 13653859Sml29623 p_rx_rbr_ring_t *rbr_rings; 13663859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 13673859Sml29623 p_rx_rcr_ring_t *rcr_rings; 13683859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 13693859Sml29623 p_rx_mbox_t *rx_mbox_p; 13703859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 13713859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 13723859Sml29623 p_rx_rbr_ring_t rbrp; 13733859Sml29623 p_rx_rcr_ring_t rcrp; 13743859Sml29623 p_rx_mbox_t mboxp; 13753859Sml29623 p_nxge_dma_common_t dmap; 13763859Sml29623 nxge_status_t status = NXGE_OK; 13773859Sml29623 13783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 13793859Sml29623 13803859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 13813859Sml29623 13823859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 13833859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 13843859Sml29623 13853859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13863859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13876929Smisaki "<== nxge_rxdma_fixup_channel: buf not allocated")); 13883859Sml29623 return; 13893859Sml29623 } 13903859Sml29623 13913859Sml29623 ndmas = dma_buf_poolp->ndmas; 13923859Sml29623 if (!ndmas) { 13933859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13946929Smisaki "<== nxge_rxdma_fixup_channel: no dma allocated")); 13953859Sml29623 return; 13963859Sml29623 } 13973859Sml29623 13983859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 13993859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 14003859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 14013859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 14023859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 14033859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 14043859Sml29623 14053859Sml29623 /* 
Reinitialize the receive block and completion rings */ 14063859Sml29623 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 14076929Smisaki rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 14086929Smisaki mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 14093859Sml29623 14103859Sml29623 14113859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 14123859Sml29623 rbrp->rbr_rd_index = 0; 14133859Sml29623 rcrp->comp_rd_index = 0; 14143859Sml29623 rcrp->comp_wt_index = 0; 14153859Sml29623 14163859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14173859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14183859Sml29623 14193859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14206929Smisaki rbrp, rcrp, mboxp); 14213859Sml29623 if (status != NXGE_OK) { 14223859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14233859Sml29623 } 14243859Sml29623 if (status != NXGE_OK) { 14253859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14263859Sml29623 } 14273859Sml29623 14283859Sml29623 nxge_rxdma_fixup_channel_fail: 14293859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14306929Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 14313859Sml29623 14323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 14333859Sml29623 } 14343859Sml29623 14358275SEric Cheng /* 14368275SEric Cheng * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 14378275SEric Cheng * map <channel> to an index into nxgep->rx_rbr_rings. 
14388275SEric Cheng * (device ring index -> port ring index) 14398275SEric Cheng */ 14403859Sml29623 int 14413859Sml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 14423859Sml29623 { 14438275SEric Cheng int i, ndmas; 14448275SEric Cheng uint16_t rdc; 14458275SEric Cheng p_rx_rbr_rings_t rx_rbr_rings; 14468275SEric Cheng p_rx_rbr_ring_t *rbr_rings; 14478275SEric Cheng 14488275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14498275SEric Cheng "==> nxge_rxdma_get_ring_index: channel %d", channel)); 14508275SEric Cheng 14518275SEric Cheng rx_rbr_rings = nxgep->rx_rbr_rings; 14528275SEric Cheng if (rx_rbr_rings == NULL) { 14538275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14548275SEric Cheng "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 14558275SEric Cheng return (-1); 14568275SEric Cheng } 14578275SEric Cheng ndmas = rx_rbr_rings->ndmas; 14588275SEric Cheng if (!ndmas) { 14598275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14608275SEric Cheng "<== nxge_rxdma_get_ring_index: no channel")); 14618275SEric Cheng return (-1); 14628275SEric Cheng } 14638275SEric Cheng 14648275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14658275SEric Cheng "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 14668275SEric Cheng 14678275SEric Cheng rbr_rings = rx_rbr_rings->rbr_rings; 14688275SEric Cheng for (i = 0; i < ndmas; i++) { 14698275SEric Cheng rdc = rbr_rings[i]->rdc; 14708275SEric Cheng if (channel == rdc) { 14718275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14728275SEric Cheng "==> nxge_rxdma_get_rbr_ring: channel %d " 14738275SEric Cheng "(index %d) ring %d", channel, i, rbr_rings[i])); 14748275SEric Cheng return (i); 14758275SEric Cheng } 14768275SEric Cheng } 14778275SEric Cheng 14788275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14798275SEric Cheng "<== nxge_rxdma_get_rbr_ring_index: not found")); 14808275SEric Cheng 14818275SEric Cheng return (-1); 14823859Sml29623 } 14833859Sml29623 14843859Sml29623 p_rx_rbr_ring_t 14853859Sml29623 
nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14863859Sml29623 { 14876495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 14886495Sspeer nxge_channel_t rdc; 14893859Sml29623 14903859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14916929Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14923859Sml29623 14936495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 14946495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 14956495Sspeer "<== nxge_rxdma_get_rbr_ring: " 14966495Sspeer "NULL ring pointer(s)")); 14973859Sml29623 return (NULL); 14983859Sml29623 } 14996495Sspeer 15006495Sspeer if (set->owned.map == 0) { 15013859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15026495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 15033859Sml29623 return (NULL); 15043859Sml29623 } 15053859Sml29623 15066495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 15076495Sspeer if ((1 << rdc) & set->owned.map) { 15086495Sspeer rx_rbr_ring_t *ring = 15096495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 15106495Sspeer if (ring) { 15116495Sspeer if (channel == ring->rdc) { 15126495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 15136495Sspeer "==> nxge_rxdma_get_rbr_ring: " 15146495Sspeer "channel %d ring $%p", rdc, ring)); 15156495Sspeer return (ring); 15166495Sspeer } 15176495Sspeer } 15183859Sml29623 } 15193859Sml29623 } 15203859Sml29623 15213859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15226929Smisaki "<== nxge_rxdma_get_rbr_ring: not found")); 15233859Sml29623 15243859Sml29623 return (NULL); 15253859Sml29623 } 15263859Sml29623 15273859Sml29623 p_rx_rcr_ring_t 15283859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 15293859Sml29623 { 15306495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 15316495Sspeer nxge_channel_t rdc; 15323859Sml29623 15333859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15346929Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 15353859Sml29623 15366495Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 
15376495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 15386495Sspeer "<== nxge_rxdma_get_rcr_ring: " 15396495Sspeer "NULL ring pointer(s)")); 15403859Sml29623 return (NULL); 15413859Sml29623 } 15426495Sspeer 15436495Sspeer if (set->owned.map == 0) { 15443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15456495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 15463859Sml29623 return (NULL); 15473859Sml29623 } 15483859Sml29623 15496495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 15506495Sspeer if ((1 << rdc) & set->owned.map) { 15516495Sspeer rx_rcr_ring_t *ring = 15526495Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 15536495Sspeer if (ring) { 15546495Sspeer if (channel == ring->rdc) { 15556495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 15566495Sspeer "==> nxge_rxdma_get_rcr_ring: " 15576495Sspeer "channel %d ring $%p", rdc, ring)); 15586495Sspeer return (ring); 15596495Sspeer } 15606495Sspeer } 15613859Sml29623 } 15623859Sml29623 } 15633859Sml29623 15643859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15656929Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 15663859Sml29623 15673859Sml29623 return (NULL); 15683859Sml29623 } 15693859Sml29623 15703859Sml29623 /* 15713859Sml29623 * Static functions start here. 
15723859Sml29623 */ 15733859Sml29623 static p_rx_msg_t 15743859Sml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 15753859Sml29623 { 15763859Sml29623 p_rx_msg_t nxge_mp = NULL; 15773859Sml29623 p_nxge_dma_common_t dmamsg_p; 15783859Sml29623 uchar_t *buffer; 15793859Sml29623 15803859Sml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 15813859Sml29623 if (nxge_mp == NULL) { 15824185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15836929Smisaki "Allocation of a rx msg failed.")); 15843859Sml29623 goto nxge_allocb_exit; 15853859Sml29623 } 15863859Sml29623 15873859Sml29623 nxge_mp->use_buf_pool = B_FALSE; 15883859Sml29623 if (dmabuf_p) { 15893859Sml29623 nxge_mp->use_buf_pool = B_TRUE; 15903859Sml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15913859Sml29623 *dmamsg_p = *dmabuf_p; 15923859Sml29623 dmamsg_p->nblocks = 1; 15933859Sml29623 dmamsg_p->block_size = size; 15943859Sml29623 dmamsg_p->alength = size; 15953859Sml29623 buffer = (uchar_t *)dmabuf_p->kaddrp; 15963859Sml29623 15973859Sml29623 dmabuf_p->kaddrp = (void *) 15986929Smisaki ((char *)dmabuf_p->kaddrp + size); 15993859Sml29623 dmabuf_p->ioaddr_pp = (void *) 16006929Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 16013859Sml29623 dmabuf_p->alength -= size; 16023859Sml29623 dmabuf_p->offset += size; 16033859Sml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 16043859Sml29623 dmabuf_p->dma_cookie.dmac_size -= size; 16053859Sml29623 16063859Sml29623 } else { 16073859Sml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 16083859Sml29623 if (buffer == NULL) { 16094185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 16106929Smisaki "Allocation of a receive page failed.")); 16113859Sml29623 goto nxge_allocb_fail1; 16123859Sml29623 } 16133859Sml29623 } 16143859Sml29623 16153859Sml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 16163859Sml29623 if (nxge_mp->rx_mblk_p == NULL) { 16174185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 
16183859Sml29623 goto nxge_allocb_fail2; 16193859Sml29623 } 16203859Sml29623 16213859Sml29623 nxge_mp->buffer = buffer; 16223859Sml29623 nxge_mp->block_size = size; 16233859Sml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 16243859Sml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 16253859Sml29623 nxge_mp->ref_cnt = 1; 16263859Sml29623 nxge_mp->free = B_TRUE; 16273859Sml29623 nxge_mp->rx_use_bcopy = B_FALSE; 16283859Sml29623 16293859Sml29623 atomic_inc_32(&nxge_mblks_pending); 16303859Sml29623 16313859Sml29623 goto nxge_allocb_exit; 16323859Sml29623 16333859Sml29623 nxge_allocb_fail2: 16343859Sml29623 if (!nxge_mp->use_buf_pool) { 16353859Sml29623 KMEM_FREE(buffer, size); 16363859Sml29623 } 16373859Sml29623 16383859Sml29623 nxge_allocb_fail1: 16393859Sml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 16403859Sml29623 nxge_mp = NULL; 16413859Sml29623 16423859Sml29623 nxge_allocb_exit: 16433859Sml29623 return (nxge_mp); 16443859Sml29623 } 16453859Sml29623 16463859Sml29623 p_mblk_t 16473859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16483859Sml29623 { 16493859Sml29623 p_mblk_t mp; 16503859Sml29623 16513859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 16523859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 16536929Smisaki "offset = 0x%08X " 16546929Smisaki "size = 0x%08X", 16556929Smisaki nxge_mp, offset, size)); 16563859Sml29623 16573859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 16586929Smisaki 0, &nxge_mp->freeb); 16593859Sml29623 if (mp == NULL) { 16603859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16613859Sml29623 goto nxge_dupb_exit; 16623859Sml29623 } 16633859Sml29623 atomic_inc_32(&nxge_mp->ref_cnt); 16643859Sml29623 16653859Sml29623 16663859Sml29623 nxge_dupb_exit: 16673859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16686929Smisaki nxge_mp)); 16693859Sml29623 return (mp); 16703859Sml29623 } 16713859Sml29623 16723859Sml29623 p_mblk_t 16733859Sml29623 
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16743859Sml29623 { 16753859Sml29623 p_mblk_t mp; 16763859Sml29623 uchar_t *dp; 16773859Sml29623 16783859Sml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 16793859Sml29623 if (mp == NULL) { 16803859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16813859Sml29623 goto nxge_dupb_bcopy_exit; 16823859Sml29623 } 16833859Sml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 16843859Sml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 16853859Sml29623 mp->b_wptr = dp + size; 16863859Sml29623 16873859Sml29623 nxge_dupb_bcopy_exit: 16883859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16896929Smisaki nxge_mp)); 16903859Sml29623 return (mp); 16913859Sml29623 } 16923859Sml29623 16933859Sml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 16943859Sml29623 p_rx_msg_t rx_msg_p); 16953859Sml29623 16963859Sml29623 void 16973859Sml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 16983859Sml29623 { 16993859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 17003859Sml29623 17013859Sml29623 /* Reuse this buffer */ 17023859Sml29623 rx_msg_p->free = B_FALSE; 17033859Sml29623 rx_msg_p->cur_usage_cnt = 0; 17043859Sml29623 rx_msg_p->max_usage_cnt = 0; 17053859Sml29623 rx_msg_p->pkt_buf_size = 0; 17063859Sml29623 17073859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 17083859Sml29623 rx_msg_p->rx_use_bcopy = B_FALSE; 17093859Sml29623 atomic_dec_32(&rx_rbr_p->rbr_consumed); 17103859Sml29623 } 17113859Sml29623 17123859Sml29623 /* 17133859Sml29623 * Get the rbr header pointer and its offset index. 
17143859Sml29623 */ 17153859Sml29623 MUTEX_ENTER(&rx_rbr_p->post_lock); 17163859Sml29623 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 17176929Smisaki rx_rbr_p->rbr_wrap_mask); 17183859Sml29623 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 17193859Sml29623 MUTEX_EXIT(&rx_rbr_p->post_lock); 17205770Sml29623 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 17215770Sml29623 rx_rbr_p->rdc, 1); 17223859Sml29623 17233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 17246929Smisaki "<== nxge_post_page (channel %d post_next_index %d)", 17256929Smisaki rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 17263859Sml29623 17273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 17283859Sml29623 } 17293859Sml29623 17303859Sml29623 void 17313859Sml29623 nxge_freeb(p_rx_msg_t rx_msg_p) 17323859Sml29623 { 17333859Sml29623 size_t size; 17343859Sml29623 uchar_t *buffer = NULL; 17353859Sml29623 int ref_cnt; 17364874Sml29623 boolean_t free_state = B_FALSE; 17373859Sml29623 17385170Stm144005 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 17395170Stm144005 17403859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 17413859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 17426929Smisaki "nxge_freeb:rx_msg_p = $%p (block pending %d)", 17436929Smisaki rx_msg_p, nxge_mblks_pending)); 17443859Sml29623 17454874Sml29623 /* 17464874Sml29623 * First we need to get the free state, then 17474874Sml29623 * atomic decrement the reference count to prevent 17484874Sml29623 * the race condition with the interrupt thread that 17494874Sml29623 * is processing a loaned up buffer block. 
17504874Sml29623 */ 17514874Sml29623 free_state = rx_msg_p->free; 17523859Sml29623 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 17533859Sml29623 if (!ref_cnt) { 17545770Sml29623 atomic_dec_32(&nxge_mblks_pending); 17553859Sml29623 buffer = rx_msg_p->buffer; 17563859Sml29623 size = rx_msg_p->block_size; 17573859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 17586929Smisaki "will free: rx_msg_p = $%p (block pending %d)", 17596929Smisaki rx_msg_p, nxge_mblks_pending)); 17603859Sml29623 17613859Sml29623 if (!rx_msg_p->use_buf_pool) { 17623859Sml29623 KMEM_FREE(buffer, size); 17633859Sml29623 } 17643859Sml29623 17653859Sml29623 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 17665170Stm144005 17675759Smisaki if (ring) { 17685759Smisaki /* 17695759Smisaki * Decrement the receive buffer ring's reference 17705759Smisaki * count, too. 17715759Smisaki */ 17725759Smisaki atomic_dec_32(&ring->rbr_ref_cnt); 17735759Smisaki 17745759Smisaki /* 17756495Sspeer * Free the receive buffer ring, if 17765759Smisaki * 1. all the receive buffers have been freed 17775759Smisaki * 2. and we are in the proper state (that is, 17785759Smisaki * we are not UNMAPPING). 17795759Smisaki */ 17805759Smisaki if (ring->rbr_ref_cnt == 0 && 17815759Smisaki ring->rbr_state == RBR_UNMAPPED) { 17826495Sspeer /* 17836495Sspeer * Free receive data buffers, 17846495Sspeer * buffer index information 17856495Sspeer * (rxring_info) and 17866495Sspeer * the message block ring. 
17876495Sspeer */ 17886495Sspeer NXGE_DEBUG_MSG((NULL, RX_CTL, 17896495Sspeer "nxge_freeb:rx_msg_p = $%p " 17906495Sspeer "(block pending %d) free buffers", 17916495Sspeer rx_msg_p, nxge_mblks_pending)); 17926495Sspeer nxge_rxdma_databuf_free(ring); 17936495Sspeer if (ring->ring_info) { 17946495Sspeer KMEM_FREE(ring->ring_info, 17956495Sspeer sizeof (rxring_info_t)); 17966495Sspeer } 17976495Sspeer 17986495Sspeer if (ring->rx_msg_ring) { 17996495Sspeer KMEM_FREE(ring->rx_msg_ring, 18006495Sspeer ring->tnblocks * 18016495Sspeer sizeof (p_rx_msg_t)); 18026495Sspeer } 18035759Smisaki KMEM_FREE(ring, sizeof (*ring)); 18045759Smisaki } 18055170Stm144005 } 18063859Sml29623 return; 18073859Sml29623 } 18083859Sml29623 18093859Sml29623 /* 18103859Sml29623 * Repost buffer. 18113859Sml29623 */ 18125759Smisaki if (free_state && (ref_cnt == 1) && ring) { 18133859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, 18143859Sml29623 "nxge_freeb: post page $%p:", rx_msg_p)); 18155170Stm144005 if (ring->rbr_state == RBR_POSTING) 18165170Stm144005 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 18173859Sml29623 } 18183859Sml29623 18193859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 18203859Sml29623 } 18213859Sml29623 18223859Sml29623 uint_t 18233859Sml29623 nxge_rx_intr(void *arg1, void *arg2) 18243859Sml29623 { 18253859Sml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 18263859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg2; 18273859Sml29623 p_nxge_ldg_t ldgp; 18283859Sml29623 uint8_t channel; 18293859Sml29623 npi_handle_t handle; 18303859Sml29623 rx_dma_ctl_stat_t cs; 18318275SEric Cheng p_rx_rcr_ring_t rcr_ring; 18328275SEric Cheng mblk_t *mp; 18333859Sml29623 18343859Sml29623 if (ldvp == NULL) { 18353859Sml29623 NXGE_DEBUG_MSG((NULL, INT_CTL, 18366929Smisaki "<== nxge_rx_intr: arg2 $%p arg1 $%p", 18376929Smisaki nxgep, ldvp)); 18383859Sml29623 return (DDI_INTR_CLAIMED); 18393859Sml29623 } 18403859Sml29623 18413859Sml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 
18423859Sml29623 nxgep = ldvp->nxgep; 18433859Sml29623 } 18446602Sspeer 18456602Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 18466602Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 18476602Sspeer NXGE_DEBUG_MSG((nxgep, INT_CTL, 18486602Sspeer "<== nxge_rx_intr: interface not started or intialized")); 18496602Sspeer return (DDI_INTR_CLAIMED); 18506602Sspeer } 18516602Sspeer 18523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 18536929Smisaki "==> nxge_rx_intr: arg2 $%p arg1 $%p", 18546929Smisaki nxgep, ldvp)); 18553859Sml29623 18563859Sml29623 /* 1857*9232SMichael.Speer@Sun.COM * Get the PIO handle. 18583859Sml29623 */ 18593859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 18608275SEric Cheng 1861*9232SMichael.Speer@Sun.COM /* 1862*9232SMichael.Speer@Sun.COM * Get the ring to enable us to process packets. 1863*9232SMichael.Speer@Sun.COM */ 18648275SEric Cheng rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index]; 18658275SEric Cheng 18668275SEric Cheng /* 18678275SEric Cheng * The RCR ring lock must be held when packets 18688275SEric Cheng * are being processed and the hardware registers are 18698275SEric Cheng * being read or written to prevent race condition 18708275SEric Cheng * among the interrupt thread, the polling thread 18718275SEric Cheng * (will cause fatal errors such as rcrincon bit set) 18728275SEric Cheng * and the setting of the poll_flag. 18738275SEric Cheng */ 18748275SEric Cheng MUTEX_ENTER(&rcr_ring->lock); 18758275SEric Cheng 18763859Sml29623 /* 18773859Sml29623 * Get the control and status for this channel. 
18783859Sml29623 */ 18793859Sml29623 channel = ldvp->channel; 18803859Sml29623 ldgp = ldvp->ldgp; 18818275SEric Cheng 1882*9232SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep) && (!nxgep->rx_channel_started[channel])) { 1883*9232SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, INT_CTL, 1884*9232SMichael.Speer@Sun.COM "<== nxge_rx_intr: channel is not started")); 1885*9232SMichael.Speer@Sun.COM 1886*9232SMichael.Speer@Sun.COM /* 1887*9232SMichael.Speer@Sun.COM * We received an interrupt before the ring is started. 1888*9232SMichael.Speer@Sun.COM */ 1889*9232SMichael.Speer@Sun.COM RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, 1890*9232SMichael.Speer@Sun.COM &cs.value); 1891*9232SMichael.Speer@Sun.COM cs.value &= RX_DMA_CTL_STAT_WR1C; 1892*9232SMichael.Speer@Sun.COM cs.bits.hdw.mex = 1; 1893*9232SMichael.Speer@Sun.COM RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 1894*9232SMichael.Speer@Sun.COM cs.value); 1895*9232SMichael.Speer@Sun.COM 1896*9232SMichael.Speer@Sun.COM /* 1897*9232SMichael.Speer@Sun.COM * Rearm this logical group if this is a single device 1898*9232SMichael.Speer@Sun.COM * group. 
1899*9232SMichael.Speer@Sun.COM */ 1900*9232SMichael.Speer@Sun.COM if (ldgp->nldvs == 1) { 1901*9232SMichael.Speer@Sun.COM if (isLDOMguest(nxgep)) { 1902*9232SMichael.Speer@Sun.COM nxge_hio_ldgimgn(nxgep, ldgp); 1903*9232SMichael.Speer@Sun.COM } else { 1904*9232SMichael.Speer@Sun.COM ldgimgm_t mgm; 1905*9232SMichael.Speer@Sun.COM 1906*9232SMichael.Speer@Sun.COM mgm.value = 0; 1907*9232SMichael.Speer@Sun.COM mgm.bits.ldw.arm = 1; 1908*9232SMichael.Speer@Sun.COM mgm.bits.ldw.timer = ldgp->ldg_timer; 1909*9232SMichael.Speer@Sun.COM 1910*9232SMichael.Speer@Sun.COM NXGE_REG_WR64(handle, 1911*9232SMichael.Speer@Sun.COM LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 1912*9232SMichael.Speer@Sun.COM mgm.value); 1913*9232SMichael.Speer@Sun.COM } 19148275SEric Cheng } 1915*9232SMichael.Speer@Sun.COM MUTEX_EXIT(&rcr_ring->lock); 1916*9232SMichael.Speer@Sun.COM return (DDI_INTR_CLAIMED); 19178275SEric Cheng } 19188275SEric Cheng 19198275SEric Cheng ASSERT(rcr_ring->ldgp == ldgp); 19208275SEric Cheng ASSERT(rcr_ring->ldvp == ldvp); 19218275SEric Cheng 19223859Sml29623 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 19233859Sml29623 19243859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 19256929Smisaki "cs 0x%016llx rcrto 0x%x rcrthres %x", 19266929Smisaki channel, 19276929Smisaki cs.value, 19286929Smisaki cs.bits.hdw.rcrto, 19296929Smisaki cs.bits.hdw.rcrthres)); 19303859Sml29623 19318275SEric Cheng mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 19323859Sml29623 19333859Sml29623 /* error events. */ 19343859Sml29623 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 19356495Sspeer (void) nxge_rx_err_evnts(nxgep, channel, cs); 19363859Sml29623 } 19373859Sml29623 19383859Sml29623 /* 19393859Sml29623 * Enable the mailbox update interrupt if we want 19403859Sml29623 * to use mailbox. We probably don't need to use 19413859Sml29623 * mailbox as it only saves us one pio read. 
19423859Sml29623 * Also write 1 to rcrthres and rcrto to clear 19433859Sml29623 * these two edge triggered bits. 19443859Sml29623 */ 19453859Sml29623 cs.value &= RX_DMA_CTL_STAT_WR1C; 19468275SEric Cheng cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 19473859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 19486929Smisaki cs.value); 19493859Sml29623 19503859Sml29623 /* 19518275SEric Cheng * If the polling mode is enabled, disable the interrupt. 19523859Sml29623 */ 19538275SEric Cheng if (rcr_ring->poll_flag) { 19548275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19558275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 19568275SEric Cheng "(disabling interrupts)", channel, ldgp, ldvp)); 19578275SEric Cheng /* 19588275SEric Cheng * Disarm this logical group if this is a single device 19598275SEric Cheng * group. 19608275SEric Cheng */ 19618275SEric Cheng if (ldgp->nldvs == 1) { 19628275SEric Cheng ldgimgm_t mgm; 19638275SEric Cheng mgm.value = 0; 19648275SEric Cheng mgm.bits.ldw.arm = 0; 19656495Sspeer NXGE_REG_WR64(handle, 19668275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 19676495Sspeer } 19688275SEric Cheng } else { 19698275SEric Cheng /* 19708400SNicolas.Droux@Sun.COM * Rearm this logical group if this is a single device 19718400SNicolas.Droux@Sun.COM * group. 
19728275SEric Cheng */ 19738275SEric Cheng if (ldgp->nldvs == 1) { 19748275SEric Cheng if (isLDOMguest(nxgep)) { 19758275SEric Cheng nxge_hio_ldgimgn(nxgep, ldgp); 19768275SEric Cheng } else { 19778275SEric Cheng ldgimgm_t mgm; 19788275SEric Cheng 19798275SEric Cheng mgm.value = 0; 19808275SEric Cheng mgm.bits.ldw.arm = 1; 19818275SEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 19828275SEric Cheng 19838275SEric Cheng NXGE_REG_WR64(handle, 19848275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 19858275SEric Cheng mgm.value); 19868275SEric Cheng } 19878275SEric Cheng } 19888275SEric Cheng 19898275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19908275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p " 19918275SEric Cheng "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 19923859Sml29623 } 19938275SEric Cheng MUTEX_EXIT(&rcr_ring->lock); 19948275SEric Cheng 19958275SEric Cheng if (mp) { 19968275SEric Cheng if (!isLDOMguest(nxgep)) 19978275SEric Cheng mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 19988275SEric Cheng rcr_ring->rcr_gen_num); 19998275SEric Cheng #if defined(sun4v) 20008275SEric Cheng else { /* isLDOMguest(nxgep) */ 20018275SEric Cheng nxge_hio_data_t *nhd = (nxge_hio_data_t *) 20028275SEric Cheng nxgep->nxge_hw_p->hio; 20038275SEric Cheng nx_vio_fp_t *vio = &nhd->hio.vio; 20048275SEric Cheng 20058275SEric Cheng if (vio->cb.vio_net_rx_cb) { 20068275SEric Cheng (*vio->cb.vio_net_rx_cb) 20078275SEric Cheng (nxgep->hio_vr->vhp, mp); 20088275SEric Cheng } 20098275SEric Cheng } 20108275SEric Cheng #endif 20118275SEric Cheng } 20128275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 20138275SEric Cheng return (DDI_INTR_CLAIMED); 20143859Sml29623 } 20153859Sml29623 20163859Sml29623 /* 20173859Sml29623 * Process the packets received in the specified logical device 20183859Sml29623 * and pass up a chain of message blocks to the upper layer. 
 * The RCR ring lock must be held before calling this function.
 *
 * Returns the mblk chain produced by nxge_rx_pkts() (all available
 * packets, no byte limit), or NULL if there was nothing to pick up.
 */
static mblk_t *
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs)
{
	p_mblk_t		mp;
	p_rx_rcr_ring_t		rcrp;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
	/* Map the virtual DMA index to its RCR ring. */
	rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex];

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d "
	    "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle));
	/* -1 == no byte budget: drain up to the packet-count limit. */
	if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts_vring: no mp"));
		return (NULL);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
	    mp));

#ifdef NXGE_DEBUG
	/* Debug builds only: dump the head, continuation and next mblks. */
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring:calling mac_rx "
	    "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    mp->b_wptr - mp->b_rptr,
	    mp, mp->b_cont, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp,
	    mp->b_rptr,
	    mp->b_wptr,
	    nxge_dump_packet((char *)mp->b_rptr,
	    mp->b_wptr - mp->b_rptr)));
	if (mp->b_cont) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont,
		    mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr,
		    nxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    nxge_dump_packet((char *)mp->b_next->b_rptr,
		    mp->b_next->b_wptr - mp->b_next->b_rptr)));
	}
#endif
	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
	    "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ",
	    rcrp->rdc, rcrp->rcr_mac_handle));

	return (mp);
}


/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets were removed from the hardware queue.
 *
 * The RCR ring lock is held when entering this function.
 */
/*
 * bytes_to_pickup: byte budget for this call; -1 means no byte limit
 * (drain up to nxge_max_rx_pkts packets).  Returns the head of the
 * assembled mblk chain, or NULL if nothing was processed.
 */
static mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
    int bytes_to_pickup)
{
	npi_handle_t		handle;
	uint8_t			channel;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw;
	boolean_t		multi;
	rcrcfig_b_t		rcr_cfg_b;
	int			totallen = 0;
#if defined(_BIG_ENDIAN)
	npi_status_t		rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
	    "channel %d", rcr_p->rdc));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	channel = rcr_p->rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p  index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


	/* Read the hardware queue length (qlen) for this channel. */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
		    "channel %d, get qlen failed 0x%08x",
		    channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (The jumbo or multi packet will be counted as only one
	 *  packets and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes:
		 * nmp/mp_cont and the multi flag together say whether the
		 * entry is a whole frame, the first, a middle, or the last
		 * segment of a multi-entry frame; whole frames and final
		 * segments are chained via b_next, segments via b_cont.
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(nmp);
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
				totallen += MBLKL(nmp);
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
				totallen += MBLKL(mp_cont);
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				tail_mp = &nmp->b_next;
				totallen += MBLKL(mp_cont);
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p  index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* A packet is complete only when `multi' is clear. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p  index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* Honor the caller's byte budget (poll mode). */
		if ((bytes_to_pickup != -1) &&
		    (totallen >= bytes_to_pickup)) {
			break;
		}
	}

	/* Publish the updated ring state back into the ring. */
	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
	/*
	 * If the tunable timeout/threshold changed, clamp them to the
	 * minimums and reprogram RCRCFIG_B for this channel.
	 */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {

		rcr_p->intr_timeout = (nxgep->intr_timeout <
		    NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN :
		    nxgep->intr_timeout;

		rcr_p->intr_threshold = (nxgep->intr_threshold <
		    NXGE_RDC_RCR_THRESHOLD_MIN) ? NXGE_RDC_RCR_THRESHOLD_MIN :
		    nxgep->intr_threshold;

		rcr_cfg_b.value = 0x0ULL;
		rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;

		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	/* Tell the hardware how many packets/entries were consumed. */
	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p  index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
23003859Sml29623 */ 23013859Sml29623 23028275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return" 23038275SEric Cheng "channel %d", rcr_p->rdc)); 23048275SEric Cheng 23053859Sml29623 return (head_mp); 23063859Sml29623 } 23073859Sml29623 23083859Sml29623 void 23093859Sml29623 nxge_receive_packet(p_nxge_t nxgep, 23103859Sml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 23113859Sml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 23123859Sml29623 { 23133859Sml29623 p_mblk_t nmp = NULL; 23143859Sml29623 uint64_t multi; 23153859Sml29623 uint64_t dcf_err; 23163859Sml29623 uint8_t channel; 23173859Sml29623 23183859Sml29623 boolean_t first_entry = B_TRUE; 23193859Sml29623 boolean_t is_tcp_udp = B_FALSE; 23203859Sml29623 boolean_t buffer_free = B_FALSE; 23213859Sml29623 boolean_t error_send_up = B_FALSE; 23223859Sml29623 uint8_t error_type; 23233859Sml29623 uint16_t l2_len; 23243859Sml29623 uint16_t skip_len; 23253859Sml29623 uint8_t pktbufsz_type; 23263859Sml29623 uint64_t rcr_entry; 23273859Sml29623 uint64_t *pkt_buf_addr_pp; 23283859Sml29623 uint64_t *pkt_buf_addr_p; 23293859Sml29623 uint32_t buf_offset; 23303859Sml29623 uint32_t bsize; 23313859Sml29623 uint32_t error_disp_cnt; 23323859Sml29623 uint32_t msg_index; 23333859Sml29623 p_rx_rbr_ring_t rx_rbr_p; 23343859Sml29623 p_rx_msg_t *rx_msg_ring_p; 23353859Sml29623 p_rx_msg_t rx_msg_p; 23363859Sml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0; 23373859Sml29623 nxge_status_t status = NXGE_OK; 23383859Sml29623 boolean_t is_valid = B_FALSE; 23393859Sml29623 p_nxge_rx_ring_stats_t rdc_stats; 23403859Sml29623 uint32_t bytes_read; 23413859Sml29623 uint64_t pkt_type; 23423859Sml29623 uint64_t frag; 23436028Ssbehera boolean_t pkt_too_long_err = B_FALSE; 23443859Sml29623 #ifdef NXGE_DEBUG 23453859Sml29623 int dump_len; 23463859Sml29623 #endif 23473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 23483859Sml29623 first_entry = (*mp == NULL) ? 
B_TRUE : B_FALSE; 23493859Sml29623 23503859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 23513859Sml29623 23523859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK); 23533859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 23543859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 23553859Sml29623 23563859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 23573859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK); 23583859Sml29623 23593859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 23603859Sml29623 23613859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 23626929Smisaki RCR_PKTBUFSZ_SHIFT); 23635125Sjoycey #if defined(__i386) 23645125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 23656929Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 23665125Sjoycey #else 23673859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 23686929Smisaki RCR_PKT_BUF_ADDR_SHIFT); 23695125Sjoycey #endif 23703859Sml29623 23713859Sml29623 channel = rcr_p->rdc; 23723859Sml29623 23733859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23746929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23756929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23766929Smisaki "error_type 0x%x pkt_type 0x%x " 23776929Smisaki "pktbufsz_type %d ", 23786929Smisaki rcr_desc_rd_head_p, 23796929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23806929Smisaki multi, 23816929Smisaki error_type, 23826929Smisaki pkt_type, 23836929Smisaki pktbufsz_type)); 23843859Sml29623 23853859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23866929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23876929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23886929Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 23896929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23906929Smisaki multi, 23916929Smisaki error_type, 23926929Smisaki pkt_type)); 23933859Sml29623 23943859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23956929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 23966929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23976929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23983859Sml29623 23993859Sml29623 /* get the stats ptr */ 24003859Sml29623 rdc_stats = rcr_p->rdc_stats; 24013859Sml29623 24023859Sml29623 if (!l2_len) { 24033859Sml29623 24043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24056929Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 24063859Sml29623 return; 24073859Sml29623 } 24083859Sml29623 24096028Ssbehera /* 24108275SEric Cheng * Software workaround for BMAC hardware limitation that allows 24116028Ssbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 24126028Ssbehera * instead of 0x2400 for jumbo. 24136028Ssbehera */ 24146028Ssbehera if (l2_len > nxgep->mac.maxframesize) { 24156028Ssbehera pkt_too_long_err = B_TRUE; 24166028Ssbehera } 24176028Ssbehera 24184185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 24194185Sspeer l2_len -= ETHERFCSL; 24204185Sspeer 24213859Sml29623 /* shift 6 bits to get the full io address */ 24225125Sjoycey #if defined(__i386) 24235125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 24246929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24255125Sjoycey #else 24263859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 24276929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24285125Sjoycey #endif 24293859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24306929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 24316929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24326929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24333859Sml29623 24343859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p; 24353859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 24363859Sml29623 24373859Sml29623 if (first_entry) { 24383859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? 
RXDMA_HDR_SIZE_FULL : 24396929Smisaki RXDMA_HDR_SIZE_DEFAULT); 24403859Sml29623 24413859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24426929Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 24436929Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 24446929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 24456929Smisaki hdr_size)); 24463859Sml29623 } 24473859Sml29623 24483859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock); 24493859Sml29623 24503859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24516929Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 24526929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24536929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24543859Sml29623 24553859Sml29623 /* 24563859Sml29623 * Packet buffer address in the completion entry points 24573859Sml29623 * to the starting buffer address (offset 0). 24583859Sml29623 * Use the starting buffer address to locate the corresponding 24593859Sml29623 * kernel address. 24603859Sml29623 */ 24613859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 24626929Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 24636929Smisaki &buf_offset, 24646929Smisaki &msg_index); 24653859Sml29623 24663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24676929Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 24686929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24696929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24703859Sml29623 24713859Sml29623 if (status != NXGE_OK) { 24723859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24733859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24746929Smisaki "<== nxge_receive_packet: found vaddr failed %d", 24756929Smisaki status)); 24763859Sml29623 return; 24773859Sml29623 } 24783859Sml29623 24793859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24806929Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 24816929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24826929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24833859Sml29623 24843859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24856929Smisaki "==> 
(rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24866929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24876929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24883859Sml29623 24893859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 24903859Sml29623 24913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24926929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24936929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24946929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24953859Sml29623 24963859Sml29623 switch (pktbufsz_type) { 24973859Sml29623 case RCR_PKTBUFSZ_0: 24983859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 24993859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25006929Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 25013859Sml29623 break; 25023859Sml29623 case RCR_PKTBUFSZ_1: 25033859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 25043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25056929Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 25063859Sml29623 break; 25073859Sml29623 case RCR_PKTBUFSZ_2: 25083859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 25093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25106929Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 25113859Sml29623 break; 25123859Sml29623 case RCR_SINGLE_BLOCK: 25133859Sml29623 bsize = rx_msg_p->block_size; 25143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25156929Smisaki "==> nxge_receive_packet: single %d", bsize)); 25163859Sml29623 25173859Sml29623 break; 25183859Sml29623 default: 25193859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25203859Sml29623 return; 25213859Sml29623 } 25223859Sml29623 25233859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 25246929Smisaki (buf_offset + sw_offset_bytes), 25256929Smisaki (hdr_size + l2_len), 25266929Smisaki DDI_DMA_SYNC_FORCPU); 25273859Sml29623 25283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25296929Smisaki "==> nxge_receive_packet: after first dump:usage count")); 25303859Sml29623 25313859Sml29623 if 
(rx_msg_p->cur_usage_cnt == 0) { 25323859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 25333859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 25343859Sml29623 if (rx_rbr_p->rbr_consumed < 25356929Smisaki rx_rbr_p->rbr_threshold_hi) { 25363859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 25376929Smisaki ((rx_rbr_p->rbr_consumed >= 25386929Smisaki rx_rbr_p->rbr_threshold_lo) && 25396929Smisaki (rx_rbr_p->rbr_bufsize_type >= 25406929Smisaki pktbufsz_type))) { 25413859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25423859Sml29623 } 25433859Sml29623 } else { 25443859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25453859Sml29623 } 25463859Sml29623 } 25473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25486929Smisaki "==> nxge_receive_packet: buf %d (new block) ", 25496929Smisaki bsize)); 25503859Sml29623 25513859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 25523859Sml29623 rx_msg_p->pkt_buf_size = bsize; 25533859Sml29623 rx_msg_p->cur_usage_cnt = 1; 25543859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 25553859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25566929Smisaki "==> nxge_receive_packet: buf %d " 25576929Smisaki "(single block) ", 25586929Smisaki bsize)); 25593859Sml29623 /* 25603859Sml29623 * Buffer can be reused once the free function 25613859Sml29623 * is called. 
25623859Sml29623 */ 25633859Sml29623 rx_msg_p->max_usage_cnt = 1; 25643859Sml29623 buffer_free = B_TRUE; 25653859Sml29623 } else { 25663859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 25673859Sml29623 if (rx_msg_p->max_usage_cnt == 1) { 25683859Sml29623 buffer_free = B_TRUE; 25693859Sml29623 } 25703859Sml29623 } 25713859Sml29623 } else { 25723859Sml29623 rx_msg_p->cur_usage_cnt++; 25733859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 25743859Sml29623 buffer_free = B_TRUE; 25753859Sml29623 } 25763859Sml29623 } 25773859Sml29623 25783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25793859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 25806929Smisaki msg_index, l2_len, 25816929Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 25823859Sml29623 25836028Ssbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 25843859Sml29623 rdc_stats->ierrors++; 25853859Sml29623 if (dcf_err) { 25863859Sml29623 rdc_stats->dcf_err++; 25873859Sml29623 #ifdef NXGE_DEBUG 25883859Sml29623 if (!rdc_stats->dcf_err) { 25893859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25903859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr" 25913859Sml29623 " 0x%llx", channel, rcr_entry)); 25923859Sml29623 } 25933859Sml29623 #endif 25943859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 25956929Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 25966028Ssbehera } else if (pkt_too_long_err) { 25976028Ssbehera rdc_stats->pkt_too_long_err++; 25986028Ssbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 25996028Ssbehera " channel %d packet length [%d] > " 26006028Ssbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 26016028Ssbehera nxgep->mac.maxframesize)); 26023859Sml29623 } else { 26033859Sml29623 /* Update error stats */ 26043859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 26053859Sml29623 rdc_stats->errlog.compl_err_type = error_type; 26063859Sml29623 26073859Sml29623 switch (error_type) { 26085523Syc148097 /* 
26095523Syc148097 * Do not send FMA ereport for RCR_L2_ERROR and 26105523Syc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 26115523Syc148097 * back pressure rather than HW failures. 26125523Syc148097 */ 26135165Syc148097 case RCR_L2_ERROR: 26145165Syc148097 rdc_stats->l2_err++; 26155165Syc148097 if (rdc_stats->l2_err < 26165165Syc148097 error_disp_cnt) { 26175165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26185165Syc148097 " nxge_receive_packet:" 26195165Syc148097 " channel %d RCR L2_ERROR", 26205165Syc148097 channel)); 26215165Syc148097 } 26225165Syc148097 break; 26235165Syc148097 case RCR_L4_CSUM_ERROR: 26245165Syc148097 error_send_up = B_TRUE; 26255165Syc148097 rdc_stats->l4_cksum_err++; 26265165Syc148097 if (rdc_stats->l4_cksum_err < 26275165Syc148097 error_disp_cnt) { 26283859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26295165Syc148097 " nxge_receive_packet:" 26305165Syc148097 " channel %d" 26315165Syc148097 " RCR L4_CSUM_ERROR", channel)); 26325165Syc148097 } 26335165Syc148097 break; 26345523Syc148097 /* 26355523Syc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 26365523Syc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same 26375523Syc148097 * FFLP and ZCP errors that have been reported by 26385523Syc148097 * nxge_fflp.c and nxge_zcp.c. 
26395523Syc148097 */ 26405165Syc148097 case RCR_FFLP_SOFT_ERROR: 26415165Syc148097 error_send_up = B_TRUE; 26425165Syc148097 rdc_stats->fflp_soft_err++; 26435165Syc148097 if (rdc_stats->fflp_soft_err < 26445165Syc148097 error_disp_cnt) { 26455165Syc148097 NXGE_ERROR_MSG((nxgep, 26465165Syc148097 NXGE_ERR_CTL, 26475165Syc148097 " nxge_receive_packet:" 26485165Syc148097 " channel %d" 26495165Syc148097 " RCR FFLP_SOFT_ERROR", channel)); 26505165Syc148097 } 26515165Syc148097 break; 26525165Syc148097 case RCR_ZCP_SOFT_ERROR: 26535165Syc148097 error_send_up = B_TRUE; 26545165Syc148097 rdc_stats->fflp_soft_err++; 26555165Syc148097 if (rdc_stats->zcp_soft_err < 26565165Syc148097 error_disp_cnt) 26575165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26585165Syc148097 " nxge_receive_packet: Channel %d" 26595165Syc148097 " RCR ZCP_SOFT_ERROR", channel)); 26605165Syc148097 break; 26615165Syc148097 default: 26625165Syc148097 rdc_stats->rcr_unknown_err++; 26635165Syc148097 if (rdc_stats->rcr_unknown_err 26645165Syc148097 < error_disp_cnt) { 26655165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26665165Syc148097 " nxge_receive_packet: Channel %d" 26675165Syc148097 " RCR entry 0x%llx error 0x%x", 26685165Syc148097 rcr_entry, channel, error_type)); 26695165Syc148097 } 26705165Syc148097 break; 26713859Sml29623 } 26723859Sml29623 } 26733859Sml29623 26743859Sml29623 /* 26753859Sml29623 * Update and repost buffer block if max usage 26763859Sml29623 * count is reached. 
26773859Sml29623 */ 26783859Sml29623 if (error_send_up == B_FALSE) { 26794874Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 26803859Sml29623 if (buffer_free == B_TRUE) { 26813859Sml29623 rx_msg_p->free = B_TRUE; 26823859Sml29623 } 26833859Sml29623 26843859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 26853859Sml29623 nxge_freeb(rx_msg_p); 26863859Sml29623 return; 26873859Sml29623 } 26883859Sml29623 } 26893859Sml29623 26903859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 26916929Smisaki "==> nxge_receive_packet: DMA sync second ")); 26923859Sml29623 26935165Syc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 26943859Sml29623 skip_len = sw_offset_bytes + hdr_size; 26953859Sml29623 if (!rx_msg_p->rx_use_bcopy) { 26964874Sml29623 /* 26974874Sml29623 * For loaned up buffers, the driver reference count 26984874Sml29623 * will be incremented first and then the free state. 26994874Sml29623 */ 27005165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 27015165Syc148097 if (first_entry) { 27025165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len]; 27035165Syc148097 if (l2_len < bsize - skip_len) { 27045165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len]; 27055165Syc148097 } else { 27065165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize 27075165Syc148097 - skip_len]; 27085165Syc148097 } 27095165Syc148097 } else { 27105165Syc148097 if (l2_len - bytes_read < bsize) { 27115165Syc148097 nmp->b_wptr = 27125165Syc148097 &nmp->b_rptr[l2_len - bytes_read]; 27135165Syc148097 } else { 27145165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 27155165Syc148097 } 27165165Syc148097 } 27175165Syc148097 } 27183859Sml29623 } else { 27195165Syc148097 if (first_entry) { 27205165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 27215165Syc148097 l2_len < bsize - skip_len ? 27225165Syc148097 l2_len : bsize - skip_len); 27235165Syc148097 } else { 27245165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 27255165Syc148097 l2_len - bytes_read < bsize ? 
27265165Syc148097 l2_len - bytes_read : bsize); 27275165Syc148097 } 27283859Sml29623 } 27293859Sml29623 if (nmp != NULL) { 27307145Syc148097 if (first_entry) { 27317145Syc148097 /* 27327145Syc148097 * Jumbo packets may be received with more than one 27337145Syc148097 * buffer, increment ipackets for the first entry only. 27347145Syc148097 */ 27357145Syc148097 rdc_stats->ipackets++; 27367145Syc148097 27377145Syc148097 /* Update ibytes for kstat. */ 27387145Syc148097 rdc_stats->ibytes += skip_len 27397145Syc148097 + l2_len < bsize ? l2_len : bsize; 27407145Syc148097 /* 27417145Syc148097 * Update the number of bytes read so far for the 27427145Syc148097 * current frame. 27437145Syc148097 */ 27445165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 27457145Syc148097 } else { 27467145Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 27477145Syc148097 l2_len - bytes_read : bsize; 27483859Sml29623 bytes_read += nmp->b_wptr - nmp->b_rptr; 27497145Syc148097 } 27505165Syc148097 27515165Syc148097 NXGE_DEBUG_MSG((nxgep, RX_CTL, 27525165Syc148097 "==> nxge_receive_packet after dupb: " 27535165Syc148097 "rbr consumed %d " 27545165Syc148097 "pktbufsz_type %d " 27555165Syc148097 "nmp $%p rptr $%p wptr $%p " 27565165Syc148097 "buf_offset %d bzise %d l2_len %d skip_len %d", 27575165Syc148097 rx_rbr_p->rbr_consumed, 27585165Syc148097 pktbufsz_type, 27595165Syc148097 nmp, nmp->b_rptr, nmp->b_wptr, 27605165Syc148097 buf_offset, bsize, l2_len, skip_len)); 27613859Sml29623 } else { 27623859Sml29623 cmn_err(CE_WARN, "!nxge_receive_packet: " 27636929Smisaki "update stats (error)"); 27644977Sraghus atomic_inc_32(&rx_msg_p->ref_cnt); 27654977Sraghus if (buffer_free == B_TRUE) { 27664977Sraghus rx_msg_p->free = B_TRUE; 27674977Sraghus } 27684977Sraghus MUTEX_EXIT(&rx_rbr_p->lock); 27694977Sraghus nxge_freeb(rx_msg_p); 27704977Sraghus return; 27713859Sml29623 } 27725060Syc148097 27733859Sml29623 if (buffer_free == B_TRUE) { 27743859Sml29623 rx_msg_p->free = B_TRUE; 27753859Sml29623 
} 27767145Syc148097 27773859Sml29623 is_valid = (nmp != NULL); 27785165Syc148097 27795165Syc148097 rcr_p->rcvd_pkt_bytes = bytes_read; 27805165Syc148097 27813859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 27823859Sml29623 27833859Sml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 27843859Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 27853859Sml29623 nxge_freeb(rx_msg_p); 27863859Sml29623 } 27873859Sml29623 27883859Sml29623 if (is_valid) { 27893859Sml29623 nmp->b_cont = NULL; 27903859Sml29623 if (first_entry) { 27913859Sml29623 *mp = nmp; 27923859Sml29623 *mp_cont = NULL; 27935165Syc148097 } else { 27943859Sml29623 *mp_cont = nmp; 27955165Syc148097 } 27963859Sml29623 } 27973859Sml29623 27983859Sml29623 /* 27997145Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 28007145Syc148097 * If a packet is not fragmented and no error bit is set, then 28017145Syc148097 * L4 checksum is OK. 28023859Sml29623 */ 28037145Syc148097 28043859Sml29623 if (is_valid && !multi) { 28056495Sspeer /* 28066611Sml29623 * If the checksum flag nxge_chksum_offload 28076611Sml29623 * is 1, TCP and UDP packets can be sent 28086495Sspeer * up with good checksum. If the checksum flag 28096611Sml29623 * is set to 0, checksum reporting will apply to 28106495Sspeer * TCP packets only (workaround for a hardware bug). 28116611Sml29623 * If the checksum flag nxge_cksum_offload is 28126611Sml29623 * greater than 1, both TCP and UDP packets 28136611Sml29623 * will not be reported its hardware checksum results. 28146495Sspeer */ 28156611Sml29623 if (nxge_cksum_offload == 1) { 28166495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 28176929Smisaki pkt_type == RCR_PKT_IS_UDP) ? 28186929Smisaki B_TRUE: B_FALSE); 28196611Sml29623 } else if (!nxge_cksum_offload) { 28206495Sspeer /* TCP checksum only. */ 28216495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ? 
28226929Smisaki B_TRUE: B_FALSE); 28236495Sspeer } 28243859Sml29623 28253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 28266929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 28276929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28283859Sml29623 28293859Sml29623 if (is_tcp_udp && !frag && !error_type) { 28303859Sml29623 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 28316929Smisaki HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 28323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 28336929Smisaki "==> nxge_receive_packet: Full tcp/udp cksum " 28346929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d " 28356929Smisaki "error %d", 28366929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28373859Sml29623 } 28383859Sml29623 } 28393859Sml29623 28403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 28416929Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 28423859Sml29623 28433859Sml29623 *multi_p = (multi == RCR_MULTI_MASK); 28443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 28456929Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 28466929Smisaki *multi_p, nmp, *mp, *mp_cont)); 28473859Sml29623 } 28483859Sml29623 28498275SEric Cheng /* 28508275SEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 28518275SEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 
28528275SEric Cheng */ 28538275SEric Cheng int 28548275SEric Cheng nxge_enable_poll(void *arg) 28558275SEric Cheng { 28568275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 28578275SEric Cheng p_rx_rcr_ring_t ringp; 28588275SEric Cheng p_nxge_t nxgep; 28598275SEric Cheng p_nxge_ldg_t ldgp; 28608275SEric Cheng uint32_t channel; 28618275SEric Cheng 28628275SEric Cheng if (ring_handle == NULL) { 28638275SEric Cheng return (0); 28648275SEric Cheng } 28658275SEric Cheng 28668275SEric Cheng nxgep = ring_handle->nxgep; 28678275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 28688275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 28698275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28708275SEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 28718275SEric Cheng ldgp = ringp->ldgp; 28728275SEric Cheng if (ldgp == NULL) { 28738275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28748275SEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 28758275SEric Cheng ringp->rdc)); 28768275SEric Cheng return (0); 28778275SEric Cheng } 28788275SEric Cheng 28798275SEric Cheng MUTEX_ENTER(&ringp->lock); 28808275SEric Cheng /* enable polling */ 28818275SEric Cheng if (ringp->poll_flag == 0) { 28828275SEric Cheng ringp->poll_flag = 1; 28838275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28848275SEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 28858275SEric Cheng ringp->rdc)); 28868275SEric Cheng } 28878275SEric Cheng 28888275SEric Cheng MUTEX_EXIT(&ringp->lock); 28898275SEric Cheng return (0); 28908275SEric Cheng } 28918275SEric Cheng /* 28928275SEric Cheng * Disable polling for a ring and enable its interrupt. 
28938275SEric Cheng */ 28948275SEric Cheng int 28958275SEric Cheng nxge_disable_poll(void *arg) 28968275SEric Cheng { 28978275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 28988275SEric Cheng p_rx_rcr_ring_t ringp; 28998275SEric Cheng p_nxge_t nxgep; 29008275SEric Cheng uint32_t channel; 29018275SEric Cheng 29028275SEric Cheng if (ring_handle == NULL) { 29038275SEric Cheng return (0); 29048275SEric Cheng } 29058275SEric Cheng 29068275SEric Cheng nxgep = ring_handle->nxgep; 29078275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 29088275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 29098275SEric Cheng 29108275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 29118275SEric Cheng "==> nxge_disable_poll: rdc %d poll_flag %d", ringp->rdc)); 29128275SEric Cheng 29138275SEric Cheng MUTEX_ENTER(&ringp->lock); 29148275SEric Cheng 29158275SEric Cheng /* disable polling: enable interrupt */ 29168275SEric Cheng if (ringp->poll_flag) { 29178275SEric Cheng npi_handle_t handle; 29188275SEric Cheng rx_dma_ctl_stat_t cs; 29198275SEric Cheng uint8_t channel; 29208275SEric Cheng p_nxge_ldg_t ldgp; 29218275SEric Cheng 29228275SEric Cheng /* 29238275SEric Cheng * Get the control and status for this channel. 29248275SEric Cheng */ 29258275SEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 29268275SEric Cheng channel = ringp->rdc; 29278275SEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, 29288275SEric Cheng channel, &cs.value); 29298275SEric Cheng 29308275SEric Cheng /* 29318275SEric Cheng * Enable mailbox update 29328275SEric Cheng * Since packets were not read and the hardware uses 29338275SEric Cheng * bits pktread and ptrread to update the queue 29348275SEric Cheng * length, we need to set both bits to 0. 
29358275SEric Cheng */ 29368275SEric Cheng cs.bits.ldw.pktread = 0; 29378275SEric Cheng cs.bits.ldw.ptrread = 0; 29388275SEric Cheng cs.bits.hdw.mex = 1; 29398275SEric Cheng RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 29408275SEric Cheng cs.value); 29418275SEric Cheng 29428275SEric Cheng /* 29438275SEric Cheng * Rearm this logical group if this is a single device 29448275SEric Cheng * group. 29458275SEric Cheng */ 29468275SEric Cheng ldgp = ringp->ldgp; 29478275SEric Cheng if (ldgp == NULL) { 29488275SEric Cheng ringp->poll_flag = 0; 29498275SEric Cheng MUTEX_EXIT(&ringp->lock); 29508275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 29518275SEric Cheng "==> nxge_disable_poll: no ldgp rdc %d " 29528275SEric Cheng "(still set poll to 0", ringp->rdc)); 29538275SEric Cheng return (0); 29548275SEric Cheng } 29558275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 29568275SEric Cheng "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)", 29578275SEric Cheng ringp->rdc, ldgp)); 29588275SEric Cheng if (ldgp->nldvs == 1) { 29598275SEric Cheng ldgimgm_t mgm; 29608275SEric Cheng mgm.value = 0; 29618275SEric Cheng mgm.bits.ldw.arm = 1; 29628275SEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 29638275SEric Cheng NXGE_REG_WR64(handle, 29648275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 29658275SEric Cheng } 29668275SEric Cheng ringp->poll_flag = 0; 29678275SEric Cheng } 29688275SEric Cheng 29698275SEric Cheng MUTEX_EXIT(&ringp->lock); 29708275SEric Cheng return (0); 29718275SEric Cheng } 29728275SEric Cheng 29738275SEric Cheng /* 29748275SEric Cheng * Poll 'bytes_to_pickup' bytes of message from the rx ring. 
29758275SEric Cheng */ 29768275SEric Cheng mblk_t * 29778275SEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup) 29788275SEric Cheng { 29798275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 29808275SEric Cheng p_rx_rcr_ring_t rcr_p; 29818275SEric Cheng p_nxge_t nxgep; 29828275SEric Cheng npi_handle_t handle; 29838275SEric Cheng rx_dma_ctl_stat_t cs; 29848275SEric Cheng mblk_t *mblk; 29858275SEric Cheng p_nxge_ldv_t ldvp; 29868275SEric Cheng uint32_t channel; 29878275SEric Cheng 29888275SEric Cheng nxgep = ring_handle->nxgep; 29898275SEric Cheng 29908275SEric Cheng /* 29918275SEric Cheng * Get the control and status for this channel. 29928275SEric Cheng */ 29938275SEric Cheng handle = NXGE_DEV_NPI_HANDLE(nxgep); 29948275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 29958275SEric Cheng rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel]; 29968275SEric Cheng MUTEX_ENTER(&rcr_p->lock); 29978275SEric Cheng ASSERT(rcr_p->poll_flag == 1); 29988275SEric Cheng 29998275SEric Cheng RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value); 30008275SEric Cheng 30018275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 30028275SEric Cheng "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d", 30038275SEric Cheng rcr_p->rdc, rcr_p->poll_flag)); 30048275SEric Cheng mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup); 30058275SEric Cheng 30068275SEric Cheng ldvp = rcr_p->ldvp; 30078275SEric Cheng /* error events. 
*/ 30088275SEric Cheng if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) { 30098275SEric Cheng (void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs); 30108275SEric Cheng } 30118275SEric Cheng 30128275SEric Cheng MUTEX_EXIT(&rcr_p->lock); 30138275SEric Cheng 30148275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 30158275SEric Cheng "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk)); 30168275SEric Cheng return (mblk); 30178275SEric Cheng } 30188275SEric Cheng 30198275SEric Cheng 30203859Sml29623 /*ARGSUSED*/ 30213859Sml29623 static nxge_status_t 30226495Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs) 30233859Sml29623 { 30243859Sml29623 p_nxge_rx_ring_stats_t rdc_stats; 30253859Sml29623 npi_handle_t handle; 30263859Sml29623 npi_status_t rs; 30273859Sml29623 boolean_t rxchan_fatal = B_FALSE; 30283859Sml29623 boolean_t rxport_fatal = B_FALSE; 30293859Sml29623 uint8_t portn; 30303859Sml29623 nxge_status_t status = NXGE_OK; 30313859Sml29623 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 30323859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 30333859Sml29623 30343859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 30353859Sml29623 portn = nxgep->mac.portnum; 30366495Sspeer rdc_stats = &nxgep->statsp->rdc_stats[channel]; 30373859Sml29623 30383859Sml29623 if (cs.bits.hdw.rbr_tmout) { 30393859Sml29623 rdc_stats->rx_rbr_tmout++; 30403859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30416929Smisaki NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 30423859Sml29623 rxchan_fatal = B_TRUE; 30433859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30446929Smisaki "==> nxge_rx_err_evnts: rx_rbr_timeout")); 30453859Sml29623 } 30463859Sml29623 if (cs.bits.hdw.rsp_cnt_err) { 30473859Sml29623 rdc_stats->rsp_cnt_err++; 30483859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30496929Smisaki NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 30503859Sml29623 rxchan_fatal = B_TRUE; 30513859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30526929Smisaki "==> 
nxge_rx_err_evnts(channel %d): " 30536929Smisaki "rsp_cnt_err", channel)); 30543859Sml29623 } 30553859Sml29623 if (cs.bits.hdw.byte_en_bus) { 30563859Sml29623 rdc_stats->byte_en_bus++; 30573859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30586929Smisaki NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 30593859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30606929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30616929Smisaki "fatal error: byte_en_bus", channel)); 30623859Sml29623 rxchan_fatal = B_TRUE; 30633859Sml29623 } 30643859Sml29623 if (cs.bits.hdw.rsp_dat_err) { 30653859Sml29623 rdc_stats->rsp_dat_err++; 30663859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30676929Smisaki NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 30683859Sml29623 rxchan_fatal = B_TRUE; 30693859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30706929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30716929Smisaki "fatal error: rsp_dat_err", channel)); 30723859Sml29623 } 30733859Sml29623 if (cs.bits.hdw.rcr_ack_err) { 30743859Sml29623 rdc_stats->rcr_ack_err++; 30753859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30766929Smisaki NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 30773859Sml29623 rxchan_fatal = B_TRUE; 30783859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30796929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30806929Smisaki "fatal error: rcr_ack_err", channel)); 30813859Sml29623 } 30823859Sml29623 if (cs.bits.hdw.dc_fifo_err) { 30833859Sml29623 rdc_stats->dc_fifo_err++; 30843859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30856929Smisaki NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 30863859Sml29623 /* This is not a fatal error! 
*/ 30873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30886929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30896929Smisaki "dc_fifo_err", channel)); 30903859Sml29623 rxport_fatal = B_TRUE; 30913859Sml29623 } 30923859Sml29623 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 30933859Sml29623 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 30946929Smisaki &rdc_stats->errlog.pre_par, 30956929Smisaki &rdc_stats->errlog.sha_par)) 30966929Smisaki != NPI_SUCCESS) { 30973859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30986929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30996929Smisaki "rcr_sha_par: get perr", channel)); 31003859Sml29623 return (NXGE_ERROR | rs); 31013859Sml29623 } 31023859Sml29623 if (cs.bits.hdw.rcr_sha_par) { 31033859Sml29623 rdc_stats->rcr_sha_par++; 31043859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31056929Smisaki NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 31063859Sml29623 rxchan_fatal = B_TRUE; 31073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31086929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31096929Smisaki "fatal error: rcr_sha_par", channel)); 31103859Sml29623 } 31113859Sml29623 if (cs.bits.hdw.rbr_pre_par) { 31123859Sml29623 rdc_stats->rbr_pre_par++; 31133859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31146929Smisaki NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 31153859Sml29623 rxchan_fatal = B_TRUE; 31163859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31176929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31186929Smisaki "fatal error: rbr_pre_par", channel)); 31193859Sml29623 } 31203859Sml29623 } 31216172Syc148097 /* 31226172Syc148097 * The Following 4 status bits are for information, the system 31236172Syc148097 * is running fine. There is no need to send FMA ereports or 31246172Syc148097 * log messages. 
31256172Syc148097 */ 31263859Sml29623 if (cs.bits.hdw.port_drop_pkt) { 31273859Sml29623 rdc_stats->port_drop_pkt++; 31283859Sml29623 } 31293859Sml29623 if (cs.bits.hdw.wred_drop) { 31303859Sml29623 rdc_stats->wred_drop++; 31313859Sml29623 } 31323859Sml29623 if (cs.bits.hdw.rbr_pre_empty) { 31333859Sml29623 rdc_stats->rbr_pre_empty++; 31343859Sml29623 } 31353859Sml29623 if (cs.bits.hdw.rcr_shadow_full) { 31363859Sml29623 rdc_stats->rcr_shadow_full++; 31373859Sml29623 } 31383859Sml29623 if (cs.bits.hdw.config_err) { 31393859Sml29623 rdc_stats->config_err++; 31403859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31416929Smisaki NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 31423859Sml29623 rxchan_fatal = B_TRUE; 31433859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31446929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31456929Smisaki "config error", channel)); 31463859Sml29623 } 31473859Sml29623 if (cs.bits.hdw.rcrincon) { 31483859Sml29623 rdc_stats->rcrincon++; 31493859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31506929Smisaki NXGE_FM_EREPORT_RDMC_RCRINCON); 31513859Sml29623 rxchan_fatal = B_TRUE; 31523859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31536929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31546929Smisaki "fatal error: rcrincon error", channel)); 31553859Sml29623 } 31563859Sml29623 if (cs.bits.hdw.rcrfull) { 31573859Sml29623 rdc_stats->rcrfull++; 31583859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31596929Smisaki NXGE_FM_EREPORT_RDMC_RCRFULL); 31603859Sml29623 rxchan_fatal = B_TRUE; 31613859Sml29623 if (rdc_stats->rcrfull < error_disp_cnt) 31623859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31636929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31646929Smisaki "fatal error: rcrfull error", channel)); 31653859Sml29623 } 31663859Sml29623 if (cs.bits.hdw.rbr_empty) { 31676172Syc148097 /* 31686172Syc148097 * This bit is for information, there is no need 31696172Syc148097 * send FMA ereport or log a message. 
31706172Syc148097 */ 31713859Sml29623 rdc_stats->rbr_empty++; 31723859Sml29623 } 31733859Sml29623 if (cs.bits.hdw.rbrfull) { 31743859Sml29623 rdc_stats->rbrfull++; 31753859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31766929Smisaki NXGE_FM_EREPORT_RDMC_RBRFULL); 31773859Sml29623 rxchan_fatal = B_TRUE; 31783859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31796929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31806929Smisaki "fatal error: rbr_full error", channel)); 31813859Sml29623 } 31823859Sml29623 if (cs.bits.hdw.rbrlogpage) { 31833859Sml29623 rdc_stats->rbrlogpage++; 31843859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31856929Smisaki NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 31863859Sml29623 rxchan_fatal = B_TRUE; 31873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31886929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31896929Smisaki "fatal error: rbr logical page error", channel)); 31903859Sml29623 } 31913859Sml29623 if (cs.bits.hdw.cfiglogpage) { 31923859Sml29623 rdc_stats->cfiglogpage++; 31933859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 31946929Smisaki NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 31953859Sml29623 rxchan_fatal = B_TRUE; 31963859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31976929Smisaki "==> nxge_rx_err_evnts(channel %d): " 31986929Smisaki "fatal error: cfig logical page error", channel)); 31993859Sml29623 } 32003859Sml29623 32013859Sml29623 if (rxport_fatal) { 32023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32036495Sspeer " nxge_rx_err_evnts: fatal error on Port #%d\n", 32046495Sspeer portn)); 32056495Sspeer if (isLDOMguest(nxgep)) { 32066495Sspeer status = NXGE_ERROR; 32076495Sspeer } else { 32086495Sspeer status = nxge_ipp_fatal_err_recover(nxgep); 32096495Sspeer if (status == NXGE_OK) { 32106495Sspeer FM_SERVICE_RESTORED(nxgep); 32116495Sspeer } 32123859Sml29623 } 32133859Sml29623 } 32143859Sml29623 32153859Sml29623 if (rxchan_fatal) { 32163859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 32176495Sspeer " 
/*
 * nxge_rdc_hvio_setup
 *
 *	Record the hypervisor (HV) view of one RDC's DMA memory: the
 *	original physical base address and size of both the data-buffer
 *	area and the control (descriptor) area are copied from the
 *	channel's DMA bookkeeping into the RBR ring structure, and
 *	hv_set is cleared to note that the HV has not been told yet.
 *
 * Arguments:
 *	nxgep
 *	channel		The RDC whose rings are being mapped.
 *
 * Notes:
 *	Compiled only for sun4v when NIU_LP_WORKAROUND is defined; the
 *	exact meaning of NIU_LP_WORKAROUND is not visible from this
 *	file -- TODO(review): confirm against the build documentation.
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Any domain
 */
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
static void
nxge_rdc_hvio_setup(
	nxge_t *nxgep, int channel)
{
	nxge_dma_common_t	*dma_common;
	nxge_dma_common_t	*dma_control;
	rx_rbr_ring_t		*ring;

	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];

	/* The HV addresses below have not been handed over yet. */
	ring->hv_set = B_FALSE;

	/* Data buffer area. */
	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
	    dma_common->orig_ioaddr_pp;
	ring->hv_rx_buf_ioaddr_size = (uint64_t)
	    dma_common->orig_alength;

	/*
	 * BUGFIX: both debug messages previously claimed to come from
	 * "nxge_map_rxdma_channel" (copy/paste); they now name this
	 * function so debug traces are attributable.
	 */
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
	    channel, ring->hv_rx_buf_base_ioaddr_pp,
	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
	    dma_common->orig_alength, dma_common->orig_alength));

	/* Control (descriptor) area. */
	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];

	ring->hv_rx_cntl_base_ioaddr_pp =
	    (uint64_t)dma_control->orig_ioaddr_pp;
	ring->hv_rx_cntl_ioaddr_size =
	    (uint64_t)dma_control->orig_alength;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
	    dma_control->orig_alength, dma_control->orig_alength));
}
#endif
33056495Sspeer * 33066495Sspeer * NPI/NXGE function calls: 33076495Sspeer * nxge_alloc_rx_mem_pool() 33086495Sspeer * nxge_alloc_rbb() 33096495Sspeer * nxge_map_rxdma_channel() 33106495Sspeer * 33116495Sspeer * Registers accessed: 33126495Sspeer * 33136495Sspeer * Context: 33146495Sspeer * Any domain 33156495Sspeer */ 33166495Sspeer static nxge_status_t 33176495Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel) 33186495Sspeer { 33196495Sspeer nxge_dma_common_t **data; 33206495Sspeer nxge_dma_common_t **control; 33216495Sspeer rx_rbr_ring_t **rbr_ring; 33226495Sspeer rx_rcr_ring_t **rcr_ring; 33236495Sspeer rx_mbox_t **mailbox; 33246495Sspeer uint32_t chunks; 33256495Sspeer 33266495Sspeer nxge_status_t status; 33276495Sspeer 33283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 33293859Sml29623 33306495Sspeer if (!nxgep->rx_buf_pool_p) { 33316495Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 33326495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33336495Sspeer "<== nxge_map_rxdma: buf not allocated")); 33346495Sspeer return (NXGE_ERROR); 33356495Sspeer } 33363859Sml29623 } 33373859Sml29623 33386495Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 33396495Sspeer return (NXGE_ERROR); 33403859Sml29623 33413859Sml29623 /* 33426495Sspeer * Map descriptors from the buffer polls for each dma channel. 33436495Sspeer */ 33446495Sspeer 33456495Sspeer /* 33466495Sspeer * Set up and prepare buffer blocks, descriptors 33476495Sspeer * and mailbox. 
33483859Sml29623 */ 33496495Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 33506495Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 33516495Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 33526495Sspeer 33536495Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 33546495Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 33556495Sspeer 33566495Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 33576495Sspeer 33586495Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 33596495Sspeer chunks, control, rcr_ring, mailbox); 33606495Sspeer if (status != NXGE_OK) { 33616495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 33626929Smisaki "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 33636929Smisaki "returned 0x%x", 33646929Smisaki channel, status)); 33656495Sspeer return (status); 33666495Sspeer } 33676495Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 33686495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 33696495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 33706495Sspeer &nxgep->statsp->rdc_stats[channel]; 33713859Sml29623 33723859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 33736495Sspeer if (!isLDOMguest(nxgep)) 33746495Sspeer nxge_rdc_hvio_setup(nxgep, channel); 33756495Sspeer #endif 33766495Sspeer 33773859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33786495Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 33793859Sml29623 33803859Sml29623 return (status); 33813859Sml29623 } 33823859Sml29623 33833859Sml29623 static void 33846495Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 33853859Sml29623 { 33866495Sspeer rx_rbr_ring_t *rbr_ring; 33876495Sspeer rx_rcr_ring_t *rcr_ring; 33886495Sspeer rx_mbox_t *mailbox; 33896495Sspeer 33906495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 33916495Sspeer 33926495Sspeer if (!nxgep->rx_rbr_rings || 
!nxgep->rx_rcr_rings || 33936495Sspeer !nxgep->rx_mbox_areas_p) 33943859Sml29623 return; 33956495Sspeer 33966495Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 33976495Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 33986495Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 33996495Sspeer 34006495Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 34013859Sml29623 return; 34026495Sspeer 34036495Sspeer (void) nxge_unmap_rxdma_channel( 34046929Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox); 34056495Sspeer 34066495Sspeer nxge_free_rxb(nxgep, channel); 34076495Sspeer 34086495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 34093859Sml29623 } 34103859Sml29623 34113859Sml29623 nxge_status_t 34123859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 34133859Sml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 34143859Sml29623 uint32_t num_chunks, 34153859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 34163859Sml29623 p_rx_mbox_t *rx_mbox_p) 34173859Sml29623 { 34183859Sml29623 int status = NXGE_OK; 34193859Sml29623 34203859Sml29623 /* 34213859Sml29623 * Set up and prepare buffer blocks, descriptors 34223859Sml29623 * and mailbox. 34233859Sml29623 */ 34243859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34256929Smisaki "==> nxge_map_rxdma_channel (channel %d)", channel)); 34263859Sml29623 /* 34273859Sml29623 * Receive buffer blocks 34283859Sml29623 */ 34293859Sml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 34306929Smisaki dma_buf_p, rbr_p, num_chunks); 34313859Sml29623 if (status != NXGE_OK) { 34323859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34336929Smisaki "==> nxge_map_rxdma_channel (channel %d): " 34346929Smisaki "map buffer failed 0x%x", channel, status)); 34353859Sml29623 goto nxge_map_rxdma_channel_exit; 34363859Sml29623 } 34373859Sml29623 34383859Sml29623 /* 34393859Sml29623 * Receive block ring, completion ring and mailbox. 
34403859Sml29623 */ 34413859Sml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 34426929Smisaki dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 34433859Sml29623 if (status != NXGE_OK) { 34443859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34456929Smisaki "==> nxge_map_rxdma_channel (channel %d): " 34466929Smisaki "map config failed 0x%x", channel, status)); 34473859Sml29623 goto nxge_map_rxdma_channel_fail2; 34483859Sml29623 } 34493859Sml29623 34503859Sml29623 goto nxge_map_rxdma_channel_exit; 34513859Sml29623 34523859Sml29623 nxge_map_rxdma_channel_fail3: 34533859Sml29623 /* Free rbr, rcr */ 34543859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34556929Smisaki "==> nxge_map_rxdma_channel: free rbr/rcr " 34566929Smisaki "(status 0x%x channel %d)", 34576929Smisaki status, channel)); 34583859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34596929Smisaki *rcr_p, *rx_mbox_p); 34603859Sml29623 34613859Sml29623 nxge_map_rxdma_channel_fail2: 34623859Sml29623 /* Free buffer blocks */ 34633859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34646929Smisaki "==> nxge_map_rxdma_channel: free rx buffers" 34656929Smisaki "(nxgep 0x%x status 0x%x channel %d)", 34666929Smisaki nxgep, status, channel)); 34673859Sml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 34683859Sml29623 34694185Sspeer status = NXGE_ERROR; 34704185Sspeer 34713859Sml29623 nxge_map_rxdma_channel_exit: 34723859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34736929Smisaki "<== nxge_map_rxdma_channel: " 34746929Smisaki "(nxgep 0x%x status 0x%x channel %d)", 34756929Smisaki nxgep, status, channel)); 34763859Sml29623 34773859Sml29623 return (status); 34783859Sml29623 } 34793859Sml29623 34803859Sml29623 /*ARGSUSED*/ 34813859Sml29623 static void 34823859Sml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 34833859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 34843859Sml29623 { 34853859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34866929Smisaki "==> 
nxge_unmap_rxdma_channel (channel %d)", channel)); 34873859Sml29623 34883859Sml29623 /* 34893859Sml29623 * unmap receive block ring, completion ring and mailbox. 34903859Sml29623 */ 34913859Sml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 34926929Smisaki rcr_p, rx_mbox_p); 34933859Sml29623 34943859Sml29623 /* unmap buffer blocks */ 34953859Sml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 34963859Sml29623 34973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 34983859Sml29623 } 34993859Sml29623 35003859Sml29623 /*ARGSUSED*/ 35013859Sml29623 static nxge_status_t 35023859Sml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 35033859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 35043859Sml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 35053859Sml29623 { 35063859Sml29623 p_rx_rbr_ring_t rbrp; 35073859Sml29623 p_rx_rcr_ring_t rcrp; 35083859Sml29623 p_rx_mbox_t mboxp; 35093859Sml29623 p_nxge_dma_common_t cntl_dmap; 35103859Sml29623 p_nxge_dma_common_t dmap; 35113859Sml29623 p_rx_msg_t *rx_msg_ring; 35123859Sml29623 p_rx_msg_t rx_msg_p; 35133859Sml29623 p_rbr_cfig_a_t rcfga_p; 35143859Sml29623 p_rbr_cfig_b_t rcfgb_p; 35153859Sml29623 p_rcrcfig_a_t cfga_p; 35163859Sml29623 p_rcrcfig_b_t cfgb_p; 35173859Sml29623 p_rxdma_cfig1_t cfig1_p; 35183859Sml29623 p_rxdma_cfig2_t cfig2_p; 35193859Sml29623 p_rbr_kick_t kick_p; 35203859Sml29623 uint32_t dmaaddrp; 35213859Sml29623 uint32_t *rbr_vaddrp; 35223859Sml29623 uint32_t bkaddr; 35233859Sml29623 nxge_status_t status = NXGE_OK; 35243859Sml29623 int i; 35253859Sml29623 uint32_t nxge_port_rcr_size; 35263859Sml29623 35273859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35286929Smisaki "==> nxge_map_rxdma_channel_cfg_ring")); 35293859Sml29623 35303859Sml29623 cntl_dmap = *dma_cntl_p; 35313859Sml29623 35323859Sml29623 /* Map in the receive block ring */ 35333859Sml29623 rbrp = *rbr_p; 35343859Sml29623 dmap = 
(p_nxge_dma_common_t)&rbrp->rbr_desc; 35353859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 35363859Sml29623 /* 35373859Sml29623 * Zero out buffer block ring descriptors. 35383859Sml29623 */ 35393859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 35403859Sml29623 35413859Sml29623 rcfga_p = &(rbrp->rbr_cfga); 35423859Sml29623 rcfgb_p = &(rbrp->rbr_cfgb); 35433859Sml29623 kick_p = &(rbrp->rbr_kick); 35443859Sml29623 rcfga_p->value = 0; 35453859Sml29623 rcfgb_p->value = 0; 35463859Sml29623 kick_p->value = 0; 35473859Sml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 35483859Sml29623 rcfga_p->value = (rbrp->rbr_addr & 35496929Smisaki (RBR_CFIG_A_STDADDR_MASK | 35506929Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 35513859Sml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 35523859Sml29623 35533859Sml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 35543859Sml29623 rcfgb_p->bits.ldw.vld0 = 1; 35553859Sml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 35563859Sml29623 rcfgb_p->bits.ldw.vld1 = 1; 35573859Sml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 35583859Sml29623 rcfgb_p->bits.ldw.vld2 = 1; 35593859Sml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 35603859Sml29623 35613859Sml29623 /* 35623859Sml29623 * For each buffer block, enter receive block address to the ring. 
35633859Sml29623 */ 35643859Sml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 35653859Sml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 35663859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35676929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 35686929Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 35693859Sml29623 35703859Sml29623 rx_msg_ring = rbrp->rx_msg_ring; 35713859Sml29623 for (i = 0; i < rbrp->tnblocks; i++) { 35723859Sml29623 rx_msg_p = rx_msg_ring[i]; 35733859Sml29623 rx_msg_p->nxgep = nxgep; 35743859Sml29623 rx_msg_p->rx_rbr_p = rbrp; 35753859Sml29623 bkaddr = (uint32_t) 35766929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 35776929Smisaki >> RBR_BKADDR_SHIFT)); 35783859Sml29623 rx_msg_p->free = B_FALSE; 35793859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 35803859Sml29623 35813859Sml29623 *rbr_vaddrp++ = bkaddr; 35823859Sml29623 } 35833859Sml29623 35843859Sml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 35853859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 35863859Sml29623 35873859Sml29623 rbrp->rbr_rd_index = 0; 35883859Sml29623 35893859Sml29623 rbrp->rbr_consumed = 0; 35903859Sml29623 rbrp->rbr_use_bcopy = B_TRUE; 35913859Sml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 35923859Sml29623 /* 35933859Sml29623 * Do bcopy on packets greater than bcopy size once 35943859Sml29623 * the lo threshold is reached. 35953859Sml29623 * This lo threshold should be less than the hi threshold. 35963859Sml29623 * 35973859Sml29623 * Do bcopy on every packet once the hi threshold is reached. 
35983859Sml29623 */ 35993859Sml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 36003859Sml29623 /* default it to use hi */ 36013859Sml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 36023859Sml29623 } 36033859Sml29623 36043859Sml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 36053859Sml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 36063859Sml29623 } 36073859Sml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 36083859Sml29623 36093859Sml29623 switch (nxge_rx_threshold_hi) { 36103859Sml29623 default: 36113859Sml29623 case NXGE_RX_COPY_NONE: 36123859Sml29623 /* Do not do bcopy at all */ 36133859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 36143859Sml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 36153859Sml29623 break; 36163859Sml29623 36173859Sml29623 case NXGE_RX_COPY_1: 36183859Sml29623 case NXGE_RX_COPY_2: 36193859Sml29623 case NXGE_RX_COPY_3: 36203859Sml29623 case NXGE_RX_COPY_4: 36213859Sml29623 case NXGE_RX_COPY_5: 36223859Sml29623 case NXGE_RX_COPY_6: 36233859Sml29623 case NXGE_RX_COPY_7: 36243859Sml29623 rbrp->rbr_threshold_hi = 36256929Smisaki rbrp->rbb_max * 36266929Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 36273859Sml29623 break; 36283859Sml29623 36293859Sml29623 case NXGE_RX_COPY_ALL: 36303859Sml29623 rbrp->rbr_threshold_hi = 0; 36313859Sml29623 break; 36323859Sml29623 } 36333859Sml29623 36343859Sml29623 switch (nxge_rx_threshold_lo) { 36353859Sml29623 default: 36363859Sml29623 case NXGE_RX_COPY_NONE: 36373859Sml29623 /* Do not do bcopy at all */ 36383859Sml29623 if (rbrp->rbr_use_bcopy) { 36393859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 36403859Sml29623 } 36413859Sml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 36423859Sml29623 break; 36433859Sml29623 36443859Sml29623 case NXGE_RX_COPY_1: 36453859Sml29623 case NXGE_RX_COPY_2: 36463859Sml29623 case NXGE_RX_COPY_3: 36473859Sml29623 case NXGE_RX_COPY_4: 36483859Sml29623 case NXGE_RX_COPY_5: 36493859Sml29623 case NXGE_RX_COPY_6: 36503859Sml29623 case NXGE_RX_COPY_7: 36513859Sml29623 
rbrp->rbr_threshold_lo = 36526929Smisaki rbrp->rbb_max * 36536929Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 36543859Sml29623 break; 36553859Sml29623 36563859Sml29623 case NXGE_RX_COPY_ALL: 36573859Sml29623 rbrp->rbr_threshold_lo = 0; 36583859Sml29623 break; 36593859Sml29623 } 36603859Sml29623 36613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 36626929Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 36636929Smisaki "rbb_max %d " 36646929Smisaki "rbrp->rbr_bufsize_type %d " 36656929Smisaki "rbb_threshold_hi %d " 36666929Smisaki "rbb_threshold_lo %d", 36676929Smisaki dma_channel, 36686929Smisaki rbrp->rbb_max, 36696929Smisaki rbrp->rbr_bufsize_type, 36706929Smisaki rbrp->rbr_threshold_hi, 36716929Smisaki rbrp->rbr_threshold_lo)); 36723859Sml29623 36733859Sml29623 rbrp->page_valid.value = 0; 36743859Sml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 36753859Sml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 36763859Sml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 36773859Sml29623 rbrp->page_hdl.value = 0; 36783859Sml29623 36793859Sml29623 rbrp->page_valid.bits.ldw.page0 = 1; 36803859Sml29623 rbrp->page_valid.bits.ldw.page1 = 1; 36813859Sml29623 36823859Sml29623 /* Map in the receive completion ring */ 36833859Sml29623 rcrp = (p_rx_rcr_ring_t) 36846929Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 36853859Sml29623 rcrp->rdc = dma_channel; 36863859Sml29623 36873859Sml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 36883859Sml29623 rcrp->comp_size = nxge_port_rcr_size; 36893859Sml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 36903859Sml29623 36913859Sml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 36923859Sml29623 36933859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 36943859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 36956929Smisaki sizeof (rcr_entry_t)); 36963859Sml29623 rcrp->comp_rd_index = 0; 36973859Sml29623 rcrp->comp_wt_index = 0; 36983859Sml29623 
rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 36996929Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 37005125Sjoycey #if defined(__i386) 37016929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 37026929Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 37035125Sjoycey #else 37046929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 37056929Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 37065125Sjoycey #endif 37073859Sml29623 37083859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 37096929Smisaki (nxge_port_rcr_size - 1); 37103859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 37116929Smisaki (nxge_port_rcr_size - 1); 37123859Sml29623 37133859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37146929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37156929Smisaki "channel %d " 37166929Smisaki "rbr_vaddrp $%p " 37176929Smisaki "rcr_desc_rd_head_p $%p " 37186929Smisaki "rcr_desc_rd_head_pp $%p " 37196929Smisaki "rcr_desc_rd_last_p $%p " 37206929Smisaki "rcr_desc_rd_last_pp $%p ", 37216929Smisaki dma_channel, 37226929Smisaki rbr_vaddrp, 37236929Smisaki rcrp->rcr_desc_rd_head_p, 37246929Smisaki rcrp->rcr_desc_rd_head_pp, 37256929Smisaki rcrp->rcr_desc_last_p, 37266929Smisaki rcrp->rcr_desc_last_pp)); 37273859Sml29623 37283859Sml29623 /* 37293859Sml29623 * Zero out buffer block ring descriptors. 37303859Sml29623 */ 37313859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 37328661SSantwona.Behera@Sun.COM 37338661SSantwona.Behera@Sun.COM rcrp->intr_timeout = (nxgep->intr_timeout < 37348661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_TIMEOUT_MIN) ? NXGE_RDC_RCR_TIMEOUT_MIN : 37358661SSantwona.Behera@Sun.COM nxgep->intr_timeout; 37368661SSantwona.Behera@Sun.COM 37378661SSantwona.Behera@Sun.COM rcrp->intr_threshold = (nxgep->intr_threshold < 37388661SSantwona.Behera@Sun.COM NXGE_RDC_RCR_THRESHOLD_MIN) ? 
NXGE_RDC_RCR_THRESHOLD_MIN : 37398661SSantwona.Behera@Sun.COM nxgep->intr_threshold; 37408661SSantwona.Behera@Sun.COM 37413859Sml29623 rcrp->full_hdr_flag = B_FALSE; 37423859Sml29623 rcrp->sw_priv_hdr_len = 0; 37433859Sml29623 37443859Sml29623 cfga_p = &(rcrp->rcr_cfga); 37453859Sml29623 cfgb_p = &(rcrp->rcr_cfgb); 37463859Sml29623 cfga_p->value = 0; 37473859Sml29623 cfgb_p->value = 0; 37483859Sml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 37493859Sml29623 cfga_p->value = (rcrp->rcr_addr & 37506929Smisaki (RCRCFIG_A_STADDR_MASK | 37516929Smisaki RCRCFIG_A_STADDR_BASE_MASK)); 37523859Sml29623 37533859Sml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 37546929Smisaki RCRCFIG_A_LEN_SHIF); 37553859Sml29623 37563859Sml29623 /* 37573859Sml29623 * Timeout should be set based on the system clock divider. 37588661SSantwona.Behera@Sun.COM * A timeout value of 1 assumes that the 37593859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 37603859Sml29623 */ 37613859Sml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 37623859Sml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 37633859Sml29623 cfgb_p->bits.ldw.entout = 1; 37643859Sml29623 37653859Sml29623 /* Map in the mailbox */ 37663859Sml29623 mboxp = (p_rx_mbox_t) 37676929Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 37683859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 37693859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 37703859Sml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 37713859Sml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 37723859Sml29623 cfig1_p->value = cfig2_p->value = 0; 37733859Sml29623 37743859Sml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 37753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37766929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37776929Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 37786929Smisaki dma_channel, cfig1_p->value, cfig2_p->value, 37796929Smisaki 
mboxp->mbox_addr)); 37803859Sml29623 37813859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 37826929Smisaki & 0xfff); 37833859Sml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 37843859Sml29623 37853859Sml29623 37863859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 37873859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 37886929Smisaki RXDMA_CFIG2_MBADDR_L_MASK); 37893859Sml29623 37903859Sml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 37913859Sml29623 37923859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37936929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 37946929Smisaki "channel %d damaddrp $%p " 37956929Smisaki "cfg1 0x%016llx cfig2 0x%016llx", 37966929Smisaki dma_channel, dmaaddrp, 37976929Smisaki cfig1_p->value, cfig2_p->value)); 37983859Sml29623 37993859Sml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 38003859Sml29623 cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len; 38013859Sml29623 38023859Sml29623 rbrp->rx_rcr_p = rcrp; 38033859Sml29623 rcrp->rx_rbr_p = rbrp; 38043859Sml29623 *rcr_p = rcrp; 38053859Sml29623 *rx_mbox_p = mboxp; 38063859Sml29623 38073859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38086929Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 38093859Sml29623 38103859Sml29623 return (status); 38113859Sml29623 } 38123859Sml29623 38133859Sml29623 /*ARGSUSED*/ 38143859Sml29623 static void 38153859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 38163859Sml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 38173859Sml29623 { 38183859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38196929Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 38206929Smisaki rcr_p->rdc)); 38213859Sml29623 38223859Sml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 38233859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 38243859Sml29623 38253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38266929Smisaki "<== 
nxge_unmap_rxdma_channel_cfg_ring")); 38273859Sml29623 } 38283859Sml29623 38293859Sml29623 static nxge_status_t 38303859Sml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 38313859Sml29623 p_nxge_dma_common_t *dma_buf_p, 38323859Sml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 38333859Sml29623 { 38343859Sml29623 p_rx_rbr_ring_t rbrp; 38353859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 38363859Sml29623 p_rx_msg_t *rx_msg_ring; 38373859Sml29623 p_rx_msg_t rx_msg_p; 38383859Sml29623 p_mblk_t mblk_p; 38393859Sml29623 38403859Sml29623 rxring_info_t *ring_info; 38413859Sml29623 nxge_status_t status = NXGE_OK; 38423859Sml29623 int i, j, index; 38433859Sml29623 uint32_t size, bsize, nblocks, nmsgs; 38443859Sml29623 38453859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38466929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d", 38476929Smisaki channel)); 38483859Sml29623 38493859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 38503859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38516929Smisaki " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 38526929Smisaki "chunks bufp 0x%016llx", 38536929Smisaki channel, num_chunks, dma_bufp)); 38543859Sml29623 38553859Sml29623 nmsgs = 0; 38563859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 38573859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38586929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 38596929Smisaki "bufp 0x%016llx nblocks %d nmsgs %d", 38606929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 38613859Sml29623 nmsgs += tmp_bufp->nblocks; 38623859Sml29623 } 38633859Sml29623 if (!nmsgs) { 38644185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 38656929Smisaki "<== nxge_map_rxdma_channel_buf_ring: channel %d " 38666929Smisaki "no msg blocks", 38676929Smisaki channel)); 38683859Sml29623 status = NXGE_ERROR; 38693859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 38703859Sml29623 } 38713859Sml29623 38725170Stm144005 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), 
KM_SLEEP); 38733859Sml29623 38743859Sml29623 size = nmsgs * sizeof (p_rx_msg_t); 38753859Sml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 38763859Sml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 38776929Smisaki KM_SLEEP); 38783859Sml29623 38793859Sml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 38806929Smisaki (void *)nxgep->interrupt_cookie); 38813859Sml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 38826929Smisaki (void *)nxgep->interrupt_cookie); 38833859Sml29623 rbrp->rdc = channel; 38843859Sml29623 rbrp->num_blocks = num_chunks; 38853859Sml29623 rbrp->tnblocks = nmsgs; 38863859Sml29623 rbrp->rbb_max = nmsgs; 38873859Sml29623 rbrp->rbr_max_size = nmsgs; 38883859Sml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 38893859Sml29623 38903859Sml29623 /* 38913859Sml29623 * Buffer sizes suggested by NIU architect. 38923859Sml29623 * 256, 512 and 2K. 38933859Sml29623 */ 38943859Sml29623 38953859Sml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 38963859Sml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 38973859Sml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 38983859Sml29623 38993859Sml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 39003859Sml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 39013859Sml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 39023859Sml29623 39033859Sml29623 rbrp->block_size = nxgep->rx_default_block_size; 39043859Sml29623 39053859Sml29623 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 39063859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 39073859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 39083859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 39093859Sml29623 } else { 39103859Sml29623 if (rbrp->block_size >= 0x2000) { 39113859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 39123859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 39133859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 39143859Sml29623 } else { 39153859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 
39163859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 39173859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 39183859Sml29623 } 39193859Sml29623 } 39203859Sml29623 39213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39226929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 39236929Smisaki "actual rbr max %d rbb_max %d nmsgs %d " 39246929Smisaki "rbrp->block_size %d default_block_size %d " 39256929Smisaki "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 39266929Smisaki channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 39276929Smisaki rbrp->block_size, nxgep->rx_default_block_size, 39286929Smisaki nxge_rbr_size, nxge_rbr_spare_size)); 39293859Sml29623 39303859Sml29623 /* Map in buffers from the buffer pool. */ 39313859Sml29623 index = 0; 39323859Sml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 39333859Sml29623 bsize = dma_bufp->block_size; 39343859Sml29623 nblocks = dma_bufp->nblocks; 39355125Sjoycey #if defined(__i386) 39365125Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 39375125Sjoycey #else 39383859Sml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 39395125Sjoycey #endif 39403859Sml29623 ring_info->buffer[i].buf_index = i; 39413859Sml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 39423859Sml29623 ring_info->buffer[i].start_index = index; 39435125Sjoycey #if defined(__i386) 39445125Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 39455125Sjoycey #else 39463859Sml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 39475125Sjoycey #endif 39483859Sml29623 39493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39506929Smisaki " nxge_map_rxdma_channel_buf_ring: map channel %d " 39516929Smisaki "chunk %d" 39526929Smisaki " nblocks %d chunk_size %x block_size 0x%x " 39536929Smisaki "dma_bufp $%p", channel, i, 39546929Smisaki dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 39556929Smisaki dma_bufp)); 39563859Sml29623 39573859Sml29623 for (j = 0; j < 
nblocks; j++) { 39583859Sml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 39596929Smisaki dma_bufp)) == NULL) { 39604185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39616929Smisaki "allocb failed (index %d i %d j %d)", 39626929Smisaki index, i, j)); 39634185Sspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 39643859Sml29623 } 39653859Sml29623 rx_msg_ring[index] = rx_msg_p; 39663859Sml29623 rx_msg_p->block_index = index; 39673859Sml29623 rx_msg_p->shifted_addr = (uint32_t) 39686929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 39696929Smisaki RBR_BKADDR_SHIFT)); 39703859Sml29623 39713859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39726929Smisaki "index %d j %d rx_msg_p $%p mblk %p", 39736929Smisaki index, j, rx_msg_p, rx_msg_p->rx_mblk_p)); 39743859Sml29623 39753859Sml29623 mblk_p = rx_msg_p->rx_mblk_p; 39763859Sml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 39775170Stm144005 39785170Stm144005 rbrp->rbr_ref_cnt++; 39793859Sml29623 index++; 39803859Sml29623 rx_msg_p->buf_dma.dma_channel = channel; 39813859Sml29623 } 39826495Sspeer 39836495Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 39846495Sspeer if (dma_bufp->contig_alloc_type) { 39856495Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 39866495Sspeer } 39876495Sspeer 39886495Sspeer if (dma_bufp->kmem_alloc_type) { 39896495Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 39906495Sspeer } 39916495Sspeer 39926495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39936495Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 39946495Sspeer "chunk %d" 39956495Sspeer " nblocks %d chunk_size %x block_size 0x%x " 39966495Sspeer "dma_bufp $%p", 39976495Sspeer channel, i, 39986495Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 39996495Sspeer dma_bufp)); 40003859Sml29623 } 40013859Sml29623 if (i < rbrp->num_blocks) { 40023859Sml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 40033859Sml29623 } 40043859Sml29623 40053859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40066929Smisaki "nxge_map_rxdma_channel_buf_ring: 
done buf init " 40076929Smisaki "channel %d msg block entries %d", 40086929Smisaki channel, index)); 40093859Sml29623 ring_info->block_size_mask = bsize - 1; 40103859Sml29623 rbrp->rx_msg_ring = rx_msg_ring; 40113859Sml29623 rbrp->dma_bufp = dma_buf_p; 40123859Sml29623 rbrp->ring_info = ring_info; 40133859Sml29623 40143859Sml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 40153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40166929Smisaki " nxge_map_rxdma_channel_buf_ring: " 40176929Smisaki "channel %d done buf info init", channel)); 40183859Sml29623 40195170Stm144005 /* 40205170Stm144005 * Finally, permit nxge_freeb() to call nxge_post_page(). 40215170Stm144005 */ 40225170Stm144005 rbrp->rbr_state = RBR_POSTING; 40235170Stm144005 40243859Sml29623 *rbr_p = rbrp; 40253859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 40263859Sml29623 40273859Sml29623 nxge_map_rxdma_channel_buf_ring_fail1: 40283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40296929Smisaki " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 40306929Smisaki channel, status)); 40313859Sml29623 40323859Sml29623 index--; 40333859Sml29623 for (; index >= 0; index--) { 40343859Sml29623 rx_msg_p = rx_msg_ring[index]; 40353859Sml29623 if (rx_msg_p != NULL) { 40363859Sml29623 freeb(rx_msg_p->rx_mblk_p); 40373859Sml29623 rx_msg_ring[index] = NULL; 40383859Sml29623 } 40393859Sml29623 } 40403859Sml29623 nxge_map_rxdma_channel_buf_ring_fail: 40413859Sml29623 MUTEX_DESTROY(&rbrp->post_lock); 40423859Sml29623 MUTEX_DESTROY(&rbrp->lock); 40433859Sml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 40443859Sml29623 KMEM_FREE(rx_msg_ring, size); 40453859Sml29623 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 40463859Sml29623 40474185Sspeer status = NXGE_ERROR; 40484185Sspeer 40493859Sml29623 nxge_map_rxdma_channel_buf_ring_exit: 40503859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40516929Smisaki "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 40523859Sml29623 40533859Sml29623 return (status); 
40543859Sml29623 } 40553859Sml29623 40563859Sml29623 /*ARGSUSED*/ 40573859Sml29623 static void 40583859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 40593859Sml29623 p_rx_rbr_ring_t rbr_p) 40603859Sml29623 { 40613859Sml29623 p_rx_msg_t *rx_msg_ring; 40623859Sml29623 p_rx_msg_t rx_msg_p; 40633859Sml29623 rxring_info_t *ring_info; 40643859Sml29623 int i; 40653859Sml29623 uint32_t size; 40663859Sml29623 #ifdef NXGE_DEBUG 40673859Sml29623 int num_chunks; 40683859Sml29623 #endif 40693859Sml29623 40703859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40716929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring")); 40723859Sml29623 if (rbr_p == NULL) { 40733859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40746929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 40753859Sml29623 return; 40763859Sml29623 } 40773859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40786929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 40796929Smisaki rbr_p->rdc)); 40803859Sml29623 40813859Sml29623 rx_msg_ring = rbr_p->rx_msg_ring; 40823859Sml29623 ring_info = rbr_p->ring_info; 40833859Sml29623 40843859Sml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 40856929Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40866929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: " 40876929Smisaki "rx_msg_ring $%p ring_info $%p", 40886929Smisaki rx_msg_p, ring_info)); 40893859Sml29623 return; 40903859Sml29623 } 40913859Sml29623 40923859Sml29623 #ifdef NXGE_DEBUG 40933859Sml29623 num_chunks = rbr_p->num_blocks; 40943859Sml29623 #endif 40953859Sml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 40963859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40976929Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 40986929Smisaki "tnblocks %d (max %d) size ptrs %d ", 40996929Smisaki rbr_p->rdc, num_chunks, 41006929Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 41013859Sml29623 41023859Sml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 41033859Sml29623 rx_msg_p = rx_msg_ring[i]; 
41043859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41056929Smisaki " nxge_unmap_rxdma_channel_buf_ring: " 41066929Smisaki "rx_msg_p $%p", 41076929Smisaki rx_msg_p)); 41083859Sml29623 if (rx_msg_p != NULL) { 41093859Sml29623 freeb(rx_msg_p->rx_mblk_p); 41103859Sml29623 rx_msg_ring[i] = NULL; 41113859Sml29623 } 41123859Sml29623 } 41133859Sml29623 41145170Stm144005 /* 41155170Stm144005 * We no longer may use the mutex <post_lock>. By setting 41165170Stm144005 * <rbr_state> to anything but POSTING, we prevent 41175170Stm144005 * nxge_post_page() from accessing a dead mutex. 41185170Stm144005 */ 41195170Stm144005 rbr_p->rbr_state = RBR_UNMAPPING; 41203859Sml29623 MUTEX_DESTROY(&rbr_p->post_lock); 41215170Stm144005 41223859Sml29623 MUTEX_DESTROY(&rbr_p->lock); 41235170Stm144005 41245170Stm144005 if (rbr_p->rbr_ref_cnt == 0) { 41256495Sspeer /* 41266495Sspeer * This is the normal state of affairs. 41276495Sspeer * Need to free the following buffers: 41286495Sspeer * - data buffers 41296495Sspeer * - rx_msg ring 41306495Sspeer * - ring_info 41316495Sspeer * - rbr ring 41326495Sspeer */ 41336495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 41346495Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 41356495Sspeer nxge_rxdma_databuf_free(rbr_p); 41366495Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 41376495Sspeer KMEM_FREE(rx_msg_ring, size); 41385170Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 41395170Stm144005 } else { 41405170Stm144005 /* 41415170Stm144005 * Some of our buffers are still being used. 41425170Stm144005 * Therefore, tell nxge_freeb() this ring is 41435170Stm144005 * unmapped, so it may free <rbr_p> for us. 41445170Stm144005 */ 41455170Stm144005 rbr_p->rbr_state = RBR_UNMAPPED; 41465170Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41475170Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.", 41485170Stm144005 rbr_p->rbr_ref_cnt, 41495170Stm144005 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 41505170Stm144005 } 41513859Sml29623 41523859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41536929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring")); 41543859Sml29623 } 41553859Sml29623 41566495Sspeer /* 41576495Sspeer * nxge_rxdma_hw_start_common 41586495Sspeer * 41596495Sspeer * Arguments: 41606495Sspeer * nxgep 41616495Sspeer * 41626495Sspeer * Notes: 41636495Sspeer * 41646495Sspeer * NPI/NXGE function calls: 41656495Sspeer * nxge_init_fzc_rx_common(); 41666495Sspeer * nxge_init_fzc_rxdma_port(); 41676495Sspeer * 41686495Sspeer * Registers accessed: 41696495Sspeer * 41706495Sspeer * Context: 41716495Sspeer * Service domain 41726495Sspeer */ 41733859Sml29623 static nxge_status_t 41743859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 41753859Sml29623 { 41763859Sml29623 nxge_status_t status = NXGE_OK; 41773859Sml29623 41783859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41793859Sml29623 41803859Sml29623 /* 41813859Sml29623 * Load the sharable parameters by writing to the 41823859Sml29623 * function zero control registers. These FZC registers 41833859Sml29623 * should be initialized only once for the entire chip. 41843859Sml29623 */ 41853859Sml29623 (void) nxge_init_fzc_rx_common(nxgep); 41863859Sml29623 41873859Sml29623 /* 41883859Sml29623 * Initialize the RXDMA port specific FZC control configurations. 41893859Sml29623 * These FZC registers are pertaining to each port. 
41903859Sml29623 */ 41913859Sml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 41923859Sml29623 41933859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 41943859Sml29623 41953859Sml29623 return (status); 41963859Sml29623 } 41973859Sml29623 41983859Sml29623 static nxge_status_t 41996495Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 42003859Sml29623 { 42013859Sml29623 int i, ndmas; 42023859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 42033859Sml29623 p_rx_rbr_ring_t *rbr_rings; 42043859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 42053859Sml29623 p_rx_rcr_ring_t *rcr_rings; 42063859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 42073859Sml29623 p_rx_mbox_t *rx_mbox_p; 42083859Sml29623 nxge_status_t status = NXGE_OK; 42093859Sml29623 42103859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 42113859Sml29623 42123859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42133859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42143859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42166929Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers")); 42173859Sml29623 return (NXGE_ERROR); 42183859Sml29623 } 42193859Sml29623 ndmas = rx_rbr_rings->ndmas; 42203859Sml29623 if (ndmas == 0) { 42213859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42226929Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated")); 42233859Sml29623 return (NXGE_ERROR); 42243859Sml29623 } 42253859Sml29623 42263859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42276929Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 42283859Sml29623 42293859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 42303859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 42313859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 42323859Sml29623 if (rx_mbox_areas_p) { 42333859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 42343859Sml29623 } 42353859Sml29623 42366495Sspeer i = channel; 42376495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
42386929Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 42396929Smisaki ndmas, channel)); 42406495Sspeer status = nxge_rxdma_start_channel(nxgep, channel, 42416495Sspeer (p_rx_rbr_ring_t)rbr_rings[i], 42426495Sspeer (p_rx_rcr_ring_t)rcr_rings[i], 42436495Sspeer (p_rx_mbox_t)rx_mbox_p[i]); 42446495Sspeer if (status != NXGE_OK) { 42456495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42466495Sspeer "==> nxge_rxdma_hw_start: disable " 42476495Sspeer "(status 0x%x channel %d)", status, channel)); 42486495Sspeer return (status); 42493859Sml29623 } 42503859Sml29623 42513859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 42526929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 42536929Smisaki rx_rbr_rings, rx_rcr_rings)); 42543859Sml29623 42553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42566929Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 42573859Sml29623 42583859Sml29623 return (status); 42593859Sml29623 } 42603859Sml29623 42613859Sml29623 static void 42626495Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 42633859Sml29623 { 42643859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 42653859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 42663859Sml29623 42673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 42683859Sml29623 42693859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 42703859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 42713859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 42723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42736929Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers")); 42743859Sml29623 return; 42753859Sml29623 } 42763859Sml29623 42773859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 42786929Smisaki "==> nxge_rxdma_hw_stop(channel %d)", 42796929Smisaki channel)); 42806495Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel); 42813859Sml29623 42823859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 42836929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 
42846929Smisaki rx_rbr_rings, rx_rcr_rings)); 42853859Sml29623 42863859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 42873859Sml29623 } 42883859Sml29623 42893859Sml29623 42903859Sml29623 static nxge_status_t 42913859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 42923859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 42933859Sml29623 42943859Sml29623 { 42953859Sml29623 npi_handle_t handle; 42963859Sml29623 npi_status_t rs = NPI_SUCCESS; 42973859Sml29623 rx_dma_ctl_stat_t cs; 42983859Sml29623 rx_dma_ent_msk_t ent_mask; 42993859Sml29623 nxge_status_t status = NXGE_OK; 43003859Sml29623 43013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 43023859Sml29623 43033859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 43043859Sml29623 43053859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 43063859Sml29623 "npi handle addr $%p acc $%p", 43073859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 43083859Sml29623 43096495Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 43106495Sspeer if (!isLDOMguest(nxgep)) { 43116495Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel); 43126495Sspeer if (rs != NPI_SUCCESS) { 43136495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43146495Sspeer "==> nxge_init_fzc_rdc: " 43156495Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 43166495Sspeer channel, rs)); 43176495Sspeer return (NXGE_ERROR | rs); 43186495Sspeer } 43196495Sspeer 43206495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43216495Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d", 43226495Sspeer channel)); 43233859Sml29623 } 43243859Sml29623 43256495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 43266495Sspeer if (isLDOMguest(nxgep)) 43276495Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 43286495Sspeer #endif 43293859Sml29623 43303859Sml29623 /* 43313859Sml29623 * Initialize the RXDMA channel specific FZC control 43323859Sml29623 * configurations. These FZC registers are pertaining 43333859Sml29623 * to each RX channel (logical pages). 43343859Sml29623 */ 43356495Sspeer if (!isLDOMguest(nxgep)) { 43366495Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 43376495Sspeer if (status != NXGE_OK) { 43386495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43396495Sspeer "==> nxge_rxdma_start_channel: " 43406495Sspeer "init fzc rxdma failed (0x%08x channel %d)", 43416495Sspeer status, channel)); 43426495Sspeer return (status); 43436495Sspeer } 43446495Sspeer 43456495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43466495Sspeer "==> nxge_rxdma_start_channel: fzc done")); 43473859Sml29623 } 43483859Sml29623 43493859Sml29623 /* Set up the interrupt event masks. 
*/ 43503859Sml29623 ent_mask.value = 0; 43513859Sml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 43523859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 43536495Sspeer &ent_mask); 43543859Sml29623 if (rs != NPI_SUCCESS) { 43553859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43563859Sml29623 "==> nxge_rxdma_start_channel: " 43576495Sspeer "init rxdma event masks failed " 43586495Sspeer "(0x%08x channel %d)", 43593859Sml29623 status, channel)); 43603859Sml29623 return (NXGE_ERROR | rs); 43613859Sml29623 } 43623859Sml29623 43636495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 43646495Sspeer "==> nxge_rxdma_start_channel: " 43653859Sml29623 "event done: channel %d (mask 0x%016llx)", 43663859Sml29623 channel, ent_mask.value)); 43673859Sml29623 43683859Sml29623 /* Initialize the receive DMA control and status register */ 43693859Sml29623 cs.value = 0; 43703859Sml29623 cs.bits.hdw.mex = 1; 43713859Sml29623 cs.bits.hdw.rcrthres = 1; 43723859Sml29623 cs.bits.hdw.rcrto = 1; 43733859Sml29623 cs.bits.hdw.rbr_empty = 1; 43743859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 43753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43763859Sml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 43773859Sml29623 if (status != NXGE_OK) { 43783859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43793859Sml29623 "==> nxge_rxdma_start_channel: " 43803859Sml29623 "init rxdma control register failed (0x%08x channel %d", 43813859Sml29623 status, channel)); 43823859Sml29623 return (status); 43833859Sml29623 } 43843859Sml29623 43853859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 43863859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 43873859Sml29623 43883859Sml29623 /* 43893859Sml29623 * Load RXDMA descriptors, buffers, mailbox, 43903859Sml29623 * initialise the receive DMA channels and 43913859Sml29623 * enable each DMA channel. 
43923859Sml29623 */ 43933859Sml29623 status = nxge_enable_rxdma_channel(nxgep, 43946495Sspeer channel, rbr_p, rcr_p, mbox_p); 43953859Sml29623 43963859Sml29623 if (status != NXGE_OK) { 43973859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43986495Sspeer " nxge_rxdma_start_channel: " 43996495Sspeer " enable rxdma failed (0x%08x channel %d)", 44006495Sspeer status, channel)); 44016495Sspeer return (status); 44026495Sspeer } 44036495Sspeer 44046495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 44056495Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 44066495Sspeer 44076495Sspeer if (isLDOMguest(nxgep)) { 44086495Sspeer /* Add interrupt handler for this channel. */ 44096495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 44106495Sspeer != NXGE_OK) { 44116495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44123859Sml29623 " nxge_rxdma_start_channel: " 44136495Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 44146495Sspeer status, channel)); 44156495Sspeer } 44163859Sml29623 } 44173859Sml29623 44183859Sml29623 ent_mask.value = 0; 44193859Sml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 44203859Sml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 44213859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44223859Sml29623 &ent_mask); 44233859Sml29623 if (rs != NPI_SUCCESS) { 44243859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 44253859Sml29623 "==> nxge_rxdma_start_channel: " 44263859Sml29623 "init rxdma event masks failed (0x%08x channel %d)", 44273859Sml29623 status, channel)); 44283859Sml29623 return (NXGE_ERROR | rs); 44293859Sml29623 } 44303859Sml29623 44313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 44323859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 44333859Sml29623 44343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 44353859Sml29623 44363859Sml29623 return (NXGE_OK); 44373859Sml29623 } 44383859Sml29623 44393859Sml29623 static nxge_status_t 
44403859Sml29623 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 44413859Sml29623 { 44423859Sml29623 npi_handle_t handle; 44433859Sml29623 npi_status_t rs = NPI_SUCCESS; 44443859Sml29623 rx_dma_ctl_stat_t cs; 44453859Sml29623 rx_dma_ent_msk_t ent_mask; 44463859Sml29623 nxge_status_t status = NXGE_OK; 44473859Sml29623 44483859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 44493859Sml29623 44503859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 44513859Sml29623 44523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 44536929Smisaki "npi handle addr $%p acc $%p", 44546929Smisaki nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 44553859Sml29623 44567812SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) { 44577812SMichael.Speer@Sun.COM /* 44587812SMichael.Speer@Sun.COM * Stop RxMAC = A.9.2.6 44597812SMichael.Speer@Sun.COM */ 44607812SMichael.Speer@Sun.COM if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 44617812SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44627812SMichael.Speer@Sun.COM "nxge_rxdma_stop_channel: " 44637812SMichael.Speer@Sun.COM "Failed to disable RxMAC")); 44647812SMichael.Speer@Sun.COM } 44657812SMichael.Speer@Sun.COM 44667812SMichael.Speer@Sun.COM /* 44677812SMichael.Speer@Sun.COM * Drain IPP Port = A.9.3.6 44687812SMichael.Speer@Sun.COM */ 44697812SMichael.Speer@Sun.COM (void) nxge_ipp_drain(nxgep); 44707812SMichael.Speer@Sun.COM } 44717812SMichael.Speer@Sun.COM 44723859Sml29623 /* Reset RXDMA channel */ 44733859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 44743859Sml29623 if (rs != NPI_SUCCESS) { 44753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44766929Smisaki " nxge_rxdma_stop_channel: " 44776929Smisaki " reset rxdma failed (0x%08x channel %d)", 44786929Smisaki rs, channel)); 44793859Sml29623 return (NXGE_ERROR | rs); 44803859Sml29623 } 44813859Sml29623 44823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44836929Smisaki "==> nxge_rxdma_stop_channel: reset done")); 
44843859Sml29623 44853859Sml29623 /* Set up the interrupt event masks. */ 44863859Sml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 44873859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 44886929Smisaki &ent_mask); 44893859Sml29623 if (rs != NPI_SUCCESS) { 44903859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44916929Smisaki "==> nxge_rxdma_stop_channel: " 44926929Smisaki "set rxdma event masks failed (0x%08x channel %d)", 44936929Smisaki rs, channel)); 44943859Sml29623 return (NXGE_ERROR | rs); 44953859Sml29623 } 44963859Sml29623 44973859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 44986929Smisaki "==> nxge_rxdma_stop_channel: event done")); 44993859Sml29623 45007812SMichael.Speer@Sun.COM /* 45017812SMichael.Speer@Sun.COM * Initialize the receive DMA control and status register 45027812SMichael.Speer@Sun.COM */ 45033859Sml29623 cs.value = 0; 45047812SMichael.Speer@Sun.COM status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 45053859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 45066929Smisaki " to default (all 0s) 0x%08x", cs.value)); 45073859Sml29623 if (status != NXGE_OK) { 45083859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45096929Smisaki " nxge_rxdma_stop_channel: init rxdma" 45106929Smisaki " control register failed (0x%08x channel %d", 45116929Smisaki status, channel)); 45123859Sml29623 return (status); 45133859Sml29623 } 45143859Sml29623 45153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 45166929Smisaki "==> nxge_rxdma_stop_channel: control done")); 45173859Sml29623 45187812SMichael.Speer@Sun.COM /* 45197812SMichael.Speer@Sun.COM * Make sure channel is disabled. 
45207812SMichael.Speer@Sun.COM */ 45213859Sml29623 status = nxge_disable_rxdma_channel(nxgep, channel); 45228275SEric Cheng 45233859Sml29623 if (status != NXGE_OK) { 45243859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45256929Smisaki " nxge_rxdma_stop_channel: " 45266929Smisaki " init enable rxdma failed (0x%08x channel %d)", 45276929Smisaki status, channel)); 45283859Sml29623 return (status); 45293859Sml29623 } 45303859Sml29623 45317812SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) { 45327812SMichael.Speer@Sun.COM /* 45337812SMichael.Speer@Sun.COM * Enable RxMAC = A.9.2.10 45347812SMichael.Speer@Sun.COM */ 45357812SMichael.Speer@Sun.COM if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 45367812SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45377812SMichael.Speer@Sun.COM "nxge_rxdma_stop_channel: Rx MAC still disabled")); 45387812SMichael.Speer@Sun.COM } 45397812SMichael.Speer@Sun.COM } 45407812SMichael.Speer@Sun.COM 45413859Sml29623 NXGE_DEBUG_MSG((nxgep, 45426929Smisaki RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 45433859Sml29623 45443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 45453859Sml29623 45463859Sml29623 return (NXGE_OK); 45473859Sml29623 } 45483859Sml29623 45493859Sml29623 nxge_status_t 45503859Sml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 45513859Sml29623 { 45523859Sml29623 npi_handle_t handle; 45533859Sml29623 p_nxge_rdc_sys_stats_t statsp; 45543859Sml29623 rx_ctl_dat_fifo_stat_t stat; 45553859Sml29623 uint32_t zcp_err_status; 45563859Sml29623 uint32_t ipp_err_status; 45573859Sml29623 nxge_status_t status = NXGE_OK; 45583859Sml29623 npi_status_t rs = NPI_SUCCESS; 45593859Sml29623 boolean_t my_err = B_FALSE; 45603859Sml29623 45613859Sml29623 handle = nxgep->npi_handle; 45623859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 45633859Sml29623 45643859Sml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 45653859Sml29623 45663859Sml29623 if (rs != NPI_SUCCESS) 
45673859Sml29623 return (NXGE_ERROR | rs); 45683859Sml29623 45693859Sml29623 if (stat.bits.ldw.id_mismatch) { 45703859Sml29623 statsp->id_mismatch++; 45713859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 45726929Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 45733859Sml29623 /* Global fatal error encountered */ 45743859Sml29623 } 45753859Sml29623 45763859Sml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 45773859Sml29623 switch (nxgep->mac.portnum) { 45783859Sml29623 case 0: 45793859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 45806929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 45813859Sml29623 my_err = B_TRUE; 45823859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45833859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45843859Sml29623 } 45853859Sml29623 break; 45863859Sml29623 case 1: 45873859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 45886929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 45893859Sml29623 my_err = B_TRUE; 45903859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45913859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 45923859Sml29623 } 45933859Sml29623 break; 45943859Sml29623 case 2: 45953859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 45966929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 45973859Sml29623 my_err = B_TRUE; 45983859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 45993859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 46003859Sml29623 } 46013859Sml29623 break; 46023859Sml29623 case 3: 46033859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 46046929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 46053859Sml29623 my_err = B_TRUE; 46063859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 46073859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 46083859Sml29623 } 46093859Sml29623 break; 46103859Sml29623 default: 46113859Sml29623 return (NXGE_ERROR); 
46123859Sml29623 } 46133859Sml29623 } 46143859Sml29623 46153859Sml29623 if (my_err) { 46163859Sml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 46176929Smisaki zcp_err_status); 46183859Sml29623 if (status != NXGE_OK) 46193859Sml29623 return (status); 46203859Sml29623 } 46213859Sml29623 46223859Sml29623 return (NXGE_OK); 46233859Sml29623 } 46243859Sml29623 46253859Sml29623 static nxge_status_t 46263859Sml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 46273859Sml29623 uint32_t zcp_status) 46283859Sml29623 { 46293859Sml29623 boolean_t rxport_fatal = B_FALSE; 46303859Sml29623 p_nxge_rdc_sys_stats_t statsp; 46313859Sml29623 nxge_status_t status = NXGE_OK; 46323859Sml29623 uint8_t portn; 46333859Sml29623 46343859Sml29623 portn = nxgep->mac.portnum; 46353859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 46363859Sml29623 46373859Sml29623 if (ipp_status & (0x1 << portn)) { 46383859Sml29623 statsp->ipp_eop_err++; 46393859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 46406929Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 46413859Sml29623 rxport_fatal = B_TRUE; 46423859Sml29623 } 46433859Sml29623 46443859Sml29623 if (zcp_status & (0x1 << portn)) { 46453859Sml29623 statsp->zcp_eop_err++; 46463859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 46476929Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 46483859Sml29623 rxport_fatal = B_TRUE; 46493859Sml29623 } 46503859Sml29623 46513859Sml29623 if (rxport_fatal) { 46523859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46536929Smisaki " nxge_rxdma_handle_port_error: " 46546929Smisaki " fatal error on Port #%d\n", 46556929Smisaki portn)); 46563859Sml29623 status = nxge_rx_port_fatal_err_recover(nxgep); 46573859Sml29623 if (status == NXGE_OK) { 46583859Sml29623 FM_SERVICE_RESTORED(nxgep); 46593859Sml29623 } 46603859Sml29623 } 46613859Sml29623 46623859Sml29623 return (status); 46633859Sml29623 } 46643859Sml29623 46653859Sml29623 static nxge_status_t 46663859Sml29623 
/*
 * nxge_rxdma_fatal_err_recover
 *
 *	Attempt in-place recovery of a single RxDMA channel after a
 *	fatal error: disable and reset the channel, rebuild the RBR/RCR
 *	ring state, reclaim any buffers not loaned upstream, and restart
 *	the channel via nxge_rxdma_start_channel().
 *
 * Locking:
 *	Acquires rcrp->lock, then rbrp->lock, then rbrp->post_lock, and
 *	holds all three across the whole reset/re-init sequence; both
 *	exit paths release them in reverse order. Statement order below
 *	is significant (disable -> mask -> reset -> re-init -> start);
 *	do not reorder.
 */
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the dma channel waits for the stop done.
	 * If the stop done bit is not set, then create
	 * an error.
	 */

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	/* Map the hardware channel number to our ring array index. */
	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	/* Lock order: completion ring, buffer ring, then post lock. */
	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		/* Non-fatal: recovery continues with the mask unchanged. */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp =
	    (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	/* Rewind RBR indices: all buffers posted, nothing consumed. */
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	/* Reset RCR software state back to the start of the ring. */
	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	/* Zero the RCR descriptor area so stale completions vanish. */
	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	/*
	 * Scan the buffer ring. Buffers with ref_cnt == 1 are already
	 * wholly ours; partially-consumed ones (cur != max usage) are
	 * still loaned upstream and only logged, while fully-consumed
	 * ones are reclaimed for re-posting.
	 */
	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* Buffer can be re-posted */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				/* Poison value marks a reclaimed buffer. */
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);
fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}
RX_CTL, "Stopping all RxDMA channels...")); 48306495Sspeer 48316495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 48326495Sspeer if ((1 << rdc) & set->owned.map) { 48336495Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 48346495Sspeer != NXGE_OK) { 48356495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48366495Sspeer "Could not recover channel %d", rdc)); 48376495Sspeer } 48383859Sml29623 } 48393859Sml29623 } 48403859Sml29623 48416495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 48423859Sml29623 48433859Sml29623 /* Reset IPP */ 48443859Sml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 48453859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48466929Smisaki "nxge_rx_port_fatal_err_recover: " 48476929Smisaki "Failed to reset IPP")); 48483859Sml29623 goto fail; 48493859Sml29623 } 48503859Sml29623 48513859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 48523859Sml29623 48533859Sml29623 /* Reset RxMAC */ 48543859Sml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 48553859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48566929Smisaki "nxge_rx_port_fatal_err_recover: " 48576929Smisaki "Failed to reset RxMAC")); 48583859Sml29623 goto fail; 48593859Sml29623 } 48603859Sml29623 48613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 48623859Sml29623 48633859Sml29623 /* Re-Initialize IPP */ 48643859Sml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 48653859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48666929Smisaki "nxge_rx_port_fatal_err_recover: " 48676929Smisaki "Failed to init IPP")); 48683859Sml29623 goto fail; 48693859Sml29623 } 48703859Sml29623 48713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 48723859Sml29623 48733859Sml29623 /* Re-Initialize RxMAC */ 48743859Sml29623 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 48753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48766929Smisaki "nxge_rx_port_fatal_err_recover: " 48776929Smisaki "Failed to reset RxMAC")); 48783859Sml29623 goto fail; 
48793859Sml29623 } 48803859Sml29623 48813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 48823859Sml29623 48833859Sml29623 /* Re-enable RxMAC */ 48843859Sml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 48853859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48866929Smisaki "nxge_rx_port_fatal_err_recover: " 48876929Smisaki "Failed to enable RxMAC")); 48883859Sml29623 goto fail; 48893859Sml29623 } 48903859Sml29623 48913859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 48926929Smisaki "Recovery Successful, RxPort Restored")); 48933859Sml29623 48943859Sml29623 return (NXGE_OK); 48953859Sml29623 fail: 48963859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 48973859Sml29623 return (status); 48983859Sml29623 } 48993859Sml29623 49003859Sml29623 void 49013859Sml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 49023859Sml29623 { 49033859Sml29623 rx_dma_ctl_stat_t cs; 49043859Sml29623 rx_ctl_dat_fifo_stat_t cdfs; 49053859Sml29623 49063859Sml29623 switch (err_id) { 49073859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 49083859Sml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 49093859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 49103859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 49113859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 49123859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 49133859Sml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 49143859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 49153859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 49163859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 49173859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 49183859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 49193859Sml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 49203859Sml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 49213859Sml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49226929Smisaki chan, &cs.value); 49233859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 
49243859Sml29623 cs.bits.hdw.rcr_ack_err = 1; 49253859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 49263859Sml29623 cs.bits.hdw.dc_fifo_err = 1; 49273859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 49283859Sml29623 cs.bits.hdw.rcr_sha_par = 1; 49293859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 49303859Sml29623 cs.bits.hdw.rbr_pre_par = 1; 49313859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 49323859Sml29623 cs.bits.hdw.rbr_tmout = 1; 49333859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 49343859Sml29623 cs.bits.hdw.rsp_cnt_err = 1; 49353859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 49363859Sml29623 cs.bits.hdw.byte_en_bus = 1; 49373859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 49383859Sml29623 cs.bits.hdw.rsp_dat_err = 1; 49393859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 49403859Sml29623 cs.bits.hdw.config_err = 1; 49413859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 49423859Sml29623 cs.bits.hdw.rcrincon = 1; 49433859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 49443859Sml29623 cs.bits.hdw.rcrfull = 1; 49453859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 49463859Sml29623 cs.bits.hdw.rbrfull = 1; 49473859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 49483859Sml29623 cs.bits.hdw.rbrlogpage = 1; 49493859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 49503859Sml29623 cs.bits.hdw.cfiglogpage = 1; 49515125Sjoycey #if defined(__i386) 49525125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 49536929Smisaki cs.value); 49545125Sjoycey #else 49553859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 49566929Smisaki cs.value); 49575125Sjoycey #endif 49583859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 49596929Smisaki chan, cs.value); 49603859Sml29623 break; 49613859Sml29623 case 
NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 49623859Sml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 49633859Sml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 49643859Sml29623 cdfs.value = 0; 49653859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 49663859Sml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 49673859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 49683859Sml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 49693859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 49703859Sml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 49715125Sjoycey #if defined(__i386) 49725125Sjoycey cmn_err(CE_NOTE, 49736929Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49746929Smisaki cdfs.value); 49755125Sjoycey #else 49763859Sml29623 cmn_err(CE_NOTE, 49776929Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 49786929Smisaki cdfs.value); 49795125Sjoycey #endif 49806495Sspeer NXGE_REG_WR64(nxgep->npi_handle, 49816495Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 49823859Sml29623 break; 49833859Sml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 49843859Sml29623 break; 49855165Syc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 49863859Sml29623 break; 49873859Sml29623 } 49883859Sml29623 } 49896495Sspeer 49906495Sspeer static void 49916495Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 49926495Sspeer { 49936495Sspeer rxring_info_t *ring_info; 49946495Sspeer int index; 49956495Sspeer uint32_t chunk_size; 49966495Sspeer uint64_t kaddr; 49976495Sspeer uint_t num_blocks; 49986495Sspeer 49996495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 50006495Sspeer 50016495Sspeer if (rbr_p == NULL) { 50026495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50036495Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 50046495Sspeer return; 50056495Sspeer } 50066495Sspeer 50076495Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 5008*9232SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((NULL, DMA_CTL, 
5009*9232SMichael.Speer@Sun.COM "<== nxge_rxdma_databuf_free: DDI")); 50106495Sspeer return; 50116495Sspeer } 50126495Sspeer 50136495Sspeer ring_info = rbr_p->ring_info; 50146495Sspeer if (ring_info == NULL) { 50156495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50166495Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 50176495Sspeer return; 50186495Sspeer } 50196495Sspeer num_blocks = rbr_p->num_blocks; 50206495Sspeer for (index = 0; index < num_blocks; index++) { 50216495Sspeer kaddr = ring_info->buffer[index].kaddr; 50226495Sspeer chunk_size = ring_info->buffer[index].buf_size; 50236495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50246495Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 50256495Sspeer "kaddrp $%p chunk size %d", 50266495Sspeer index, kaddr, chunk_size)); 50276495Sspeer if (kaddr == NULL) continue; 50286495Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 50296495Sspeer ring_info->buffer[index].kaddr = NULL; 50306495Sspeer } 50316495Sspeer 50326495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 50336495Sspeer } 50346495Sspeer 50356495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 50366495Sspeer extern void contig_mem_free(void *, size_t); 50376495Sspeer #endif 50386495Sspeer 50396495Sspeer void 50406495Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 50416495Sspeer { 50426495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 50436495Sspeer 50446495Sspeer if (kaddr == NULL || !buf_size) { 50456495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50466495Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 50476495Sspeer kaddr, buf_size)); 50486495Sspeer return; 50496495Sspeer } 50506495Sspeer 50516495Sspeer switch (alloc_type) { 50526495Sspeer case KMEM_ALLOC: 50536495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50546495Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 50556495Sspeer kaddr, buf_size)); 50566495Sspeer #if defined(__i386) 50576495Sspeer 
KMEM_FREE((void *)(uint32_t)kaddr, buf_size); 50586495Sspeer #else 50596495Sspeer KMEM_FREE((void *)kaddr, buf_size); 50606495Sspeer #endif 50616495Sspeer break; 50626495Sspeer 50636495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 50646495Sspeer case CONTIG_MEM_ALLOC: 50656495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 50666495Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 50676495Sspeer kaddr, buf_size)); 50686495Sspeer contig_mem_free((void *)kaddr, buf_size); 50696495Sspeer break; 50706495Sspeer #endif 50716495Sspeer 50726495Sspeer default: 50736495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 50746495Sspeer "<== nxge_free_buf: unsupported alloc type %d", 50756495Sspeer alloc_type)); 50766495Sspeer return; 50776495Sspeer } 50786495Sspeer 50796495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 50806495Sspeer } 5081