/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

/*
 * Translate a logical RDC group / RDC number into the hardware value,
 * by adding this function's base group id / starting RDC number.
 */
#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;

/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_bcopy_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;

/* Forward declarations for this file's static helpers. */
static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
	p_rx_rcr_ring_t,
	p_rcr_entry_t,
	boolean_t *,
	mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static void nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);


static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);

/*
 * nxge_init_rxdma_channels
 *
 *	Initialize all of this instance's RXDMA channels: start the
 *	common (FZC) RXDMA hardware (service domain only), then walk
 *	every logical RDC group and add each mapped channel to its
 *	group via nxge_grp_dc_add().
 *
 * Returns:
 *	NXGE_OK on success; NXGE_ERROR if the common hardware start or
 *	any channel add fails.  On a channel-add failure, every channel
 *	in every group is removed again before returning.
 */
nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	int i, count, rdc, channel;
	nxge_grp_t *group;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

	/* The global (FZC) registers are only writable in a service domain. */
	if (!isLDOMguest(nxgep)) {
		if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
			cmn_err(CE_NOTE, "hw_start_common");
			return (NXGE_ERROR);
		}
	}

	/*
	 * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
	 * We only have 8 hardware RDC tables, but we may have
	 * up to 16 logical (software-defined) groups of RDCS,
	 * if we make use of layer 3 & 4 hardware classification.
	 */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];

			for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
				if ((1 << channel) & group->map) {
					if ((nxge_grp_dc_add(nxgep,
					    group, VP_BOUND_RX, channel)))
						goto init_rxdma_channels_exit;
				}
			}
		}
		/* Stop once every in-use logical group has been visited. */
		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_OK);

init_rxdma_channels_exit:
	/*
	 * Unwind: remove every channel mapped in every logical group.
	 * NOTE(review): this also calls nxge_grp_dc_remove() for channels
	 * that were never successfully added — presumably the remove path
	 * tolerates that; confirm against nxge_grp_dc_remove().
	 */
	for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
		if ((1 << i) & set->lg.map) {
			group = set->group[i];

			for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
				if ((1 << rdc) & group->map) {
					nxge_grp_dc_remove(nxgep,
					    VP_BOUND_RX, rdc);
				}
			}
		}

		if (++count == set->lg.count)
			break;
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
	return (NXGE_ERROR);
}
1976495Sspeer 1986495Sspeer nxge_status_t 1996495Sspeer nxge_init_rxdma_channel(p_nxge_t nxge, int channel) 2006495Sspeer { 2016495Sspeer nxge_status_t status; 2026495Sspeer 2036495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel")); 2046495Sspeer 2056495Sspeer status = nxge_map_rxdma(nxge, channel); 2063859Sml29623 if (status != NXGE_OK) { 2076495Sspeer NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 2086495Sspeer "<== nxge_init_rxdma: status 0x%x", status)); 2093859Sml29623 return (status); 2103859Sml29623 } 2113859Sml29623 2126495Sspeer status = nxge_rxdma_hw_start(nxge, channel); 2133859Sml29623 if (status != NXGE_OK) { 2146495Sspeer nxge_unmap_rxdma(nxge, channel); 2153859Sml29623 } 2163859Sml29623 2176495Sspeer if (!nxge->statsp->rdc_ksp[channel]) 2186495Sspeer nxge_setup_rdc_kstats(nxge, channel); 2196495Sspeer 2206495Sspeer NXGE_DEBUG_MSG((nxge, MEM2_CTL, 2216495Sspeer "<== nxge_init_rxdma_channel: status 0x%x", status)); 2223859Sml29623 2233859Sml29623 return (status); 2243859Sml29623 } 2253859Sml29623 2263859Sml29623 void 2273859Sml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 2283859Sml29623 { 2296495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 2306495Sspeer int rdc; 2316495Sspeer 2323859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 2333859Sml29623 2346495Sspeer if (set->owned.map == 0) { 2356495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2366495Sspeer "nxge_uninit_rxdma_channels: no channels")); 2376495Sspeer return; 2386495Sspeer } 2396495Sspeer 2406495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 2416495Sspeer if ((1 << rdc) & set->owned.map) { 2426495Sspeer nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc); 2436495Sspeer } 2446495Sspeer } 2456495Sspeer 2466495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels")); 2476495Sspeer } 2486495Sspeer 2496495Sspeer void 2506495Sspeer nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel) 2516495Sspeer { 2526495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
"==> nxge_uninit_rxdma_channel")); 2536495Sspeer 2546495Sspeer if (nxgep->statsp->rdc_ksp[channel]) { 2556495Sspeer kstat_delete(nxgep->statsp->rdc_ksp[channel]); 2566495Sspeer nxgep->statsp->rdc_ksp[channel] = 0; 2576495Sspeer } 2586495Sspeer 2596495Sspeer nxge_rxdma_hw_stop(nxgep, channel); 2606495Sspeer nxge_unmap_rxdma(nxgep, channel); 2616495Sspeer 2626495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uinit_rxdma_channel")); 2633859Sml29623 } 2643859Sml29623 2653859Sml29623 nxge_status_t 2663859Sml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 2673859Sml29623 { 2683859Sml29623 npi_handle_t handle; 2693859Sml29623 npi_status_t rs = NPI_SUCCESS; 2703859Sml29623 nxge_status_t status = NXGE_OK; 2713859Sml29623 2727812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel")); 2733859Sml29623 2743859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2753859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 2763859Sml29623 2773859Sml29623 if (rs != NPI_SUCCESS) { 2783859Sml29623 status = NXGE_ERROR | rs; 2793859Sml29623 } 2803859Sml29623 2817812SMichael.Speer@Sun.COM NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 2827812SMichael.Speer@Sun.COM 2833859Sml29623 return (status); 2843859Sml29623 } 2853859Sml29623 2863859Sml29623 void 2873859Sml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 2883859Sml29623 { 2896495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 2906495Sspeer int rdc; 2913859Sml29623 2923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 2933859Sml29623 2946495Sspeer if (!isLDOMguest(nxgep)) { 2956495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 2966495Sspeer (void) npi_rxdma_dump_fzc_regs(handle); 2976495Sspeer } 2986495Sspeer 2996495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 3006495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 3016495Sspeer "nxge_rxdma_regs_dump_channels: " 3026495Sspeer "NULL ring pointer(s)")); 
3033859Sml29623 return; 3043859Sml29623 } 3056495Sspeer 3066495Sspeer if (set->owned.map == 0) { 3073859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 3086495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 3093859Sml29623 return; 3103859Sml29623 } 3113859Sml29623 3126495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 3136495Sspeer if ((1 << rdc) & set->owned.map) { 3146495Sspeer rx_rbr_ring_t *ring = 3156495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 3166495Sspeer if (ring) { 3176495Sspeer (void) nxge_dump_rxdma_channel(nxgep, rdc); 3186495Sspeer } 3193859Sml29623 } 3203859Sml29623 } 3213859Sml29623 3223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 3233859Sml29623 } 3243859Sml29623 3253859Sml29623 nxge_status_t 3263859Sml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 3273859Sml29623 { 3283859Sml29623 npi_handle_t handle; 3293859Sml29623 npi_status_t rs = NPI_SUCCESS; 3303859Sml29623 nxge_status_t status = NXGE_OK; 3313859Sml29623 3323859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 3333859Sml29623 3343859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3353859Sml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel); 3363859Sml29623 3373859Sml29623 if (rs != NPI_SUCCESS) { 3383859Sml29623 status = NXGE_ERROR | rs; 3393859Sml29623 } 3403859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 3413859Sml29623 return (status); 3423859Sml29623 } 3433859Sml29623 3443859Sml29623 nxge_status_t 3453859Sml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 3463859Sml29623 p_rx_dma_ent_msk_t mask_p) 3473859Sml29623 { 3483859Sml29623 npi_handle_t handle; 3493859Sml29623 npi_status_t rs = NPI_SUCCESS; 3503859Sml29623 nxge_status_t status = NXGE_OK; 3513859Sml29623 3523859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3536929Smisaki "<== nxge_init_rxdma_channel_event_mask")); 3543859Sml29623 3553859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3563859Sml29623 rs = 
npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 3573859Sml29623 if (rs != NPI_SUCCESS) { 3583859Sml29623 status = NXGE_ERROR | rs; 3593859Sml29623 } 3603859Sml29623 3613859Sml29623 return (status); 3623859Sml29623 } 3633859Sml29623 3643859Sml29623 nxge_status_t 3653859Sml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 3663859Sml29623 p_rx_dma_ctl_stat_t cs_p) 3673859Sml29623 { 3683859Sml29623 npi_handle_t handle; 3693859Sml29623 npi_status_t rs = NPI_SUCCESS; 3703859Sml29623 nxge_status_t status = NXGE_OK; 3713859Sml29623 3723859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 3736929Smisaki "<== nxge_init_rxdma_channel_cntl_stat")); 3743859Sml29623 3753859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3763859Sml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 3773859Sml29623 3783859Sml29623 if (rs != NPI_SUCCESS) { 3793859Sml29623 status = NXGE_ERROR | rs; 3803859Sml29623 } 3813859Sml29623 3823859Sml29623 return (status); 3833859Sml29623 } 3843859Sml29623 3856495Sspeer /* 3866495Sspeer * nxge_rxdma_cfg_rdcgrp_default_rdc 3876495Sspeer * 3886495Sspeer * Set the default RDC for an RDC Group (Table) 3896495Sspeer * 3906495Sspeer * Arguments: 3916495Sspeer * nxgep 3926495Sspeer * rdcgrp The group to modify 3936495Sspeer * rdc The new default RDC. 
3946495Sspeer * 3956495Sspeer * Notes: 3966495Sspeer * 3976495Sspeer * NPI/NXGE function calls: 3986495Sspeer * npi_rxdma_cfg_rdc_table_default_rdc() 3996495Sspeer * 4006495Sspeer * Registers accessed: 4016495Sspeer * RDC_TBL_REG: FZC_ZCP + 0x10000 4026495Sspeer * 4036495Sspeer * Context: 4046495Sspeer * Service domain 4056495Sspeer */ 4063859Sml29623 nxge_status_t 4076495Sspeer nxge_rxdma_cfg_rdcgrp_default_rdc( 4086495Sspeer p_nxge_t nxgep, 4096495Sspeer uint8_t rdcgrp, 4106495Sspeer uint8_t rdc) 4113859Sml29623 { 4123859Sml29623 npi_handle_t handle; 4133859Sml29623 npi_status_t rs = NPI_SUCCESS; 4143859Sml29623 p_nxge_dma_pt_cfg_t p_dma_cfgp; 4153859Sml29623 p_nxge_rdc_grp_t rdc_grp_p; 4163859Sml29623 uint8_t actual_rdcgrp, actual_rdc; 4173859Sml29623 4183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4196929Smisaki " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 4203859Sml29623 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 4213859Sml29623 4223859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4233859Sml29623 4246495Sspeer /* 4256495Sspeer * This has to be rewritten. Do we even allow this anymore? 
4266495Sspeer */ 4273859Sml29623 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 4286495Sspeer RDC_MAP_IN(rdc_grp_p->map, rdc); 4296495Sspeer rdc_grp_p->def_rdc = rdc; 4303859Sml29623 4313859Sml29623 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 4323859Sml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 4333859Sml29623 4346495Sspeer rs = npi_rxdma_cfg_rdc_table_default_rdc( 4356929Smisaki handle, actual_rdcgrp, actual_rdc); 4363859Sml29623 4373859Sml29623 if (rs != NPI_SUCCESS) { 4383859Sml29623 return (NXGE_ERROR | rs); 4393859Sml29623 } 4403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4416929Smisaki " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 4423859Sml29623 return (NXGE_OK); 4433859Sml29623 } 4443859Sml29623 4453859Sml29623 nxge_status_t 4463859Sml29623 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 4473859Sml29623 { 4483859Sml29623 npi_handle_t handle; 4493859Sml29623 4503859Sml29623 uint8_t actual_rdc; 4513859Sml29623 npi_status_t rs = NPI_SUCCESS; 4523859Sml29623 4533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4546929Smisaki " ==> nxge_rxdma_cfg_port_default_rdc")); 4553859Sml29623 4563859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4576495Sspeer actual_rdc = rdc; /* XXX Hack! 
*/ 4583859Sml29623 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 4593859Sml29623 4603859Sml29623 4613859Sml29623 if (rs != NPI_SUCCESS) { 4623859Sml29623 return (NXGE_ERROR | rs); 4633859Sml29623 } 4643859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4656929Smisaki " <== nxge_rxdma_cfg_port_default_rdc")); 4663859Sml29623 4673859Sml29623 return (NXGE_OK); 4683859Sml29623 } 4693859Sml29623 4703859Sml29623 nxge_status_t 4713859Sml29623 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 4723859Sml29623 uint16_t pkts) 4733859Sml29623 { 4743859Sml29623 npi_status_t rs = NPI_SUCCESS; 4753859Sml29623 npi_handle_t handle; 4763859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 4776929Smisaki " ==> nxge_rxdma_cfg_rcr_threshold")); 4783859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4793859Sml29623 4803859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 4813859Sml29623 4823859Sml29623 if (rs != NPI_SUCCESS) { 4833859Sml29623 return (NXGE_ERROR | rs); 4843859Sml29623 } 4853859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 4863859Sml29623 return (NXGE_OK); 4873859Sml29623 } 4883859Sml29623 4893859Sml29623 nxge_status_t 4903859Sml29623 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 4913859Sml29623 uint16_t tout, uint8_t enable) 4923859Sml29623 { 4933859Sml29623 npi_status_t rs = NPI_SUCCESS; 4943859Sml29623 npi_handle_t handle; 4953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 4963859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4973859Sml29623 if (enable == 0) { 4983859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 4993859Sml29623 } else { 5003859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 5016929Smisaki tout); 5023859Sml29623 } 5033859Sml29623 5043859Sml29623 if (rs != NPI_SUCCESS) { 5053859Sml29623 return (NXGE_ERROR | rs); 5063859Sml29623 } 5073859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== 
nxge_rxdma_cfg_rcr_timeout")); 5083859Sml29623 return (NXGE_OK); 5093859Sml29623 } 5103859Sml29623 5113859Sml29623 nxge_status_t 5123859Sml29623 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 5133859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 5143859Sml29623 { 5153859Sml29623 npi_handle_t handle; 5163859Sml29623 rdc_desc_cfg_t rdc_desc; 5173859Sml29623 p_rcrcfig_b_t cfgb_p; 5183859Sml29623 npi_status_t rs = NPI_SUCCESS; 5193859Sml29623 5203859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 5213859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5223859Sml29623 /* 5233859Sml29623 * Use configuration data composed at init time. 5243859Sml29623 * Write to hardware the receive ring configurations. 5253859Sml29623 */ 5263859Sml29623 rdc_desc.mbox_enable = 1; 5273859Sml29623 rdc_desc.mbox_addr = mbox_p->mbox_addr; 5283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5296929Smisaki "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 5306929Smisaki mbox_p->mbox_addr, rdc_desc.mbox_addr)); 5313859Sml29623 5323859Sml29623 rdc_desc.rbr_len = rbr_p->rbb_max; 5333859Sml29623 rdc_desc.rbr_addr = rbr_p->rbr_addr; 5343859Sml29623 5353859Sml29623 switch (nxgep->rx_bksize_code) { 5363859Sml29623 case RBR_BKSIZE_4K: 5373859Sml29623 rdc_desc.page_size = SIZE_4KB; 5383859Sml29623 break; 5393859Sml29623 case RBR_BKSIZE_8K: 5403859Sml29623 rdc_desc.page_size = SIZE_8KB; 5413859Sml29623 break; 5423859Sml29623 case RBR_BKSIZE_16K: 5433859Sml29623 rdc_desc.page_size = SIZE_16KB; 5443859Sml29623 break; 5453859Sml29623 case RBR_BKSIZE_32K: 5463859Sml29623 rdc_desc.page_size = SIZE_32KB; 5473859Sml29623 break; 5483859Sml29623 } 5493859Sml29623 5503859Sml29623 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 5513859Sml29623 rdc_desc.valid0 = 1; 5523859Sml29623 5533859Sml29623 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 5543859Sml29623 rdc_desc.valid1 = 1; 5553859Sml29623 5563859Sml29623 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 
5573859Sml29623 rdc_desc.valid2 = 1; 5583859Sml29623 5593859Sml29623 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 5603859Sml29623 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 5613859Sml29623 5623859Sml29623 rdc_desc.rcr_len = rcr_p->comp_size; 5633859Sml29623 rdc_desc.rcr_addr = rcr_p->rcr_addr; 5643859Sml29623 5653859Sml29623 cfgb_p = &(rcr_p->rcr_cfgb); 5663859Sml29623 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 5676495Sspeer /* For now, disable this timeout in a guest domain. */ 5686495Sspeer if (isLDOMguest(nxgep)) { 5696495Sspeer rdc_desc.rcr_timeout = 0; 5706495Sspeer rdc_desc.rcr_timeout_enable = 0; 5716495Sspeer } else { 5726495Sspeer rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 5736495Sspeer rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 5746495Sspeer } 5753859Sml29623 5763859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 5776929Smisaki "rbr_len qlen %d pagesize code %d rcr_len %d", 5786929Smisaki rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 5793859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 5806929Smisaki "size 0 %d size 1 %d size 2 %d", 5816929Smisaki rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 5826929Smisaki rbr_p->npi_pkt_buf_size2)); 5833859Sml29623 5843859Sml29623 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 5853859Sml29623 if (rs != NPI_SUCCESS) { 5863859Sml29623 return (NXGE_ERROR | rs); 5873859Sml29623 } 5883859Sml29623 5893859Sml29623 /* 5903859Sml29623 * Enable the timeout and threshold. 
5913859Sml29623 */ 5923859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 5936929Smisaki rdc_desc.rcr_threshold); 5943859Sml29623 if (rs != NPI_SUCCESS) { 5953859Sml29623 return (NXGE_ERROR | rs); 5963859Sml29623 } 5973859Sml29623 5983859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 5996929Smisaki rdc_desc.rcr_timeout); 6003859Sml29623 if (rs != NPI_SUCCESS) { 6013859Sml29623 return (NXGE_ERROR | rs); 6023859Sml29623 } 6033859Sml29623 6043859Sml29623 /* Enable the DMA */ 6053859Sml29623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 6063859Sml29623 if (rs != NPI_SUCCESS) { 6073859Sml29623 return (NXGE_ERROR | rs); 6083859Sml29623 } 6093859Sml29623 6103859Sml29623 /* Kick the DMA engine. */ 6113859Sml29623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 6123859Sml29623 /* Clear the rbr empty bit */ 6133859Sml29623 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 6143859Sml29623 6153859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 6163859Sml29623 6173859Sml29623 return (NXGE_OK); 6183859Sml29623 } 6193859Sml29623 6203859Sml29623 nxge_status_t 6213859Sml29623 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 6223859Sml29623 { 6233859Sml29623 npi_handle_t handle; 6243859Sml29623 npi_status_t rs = NPI_SUCCESS; 6253859Sml29623 6263859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 6273859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 6283859Sml29623 6293859Sml29623 /* disable the DMA */ 6303859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 6313859Sml29623 if (rs != NPI_SUCCESS) { 6323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 6336929Smisaki "<== nxge_disable_rxdma_channel:failed (0x%x)", 6346929Smisaki rs)); 6353859Sml29623 return (NXGE_ERROR | rs); 6363859Sml29623 } 6373859Sml29623 6383859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 6393859Sml29623 return (NXGE_OK); 6403859Sml29623 } 6413859Sml29623 
/*
 * nxge_rxdma_channel_rcrflush
 *
 *	Flush the RCR of one RDC via NPI.  Always returns NXGE_OK.
 *	NOTE(review): both debug messages use the "<==" exit marker and
 *	an old function name; only the strings are misleading.
 */
nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
	npi_handle_t handle;
	nxge_status_t status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflush"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	npi_rxdma_rdc_rcr_flush(handle, channel);

	NXGE_DEBUG_MSG((nxgep, DMA_CTL,
	    "<== nxge_init_rxdma_channel_rcrflsh"));
	return (status);

}

/* Midpoint of [l, r], rounded up; used by the binary search below. */
#define	MID_INDEX(l, r)	((r + l + 1) >> 1)

/*
 * Direction markers for the binary search: the sum of base_side and
 * end_side tells whether the target lies left of, right of, or inside
 * the current chunk.
 */
#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff

/*
 * nxge_rxbuf_pp_to_vp
 *
 *	Translate a packet buffer's DVMA (hardware) address, as reported
 *	in an RCR entry, back to the buffer chunk that contains it.  A
 *	per-buffer-size hint is tried first; otherwise a binary search
 *	over the sorted bufinfo array (bounded by max_iterations) finds
 *	the chunk.  On success, falls through to found_index to compute
 *	the chunk/block indices (continues beyond this excerpt).
 *
 *	pktbufsz_type selects buffer size 0/1/2 or RCR_SINGLE_BLOCK;
 *	any other value returns NXGE_ERROR.
 */
/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int bufsize;
	uint64_t pktbuf_pp;
	uint64_t dvma_addr;
	rxring_info_t *ring_info;
	int base_side, end_side;
	int r_index, l_index, anchor_index;
	int found, search_done;
	uint32_t offset, chunk_size, block_size, page_size_mask;
	uint32_t chunk_index, block_index, total_index;
	int max_iterations, iteration;
	rxbuf_index_info_t *bufinfo;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type));
#if defined(__i386)
	/* 32-bit kernel: pointers are 32 bits; widen via uint32_t. */
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (NXGE_ERROR);
	}

	/* Only one chunk: no search needed. */
	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d "
		    "bufinfo $%p",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index,
		    bufinfo));

		goto found_index;
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: "
	    "buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;
	/*
	 * First check if this block has been seen
	 * recently. This is indicated by a hint which
	 * is initialized when the first buffer of the block
	 * is seen. The hint is reset when the last buffer of
	 * the block has been processed.
	 * As three block sizes are supported, three hints
	 * are kept. The idea behind the hints is that once
	 * the hardware uses a block for a buffer of that
	 * size, it will use it exclusively for that size
	 * and will use it until it is exhausted. It is assumed
	 * that there would a single block being used for the same
	 * buffer sizes at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * check if this is the last buffer in the block
			 * If so, then reset the hint for the size;
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));

		/*
		 * This is the first buffer of the block of this
		 * size. Need to search the whole information
		 * array.
		 * the search algorithm uses a binary tree search
		 * algorithm. It assumes that the information is
		 * already sorted with increasing order
		 * info[0] < info[1] < info[2]  .... < info[n-1]
		 * where n is the size of the information array
		 */
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			if ((r_index == l_index) ||
			    (iteration >= max_iterations))
				search_done = B_TRUE;
			end_side = TO_RIGHT; /* to the right */
			base_side = TO_LEFT; /* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr = bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;
			NXGE_DEBUG_MSG((nxgep, RX2_CTL,
			    "==> nxge_rxbuf_pp_to_vp: (searching)"
			    "buf_pp $%p btype %d "
			    "anchor_index %d chunk_size %d dvmaaddr $%p",
			    pkt_buf_addr_pp,
			    pktbufsz_type,
			    anchor_index,
			    chunk_size,
			    dvma_addr));

			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT; /* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT; /* to the left */

			switch (base_side + end_side) {
			case IN_MIDDLE:
				/* found */
				found = B_TRUE;
				search_done = B_TRUE;
				if ((pktbuf_pp + bufsize) <
				    (dvma_addr + chunk_size))
					ring_info->hint[pktbufsz_type] =
					    bufinfo[anchor_index].buf_index;
				break;
			case BOTH_RIGHT:
				/* not found: go to the right */
				l_index = anchor_index + 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;

			case BOTH_LEFT:
				/* not found: go to the left */
				r_index = anchor_index - 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;
			default: /* should not come here */
				return (NXGE_ERROR);
			}
			iteration++;
		}

		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search done)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
	}

	if (found == B_FALSE) {
		NXGE_DEBUG_MSG((nxgep, RX2_CTL,
		    "==> nxge_rxbuf_pp_to_vp: (search failed)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp,
		    pktbufsz_type,
		    anchor_index));
		return (NXGE_ERROR);
	}

found_index:
	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
	    pkt_buf_addr_pp,
	    pktbufsz_type,
	    bufsize,
	    anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr = bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p",
8856929Smisaki pkt_buf_addr_pp, 8866929Smisaki pktbufsz_type, 8876929Smisaki bufsize, 8886929Smisaki anchor_index, 8896929Smisaki chunk_index, 8906929Smisaki dvma_addr)); 8913859Sml29623 8923859Sml29623 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 8933859Sml29623 block_size = rbr_p->block_size; /* System block(page) size */ 8943859Sml29623 8953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8966929Smisaki "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 8976929Smisaki "buf_pp $%p btype %d bufsize %d " 8986929Smisaki "anchor_index %d chunk_index %d dvma $%p " 8996929Smisaki "offset %d block_size %d", 9006929Smisaki pkt_buf_addr_pp, 9016929Smisaki pktbufsz_type, 9026929Smisaki bufsize, 9036929Smisaki anchor_index, 9046929Smisaki chunk_index, 9056929Smisaki dvma_addr, 9066929Smisaki offset, 9076929Smisaki block_size)); 9083859Sml29623 9093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 9103859Sml29623 9113859Sml29623 block_index = (offset / block_size); /* index within chunk */ 9123859Sml29623 total_index = chunk_index + block_index; 9133859Sml29623 9143859Sml29623 9153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9166929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9176929Smisaki "total_index %d dvma_addr $%p " 9186929Smisaki "offset %d block_size %d " 9196929Smisaki "block_index %d ", 9206929Smisaki total_index, dvma_addr, 9216929Smisaki offset, block_size, 9226929Smisaki block_index)); 9235125Sjoycey #if defined(__i386) 9245125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 9256929Smisaki (uint32_t)offset); 9265125Sjoycey #else 9275125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 9286929Smisaki (uint64_t)offset); 9295125Sjoycey #endif 9303859Sml29623 9313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9326929Smisaki "==> nxge_rxbuf_pp_to_vp: " 9336929Smisaki "total_index %d dvma_addr $%p " 9346929Smisaki "offset %d block_size %d " 9356929Smisaki "block_index %d " 9366929Smisaki 
"*pkt_buf_addr_p $%p", 9376929Smisaki total_index, dvma_addr, 9386929Smisaki offset, block_size, 9396929Smisaki block_index, 9406929Smisaki *pkt_buf_addr_p)); 9413859Sml29623 9423859Sml29623 9433859Sml29623 *msg_index = total_index; 9443859Sml29623 *bufoffset = (offset & page_size_mask); 9453859Sml29623 9463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9476929Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 9486929Smisaki "msg_index %d bufoffset_index %d", 9496929Smisaki *msg_index, 9506929Smisaki *bufoffset)); 9513859Sml29623 9523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 9533859Sml29623 9543859Sml29623 return (NXGE_OK); 9553859Sml29623 } 9563859Sml29623 9573859Sml29623 /* 9583859Sml29623 * used by quick sort (qsort) function 9593859Sml29623 * to perform comparison 9603859Sml29623 */ 9613859Sml29623 static int 9623859Sml29623 nxge_sort_compare(const void *p1, const void *p2) 9633859Sml29623 { 9643859Sml29623 9653859Sml29623 rxbuf_index_info_t *a, *b; 9663859Sml29623 9673859Sml29623 a = (rxbuf_index_info_t *)p1; 9683859Sml29623 b = (rxbuf_index_info_t *)p2; 9693859Sml29623 9703859Sml29623 if (a->dvma_addr > b->dvma_addr) 9713859Sml29623 return (1); 9723859Sml29623 if (a->dvma_addr < b->dvma_addr) 9733859Sml29623 return (-1); 9743859Sml29623 return (0); 9753859Sml29623 } 9763859Sml29623 9773859Sml29623 9783859Sml29623 9793859Sml29623 /* 9803859Sml29623 * grabbed this sort implementation from common/syscall/avl.c 9813859Sml29623 * 9823859Sml29623 */ 9833859Sml29623 /* 9843859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
9853859Sml29623 * v = Ptr to array/vector of objs 9863859Sml29623 * n = # objs in the array 9873859Sml29623 * s = size of each obj (must be multiples of a word size) 9883859Sml29623 * f = ptr to function to compare two objs 9893859Sml29623 * returns (-1 = less than, 0 = equal, 1 = greater than 9903859Sml29623 */ 9913859Sml29623 void 9923859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 9933859Sml29623 { 9943859Sml29623 int g, i, j, ii; 9953859Sml29623 unsigned int *p1, *p2; 9963859Sml29623 unsigned int tmp; 9973859Sml29623 9983859Sml29623 /* No work to do */ 9993859Sml29623 if (v == NULL || n <= 1) 10003859Sml29623 return; 10013859Sml29623 /* Sanity check on arguments */ 10023859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 10033859Sml29623 ASSERT(s > 0); 10043859Sml29623 10053859Sml29623 for (g = n / 2; g > 0; g /= 2) { 10063859Sml29623 for (i = g; i < n; i++) { 10073859Sml29623 for (j = i - g; j >= 0 && 10086929Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 10096929Smisaki j -= g) { 10103859Sml29623 p1 = (unsigned *)(v + j * s); 10113859Sml29623 p2 = (unsigned *)(v + (j + g) * s); 10123859Sml29623 for (ii = 0; ii < s / 4; ii++) { 10133859Sml29623 tmp = *p1; 10143859Sml29623 *p1++ = *p2; 10153859Sml29623 *p2++ = tmp; 10163859Sml29623 } 10173859Sml29623 } 10183859Sml29623 } 10193859Sml29623 } 10203859Sml29623 } 10213859Sml29623 10223859Sml29623 /* 10233859Sml29623 * Initialize data structures required for rxdma 10243859Sml29623 * buffer dvma->vmem address lookup 10253859Sml29623 */ 10263859Sml29623 /*ARGSUSED*/ 10273859Sml29623 static nxge_status_t 10283859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 10293859Sml29623 { 10303859Sml29623 10313859Sml29623 int index; 10323859Sml29623 rxring_info_t *ring_info; 10333859Sml29623 int max_iteration = 0, max_index = 0; 10343859Sml29623 10353859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 10363859Sml29623 10373859Sml29623 ring_info = 
rbrp->ring_info; 10383859Sml29623 ring_info->hint[0] = NO_HINT; 10393859Sml29623 ring_info->hint[1] = NO_HINT; 10403859Sml29623 ring_info->hint[2] = NO_HINT; 10413859Sml29623 max_index = rbrp->num_blocks; 10423859Sml29623 10433859Sml29623 /* read the DVMA address information and sort it */ 10443859Sml29623 /* do init of the information array */ 10453859Sml29623 10463859Sml29623 10473859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10486929Smisaki " nxge_rxbuf_index_info_init Sort ptrs")); 10493859Sml29623 10503859Sml29623 /* sort the array */ 10513859Sml29623 nxge_ksort((void *)ring_info->buffer, max_index, 10526929Smisaki sizeof (rxbuf_index_info_t), nxge_sort_compare); 10533859Sml29623 10543859Sml29623 10553859Sml29623 10563859Sml29623 for (index = 0; index < max_index; index++) { 10573859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10586929Smisaki " nxge_rxbuf_index_info_init: sorted chunk %d " 10596929Smisaki " ioaddr $%p kaddr $%p size %x", 10606929Smisaki index, ring_info->buffer[index].dvma_addr, 10616929Smisaki ring_info->buffer[index].kaddr, 10626929Smisaki ring_info->buffer[index].buf_size)); 10633859Sml29623 } 10643859Sml29623 10653859Sml29623 max_iteration = 0; 10663859Sml29623 while (max_index >= (1ULL << max_iteration)) 10673859Sml29623 max_iteration++; 10683859Sml29623 ring_info->max_iterations = max_iteration + 1; 10693859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 10706929Smisaki " nxge_rxbuf_index_info_init Find max iter %d", 10716929Smisaki ring_info->max_iterations)); 10723859Sml29623 10733859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 10743859Sml29623 return (NXGE_OK); 10753859Sml29623 } 10763859Sml29623 10773859Sml29623 /* ARGSUSED */ 10783859Sml29623 void 10793859Sml29623 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 10803859Sml29623 { 10813859Sml29623 #ifdef NXGE_DEBUG 10823859Sml29623 10833859Sml29623 uint32_t bptr; 10843859Sml29623 uint64_t pp; 10853859Sml29623 10863859Sml29623 bptr = 
entry_p->bits.hdw.pkt_buf_addr; 10873859Sml29623 10883859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 10896929Smisaki "\trcr entry $%p " 10906929Smisaki "\trcr entry 0x%0llx " 10916929Smisaki "\trcr entry 0x%08x " 10926929Smisaki "\trcr entry 0x%08x " 10936929Smisaki "\tvalue 0x%0llx\n" 10946929Smisaki "\tmulti = %d\n" 10956929Smisaki "\tpkt_type = 0x%x\n" 10966929Smisaki "\tzero_copy = %d\n" 10976929Smisaki "\tnoport = %d\n" 10986929Smisaki "\tpromis = %d\n" 10996929Smisaki "\terror = 0x%04x\n" 11006929Smisaki "\tdcf_err = 0x%01x\n" 11016929Smisaki "\tl2_len = %d\n" 11026929Smisaki "\tpktbufsize = %d\n" 11036929Smisaki "\tpkt_buf_addr = $%p\n" 11046929Smisaki "\tpkt_buf_addr (<< 6) = $%p\n", 11056929Smisaki entry_p, 11066929Smisaki *(int64_t *)entry_p, 11076929Smisaki *(int32_t *)entry_p, 11086929Smisaki *(int32_t *)((char *)entry_p + 32), 11096929Smisaki entry_p->value, 11106929Smisaki entry_p->bits.hdw.multi, 11116929Smisaki entry_p->bits.hdw.pkt_type, 11126929Smisaki entry_p->bits.hdw.zero_copy, 11136929Smisaki entry_p->bits.hdw.noport, 11146929Smisaki entry_p->bits.hdw.promis, 11156929Smisaki entry_p->bits.hdw.error, 11166929Smisaki entry_p->bits.hdw.dcf_err, 11176929Smisaki entry_p->bits.hdw.l2_len, 11186929Smisaki entry_p->bits.hdw.pktbufsz, 11196929Smisaki bptr, 11206929Smisaki entry_p->bits.ldw.pkt_buf_addr)); 11213859Sml29623 11223859Sml29623 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 11236929Smisaki RCR_PKT_BUF_ADDR_SHIFT; 11243859Sml29623 11253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 11266929Smisaki pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 11273859Sml29623 #endif 11283859Sml29623 } 11293859Sml29623 11303859Sml29623 void 11313859Sml29623 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 11323859Sml29623 { 11333859Sml29623 npi_handle_t handle; 11343859Sml29623 rbr_stat_t rbr_stat; 11353859Sml29623 addr44_t hd_addr; 11363859Sml29623 addr44_t tail_addr; 11373859Sml29623 uint16_t qlen; 11383859Sml29623 11393859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 11406929Smisaki "==> nxge_rxdma_regs_dump: rdc channel %d", rdc)); 11413859Sml29623 11423859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11433859Sml29623 11443859Sml29623 /* RBR head */ 11453859Sml29623 hd_addr.addr = 0; 11463859Sml29623 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 11475165Syc148097 #if defined(__i386) 11483859Sml29623 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11496929Smisaki (void *)(uint32_t)hd_addr.addr); 11505125Sjoycey #else 11515165Syc148097 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 11526929Smisaki (void *)hd_addr.addr); 11535125Sjoycey #endif 11543859Sml29623 11553859Sml29623 /* RBR stats */ 11563859Sml29623 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 11573859Sml29623 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 11583859Sml29623 11593859Sml29623 /* RCR tail */ 11603859Sml29623 tail_addr.addr = 0; 11613859Sml29623 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 11625165Syc148097 #if defined(__i386) 11633859Sml29623 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11646929Smisaki (void *)(uint32_t)tail_addr.addr); 11655125Sjoycey #else 11665165Syc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 11676929Smisaki (void *)tail_addr.addr); 11685125Sjoycey #endif 11693859Sml29623 11703859Sml29623 /* RCR qlen */ 11713859Sml29623 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 11723859Sml29623 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 11733859Sml29623 11743859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11756929Smisaki "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 11763859Sml29623 } 11773859Sml29623 11783859Sml29623 void 11793859Sml29623 nxge_rxdma_stop(p_nxge_t nxgep) 11803859Sml29623 { 11813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 11823859Sml29623 11833859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 11843859Sml29623 (void) nxge_rx_mac_disable(nxgep); 11853859Sml29623 
(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 11863859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 11873859Sml29623 } 11883859Sml29623 11893859Sml29623 void 11903859Sml29623 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 11913859Sml29623 { 11923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 11933859Sml29623 11943859Sml29623 (void) nxge_rxdma_stop(nxgep); 11953859Sml29623 (void) nxge_uninit_rxdma_channels(nxgep); 11963859Sml29623 (void) nxge_init_rxdma_channels(nxgep); 11973859Sml29623 11983859Sml29623 #ifndef AXIS_DEBUG_LB 11993859Sml29623 (void) nxge_xcvr_init(nxgep); 12003859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 12013859Sml29623 #endif 12023859Sml29623 (void) nxge_rx_mac_enable(nxgep); 12033859Sml29623 12043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 12053859Sml29623 } 12063859Sml29623 12073859Sml29623 nxge_status_t 12083859Sml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 12093859Sml29623 { 12106495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 12116495Sspeer nxge_status_t status; 12126495Sspeer npi_status_t rs; 12136495Sspeer int rdc; 12143859Sml29623 12153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12166929Smisaki "==> nxge_rxdma_hw_mode: mode %d", enable)); 12173859Sml29623 12183859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 12193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12206495Sspeer "<== nxge_rxdma_mode: not initialized")); 12213859Sml29623 return (NXGE_ERROR); 12223859Sml29623 } 12236495Sspeer 12246495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 12256495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 12266495Sspeer "<== nxge_tx_port_fatal_err_recover: " 12276495Sspeer "NULL ring pointer(s)")); 12283859Sml29623 return (NXGE_ERROR); 12293859Sml29623 } 12303859Sml29623 12316495Sspeer if (set->owned.map == 0) { 12326495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 12336495Sspeer "nxge_rxdma_regs_dump_channels: no 
channels")); 12346495Sspeer return (NULL); 12356495Sspeer } 12366495Sspeer 12376495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 12386495Sspeer if ((1 << rdc) & set->owned.map) { 12396495Sspeer rx_rbr_ring_t *ring = 12406495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 12416495Sspeer npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep); 12426495Sspeer if (ring) { 12436495Sspeer if (enable) { 12446495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12456495Sspeer "==> nxge_rxdma_hw_mode: " 12466495Sspeer "channel %d (enable)", rdc)); 12476495Sspeer rs = npi_rxdma_cfg_rdc_enable 12486495Sspeer (handle, rdc); 12496495Sspeer } else { 12506495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12516495Sspeer "==> nxge_rxdma_hw_mode: " 12526495Sspeer "channel %d disable)", rdc)); 12536495Sspeer rs = npi_rxdma_cfg_rdc_disable 12546495Sspeer (handle, rdc); 12556495Sspeer } 12566495Sspeer } 12573859Sml29623 } 12583859Sml29623 } 12593859Sml29623 12603859Sml29623 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 12613859Sml29623 12623859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 12636929Smisaki "<== nxge_rxdma_hw_mode: status 0x%x", status)); 12643859Sml29623 12653859Sml29623 return (status); 12663859Sml29623 } 12673859Sml29623 12683859Sml29623 void 12693859Sml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 12703859Sml29623 { 12713859Sml29623 npi_handle_t handle; 12723859Sml29623 12733859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12746929Smisaki "==> nxge_rxdma_enable_channel: channel %d", channel)); 12753859Sml29623 12763859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12773859Sml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 12783859Sml29623 12793859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 12803859Sml29623 } 12813859Sml29623 12823859Sml29623 void 12833859Sml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 12843859Sml29623 { 12853859Sml29623 npi_handle_t handle; 12863859Sml29623 12873859Sml29623 
NXGE_DEBUG_MSG((nxgep, DMA_CTL, 12886929Smisaki "==> nxge_rxdma_disable_channel: channel %d", channel)); 12893859Sml29623 12903859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 12913859Sml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 12923859Sml29623 12933859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 12943859Sml29623 } 12953859Sml29623 12963859Sml29623 void 12973859Sml29623 nxge_hw_start_rx(p_nxge_t nxgep) 12983859Sml29623 { 12993859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 13003859Sml29623 13013859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 13023859Sml29623 (void) nxge_rx_mac_enable(nxgep); 13033859Sml29623 13043859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 13053859Sml29623 } 13063859Sml29623 13073859Sml29623 /*ARGSUSED*/ 13083859Sml29623 void 13093859Sml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 13103859Sml29623 { 13116495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 13126495Sspeer int rdc; 13133859Sml29623 13143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 13153859Sml29623 13166495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 13176495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 13186495Sspeer "<== nxge_tx_port_fatal_err_recover: " 13196495Sspeer "NULL ring pointer(s)")); 13203859Sml29623 return; 13213859Sml29623 } 13223859Sml29623 13236495Sspeer if (set->owned.map == 0) { 13243859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13256495Sspeer "nxge_rxdma_regs_dump_channels: no channels")); 13263859Sml29623 return; 13273859Sml29623 } 13286495Sspeer 13296495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 13306495Sspeer if ((1 << rdc) & set->owned.map) { 13316495Sspeer rx_rbr_ring_t *ring = 13326495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 13336495Sspeer if (ring) { 13346495Sspeer nxge_rxdma_hw_stop(nxgep, rdc); 13356495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 13366929Smisaki "==> nxge_fixup_rxdma_rings: " 
13376929Smisaki "channel %d ring $%px", 13386929Smisaki rdc, ring)); 13396495Sspeer (void) nxge_rxdma_fixup_channel 13406495Sspeer (nxgep, rdc, rdc); 13416495Sspeer } 13426495Sspeer } 13433859Sml29623 } 13443859Sml29623 13453859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 13463859Sml29623 } 13473859Sml29623 13483859Sml29623 void 13493859Sml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 13503859Sml29623 { 13513859Sml29623 int i; 13523859Sml29623 13533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 13543859Sml29623 i = nxge_rxdma_get_ring_index(nxgep, channel); 13553859Sml29623 if (i < 0) { 13563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13576929Smisaki "<== nxge_rxdma_fix_channel: no entry found")); 13583859Sml29623 return; 13593859Sml29623 } 13603859Sml29623 13613859Sml29623 nxge_rxdma_fixup_channel(nxgep, channel, i); 13623859Sml29623 13636495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel")); 13643859Sml29623 } 13653859Sml29623 13663859Sml29623 void 13673859Sml29623 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 13683859Sml29623 { 13693859Sml29623 int ndmas; 13703859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 13713859Sml29623 p_rx_rbr_ring_t *rbr_rings; 13723859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 13733859Sml29623 p_rx_rcr_ring_t *rcr_rings; 13743859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 13753859Sml29623 p_rx_mbox_t *rx_mbox_p; 13763859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 13773859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 13783859Sml29623 p_rx_rbr_ring_t rbrp; 13793859Sml29623 p_rx_rcr_ring_t rcrp; 13803859Sml29623 p_rx_mbox_t mboxp; 13813859Sml29623 p_nxge_dma_common_t dmap; 13823859Sml29623 nxge_status_t status = NXGE_OK; 13833859Sml29623 13843859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 13853859Sml29623 13863859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 13873859Sml29623 13883859Sml29623 
dma_buf_poolp = nxgep->rx_buf_pool_p; 13893859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 13903859Sml29623 13913859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13923859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13936929Smisaki "<== nxge_rxdma_fixup_channel: buf not allocated")); 13943859Sml29623 return; 13953859Sml29623 } 13963859Sml29623 13973859Sml29623 ndmas = dma_buf_poolp->ndmas; 13983859Sml29623 if (!ndmas) { 13993859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 14006929Smisaki "<== nxge_rxdma_fixup_channel: no dma allocated")); 14013859Sml29623 return; 14023859Sml29623 } 14033859Sml29623 14043859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 14053859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 14063859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 14073859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 14083859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 14093859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 14103859Sml29623 14113859Sml29623 /* Reinitialize the receive block and completion rings */ 14123859Sml29623 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 14136929Smisaki rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 14146929Smisaki mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 14153859Sml29623 14163859Sml29623 14173859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 14183859Sml29623 rbrp->rbr_rd_index = 0; 14193859Sml29623 rcrp->comp_rd_index = 0; 14203859Sml29623 rcrp->comp_wt_index = 0; 14213859Sml29623 14223859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14233859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14243859Sml29623 14253859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14266929Smisaki rbrp, rcrp, mboxp); 14273859Sml29623 if (status != NXGE_OK) { 14283859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14293859Sml29623 } 14303859Sml29623 if (status != NXGE_OK) { 14313859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14323859Sml29623 } 14333859Sml29623 14343859Sml29623 
nxge_rxdma_fixup_channel_fail: 14353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14366929Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 14373859Sml29623 14383859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 14393859Sml29623 } 14403859Sml29623 14416495Sspeer /* ARGSUSED */ 14423859Sml29623 int 14433859Sml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 14443859Sml29623 { 14456495Sspeer return (channel); 14463859Sml29623 } 14473859Sml29623 14483859Sml29623 p_rx_rbr_ring_t 14493859Sml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14503859Sml29623 { 14516495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 14526495Sspeer nxge_channel_t rdc; 14533859Sml29623 14543859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14556929Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14563859Sml29623 14576495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 14586495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 14596495Sspeer "<== nxge_rxdma_get_rbr_ring: " 14606495Sspeer "NULL ring pointer(s)")); 14613859Sml29623 return (NULL); 14623859Sml29623 } 14636495Sspeer 14646495Sspeer if (set->owned.map == 0) { 14653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14666495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 14673859Sml29623 return (NULL); 14683859Sml29623 } 14693859Sml29623 14706495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 14716495Sspeer if ((1 << rdc) & set->owned.map) { 14726495Sspeer rx_rbr_ring_t *ring = 14736495Sspeer nxgep->rx_rbr_rings->rbr_rings[rdc]; 14746495Sspeer if (ring) { 14756495Sspeer if (channel == ring->rdc) { 14766495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 14776495Sspeer "==> nxge_rxdma_get_rbr_ring: " 14786495Sspeer "channel %d ring $%p", rdc, ring)); 14796495Sspeer return (ring); 14806495Sspeer } 14816495Sspeer } 14823859Sml29623 } 14833859Sml29623 } 14843859Sml29623 14853859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14866929Smisaki "<== 
nxge_rxdma_get_rbr_ring: not found")); 14873859Sml29623 14883859Sml29623 return (NULL); 14893859Sml29623 } 14903859Sml29623 14913859Sml29623 p_rx_rcr_ring_t 14923859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 14933859Sml29623 { 14946495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 14956495Sspeer nxge_channel_t rdc; 14963859Sml29623 14973859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14986929Smisaki "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 14993859Sml29623 15006495Sspeer if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) { 15016495Sspeer NXGE_DEBUG_MSG((nxgep, TX_CTL, 15026495Sspeer "<== nxge_rxdma_get_rcr_ring: " 15036495Sspeer "NULL ring pointer(s)")); 15043859Sml29623 return (NULL); 15053859Sml29623 } 15066495Sspeer 15076495Sspeer if (set->owned.map == 0) { 15083859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15096495Sspeer "<== nxge_rxdma_get_rbr_ring: no channels")); 15103859Sml29623 return (NULL); 15113859Sml29623 } 15123859Sml29623 15136495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 15146495Sspeer if ((1 << rdc) & set->owned.map) { 15156495Sspeer rx_rcr_ring_t *ring = 15166495Sspeer nxgep->rx_rcr_rings->rcr_rings[rdc]; 15176495Sspeer if (ring) { 15186495Sspeer if (channel == ring->rdc) { 15196495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 15206495Sspeer "==> nxge_rxdma_get_rcr_ring: " 15216495Sspeer "channel %d ring $%p", rdc, ring)); 15226495Sspeer return (ring); 15236495Sspeer } 15246495Sspeer } 15253859Sml29623 } 15263859Sml29623 } 15273859Sml29623 15283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 15296929Smisaki "<== nxge_rxdma_get_rcr_ring: not found")); 15303859Sml29623 15313859Sml29623 return (NULL); 15323859Sml29623 } 15333859Sml29623 15343859Sml29623 /* 15353859Sml29623 * Static functions start here. 
15363859Sml29623 */ 15373859Sml29623 static p_rx_msg_t 15383859Sml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 15393859Sml29623 { 15403859Sml29623 p_rx_msg_t nxge_mp = NULL; 15413859Sml29623 p_nxge_dma_common_t dmamsg_p; 15423859Sml29623 uchar_t *buffer; 15433859Sml29623 15443859Sml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 15453859Sml29623 if (nxge_mp == NULL) { 15464185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15476929Smisaki "Allocation of a rx msg failed.")); 15483859Sml29623 goto nxge_allocb_exit; 15493859Sml29623 } 15503859Sml29623 15513859Sml29623 nxge_mp->use_buf_pool = B_FALSE; 15523859Sml29623 if (dmabuf_p) { 15533859Sml29623 nxge_mp->use_buf_pool = B_TRUE; 15543859Sml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15553859Sml29623 *dmamsg_p = *dmabuf_p; 15563859Sml29623 dmamsg_p->nblocks = 1; 15573859Sml29623 dmamsg_p->block_size = size; 15583859Sml29623 dmamsg_p->alength = size; 15593859Sml29623 buffer = (uchar_t *)dmabuf_p->kaddrp; 15603859Sml29623 15613859Sml29623 dmabuf_p->kaddrp = (void *) 15626929Smisaki ((char *)dmabuf_p->kaddrp + size); 15633859Sml29623 dmabuf_p->ioaddr_pp = (void *) 15646929Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 15653859Sml29623 dmabuf_p->alength -= size; 15663859Sml29623 dmabuf_p->offset += size; 15673859Sml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 15683859Sml29623 dmabuf_p->dma_cookie.dmac_size -= size; 15693859Sml29623 15703859Sml29623 } else { 15713859Sml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 15723859Sml29623 if (buffer == NULL) { 15734185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15746929Smisaki "Allocation of a receive page failed.")); 15753859Sml29623 goto nxge_allocb_fail1; 15763859Sml29623 } 15773859Sml29623 } 15783859Sml29623 15793859Sml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 15803859Sml29623 if (nxge_mp->rx_mblk_p == NULL) { 15814185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 
15823859Sml29623 goto nxge_allocb_fail2; 15833859Sml29623 } 15843859Sml29623 15853859Sml29623 nxge_mp->buffer = buffer; 15863859Sml29623 nxge_mp->block_size = size; 15873859Sml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 15883859Sml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 15893859Sml29623 nxge_mp->ref_cnt = 1; 15903859Sml29623 nxge_mp->free = B_TRUE; 15913859Sml29623 nxge_mp->rx_use_bcopy = B_FALSE; 15923859Sml29623 15933859Sml29623 atomic_inc_32(&nxge_mblks_pending); 15943859Sml29623 15953859Sml29623 goto nxge_allocb_exit; 15963859Sml29623 15973859Sml29623 nxge_allocb_fail2: 15983859Sml29623 if (!nxge_mp->use_buf_pool) { 15993859Sml29623 KMEM_FREE(buffer, size); 16003859Sml29623 } 16013859Sml29623 16023859Sml29623 nxge_allocb_fail1: 16033859Sml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 16043859Sml29623 nxge_mp = NULL; 16053859Sml29623 16063859Sml29623 nxge_allocb_exit: 16073859Sml29623 return (nxge_mp); 16083859Sml29623 } 16093859Sml29623 16103859Sml29623 p_mblk_t 16113859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16123859Sml29623 { 16133859Sml29623 p_mblk_t mp; 16143859Sml29623 16153859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 16163859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 16176929Smisaki "offset = 0x%08X " 16186929Smisaki "size = 0x%08X", 16196929Smisaki nxge_mp, offset, size)); 16203859Sml29623 16213859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 16226929Smisaki 0, &nxge_mp->freeb); 16233859Sml29623 if (mp == NULL) { 16243859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16253859Sml29623 goto nxge_dupb_exit; 16263859Sml29623 } 16273859Sml29623 atomic_inc_32(&nxge_mp->ref_cnt); 16283859Sml29623 16293859Sml29623 16303859Sml29623 nxge_dupb_exit: 16313859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16326929Smisaki nxge_mp)); 16333859Sml29623 return (mp); 16343859Sml29623 } 16353859Sml29623 16363859Sml29623 p_mblk_t 16373859Sml29623 
nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16383859Sml29623 { 16393859Sml29623 p_mblk_t mp; 16403859Sml29623 uchar_t *dp; 16413859Sml29623 16423859Sml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 16433859Sml29623 if (mp == NULL) { 16443859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 16453859Sml29623 goto nxge_dupb_bcopy_exit; 16463859Sml29623 } 16473859Sml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 16483859Sml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 16493859Sml29623 mp->b_wptr = dp + size; 16503859Sml29623 16513859Sml29623 nxge_dupb_bcopy_exit: 16523859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16536929Smisaki nxge_mp)); 16543859Sml29623 return (mp); 16553859Sml29623 } 16563859Sml29623 16573859Sml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 16583859Sml29623 p_rx_msg_t rx_msg_p); 16593859Sml29623 16603859Sml29623 void 16613859Sml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 16623859Sml29623 { 16633859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 16643859Sml29623 16653859Sml29623 /* Reuse this buffer */ 16663859Sml29623 rx_msg_p->free = B_FALSE; 16673859Sml29623 rx_msg_p->cur_usage_cnt = 0; 16683859Sml29623 rx_msg_p->max_usage_cnt = 0; 16693859Sml29623 rx_msg_p->pkt_buf_size = 0; 16703859Sml29623 16713859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 16723859Sml29623 rx_msg_p->rx_use_bcopy = B_FALSE; 16733859Sml29623 atomic_dec_32(&rx_rbr_p->rbr_consumed); 16743859Sml29623 } 16753859Sml29623 16763859Sml29623 /* 16773859Sml29623 * Get the rbr header pointer and its offset index. 
16783859Sml29623 */ 16793859Sml29623 MUTEX_ENTER(&rx_rbr_p->post_lock); 16803859Sml29623 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 16816929Smisaki rx_rbr_p->rbr_wrap_mask); 16823859Sml29623 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 16833859Sml29623 MUTEX_EXIT(&rx_rbr_p->post_lock); 16845770Sml29623 npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep), 16855770Sml29623 rx_rbr_p->rdc, 1); 16863859Sml29623 16873859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 16886929Smisaki "<== nxge_post_page (channel %d post_next_index %d)", 16896929Smisaki rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 16903859Sml29623 16913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 16923859Sml29623 } 16933859Sml29623 16943859Sml29623 void 16953859Sml29623 nxge_freeb(p_rx_msg_t rx_msg_p) 16963859Sml29623 { 16973859Sml29623 size_t size; 16983859Sml29623 uchar_t *buffer = NULL; 16993859Sml29623 int ref_cnt; 17004874Sml29623 boolean_t free_state = B_FALSE; 17013859Sml29623 17025170Stm144005 rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p; 17035170Stm144005 17043859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 17053859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 17066929Smisaki "nxge_freeb:rx_msg_p = $%p (block pending %d)", 17076929Smisaki rx_msg_p, nxge_mblks_pending)); 17083859Sml29623 17094874Sml29623 /* 17104874Sml29623 * First we need to get the free state, then 17114874Sml29623 * atomic decrement the reference count to prevent 17124874Sml29623 * the race condition with the interrupt thread that 17134874Sml29623 * is processing a loaned up buffer block. 
17144874Sml29623 */ 17154874Sml29623 free_state = rx_msg_p->free; 17163859Sml29623 ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1); 17173859Sml29623 if (!ref_cnt) { 17185770Sml29623 atomic_dec_32(&nxge_mblks_pending); 17193859Sml29623 buffer = rx_msg_p->buffer; 17203859Sml29623 size = rx_msg_p->block_size; 17213859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: " 17226929Smisaki "will free: rx_msg_p = $%p (block pending %d)", 17236929Smisaki rx_msg_p, nxge_mblks_pending)); 17243859Sml29623 17253859Sml29623 if (!rx_msg_p->use_buf_pool) { 17263859Sml29623 KMEM_FREE(buffer, size); 17273859Sml29623 } 17283859Sml29623 17293859Sml29623 KMEM_FREE(rx_msg_p, sizeof (rx_msg_t)); 17305170Stm144005 17315759Smisaki if (ring) { 17325759Smisaki /* 17335759Smisaki * Decrement the receive buffer ring's reference 17345759Smisaki * count, too. 17355759Smisaki */ 17365759Smisaki atomic_dec_32(&ring->rbr_ref_cnt); 17375759Smisaki 17385759Smisaki /* 17396495Sspeer * Free the receive buffer ring, if 17405759Smisaki * 1. all the receive buffers have been freed 17415759Smisaki * 2. and we are in the proper state (that is, 17425759Smisaki * we are not UNMAPPING). 17435759Smisaki */ 17445759Smisaki if (ring->rbr_ref_cnt == 0 && 17455759Smisaki ring->rbr_state == RBR_UNMAPPED) { 17466495Sspeer /* 17476495Sspeer * Free receive data buffers, 17486495Sspeer * buffer index information 17496495Sspeer * (rxring_info) and 17506495Sspeer * the message block ring. 
17516495Sspeer */ 17526495Sspeer NXGE_DEBUG_MSG((NULL, RX_CTL, 17536495Sspeer "nxge_freeb:rx_msg_p = $%p " 17546495Sspeer "(block pending %d) free buffers", 17556495Sspeer rx_msg_p, nxge_mblks_pending)); 17566495Sspeer nxge_rxdma_databuf_free(ring); 17576495Sspeer if (ring->ring_info) { 17586495Sspeer KMEM_FREE(ring->ring_info, 17596495Sspeer sizeof (rxring_info_t)); 17606495Sspeer } 17616495Sspeer 17626495Sspeer if (ring->rx_msg_ring) { 17636495Sspeer KMEM_FREE(ring->rx_msg_ring, 17646495Sspeer ring->tnblocks * 17656495Sspeer sizeof (p_rx_msg_t)); 17666495Sspeer } 17675759Smisaki KMEM_FREE(ring, sizeof (*ring)); 17685759Smisaki } 17695170Stm144005 } 17703859Sml29623 return; 17713859Sml29623 } 17723859Sml29623 17733859Sml29623 /* 17743859Sml29623 * Repost buffer. 17753859Sml29623 */ 17765759Smisaki if (free_state && (ref_cnt == 1) && ring) { 17773859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, 17783859Sml29623 "nxge_freeb: post page $%p:", rx_msg_p)); 17795170Stm144005 if (ring->rbr_state == RBR_POSTING) 17805170Stm144005 nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p); 17813859Sml29623 } 17823859Sml29623 17833859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb")); 17843859Sml29623 } 17853859Sml29623 17863859Sml29623 uint_t 17873859Sml29623 nxge_rx_intr(void *arg1, void *arg2) 17883859Sml29623 { 17893859Sml29623 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 17903859Sml29623 p_nxge_t nxgep = (p_nxge_t)arg2; 17913859Sml29623 p_nxge_ldg_t ldgp; 17923859Sml29623 uint8_t channel; 17933859Sml29623 npi_handle_t handle; 17943859Sml29623 rx_dma_ctl_stat_t cs; 17953859Sml29623 17963859Sml29623 #ifdef NXGE_DEBUG 17973859Sml29623 rxdma_cfig1_t cfg; 17983859Sml29623 #endif 17993859Sml29623 uint_t serviced = DDI_INTR_UNCLAIMED; 18003859Sml29623 18013859Sml29623 if (ldvp == NULL) { 18023859Sml29623 NXGE_DEBUG_MSG((NULL, INT_CTL, 18036929Smisaki "<== nxge_rx_intr: arg2 $%p arg1 $%p", 18046929Smisaki nxgep, ldvp)); 18053859Sml29623 18063859Sml29623 return (DDI_INTR_CLAIMED); 
18073859Sml29623 } 18083859Sml29623 18093859Sml29623 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 18103859Sml29623 nxgep = ldvp->nxgep; 18113859Sml29623 } 18126602Sspeer 18136602Sspeer if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) || 18146602Sspeer (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) { 18156602Sspeer NXGE_DEBUG_MSG((nxgep, INT_CTL, 18166602Sspeer "<== nxge_rx_intr: interface not started or intialized")); 18176602Sspeer return (DDI_INTR_CLAIMED); 18186602Sspeer } 18196602Sspeer 18203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 18216929Smisaki "==> nxge_rx_intr: arg2 $%p arg1 $%p", 18226929Smisaki nxgep, ldvp)); 18233859Sml29623 18243859Sml29623 /* 18253859Sml29623 * This interrupt handler is for a specific 18263859Sml29623 * receive dma channel. 18273859Sml29623 */ 18283859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 18293859Sml29623 /* 18303859Sml29623 * Get the control and status for this channel. 18313859Sml29623 */ 18323859Sml29623 channel = ldvp->channel; 18333859Sml29623 ldgp = ldvp->ldgp; 18343859Sml29623 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 18353859Sml29623 18363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 18376929Smisaki "cs 0x%016llx rcrto 0x%x rcrthres %x", 18386929Smisaki channel, 18396929Smisaki cs.value, 18406929Smisaki cs.bits.hdw.rcrto, 18416929Smisaki cs.bits.hdw.rcrthres)); 18423859Sml29623 18436495Sspeer nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 18443859Sml29623 serviced = DDI_INTR_CLAIMED; 18453859Sml29623 18463859Sml29623 /* error events. */ 18473859Sml29623 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 18486495Sspeer (void) nxge_rx_err_evnts(nxgep, channel, cs); 18493859Sml29623 } 18503859Sml29623 18513859Sml29623 nxge_intr_exit: 18523859Sml29623 /* 18533859Sml29623 * Enable the mailbox update interrupt if we want 18543859Sml29623 * to use mailbox. We probably don't need to use 18553859Sml29623 * mailbox as it only saves us one pio read. 
18563859Sml29623 * Also write 1 to rcrthres and rcrto to clear 18573859Sml29623 * these two edge triggered bits. 18583859Sml29623 */ 18593859Sml29623 18603859Sml29623 cs.value &= RX_DMA_CTL_STAT_WR1C; 18613859Sml29623 cs.bits.hdw.mex = 1; 18623859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 18636929Smisaki cs.value); 18643859Sml29623 18653859Sml29623 /* 18663859Sml29623 * Rearm this logical group if this is a single device 18673859Sml29623 * group. 18683859Sml29623 */ 18693859Sml29623 if (ldgp->nldvs == 1) { 18703859Sml29623 ldgimgm_t mgm; 18713859Sml29623 mgm.value = 0; 18723859Sml29623 mgm.bits.ldw.arm = 1; 18733859Sml29623 mgm.bits.ldw.timer = ldgp->ldg_timer; 18746495Sspeer if (isLDOMguest(nxgep)) { 18756495Sspeer nxge_hio_ldgimgn(nxgep, ldgp); 18766495Sspeer } else { 18776495Sspeer NXGE_REG_WR64(handle, 18783859Sml29623 LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 18793859Sml29623 mgm.value); 18806495Sspeer } 18813859Sml29623 } 18823859Sml29623 18833859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d", 18846929Smisaki serviced)); 18853859Sml29623 return (serviced); 18863859Sml29623 } 18873859Sml29623 18883859Sml29623 /* 18893859Sml29623 * Process the packets received in the specified logical device 18903859Sml29623 * and pass up a chain of message blocks to the upper layer. 
18913859Sml29623 */ 18923859Sml29623 static void 18936495Sspeer nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 18943859Sml29623 { 18953859Sml29623 p_mblk_t mp; 18963859Sml29623 p_rx_rcr_ring_t rcrp; 18973859Sml29623 18983859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 18996495Sspeer rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 19006495Sspeer if (rcrp->poll_flag) { 19016495Sspeer /* It is in the poll mode */ 19026495Sspeer return; 19036495Sspeer } 19046495Sspeer 19056495Sspeer if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 19063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19076929Smisaki "<== nxge_rx_pkts_vring: no mp")); 19083859Sml29623 return; 19093859Sml29623 } 19103859Sml29623 19113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 19126929Smisaki mp)); 19133859Sml29623 19143859Sml29623 #ifdef NXGE_DEBUG 19153859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19166929Smisaki "==> nxge_rx_pkts_vring:calling mac_rx " 19176929Smisaki "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 19186929Smisaki "mac_handle $%p", 19196929Smisaki mp->b_wptr - mp->b_rptr, 19206929Smisaki mp, mp->b_cont, mp->b_next, 19216929Smisaki rcrp, rcrp->rcr_mac_handle)); 19223859Sml29623 19233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19246929Smisaki "==> nxge_rx_pkts_vring: dump packets " 19256929Smisaki "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 19266929Smisaki mp, 19276929Smisaki mp->b_rptr, 19286929Smisaki mp->b_wptr, 19296929Smisaki nxge_dump_packet((char *)mp->b_rptr, 19306929Smisaki mp->b_wptr - mp->b_rptr))); 19313859Sml29623 if (mp->b_cont) { 19323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19336929Smisaki "==> nxge_rx_pkts_vring: dump b_cont packets " 19346929Smisaki "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 19356929Smisaki mp->b_cont, 19366929Smisaki mp->b_cont->b_rptr, 19376929Smisaki mp->b_cont->b_wptr, 19386929Smisaki nxge_dump_packet((char *)mp->b_cont->b_rptr, 19396929Smisaki mp->b_cont->b_wptr - 
mp->b_cont->b_rptr))); 19403859Sml29623 } 19413859Sml29623 if (mp->b_next) { 19423859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 19436929Smisaki "==> nxge_rx_pkts_vring: dump next packets " 19446929Smisaki "(b_rptr $%p): %s", 19456929Smisaki mp->b_next->b_rptr, 19466929Smisaki nxge_dump_packet((char *)mp->b_next->b_rptr, 19476929Smisaki mp->b_next->b_wptr - mp->b_next->b_rptr))); 19483859Sml29623 } 19493859Sml29623 #endif 19503859Sml29623 19516495Sspeer if (!isLDOMguest(nxgep)) 19526495Sspeer mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp); 19536495Sspeer #if defined(sun4v) 19546495Sspeer else { /* isLDOMguest(nxgep) */ 19556495Sspeer nxge_hio_data_t *nhd = (nxge_hio_data_t *) 19566495Sspeer nxgep->nxge_hw_p->hio; 19576495Sspeer nx_vio_fp_t *vio = &nhd->hio.vio; 19586495Sspeer 19596495Sspeer if (vio->cb.vio_net_rx_cb) { 19606495Sspeer (*vio->cb.vio_net_rx_cb) 19616495Sspeer (nxgep->hio_vr->vhp, mp); 19626495Sspeer } 19636495Sspeer } 19646495Sspeer #endif 19653859Sml29623 } 19663859Sml29623 19673859Sml29623 19683859Sml29623 /* 19693859Sml29623 * This routine is the main packet receive processing function. 19703859Sml29623 * It gets the packet type, error code, and buffer related 19713859Sml29623 * information from the receive completion entry. 19723859Sml29623 * How many completion entries to process is based on the number of packets 19733859Sml29623 * queued by the hardware, a hardware maintained tail pointer 19743859Sml29623 * and a configurable receive packet count. 19753859Sml29623 * 19763859Sml29623 * A chain of message blocks will be created as result of processing 19773859Sml29623 * the completion entries. This chain of message blocks will be returned and 19783859Sml29623 * a hardware control status register will be updated with the number of 19793859Sml29623 * packets were removed from the hardware queue. 
19803859Sml29623 * 19813859Sml29623 */ 19826495Sspeer static mblk_t * 19836495Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs, 19846495Sspeer int bytes_to_pickup) 19853859Sml29623 { 19863859Sml29623 npi_handle_t handle; 19873859Sml29623 uint8_t channel; 19883859Sml29623 uint32_t comp_rd_index; 19893859Sml29623 p_rcr_entry_t rcr_desc_rd_head_p; 19903859Sml29623 p_rcr_entry_t rcr_desc_rd_head_pp; 19913859Sml29623 p_mblk_t nmp, mp_cont, head_mp, *tail_mp; 19923859Sml29623 uint16_t qlen, nrcr_read, npkt_read; 19936495Sspeer uint32_t qlen_hw; 19943859Sml29623 boolean_t multi; 19956495Sspeer rcrcfig_b_t rcr_cfg_b; 19966495Sspeer int totallen = 0; 19973859Sml29623 #if defined(_BIG_ENDIAN) 19983859Sml29623 npi_status_t rs = NPI_SUCCESS; 19993859Sml29623 #endif 20003859Sml29623 20016495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 20026929Smisaki "channel %d", rcr_p->rdc)); 20033859Sml29623 20043859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 20053859Sml29623 return (NULL); 20063859Sml29623 } 20073859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 20083859Sml29623 channel = rcr_p->rdc; 20093859Sml29623 20103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20116929Smisaki "==> nxge_rx_pkts: START: rcr channel %d " 20126929Smisaki "head_p $%p head_pp $%p index %d ", 20136929Smisaki channel, rcr_p->rcr_desc_rd_head_p, 20146929Smisaki rcr_p->rcr_desc_rd_head_pp, 20156929Smisaki rcr_p->comp_rd_index)); 20163859Sml29623 20173859Sml29623 20183859Sml29623 #if !defined(_BIG_ENDIAN) 20193859Sml29623 qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff; 20203859Sml29623 #else 20213859Sml29623 rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen); 20223859Sml29623 if (rs != NPI_SUCCESS) { 20236495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: " 20243859Sml29623 "channel %d, get qlen failed 0x%08x", 20256929Smisaki channel, rs)); 20263859Sml29623 return (NULL); 20273859Sml29623 } 20283859Sml29623 #endif 
20293859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d " 20306929Smisaki "qlen %d", channel, qlen)); 20313859Sml29623 20323859Sml29623 20333859Sml29623 20343859Sml29623 if (!qlen) { 20353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20366929Smisaki "==> nxge_rx_pkts:rcr channel %d " 20376929Smisaki "qlen %d (no pkts)", channel, qlen)); 20383859Sml29623 20393859Sml29623 return (NULL); 20403859Sml29623 } 20413859Sml29623 20423859Sml29623 comp_rd_index = rcr_p->comp_rd_index; 20433859Sml29623 20443859Sml29623 rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p; 20453859Sml29623 rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp; 20463859Sml29623 nrcr_read = npkt_read = 0; 20473859Sml29623 20483859Sml29623 /* 20493859Sml29623 * Number of packets queued 20503859Sml29623 * (The jumbo or multi packet will be counted as only one 20513859Sml29623 * packets and it may take up more than one completion entry). 20523859Sml29623 */ 20533859Sml29623 qlen_hw = (qlen < nxge_max_rx_pkts) ? 20546929Smisaki qlen : nxge_max_rx_pkts; 20553859Sml29623 head_mp = NULL; 20563859Sml29623 tail_mp = &head_mp; 20573859Sml29623 nmp = mp_cont = NULL; 20583859Sml29623 multi = B_FALSE; 20593859Sml29623 20603859Sml29623 while (qlen_hw) { 20613859Sml29623 20623859Sml29623 #ifdef NXGE_DEBUG 20633859Sml29623 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 20643859Sml29623 #endif 20653859Sml29623 /* 20663859Sml29623 * Process one completion ring entry. 
20673859Sml29623 */ 20683859Sml29623 nxge_receive_packet(nxgep, 20696929Smisaki rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 20703859Sml29623 20713859Sml29623 /* 20723859Sml29623 * message chaining modes 20733859Sml29623 */ 20743859Sml29623 if (nmp) { 20753859Sml29623 nmp->b_next = NULL; 20763859Sml29623 if (!multi && !mp_cont) { /* frame fits a partition */ 20773859Sml29623 *tail_mp = nmp; 20783859Sml29623 tail_mp = &nmp->b_next; 20796495Sspeer totallen += MBLKL(nmp); 20803859Sml29623 nmp = NULL; 20813859Sml29623 } else if (multi && !mp_cont) { /* first segment */ 20823859Sml29623 *tail_mp = nmp; 20833859Sml29623 tail_mp = &nmp->b_cont; 20846495Sspeer totallen += MBLKL(nmp); 20853859Sml29623 } else if (multi && mp_cont) { /* mid of multi segs */ 20863859Sml29623 *tail_mp = mp_cont; 20873859Sml29623 tail_mp = &mp_cont->b_cont; 20886495Sspeer totallen += MBLKL(mp_cont); 20893859Sml29623 } else if (!multi && mp_cont) { /* last segment */ 20903859Sml29623 *tail_mp = mp_cont; 20913859Sml29623 tail_mp = &nmp->b_next; 20926495Sspeer totallen += MBLKL(mp_cont); 20933859Sml29623 nmp = NULL; 20943859Sml29623 } 20953859Sml29623 } 20963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20976929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 20986929Smisaki "before updating: multi %d " 20996929Smisaki "nrcr_read %d " 21006929Smisaki "npk read %d " 21016929Smisaki "head_pp $%p index %d ", 21026929Smisaki channel, 21036929Smisaki multi, 21046929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 21056929Smisaki comp_rd_index)); 21063859Sml29623 21073859Sml29623 if (!multi) { 21083859Sml29623 qlen_hw--; 21093859Sml29623 npkt_read++; 21103859Sml29623 } 21113859Sml29623 21123859Sml29623 /* 21133859Sml29623 * Update the next read entry. 
21143859Sml29623 */ 21153859Sml29623 comp_rd_index = NEXT_ENTRY(comp_rd_index, 21166929Smisaki rcr_p->comp_wrap_mask); 21173859Sml29623 21183859Sml29623 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 21196929Smisaki rcr_p->rcr_desc_first_p, 21206929Smisaki rcr_p->rcr_desc_last_p); 21213859Sml29623 21223859Sml29623 nrcr_read++; 21233859Sml29623 21243859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21256929Smisaki "<== nxge_rx_pkts: (SAM, process one packet) " 21266929Smisaki "nrcr_read %d", 21276929Smisaki nrcr_read)); 21283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21296929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 21306929Smisaki "multi %d " 21316929Smisaki "nrcr_read %d " 21326929Smisaki "npk read %d " 21336929Smisaki "head_pp $%p index %d ", 21346929Smisaki channel, 21356929Smisaki multi, 21366929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 21376929Smisaki comp_rd_index)); 21383859Sml29623 21396495Sspeer if ((bytes_to_pickup != -1) && 21406495Sspeer (totallen >= bytes_to_pickup)) { 21416495Sspeer break; 21426495Sspeer } 21433859Sml29623 } 21443859Sml29623 21453859Sml29623 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 21463859Sml29623 rcr_p->comp_rd_index = comp_rd_index; 21473859Sml29623 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 21483859Sml29623 21493859Sml29623 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 21506929Smisaki (nxgep->intr_threshold != rcr_p->intr_threshold)) { 21513859Sml29623 rcr_p->intr_timeout = nxgep->intr_timeout; 21523859Sml29623 rcr_p->intr_threshold = nxgep->intr_threshold; 21533859Sml29623 rcr_cfg_b.value = 0x0ULL; 21543859Sml29623 if (rcr_p->intr_timeout) 21553859Sml29623 rcr_cfg_b.bits.ldw.entout = 1; 21563859Sml29623 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 21573859Sml29623 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 21583859Sml29623 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 21596929Smisaki channel, rcr_cfg_b.value); 21603859Sml29623 } 21613859Sml29623 21623859Sml29623 cs.bits.ldw.pktread = 
npkt_read; 21633859Sml29623 cs.bits.ldw.ptrread = nrcr_read; 21643859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 21656929Smisaki channel, cs.value); 21663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21676929Smisaki "==> nxge_rx_pkts: EXIT: rcr channel %d " 21686929Smisaki "head_pp $%p index %016llx ", 21696929Smisaki channel, 21706929Smisaki rcr_p->rcr_desc_rd_head_pp, 21716929Smisaki rcr_p->comp_rd_index)); 21723859Sml29623 /* 21733859Sml29623 * Update RCR buffer pointer read and number of packets 21743859Sml29623 * read. 21753859Sml29623 */ 21763859Sml29623 21773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts")); 21783859Sml29623 return (head_mp); 21793859Sml29623 } 21803859Sml29623 21813859Sml29623 void 21823859Sml29623 nxge_receive_packet(p_nxge_t nxgep, 21833859Sml29623 p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p, 21843859Sml29623 boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont) 21853859Sml29623 { 21863859Sml29623 p_mblk_t nmp = NULL; 21873859Sml29623 uint64_t multi; 21883859Sml29623 uint64_t dcf_err; 21893859Sml29623 uint8_t channel; 21903859Sml29623 21913859Sml29623 boolean_t first_entry = B_TRUE; 21923859Sml29623 boolean_t is_tcp_udp = B_FALSE; 21933859Sml29623 boolean_t buffer_free = B_FALSE; 21943859Sml29623 boolean_t error_send_up = B_FALSE; 21953859Sml29623 uint8_t error_type; 21963859Sml29623 uint16_t l2_len; 21973859Sml29623 uint16_t skip_len; 21983859Sml29623 uint8_t pktbufsz_type; 21993859Sml29623 uint64_t rcr_entry; 22003859Sml29623 uint64_t *pkt_buf_addr_pp; 22013859Sml29623 uint64_t *pkt_buf_addr_p; 22023859Sml29623 uint32_t buf_offset; 22033859Sml29623 uint32_t bsize; 22043859Sml29623 uint32_t error_disp_cnt; 22053859Sml29623 uint32_t msg_index; 22063859Sml29623 p_rx_rbr_ring_t rx_rbr_p; 22073859Sml29623 p_rx_msg_t *rx_msg_ring_p; 22083859Sml29623 p_rx_msg_t rx_msg_p; 22093859Sml29623 uint16_t sw_offset_bytes = 0, hdr_size = 0; 22103859Sml29623 nxge_status_t status = NXGE_OK; 22113859Sml29623 boolean_t 
is_valid = B_FALSE; 22123859Sml29623 p_nxge_rx_ring_stats_t rdc_stats; 22133859Sml29623 uint32_t bytes_read; 22143859Sml29623 uint64_t pkt_type; 22153859Sml29623 uint64_t frag; 22166028Ssbehera boolean_t pkt_too_long_err = B_FALSE; 22173859Sml29623 #ifdef NXGE_DEBUG 22183859Sml29623 int dump_len; 22193859Sml29623 #endif 22203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 22213859Sml29623 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 22223859Sml29623 22233859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 22243859Sml29623 22253859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK); 22263859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 22273859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 22283859Sml29623 22293859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 22303859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK); 22313859Sml29623 22323859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 22333859Sml29623 22343859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 22356929Smisaki RCR_PKTBUFSZ_SHIFT); 22365125Sjoycey #if defined(__i386) 22375125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 22386929Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 22395125Sjoycey #else 22403859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 22416929Smisaki RCR_PKT_BUF_ADDR_SHIFT); 22425125Sjoycey #endif 22433859Sml29623 22443859Sml29623 channel = rcr_p->rdc; 22453859Sml29623 22463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22476929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 22486929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 22496929Smisaki "error_type 0x%x pkt_type 0x%x " 22506929Smisaki "pktbufsz_type %d ", 22516929Smisaki rcr_desc_rd_head_p, 22526929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 22536929Smisaki multi, 22546929Smisaki error_type, 22556929Smisaki pkt_type, 22566929Smisaki pktbufsz_type)); 
22573859Sml29623 22583859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22596929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 22606929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 22616929Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 22626929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 22636929Smisaki multi, 22646929Smisaki error_type, 22656929Smisaki pkt_type)); 22663859Sml29623 22673859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22686929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 22696929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 22706929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 22713859Sml29623 22723859Sml29623 /* get the stats ptr */ 22733859Sml29623 rdc_stats = rcr_p->rdc_stats; 22743859Sml29623 22753859Sml29623 if (!l2_len) { 22763859Sml29623 22773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22786929Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 22793859Sml29623 return; 22803859Sml29623 } 22813859Sml29623 22826028Ssbehera /* 22836028Ssbehera * Sofware workaround for BMAC hardware limitation that allows 22846028Ssbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 22856028Ssbehera * instead of 0x2400 for jumbo. 22866028Ssbehera */ 22876028Ssbehera if (l2_len > nxgep->mac.maxframesize) { 22886028Ssbehera pkt_too_long_err = B_TRUE; 22896028Ssbehera } 22906028Ssbehera 22914185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
*/ 22924185Sspeer l2_len -= ETHERFCSL; 22934185Sspeer 22943859Sml29623 /* shift 6 bits to get the full io address */ 22955125Sjoycey #if defined(__i386) 22965125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 22976929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 22985125Sjoycey #else 22993859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 23006929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 23015125Sjoycey #endif 23023859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23036929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 23046929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23056929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23063859Sml29623 23073859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p; 23083859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 23093859Sml29623 23103859Sml29623 if (first_entry) { 23113859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 23126929Smisaki RXDMA_HDR_SIZE_DEFAULT); 23133859Sml29623 23143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23156929Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 23166929Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 23176929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23186929Smisaki hdr_size)); 23193859Sml29623 } 23203859Sml29623 23213859Sml29623 MUTEX_ENTER(&rcr_p->lock); 23223859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock); 23233859Sml29623 23243859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23256929Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 23266929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23276929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23283859Sml29623 23293859Sml29623 /* 23303859Sml29623 * Packet buffer address in the completion entry points 23313859Sml29623 * to the starting buffer address (offset 0). 23323859Sml29623 * Use the starting buffer address to locate the corresponding 23333859Sml29623 * kernel address. 
23343859Sml29623 */ 23353859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 23366929Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 23376929Smisaki &buf_offset, 23386929Smisaki &msg_index); 23393859Sml29623 23403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23416929Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 23426929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23436929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23443859Sml29623 23453859Sml29623 if (status != NXGE_OK) { 23463859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 23473859Sml29623 MUTEX_EXIT(&rcr_p->lock); 23483859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23496929Smisaki "<== nxge_receive_packet: found vaddr failed %d", 23506929Smisaki status)); 23513859Sml29623 return; 23523859Sml29623 } 23533859Sml29623 23543859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23556929Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 23566929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23576929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23583859Sml29623 23593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23606929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 23616929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23626929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 23633859Sml29623 23643859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 23653859Sml29623 23663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23676929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 23686929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23696929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 23703859Sml29623 23713859Sml29623 switch (pktbufsz_type) { 23723859Sml29623 case RCR_PKTBUFSZ_0: 23733859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 23743859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23756929Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 23763859Sml29623 break; 23773859Sml29623 case RCR_PKTBUFSZ_1: 23783859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 
23793859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23806929Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 23813859Sml29623 break; 23823859Sml29623 case RCR_PKTBUFSZ_2: 23833859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 23843859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23856929Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 23863859Sml29623 break; 23873859Sml29623 case RCR_SINGLE_BLOCK: 23883859Sml29623 bsize = rx_msg_p->block_size; 23893859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23906929Smisaki "==> nxge_receive_packet: single %d", bsize)); 23913859Sml29623 23923859Sml29623 break; 23933859Sml29623 default: 23943859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 23953859Sml29623 MUTEX_EXIT(&rcr_p->lock); 23963859Sml29623 return; 23973859Sml29623 } 23983859Sml29623 23993859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 24006929Smisaki (buf_offset + sw_offset_bytes), 24016929Smisaki (hdr_size + l2_len), 24026929Smisaki DDI_DMA_SYNC_FORCPU); 24033859Sml29623 24043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24056929Smisaki "==> nxge_receive_packet: after first dump:usage count")); 24063859Sml29623 24073859Sml29623 if (rx_msg_p->cur_usage_cnt == 0) { 24083859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 24093859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 24103859Sml29623 if (rx_rbr_p->rbr_consumed < 24116929Smisaki rx_rbr_p->rbr_threshold_hi) { 24123859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 24136929Smisaki ((rx_rbr_p->rbr_consumed >= 24146929Smisaki rx_rbr_p->rbr_threshold_lo) && 24156929Smisaki (rx_rbr_p->rbr_bufsize_type >= 24166929Smisaki pktbufsz_type))) { 24173859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 24183859Sml29623 } 24193859Sml29623 } else { 24203859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 24213859Sml29623 } 24223859Sml29623 } 24233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24246929Smisaki "==> nxge_receive_packet: buf %d (new block) ", 24256929Smisaki bsize)); 24263859Sml29623 24273859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 
24283859Sml29623 rx_msg_p->pkt_buf_size = bsize; 24293859Sml29623 rx_msg_p->cur_usage_cnt = 1; 24303859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 24313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24326929Smisaki "==> nxge_receive_packet: buf %d " 24336929Smisaki "(single block) ", 24346929Smisaki bsize)); 24353859Sml29623 /* 24363859Sml29623 * Buffer can be reused once the free function 24373859Sml29623 * is called. 24383859Sml29623 */ 24393859Sml29623 rx_msg_p->max_usage_cnt = 1; 24403859Sml29623 buffer_free = B_TRUE; 24413859Sml29623 } else { 24423859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 24433859Sml29623 if (rx_msg_p->max_usage_cnt == 1) { 24443859Sml29623 buffer_free = B_TRUE; 24453859Sml29623 } 24463859Sml29623 } 24473859Sml29623 } else { 24483859Sml29623 rx_msg_p->cur_usage_cnt++; 24493859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 24503859Sml29623 buffer_free = B_TRUE; 24513859Sml29623 } 24523859Sml29623 } 24533859Sml29623 24543859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24553859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 24566929Smisaki msg_index, l2_len, 24576929Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 24583859Sml29623 24596028Ssbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 24603859Sml29623 rdc_stats->ierrors++; 24613859Sml29623 if (dcf_err) { 24623859Sml29623 rdc_stats->dcf_err++; 24633859Sml29623 #ifdef NXGE_DEBUG 24643859Sml29623 if (!rdc_stats->dcf_err) { 24653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24663859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr" 24673859Sml29623 " 0x%llx", channel, rcr_entry)); 24683859Sml29623 } 24693859Sml29623 #endif 24703859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 24716929Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 24726028Ssbehera } else if (pkt_too_long_err) { 24736028Ssbehera rdc_stats->pkt_too_long_err++; 24746028Ssbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 
24756028Ssbehera " channel %d packet length [%d] > " 24766028Ssbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 24776028Ssbehera nxgep->mac.maxframesize)); 24783859Sml29623 } else { 24793859Sml29623 /* Update error stats */ 24803859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 24813859Sml29623 rdc_stats->errlog.compl_err_type = error_type; 24823859Sml29623 24833859Sml29623 switch (error_type) { 24845523Syc148097 /* 24855523Syc148097 * Do not send FMA ereport for RCR_L2_ERROR and 24865523Syc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 24875523Syc148097 * back pressure rather than HW failures. 24885523Syc148097 */ 24895165Syc148097 case RCR_L2_ERROR: 24905165Syc148097 rdc_stats->l2_err++; 24915165Syc148097 if (rdc_stats->l2_err < 24925165Syc148097 error_disp_cnt) { 24935165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 24945165Syc148097 " nxge_receive_packet:" 24955165Syc148097 " channel %d RCR L2_ERROR", 24965165Syc148097 channel)); 24975165Syc148097 } 24985165Syc148097 break; 24995165Syc148097 case RCR_L4_CSUM_ERROR: 25005165Syc148097 error_send_up = B_TRUE; 25015165Syc148097 rdc_stats->l4_cksum_err++; 25025165Syc148097 if (rdc_stats->l4_cksum_err < 25035165Syc148097 error_disp_cnt) { 25043859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25055165Syc148097 " nxge_receive_packet:" 25065165Syc148097 " channel %d" 25075165Syc148097 " RCR L4_CSUM_ERROR", channel)); 25085165Syc148097 } 25095165Syc148097 break; 25105523Syc148097 /* 25115523Syc148097 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and 25125523Syc148097 * RCR_ZCP_SOFT_ERROR because they reflect the same 25135523Syc148097 * FFLP and ZCP errors that have been reported by 25145523Syc148097 * nxge_fflp.c and nxge_zcp.c. 
25155523Syc148097 */ 25165165Syc148097 case RCR_FFLP_SOFT_ERROR: 25175165Syc148097 error_send_up = B_TRUE; 25185165Syc148097 rdc_stats->fflp_soft_err++; 25195165Syc148097 if (rdc_stats->fflp_soft_err < 25205165Syc148097 error_disp_cnt) { 25215165Syc148097 NXGE_ERROR_MSG((nxgep, 25225165Syc148097 NXGE_ERR_CTL, 25235165Syc148097 " nxge_receive_packet:" 25245165Syc148097 " channel %d" 25255165Syc148097 " RCR FFLP_SOFT_ERROR", channel)); 25265165Syc148097 } 25275165Syc148097 break; 25285165Syc148097 case RCR_ZCP_SOFT_ERROR: 25295165Syc148097 error_send_up = B_TRUE; 25305165Syc148097 rdc_stats->fflp_soft_err++; 25315165Syc148097 if (rdc_stats->zcp_soft_err < 25325165Syc148097 error_disp_cnt) 25335165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25345165Syc148097 " nxge_receive_packet: Channel %d" 25355165Syc148097 " RCR ZCP_SOFT_ERROR", channel)); 25365165Syc148097 break; 25375165Syc148097 default: 25385165Syc148097 rdc_stats->rcr_unknown_err++; 25395165Syc148097 if (rdc_stats->rcr_unknown_err 25405165Syc148097 < error_disp_cnt) { 25415165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 25425165Syc148097 " nxge_receive_packet: Channel %d" 25435165Syc148097 " RCR entry 0x%llx error 0x%x", 25445165Syc148097 rcr_entry, channel, error_type)); 25455165Syc148097 } 25465165Syc148097 break; 25473859Sml29623 } 25483859Sml29623 } 25493859Sml29623 25503859Sml29623 /* 25513859Sml29623 * Update and repost buffer block if max usage 25523859Sml29623 * count is reached. 
25533859Sml29623 */ 25543859Sml29623 if (error_send_up == B_FALSE) { 25554874Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 25563859Sml29623 if (buffer_free == B_TRUE) { 25573859Sml29623 rx_msg_p->free = B_TRUE; 25583859Sml29623 } 25593859Sml29623 25603859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25613859Sml29623 MUTEX_EXIT(&rcr_p->lock); 25623859Sml29623 nxge_freeb(rx_msg_p); 25633859Sml29623 return; 25643859Sml29623 } 25653859Sml29623 } 25663859Sml29623 25673859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25686929Smisaki "==> nxge_receive_packet: DMA sync second ")); 25693859Sml29623 25705165Syc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 25713859Sml29623 skip_len = sw_offset_bytes + hdr_size; 25723859Sml29623 if (!rx_msg_p->rx_use_bcopy) { 25734874Sml29623 /* 25744874Sml29623 * For loaned up buffers, the driver reference count 25754874Sml29623 * will be incremented first and then the free state. 25764874Sml29623 */ 25775165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 25785165Syc148097 if (first_entry) { 25795165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len]; 25805165Syc148097 if (l2_len < bsize - skip_len) { 25815165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len]; 25825165Syc148097 } else { 25835165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize 25845165Syc148097 - skip_len]; 25855165Syc148097 } 25865165Syc148097 } else { 25875165Syc148097 if (l2_len - bytes_read < bsize) { 25885165Syc148097 nmp->b_wptr = 25895165Syc148097 &nmp->b_rptr[l2_len - bytes_read]; 25905165Syc148097 } else { 25915165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 25925165Syc148097 } 25935165Syc148097 } 25945165Syc148097 } 25953859Sml29623 } else { 25965165Syc148097 if (first_entry) { 25975165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 25985165Syc148097 l2_len < bsize - skip_len ? 25995165Syc148097 l2_len : bsize - skip_len); 26005165Syc148097 } else { 26015165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 26025165Syc148097 l2_len - bytes_read < bsize ? 
26035165Syc148097 l2_len - bytes_read : bsize); 26045165Syc148097 } 26053859Sml29623 } 26063859Sml29623 if (nmp != NULL) { 26077145Syc148097 if (first_entry) { 26087145Syc148097 /* 26097145Syc148097 * Jumbo packets may be received with more than one 26107145Syc148097 * buffer, increment ipackets for the first entry only. 26117145Syc148097 */ 26127145Syc148097 rdc_stats->ipackets++; 26137145Syc148097 26147145Syc148097 /* Update ibytes for kstat. */ 26157145Syc148097 rdc_stats->ibytes += skip_len 26167145Syc148097 + l2_len < bsize ? l2_len : bsize; 26177145Syc148097 /* 26187145Syc148097 * Update the number of bytes read so far for the 26197145Syc148097 * current frame. 26207145Syc148097 */ 26215165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 26227145Syc148097 } else { 26237145Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 26247145Syc148097 l2_len - bytes_read : bsize; 26253859Sml29623 bytes_read += nmp->b_wptr - nmp->b_rptr; 26267145Syc148097 } 26275165Syc148097 26285165Syc148097 NXGE_DEBUG_MSG((nxgep, RX_CTL, 26295165Syc148097 "==> nxge_receive_packet after dupb: " 26305165Syc148097 "rbr consumed %d " 26315165Syc148097 "pktbufsz_type %d " 26325165Syc148097 "nmp $%p rptr $%p wptr $%p " 26335165Syc148097 "buf_offset %d bzise %d l2_len %d skip_len %d", 26345165Syc148097 rx_rbr_p->rbr_consumed, 26355165Syc148097 pktbufsz_type, 26365165Syc148097 nmp, nmp->b_rptr, nmp->b_wptr, 26375165Syc148097 buf_offset, bsize, l2_len, skip_len)); 26383859Sml29623 } else { 26393859Sml29623 cmn_err(CE_WARN, "!nxge_receive_packet: " 26406929Smisaki "update stats (error)"); 26414977Sraghus atomic_inc_32(&rx_msg_p->ref_cnt); 26424977Sraghus if (buffer_free == B_TRUE) { 26434977Sraghus rx_msg_p->free = B_TRUE; 26444977Sraghus } 26454977Sraghus MUTEX_EXIT(&rx_rbr_p->lock); 26464977Sraghus MUTEX_EXIT(&rcr_p->lock); 26474977Sraghus nxge_freeb(rx_msg_p); 26484977Sraghus return; 26493859Sml29623 } 26505060Syc148097 26513859Sml29623 if (buffer_free == B_TRUE) { 26523859Sml29623 
rx_msg_p->free = B_TRUE; 26533859Sml29623 } 26547145Syc148097 26553859Sml29623 is_valid = (nmp != NULL); 26565165Syc148097 26575165Syc148097 rcr_p->rcvd_pkt_bytes = bytes_read; 26585165Syc148097 26593859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 26603859Sml29623 MUTEX_EXIT(&rcr_p->lock); 26613859Sml29623 26623859Sml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 26633859Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 26643859Sml29623 nxge_freeb(rx_msg_p); 26653859Sml29623 } 26663859Sml29623 26673859Sml29623 if (is_valid) { 26683859Sml29623 nmp->b_cont = NULL; 26693859Sml29623 if (first_entry) { 26703859Sml29623 *mp = nmp; 26713859Sml29623 *mp_cont = NULL; 26725165Syc148097 } else { 26733859Sml29623 *mp_cont = nmp; 26745165Syc148097 } 26753859Sml29623 } 26763859Sml29623 26773859Sml29623 /* 26787145Syc148097 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. 26797145Syc148097 * If a packet is not fragmented and no error bit is set, then 26807145Syc148097 * L4 checksum is OK. 26813859Sml29623 */ 26827145Syc148097 26833859Sml29623 if (is_valid && !multi) { 26846495Sspeer /* 26857145Syc148097 * Update hardware checksuming. 26867145Syc148097 * 26876611Sml29623 * If the checksum flag nxge_chksum_offload 26886611Sml29623 * is 1, TCP and UDP packets can be sent 26896495Sspeer * up with good checksum. If the checksum flag 26906611Sml29623 * is set to 0, checksum reporting will apply to 26916495Sspeer * TCP packets only (workaround for a hardware bug). 26926611Sml29623 * If the checksum flag nxge_cksum_offload is 26936611Sml29623 * greater than 1, both TCP and UDP packets 26946611Sml29623 * will not be reported its hardware checksum results. 26956495Sspeer */ 26966611Sml29623 if (nxge_cksum_offload == 1) { 26976495Sspeer is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 26986929Smisaki pkt_type == RCR_PKT_IS_UDP) ? 26996929Smisaki B_TRUE: B_FALSE); 27006611Sml29623 } else if (!nxge_cksum_offload) { 27016495Sspeer /* TCP checksum only. 
			 */
			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
			    B_TRUE: B_FALSE);
		}

		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: "
		    "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d",
		    is_valid, multi, is_tcp_udp, frag, error_type));

		/*
		 * Only advertise a good hardware checksum for a complete
		 * (unfragmented), error-free TCP/UDP frame.
		 */
		if (is_tcp_udp && !frag && !error_type) {
			(void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0,
			    HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0);
			NXGE_DEBUG_MSG((nxgep, RX_CTL,
			    "==> nxge_receive_packet: Full tcp/udp cksum "
			    "is_valid 0x%x multi 0x%llx pkt %d frag %d "
			    "error %d",
			    is_valid, multi, is_tcp_udp, frag, error_type));
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
	    "==> nxge_receive_packet: *mp 0x%016llx", *mp));

	*multi_p = (multi == RCR_MULTI_MASK);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: "
	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
	    *multi_p, nmp, *mp, *mp_cont));
}

/*
 * nxge_rx_err_evnts
 *
 * Decode the hardware error bits latched in the RDC control/status
 * word, bump the matching kstat counters and post FM ereports for the
 * conditions that warrant them.  A fatal channel error triggers a
 * channel recovery (nxge_rxdma_fatal_err_recover), a fatal port error
 * triggers an IPP recovery (nxge_ipp_fatal_err_recover); neither
 * recovery is attempted in an LDOMs guest, which simply returns
 * NXGE_ERROR.
 *
 * Arguments:
 * 	nxgep
 * 	channel	The channel that reported the error(s).
 * 	cs	Snapshot of the RX DMA control/status register.
 *
 * Returns:
 * 	NXGE_OK, or an error status when recovery failed or was not
 * 	attempted.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
{
	p_nxge_rx_ring_stats_t	rdc_stats;
	npi_handle_t		handle;
	npi_status_t		rs;
	boolean_t		rxchan_fatal = B_FALSE;
	boolean_t		rxport_fatal = B_FALSE;
	uint8_t			portn;
	nxge_status_t		status = NXGE_OK;
	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	portn = nxgep->mac.portnum;
	rdc_stats = &nxgep->statsp->rdc_stats[channel];

	if (cs.bits.hdw.rbr_tmout) {
		rdc_stats->rx_rbr_tmout++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
	}
	if (cs.bits.hdw.rsp_cnt_err) {
		rdc_stats->rsp_cnt_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "rsp_cnt_err", channel));
	}
	if (cs.bits.hdw.byte_en_bus) {
		rdc_stats->byte_en_bus++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS);
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: byte_en_bus", channel));
		rxchan_fatal = B_TRUE;
	}
	if (cs.bits.hdw.rsp_dat_err) {
		rdc_stats->rsp_dat_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rsp_dat_err", channel));
	}
	if (cs.bits.hdw.rcr_ack_err) {
		rdc_stats->rcr_ack_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_ack_err", channel));
	}
	if (cs.bits.hdw.dc_fifo_err) {
		rdc_stats->dc_fifo_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR);
		/* This is not a fatal error! */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "dc_fifo_err", channel));
		rxport_fatal = B_TRUE;
	}
	if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) {
		/*
		 * Read the parity-error log registers before deciding
		 * which of the two parity conditions to report.
		 */
		if ((rs = npi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par,
		    &rdc_stats->errlog.sha_par))
		    != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "rcr_sha_par: get perr", channel));
			return (NXGE_ERROR | rs);
		}
		if (cs.bits.hdw.rcr_sha_par) {
			rdc_stats->rcr_sha_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcr_sha_par", channel));
		}
		if (cs.bits.hdw.rbr_pre_par) {
			rdc_stats->rbr_pre_par++;
			NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
			    NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
			rxchan_fatal = B_TRUE;
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rbr_pre_par", channel));
		}
	}
	/*
	 * The Following 4 status bits are for information, the system
	 * is running fine. There is no need to send FMA ereports or
	 * log messages.
	 */
	if (cs.bits.hdw.port_drop_pkt) {
		rdc_stats->port_drop_pkt++;
	}
	if (cs.bits.hdw.wred_drop) {
		rdc_stats->wred_drop++;
	}
	if (cs.bits.hdw.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
	}
	if (cs.bits.hdw.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
	}
	if (cs.bits.hdw.config_err) {
		rdc_stats->config_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "config error", channel));
	}
	if (cs.bits.hdw.rcrincon) {
		rdc_stats->rcrincon++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRINCON);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrincon error", channel));
	}
	if (cs.bits.hdw.rcrfull) {
		rdc_stats->rcrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		/* Rate-limit the log message, not the ereport. */
		if (rdc_stats->rcrfull < error_disp_cnt)
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rx_err_evnts(channel %d): "
			    "fatal error: rcrfull error", channel));
	}
	if (cs.bits.hdw.rbr_empty) {
		/*
		 * This bit is for information, there is no need
		 * send FMA ereport or log a message.
		 */
		rdc_stats->rbr_empty++;
	}
	if (cs.bits.hdw.rbrfull) {
		rdc_stats->rbrfull++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}
	if (cs.bits.hdw.rbrlogpage) {
		rdc_stats->rbrlogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr logical page error", channel));
	}
	if (cs.bits.hdw.cfiglogpage) {
		rdc_stats->cfiglogpage++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
		rxchan_fatal = B_TRUE;
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rx_err_evnts(channel %d): "
		    "fatal error: cfig logical page error", channel));
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
		    portn));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_ipp_fatal_err_recover(nxgep);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	if (rxchan_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));
		if (isLDOMguest(nxgep)) {
			status = NXGE_ERROR;
		} else {
			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
			if (status == NXGE_OK) {
				FM_SERVICE_RESTORED(nxgep);
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));

	return (status);
}

/*
 * nxge_rdc_hvio_setup
 *
 * This code appears to setup some Hypervisor variables.
 *
 * Arguments:
 * 	nxgep
 * 	channel
 *
 * Notes:
 * 	What does NIU_LP_WORKAROUND mean?
29556495Sspeer * 29566495Sspeer * NPI/NXGE function calls: 29576495Sspeer * na 29586495Sspeer * 29596495Sspeer * Context: 29606495Sspeer * Any domain 29616495Sspeer */ 29626495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 29636495Sspeer static void 29646495Sspeer nxge_rdc_hvio_setup( 29656495Sspeer nxge_t *nxgep, int channel) 29663859Sml29623 { 29676495Sspeer nxge_dma_common_t *dma_common; 29686495Sspeer nxge_dma_common_t *dma_control; 29696495Sspeer rx_rbr_ring_t *ring; 29706495Sspeer 29716495Sspeer ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 29726495Sspeer dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 29736495Sspeer 29746495Sspeer ring->hv_set = B_FALSE; 29756495Sspeer 29766495Sspeer ring->hv_rx_buf_base_ioaddr_pp = (uint64_t) 29776495Sspeer dma_common->orig_ioaddr_pp; 29786495Sspeer ring->hv_rx_buf_ioaddr_size = (uint64_t) 29796495Sspeer dma_common->orig_alength; 29806495Sspeer 29816495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 29826495Sspeer "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)", 29836495Sspeer channel, ring->hv_rx_buf_base_ioaddr_pp, 29846495Sspeer dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size, 29856495Sspeer dma_common->orig_alength, dma_common->orig_alength)); 29866495Sspeer 29876495Sspeer dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 29886495Sspeer 29896495Sspeer ring->hv_rx_cntl_base_ioaddr_pp = 29906495Sspeer (uint64_t)dma_control->orig_ioaddr_pp; 29916495Sspeer ring->hv_rx_cntl_ioaddr_size = 29926495Sspeer (uint64_t)dma_control->orig_alength; 29936495Sspeer 29946495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma_channel: " 29956495Sspeer "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)", 29966495Sspeer channel, ring->hv_rx_cntl_base_ioaddr_pp, 29976495Sspeer dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size, 29986495Sspeer dma_control->orig_alength, dma_control->orig_alength)); 29996495Sspeer } 30003859Sml29623 #endif 
30013859Sml29623 30026495Sspeer /* 30036495Sspeer * nxge_map_rxdma 30046495Sspeer * 30056495Sspeer * Map an RDC into our kernel space. 30066495Sspeer * 30076495Sspeer * Arguments: 30086495Sspeer * nxgep 30096495Sspeer * channel The channel to map. 30106495Sspeer * 30116495Sspeer * Notes: 30126495Sspeer * 1. Allocate & initialise a memory pool, if necessary. 30136495Sspeer * 2. Allocate however many receive buffers are required. 30146495Sspeer * 3. Setup buffers, descriptors, and mailbox. 30156495Sspeer * 30166495Sspeer * NPI/NXGE function calls: 30176495Sspeer * nxge_alloc_rx_mem_pool() 30186495Sspeer * nxge_alloc_rbb() 30196495Sspeer * nxge_map_rxdma_channel() 30206495Sspeer * 30216495Sspeer * Registers accessed: 30226495Sspeer * 30236495Sspeer * Context: 30246495Sspeer * Any domain 30256495Sspeer */ 30266495Sspeer static nxge_status_t 30276495Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel) 30286495Sspeer { 30296495Sspeer nxge_dma_common_t **data; 30306495Sspeer nxge_dma_common_t **control; 30316495Sspeer rx_rbr_ring_t **rbr_ring; 30326495Sspeer rx_rcr_ring_t **rcr_ring; 30336495Sspeer rx_mbox_t **mailbox; 30346495Sspeer uint32_t chunks; 30356495Sspeer 30366495Sspeer nxge_status_t status; 30376495Sspeer 30383859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 30393859Sml29623 30406495Sspeer if (!nxgep->rx_buf_pool_p) { 30416495Sspeer if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) { 30426495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30436495Sspeer "<== nxge_map_rxdma: buf not allocated")); 30446495Sspeer return (NXGE_ERROR); 30456495Sspeer } 30463859Sml29623 } 30473859Sml29623 30486495Sspeer if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK) 30496495Sspeer return (NXGE_ERROR); 30503859Sml29623 30513859Sml29623 /* 30523859Sml29623 * Timeout should be set based on the system clock divider. 30533859Sml29623 * The following timeout value of 1 assumes that the 30543859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 
30553859Sml29623 */ 30563859Sml29623 30573859Sml29623 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 30583859Sml29623 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 30593859Sml29623 30603859Sml29623 /* 30616495Sspeer * Map descriptors from the buffer polls for each dma channel. 30626495Sspeer */ 30636495Sspeer 30646495Sspeer /* 30656495Sspeer * Set up and prepare buffer blocks, descriptors 30666495Sspeer * and mailbox. 30673859Sml29623 */ 30686495Sspeer data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel]; 30696495Sspeer rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel]; 30706495Sspeer chunks = nxgep->rx_buf_pool_p->num_chunks[channel]; 30716495Sspeer 30726495Sspeer control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel]; 30736495Sspeer rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel]; 30746495Sspeer 30756495Sspeer mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 30766495Sspeer 30776495Sspeer status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring, 30786495Sspeer chunks, control, rcr_ring, mailbox); 30796495Sspeer if (status != NXGE_OK) { 30806495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30816929Smisaki "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) " 30826929Smisaki "returned 0x%x", 30836929Smisaki channel, status)); 30846495Sspeer return (status); 30856495Sspeer } 30866495Sspeer nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel; 30876495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel; 30886495Sspeer nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats = 30896495Sspeer &nxgep->statsp->rdc_stats[channel]; 30903859Sml29623 30913859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 30926495Sspeer if (!isLDOMguest(nxgep)) 30936495Sspeer nxge_rdc_hvio_setup(nxgep, channel); 30946495Sspeer #endif 30956495Sspeer 30963859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 30976495Sspeer "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel)); 30983859Sml29623 30993859Sml29623 return (status); 
31003859Sml29623 } 31013859Sml29623 31023859Sml29623 static void 31036495Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel) 31043859Sml29623 { 31056495Sspeer rx_rbr_ring_t *rbr_ring; 31066495Sspeer rx_rcr_ring_t *rcr_ring; 31076495Sspeer rx_mbox_t *mailbox; 31086495Sspeer 31096495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel)); 31106495Sspeer 31116495Sspeer if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings || 31126495Sspeer !nxgep->rx_mbox_areas_p) 31133859Sml29623 return; 31146495Sspeer 31156495Sspeer rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel]; 31166495Sspeer rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel]; 31176495Sspeer mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel]; 31186495Sspeer 31196495Sspeer if (!rbr_ring || !rcr_ring || !mailbox) 31203859Sml29623 return; 31216495Sspeer 31226495Sspeer (void) nxge_unmap_rxdma_channel( 31236929Smisaki nxgep, channel, rbr_ring, rcr_ring, mailbox); 31246495Sspeer 31256495Sspeer nxge_free_rxb(nxgep, channel); 31266495Sspeer 31276495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma")); 31283859Sml29623 } 31293859Sml29623 31303859Sml29623 nxge_status_t 31313859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 31323859Sml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 31333859Sml29623 uint32_t num_chunks, 31343859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 31353859Sml29623 p_rx_mbox_t *rx_mbox_p) 31363859Sml29623 { 31373859Sml29623 int status = NXGE_OK; 31383859Sml29623 31393859Sml29623 /* 31403859Sml29623 * Set up and prepare buffer blocks, descriptors 31413859Sml29623 * and mailbox. 
	 */
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_map_rxdma_channel (channel %d)", channel));
	/*
	 * Receive buffer blocks
	 */
	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
	    dma_buf_p, rbr_p, num_chunks);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map buffer failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_exit;
	}

	/*
	 * Receive block ring, completion ring and mailbox.
	 */
	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_map_rxdma_channel (channel %d): "
		    "map config failed 0x%x", channel, status));
		goto nxge_map_rxdma_channel_fail2;
	}

	goto nxge_map_rxdma_channel_exit;

nxge_map_rxdma_channel_fail3:
	/*
	 * Free rbr, rcr.
	 * NOTE(review): no goto currently targets this label, so this
	 * step is unreachable as the code stands; if it is ever used it
	 * falls through into the fail2 cleanup below.
	 */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rbr/rcr "
	    "(status 0x%x channel %d)",
	    status, channel));
	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    *rcr_p, *rx_mbox_p);

nxge_map_rxdma_channel_fail2:
	/* Free buffer blocks */
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "==> nxge_map_rxdma_channel: free rx buffers"
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));
	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);

	/* Collapse any NPI-specific failure code into a generic error. */
	status = NXGE_ERROR;

nxge_map_rxdma_channel_exit:
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "<== nxge_map_rxdma_channel: "
	    "(nxgep 0x%x status 0x%x channel %d)",
	    nxgep, status, channel));

	return (status);
}

/*
 * nxge_unmap_rxdma_channel
 *
 * Undo nxge_map_rxdma_channel(): tear down the block ring, completion
 * ring and mailbox, then release the buffer block ring.
 */
/*ARGSUSED*/
static void
nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));

	/*
	 * unmap receive block ring, completion ring and mailbox.
	 */
	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
	    rcr_p, rx_mbox_p);

	/* unmap buffer blocks */
	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
}

/*
 * nxge_map_rxdma_channel_cfg_ring
 *
 * Map one channel's receive block ring, completion ring and mailbox
 * from the control DMA area (body continues below).
 */
/*ARGSUSED*/
static nxge_status_t
nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
    p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
    p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
{
	p_rx_rbr_ring_t 	rbrp;
	p_rx_rcr_ring_t 	rcrp;
	p_rx_mbox_t 		mboxp;
	p_nxge_dma_common_t 	cntl_dmap;
	p_nxge_dma_common_t 	dmap;
	p_rx_msg_t 		*rx_msg_ring;
	p_rx_msg_t 		rx_msg_p;
	p_rbr_cfig_a_t		rcfga_p;
	p_rbr_cfig_b_t		rcfgb_p;
32343859Sml29623 p_rcrcfig_a_t cfga_p; 32353859Sml29623 p_rcrcfig_b_t cfgb_p; 32363859Sml29623 p_rxdma_cfig1_t cfig1_p; 32373859Sml29623 p_rxdma_cfig2_t cfig2_p; 32383859Sml29623 p_rbr_kick_t kick_p; 32393859Sml29623 uint32_t dmaaddrp; 32403859Sml29623 uint32_t *rbr_vaddrp; 32413859Sml29623 uint32_t bkaddr; 32423859Sml29623 nxge_status_t status = NXGE_OK; 32433859Sml29623 int i; 32443859Sml29623 uint32_t nxge_port_rcr_size; 32453859Sml29623 32463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 32476929Smisaki "==> nxge_map_rxdma_channel_cfg_ring")); 32483859Sml29623 32493859Sml29623 cntl_dmap = *dma_cntl_p; 32503859Sml29623 32513859Sml29623 /* Map in the receive block ring */ 32523859Sml29623 rbrp = *rbr_p; 32533859Sml29623 dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc; 32543859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 32553859Sml29623 /* 32563859Sml29623 * Zero out buffer block ring descriptors. 32573859Sml29623 */ 32583859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 32593859Sml29623 32603859Sml29623 rcfga_p = &(rbrp->rbr_cfga); 32613859Sml29623 rcfgb_p = &(rbrp->rbr_cfgb); 32623859Sml29623 kick_p = &(rbrp->rbr_kick); 32633859Sml29623 rcfga_p->value = 0; 32643859Sml29623 rcfgb_p->value = 0; 32653859Sml29623 kick_p->value = 0; 32663859Sml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 32673859Sml29623 rcfga_p->value = (rbrp->rbr_addr & 32686929Smisaki (RBR_CFIG_A_STDADDR_MASK | 32696929Smisaki RBR_CFIG_A_STDADDR_BASE_MASK)); 32703859Sml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 32713859Sml29623 32723859Sml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 32733859Sml29623 rcfgb_p->bits.ldw.vld0 = 1; 32743859Sml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 32753859Sml29623 rcfgb_p->bits.ldw.vld1 = 1; 32763859Sml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 32773859Sml29623 rcfgb_p->bits.ldw.vld2 = 1; 32783859Sml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 
32793859Sml29623 32803859Sml29623 /* 32813859Sml29623 * For each buffer block, enter receive block address to the ring. 32823859Sml29623 */ 32833859Sml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 32843859Sml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 32853859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 32866929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 32876929Smisaki "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 32883859Sml29623 32893859Sml29623 rx_msg_ring = rbrp->rx_msg_ring; 32903859Sml29623 for (i = 0; i < rbrp->tnblocks; i++) { 32913859Sml29623 rx_msg_p = rx_msg_ring[i]; 32923859Sml29623 rx_msg_p->nxgep = nxgep; 32933859Sml29623 rx_msg_p->rx_rbr_p = rbrp; 32943859Sml29623 bkaddr = (uint32_t) 32956929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 32966929Smisaki >> RBR_BKADDR_SHIFT)); 32973859Sml29623 rx_msg_p->free = B_FALSE; 32983859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 32993859Sml29623 33003859Sml29623 *rbr_vaddrp++ = bkaddr; 33013859Sml29623 } 33023859Sml29623 33033859Sml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 33043859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 33053859Sml29623 33063859Sml29623 rbrp->rbr_rd_index = 0; 33073859Sml29623 33083859Sml29623 rbrp->rbr_consumed = 0; 33093859Sml29623 rbrp->rbr_use_bcopy = B_TRUE; 33103859Sml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 33113859Sml29623 /* 33123859Sml29623 * Do bcopy on packets greater than bcopy size once 33133859Sml29623 * the lo threshold is reached. 33143859Sml29623 * This lo threshold should be less than the hi threshold. 33153859Sml29623 * 33163859Sml29623 * Do bcopy on every packet once the hi threshold is reached. 
33173859Sml29623 */ 33183859Sml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 33193859Sml29623 /* default it to use hi */ 33203859Sml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 33213859Sml29623 } 33223859Sml29623 33233859Sml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 33243859Sml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 33253859Sml29623 } 33263859Sml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 33273859Sml29623 33283859Sml29623 switch (nxge_rx_threshold_hi) { 33293859Sml29623 default: 33303859Sml29623 case NXGE_RX_COPY_NONE: 33313859Sml29623 /* Do not do bcopy at all */ 33323859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 33333859Sml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 33343859Sml29623 break; 33353859Sml29623 33363859Sml29623 case NXGE_RX_COPY_1: 33373859Sml29623 case NXGE_RX_COPY_2: 33383859Sml29623 case NXGE_RX_COPY_3: 33393859Sml29623 case NXGE_RX_COPY_4: 33403859Sml29623 case NXGE_RX_COPY_5: 33413859Sml29623 case NXGE_RX_COPY_6: 33423859Sml29623 case NXGE_RX_COPY_7: 33433859Sml29623 rbrp->rbr_threshold_hi = 33446929Smisaki rbrp->rbb_max * 33456929Smisaki (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 33463859Sml29623 break; 33473859Sml29623 33483859Sml29623 case NXGE_RX_COPY_ALL: 33493859Sml29623 rbrp->rbr_threshold_hi = 0; 33503859Sml29623 break; 33513859Sml29623 } 33523859Sml29623 33533859Sml29623 switch (nxge_rx_threshold_lo) { 33543859Sml29623 default: 33553859Sml29623 case NXGE_RX_COPY_NONE: 33563859Sml29623 /* Do not do bcopy at all */ 33573859Sml29623 if (rbrp->rbr_use_bcopy) { 33583859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 33593859Sml29623 } 33603859Sml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 33613859Sml29623 break; 33623859Sml29623 33633859Sml29623 case NXGE_RX_COPY_1: 33643859Sml29623 case NXGE_RX_COPY_2: 33653859Sml29623 case NXGE_RX_COPY_3: 33663859Sml29623 case NXGE_RX_COPY_4: 33673859Sml29623 case NXGE_RX_COPY_5: 33683859Sml29623 case NXGE_RX_COPY_6: 33693859Sml29623 case NXGE_RX_COPY_7: 33703859Sml29623 
rbrp->rbr_threshold_lo = 33716929Smisaki rbrp->rbb_max * 33726929Smisaki (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 33733859Sml29623 break; 33743859Sml29623 33753859Sml29623 case NXGE_RX_COPY_ALL: 33763859Sml29623 rbrp->rbr_threshold_lo = 0; 33773859Sml29623 break; 33783859Sml29623 } 33793859Sml29623 33803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 33816929Smisaki "nxge_map_rxdma_channel_cfg_ring: channel %d " 33826929Smisaki "rbb_max %d " 33836929Smisaki "rbrp->rbr_bufsize_type %d " 33846929Smisaki "rbb_threshold_hi %d " 33856929Smisaki "rbb_threshold_lo %d", 33866929Smisaki dma_channel, 33876929Smisaki rbrp->rbb_max, 33886929Smisaki rbrp->rbr_bufsize_type, 33896929Smisaki rbrp->rbr_threshold_hi, 33906929Smisaki rbrp->rbr_threshold_lo)); 33913859Sml29623 33923859Sml29623 rbrp->page_valid.value = 0; 33933859Sml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 33943859Sml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 33953859Sml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 33963859Sml29623 rbrp->page_hdl.value = 0; 33973859Sml29623 33983859Sml29623 rbrp->page_valid.bits.ldw.page0 = 1; 33993859Sml29623 rbrp->page_valid.bits.ldw.page1 = 1; 34003859Sml29623 34013859Sml29623 /* Map in the receive completion ring */ 34023859Sml29623 rcrp = (p_rx_rcr_ring_t) 34036929Smisaki KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 34043859Sml29623 rcrp->rdc = dma_channel; 34053859Sml29623 34063859Sml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 34073859Sml29623 rcrp->comp_size = nxge_port_rcr_size; 34083859Sml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 34093859Sml29623 34103859Sml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 34113859Sml29623 34123859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 34133859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 34146929Smisaki sizeof (rcr_entry_t)); 34153859Sml29623 rcrp->comp_rd_index = 0; 34163859Sml29623 rcrp->comp_wt_index = 0; 34173859Sml29623 
rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 34186929Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 34195125Sjoycey #if defined(__i386) 34206929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 34216929Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 34225125Sjoycey #else 34236929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 34246929Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 34255125Sjoycey #endif 34263859Sml29623 34273859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 34286929Smisaki (nxge_port_rcr_size - 1); 34293859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 34306929Smisaki (nxge_port_rcr_size - 1); 34313859Sml29623 34323859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34336929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 34346929Smisaki "channel %d " 34356929Smisaki "rbr_vaddrp $%p " 34366929Smisaki "rcr_desc_rd_head_p $%p " 34376929Smisaki "rcr_desc_rd_head_pp $%p " 34386929Smisaki "rcr_desc_rd_last_p $%p " 34396929Smisaki "rcr_desc_rd_last_pp $%p ", 34406929Smisaki dma_channel, 34416929Smisaki rbr_vaddrp, 34426929Smisaki rcrp->rcr_desc_rd_head_p, 34436929Smisaki rcrp->rcr_desc_rd_head_pp, 34446929Smisaki rcrp->rcr_desc_last_p, 34456929Smisaki rcrp->rcr_desc_last_pp)); 34463859Sml29623 34473859Sml29623 /* 34483859Sml29623 * Zero out buffer block ring descriptors. 
34493859Sml29623 */ 34503859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 34513859Sml29623 rcrp->intr_timeout = nxgep->intr_timeout; 34523859Sml29623 rcrp->intr_threshold = nxgep->intr_threshold; 34533859Sml29623 rcrp->full_hdr_flag = B_FALSE; 34543859Sml29623 rcrp->sw_priv_hdr_len = 0; 34553859Sml29623 34563859Sml29623 cfga_p = &(rcrp->rcr_cfga); 34573859Sml29623 cfgb_p = &(rcrp->rcr_cfgb); 34583859Sml29623 cfga_p->value = 0; 34593859Sml29623 cfgb_p->value = 0; 34603859Sml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 34613859Sml29623 cfga_p->value = (rcrp->rcr_addr & 34626929Smisaki (RCRCFIG_A_STADDR_MASK | 34636929Smisaki RCRCFIG_A_STADDR_BASE_MASK)); 34643859Sml29623 34653859Sml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 34666929Smisaki RCRCFIG_A_LEN_SHIF); 34673859Sml29623 34683859Sml29623 /* 34693859Sml29623 * Timeout should be set based on the system clock divider. 34703859Sml29623 * The following timeout value of 1 assumes that the 34713859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 
34723859Sml29623 */ 34733859Sml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 34743859Sml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 34753859Sml29623 cfgb_p->bits.ldw.entout = 1; 34763859Sml29623 34773859Sml29623 /* Map in the mailbox */ 34783859Sml29623 mboxp = (p_rx_mbox_t) 34796929Smisaki KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 34803859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 34813859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 34823859Sml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 34833859Sml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 34843859Sml29623 cfig1_p->value = cfig2_p->value = 0; 34853859Sml29623 34863859Sml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 34873859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34886929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 34896929Smisaki "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 34906929Smisaki dma_channel, cfig1_p->value, cfig2_p->value, 34916929Smisaki mboxp->mbox_addr)); 34923859Sml29623 34933859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 34946929Smisaki & 0xfff); 34953859Sml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 34963859Sml29623 34973859Sml29623 34983859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 34993859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 35006929Smisaki RXDMA_CFIG2_MBADDR_L_MASK); 35013859Sml29623 35023859Sml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 35033859Sml29623 35043859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35056929Smisaki "==> nxge_map_rxdma_channel_cfg_ring: " 35066929Smisaki "channel %d damaddrp $%p " 35076929Smisaki "cfg1 0x%016llx cfig2 0x%016llx", 35086929Smisaki dma_channel, dmaaddrp, 35096929Smisaki cfig1_p->value, cfig2_p->value)); 35103859Sml29623 35113859Sml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 35123859Sml29623 cfig2_p->bits.ldw.offset = 
rcrp->sw_priv_hdr_len; 35133859Sml29623 35143859Sml29623 rbrp->rx_rcr_p = rcrp; 35153859Sml29623 rcrp->rx_rbr_p = rbrp; 35163859Sml29623 *rcr_p = rcrp; 35173859Sml29623 *rx_mbox_p = mboxp; 35183859Sml29623 35193859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35206929Smisaki "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 35213859Sml29623 35223859Sml29623 return (status); 35233859Sml29623 } 35243859Sml29623 35253859Sml29623 /*ARGSUSED*/ 35263859Sml29623 static void 35273859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 35283859Sml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 35293859Sml29623 { 35303859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35316929Smisaki "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 35326929Smisaki rcr_p->rdc)); 35333859Sml29623 35343859Sml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 35353859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 35363859Sml29623 35373859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35386929Smisaki "<== nxge_unmap_rxdma_channel_cfg_ring")); 35393859Sml29623 } 35403859Sml29623 35413859Sml29623 static nxge_status_t 35423859Sml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 35433859Sml29623 p_nxge_dma_common_t *dma_buf_p, 35443859Sml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 35453859Sml29623 { 35463859Sml29623 p_rx_rbr_ring_t rbrp; 35473859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 35483859Sml29623 p_rx_msg_t *rx_msg_ring; 35493859Sml29623 p_rx_msg_t rx_msg_p; 35503859Sml29623 p_mblk_t mblk_p; 35513859Sml29623 35523859Sml29623 rxring_info_t *ring_info; 35533859Sml29623 nxge_status_t status = NXGE_OK; 35543859Sml29623 int i, j, index; 35553859Sml29623 uint32_t size, bsize, nblocks, nmsgs; 35563859Sml29623 35573859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35586929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d", 35596929Smisaki channel)); 35603859Sml29623 35613859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 35623859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35636929Smisaki " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 35646929Smisaki "chunks bufp 0x%016llx", 35656929Smisaki channel, num_chunks, dma_bufp)); 35663859Sml29623 35673859Sml29623 nmsgs = 0; 35683859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 35693859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35706929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 35716929Smisaki "bufp 0x%016llx nblocks %d nmsgs %d", 35726929Smisaki channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 35733859Sml29623 nmsgs += tmp_bufp->nblocks; 35743859Sml29623 } 35753859Sml29623 if (!nmsgs) { 35764185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 35776929Smisaki "<== nxge_map_rxdma_channel_buf_ring: channel %d " 35786929Smisaki "no msg blocks", 35796929Smisaki channel)); 35803859Sml29623 status = NXGE_ERROR; 35813859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 35823859Sml29623 } 35833859Sml29623 35845170Stm144005 rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP); 35853859Sml29623 35863859Sml29623 size = nmsgs * sizeof (p_rx_msg_t); 35873859Sml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 35883859Sml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 35896929Smisaki KM_SLEEP); 35903859Sml29623 35913859Sml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 35926929Smisaki (void *)nxgep->interrupt_cookie); 35933859Sml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 35946929Smisaki (void *)nxgep->interrupt_cookie); 35953859Sml29623 rbrp->rdc = channel; 35963859Sml29623 rbrp->num_blocks = num_chunks; 35973859Sml29623 rbrp->tnblocks = nmsgs; 35983859Sml29623 rbrp->rbb_max = nmsgs; 35993859Sml29623 rbrp->rbr_max_size = nmsgs; 36003859Sml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 36013859Sml29623 36023859Sml29623 /* 36033859Sml29623 * Buffer sizes suggested by NIU architect. 36043859Sml29623 * 256, 512 and 2K. 
36053859Sml29623 */ 36063859Sml29623 36073859Sml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 36083859Sml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 36093859Sml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 36103859Sml29623 36113859Sml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 36123859Sml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 36133859Sml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 36143859Sml29623 36153859Sml29623 rbrp->block_size = nxgep->rx_default_block_size; 36163859Sml29623 36173859Sml29623 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 36183859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 36193859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 36203859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 36213859Sml29623 } else { 36223859Sml29623 if (rbrp->block_size >= 0x2000) { 36233859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 36243859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 36253859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 36263859Sml29623 } else { 36273859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 36283859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 36293859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 36303859Sml29623 } 36313859Sml29623 } 36323859Sml29623 36333859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36346929Smisaki "==> nxge_map_rxdma_channel_buf_ring: channel %d " 36356929Smisaki "actual rbr max %d rbb_max %d nmsgs %d " 36366929Smisaki "rbrp->block_size %d default_block_size %d " 36376929Smisaki "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 36386929Smisaki channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 36396929Smisaki rbrp->block_size, nxgep->rx_default_block_size, 36406929Smisaki nxge_rbr_size, nxge_rbr_spare_size)); 36413859Sml29623 36423859Sml29623 /* Map in buffers from the buffer pool. 
*/ 36433859Sml29623 index = 0; 36443859Sml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 36453859Sml29623 bsize = dma_bufp->block_size; 36463859Sml29623 nblocks = dma_bufp->nblocks; 36475125Sjoycey #if defined(__i386) 36485125Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 36495125Sjoycey #else 36503859Sml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 36515125Sjoycey #endif 36523859Sml29623 ring_info->buffer[i].buf_index = i; 36533859Sml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 36543859Sml29623 ring_info->buffer[i].start_index = index; 36555125Sjoycey #if defined(__i386) 36565125Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 36575125Sjoycey #else 36583859Sml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 36595125Sjoycey #endif 36603859Sml29623 36613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36626929Smisaki " nxge_map_rxdma_channel_buf_ring: map channel %d " 36636929Smisaki "chunk %d" 36646929Smisaki " nblocks %d chunk_size %x block_size 0x%x " 36656929Smisaki "dma_bufp $%p", channel, i, 36666929Smisaki dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 36676929Smisaki dma_bufp)); 36683859Sml29623 36693859Sml29623 for (j = 0; j < nblocks; j++) { 36703859Sml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 36716929Smisaki dma_bufp)) == NULL) { 36724185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 36736929Smisaki "allocb failed (index %d i %d j %d)", 36746929Smisaki index, i, j)); 36754185Sspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 36763859Sml29623 } 36773859Sml29623 rx_msg_ring[index] = rx_msg_p; 36783859Sml29623 rx_msg_p->block_index = index; 36793859Sml29623 rx_msg_p->shifted_addr = (uint32_t) 36806929Smisaki ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 36816929Smisaki RBR_BKADDR_SHIFT)); 36823859Sml29623 36833859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36846929Smisaki "index %d j %d rx_msg_p $%p mblk %p", 36856929Smisaki index, j, rx_msg_p, 
rx_msg_p->rx_mblk_p)); 36863859Sml29623 36873859Sml29623 mblk_p = rx_msg_p->rx_mblk_p; 36883859Sml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 36895170Stm144005 36905170Stm144005 rbrp->rbr_ref_cnt++; 36913859Sml29623 index++; 36923859Sml29623 rx_msg_p->buf_dma.dma_channel = channel; 36933859Sml29623 } 36946495Sspeer 36956495Sspeer rbrp->rbr_alloc_type = DDI_MEM_ALLOC; 36966495Sspeer if (dma_bufp->contig_alloc_type) { 36976495Sspeer rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC; 36986495Sspeer } 36996495Sspeer 37006495Sspeer if (dma_bufp->kmem_alloc_type) { 37016495Sspeer rbrp->rbr_alloc_type = KMEM_ALLOC; 37026495Sspeer } 37036495Sspeer 37046495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37056495Sspeer " nxge_map_rxdma_channel_buf_ring: map channel %d " 37066495Sspeer "chunk %d" 37076495Sspeer " nblocks %d chunk_size %x block_size 0x%x " 37086495Sspeer "dma_bufp $%p", 37096495Sspeer channel, i, 37106495Sspeer dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 37116495Sspeer dma_bufp)); 37123859Sml29623 } 37133859Sml29623 if (i < rbrp->num_blocks) { 37143859Sml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 37153859Sml29623 } 37163859Sml29623 37173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37186929Smisaki "nxge_map_rxdma_channel_buf_ring: done buf init " 37196929Smisaki "channel %d msg block entries %d", 37206929Smisaki channel, index)); 37213859Sml29623 ring_info->block_size_mask = bsize - 1; 37223859Sml29623 rbrp->rx_msg_ring = rx_msg_ring; 37233859Sml29623 rbrp->dma_bufp = dma_buf_p; 37243859Sml29623 rbrp->ring_info = ring_info; 37253859Sml29623 37263859Sml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 37273859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37286929Smisaki " nxge_map_rxdma_channel_buf_ring: " 37296929Smisaki "channel %d done buf info init", channel)); 37303859Sml29623 37315170Stm144005 /* 37325170Stm144005 * Finally, permit nxge_freeb() to call nxge_post_page(). 
37335170Stm144005 */ 37345170Stm144005 rbrp->rbr_state = RBR_POSTING; 37355170Stm144005 37363859Sml29623 *rbr_p = rbrp; 37373859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 37383859Sml29623 37393859Sml29623 nxge_map_rxdma_channel_buf_ring_fail1: 37403859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37416929Smisaki " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 37426929Smisaki channel, status)); 37433859Sml29623 37443859Sml29623 index--; 37453859Sml29623 for (; index >= 0; index--) { 37463859Sml29623 rx_msg_p = rx_msg_ring[index]; 37473859Sml29623 if (rx_msg_p != NULL) { 37483859Sml29623 freeb(rx_msg_p->rx_mblk_p); 37493859Sml29623 rx_msg_ring[index] = NULL; 37503859Sml29623 } 37513859Sml29623 } 37523859Sml29623 nxge_map_rxdma_channel_buf_ring_fail: 37533859Sml29623 MUTEX_DESTROY(&rbrp->post_lock); 37543859Sml29623 MUTEX_DESTROY(&rbrp->lock); 37553859Sml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 37563859Sml29623 KMEM_FREE(rx_msg_ring, size); 37573859Sml29623 KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 37583859Sml29623 37594185Sspeer status = NXGE_ERROR; 37604185Sspeer 37613859Sml29623 nxge_map_rxdma_channel_buf_ring_exit: 37623859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37636929Smisaki "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 37643859Sml29623 37653859Sml29623 return (status); 37663859Sml29623 } 37673859Sml29623 37683859Sml29623 /*ARGSUSED*/ 37693859Sml29623 static void 37703859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 37713859Sml29623 p_rx_rbr_ring_t rbr_p) 37723859Sml29623 { 37733859Sml29623 p_rx_msg_t *rx_msg_ring; 37743859Sml29623 p_rx_msg_t rx_msg_p; 37753859Sml29623 rxring_info_t *ring_info; 37763859Sml29623 int i; 37773859Sml29623 uint32_t size; 37783859Sml29623 #ifdef NXGE_DEBUG 37793859Sml29623 int num_chunks; 37803859Sml29623 #endif 37813859Sml29623 37823859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37836929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring")); 37843859Sml29623 if (rbr_p == NULL) { 
37853859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 37866929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 37873859Sml29623 return; 37883859Sml29623 } 37893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37906929Smisaki "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 37916929Smisaki rbr_p->rdc)); 37923859Sml29623 37933859Sml29623 rx_msg_ring = rbr_p->rx_msg_ring; 37943859Sml29623 ring_info = rbr_p->ring_info; 37953859Sml29623 37963859Sml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 37976929Smisaki NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37986929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring: " 37996929Smisaki "rx_msg_ring $%p ring_info $%p", 38006929Smisaki rx_msg_p, ring_info)); 38013859Sml29623 return; 38023859Sml29623 } 38033859Sml29623 38043859Sml29623 #ifdef NXGE_DEBUG 38053859Sml29623 num_chunks = rbr_p->num_blocks; 38063859Sml29623 #endif 38073859Sml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 38083859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38096929Smisaki " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 38106929Smisaki "tnblocks %d (max %d) size ptrs %d ", 38116929Smisaki rbr_p->rdc, num_chunks, 38126929Smisaki rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 38133859Sml29623 38143859Sml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 38153859Sml29623 rx_msg_p = rx_msg_ring[i]; 38163859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38176929Smisaki " nxge_unmap_rxdma_channel_buf_ring: " 38186929Smisaki "rx_msg_p $%p", 38196929Smisaki rx_msg_p)); 38203859Sml29623 if (rx_msg_p != NULL) { 38213859Sml29623 freeb(rx_msg_p->rx_mblk_p); 38223859Sml29623 rx_msg_ring[i] = NULL; 38233859Sml29623 } 38243859Sml29623 } 38253859Sml29623 38265170Stm144005 /* 38275170Stm144005 * We no longer may use the mutex <post_lock>. By setting 38285170Stm144005 * <rbr_state> to anything but POSTING, we prevent 38295170Stm144005 * nxge_post_page() from accessing a dead mutex. 
38305170Stm144005 */ 38315170Stm144005 rbr_p->rbr_state = RBR_UNMAPPING; 38323859Sml29623 MUTEX_DESTROY(&rbr_p->post_lock); 38335170Stm144005 38343859Sml29623 MUTEX_DESTROY(&rbr_p->lock); 38355170Stm144005 38365170Stm144005 if (rbr_p->rbr_ref_cnt == 0) { 38376495Sspeer /* 38386495Sspeer * This is the normal state of affairs. 38396495Sspeer * Need to free the following buffers: 38406495Sspeer * - data buffers 38416495Sspeer * - rx_msg ring 38426495Sspeer * - ring_info 38436495Sspeer * - rbr ring 38446495Sspeer */ 38456495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, 38466495Sspeer "unmap_rxdma_buf_ring: No outstanding - freeing ")); 38476495Sspeer nxge_rxdma_databuf_free(rbr_p); 38486495Sspeer KMEM_FREE(ring_info, sizeof (rxring_info_t)); 38496495Sspeer KMEM_FREE(rx_msg_ring, size); 38505170Stm144005 KMEM_FREE(rbr_p, sizeof (*rbr_p)); 38515170Stm144005 } else { 38525170Stm144005 /* 38535170Stm144005 * Some of our buffers are still being used. 38545170Stm144005 * Therefore, tell nxge_freeb() this ring is 38555170Stm144005 * unmapped, so it may free <rbr_p> for us. 38565170Stm144005 */ 38575170Stm144005 rbr_p->rbr_state = RBR_UNMAPPED; 38585170Stm144005 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 38595170Stm144005 "unmap_rxdma_buf_ring: %d %s outstanding.", 38605170Stm144005 rbr_p->rbr_ref_cnt, 38615170Stm144005 rbr_p->rbr_ref_cnt == 1 ? 
"msg" : "msgs")); 38625170Stm144005 } 38633859Sml29623 38643859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38656929Smisaki "<== nxge_unmap_rxdma_channel_buf_ring")); 38663859Sml29623 } 38673859Sml29623 38686495Sspeer /* 38696495Sspeer * nxge_rxdma_hw_start_common 38706495Sspeer * 38716495Sspeer * Arguments: 38726495Sspeer * nxgep 38736495Sspeer * 38746495Sspeer * Notes: 38756495Sspeer * 38766495Sspeer * NPI/NXGE function calls: 38776495Sspeer * nxge_init_fzc_rx_common(); 38786495Sspeer * nxge_init_fzc_rxdma_port(); 38796495Sspeer * 38806495Sspeer * Registers accessed: 38816495Sspeer * 38826495Sspeer * Context: 38836495Sspeer * Service domain 38846495Sspeer */ 38853859Sml29623 static nxge_status_t 38863859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 38873859Sml29623 { 38883859Sml29623 nxge_status_t status = NXGE_OK; 38893859Sml29623 38903859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 38913859Sml29623 38923859Sml29623 /* 38933859Sml29623 * Load the sharable parameters by writing to the 38943859Sml29623 * function zero control registers. These FZC registers 38953859Sml29623 * should be initialized only once for the entire chip. 38963859Sml29623 */ 38973859Sml29623 (void) nxge_init_fzc_rx_common(nxgep); 38983859Sml29623 38993859Sml29623 /* 39003859Sml29623 * Initialize the RXDMA port specific FZC control configurations. 39013859Sml29623 * These FZC registers are pertaining to each port. 
39023859Sml29623 */ 39033859Sml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 39043859Sml29623 39053859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 39063859Sml29623 39073859Sml29623 return (status); 39083859Sml29623 } 39093859Sml29623 39103859Sml29623 static nxge_status_t 39116495Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel) 39123859Sml29623 { 39133859Sml29623 int i, ndmas; 39143859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 39153859Sml29623 p_rx_rbr_ring_t *rbr_rings; 39163859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 39173859Sml29623 p_rx_rcr_ring_t *rcr_rings; 39183859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 39193859Sml29623 p_rx_mbox_t *rx_mbox_p; 39203859Sml29623 nxge_status_t status = NXGE_OK; 39213859Sml29623 39223859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 39233859Sml29623 39243859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 39253859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 39263859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 39273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 39286929Smisaki "<== nxge_rxdma_hw_start: NULL ring pointers")); 39293859Sml29623 return (NXGE_ERROR); 39303859Sml29623 } 39313859Sml29623 ndmas = rx_rbr_rings->ndmas; 39323859Sml29623 if (ndmas == 0) { 39333859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 39346929Smisaki "<== nxge_rxdma_hw_start: no dma channel allocated")); 39353859Sml29623 return (NXGE_ERROR); 39363859Sml29623 } 39373859Sml29623 39383859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39396929Smisaki "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 39403859Sml29623 39413859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 39423859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 39433859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 39443859Sml29623 if (rx_mbox_areas_p) { 39453859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 39463859Sml29623 } 39473859Sml29623 39486495Sspeer i = channel; 39496495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
39506929Smisaki "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 39516929Smisaki ndmas, channel)); 39526495Sspeer status = nxge_rxdma_start_channel(nxgep, channel, 39536495Sspeer (p_rx_rbr_ring_t)rbr_rings[i], 39546495Sspeer (p_rx_rcr_ring_t)rcr_rings[i], 39556495Sspeer (p_rx_mbox_t)rx_mbox_p[i]); 39566495Sspeer if (status != NXGE_OK) { 39576495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39586495Sspeer "==> nxge_rxdma_hw_start: disable " 39596495Sspeer "(status 0x%x channel %d)", status, channel)); 39606495Sspeer return (status); 39613859Sml29623 } 39623859Sml29623 39633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 39646929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 39656929Smisaki rx_rbr_rings, rx_rcr_rings)); 39663859Sml29623 39673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39686929Smisaki "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 39693859Sml29623 39703859Sml29623 return (status); 39713859Sml29623 } 39723859Sml29623 39733859Sml29623 static void 39746495Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel) 39753859Sml29623 { 39763859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 39773859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 39783859Sml29623 39793859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 39803859Sml29623 39813859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 39823859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 39833859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 39843859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 39856929Smisaki "<== nxge_rxdma_hw_stop: NULL ring pointers")); 39863859Sml29623 return; 39873859Sml29623 } 39883859Sml29623 39893859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39906929Smisaki "==> nxge_rxdma_hw_stop(channel %d)", 39916929Smisaki channel)); 39926495Sspeer (void) nxge_rxdma_stop_channel(nxgep, channel); 39933859Sml29623 39943859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 39956929Smisaki "rx_rbr_rings 0x%016llx rings 0x%016llx", 
39966929Smisaki rx_rbr_rings, rx_rcr_rings)); 39973859Sml29623 39983859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 39993859Sml29623 } 40003859Sml29623 40013859Sml29623 40023859Sml29623 static nxge_status_t 40033859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 40043859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 40053859Sml29623 40063859Sml29623 { 40073859Sml29623 npi_handle_t handle; 40083859Sml29623 npi_status_t rs = NPI_SUCCESS; 40093859Sml29623 rx_dma_ctl_stat_t cs; 40103859Sml29623 rx_dma_ent_msk_t ent_mask; 40113859Sml29623 nxge_status_t status = NXGE_OK; 40123859Sml29623 40133859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 40143859Sml29623 40153859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 40163859Sml29623 40173859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 40183859Sml29623 "npi handle addr $%p acc $%p", 40193859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 40203859Sml29623 40216495Sspeer /* Reset RXDMA channel, but not if you're a guest. 
*/ 40226495Sspeer if (!isLDOMguest(nxgep)) { 40236495Sspeer rs = npi_rxdma_cfg_rdc_reset(handle, channel); 40246495Sspeer if (rs != NPI_SUCCESS) { 40256495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40266495Sspeer "==> nxge_init_fzc_rdc: " 40276495Sspeer "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x", 40286495Sspeer channel, rs)); 40296495Sspeer return (NXGE_ERROR | rs); 40306495Sspeer } 40316495Sspeer 40326495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40336495Sspeer "==> nxge_rxdma_start_channel: reset done: channel %d", 40346495Sspeer channel)); 40353859Sml29623 } 40363859Sml29623 40376495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 40386495Sspeer if (isLDOMguest(nxgep)) 40396495Sspeer (void) nxge_rdc_lp_conf(nxgep, channel); 40406495Sspeer #endif 40413859Sml29623 40423859Sml29623 /* 40433859Sml29623 * Initialize the RXDMA channel specific FZC control 40443859Sml29623 * configurations. These FZC registers are pertaining 40453859Sml29623 * to each RX channel (logical pages). 40463859Sml29623 */ 40476495Sspeer if (!isLDOMguest(nxgep)) { 40486495Sspeer status = nxge_init_fzc_rxdma_channel(nxgep, channel); 40496495Sspeer if (status != NXGE_OK) { 40506495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40516495Sspeer "==> nxge_rxdma_start_channel: " 40526495Sspeer "init fzc rxdma failed (0x%08x channel %d)", 40536495Sspeer status, channel)); 40546495Sspeer return (status); 40556495Sspeer } 40566495Sspeer 40576495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40586495Sspeer "==> nxge_rxdma_start_channel: fzc done")); 40593859Sml29623 } 40603859Sml29623 40613859Sml29623 /* Set up the interrupt event masks. 
*/ 40623859Sml29623 ent_mask.value = 0; 40633859Sml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 40643859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 40656495Sspeer &ent_mask); 40663859Sml29623 if (rs != NPI_SUCCESS) { 40673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40683859Sml29623 "==> nxge_rxdma_start_channel: " 40696495Sspeer "init rxdma event masks failed " 40706495Sspeer "(0x%08x channel %d)", 40713859Sml29623 status, channel)); 40723859Sml29623 return (NXGE_ERROR | rs); 40733859Sml29623 } 40743859Sml29623 40756495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40766495Sspeer "==> nxge_rxdma_start_channel: " 40773859Sml29623 "event done: channel %d (mask 0x%016llx)", 40783859Sml29623 channel, ent_mask.value)); 40793859Sml29623 40803859Sml29623 /* Initialize the receive DMA control and status register */ 40813859Sml29623 cs.value = 0; 40823859Sml29623 cs.bits.hdw.mex = 1; 40833859Sml29623 cs.bits.hdw.rcrthres = 1; 40843859Sml29623 cs.bits.hdw.rcrto = 1; 40853859Sml29623 cs.bits.hdw.rbr_empty = 1; 40863859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 40873859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 40883859Sml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 40893859Sml29623 if (status != NXGE_OK) { 40903859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40913859Sml29623 "==> nxge_rxdma_start_channel: " 40923859Sml29623 "init rxdma control register failed (0x%08x channel %d", 40933859Sml29623 status, channel)); 40943859Sml29623 return (status); 40953859Sml29623 } 40963859Sml29623 40973859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 40983859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 40993859Sml29623 41003859Sml29623 /* 41013859Sml29623 * Load RXDMA descriptors, buffers, mailbox, 41023859Sml29623 * initialise the receive DMA channels and 41033859Sml29623 * enable each DMA channel. 
41043859Sml29623 */ 41053859Sml29623 status = nxge_enable_rxdma_channel(nxgep, 41066495Sspeer channel, rbr_p, rcr_p, mbox_p); 41073859Sml29623 41083859Sml29623 if (status != NXGE_OK) { 41093859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41106495Sspeer " nxge_rxdma_start_channel: " 41116495Sspeer " enable rxdma failed (0x%08x channel %d)", 41126495Sspeer status, channel)); 41136495Sspeer return (status); 41146495Sspeer } 41156495Sspeer 41166495Sspeer NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41176495Sspeer "==> nxge_rxdma_start_channel: enabled channel %d")); 41186495Sspeer 41196495Sspeer if (isLDOMguest(nxgep)) { 41206495Sspeer /* Add interrupt handler for this channel. */ 41216495Sspeer if (nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel) 41226495Sspeer != NXGE_OK) { 41236495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41243859Sml29623 " nxge_rxdma_start_channel: " 41256495Sspeer " nxge_hio_intr_add failed (0x%08x channel %d)", 41266495Sspeer status, channel)); 41276495Sspeer } 41283859Sml29623 } 41293859Sml29623 41303859Sml29623 ent_mask.value = 0; 41313859Sml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 41323859Sml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 41333859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 41343859Sml29623 &ent_mask); 41353859Sml29623 if (rs != NPI_SUCCESS) { 41363859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 41373859Sml29623 "==> nxge_rxdma_start_channel: " 41383859Sml29623 "init rxdma event masks failed (0x%08x channel %d)", 41393859Sml29623 status, channel)); 41403859Sml29623 return (NXGE_ERROR | rs); 41413859Sml29623 } 41423859Sml29623 41433859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 41443859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 41453859Sml29623 41463859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 41473859Sml29623 41483859Sml29623 return (NXGE_OK); 41493859Sml29623 } 41503859Sml29623 41513859Sml29623 static nxge_status_t 
41523859Sml29623 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 41533859Sml29623 { 41543859Sml29623 npi_handle_t handle; 41553859Sml29623 npi_status_t rs = NPI_SUCCESS; 41563859Sml29623 rx_dma_ctl_stat_t cs; 41573859Sml29623 rx_dma_ent_msk_t ent_mask; 41583859Sml29623 nxge_status_t status = NXGE_OK; 41593859Sml29623 41603859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 41613859Sml29623 41623859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 41633859Sml29623 41643859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 41656929Smisaki "npi handle addr $%p acc $%p", 41666929Smisaki nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 41673859Sml29623 41687812SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) { 41697812SMichael.Speer@Sun.COM /* 41707812SMichael.Speer@Sun.COM * Stop RxMAC = A.9.2.6 41717812SMichael.Speer@Sun.COM */ 41727812SMichael.Speer@Sun.COM if (nxge_rx_mac_disable(nxgep) != NXGE_OK) { 41737812SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41747812SMichael.Speer@Sun.COM "nxge_rxdma_stop_channel: " 41757812SMichael.Speer@Sun.COM "Failed to disable RxMAC")); 41767812SMichael.Speer@Sun.COM } 41777812SMichael.Speer@Sun.COM 41787812SMichael.Speer@Sun.COM /* 41797812SMichael.Speer@Sun.COM * Drain IPP Port = A.9.3.6 41807812SMichael.Speer@Sun.COM */ 41817812SMichael.Speer@Sun.COM (void) nxge_ipp_drain(nxgep); 41827812SMichael.Speer@Sun.COM } 41837812SMichael.Speer@Sun.COM 41843859Sml29623 /* Reset RXDMA channel */ 41853859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 41863859Sml29623 if (rs != NPI_SUCCESS) { 41873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 41886929Smisaki " nxge_rxdma_stop_channel: " 41896929Smisaki " reset rxdma failed (0x%08x channel %d)", 41906929Smisaki rs, channel)); 41913859Sml29623 return (NXGE_ERROR | rs); 41923859Sml29623 } 41933859Sml29623 41943859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 41956929Smisaki "==> nxge_rxdma_stop_channel: reset done")); 
41963859Sml29623 41973859Sml29623 /* Set up the interrupt event masks. */ 41983859Sml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 41993859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 42006929Smisaki &ent_mask); 42013859Sml29623 if (rs != NPI_SUCCESS) { 42023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42036929Smisaki "==> nxge_rxdma_stop_channel: " 42046929Smisaki "set rxdma event masks failed (0x%08x channel %d)", 42056929Smisaki rs, channel)); 42063859Sml29623 return (NXGE_ERROR | rs); 42073859Sml29623 } 42083859Sml29623 42093859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42106929Smisaki "==> nxge_rxdma_stop_channel: event done")); 42113859Sml29623 42127812SMichael.Speer@Sun.COM /* 42137812SMichael.Speer@Sun.COM * Initialize the receive DMA control and status register 42147812SMichael.Speer@Sun.COM */ 42153859Sml29623 cs.value = 0; 42167812SMichael.Speer@Sun.COM status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 42173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 42186929Smisaki " to default (all 0s) 0x%08x", cs.value)); 42193859Sml29623 if (status != NXGE_OK) { 42203859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42216929Smisaki " nxge_rxdma_stop_channel: init rxdma" 42226929Smisaki " control register failed (0x%08x channel %d", 42236929Smisaki status, channel)); 42243859Sml29623 return (status); 42253859Sml29623 } 42263859Sml29623 42273859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 42286929Smisaki "==> nxge_rxdma_stop_channel: control done")); 42293859Sml29623 42307812SMichael.Speer@Sun.COM /* 42317812SMichael.Speer@Sun.COM * Make sure channel is disabled. 
42327812SMichael.Speer@Sun.COM */ 42333859Sml29623 status = nxge_disable_rxdma_channel(nxgep, channel); 42343859Sml29623 if (status != NXGE_OK) { 42353859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42366929Smisaki " nxge_rxdma_stop_channel: " 42376929Smisaki " init enable rxdma failed (0x%08x channel %d)", 42386929Smisaki status, channel)); 42393859Sml29623 return (status); 42403859Sml29623 } 42413859Sml29623 42427812SMichael.Speer@Sun.COM if (!isLDOMguest(nxgep)) { 42437812SMichael.Speer@Sun.COM /* 42447812SMichael.Speer@Sun.COM * Enable RxMAC = A.9.2.10 42457812SMichael.Speer@Sun.COM */ 42467812SMichael.Speer@Sun.COM if (nxge_rx_mac_enable(nxgep) != NXGE_OK) { 42477812SMichael.Speer@Sun.COM NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42487812SMichael.Speer@Sun.COM "nxge_rxdma_stop_channel: Rx MAC still disabled")); 42497812SMichael.Speer@Sun.COM } 42507812SMichael.Speer@Sun.COM } 42517812SMichael.Speer@Sun.COM 42523859Sml29623 NXGE_DEBUG_MSG((nxgep, 42536929Smisaki RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 42543859Sml29623 42553859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 42563859Sml29623 42573859Sml29623 return (NXGE_OK); 42583859Sml29623 } 42593859Sml29623 42603859Sml29623 nxge_status_t 42613859Sml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 42623859Sml29623 { 42633859Sml29623 npi_handle_t handle; 42643859Sml29623 p_nxge_rdc_sys_stats_t statsp; 42653859Sml29623 rx_ctl_dat_fifo_stat_t stat; 42663859Sml29623 uint32_t zcp_err_status; 42673859Sml29623 uint32_t ipp_err_status; 42683859Sml29623 nxge_status_t status = NXGE_OK; 42693859Sml29623 npi_status_t rs = NPI_SUCCESS; 42703859Sml29623 boolean_t my_err = B_FALSE; 42713859Sml29623 42723859Sml29623 handle = nxgep->npi_handle; 42733859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 42743859Sml29623 42753859Sml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 42763859Sml29623 42773859Sml29623 if (rs != NPI_SUCCESS) 42783859Sml29623 
return (NXGE_ERROR | rs); 42793859Sml29623 42803859Sml29623 if (stat.bits.ldw.id_mismatch) { 42813859Sml29623 statsp->id_mismatch++; 42823859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 42836929Smisaki NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 42843859Sml29623 /* Global fatal error encountered */ 42853859Sml29623 } 42863859Sml29623 42873859Sml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 42883859Sml29623 switch (nxgep->mac.portnum) { 42893859Sml29623 case 0: 42903859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 42916929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 42923859Sml29623 my_err = B_TRUE; 42933859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 42943859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 42953859Sml29623 } 42963859Sml29623 break; 42973859Sml29623 case 1: 42983859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 42996929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 43003859Sml29623 my_err = B_TRUE; 43013859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 43023859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 43033859Sml29623 } 43043859Sml29623 break; 43053859Sml29623 case 2: 43063859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 43076929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 43083859Sml29623 my_err = B_TRUE; 43093859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 43103859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 43113859Sml29623 } 43123859Sml29623 break; 43133859Sml29623 case 3: 43143859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 43156929Smisaki (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 43163859Sml29623 my_err = B_TRUE; 43173859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 43183859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 43193859Sml29623 } 43203859Sml29623 break; 43213859Sml29623 default: 43223859Sml29623 return (NXGE_ERROR); 43233859Sml29623 } 
43243859Sml29623 } 43253859Sml29623 43263859Sml29623 if (my_err) { 43273859Sml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 43286929Smisaki zcp_err_status); 43293859Sml29623 if (status != NXGE_OK) 43303859Sml29623 return (status); 43313859Sml29623 } 43323859Sml29623 43333859Sml29623 return (NXGE_OK); 43343859Sml29623 } 43353859Sml29623 43363859Sml29623 static nxge_status_t 43373859Sml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 43383859Sml29623 uint32_t zcp_status) 43393859Sml29623 { 43403859Sml29623 boolean_t rxport_fatal = B_FALSE; 43413859Sml29623 p_nxge_rdc_sys_stats_t statsp; 43423859Sml29623 nxge_status_t status = NXGE_OK; 43433859Sml29623 uint8_t portn; 43443859Sml29623 43453859Sml29623 portn = nxgep->mac.portnum; 43463859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 43473859Sml29623 43483859Sml29623 if (ipp_status & (0x1 << portn)) { 43493859Sml29623 statsp->ipp_eop_err++; 43503859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 43516929Smisaki NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 43523859Sml29623 rxport_fatal = B_TRUE; 43533859Sml29623 } 43543859Sml29623 43553859Sml29623 if (zcp_status & (0x1 << portn)) { 43563859Sml29623 statsp->zcp_eop_err++; 43573859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 43586929Smisaki NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 43593859Sml29623 rxport_fatal = B_TRUE; 43603859Sml29623 } 43613859Sml29623 43623859Sml29623 if (rxport_fatal) { 43633859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43646929Smisaki " nxge_rxdma_handle_port_error: " 43656929Smisaki " fatal error on Port #%d\n", 43666929Smisaki portn)); 43673859Sml29623 status = nxge_rx_port_fatal_err_recover(nxgep); 43683859Sml29623 if (status == NXGE_OK) { 43693859Sml29623 FM_SERVICE_RESTORED(nxgep); 43703859Sml29623 } 43713859Sml29623 } 43723859Sml29623 43733859Sml29623 return (status); 43743859Sml29623 } 43753859Sml29623 43763859Sml29623 static nxge_status_t 43773859Sml29623 
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 43783859Sml29623 { 43793859Sml29623 npi_handle_t handle; 43803859Sml29623 npi_status_t rs = NPI_SUCCESS; 43813859Sml29623 nxge_status_t status = NXGE_OK; 43823859Sml29623 p_rx_rbr_ring_t rbrp; 43833859Sml29623 p_rx_rcr_ring_t rcrp; 43843859Sml29623 p_rx_mbox_t mboxp; 43853859Sml29623 rx_dma_ent_msk_t ent_mask; 43863859Sml29623 p_nxge_dma_common_t dmap; 43873859Sml29623 int ring_idx; 43883859Sml29623 uint32_t ref_cnt; 43893859Sml29623 p_rx_msg_t rx_msg_p; 43903859Sml29623 int i; 43913859Sml29623 uint32_t nxge_port_rcr_size; 43923859Sml29623 43933859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 43943859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43956929Smisaki "Recovering from RxDMAChannel#%d error...", channel)); 43963859Sml29623 43973859Sml29623 /* 43983859Sml29623 * Stop the dma channel waits for the stop done. 43993859Sml29623 * If the stop done bit is not set, then create 44003859Sml29623 * an error. 
44013859Sml29623 */ 44023859Sml29623 44033859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 44043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 44053859Sml29623 44063859Sml29623 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 44073859Sml29623 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 44083859Sml29623 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 44093859Sml29623 44103859Sml29623 MUTEX_ENTER(&rcrp->lock); 44113859Sml29623 MUTEX_ENTER(&rbrp->lock); 44123859Sml29623 MUTEX_ENTER(&rbrp->post_lock); 44133859Sml29623 44143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 44153859Sml29623 44163859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 44173859Sml29623 if (rs != NPI_SUCCESS) { 44183859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44196929Smisaki "nxge_disable_rxdma_channel:failed")); 44203859Sml29623 goto fail; 44213859Sml29623 } 44223859Sml29623 44233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 44243859Sml29623 44253859Sml29623 /* Disable interrupt */ 44263859Sml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 44273859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 44283859Sml29623 if (rs != NPI_SUCCESS) { 44293859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44306929Smisaki "nxge_rxdma_stop_channel: " 44316929Smisaki "set rxdma event masks failed (channel %d)", 44326929Smisaki channel)); 44333859Sml29623 } 44343859Sml29623 44353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 44363859Sml29623 44373859Sml29623 /* Reset RXDMA channel */ 44383859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 44393859Sml29623 if (rs != NPI_SUCCESS) { 44403859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44416929Smisaki "nxge_rxdma_fatal_err_recover: " 44426929Smisaki " reset rxdma failed (channel %d)", channel)); 44433859Sml29623 goto fail; 44443859Sml29623 } 44453859Sml29623 44463859Sml29623 
nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 44473859Sml29623 44483859Sml29623 mboxp = 44496929Smisaki (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 44503859Sml29623 44513859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 44523859Sml29623 rbrp->rbr_rd_index = 0; 44533859Sml29623 44543859Sml29623 rcrp->comp_rd_index = 0; 44553859Sml29623 rcrp->comp_wt_index = 0; 44563859Sml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 44576929Smisaki (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 44585125Sjoycey #if defined(__i386) 44596929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 44606929Smisaki (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 44615125Sjoycey #else 44626929Smisaki rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 44636929Smisaki (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 44645125Sjoycey #endif 44653859Sml29623 44663859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 44676929Smisaki (nxge_port_rcr_size - 1); 44683859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 44696929Smisaki (nxge_port_rcr_size - 1); 44703859Sml29623 44713859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 44723859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 44733859Sml29623 44743859Sml29623 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 44753859Sml29623 44763859Sml29623 for (i = 0; i < rbrp->rbr_max_size; i++) { 44773859Sml29623 rx_msg_p = rbrp->rx_msg_ring[i]; 44783859Sml29623 ref_cnt = rx_msg_p->ref_cnt; 44793859Sml29623 if (ref_cnt != 1) { 44803859Sml29623 if (rx_msg_p->cur_usage_cnt != 44816929Smisaki rx_msg_p->max_usage_cnt) { 44823859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44836929Smisaki "buf[%d]: cur_usage_cnt = %d " 44846929Smisaki "max_usage_cnt = %d\n", i, 44856929Smisaki rx_msg_p->cur_usage_cnt, 44866929Smisaki rx_msg_p->max_usage_cnt)); 44873859Sml29623 } else { 44883859Sml29623 /* Buffer can be re-posted */ 44893859Sml29623 rx_msg_p->free = B_TRUE; 
44903859Sml29623 rx_msg_p->cur_usage_cnt = 0; 44913859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 44923859Sml29623 rx_msg_p->pkt_buf_size = 0; 44933859Sml29623 } 44943859Sml29623 } 44953859Sml29623 } 44963859Sml29623 44973859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 44983859Sml29623 44993859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 45003859Sml29623 if (status != NXGE_OK) { 45013859Sml29623 goto fail; 45023859Sml29623 } 45033859Sml29623 45043859Sml29623 MUTEX_EXIT(&rbrp->post_lock); 45053859Sml29623 MUTEX_EXIT(&rbrp->lock); 45063859Sml29623 MUTEX_EXIT(&rcrp->lock); 45073859Sml29623 45083859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45096929Smisaki "Recovery Successful, RxDMAChannel#%d Restored", 45106929Smisaki channel)); 45113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 45123859Sml29623 45133859Sml29623 return (NXGE_OK); 45143859Sml29623 fail: 45153859Sml29623 MUTEX_EXIT(&rbrp->post_lock); 45163859Sml29623 MUTEX_EXIT(&rbrp->lock); 45173859Sml29623 MUTEX_EXIT(&rcrp->lock); 45183859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 45193859Sml29623 45203859Sml29623 return (NXGE_ERROR | rs); 45213859Sml29623 } 45223859Sml29623 45233859Sml29623 nxge_status_t 45243859Sml29623 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 45253859Sml29623 { 45266495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 45276495Sspeer nxge_status_t status = NXGE_OK; 45286495Sspeer int rdc; 45293859Sml29623 45303859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 45313859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45326929Smisaki "Recovering from RxPort error...")); 45336495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n")); 45346495Sspeer 45353859Sml29623 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 45363859Sml29623 goto fail; 45373859Sml29623 45383859Sml29623 NXGE_DELAY(1000); 45393859Sml29623 45406495Sspeer NXGE_DEBUG_MSG((nxgep, 
RX_CTL, "Stopping all RxDMA channels...")); 45416495Sspeer 45426495Sspeer for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) { 45436495Sspeer if ((1 << rdc) & set->owned.map) { 45446495Sspeer if (nxge_rxdma_fatal_err_recover(nxgep, rdc) 45456495Sspeer != NXGE_OK) { 45466495Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45476495Sspeer "Could not recover channel %d", rdc)); 45486495Sspeer } 45493859Sml29623 } 45503859Sml29623 } 45513859Sml29623 45526495Sspeer NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP...")); 45533859Sml29623 45543859Sml29623 /* Reset IPP */ 45553859Sml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 45563859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45576929Smisaki "nxge_rx_port_fatal_err_recover: " 45586929Smisaki "Failed to reset IPP")); 45593859Sml29623 goto fail; 45603859Sml29623 } 45613859Sml29623 45623859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 45633859Sml29623 45643859Sml29623 /* Reset RxMAC */ 45653859Sml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 45663859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45676929Smisaki "nxge_rx_port_fatal_err_recover: " 45686929Smisaki "Failed to reset RxMAC")); 45693859Sml29623 goto fail; 45703859Sml29623 } 45713859Sml29623 45723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 45733859Sml29623 45743859Sml29623 /* Re-Initialize IPP */ 45753859Sml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 45763859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45776929Smisaki "nxge_rx_port_fatal_err_recover: " 45786929Smisaki "Failed to init IPP")); 45793859Sml29623 goto fail; 45803859Sml29623 } 45813859Sml29623 45823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 45833859Sml29623 45843859Sml29623 /* Re-Initialize RxMAC */ 45853859Sml29623 if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) { 45863859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45876929Smisaki "nxge_rx_port_fatal_err_recover: " 45886929Smisaki "Failed to reset RxMAC")); 45893859Sml29623 goto fail; 
45903859Sml29623 } 45913859Sml29623 45923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 45933859Sml29623 45943859Sml29623 /* Re-enable RxMAC */ 45953859Sml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 45963859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45976929Smisaki "nxge_rx_port_fatal_err_recover: " 45986929Smisaki "Failed to enable RxMAC")); 45993859Sml29623 goto fail; 46003859Sml29623 } 46013859Sml29623 46023859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46036929Smisaki "Recovery Successful, RxPort Restored")); 46043859Sml29623 46053859Sml29623 return (NXGE_OK); 46063859Sml29623 fail: 46073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 46083859Sml29623 return (status); 46093859Sml29623 } 46103859Sml29623 46113859Sml29623 void 46123859Sml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 46133859Sml29623 { 46143859Sml29623 rx_dma_ctl_stat_t cs; 46153859Sml29623 rx_ctl_dat_fifo_stat_t cdfs; 46163859Sml29623 46173859Sml29623 switch (err_id) { 46183859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 46193859Sml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 46203859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 46213859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 46223859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 46233859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 46243859Sml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 46253859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 46263859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 46273859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 46283859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 46293859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 46303859Sml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 46313859Sml29623 case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 46323859Sml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 46336929Smisaki chan, &cs.value); 46343859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 
46353859Sml29623 cs.bits.hdw.rcr_ack_err = 1; 46363859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 46373859Sml29623 cs.bits.hdw.dc_fifo_err = 1; 46383859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 46393859Sml29623 cs.bits.hdw.rcr_sha_par = 1; 46403859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 46413859Sml29623 cs.bits.hdw.rbr_pre_par = 1; 46423859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 46433859Sml29623 cs.bits.hdw.rbr_tmout = 1; 46443859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 46453859Sml29623 cs.bits.hdw.rsp_cnt_err = 1; 46463859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 46473859Sml29623 cs.bits.hdw.byte_en_bus = 1; 46483859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 46493859Sml29623 cs.bits.hdw.rsp_dat_err = 1; 46503859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 46513859Sml29623 cs.bits.hdw.config_err = 1; 46523859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 46533859Sml29623 cs.bits.hdw.rcrincon = 1; 46543859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 46553859Sml29623 cs.bits.hdw.rcrfull = 1; 46563859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 46573859Sml29623 cs.bits.hdw.rbrfull = 1; 46583859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 46593859Sml29623 cs.bits.hdw.rbrlogpage = 1; 46603859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 46613859Sml29623 cs.bits.hdw.cfiglogpage = 1; 46625125Sjoycey #if defined(__i386) 46635125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 46646929Smisaki cs.value); 46655125Sjoycey #else 46663859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 46676929Smisaki cs.value); 46685125Sjoycey #endif 46693859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 46706929Smisaki chan, cs.value); 46713859Sml29623 break; 46723859Sml29623 case 
NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 46733859Sml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 46743859Sml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 46753859Sml29623 cdfs.value = 0; 46763859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 46773859Sml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 46783859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 46793859Sml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 46803859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 46813859Sml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 46825125Sjoycey #if defined(__i386) 46835125Sjoycey cmn_err(CE_NOTE, 46846929Smisaki "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 46856929Smisaki cdfs.value); 46865125Sjoycey #else 46873859Sml29623 cmn_err(CE_NOTE, 46886929Smisaki "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 46896929Smisaki cdfs.value); 46905125Sjoycey #endif 46916495Sspeer NXGE_REG_WR64(nxgep->npi_handle, 46926495Sspeer RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value); 46933859Sml29623 break; 46943859Sml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 46953859Sml29623 break; 46965165Syc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 46973859Sml29623 break; 46983859Sml29623 } 46993859Sml29623 } 47006495Sspeer 47016495Sspeer static void 47026495Sspeer nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p) 47036495Sspeer { 47046495Sspeer rxring_info_t *ring_info; 47056495Sspeer int index; 47066495Sspeer uint32_t chunk_size; 47076495Sspeer uint64_t kaddr; 47086495Sspeer uint_t num_blocks; 47096495Sspeer 47106495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free")); 47116495Sspeer 47126495Sspeer if (rbr_p == NULL) { 47136495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 47146495Sspeer "==> nxge_rxdma_databuf_free: NULL rbr pointer")); 47156495Sspeer return; 47166495Sspeer } 47176495Sspeer 47186495Sspeer if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) { 47196495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 47206495Sspeer "==> 
nxge_rxdma_databuf_free: DDI")); 47216495Sspeer return; 47226495Sspeer } 47236495Sspeer 47246495Sspeer ring_info = rbr_p->ring_info; 47256495Sspeer if (ring_info == NULL) { 47266495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 47276495Sspeer "==> nxge_rxdma_databuf_free: NULL ring info")); 47286495Sspeer return; 47296495Sspeer } 47306495Sspeer num_blocks = rbr_p->num_blocks; 47316495Sspeer for (index = 0; index < num_blocks; index++) { 47326495Sspeer kaddr = ring_info->buffer[index].kaddr; 47336495Sspeer chunk_size = ring_info->buffer[index].buf_size; 47346495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 47356495Sspeer "==> nxge_rxdma_databuf_free: free chunk %d " 47366495Sspeer "kaddrp $%p chunk size %d", 47376495Sspeer index, kaddr, chunk_size)); 47386495Sspeer if (kaddr == NULL) continue; 47396495Sspeer nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size); 47406495Sspeer ring_info->buffer[index].kaddr = NULL; 47416495Sspeer } 47426495Sspeer 47436495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free")); 47446495Sspeer } 47456495Sspeer 47466495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 47476495Sspeer extern void contig_mem_free(void *, size_t); 47486495Sspeer #endif 47496495Sspeer 47506495Sspeer void 47516495Sspeer nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size) 47526495Sspeer { 47536495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf")); 47546495Sspeer 47556495Sspeer if (kaddr == NULL || !buf_size) { 47566495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 47576495Sspeer "==> nxge_free_buf: invalid kaddr $%p size to free %d", 47586495Sspeer kaddr, buf_size)); 47596495Sspeer return; 47606495Sspeer } 47616495Sspeer 47626495Sspeer switch (alloc_type) { 47636495Sspeer case KMEM_ALLOC: 47646495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 47656495Sspeer "==> nxge_free_buf: freeing kmem $%p size %d", 47666495Sspeer kaddr, buf_size)); 47676495Sspeer #if defined(__i386) 47686495Sspeer KMEM_FREE((void *)(uint32_t)kaddr, 
buf_size); 47696495Sspeer #else 47706495Sspeer KMEM_FREE((void *)kaddr, buf_size); 47716495Sspeer #endif 47726495Sspeer break; 47736495Sspeer 47746495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 47756495Sspeer case CONTIG_MEM_ALLOC: 47766495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, 47776495Sspeer "==> nxge_free_buf: freeing contig_mem kaddr $%p size %d", 47786495Sspeer kaddr, buf_size)); 47796495Sspeer contig_mem_free((void *)kaddr, buf_size); 47806495Sspeer break; 47816495Sspeer #endif 47826495Sspeer 47836495Sspeer default: 47846495Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 47856495Sspeer "<== nxge_free_buf: unsupported alloc type %d", 47866495Sspeer alloc_type)); 47876495Sspeer return; 47886495Sspeer } 47896495Sspeer 47906495Sspeer NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf")); 47916495Sspeer } 4792