/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/nxge/nxge_impl.h>
#include <sys/nxge/nxge_rxdma.h>
#include <sys/nxge/nxge_hio.h>

#if !defined(_BIG_ENDIAN)
#include <npi_rx_rd32.h>
#endif
#include <npi_rx_rd64.h>
#include <npi_rx_wr64.h>

#define	NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp)	\
	(rdcgrp + nxgep->pt_config.hw_config.def_mac_rxdma_grpid)
#define	NXGE_ACTUAL_RDC(nxgep, rdc)	\
	(rdc + nxgep->pt_config.hw_config.start_rdc)

/*
 * XXX: This is a tunable to limit the number of packets each interrupt
 * handles.  0 (default) means that each interrupt takes as many packets
 * as it finds.
 */
extern int nxge_max_intr_pkts;

/*
 * Globals: tunable parameters (/etc/system or adb)
 */
extern uint32_t nxge_rbr_size;
extern uint32_t nxge_rcr_size;
extern uint32_t nxge_rbr_spare_size;

extern uint32_t nxge_mblks_pending;
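/*
 * Illustrative only: a tunable such as nxge_rbr_size can be set from
 * /etc/system with a line of the form
 *
 *	set nxge:nxge_rbr_size = 2048
 *
 * (the value 2048 is an arbitrary example, not a recommendation).
 */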
/*
 * Tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
extern uint32_t nxge_max_rx_pkts;
boolean_t nxge_jumbo_enable;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * nxge_rx_threshold_hi: copy all buffers.
 * nxge_rx_buf_size_type: receive buffer block size type.
 * nxge_rx_threshold_lo: copy only up to tunable block size type.
 */
extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi;
extern nxge_rxbuf_type_t nxge_rx_buf_size_type;
extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo;

extern uint32_t nxge_cksum_offload;

static nxge_status_t nxge_map_rxdma(p_nxge_t, int);
static void nxge_unmap_rxdma(p_nxge_t, int);

static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t);

static nxge_status_t nxge_rxdma_hw_start(p_nxge_t, int);
static void nxge_rxdma_hw_stop(p_nxge_t, int);

static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    uint32_t,
    p_nxge_dma_common_t *, p_rx_rcr_ring_t *,
    p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *, p_rx_rbr_ring_t *,
    p_rx_rcr_ring_t *, p_rx_mbox_t *);
static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t,
    p_rx_rcr_ring_t, p_rx_mbox_t);

static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t,
    uint16_t,
    p_nxge_dma_common_t *,
    p_rx_rbr_ring_t *, uint32_t);
static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t,
    p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t);
static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t);

static mblk_t *
nxge_rx_pkts(p_nxge_t, p_rx_rcr_ring_t, rx_dma_ctl_stat_t, int);

static void nxge_receive_packet(p_nxge_t,
    p_rx_rcr_ring_t,
    p_rcr_entry_t,
    boolean_t *,
    mblk_t **, mblk_t **);

nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t);

static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t);
static void nxge_freeb(p_rx_msg_t);
static mblk_t *nxge_rx_pkts_vring(p_nxge_t, uint_t, rx_dma_ctl_stat_t);
static nxge_status_t nxge_rx_err_evnts(p_nxge_t, int, rx_dma_ctl_stat_t);

static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t,
    uint32_t, uint32_t);

static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t,
    p_rx_rbr_ring_t);

static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t);

nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t);

static void nxge_rxdma_databuf_free(p_rx_rbr_ring_t);

nxge_status_t
nxge_init_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int i, count, channel;
    nxge_grp_t *group;
    dc_map_t map;
    int dev_gindex;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels"));

    if (!isLDOMguest(nxgep)) {
        if (nxge_rxdma_hw_start_common(nxgep) != NXGE_OK) {
            cmn_err(CE_NOTE, "hw_start_common");
            return (NXGE_ERROR);
        }
    }

    /*
     * NXGE_LOGICAL_GROUP_MAX > NXGE_MAX_RDC_GROUPS (8)
     * We only have 8 hardware RDC tables, but we may have
     * up to 16 logical (software-defined) groups of RDCs,
     * if we make use of layer 3 & 4 hardware classification.
     */
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    if ((nxge_grp_dc_add(nxgep,
                        group, VP_BOUND_RX, channel)))
                        goto init_rxdma_channels_exit;
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_OK);

init_rxdma_channels_exit:
    for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) {
        if ((1 << i) & set->lg.map) {
            group = set->group[i];
            dev_gindex =
                nxgep->pt_config.hw_config.def_mac_rxdma_grpid + i;
            map = nxgep->pt_config.rdc_grps[dev_gindex].map;
            for (channel = 0; channel < NXGE_MAX_RDCS; channel++) {
                if ((1 << channel) & map) {
                    nxge_grp_dc_remove(nxgep,
                        VP_BOUND_RX, channel);
                }
            }
        }
        if (++count == set->lg.count)
            break;
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_rxdma_channels"));
    return (NXGE_ERROR);
}

nxge_status_t
nxge_init_rxdma_channel(p_nxge_t nxge, int channel)
{
    nxge_status_t status;

    NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_rxdma_channel"));

    status = nxge_map_rxdma(nxge, channel);
    if (status != NXGE_OK) {
        NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL,
            "<== nxge_init_rxdma: status 0x%x", status));
        return (status);
    }

#if defined(sun4v)
    if (isLDOMguest(nxge)) {
        /* set rcr_ring */
        p_rx_rcr_ring_t ring = nxge->rx_rcr_rings->rcr_rings[channel];

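        /*
         * In a guest domain the channel's receive interrupt must be
         * bound to this RCR ring before the channel is started;
         * nxge_hio_rxdma_bind_intr() performs that binding.
         */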
        status = nxge_hio_rxdma_bind_intr(nxge, ring, channel);
        if (status != NXGE_OK) {
            nxge_unmap_rxdma(nxge, channel);
            return (status);
        }
    }
#endif

    status = nxge_rxdma_hw_start(nxge, channel);
    if (status != NXGE_OK) {
        nxge_unmap_rxdma(nxge, channel);
    }

    if (!nxge->statsp->rdc_ksp[channel])
        nxge_setup_rdc_kstats(nxge, channel);

    NXGE_DEBUG_MSG((nxge, MEM2_CTL,
        "<== nxge_init_rxdma_channel: status 0x%x", status));

    return (status);
}

void
nxge_uninit_rxdma_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels"));

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "nxge_uninit_rxdma_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            nxge_grp_dc_remove(nxgep, VP_BOUND_RX, rdc);
        }
    }

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channels"));
}

void
nxge_uninit_rxdma_channel(p_nxge_t nxgep, int channel)
{
    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channel"));

    if (nxgep->statsp->rdc_ksp[channel]) {
        kstat_delete(nxgep->statsp->rdc_ksp[channel]);
        nxgep->statsp->rdc_ksp[channel] = 0;
    }

    nxge_rxdma_hw_stop(nxgep, channel);
    nxge_unmap_rxdma(nxgep, channel);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_rxdma_channel"));
}

nxge_status_t
nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_reset_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_cfg_rdc_reset(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel"));

    return (status);
}

void
nxge_rxdma_regs_dump_channels(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels"));

    if (!isLDOMguest(nxgep)) {
        npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
        (void) npi_rxdma_dump_fzc_regs(handle);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_regs_dump_channels: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                (void) nxge_dump_rxdma_channel(nxgep, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump"));
}

nxge_status_t
nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_dump_rdc_regs(handle, channel);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel"));
    return (status);
}

nxge_status_t
nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ent_msk_t mask_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_init_rxdma_channel_event_mask"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p);
    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

nxge_status_t
nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    p_rx_dma_ctl_stat_t cs_p)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_init_rxdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_rxdma_cfg_rdcgrp_default_rdc
 *
 *	Set the default RDC for an RDC Group (Table)
 *
 * Arguments:
 * 	nxgep
 * 	rdcgrp	The group to modify
 * 	rdc	The new default RDC.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_rxdma_cfg_rdc_table_default_rdc()
 *
 * Registers accessed:
 *	RDC_TBL_REG: FZC_ZCP + 0x10000
 *
 * Context:
 *	Service domain
 */
nxge_status_t
nxge_rxdma_cfg_rdcgrp_default_rdc(
    p_nxge_t nxgep,
    uint8_t rdcgrp,
    uint8_t rdc)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;
    p_nxge_dma_pt_cfg_t p_dma_cfgp;
    p_nxge_rdc_grp_t rdc_grp_p;
    uint8_t actual_rdcgrp, actual_rdc;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rdcgrp_default_rdc"));
    p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /*
     * This has to be rewritten.  Do we even allow this anymore?
     */
    rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp];
    RDC_MAP_IN(rdc_grp_p->map, rdc);
    rdc_grp_p->def_rdc = rdc;

    actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp);
    actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc);

    rs = npi_rxdma_cfg_rdc_table_default_rdc(
        handle, actual_rdcgrp, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_rdcgrp_default_rdc"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc)
{
    npi_handle_t handle;
    uint8_t actual_rdc;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_port_default_rdc"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    actual_rdc = rdc;	/* XXX Hack! */
    rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " <== nxge_rxdma_cfg_port_default_rdc"));

    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel,
    uint16_t pkts)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        " ==> nxge_rxdma_cfg_rcr_threshold"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel,
    uint16_t tout, uint8_t enable)
{
    npi_status_t rs = NPI_SUCCESS;
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    if (enable == 0) {
        rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel);
    } else {
        rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
            tout);
    }

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
    NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout"));
    return (NXGE_OK);
}

nxge_status_t
nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
    npi_handle_t handle;
    rdc_desc_cfg_t rdc_desc;
    p_rcrcfig_b_t cfgb_p;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the receive ring configurations.
     */
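    /*
     * rdc_desc collects everything npi_rxdma_cfg_rdc_ring() needs in
     * one descriptor: the mailbox address, the RBR base address,
     * length and page size, the three packet buffer sizes, and the
     * RCR geometry with its interrupt threshold and timeout.
     */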
    rdc_desc.mbox_enable = 1;
    rdc_desc.mbox_addr = mbox_p->mbox_addr;
    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_enable_rxdma_channel: mboxp $%p($%p)",
        mbox_p->mbox_addr, rdc_desc.mbox_addr));

    rdc_desc.rbr_len = rbr_p->rbb_max;
    rdc_desc.rbr_addr = rbr_p->rbr_addr;

    switch (nxgep->rx_bksize_code) {
    case RBR_BKSIZE_4K:
        rdc_desc.page_size = SIZE_4KB;
        break;
    case RBR_BKSIZE_8K:
        rdc_desc.page_size = SIZE_8KB;
        break;
    case RBR_BKSIZE_16K:
        rdc_desc.page_size = SIZE_16KB;
        break;
    case RBR_BKSIZE_32K:
        rdc_desc.page_size = SIZE_32KB;
        break;
    }

    rdc_desc.size0 = rbr_p->npi_pkt_buf_size0;
    rdc_desc.valid0 = 1;

    rdc_desc.size1 = rbr_p->npi_pkt_buf_size1;
    rdc_desc.valid1 = 1;

    rdc_desc.size2 = rbr_p->npi_pkt_buf_size2;
    rdc_desc.valid2 = 1;

    rdc_desc.full_hdr = rcr_p->full_hdr_flag;
    rdc_desc.offset = rcr_p->sw_priv_hdr_len;

    rdc_desc.rcr_len = rcr_p->comp_size;
    rdc_desc.rcr_addr = rcr_p->rcr_addr;

    cfgb_p = &(rcr_p->rcr_cfgb);
    rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres;
    /* For now, disable this timeout in a guest domain. */
    if (isLDOMguest(nxgep)) {
        rdc_desc.rcr_timeout = 0;
        rdc_desc.rcr_timeout_enable = 0;
    } else {
        rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout;
        rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout;
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "rbr_len qlen %d pagesize code %d rcr_len %d",
        rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: "
        "size 0 %d size 1 %d size 2 %d",
        rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1,
        rbr_p->npi_pkt_buf_size2));

    rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /*
     * Enable the timeout and threshold.
     */
    rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
        rdc_desc.rcr_threshold);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
        rdc_desc.rcr_timeout);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /* Enable the DMA */
    rs = npi_rxdma_cfg_rdc_enable(handle, channel);
    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }
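    /*
     * The channel is now enabled; kicking the RBR below hands the
     * initial rbb_max buffers to the hardware, and clearing the
     * "RBR empty" state lets the engine start filling them.
     */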
    /* Kick the DMA engine. */
    npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max);
    /* Clear the rbr empty bit */
    (void) npi_rxdma_channel_rbr_empty_clear(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel"));

    return (NXGE_OK);
}

nxge_status_t
nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;
    npi_status_t rs = NPI_SUCCESS;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* disable the DMA */
    rs = npi_rxdma_cfg_rdc_disable(handle, channel);
    if (rs != NPI_SUCCESS) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_disable_rxdma_channel:failed (0x%x)",
            rs));
        return (NXGE_ERROR | rs);
    }

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel"));
    return (NXGE_OK);
}

nxge_status_t
nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel)
{
    npi_handle_t handle;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_channel_rcrflush"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    npi_rxdma_rdc_rcr_flush(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "<== nxge_rxdma_channel_rcrflush"));
    return (status);
}

#define	MID_INDEX(l, r) ((r + l + 1) >> 1)

#define	TO_LEFT -1
#define	TO_RIGHT 1
#define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
#define	BOTH_LEFT (TO_LEFT + TO_LEFT)
#define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
#define	NO_HINT 0xffffffff
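/*
 * The binary search in nxge_rxbuf_pp_to_vp() classifies a candidate
 * block by summing two direction flags: base_side is TO_RIGHT when the
 * packet address is at or above the block base, and end_side is TO_LEFT
 * when it is below the block end.  IN_MIDDLE (TO_RIGHT + TO_LEFT == 0)
 * therefore means the address falls inside the block, while BOTH_LEFT
 * and BOTH_RIGHT steer the search down or up the sorted array.
 */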
/*ARGSUSED*/
nxge_status_t
nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
    int bufsize;
    uint64_t pktbuf_pp;
    uint64_t dvma_addr;
    rxring_info_t *ring_info;
    int base_side, end_side;
    int r_index, l_index, anchor_index;
    int found, search_done;
    uint32_t offset, chunk_size, block_size, page_size_mask;
    uint32_t chunk_index, block_index, total_index;
    int max_iterations, iteration;
    rxbuf_index_info_t *bufinfo;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp"));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
        pkt_buf_addr_pp,
        pktbufsz_type));
#if defined(__i386)
    pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
    pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

    switch (pktbufsz_type) {
    case 0:
        bufsize = rbr_p->pkt_buf_size0;
        break;
    case 1:
        bufsize = rbr_p->pkt_buf_size1;
        break;
    case 2:
        bufsize = rbr_p->pkt_buf_size2;
        break;
    case RCR_SINGLE_BLOCK:
        bufsize = 0;
        anchor_index = 0;
        break;
    default:
        return (NXGE_ERROR);
    }

    if (rbr_p->num_blocks == 1) {
        anchor_index = 0;
        ring_info = rbr_p->ring_info;
        bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (found, 1 block) "
            "buf_pp $%p btype %d anchor_index %d "
            "bufinfo $%p",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index,
            bufinfo));

        goto found_index;
    }

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "buf_pp $%p btype %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        anchor_index));

    ring_info = rbr_p->ring_info;
    found = B_FALSE;
    bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
    iteration = 0;
    max_iterations = ring_info->max_iterations;

    /*
     * First check if this block has been seen recently.  This is
     * indicated by a hint which is initialized when the first
     * buffer of the block is seen.  The hint is reset when the
     * last buffer of the block has been processed.  As three
     * block sizes are supported, three hints are kept.  The idea
     * behind the hints is that once the hardware uses a block for
     * a buffer of that size, it will use it exclusively for that
     * size and will use it until it is exhausted.  It is assumed
     * that there would be a single block in use for a given
     * buffer size at any given time.
     */
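    /*
     * Illustrative example (hypothetical numbers): if the hint for
     * this buffer size points at a chunk with dvma_addr 0x10000 and
     * buf_size 0x8000, any packet address in [0x10000, 0x18000)
     * hits that chunk directly and the binary search is skipped.
     */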
    if (ring_info->hint[pktbufsz_type] != NO_HINT) {
        anchor_index = ring_info->hint[pktbufsz_type];
        dvma_addr = bufinfo[anchor_index].dvma_addr;
        chunk_size = bufinfo[anchor_index].buf_size;
        if ((pktbuf_pp >= dvma_addr) &&
            (pktbuf_pp < (dvma_addr + chunk_size))) {
            found = B_TRUE;
            /*
             * Check if this is the last buffer in the block.
             * If so, then reset the hint for the size.
             */
            if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
                ring_info->hint[pktbufsz_type] = NO_HINT;
        }
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (!found)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));

        /*
         * This is the first buffer of the block of this size.
         * Need to search the whole information array.
         * The search uses a binary tree algorithm and assumes
         * that the information is already sorted in increasing
         * order:
         *	info[0] < info[1] < info[2]  .... < info[n-1]
         * where n is the size of the information array.
         */
        r_index = rbr_p->num_blocks - 1;
        l_index = 0;
        search_done = B_FALSE;
        anchor_index = MID_INDEX(r_index, l_index);
        while (search_done == B_FALSE) {
            if ((r_index == l_index) ||
                (iteration >= max_iterations))
                search_done = B_TRUE;
            end_side = TO_RIGHT; /* to the right */
            base_side = TO_LEFT; /* to the left */
            /* read the DVMA address information and sort it */
            dvma_addr = bufinfo[anchor_index].dvma_addr;
            chunk_size = bufinfo[anchor_index].buf_size;
            NXGE_DEBUG_MSG((nxgep, RX2_CTL,
                "==> nxge_rxbuf_pp_to_vp: (searching)"
                "buf_pp $%p btype %d "
                "anchor_index %d chunk_size %d dvmaaddr $%p",
                pkt_buf_addr_pp,
                pktbufsz_type,
                anchor_index,
                chunk_size,
                dvma_addr));

            if (pktbuf_pp >= dvma_addr)
                base_side = TO_RIGHT; /* to the right */
            if (pktbuf_pp < (dvma_addr + chunk_size))
                end_side = TO_LEFT; /* to the left */

            switch (base_side + end_side) {
            case IN_MIDDLE:
                /* found */
                found = B_TRUE;
                search_done = B_TRUE;
                if ((pktbuf_pp + bufsize) <
                    (dvma_addr + chunk_size))
                    ring_info->hint[pktbufsz_type] =
                        bufinfo[anchor_index].buf_index;
                break;
            case BOTH_RIGHT:
                /* not found: go to the right */
                l_index = anchor_index + 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;
            case BOTH_LEFT:
                /* not found: go to the left */
                r_index = anchor_index - 1;
                anchor_index = MID_INDEX(r_index, l_index);
                break;
            default: /* should not come here */
                return (NXGE_ERROR);
            }
            iteration++;
        }

        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search done)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
    }

    if (found == B_FALSE) {
        NXGE_DEBUG_MSG((nxgep, RX2_CTL,
            "==> nxge_rxbuf_pp_to_vp: (search failed)"
            "buf_pp $%p btype %d anchor_index %d",
            pkt_buf_addr_pp,
            pktbufsz_type,
            anchor_index));
        return (NXGE_ERROR);
    }

found_index:
    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND1)"
        "buf_pp $%p btype %d bufsize %d anchor_index %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index));

    /* index of the first block in this chunk */
    chunk_index = bufinfo[anchor_index].start_index;
    dvma_addr = bufinfo[anchor_index].dvma_addr;
    page_size_mask = ring_info->block_size_mask;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr));

    offset = pktbuf_pp - dvma_addr; /* offset within the chunk */
    block_size = rbr_p->block_size; /* System block(page) size */

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
        "buf_pp $%p btype %d bufsize %d "
        "anchor_index %d chunk_index %d dvma $%p "
        "offset %d block_size %d",
        pkt_buf_addr_pp,
        pktbufsz_type,
        bufsize,
        anchor_index,
        chunk_index,
        dvma_addr,
        offset,
        block_size));

    NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index"));

    block_index = (offset / block_size); /* index within chunk */
    total_index = chunk_index + block_index;

    NXGE_DEBUG_MSG((nxgep, RX2_CTL,
        "==> nxge_rxbuf_pp_to_vp: "
        "total_index %d dvma_addr $%p "
        "offset %d block_size %d "
        "block_index %d ",
        total_index, dvma_addr,
        offset, block_size,
        block_index));
#if defined(__i386)
    *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
        (uint32_t)offset);
#else
    *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
        (uint64_t)offset);
#endif
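    /*
     * Worked example (hypothetical values): with dvma_addr 0x20000,
     * pktbuf_pp 0x23400 and an 8K (0x2000) system block size, offset
     * is 0x3400 and block_index is 1, so total_index is
     * chunk_index + 1; bufoffset below is offset & page_size_mask,
     * i.e. 0x1400 for an 8K mask of 0x1fff.
     */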
"total_index %d dvma_addr $%p " 9596929Smisaki "offset %d block_size %d " 9606929Smisaki "block_index %d " 9616929Smisaki "*pkt_buf_addr_p $%p", 9626929Smisaki total_index, dvma_addr, 9636929Smisaki offset, block_size, 9646929Smisaki block_index, 9656929Smisaki *pkt_buf_addr_p)); 9663859Sml29623 9673859Sml29623 9683859Sml29623 *msg_index = total_index; 9693859Sml29623 *bufoffset = (offset & page_size_mask); 9703859Sml29623 9713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 9726929Smisaki "==> nxge_rxbuf_pp_to_vp: get msg index: " 9736929Smisaki "msg_index %d bufoffset_index %d", 9746929Smisaki *msg_index, 9756929Smisaki *bufoffset)); 9763859Sml29623 9773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 9783859Sml29623 9793859Sml29623 return (NXGE_OK); 9803859Sml29623 } 9813859Sml29623 9823859Sml29623 /* 9833859Sml29623 * used by quick sort (qsort) function 9843859Sml29623 * to perform comparison 9853859Sml29623 */ 9863859Sml29623 static int 9873859Sml29623 nxge_sort_compare(const void *p1, const void *p2) 9883859Sml29623 { 9893859Sml29623 9903859Sml29623 rxbuf_index_info_t *a, *b; 9913859Sml29623 9923859Sml29623 a = (rxbuf_index_info_t *)p1; 9933859Sml29623 b = (rxbuf_index_info_t *)p2; 9943859Sml29623 9953859Sml29623 if (a->dvma_addr > b->dvma_addr) 9963859Sml29623 return (1); 9973859Sml29623 if (a->dvma_addr < b->dvma_addr) 9983859Sml29623 return (-1); 9993859Sml29623 return (0); 10003859Sml29623 } 10013859Sml29623 10023859Sml29623 10033859Sml29623 10043859Sml29623 /* 10053859Sml29623 * grabbed this sort implementation from common/syscall/avl.c 10063859Sml29623 * 10073859Sml29623 */ 10083859Sml29623 /* 10093859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 10103859Sml29623 * v = Ptr to array/vector of objs 10113859Sml29623 * n = # objs in the array 10123859Sml29623 * s = size of each obj (must be multiples of a word size) 10133859Sml29623 * f = ptr to function to compare two objs 10143859Sml29623 * returns (-1 = less than, 0 = equal, 1 = greater than 10153859Sml29623 */ 10163859Sml29623 void 10173859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 10183859Sml29623 { 10193859Sml29623 int g, i, j, ii; 10203859Sml29623 unsigned int *p1, *p2; 10213859Sml29623 unsigned int tmp; 10223859Sml29623 10233859Sml29623 /* No work to do */ 10243859Sml29623 if (v == NULL || n <= 1) 10253859Sml29623 return; 10263859Sml29623 /* Sanity check on arguments */ 10273859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 10283859Sml29623 ASSERT(s > 0); 10293859Sml29623 10303859Sml29623 for (g = n / 2; g > 0; g /= 2) { 10313859Sml29623 for (i = g; i < n; i++) { 10323859Sml29623 for (j = i - g; j >= 0 && 10336929Smisaki (*f)(v + j * s, v + (j + g) * s) == 1; 10346929Smisaki j -= g) { 10353859Sml29623 p1 = (unsigned *)(v + j * s); 10363859Sml29623 p2 = (unsigned *)(v + (j + g) * s); 10373859Sml29623 for (ii = 0; ii < s / 4; ii++) { 10383859Sml29623 tmp = *p1; 10393859Sml29623 *p1++ = *p2; 10403859Sml29623 *p2++ = tmp; 10413859Sml29623 } 10423859Sml29623 } 10433859Sml29623 } 10443859Sml29623 } 10453859Sml29623 } 10463859Sml29623 10473859Sml29623 /* 10483859Sml29623 * Initialize data structures required for rxdma 10493859Sml29623 * buffer dvma->vmem address lookup 10503859Sml29623 */ 10513859Sml29623 /*ARGSUSED*/ 10523859Sml29623 static nxge_status_t 10533859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 10543859Sml29623 { 10553859Sml29623 10563859Sml29623 int index; 10573859Sml29623 rxring_info_t *ring_info; 
/*
 * Initialize data structures required for rxdma
 * buffer dvma->vmem address lookup.
 */
/*ARGSUSED*/
static nxge_status_t
nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp)
{
    int index;
    rxring_info_t *ring_info;
    int max_iteration = 0, max_index = 0;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init"));

    ring_info = rbrp->ring_info;
    ring_info->hint[0] = NO_HINT;
    ring_info->hint[1] = NO_HINT;
    ring_info->hint[2] = NO_HINT;
    max_index = rbrp->num_blocks;

    /* read the DVMA address information and sort it */
    /* do init of the information array */

    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Sort ptrs"));

    /* sort the array */
    nxge_ksort((void *)ring_info->buffer, max_index,
        sizeof (rxbuf_index_info_t), nxge_sort_compare);

    for (index = 0; index < max_index; index++) {
        NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
            " nxge_rxbuf_index_info_init: sorted chunk %d "
            " ioaddr $%p kaddr $%p size %x",
            index, ring_info->buffer[index].dvma_addr,
            ring_info->buffer[index].kaddr,
            ring_info->buffer[index].buf_size));
    }

    max_iteration = 0;
    while (max_index >= (1ULL << max_iteration))
        max_iteration++;
    ring_info->max_iterations = max_iteration + 1;
    NXGE_DEBUG_MSG((nxgep, DMA2_CTL,
        " nxge_rxbuf_index_info_init Find max iter %d",
        ring_info->max_iterations));

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init"));
    return (NXGE_OK);
}

/* ARGSUSED */
void
nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p)
{
#ifdef	NXGE_DEBUG
    uint32_t bptr;
    uint64_t pp;

    bptr = entry_p->bits.hdw.pkt_buf_addr;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "\trcr entry $%p "
        "\trcr entry 0x%0llx "
        "\trcr entry 0x%08x "
        "\trcr entry 0x%08x "
        "\tvalue 0x%0llx\n"
        "\tmulti = %d\n"
        "\tpkt_type = 0x%x\n"
        "\tzero_copy = %d\n"
        "\tnoport = %d\n"
        "\tpromis = %d\n"
        "\terror = 0x%04x\n"
        "\tdcf_err = 0x%01x\n"
        "\tl2_len = %d\n"
        "\tpktbufsize = %d\n"
        "\tpkt_buf_addr = $%p\n"
        "\tpkt_buf_addr (<< 6) = $%p\n",
        entry_p,
        *(int64_t *)entry_p,
        *(int32_t *)entry_p,
        *(int32_t *)((char *)entry_p + 32),
        entry_p->value,
        entry_p->bits.hdw.multi,
        entry_p->bits.hdw.pkt_type,
        entry_p->bits.hdw.zero_copy,
        entry_p->bits.hdw.noport,
        entry_p->bits.hdw.promis,
        entry_p->bits.hdw.error,
        entry_p->bits.hdw.dcf_err,
        entry_p->bits.hdw.l2_len,
        entry_p->bits.hdw.pktbufsz,
        bptr,
        entry_p->bits.ldw.pkt_buf_addr));

    pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
        RCR_PKT_BUF_ADDR_SHIFT;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
        pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}

void
nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc)
{
    npi_handle_t handle;
    rbr_stat_t rbr_stat;
    addr44_t hd_addr;
    addr44_t tail_addr;
    uint16_t qlen;

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "==> nxge_rxdma_regs_dump: rdc channel %d", rdc));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    /* RBR head */
    hd_addr.addr = 0;
    (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)(uint32_t)hd_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got hdptr $%p \n",
        (void *)hd_addr.addr);
#endif

    /* RBR stats */
    (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat);
    printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen);

    /* RCR tail */
    tail_addr.addr = 0;
    (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr);
#if defined(__i386)
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)(uint32_t)tail_addr.addr);
#else
    printf("nxge_rxdma_regs_dump: got tail ptr $%p \n",
        (void *)tail_addr.addr);
#endif

    /* RCR qlen */
    (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen);
    printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen);

    NXGE_DEBUG_MSG((nxgep, RX_CTL,
        "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc));
}

nxge_status_t
nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    nxge_status_t status;
    npi_status_t rs;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "==> nxge_rxdma_hw_mode: mode %d", enable));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_mode: not initialized"));
        return (NXGE_ERROR);
    }

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_hw_mode: "
            "NULL ring pointer(s)"));
        return (NXGE_ERROR);
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_rxdma_hw_mode: no channels"));
        return (NULL);
    }
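    /*
     * set->owned.map is a bitmap of the RDCs this instance owns;
     * walk every possible channel and reprogram only the owned
     * ones.
     */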
    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxgep);
            if (ring) {
                if (enable) {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (enable)", rdc));
                    rs = npi_rxdma_cfg_rdc_enable
                        (handle, rdc);
                } else {
                    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
                        "==> nxge_rxdma_hw_mode: "
                        "channel %d (disable)", rdc));
                    rs = npi_rxdma_cfg_rdc_disable
                        (handle, rdc);
                }
            }
        }
    }

    status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs);

    NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
        "<== nxge_rxdma_hw_mode: status 0x%x", status));

    return (status);
}

void
nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_enable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_enable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel"));
}

void
nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel)
{
    npi_handle_t handle;

    NXGE_DEBUG_MSG((nxgep, DMA_CTL,
        "==> nxge_rxdma_disable_channel: channel %d", channel));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_rxdma_cfg_rdc_disable(handle, channel);

    NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel"));
}

void
nxge_hw_start_rx(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx"));

    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START);
    (void) nxge_rx_mac_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx"));
}

/*ARGSUSED*/
void
nxge_fixup_rxdma_rings(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->rx_set;
    int rdc;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings"));

    if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_fixup_rxdma_rings: "
            "NULL ring pointer(s)"));
        return;
    }

    if (set->owned.map == 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "nxge_fixup_rxdma_rings: no channels"));
        return;
    }

    for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
        if ((1 << rdc) & set->owned.map) {
            rx_rbr_ring_t *ring =
                nxgep->rx_rbr_rings->rbr_rings[rdc];
            if (ring) {
                nxge_rxdma_hw_stop(nxgep, rdc);
                NXGE_DEBUG_MSG((nxgep, RX_CTL,
                    "==> nxge_fixup_rxdma_rings: "
                    "channel %d ring $%p",
                    rdc, ring));
                (void) nxge_rxdma_fixup_channel
                    (nxgep, rdc, rdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings"));
}

void
nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel)
{
    int i;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel"));
    i = nxge_rxdma_get_ring_index(nxgep, channel);
    if (i < 0) {
        NXGE_DEBUG_MSG((nxgep, RX_CTL,
            "<== nxge_rxdma_fix_channel: no entry found"));
        return;
    }

    nxge_rxdma_fixup_channel(nxgep, channel, i);

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fix_channel"));
}

void
nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry)
{
    int ndmas;
    p_rx_rbr_rings_t rx_rbr_rings;
    p_rx_rbr_ring_t *rbr_rings;
    p_rx_rcr_rings_t rx_rcr_rings;
    p_rx_rcr_ring_t *rcr_rings;
    p_rx_mbox_areas_t rx_mbox_areas_p;
    p_rx_mbox_t *rx_mbox_p;
    p_nxge_dma_pool_t dma_buf_poolp;
    p_nxge_dma_pool_t dma_cntl_poolp;
    p_rx_rbr_ring_t rbrp;
    p_rx_rcr_ring_t rcrp;
    p_rx_mbox_t mboxp;
    p_nxge_dma_common_t dmap;
    nxge_status_t status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel"));

    (void) nxge_rxdma_stop_channel(nxgep, channel);

    dma_buf_poolp = nxgep->rx_buf_pool_p;
    dma_cntl_poolp = nxgep->rx_cntl_pool_p;

    if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: buf not allocated"));
        return;
    }

    ndmas = dma_buf_poolp->ndmas;
    if (!ndmas) {
        NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
            "<== nxge_rxdma_fixup_channel: no dma allocated"));
        return;
    }

    rx_rbr_rings = nxgep->rx_rbr_rings;
    rx_rcr_rings = nxgep->rx_rcr_rings;
    rbr_rings = rx_rbr_rings->rbr_rings;
    rcr_rings = rx_rcr_rings->rcr_rings;
    rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
    rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;

    /* Reinitialize the receive block and completion rings */
    rbrp = (p_rx_rbr_ring_t)rbr_rings[entry];
    rcrp = (p_rx_rcr_ring_t)rcr_rings[entry];
    mboxp = (p_rx_mbox_t)rx_mbox_p[entry];

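    /*
     * Reset the software ring state: the RBR write index restarts
     * at the last slot (rbb_max - 1) and both RCR completion
     * indices restart at zero before the channel is restarted.
     */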
rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 14143859Sml29623 rbrp->rbr_rd_index = 0; 14153859Sml29623 rcrp->comp_rd_index = 0; 14163859Sml29623 rcrp->comp_wt_index = 0; 14173859Sml29623 14183859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 14193859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 14203859Sml29623 14213859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 14226929Smisaki rbrp, rcrp, mboxp); 14233859Sml29623 if (status != NXGE_OK) { 14243859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14253859Sml29623 } 14263859Sml29623 if (status != NXGE_OK) { 14273859Sml29623 goto nxge_rxdma_fixup_channel_fail; 14283859Sml29623 } 14293859Sml29623 14303859Sml29623 nxge_rxdma_fixup_channel_fail: 14313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14326929Smisaki "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 14333859Sml29623 14343859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 14353859Sml29623 } 14363859Sml29623 14378275SEric Cheng /* 14388275SEric Cheng * Convert an absolute RDC number to a Receive Buffer Ring index. That is, 14398275SEric Cheng * map <channel> to an index into nxgep->rx_rbr_rings. 14408275SEric Cheng * (device ring index -> port ring index) 14418275SEric Cheng */ 14423859Sml29623 int 14433859Sml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 14443859Sml29623 { 14458275SEric Cheng int i, ndmas; 14468275SEric Cheng uint16_t rdc; 14478275SEric Cheng p_rx_rbr_rings_t rx_rbr_rings; 14488275SEric Cheng p_rx_rbr_ring_t *rbr_rings; 14498275SEric Cheng 14508275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14518275SEric Cheng "==> nxge_rxdma_get_ring_index: channel %d", channel)); 14528275SEric Cheng 14538275SEric Cheng rx_rbr_rings = nxgep->rx_rbr_rings; 14548275SEric Cheng if (rx_rbr_rings == NULL) { 14558275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14568275SEric Cheng "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 14578275SEric Cheng return (-1); 14588275SEric Cheng } 14598275SEric Cheng ndmas = rx_rbr_rings->ndmas; 14608275SEric Cheng if (!ndmas) { 14618275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14628275SEric Cheng "<== nxge_rxdma_get_ring_index: no channel")); 14638275SEric Cheng return (-1); 14648275SEric Cheng } 14658275SEric Cheng 14668275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14678275SEric Cheng "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 14688275SEric Cheng 14698275SEric Cheng rbr_rings = rx_rbr_rings->rbr_rings; 14708275SEric Cheng for (i = 0; i < ndmas; i++) { 14718275SEric Cheng rdc = rbr_rings[i]->rdc; 14728275SEric Cheng if (channel == rdc) { 14738275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14748275SEric Cheng "==> nxge_rxdma_get_rbr_ring: channel %d " 14758275SEric Cheng "(index %d) ring %d", channel, i, rbr_rings[i])); 14768275SEric Cheng return (i); 14778275SEric Cheng } 14788275SEric Cheng } 14798275SEric Cheng 14808275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, 14818275SEric Cheng "<== nxge_rxdma_get_rbr_ring_index: not found")); 14828275SEric Cheng 14838275SEric Cheng return (-1); 14843859Sml29623 } 14853859Sml29623 14863859Sml29623 p_rx_rbr_ring_t 14873859Sml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 14883859Sml29623 { 14896495Sspeer nxge_grp_set_t *set = &nxgep->rx_set; 14906495Sspeer nxge_channel_t rdc; 14913859Sml29623 14923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14936929Smisaki "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14943859Sml29623 14956495Sspeer if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) { 
14863859Sml29623 p_rx_rbr_ring_t
14873859Sml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel)
14883859Sml29623 {
14896495Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
14906495Sspeer 	nxge_channel_t rdc;
14913859Sml29623 
14923859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
14936929Smisaki 	    "==> nxge_rxdma_get_rbr_ring: channel %d", channel));
14943859Sml29623 
14956495Sspeer 	if (nxgep->rx_rbr_rings == 0 || nxgep->rx_rbr_rings->rbr_rings == 0) {
14966495Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
14976495Sspeer 		    "<== nxge_rxdma_get_rbr_ring: "
14986495Sspeer 		    "NULL ring pointer(s)"));
14993859Sml29623 		return (NULL);
15003859Sml29623 	}
15016495Sspeer 
15026495Sspeer 	if (set->owned.map == 0) {
15033859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
15046495Sspeer 		    "<== nxge_rxdma_get_rbr_ring: no channels"));
15053859Sml29623 		return (NULL);
15063859Sml29623 	}
15073859Sml29623 
15086495Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
15096495Sspeer 		if ((1 << rdc) & set->owned.map) {
15106495Sspeer 			rx_rbr_ring_t *ring =
15116495Sspeer 			    nxgep->rx_rbr_rings->rbr_rings[rdc];
15126495Sspeer 			if (ring) {
15136495Sspeer 				if (channel == ring->rdc) {
15146495Sspeer 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
15156495Sspeer 					    "==> nxge_rxdma_get_rbr_ring: "
15166495Sspeer 					    "channel %d ring $%p", rdc, ring));
15176495Sspeer 					return (ring);
15186495Sspeer 				}
15196495Sspeer 			}
15203859Sml29623 		}
15213859Sml29623 	}
15223859Sml29623 
15233859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
15246929Smisaki 	    "<== nxge_rxdma_get_rbr_ring: not found"));
15253859Sml29623 
15263859Sml29623 	return (NULL);
15273859Sml29623 }
15283859Sml29623 
15293859Sml29623 p_rx_rcr_ring_t
15303859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel)
15313859Sml29623 {
15326495Sspeer 	nxge_grp_set_t *set = &nxgep->rx_set;
15336495Sspeer 	nxge_channel_t rdc;
15343859Sml29623 
15353859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
15366929Smisaki 	    "==> nxge_rxdma_get_rcr_ring: channel %d", channel));
15373859Sml29623 
15386495Sspeer 	if (nxgep->rx_rcr_rings == 0 || nxgep->rx_rcr_rings->rcr_rings == 0) {
15396495Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
15406495Sspeer 		    "<== nxge_rxdma_get_rcr_ring: "
15416495Sspeer 		    "NULL ring pointer(s)"));
15423859Sml29623 		return (NULL);
15433859Sml29623 	}
15446495Sspeer 
15456495Sspeer 	if (set->owned.map == 0) {
15463859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
15476495Sspeer 		    "<== nxge_rxdma_get_rcr_ring: no channels"));
15483859Sml29623 		return (NULL);
15493859Sml29623 	}
15503859Sml29623 
15516495Sspeer 	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
15526495Sspeer 		if ((1 << rdc) & set->owned.map) {
15536495Sspeer 			rx_rcr_ring_t *ring =
15546495Sspeer 			    nxgep->rx_rcr_rings->rcr_rings[rdc];
15556495Sspeer 			if (ring) {
15566495Sspeer 				if (channel == ring->rdc) {
15576495Sspeer 					NXGE_DEBUG_MSG((nxgep, RX_CTL,
15586495Sspeer 					    "==> nxge_rxdma_get_rcr_ring: "
15596495Sspeer 					    "channel %d ring $%p", rdc, ring));
15606495Sspeer 					return (ring);
15616495Sspeer 				}
15626495Sspeer 			}
15633859Sml29623 		}
15643859Sml29623 	}
15653859Sml29623 
15663859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
15676929Smisaki 	    "<== nxge_rxdma_get_rcr_ring: not found"));
15683859Sml29623 
15693859Sml29623 	return (NULL);
15703859Sml29623 }
15713859Sml29623 
15723859Sml29623 /*
15733859Sml29623  * Static functions start here.
15743859Sml29623 */ 15753859Sml29623 static p_rx_msg_t 15763859Sml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 15773859Sml29623 { 15783859Sml29623 p_rx_msg_t nxge_mp = NULL; 15793859Sml29623 p_nxge_dma_common_t dmamsg_p; 15803859Sml29623 uchar_t *buffer; 15813859Sml29623 15823859Sml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 15833859Sml29623 if (nxge_mp == NULL) { 15844185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15856929Smisaki "Allocation of a rx msg failed.")); 15863859Sml29623 goto nxge_allocb_exit; 15873859Sml29623 } 15883859Sml29623 15893859Sml29623 nxge_mp->use_buf_pool = B_FALSE; 15903859Sml29623 if (dmabuf_p) { 15913859Sml29623 nxge_mp->use_buf_pool = B_TRUE; 15923859Sml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15933859Sml29623 *dmamsg_p = *dmabuf_p; 15943859Sml29623 dmamsg_p->nblocks = 1; 15953859Sml29623 dmamsg_p->block_size = size; 15963859Sml29623 dmamsg_p->alength = size; 15973859Sml29623 buffer = (uchar_t *)dmabuf_p->kaddrp; 15983859Sml29623 15993859Sml29623 dmabuf_p->kaddrp = (void *) 16006929Smisaki ((char *)dmabuf_p->kaddrp + size); 16013859Sml29623 dmabuf_p->ioaddr_pp = (void *) 16026929Smisaki ((char *)dmabuf_p->ioaddr_pp + size); 16033859Sml29623 dmabuf_p->alength -= size; 16043859Sml29623 dmabuf_p->offset += size; 16053859Sml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 16063859Sml29623 dmabuf_p->dma_cookie.dmac_size -= size; 16073859Sml29623 16083859Sml29623 } else { 16093859Sml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 16103859Sml29623 if (buffer == NULL) { 16114185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 16126929Smisaki "Allocation of a receive page failed.")); 16133859Sml29623 goto nxge_allocb_fail1; 16143859Sml29623 } 16153859Sml29623 } 16163859Sml29623 16173859Sml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 16183859Sml29623 if (nxge_mp->rx_mblk_p == NULL) { 16194185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 16203859Sml29623 goto nxge_allocb_fail2; 16213859Sml29623 } 16223859Sml29623 16233859Sml29623 nxge_mp->buffer = buffer; 16243859Sml29623 nxge_mp->block_size = size; 16253859Sml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 16263859Sml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 16273859Sml29623 nxge_mp->ref_cnt = 1; 16283859Sml29623 nxge_mp->free = B_TRUE; 16293859Sml29623 nxge_mp->rx_use_bcopy = B_FALSE; 16303859Sml29623 16313859Sml29623 atomic_inc_32(&nxge_mblks_pending); 16323859Sml29623 16333859Sml29623 goto nxge_allocb_exit; 16343859Sml29623 16353859Sml29623 nxge_allocb_fail2: 16363859Sml29623 if (!nxge_mp->use_buf_pool) { 16373859Sml29623 KMEM_FREE(buffer, size); 16383859Sml29623 } 16393859Sml29623 16403859Sml29623 nxge_allocb_fail1: 16413859Sml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 16423859Sml29623 nxge_mp = NULL; 16433859Sml29623 16443859Sml29623 nxge_allocb_exit: 16453859Sml29623 return (nxge_mp); 16463859Sml29623 } 16473859Sml29623 16483859Sml29623 p_mblk_t 16493859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 16503859Sml29623 { 16513859Sml29623 p_mblk_t mp; 16523859Sml29623 16533859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 16543859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 16556929Smisaki "offset = 0x%08X " 16566929Smisaki "size = 0x%08X", 16576929Smisaki nxge_mp, offset, size)); 16583859Sml29623 16593859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 16606929Smisaki 0, &nxge_mp->freeb); 16613859Sml29623 if (mp == NULL) { 16623859Sml29623 
NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
16633859Sml29623 		goto nxge_dupb_exit;
16643859Sml29623 	}
16653859Sml29623 	atomic_inc_32(&nxge_mp->ref_cnt);
16663859Sml29623 
16673859Sml29623 
16683859Sml29623 nxge_dupb_exit:
16693859Sml29623 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p",
16706929Smisaki 	    nxge_mp));
16713859Sml29623 	return (mp);
16723859Sml29623 }
16733859Sml29623 
16743859Sml29623 p_mblk_t
16753859Sml29623 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size)
16763859Sml29623 {
16773859Sml29623 	p_mblk_t mp;
16783859Sml29623 	uchar_t *dp;
16793859Sml29623 
16803859Sml29623 	mp = allocb(size + NXGE_RXBUF_EXTRA, 0);
16813859Sml29623 	if (mp == NULL) {
16823859Sml29623 		NXGE_DEBUG_MSG((NULL, RX_CTL, "allocb failed"));
16833859Sml29623 		goto nxge_dupb_bcopy_exit;
16843859Sml29623 	}
16853859Sml29623 	dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA;
16863859Sml29623 	bcopy((void *)&nxge_mp->buffer[offset], dp, size);
16873859Sml29623 	mp->b_wptr = dp + size;
16883859Sml29623 
16893859Sml29623 nxge_dupb_bcopy_exit:
16903859Sml29623 	NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb_bcopy mp = $%p",
16916929Smisaki 	    nxge_mp));
16923859Sml29623 	return (mp);
16933859Sml29623 }
16943859Sml29623 
16953859Sml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p,
16963859Sml29623     p_rx_msg_t rx_msg_p);
16973859Sml29623 
16983859Sml29623 void
16993859Sml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
17003859Sml29623 {
17013859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page"));
17023859Sml29623 
17033859Sml29623 	/* Reuse this buffer */
17043859Sml29623 	rx_msg_p->free = B_FALSE;
17053859Sml29623 	rx_msg_p->cur_usage_cnt = 0;
17063859Sml29623 	rx_msg_p->max_usage_cnt = 0;
17073859Sml29623 	rx_msg_p->pkt_buf_size = 0;
17083859Sml29623 
17093859Sml29623 	if (rx_rbr_p->rbr_use_bcopy) {
17103859Sml29623 		rx_msg_p->rx_use_bcopy = B_FALSE;
17113859Sml29623 		atomic_dec_32(&rx_rbr_p->rbr_consumed);
17123859Sml29623 	}
17133859Sml29623 
17143859Sml29623 	/*
17153859Sml29623 	 * Get the rbr header pointer and its offset index.
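	 * A worked example, assuming a 4096-entry ring so that
	 * rbr_wrap_mask is 0xfff: a write index of 0xfff advances to
	 * (0xfff + 1) & 0xfff == 0, i.e. the "add one, then mask"
	 * below implements a power-of-two wrap of the kick index.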
17163859Sml29623 	 */
17173859Sml29623 	MUTEX_ENTER(&rx_rbr_p->post_lock);
17183859Sml29623 	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
17196929Smisaki 	    rx_rbr_p->rbr_wrap_mask);
17203859Sml29623 	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;
17213859Sml29623 	MUTEX_EXIT(&rx_rbr_p->post_lock);
17225770Sml29623 	npi_rxdma_rdc_rbr_kick(NXGE_DEV_NPI_HANDLE(nxgep),
17235770Sml29623 	    rx_rbr_p->rdc, 1);
17243859Sml29623 
17253859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
17266929Smisaki 	    "<== nxge_post_page (channel %d post_next_index %d)",
17276929Smisaki 	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
17283859Sml29623 
17293859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page"));
17303859Sml29623 }
17313859Sml29623 
17323859Sml29623 void
17333859Sml29623 nxge_freeb(p_rx_msg_t rx_msg_p)
17343859Sml29623 {
17353859Sml29623 	size_t size;
17363859Sml29623 	uchar_t *buffer = NULL;
17373859Sml29623 	int ref_cnt;
17384874Sml29623 	boolean_t free_state = B_FALSE;
17393859Sml29623 
17405170Stm144005 	rx_rbr_ring_t *ring = rx_msg_p->rx_rbr_p;
17415170Stm144005 
17423859Sml29623 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb"));
17433859Sml29623 	NXGE_DEBUG_MSG((NULL, MEM2_CTL,
17446929Smisaki 	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
17456929Smisaki 	    rx_msg_p, nxge_mblks_pending));
17463859Sml29623 
17474874Sml29623 	/*
17484874Sml29623 	 * First we need to capture the free state, then
17494874Sml29623 	 * atomically decrement the reference count, to prevent
17504874Sml29623 	 * a race with the interrupt thread that is processing
17514874Sml29623 	 * a loaned-up buffer block.
17524874Sml29623 	 */
17534874Sml29623 	free_state = rx_msg_p->free;
17543859Sml29623 	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
17553859Sml29623 	if (!ref_cnt) {
17565770Sml29623 		atomic_dec_32(&nxge_mblks_pending);
17573859Sml29623 		buffer = rx_msg_p->buffer;
17583859Sml29623 		size = rx_msg_p->block_size;
17593859Sml29623 		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
17606929Smisaki 		    "will free: rx_msg_p = $%p (block pending %d)",
17616929Smisaki 		    rx_msg_p, nxge_mblks_pending));
17623859Sml29623 
17633859Sml29623 		if (!rx_msg_p->use_buf_pool) {
17643859Sml29623 			KMEM_FREE(buffer, size);
17653859Sml29623 		}
17663859Sml29623 
17673859Sml29623 		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
17685170Stm144005 
17695759Smisaki 		if (ring) {
17705759Smisaki 			/*
17715759Smisaki 			 * Decrement the receive buffer ring's reference
17725759Smisaki 			 * count, too.
17735759Smisaki 			 */
17745759Smisaki 			atomic_dec_32(&ring->rbr_ref_cnt);
17755759Smisaki 
17765759Smisaki 			/*
17776495Sspeer 			 * Free the receive buffer ring, if
17785759Smisaki 			 * 1. all the receive buffers have been freed
17795759Smisaki 			 * 2. and the ring is in the proper state (that is,
17805759Smisaki 			 *    RBR_UNMAPPED, not still being unmapped).
17815759Smisaki 			 */
17825759Smisaki 			if (ring->rbr_ref_cnt == 0 &&
17835759Smisaki 			    ring->rbr_state == RBR_UNMAPPED) {
17846495Sspeer 				/*
17856495Sspeer 				 * Free receive data buffers,
17866495Sspeer 				 * buffer index information
17876495Sspeer 				 * (rxring_info) and
17886495Sspeer 				 * the message block ring.
17896495Sspeer 				 */
17906495Sspeer 				NXGE_DEBUG_MSG((NULL, RX_CTL,
17916495Sspeer 				    "nxge_freeb:rx_msg_p = $%p "
17926495Sspeer 				    "(block pending %d) free buffers",
17936495Sspeer 				    rx_msg_p, nxge_mblks_pending));
17946495Sspeer 				nxge_rxdma_databuf_free(ring);
17956495Sspeer 				if (ring->ring_info) {
17966495Sspeer 					KMEM_FREE(ring->ring_info,
17976495Sspeer 					    sizeof (rxring_info_t));
17986495Sspeer 				}
17996495Sspeer 
18006495Sspeer 				if (ring->rx_msg_ring) {
18016495Sspeer 					KMEM_FREE(ring->rx_msg_ring,
18026495Sspeer 					    ring->tnblocks *
18036495Sspeer 					    sizeof (p_rx_msg_t));
18046495Sspeer 				}
18055759Smisaki 				KMEM_FREE(ring, sizeof (*ring));
18065759Smisaki 			}
18075170Stm144005 		}
18083859Sml29623 		return;
18093859Sml29623 	}
18103859Sml29623 
18113859Sml29623 	/*
18123859Sml29623 	 * Repost buffer.
18133859Sml29623 	 */
18145759Smisaki 	if (free_state && (ref_cnt == 1) && ring) {
18153859Sml29623 		NXGE_DEBUG_MSG((NULL, RX_CTL,
18163859Sml29623 		    "nxge_freeb: post page $%p:", rx_msg_p));
18175170Stm144005 		if (ring->rbr_state == RBR_POSTING)
18185170Stm144005 			nxge_post_page(rx_msg_p->nxgep, ring, rx_msg_p);
18193859Sml29623 	}
18203859Sml29623 
18213859Sml29623 	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
18223859Sml29623 }
18233859Sml29623 
18243859Sml29623 uint_t
18253859Sml29623 nxge_rx_intr(void *arg1, void *arg2)
18263859Sml29623 {
18273859Sml29623 	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
18283859Sml29623 	p_nxge_t nxgep = (p_nxge_t)arg2;
18293859Sml29623 	p_nxge_ldg_t ldgp;
18303859Sml29623 	uint8_t channel;
18313859Sml29623 	npi_handle_t handle;
18323859Sml29623 	rx_dma_ctl_stat_t cs;
18338275SEric Cheng 	p_rx_rcr_ring_t rcr_ring;
18348275SEric Cheng 	mblk_t *mp;
18353859Sml29623 
18363859Sml29623 #ifdef NXGE_DEBUG
18373859Sml29623 	rxdma_cfig1_t cfg;
18383859Sml29623 #endif
18393859Sml29623 
18403859Sml29623 	if (ldvp == NULL) {
18413859Sml29623 		NXGE_DEBUG_MSG((NULL, INT_CTL,
18426929Smisaki 		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
18436929Smisaki 		    nxgep, ldvp));
18443859Sml29623 
18453859Sml29623 		return (DDI_INTR_CLAIMED);
18463859Sml29623 	}
18473859Sml29623 
18483859Sml29623 	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
18493859Sml29623 		nxgep = ldvp->nxgep;
18503859Sml29623 	}
18516602Sspeer 
18526602Sspeer 	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
18536602Sspeer 	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
18546602Sspeer 		NXGE_DEBUG_MSG((nxgep, INT_CTL,
18556602Sspeer 		    "<== nxge_rx_intr: interface not started or initialized"));
18566602Sspeer 		return (DDI_INTR_CLAIMED);
18576602Sspeer 	}
18586602Sspeer 
18593859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
18606929Smisaki 	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
18616929Smisaki 	    nxgep, ldvp));
18623859Sml29623 
18633859Sml29623 	/*
18643859Sml29623 	 * This interrupt handler is for a specific
18653859Sml29623 	 * receive dma channel.
18663859Sml29623 	 */
18673859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
18688275SEric Cheng 
18698275SEric Cheng 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[ldvp->vdma_index];
18708275SEric Cheng 
18718275SEric Cheng 	/*
18728275SEric Cheng 	 * The RCR ring lock must be held when packets
18738275SEric Cheng 	 * are being processed and the hardware registers are
18748275SEric Cheng 	 * being read or written to prevent race conditions
18758275SEric Cheng 	 * among the interrupt thread, the polling thread
18768275SEric Cheng 	 * (races here cause fatal errors such as the rcrincon
18778275SEric Cheng 	 * bit being set) and the setting of the poll_flag.
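	 * nxge_enable_poll() and nxge_disable_poll() below take this
	 * same lock before they change poll_flag.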
18788275SEric Cheng */ 18798275SEric Cheng MUTEX_ENTER(&rcr_ring->lock); 18808275SEric Cheng 18813859Sml29623 /* 18823859Sml29623 * Get the control and status for this channel. 18833859Sml29623 */ 18843859Sml29623 channel = ldvp->channel; 18853859Sml29623 ldgp = ldvp->ldgp; 18868275SEric Cheng 18878275SEric Cheng if (!isLDOMguest(nxgep)) { 18888275SEric Cheng if (!nxgep->rx_channel_started[channel]) { 18898275SEric Cheng NXGE_DEBUG_MSG((nxgep, INT_CTL, 18908275SEric Cheng "<== nxge_rx_intr: channel is not started")); 18918275SEric Cheng MUTEX_EXIT(&rcr_ring->lock); 18928275SEric Cheng return (DDI_INTR_CLAIMED); 18938275SEric Cheng } 18948275SEric Cheng } 18958275SEric Cheng 18968275SEric Cheng ASSERT(rcr_ring->ldgp == ldgp); 18978275SEric Cheng ASSERT(rcr_ring->ldvp == ldvp); 18988275SEric Cheng 18993859Sml29623 RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value); 19003859Sml29623 19013859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d " 19026929Smisaki "cs 0x%016llx rcrto 0x%x rcrthres %x", 19036929Smisaki channel, 19046929Smisaki cs.value, 19056929Smisaki cs.bits.hdw.rcrto, 19066929Smisaki cs.bits.hdw.rcrthres)); 19073859Sml29623 19088275SEric Cheng mp = nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, cs); 19093859Sml29623 19103859Sml29623 /* error events. */ 19113859Sml29623 if (cs.value & RX_DMA_CTL_STAT_ERROR) { 19126495Sspeer (void) nxge_rx_err_evnts(nxgep, channel, cs); 19133859Sml29623 } 19143859Sml29623 19153859Sml29623 /* 19163859Sml29623 * Enable the mailbox update interrupt if we want 19173859Sml29623 * to use mailbox. We probably don't need to use 19183859Sml29623 * mailbox as it only saves us one pio read. 19193859Sml29623 * Also write 1 to rcrthres and rcrto to clear 19203859Sml29623 * these two edge triggered bits. 19213859Sml29623 */ 19223859Sml29623 cs.value &= RX_DMA_CTL_STAT_WR1C; 19238275SEric Cheng cs.bits.hdw.mex = rcr_ring->poll_flag ? 0 : 1; 19243859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel, 19256929Smisaki cs.value); 19263859Sml29623 19273859Sml29623 /* 19288275SEric Cheng * If the polling mode is enabled, disable the interrupt. 19293859Sml29623 */ 19308275SEric Cheng if (rcr_ring->poll_flag) { 19318275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19328275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p ldvp $%p " 19338275SEric Cheng "(disabling interrupts)", channel, ldgp, ldvp)); 19348275SEric Cheng /* 19358275SEric Cheng * Disarm this logical group if this is a single device 19368275SEric Cheng * group. 19378275SEric Cheng */ 19388275SEric Cheng if (ldgp->nldvs == 1) { 19398275SEric Cheng ldgimgm_t mgm; 19408275SEric Cheng mgm.value = 0; 19418275SEric Cheng mgm.bits.ldw.arm = 0; 19426495Sspeer NXGE_REG_WR64(handle, 19438275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value); 19446495Sspeer } 19458275SEric Cheng } else { 19468275SEric Cheng /* 1947*8400SNicolas.Droux@Sun.COM * Rearm this logical group if this is a single device 1948*8400SNicolas.Droux@Sun.COM * group. 
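		 * (Writing LDGIMGM with the arm bit set re-enables the
		 * group's interrupt and reloads its countdown from
		 * ldg_timer.)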
19498275SEric Cheng */ 19508275SEric Cheng if (ldgp->nldvs == 1) { 19518275SEric Cheng if (isLDOMguest(nxgep)) { 19528275SEric Cheng nxge_hio_ldgimgn(nxgep, ldgp); 19538275SEric Cheng } else { 19548275SEric Cheng ldgimgm_t mgm; 19558275SEric Cheng 19568275SEric Cheng mgm.value = 0; 19578275SEric Cheng mgm.bits.ldw.arm = 1; 19588275SEric Cheng mgm.bits.ldw.timer = ldgp->ldg_timer; 19598275SEric Cheng 19608275SEric Cheng NXGE_REG_WR64(handle, 19618275SEric Cheng LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), 19628275SEric Cheng mgm.value); 19638275SEric Cheng } 19648275SEric Cheng } 19658275SEric Cheng 19668275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 19678275SEric Cheng "==> nxge_rx_intr: rdc %d ldgp $%p " 19688275SEric Cheng "exiting ISR (and call mac_rx_ring)", channel, ldgp)); 19693859Sml29623 } 19708275SEric Cheng MUTEX_EXIT(&rcr_ring->lock); 19718275SEric Cheng 19728275SEric Cheng if (mp) { 19738275SEric Cheng if (!isLDOMguest(nxgep)) 19748275SEric Cheng mac_rx_ring(nxgep->mach, rcr_ring->rcr_mac_handle, mp, 19758275SEric Cheng rcr_ring->rcr_gen_num); 19768275SEric Cheng #if defined(sun4v) 19778275SEric Cheng else { /* isLDOMguest(nxgep) */ 19788275SEric Cheng nxge_hio_data_t *nhd = (nxge_hio_data_t *) 19798275SEric Cheng nxgep->nxge_hw_p->hio; 19808275SEric Cheng nx_vio_fp_t *vio = &nhd->hio.vio; 19818275SEric Cheng 19828275SEric Cheng if (vio->cb.vio_net_rx_cb) { 19838275SEric Cheng (*vio->cb.vio_net_rx_cb) 19848275SEric Cheng (nxgep->hio_vr->vhp, mp); 19858275SEric Cheng } 19868275SEric Cheng } 19878275SEric Cheng #endif 19888275SEric Cheng } 19898275SEric Cheng NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: DDI_INTR_CLAIMED")); 19908275SEric Cheng return (DDI_INTR_CLAIMED); 19913859Sml29623 } 19923859Sml29623 19933859Sml29623 /* 19943859Sml29623 * Process the packets received in the specified logical device 19953859Sml29623 * and pass up a chain of message blocks to the upper layer. 19968275SEric Cheng * The RCR ring lock must be held before calling this function. 
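 * (nxge_rx_intr() calls it with rcr_ring->lock held; the same lock
 * also protects the poll_flag test inside nxge_rx_pkts().)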
19973859Sml29623 */ 19988275SEric Cheng static mblk_t * 19996495Sspeer nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, rx_dma_ctl_stat_t cs) 20003859Sml29623 { 20013859Sml29623 p_mblk_t mp; 20023859Sml29623 p_rx_rcr_ring_t rcrp; 20033859Sml29623 20043859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring")); 20056495Sspeer rcrp = nxgep->rx_rcr_rings->rcr_rings[vindex]; 20068275SEric Cheng 20078275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 20088275SEric Cheng "==> nxge_rx_pkts_vring: (calling nxge_rx_pkts)rdc %d " 20098275SEric Cheng "rcr_mac_handle $%p ", rcrp->rdc, rcrp->rcr_mac_handle)); 20106495Sspeer if ((mp = nxge_rx_pkts(nxgep, rcrp, cs, -1)) == NULL) { 20113859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20126929Smisaki "<== nxge_rx_pkts_vring: no mp")); 20138275SEric Cheng return (NULL); 20143859Sml29623 } 20153859Sml29623 20163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p", 20176929Smisaki mp)); 20183859Sml29623 20193859Sml29623 #ifdef NXGE_DEBUG 20203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20216929Smisaki "==> nxge_rx_pkts_vring:calling mac_rx " 20226929Smisaki "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p " 20236929Smisaki "mac_handle $%p", 20246929Smisaki mp->b_wptr - mp->b_rptr, 20256929Smisaki mp, mp->b_cont, mp->b_next, 20266929Smisaki rcrp, rcrp->rcr_mac_handle)); 20273859Sml29623 20283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20296929Smisaki "==> nxge_rx_pkts_vring: dump packets " 20306929Smisaki "(mp $%p b_rptr $%p b_wptr $%p):\n %s", 20316929Smisaki mp, 20326929Smisaki mp->b_rptr, 20336929Smisaki mp->b_wptr, 20346929Smisaki nxge_dump_packet((char *)mp->b_rptr, 20356929Smisaki mp->b_wptr - mp->b_rptr))); 20363859Sml29623 if (mp->b_cont) { 20373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20386929Smisaki "==> nxge_rx_pkts_vring: dump b_cont packets " 20396929Smisaki "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s", 20406929Smisaki mp->b_cont, 20416929Smisaki mp->b_cont->b_rptr, 20426929Smisaki mp->b_cont->b_wptr, 20436929Smisaki nxge_dump_packet((char *)mp->b_cont->b_rptr, 20446929Smisaki mp->b_cont->b_wptr - mp->b_cont->b_rptr))); 20453859Sml29623 } 20463859Sml29623 if (mp->b_next) { 20473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 20486929Smisaki "==> nxge_rx_pkts_vring: dump next packets " 20496929Smisaki "(b_rptr $%p): %s", 20506929Smisaki mp->b_next->b_rptr, 20516929Smisaki nxge_dump_packet((char *)mp->b_next->b_rptr, 20526929Smisaki mp->b_next->b_wptr - mp->b_next->b_rptr))); 20533859Sml29623 } 20543859Sml29623 #endif 20558275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 20568275SEric Cheng "<== nxge_rx_pkts_vring: returning rdc %d rcr_mac_handle $%p ", 20578275SEric Cheng rcrp->rdc, rcrp->rcr_mac_handle)); 20588275SEric Cheng 20598275SEric Cheng return (mp); 20603859Sml29623 } 20613859Sml29623 20623859Sml29623 20633859Sml29623 /* 20643859Sml29623 * This routine is the main packet receive processing function. 20653859Sml29623 * It gets the packet type, error code, and buffer related 20663859Sml29623 * information from the receive completion entry. 20673859Sml29623 * How many completion entries to process is based on the number of packets 20683859Sml29623 * queued by the hardware, a hardware maintained tail pointer 20693859Sml29623 * and a configurable receive packet count. 20703859Sml29623 * 20713859Sml29623 * A chain of message blocks will be created as result of processing 20723859Sml29623 * the completion entries. 
This chain of message blocks will be returned and
20733859Sml29623  * a hardware control status register will be updated with the number of
20743859Sml29623  * packets that were removed from the hardware queue.
20753859Sml29623  *
20768275SEric Cheng  * The RCR ring lock is held when entering this function.
20773859Sml29623  */
20786495Sspeer static mblk_t *
20796495Sspeer nxge_rx_pkts(p_nxge_t nxgep, p_rx_rcr_ring_t rcr_p, rx_dma_ctl_stat_t cs,
20806495Sspeer     int bytes_to_pickup)
20813859Sml29623 {
20823859Sml29623 	npi_handle_t handle;
20833859Sml29623 	uint8_t channel;
20843859Sml29623 	uint32_t comp_rd_index;
20853859Sml29623 	p_rcr_entry_t rcr_desc_rd_head_p;
20863859Sml29623 	p_rcr_entry_t rcr_desc_rd_head_pp;
20873859Sml29623 	p_mblk_t nmp, mp_cont, head_mp, *tail_mp;
20883859Sml29623 	uint16_t qlen, nrcr_read, npkt_read;
20896495Sspeer 	uint32_t qlen_hw;
20903859Sml29623 	boolean_t multi;
20916495Sspeer 	rcrcfig_b_t rcr_cfg_b;
20926495Sspeer 	int totallen = 0;
20933859Sml29623 #if defined(_BIG_ENDIAN)
20943859Sml29623 	npi_status_t rs = NPI_SUCCESS;
20953859Sml29623 #endif
20963859Sml29623 
20978275SEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "==> nxge_rx_pkts: "
20986929Smisaki 	    "channel %d", rcr_p->rdc));
20993859Sml29623 
21003859Sml29623 	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
21013859Sml29623 		return (NULL);
21023859Sml29623 	}
21033859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
21043859Sml29623 	channel = rcr_p->rdc;
21053859Sml29623 
21063859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
21076929Smisaki 	    "==> nxge_rx_pkts: START: rcr channel %d "
21086929Smisaki 	    "head_p $%p head_pp $%p index %d ",
21096929Smisaki 	    channel, rcr_p->rcr_desc_rd_head_p,
21106929Smisaki 	    rcr_p->rcr_desc_rd_head_pp,
21116929Smisaki 	    rcr_p->comp_rd_index));
21123859Sml29623 
21133859Sml29623 
21143859Sml29623 #if !defined(_BIG_ENDIAN)
21153859Sml29623 	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
21163859Sml29623 #else
21173859Sml29623 	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
21183859Sml29623 	if (rs != NPI_SUCCESS) {
21196495Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts: "
21203859Sml29623 		    "channel %d, get qlen failed 0x%08x",
21216929Smisaki 		    channel, rs));
21223859Sml29623 		return (NULL);
21233859Sml29623 	}
21243859Sml29623 #endif
21253859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
21266929Smisaki 	    "qlen %d", channel, qlen));
21273859Sml29623 
21283859Sml29623 
21293859Sml29623 
21303859Sml29623 	if (!qlen) {
21318275SEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
21326929Smisaki 		    "==> nxge_rx_pkts:rcr channel %d "
21336929Smisaki 		    "qlen %d (no pkts)", channel, qlen));
21343859Sml29623 
21353859Sml29623 		return (NULL);
21363859Sml29623 	}
21373859Sml29623 
21383859Sml29623 	comp_rd_index = rcr_p->comp_rd_index;
21393859Sml29623 
21403859Sml29623 	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
21413859Sml29623 	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
21423859Sml29623 	nrcr_read = npkt_read = 0;
21433859Sml29623 
21443859Sml29623 	/*
21453859Sml29623 	 * Number of packets queued.
21463859Sml29623 	 * (A jumbo or multi-buffer packet is counted as only one
21473859Sml29623 	 * packet, even though it may take more than one completion entry.)
21483859Sml29623 	 */
21493859Sml29623 	qlen_hw = (qlen < nxge_max_rx_pkts) ?
21506929Smisaki qlen : nxge_max_rx_pkts; 21513859Sml29623 head_mp = NULL; 21523859Sml29623 tail_mp = &head_mp; 21533859Sml29623 nmp = mp_cont = NULL; 21543859Sml29623 multi = B_FALSE; 21553859Sml29623 21563859Sml29623 while (qlen_hw) { 21573859Sml29623 21583859Sml29623 #ifdef NXGE_DEBUG 21593859Sml29623 nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p); 21603859Sml29623 #endif 21613859Sml29623 /* 21623859Sml29623 * Process one completion ring entry. 21633859Sml29623 */ 21643859Sml29623 nxge_receive_packet(nxgep, 21656929Smisaki rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont); 21663859Sml29623 21673859Sml29623 /* 21683859Sml29623 * message chaining modes 21693859Sml29623 */ 21703859Sml29623 if (nmp) { 21713859Sml29623 nmp->b_next = NULL; 21723859Sml29623 if (!multi && !mp_cont) { /* frame fits a partition */ 21733859Sml29623 *tail_mp = nmp; 21743859Sml29623 tail_mp = &nmp->b_next; 21756495Sspeer totallen += MBLKL(nmp); 21763859Sml29623 nmp = NULL; 21773859Sml29623 } else if (multi && !mp_cont) { /* first segment */ 21783859Sml29623 *tail_mp = nmp; 21793859Sml29623 tail_mp = &nmp->b_cont; 21806495Sspeer totallen += MBLKL(nmp); 21813859Sml29623 } else if (multi && mp_cont) { /* mid of multi segs */ 21823859Sml29623 *tail_mp = mp_cont; 21833859Sml29623 tail_mp = &mp_cont->b_cont; 21846495Sspeer totallen += MBLKL(mp_cont); 21853859Sml29623 } else if (!multi && mp_cont) { /* last segment */ 21863859Sml29623 *tail_mp = mp_cont; 21873859Sml29623 tail_mp = &nmp->b_next; 21886495Sspeer totallen += MBLKL(mp_cont); 21893859Sml29623 nmp = NULL; 21903859Sml29623 } 21913859Sml29623 } 21923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21936929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 21946929Smisaki "before updating: multi %d " 21956929Smisaki "nrcr_read %d " 21966929Smisaki "npk read %d " 21976929Smisaki "head_pp $%p index %d ", 21986929Smisaki channel, 21996929Smisaki multi, 22006929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22016929Smisaki comp_rd_index)); 22023859Sml29623 22033859Sml29623 if (!multi) { 22043859Sml29623 qlen_hw--; 22053859Sml29623 npkt_read++; 22063859Sml29623 } 22073859Sml29623 22083859Sml29623 /* 22093859Sml29623 * Update the next read entry. 
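		 * Both the software completion index and the two
		 * descriptor pointers wrap: NEXT_ENTRY() applies
		 * comp_wrap_mask, and NEXT_ENTRY_PTR() moves from
		 * rcr_desc_last_p back to rcr_desc_first_p when the
		 * end of the ring is reached.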
22103859Sml29623 */ 22113859Sml29623 comp_rd_index = NEXT_ENTRY(comp_rd_index, 22126929Smisaki rcr_p->comp_wrap_mask); 22133859Sml29623 22143859Sml29623 rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p, 22156929Smisaki rcr_p->rcr_desc_first_p, 22166929Smisaki rcr_p->rcr_desc_last_p); 22173859Sml29623 22183859Sml29623 nrcr_read++; 22193859Sml29623 22203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22216929Smisaki "<== nxge_rx_pkts: (SAM, process one packet) " 22226929Smisaki "nrcr_read %d", 22236929Smisaki nrcr_read)); 22243859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22256929Smisaki "==> nxge_rx_pkts: loop: rcr channel %d " 22266929Smisaki "multi %d " 22276929Smisaki "nrcr_read %d " 22286929Smisaki "npk read %d " 22296929Smisaki "head_pp $%p index %d ", 22306929Smisaki channel, 22316929Smisaki multi, 22326929Smisaki nrcr_read, npkt_read, rcr_desc_rd_head_pp, 22336929Smisaki comp_rd_index)); 22343859Sml29623 22356495Sspeer if ((bytes_to_pickup != -1) && 22366495Sspeer (totallen >= bytes_to_pickup)) { 22376495Sspeer break; 22386495Sspeer } 22398275SEric Cheng 22408275SEric Cheng /* limit the number of packets for interrupt */ 22418275SEric Cheng if (!(rcr_p->poll_flag)) { 22428275SEric Cheng if (npkt_read == nxge_max_intr_pkts) { 22438275SEric Cheng break; 22448275SEric Cheng } 22458275SEric Cheng } 22463859Sml29623 } 22473859Sml29623 22483859Sml29623 rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp; 22493859Sml29623 rcr_p->comp_rd_index = comp_rd_index; 22503859Sml29623 rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p; 22513859Sml29623 22523859Sml29623 if ((nxgep->intr_timeout != rcr_p->intr_timeout) || 22536929Smisaki (nxgep->intr_threshold != rcr_p->intr_threshold)) { 22543859Sml29623 rcr_p->intr_timeout = nxgep->intr_timeout; 22553859Sml29623 rcr_p->intr_threshold = nxgep->intr_threshold; 22563859Sml29623 rcr_cfg_b.value = 0x0ULL; 22573859Sml29623 if (rcr_p->intr_timeout) 22583859Sml29623 rcr_cfg_b.bits.ldw.entout = 1; 22593859Sml29623 rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout; 22603859Sml29623 rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold; 22613859Sml29623 RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, 22626929Smisaki channel, rcr_cfg_b.value); 22633859Sml29623 } 22643859Sml29623 22653859Sml29623 cs.bits.ldw.pktread = npkt_read; 22663859Sml29623 cs.bits.ldw.ptrread = nrcr_read; 22673859Sml29623 RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, 22686929Smisaki channel, cs.value); 22693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22706929Smisaki "==> nxge_rx_pkts: EXIT: rcr channel %d " 22716929Smisaki "head_pp $%p index %016llx ", 22726929Smisaki channel, 22736929Smisaki rcr_p->rcr_desc_rd_head_pp, 22746929Smisaki rcr_p->comp_rd_index)); 22753859Sml29623 /* 22763859Sml29623 * Update RCR buffer pointer read and number of packets 22773859Sml29623 * read. 
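	 * (Writing pktread/ptrread back through RX_DMA_CTL_STAT is what
	 * retires the processed entries from the hardware queue length.)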
22783859Sml29623 	 */
22793859Sml29623 
22808275SEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, "<== nxge_rx_pkts: return "
22818275SEric Cheng 	    "channel %d", rcr_p->rdc));
22828275SEric Cheng 
22833859Sml29623 	return (head_mp);
22843859Sml29623 }
22853859Sml29623 
22863859Sml29623 void
22873859Sml29623 nxge_receive_packet(p_nxge_t nxgep,
22883859Sml29623     p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
22893859Sml29623     boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
22903859Sml29623 {
22913859Sml29623 	p_mblk_t nmp = NULL;
22923859Sml29623 	uint64_t multi;
22933859Sml29623 	uint64_t dcf_err;
22943859Sml29623 	uint8_t channel;
22953859Sml29623 
22963859Sml29623 	boolean_t first_entry = B_TRUE;
22973859Sml29623 	boolean_t is_tcp_udp = B_FALSE;
22983859Sml29623 	boolean_t buffer_free = B_FALSE;
22993859Sml29623 	boolean_t error_send_up = B_FALSE;
23003859Sml29623 	uint8_t error_type;
23013859Sml29623 	uint16_t l2_len;
23023859Sml29623 	uint16_t skip_len;
23033859Sml29623 	uint8_t pktbufsz_type;
23043859Sml29623 	uint64_t rcr_entry;
23053859Sml29623 	uint64_t *pkt_buf_addr_pp;
23063859Sml29623 	uint64_t *pkt_buf_addr_p;
23073859Sml29623 	uint32_t buf_offset;
23083859Sml29623 	uint32_t bsize;
23093859Sml29623 	uint32_t error_disp_cnt;
23103859Sml29623 	uint32_t msg_index;
23113859Sml29623 	p_rx_rbr_ring_t rx_rbr_p;
23123859Sml29623 	p_rx_msg_t *rx_msg_ring_p;
23133859Sml29623 	p_rx_msg_t rx_msg_p;
23143859Sml29623 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
23153859Sml29623 	nxge_status_t status = NXGE_OK;
23163859Sml29623 	boolean_t is_valid = B_FALSE;
23173859Sml29623 	p_nxge_rx_ring_stats_t rdc_stats;
23183859Sml29623 	uint32_t bytes_read;
23193859Sml29623 	uint64_t pkt_type;
23203859Sml29623 	uint64_t frag;
23216028Ssbehera 	boolean_t pkt_too_long_err = B_FALSE;
23223859Sml29623 #ifdef NXGE_DEBUG
23233859Sml29623 	int dump_len;
23243859Sml29623 #endif
23253859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet"));
23263859Sml29623 	first_entry = (*mp == NULL) ?
B_TRUE : B_FALSE; 23273859Sml29623 23283859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 23293859Sml29623 23303859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK); 23313859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 23323859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 23333859Sml29623 23343859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 23353859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK); 23363859Sml29623 23373859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 23383859Sml29623 23393859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 23406929Smisaki RCR_PKTBUFSZ_SHIFT); 23415125Sjoycey #if defined(__i386) 23425125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 23436929Smisaki RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 23445125Sjoycey #else 23453859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 23466929Smisaki RCR_PKT_BUF_ADDR_SHIFT); 23475125Sjoycey #endif 23483859Sml29623 23493859Sml29623 channel = rcr_p->rdc; 23503859Sml29623 23513859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23526929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23536929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23546929Smisaki "error_type 0x%x pkt_type 0x%x " 23556929Smisaki "pktbufsz_type %d ", 23566929Smisaki rcr_desc_rd_head_p, 23576929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23586929Smisaki multi, 23596929Smisaki error_type, 23606929Smisaki pkt_type, 23616929Smisaki pktbufsz_type)); 23623859Sml29623 23633859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23646929Smisaki "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 23656929Smisaki "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 23666929Smisaki "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 23676929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 23686929Smisaki multi, 23696929Smisaki error_type, 23706929Smisaki pkt_type)); 23713859Sml29623 23723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23736929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 23746929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 23756929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 23763859Sml29623 23773859Sml29623 /* get the stats ptr */ 23783859Sml29623 rdc_stats = rcr_p->rdc_stats; 23793859Sml29623 23803859Sml29623 if (!l2_len) { 23813859Sml29623 23823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23836929Smisaki "<== nxge_receive_packet: failed: l2 length is 0.")); 23843859Sml29623 return; 23853859Sml29623 } 23863859Sml29623 23876028Ssbehera /* 23888275SEric Cheng * Software workaround for BMAC hardware limitation that allows 23896028Ssbehera * maxframe size of 1526, instead of 1522 for non-jumbo and 0x2406 23906028Ssbehera * instead of 0x2400 for jumbo. 23916028Ssbehera */ 23926028Ssbehera if (l2_len > nxgep->mac.maxframesize) { 23936028Ssbehera pkt_too_long_err = B_TRUE; 23946028Ssbehera } 23956028Ssbehera 23964185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. 
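	 * ETHERFCSL (4 bytes) is therefore subtracted from l2_len before
	 * the length is used to size the payload.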
*/ 23974185Sspeer l2_len -= ETHERFCSL; 23984185Sspeer 23993859Sml29623 /* shift 6 bits to get the full io address */ 24005125Sjoycey #if defined(__i386) 24015125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 24026929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24035125Sjoycey #else 24043859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 24056929Smisaki RCR_PKT_BUF_ADDR_SHIFT_FULL); 24065125Sjoycey #endif 24073859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24086929Smisaki "==> (rbr) nxge_receive_packet: entry 0x%0llx " 24096929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24106929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24113859Sml29623 24123859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p; 24133859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 24143859Sml29623 24153859Sml29623 if (first_entry) { 24163859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL : 24176929Smisaki RXDMA_HDR_SIZE_DEFAULT); 24183859Sml29623 24193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24206929Smisaki "==> nxge_receive_packet: first entry 0x%016llx " 24216929Smisaki "pkt_buf_addr_pp $%p l2_len %d hdr %d", 24226929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len, 24236929Smisaki hdr_size)); 24243859Sml29623 } 24253859Sml29623 24263859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock); 24273859Sml29623 24283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24296929Smisaki "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 24306929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24316929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24323859Sml29623 24333859Sml29623 /* 24343859Sml29623 * Packet buffer address in the completion entry points 24353859Sml29623 * to the starting buffer address (offset 0). 24363859Sml29623 * Use the starting buffer address to locate the corresponding 24373859Sml29623 * kernel address. 
24383859Sml29623 */ 24393859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 24406929Smisaki pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 24416929Smisaki &buf_offset, 24426929Smisaki &msg_index); 24433859Sml29623 24443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24456929Smisaki "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 24466929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24476929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24483859Sml29623 24493859Sml29623 if (status != NXGE_OK) { 24503859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24513859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24526929Smisaki "<== nxge_receive_packet: found vaddr failed %d", 24536929Smisaki status)); 24543859Sml29623 return; 24553859Sml29623 } 24563859Sml29623 24573859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24586929Smisaki "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 24596929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24606929Smisaki rcr_entry, pkt_buf_addr_pp, l2_len)); 24613859Sml29623 24623859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24636929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24646929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24656929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24663859Sml29623 24673859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 24683859Sml29623 24693859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24706929Smisaki "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 24716929Smisaki "full pkt_buf_addr_pp $%p l2_len %d", 24726929Smisaki msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 24733859Sml29623 24743859Sml29623 switch (pktbufsz_type) { 24753859Sml29623 case RCR_PKTBUFSZ_0: 24763859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 24773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24786929Smisaki "==> nxge_receive_packet: 0 buf %d", bsize)); 24793859Sml29623 break; 24803859Sml29623 case RCR_PKTBUFSZ_1: 24813859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 24823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24836929Smisaki "==> nxge_receive_packet: 1 buf %d", bsize)); 24843859Sml29623 break; 24853859Sml29623 case RCR_PKTBUFSZ_2: 24863859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 24873859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 24886929Smisaki "==> nxge_receive_packet: 2 buf %d", bsize)); 24893859Sml29623 break; 24903859Sml29623 case RCR_SINGLE_BLOCK: 24913859Sml29623 bsize = rx_msg_p->block_size; 24923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24936929Smisaki "==> nxge_receive_packet: single %d", bsize)); 24943859Sml29623 24953859Sml29623 break; 24963859Sml29623 default: 24973859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24983859Sml29623 return; 24993859Sml29623 } 25003859Sml29623 25013859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 25026929Smisaki (buf_offset + sw_offset_bytes), 25036929Smisaki (hdr_size + l2_len), 25046929Smisaki DDI_DMA_SYNC_FORCPU); 25053859Sml29623 25063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25076929Smisaki "==> nxge_receive_packet: after first dump:usage count")); 25083859Sml29623 25093859Sml29623 if (rx_msg_p->cur_usage_cnt == 0) { 25103859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 25113859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 25123859Sml29623 if (rx_rbr_p->rbr_consumed < 25136929Smisaki rx_rbr_p->rbr_threshold_hi) { 25143859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 25156929Smisaki ((rx_rbr_p->rbr_consumed >= 25166929Smisaki rx_rbr_p->rbr_threshold_lo) && 25176929Smisaki (rx_rbr_p->rbr_bufsize_type >= 25186929Smisaki pktbufsz_type))) { 25193859Sml29623 rx_msg_p->rx_use_bcopy = 
B_TRUE; 25203859Sml29623 } 25213859Sml29623 } else { 25223859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 25233859Sml29623 } 25243859Sml29623 } 25253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25266929Smisaki "==> nxge_receive_packet: buf %d (new block) ", 25276929Smisaki bsize)); 25283859Sml29623 25293859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 25303859Sml29623 rx_msg_p->pkt_buf_size = bsize; 25313859Sml29623 rx_msg_p->cur_usage_cnt = 1; 25323859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 25333859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25346929Smisaki "==> nxge_receive_packet: buf %d " 25356929Smisaki "(single block) ", 25366929Smisaki bsize)); 25373859Sml29623 /* 25383859Sml29623 * Buffer can be reused once the free function 25393859Sml29623 * is called. 25403859Sml29623 */ 25413859Sml29623 rx_msg_p->max_usage_cnt = 1; 25423859Sml29623 buffer_free = B_TRUE; 25433859Sml29623 } else { 25443859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 25453859Sml29623 if (rx_msg_p->max_usage_cnt == 1) { 25463859Sml29623 buffer_free = B_TRUE; 25473859Sml29623 } 25483859Sml29623 } 25493859Sml29623 } else { 25503859Sml29623 rx_msg_p->cur_usage_cnt++; 25513859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 25523859Sml29623 buffer_free = B_TRUE; 25533859Sml29623 } 25543859Sml29623 } 25553859Sml29623 25563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25573859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 25586929Smisaki msg_index, l2_len, 25596929Smisaki rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 25603859Sml29623 25616028Ssbehera if ((error_type) || (dcf_err) || (pkt_too_long_err)) { 25623859Sml29623 rdc_stats->ierrors++; 25633859Sml29623 if (dcf_err) { 25643859Sml29623 rdc_stats->dcf_err++; 25653859Sml29623 #ifdef NXGE_DEBUG 25663859Sml29623 if (!rdc_stats->dcf_err) { 25673859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25683859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr" 25693859Sml29623 " 0x%llx", channel, rcr_entry)); 25703859Sml29623 } 25713859Sml29623 #endif 25723859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 25736929Smisaki NXGE_FM_EREPORT_RDMC_DCF_ERR); 25746028Ssbehera } else if (pkt_too_long_err) { 25756028Ssbehera rdc_stats->pkt_too_long_err++; 25766028Ssbehera NXGE_DEBUG_MSG((nxgep, RX_CTL, " nxge_receive_packet:" 25776028Ssbehera " channel %d packet length [%d] > " 25786028Ssbehera "maxframesize [%d]", channel, l2_len + ETHERFCSL, 25796028Ssbehera nxgep->mac.maxframesize)); 25803859Sml29623 } else { 25813859Sml29623 /* Update error stats */ 25823859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 25833859Sml29623 rdc_stats->errlog.compl_err_type = error_type; 25843859Sml29623 25853859Sml29623 switch (error_type) { 25865523Syc148097 /* 25875523Syc148097 * Do not send FMA ereport for RCR_L2_ERROR and 25885523Syc148097 * RCR_L4_CSUM_ERROR because most likely they indicate 25895523Syc148097 * back pressure rather than HW failures. 
25905523Syc148097 			 */
25915165Syc148097 			case RCR_L2_ERROR:
25925165Syc148097 				rdc_stats->l2_err++;
25935165Syc148097 				if (rdc_stats->l2_err <
25945165Syc148097 				    error_disp_cnt) {
25955165Syc148097 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
25965165Syc148097 					    " nxge_receive_packet:"
25975165Syc148097 					    " channel %d RCR L2_ERROR",
25985165Syc148097 					    channel));
25995165Syc148097 				}
26005165Syc148097 				break;
26015165Syc148097 			case RCR_L4_CSUM_ERROR:
26025165Syc148097 				error_send_up = B_TRUE;
26035165Syc148097 				rdc_stats->l4_cksum_err++;
26045165Syc148097 				if (rdc_stats->l4_cksum_err <
26055165Syc148097 				    error_disp_cnt) {
26063859Sml29623 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26075165Syc148097 					    " nxge_receive_packet:"
26085165Syc148097 					    " channel %d"
26095165Syc148097 					    " RCR L4_CSUM_ERROR", channel));
26105165Syc148097 				}
26115165Syc148097 				break;
26125523Syc148097 			/*
26135523Syc148097 			 * Do not send FMA ereport for RCR_FFLP_SOFT_ERROR and
26145523Syc148097 			 * RCR_ZCP_SOFT_ERROR because they reflect the same
26155523Syc148097 			 * FFLP and ZCP errors that have been reported by
26165523Syc148097 			 * nxge_fflp.c and nxge_zcp.c.
26175523Syc148097 			 */
26185165Syc148097 			case RCR_FFLP_SOFT_ERROR:
26195165Syc148097 				error_send_up = B_TRUE;
26205165Syc148097 				rdc_stats->fflp_soft_err++;
26215165Syc148097 				if (rdc_stats->fflp_soft_err <
26225165Syc148097 				    error_disp_cnt) {
26235165Syc148097 					NXGE_ERROR_MSG((nxgep,
26245165Syc148097 					    NXGE_ERR_CTL,
26255165Syc148097 					    " nxge_receive_packet:"
26265165Syc148097 					    " channel %d"
26275165Syc148097 					    " RCR FFLP_SOFT_ERROR", channel));
26285165Syc148097 				}
26295165Syc148097 				break;
26305165Syc148097 			case RCR_ZCP_SOFT_ERROR:
26315165Syc148097 				error_send_up = B_TRUE;
26325165Syc148097 				rdc_stats->zcp_soft_err++;
26335165Syc148097 				if (rdc_stats->zcp_soft_err <
26345165Syc148097 				    error_disp_cnt)
26355165Syc148097 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26365165Syc148097 					    " nxge_receive_packet: Channel %d"
26375165Syc148097 					    " RCR ZCP_SOFT_ERROR", channel));
26385165Syc148097 				break;
26395165Syc148097 			default:
26405165Syc148097 				rdc_stats->rcr_unknown_err++;
26415165Syc148097 				if (rdc_stats->rcr_unknown_err
26425165Syc148097 				    < error_disp_cnt) {
26435165Syc148097 					NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
26445165Syc148097 					    " nxge_receive_packet: Channel %d"
26455165Syc148097 					    " RCR entry 0x%llx error 0x%x",
26465165Syc148097 					    channel, rcr_entry, error_type));
26475165Syc148097 				}
26485165Syc148097 				break;
26493859Sml29623 			}
26503859Sml29623 		}
26513859Sml29623 
26523859Sml29623 		/*
26533859Sml29623 		 * Update and repost buffer block if max usage
26543859Sml29623 		 * count is reached.
26553859Sml29623 		 */
26563859Sml29623 		if (error_send_up == B_FALSE) {
26574874Sml29623 			atomic_inc_32(&rx_msg_p->ref_cnt);
26583859Sml29623 			if (buffer_free == B_TRUE) {
26593859Sml29623 				rx_msg_p->free = B_TRUE;
26603859Sml29623 			}
26613859Sml29623 
26623859Sml29623 			MUTEX_EXIT(&rx_rbr_p->lock);
26633859Sml29623 			nxge_freeb(rx_msg_p);
26643859Sml29623 			return;
26653859Sml29623 		}
26663859Sml29623 	}
26673859Sml29623 
26683859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX2_CTL,
26696929Smisaki 	    "==> nxge_receive_packet: DMA sync second "));
26703859Sml29623 
26715165Syc148097 	bytes_read = rcr_p->rcvd_pkt_bytes;
26723859Sml29623 	skip_len = sw_offset_bytes + hdr_size;
26733859Sml29623 	if (!rx_msg_p->rx_use_bcopy) {
26744874Sml29623 		/*
26754874Sml29623 		 * For loaned-up buffers, the driver increments the
26764874Sml29623 		 * reference count first and then sets the free state.
26774874Sml29623 */ 26785165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 26795165Syc148097 if (first_entry) { 26805165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len]; 26815165Syc148097 if (l2_len < bsize - skip_len) { 26825165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len]; 26835165Syc148097 } else { 26845165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize 26855165Syc148097 - skip_len]; 26865165Syc148097 } 26875165Syc148097 } else { 26885165Syc148097 if (l2_len - bytes_read < bsize) { 26895165Syc148097 nmp->b_wptr = 26905165Syc148097 &nmp->b_rptr[l2_len - bytes_read]; 26915165Syc148097 } else { 26925165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 26935165Syc148097 } 26945165Syc148097 } 26955165Syc148097 } 26963859Sml29623 } else { 26975165Syc148097 if (first_entry) { 26985165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 26995165Syc148097 l2_len < bsize - skip_len ? 27005165Syc148097 l2_len : bsize - skip_len); 27015165Syc148097 } else { 27025165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 27035165Syc148097 l2_len - bytes_read < bsize ? 27045165Syc148097 l2_len - bytes_read : bsize); 27055165Syc148097 } 27063859Sml29623 } 27073859Sml29623 if (nmp != NULL) { 27087145Syc148097 if (first_entry) { 27097145Syc148097 /* 27107145Syc148097 * Jumbo packets may be received with more than one 27117145Syc148097 * buffer, increment ipackets for the first entry only. 27127145Syc148097 */ 27137145Syc148097 rdc_stats->ipackets++; 27147145Syc148097 27157145Syc148097 /* Update ibytes for kstat. */ 27167145Syc148097 rdc_stats->ibytes += skip_len 27177145Syc148097 + l2_len < bsize ? l2_len : bsize; 27187145Syc148097 /* 27197145Syc148097 * Update the number of bytes read so far for the 27207145Syc148097 * current frame. 27217145Syc148097 */ 27225165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 27237145Syc148097 } else { 27247145Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 
27257145Syc148097 			    l2_len - bytes_read : bsize;
27263859Sml29623 			bytes_read += nmp->b_wptr - nmp->b_rptr;
27277145Syc148097 		}
27285165Syc148097 
27295165Syc148097 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
27305165Syc148097 		    "==> nxge_receive_packet after dupb: "
27315165Syc148097 		    "rbr consumed %d "
27325165Syc148097 		    "pktbufsz_type %d "
27335165Syc148097 		    "nmp $%p rptr $%p wptr $%p "
27345165Syc148097 		    "buf_offset %d bsize %d l2_len %d skip_len %d",
27355165Syc148097 		    rx_rbr_p->rbr_consumed,
27365165Syc148097 		    pktbufsz_type,
27375165Syc148097 		    nmp, nmp->b_rptr, nmp->b_wptr,
27385165Syc148097 		    buf_offset, bsize, l2_len, skip_len));
27393859Sml29623 	} else {
27403859Sml29623 		cmn_err(CE_WARN, "!nxge_receive_packet: "
27416929Smisaki 		    "update stats (error)");
27424977Sraghus 		atomic_inc_32(&rx_msg_p->ref_cnt);
27434977Sraghus 		if (buffer_free == B_TRUE) {
27444977Sraghus 			rx_msg_p->free = B_TRUE;
27454977Sraghus 		}
27464977Sraghus 		MUTEX_EXIT(&rx_rbr_p->lock);
27474977Sraghus 		nxge_freeb(rx_msg_p);
27484977Sraghus 		return;
27493859Sml29623 	}
27505060Syc148097 
27513859Sml29623 	if (buffer_free == B_TRUE) {
27523859Sml29623 		rx_msg_p->free = B_TRUE;
27533859Sml29623 	}
27547145Syc148097 
27553859Sml29623 	is_valid = (nmp != NULL);
27565165Syc148097 
27575165Syc148097 	rcr_p->rcvd_pkt_bytes = bytes_read;
27585165Syc148097 
27593859Sml29623 	MUTEX_EXIT(&rx_rbr_p->lock);
27603859Sml29623 
27613859Sml29623 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
27623859Sml29623 		atomic_inc_32(&rx_msg_p->ref_cnt);
27633859Sml29623 		nxge_freeb(rx_msg_p);
27643859Sml29623 	}
27653859Sml29623 
27663859Sml29623 	if (is_valid) {
27673859Sml29623 		nmp->b_cont = NULL;
27683859Sml29623 		if (first_entry) {
27693859Sml29623 			*mp = nmp;
27703859Sml29623 			*mp_cont = NULL;
27715165Syc148097 		} else {
27723859Sml29623 			*mp_cont = nmp;
27735165Syc148097 		}
27743859Sml29623 	}
27753859Sml29623 
27763859Sml29623 	/*
27777145Syc148097 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry.
27787145Syc148097 	 * If a packet is not fragmented and no error bit is set, then
27797145Syc148097 	 * L4 checksum is OK.
27803859Sml29623 	 */
27817145Syc148097 
27823859Sml29623 	if (is_valid && !multi) {
27836495Sspeer 		/*
27846611Sml29623 		 * If the checksum flag nxge_cksum_offload
27856611Sml29623 		 * is 1, TCP and UDP packets can be sent
27866495Sspeer 		 * up with good checksum. If the checksum flag
27876611Sml29623 		 * is set to 0, checksum reporting will apply to
27886495Sspeer 		 * TCP packets only (workaround for a hardware bug).
27896611Sml29623 		 * If the checksum flag nxge_cksum_offload is
27906611Sml29623 		 * greater than 1, both TCP and UDP packets
27916611Sml29623 		 * will be sent up without hardware checksum results.
27926495Sspeer 		 */
27936611Sml29623 		if (nxge_cksum_offload == 1) {
27946495Sspeer 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
27956929Smisaki 			    pkt_type == RCR_PKT_IS_UDP) ?
27966929Smisaki 			    B_TRUE: B_FALSE);
27976611Sml29623 		} else if (!nxge_cksum_offload) {
27986495Sspeer 			/* TCP checksum only. */
27996495Sspeer 			is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP) ?
28006929Smisaki B_TRUE: B_FALSE); 28016495Sspeer } 28023859Sml29623 28033859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 28046929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 28056929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28063859Sml29623 28073859Sml29623 if (is_tcp_udp && !frag && !error_type) { 28083859Sml29623 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 28096929Smisaki HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 28103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 28116929Smisaki "==> nxge_receive_packet: Full tcp/udp cksum " 28126929Smisaki "is_valid 0x%x multi 0x%llx pkt %d frag %d " 28136929Smisaki "error %d", 28146929Smisaki is_valid, multi, is_tcp_udp, frag, error_type)); 28153859Sml29623 } 28163859Sml29623 } 28173859Sml29623 28183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 28196929Smisaki "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 28203859Sml29623 28213859Sml29623 *multi_p = (multi == RCR_MULTI_MASK); 28223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 28236929Smisaki "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 28246929Smisaki *multi_p, nmp, *mp, *mp_cont)); 28253859Sml29623 } 28263859Sml29623 28278275SEric Cheng /* 28288275SEric Cheng * Enable polling for a ring. Interrupt for the ring is disabled when 28298275SEric Cheng * the nxge interrupt comes (see nxge_rx_intr). 28308275SEric Cheng */ 28318275SEric Cheng int 28328275SEric Cheng nxge_enable_poll(void *arg) 28338275SEric Cheng { 28348275SEric Cheng p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg; 28358275SEric Cheng p_rx_rcr_ring_t ringp; 28368275SEric Cheng p_nxge_t nxgep; 28378275SEric Cheng p_nxge_ldg_t ldgp; 28388275SEric Cheng uint32_t channel; 28398275SEric Cheng 28408275SEric Cheng if (ring_handle == NULL) { 28418275SEric Cheng return (0); 28428275SEric Cheng } 28438275SEric Cheng 28448275SEric Cheng nxgep = ring_handle->nxgep; 28458275SEric Cheng channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index; 28468275SEric Cheng ringp = nxgep->rx_rcr_rings->rcr_rings[channel]; 28478275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28488275SEric Cheng "==> nxge_enable_poll: rdc %d ", ringp->rdc)); 28498275SEric Cheng ldgp = ringp->ldgp; 28508275SEric Cheng if (ldgp == NULL) { 28518275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28528275SEric Cheng "==> nxge_enable_poll: rdc %d NULL ldgp: no change", 28538275SEric Cheng ringp->rdc)); 28548275SEric Cheng return (0); 28558275SEric Cheng } 28568275SEric Cheng 28578275SEric Cheng MUTEX_ENTER(&ringp->lock); 28588275SEric Cheng /* enable polling */ 28598275SEric Cheng if (ringp->poll_flag == 0) { 28608275SEric Cheng ringp->poll_flag = 1; 28618275SEric Cheng NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 28628275SEric Cheng "==> nxge_enable_poll: rdc %d set poll flag to 1", 28638275SEric Cheng ringp->rdc)); 28648275SEric Cheng } 28658275SEric Cheng 28668275SEric Cheng MUTEX_EXIT(&ringp->lock); 28678275SEric Cheng return (0); 28688275SEric Cheng } 28698275SEric Cheng /* 28708275SEric Cheng * Disable polling for a ring and enable its interrupt. 
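 * Clearing poll_flag alone is not enough: the DMA's pktread/ptrread
 * state is refreshed and the logical group re-armed below, all under
 * the ring lock.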
28718275SEric Cheng  */
28728275SEric Cheng int
28738275SEric Cheng nxge_disable_poll(void *arg)
28748275SEric Cheng {
28758275SEric Cheng 	p_nxge_ring_handle_t ring_handle = (p_nxge_ring_handle_t)arg;
28768275SEric Cheng 	p_rx_rcr_ring_t ringp;
28778275SEric Cheng 	p_nxge_t nxgep;
28788275SEric Cheng 	uint32_t channel;
28798275SEric Cheng 
28808275SEric Cheng 	if (ring_handle == NULL) {
28818275SEric Cheng 		return (0);
28828275SEric Cheng 	}
28838275SEric Cheng 
28848275SEric Cheng 	nxgep = ring_handle->nxgep;
28858275SEric Cheng 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
28868275SEric Cheng 	ringp = nxgep->rx_rcr_rings->rcr_rings[channel];
28878275SEric Cheng 
28888275SEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
28898275SEric Cheng 	    "==> nxge_disable_poll: rdc %d poll_flag %d",
28908275SEric Cheng 	    ringp->rdc, ringp->poll_flag));
28918275SEric Cheng 
28918275SEric Cheng 	MUTEX_ENTER(&ringp->lock);
28928275SEric Cheng 
28938275SEric Cheng 	/* disable polling: enable interrupt */
28948275SEric Cheng 	if (ringp->poll_flag) {
28958275SEric Cheng 		npi_handle_t handle;
28968275SEric Cheng 		rx_dma_ctl_stat_t cs;
28978275SEric Cheng 		uint8_t channel;
28988275SEric Cheng 		p_nxge_ldg_t ldgp;
28998275SEric Cheng 
29008275SEric Cheng 		/*
29018275SEric Cheng 		 * Get the control and status for this channel.
29028275SEric Cheng 		 */
29038275SEric Cheng 		handle = NXGE_DEV_NPI_HANDLE(nxgep);
29048275SEric Cheng 		channel = ringp->rdc;
29058275SEric Cheng 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG,
29068275SEric Cheng 		    channel, &cs.value);
29078275SEric Cheng 
29088275SEric Cheng 		/*
29098275SEric Cheng 		 * Enable mailbox update.
29108275SEric Cheng 		 * Since packets were not read and the hardware uses
29118275SEric Cheng 		 * bits pktread and ptrread to update the queue
29128275SEric Cheng 		 * length, we need to set both bits to 0.
29138275SEric Cheng 		 */
29148275SEric Cheng 		cs.bits.ldw.pktread = 0;
29158275SEric Cheng 		cs.bits.ldw.ptrread = 0;
29168275SEric Cheng 		cs.bits.hdw.mex = 1;
29178275SEric Cheng 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
29188275SEric Cheng 		    cs.value);
29198275SEric Cheng 
29208275SEric Cheng 		/*
29218275SEric Cheng 		 * Rearm this logical group if this is a single device
29228275SEric Cheng 		 * group.
29238275SEric Cheng 		 */
29248275SEric Cheng 		ldgp = ringp->ldgp;
29258275SEric Cheng 		if (ldgp == NULL) {
29268275SEric Cheng 			ringp->poll_flag = 0;
29278275SEric Cheng 			MUTEX_EXIT(&ringp->lock);
29288275SEric Cheng 			NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
29298275SEric Cheng 			    "==> nxge_disable_poll: no ldgp rdc %d "
29308275SEric Cheng 			    "(still set poll to 0)", ringp->rdc));
29318275SEric Cheng 			return (0);
29328275SEric Cheng 		}
29338275SEric Cheng 		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
29348275SEric Cheng 		    "==> nxge_disable_poll: rdc %d ldgp $%p (enable intr)",
29358275SEric Cheng 		    ringp->rdc, ldgp));
29368275SEric Cheng 		if (ldgp->nldvs == 1) {
29378275SEric Cheng 			ldgimgm_t mgm;
29388275SEric Cheng 			mgm.value = 0;
29398275SEric Cheng 			mgm.bits.ldw.arm = 1;
29408275SEric Cheng 			mgm.bits.ldw.timer = ldgp->ldg_timer;
29418275SEric Cheng 			NXGE_REG_WR64(handle,
29428275SEric Cheng 			    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg), mgm.value);
29438275SEric Cheng 		}
29448275SEric Cheng 		ringp->poll_flag = 0;
29458275SEric Cheng 	}
29468275SEric Cheng 
29478275SEric Cheng 	MUTEX_EXIT(&ringp->lock);
29488275SEric Cheng 	return (0);
29498275SEric Cheng }
29508275SEric Cheng 
29518275SEric Cheng /*
29528275SEric Cheng  * Poll 'bytes_to_pickup' bytes of message from the rx ring.
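 * This is the polling-mode counterpart of nxge_rx_intr(): it may only
 * run while poll_flag is set (the ring's interrupt is disabled), which
 * the ASSERT below verifies.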
29538275SEric Cheng  */
29548275SEric Cheng mblk_t *
29558275SEric Cheng nxge_rx_poll(void *arg, int bytes_to_pickup)
29568275SEric Cheng {
29578275SEric Cheng 	p_nxge_ring_handle_t	ring_handle = (p_nxge_ring_handle_t)arg;
29588275SEric Cheng 	p_rx_rcr_ring_t		rcr_p;
29598275SEric Cheng 	p_nxge_t		nxgep;
29608275SEric Cheng 	npi_handle_t		handle;
29618275SEric Cheng 	rx_dma_ctl_stat_t	cs;
29628275SEric Cheng 	mblk_t			*mblk;
29638275SEric Cheng 	p_nxge_ldv_t		ldvp;
29648275SEric Cheng 	uint32_t		channel;
29658275SEric Cheng 
29668275SEric Cheng 	nxgep = ring_handle->nxgep;
29678275SEric Cheng 
29688275SEric Cheng 	/*
29698275SEric Cheng 	 * Get the control and status for this channel.
29708275SEric Cheng 	 */
29718275SEric Cheng 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
29728275SEric Cheng 	channel = nxgep->pt_config.hw_config.start_rdc + ring_handle->index;
29738275SEric Cheng 	rcr_p = nxgep->rx_rcr_rings->rcr_rings[channel];
29748275SEric Cheng 	MUTEX_ENTER(&rcr_p->lock);
29758275SEric Cheng 	ASSERT(rcr_p->poll_flag == 1);
29768275SEric Cheng 
29778275SEric Cheng 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, rcr_p->rdc, &cs.value);
29788275SEric Cheng 
29798275SEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
29808275SEric Cheng 	    "==> nxge_rx_poll: calling nxge_rx_pkts: rdc %d poll_flag %d",
29818275SEric Cheng 	    rcr_p->rdc, rcr_p->poll_flag));
29828275SEric Cheng 	mblk = nxge_rx_pkts(nxgep, rcr_p, cs, bytes_to_pickup);
29838275SEric Cheng 
29848275SEric Cheng 	ldvp = rcr_p->ldvp;
29858275SEric Cheng 	/* error events. */
29868275SEric Cheng 	if (ldvp && (cs.value & RX_DMA_CTL_STAT_ERROR)) {
29878275SEric Cheng 		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, cs);
29888275SEric Cheng 	}
29898275SEric Cheng 
29908275SEric Cheng 	MUTEX_EXIT(&rcr_p->lock);
29918275SEric Cheng 
29928275SEric Cheng 	NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
29938275SEric Cheng 	    "<== nxge_rx_poll: rdc %d mblk $%p", rcr_p->rdc, mblk));
29948275SEric Cheng 	return (mblk);
29958275SEric Cheng }
29968275SEric Cheng 
29978275SEric Cheng 
29983859Sml29623 /*ARGSUSED*/
29993859Sml29623 static nxge_status_t
30006495Sspeer nxge_rx_err_evnts(p_nxge_t nxgep, int channel, rx_dma_ctl_stat_t cs)
30013859Sml29623 {
30023859Sml29623 	p_nxge_rx_ring_stats_t	rdc_stats;
30033859Sml29623 	npi_handle_t		handle;
30043859Sml29623 	npi_status_t		rs;
30053859Sml29623 	boolean_t		rxchan_fatal = B_FALSE;
30063859Sml29623 	boolean_t		rxport_fatal = B_FALSE;
30073859Sml29623 	uint8_t			portn;
30083859Sml29623 	nxge_status_t		status = NXGE_OK;
30093859Sml29623 	uint32_t		error_disp_cnt = NXGE_ERROR_SHOW_MAX;
30103859Sml29623 	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts"));
30113859Sml29623 
30123859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
30133859Sml29623 	portn = nxgep->mac.portnum;
30146495Sspeer 	rdc_stats = &nxgep->statsp->rdc_stats[channel];
30153859Sml29623 
30163859Sml29623 	if (cs.bits.hdw.rbr_tmout) {
30173859Sml29623 		rdc_stats->rx_rbr_tmout++;
30183859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30196929Smisaki 		    NXGE_FM_EREPORT_RDMC_RBR_TMOUT);
30203859Sml29623 		rxchan_fatal = B_TRUE;
30213859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30226929Smisaki 		    "==> nxge_rx_err_evnts: rx_rbr_timeout"));
30233859Sml29623 	}
30243859Sml29623 	if (cs.bits.hdw.rsp_cnt_err) {
30253859Sml29623 		rdc_stats->rsp_cnt_err++;
30263859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
30276929Smisaki 		    NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR);
30283859Sml29623 		rxchan_fatal = B_TRUE;
30293859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
30306929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
30316929Smisaki 		    "rsp_cnt_err", channel));
"rsp_cnt_err", channel)); 30323859Sml29623 } 30333859Sml29623 if (cs.bits.hdw.byte_en_bus) { 30343859Sml29623 rdc_stats->byte_en_bus++; 30353859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30366929Smisaki NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 30373859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30386929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30396929Smisaki "fatal error: byte_en_bus", channel)); 30403859Sml29623 rxchan_fatal = B_TRUE; 30413859Sml29623 } 30423859Sml29623 if (cs.bits.hdw.rsp_dat_err) { 30433859Sml29623 rdc_stats->rsp_dat_err++; 30443859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30456929Smisaki NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 30463859Sml29623 rxchan_fatal = B_TRUE; 30473859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30486929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30496929Smisaki "fatal error: rsp_dat_err", channel)); 30503859Sml29623 } 30513859Sml29623 if (cs.bits.hdw.rcr_ack_err) { 30523859Sml29623 rdc_stats->rcr_ack_err++; 30533859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30546929Smisaki NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 30553859Sml29623 rxchan_fatal = B_TRUE; 30563859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30576929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30586929Smisaki "fatal error: rcr_ack_err", channel)); 30593859Sml29623 } 30603859Sml29623 if (cs.bits.hdw.dc_fifo_err) { 30613859Sml29623 rdc_stats->dc_fifo_err++; 30623859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30636929Smisaki NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 30643859Sml29623 /* This is not a fatal error! */ 30653859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30666929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30676929Smisaki "dc_fifo_err", channel)); 30683859Sml29623 rxport_fatal = B_TRUE; 30693859Sml29623 } 30703859Sml29623 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 30713859Sml29623 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 30726929Smisaki &rdc_stats->errlog.pre_par, 30736929Smisaki &rdc_stats->errlog.sha_par)) 30746929Smisaki != NPI_SUCCESS) { 30753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30766929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30776929Smisaki "rcr_sha_par: get perr", channel)); 30783859Sml29623 return (NXGE_ERROR | rs); 30793859Sml29623 } 30803859Sml29623 if (cs.bits.hdw.rcr_sha_par) { 30813859Sml29623 rdc_stats->rcr_sha_par++; 30823859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30836929Smisaki NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 30843859Sml29623 rxchan_fatal = B_TRUE; 30853859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30866929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30876929Smisaki "fatal error: rcr_sha_par", channel)); 30883859Sml29623 } 30893859Sml29623 if (cs.bits.hdw.rbr_pre_par) { 30903859Sml29623 rdc_stats->rbr_pre_par++; 30913859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 30926929Smisaki NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 30933859Sml29623 rxchan_fatal = B_TRUE; 30943859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30956929Smisaki "==> nxge_rx_err_evnts(channel %d): " 30966929Smisaki "fatal error: rbr_pre_par", channel)); 30973859Sml29623 } 30983859Sml29623 } 30996172Syc148097 /* 31006172Syc148097 * The Following 4 status bits are for information, the system 31016172Syc148097 * is running fine. There is no need to send FMA ereports or 31026172Syc148097 * log messages. 
31036172Syc148097 	 */
31043859Sml29623 	if (cs.bits.hdw.port_drop_pkt) {
31053859Sml29623 		rdc_stats->port_drop_pkt++;
31063859Sml29623 	}
31073859Sml29623 	if (cs.bits.hdw.wred_drop) {
31083859Sml29623 		rdc_stats->wred_drop++;
31093859Sml29623 	}
31103859Sml29623 	if (cs.bits.hdw.rbr_pre_empty) {
31113859Sml29623 		rdc_stats->rbr_pre_empty++;
31123859Sml29623 	}
31133859Sml29623 	if (cs.bits.hdw.rcr_shadow_full) {
31143859Sml29623 		rdc_stats->rcr_shadow_full++;
31153859Sml29623 	}
31163859Sml29623 	if (cs.bits.hdw.config_err) {
31173859Sml29623 		rdc_stats->config_err++;
31183859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31196929Smisaki 		    NXGE_FM_EREPORT_RDMC_CONFIG_ERR);
31203859Sml29623 		rxchan_fatal = B_TRUE;
31213859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31226929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
31236929Smisaki 		    "config error", channel));
31243859Sml29623 	}
31253859Sml29623 	if (cs.bits.hdw.rcrincon) {
31263859Sml29623 		rdc_stats->rcrincon++;
31273859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31286929Smisaki 		    NXGE_FM_EREPORT_RDMC_RCRINCON);
31293859Sml29623 		rxchan_fatal = B_TRUE;
31303859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31316929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
31326929Smisaki 		    "fatal error: rcrincon error", channel));
31333859Sml29623 	}
31343859Sml29623 	if (cs.bits.hdw.rcrfull) {
31353859Sml29623 		rdc_stats->rcrfull++;
31363859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31376929Smisaki 		    NXGE_FM_EREPORT_RDMC_RCRFULL);
31383859Sml29623 		rxchan_fatal = B_TRUE;
31393859Sml29623 		if (rdc_stats->rcrfull < error_disp_cnt)
31403859Sml29623 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31416929Smisaki 			    "==> nxge_rx_err_evnts(channel %d): "
31426929Smisaki 			    "fatal error: rcrfull error", channel));
31433859Sml29623 	}
31443859Sml29623 	if (cs.bits.hdw.rbr_empty) {
31456172Syc148097 		/*
31466172Syc148097 		 * This bit is for information only; there is no need
31476172Syc148097 		 * to send an FMA ereport or log a message.
31486172Syc148097 		 */
31493859Sml29623 		rdc_stats->rbr_empty++;
31503859Sml29623 	}
31513859Sml29623 	if (cs.bits.hdw.rbrfull) {
31523859Sml29623 		rdc_stats->rbrfull++;
31533859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31546929Smisaki 		    NXGE_FM_EREPORT_RDMC_RBRFULL);
31553859Sml29623 		rxchan_fatal = B_TRUE;
31563859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31576929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
31586929Smisaki 		    "fatal error: rbr_full error", channel));
31593859Sml29623 	}
31603859Sml29623 	if (cs.bits.hdw.rbrlogpage) {
31613859Sml29623 		rdc_stats->rbrlogpage++;
31623859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31636929Smisaki 		    NXGE_FM_EREPORT_RDMC_RBRLOGPAGE);
31643859Sml29623 		rxchan_fatal = B_TRUE;
31653859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31666929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
31676929Smisaki 		    "fatal error: rbr logical page error", channel));
31683859Sml29623 	}
31693859Sml29623 	if (cs.bits.hdw.cfiglogpage) {
31703859Sml29623 		rdc_stats->cfiglogpage++;
31713859Sml29623 		NXGE_FM_REPORT_ERROR(nxgep, portn, channel,
31726929Smisaki 		    NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE);
31733859Sml29623 		rxchan_fatal = B_TRUE;
31743859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31756929Smisaki 		    "==> nxge_rx_err_evnts(channel %d): "
31766929Smisaki 		    "fatal error: cfig logical page error", channel));
31773859Sml29623 	}
31783859Sml29623 
31793859Sml29623 	if (rxport_fatal) {
31803859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31816495Sspeer 		    " nxge_rx_err_evnts: fatal error on Port #%d\n",
31826495Sspeer 		    portn));
31836495Sspeer 		if (isLDOMguest(nxgep)) {
31846495Sspeer 			status = NXGE_ERROR;
31856495Sspeer 		} else {
31866495Sspeer 			status = nxge_ipp_fatal_err_recover(nxgep);
31876495Sspeer 			if (status == NXGE_OK) {
31886495Sspeer 				FM_SERVICE_RESTORED(nxgep);
31896495Sspeer 			}
31903859Sml29623 		}
31913859Sml29623 	}
31923859Sml29623 
31933859Sml29623 	if (rxchan_fatal) {
31943859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
31956495Sspeer 		    " nxge_rx_err_evnts: fatal error on Channel #%d\n",
31966495Sspeer 		    channel));
31976495Sspeer 		if (isLDOMguest(nxgep)) {
31986495Sspeer 			status = NXGE_ERROR;
31996495Sspeer 		} else {
32006495Sspeer 			status = nxge_rxdma_fatal_err_recover(nxgep, channel);
32016495Sspeer 			if (status == NXGE_OK) {
32026495Sspeer 				FM_SERVICE_RESTORED(nxgep);
32036495Sspeer 			}
32043859Sml29623 		}
32053859Sml29623 	}
32063859Sml29623 
32073859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts"));
32083859Sml29623 
32093859Sml29623 	return (status);
32103859Sml29623 }
32113859Sml29623 
32126495Sspeer /*
32136495Sspeer  * nxge_rdc_hvio_setup
32146495Sspeer  *
32156495Sspeer  * This code appears to set up some Hypervisor variables.
32166495Sspeer  *
32176495Sspeer  * Arguments:
32186495Sspeer  * 	nxgep
32196495Sspeer  * 	channel
32206495Sspeer  *
32216495Sspeer  * Notes:
32226495Sspeer  * 	What does NIU_LP_WORKAROUND mean?
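 * 	(Added, best guess: it appears to name the sun4v NIU
 * 	logical-page workaround.  The base addresses and sizes recorded
 * 	below look like the values later handed to the hypervisor's
 * 	logical-page configuration; compare nxge_rdc_lp_conf(), which
 * 	nxge_rxdma_start_channel() calls in the guest-domain case.)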
32236495Sspeer  *
32246495Sspeer  * NPI/NXGE function calls:
32256495Sspeer  * 	na
32266495Sspeer  *
32276495Sspeer  * Context:
32286495Sspeer  * 	Any domain
32296495Sspeer  */
32306495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
32316495Sspeer static void
32326495Sspeer nxge_rdc_hvio_setup(
32336495Sspeer 	nxge_t *nxgep, int channel)
32343859Sml29623 {
32356495Sspeer 	nxge_dma_common_t	*dma_common;
32366495Sspeer 	nxge_dma_common_t	*dma_control;
32376495Sspeer 	rx_rbr_ring_t		*ring;
32386495Sspeer 
32396495Sspeer 	ring = nxgep->rx_rbr_rings->rbr_rings[channel];
32406495Sspeer 	dma_common = nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
32416495Sspeer 
32426495Sspeer 	ring->hv_set = B_FALSE;
32436495Sspeer 
32446495Sspeer 	ring->hv_rx_buf_base_ioaddr_pp = (uint64_t)
32456495Sspeer 	    dma_common->orig_ioaddr_pp;
32466495Sspeer 	ring->hv_rx_buf_ioaddr_size = (uint64_t)
32476495Sspeer 	    dma_common->orig_alength;
32486495Sspeer 
32496495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
32506495Sspeer 	    "channel %d data buf base io $%lx ($%p) size 0x%lx (%ld 0x%lx)",
32516495Sspeer 	    channel, ring->hv_rx_buf_base_ioaddr_pp,
32526495Sspeer 	    dma_common->ioaddr_pp, ring->hv_rx_buf_ioaddr_size,
32536495Sspeer 	    dma_common->orig_alength, dma_common->orig_alength));
32546495Sspeer 
32556495Sspeer 	dma_control = nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
32566495Sspeer 
32576495Sspeer 	ring->hv_rx_cntl_base_ioaddr_pp =
32586495Sspeer 	    (uint64_t)dma_control->orig_ioaddr_pp;
32596495Sspeer 	ring->hv_rx_cntl_ioaddr_size =
32606495Sspeer 	    (uint64_t)dma_control->orig_alength;
32616495Sspeer 
32626495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rdc_hvio_setup: "
32636495Sspeer 	    "channel %d cntl base io $%p ($%p) size 0x%llx (%d 0x%x)",
32646495Sspeer 	    channel, ring->hv_rx_cntl_base_ioaddr_pp,
32656495Sspeer 	    dma_control->ioaddr_pp, ring->hv_rx_cntl_ioaddr_size,
32666495Sspeer 	    dma_control->orig_alength, dma_control->orig_alength));
32676495Sspeer }
32683859Sml29623 #endif
32693859Sml29623 
32706495Sspeer /*
32716495Sspeer  * nxge_map_rxdma
32726495Sspeer  *
32736495Sspeer  * Map an RDC into our kernel space.
32746495Sspeer  *
32756495Sspeer  * Arguments:
32766495Sspeer  * 	nxgep
32776495Sspeer  * 	channel	The channel to map.
32786495Sspeer  *
32796495Sspeer  * Notes:
32806495Sspeer  * 	1. Allocate & initialise a memory pool, if necessary.
32816495Sspeer  * 	2. Allocate however many receive buffers are required.
32826495Sspeer  * 	3. Set up buffers, descriptors, and mailbox.
32836495Sspeer  *
32846495Sspeer  * NPI/NXGE function calls:
32856495Sspeer  * 	nxge_alloc_rx_mem_pool()
32866495Sspeer  * 	nxge_alloc_rbb()
32876495Sspeer  * 	nxge_map_rxdma_channel()
32886495Sspeer  *
32896495Sspeer  * Registers accessed:
32906495Sspeer  *
32916495Sspeer  * Context:
32926495Sspeer  * 	Any domain
32936495Sspeer  */
32946495Sspeer static nxge_status_t
32956495Sspeer nxge_map_rxdma(p_nxge_t nxgep, int channel)
32966495Sspeer {
32976495Sspeer 	nxge_dma_common_t	**data;
32986495Sspeer 	nxge_dma_common_t	**control;
32996495Sspeer 	rx_rbr_ring_t		**rbr_ring;
33006495Sspeer 	rx_rcr_ring_t		**rcr_ring;
33016495Sspeer 	rx_mbox_t		**mailbox;
33026495Sspeer 	uint32_t		chunks;
33036495Sspeer 
33046495Sspeer 	nxge_status_t		status;
33056495Sspeer 
33063859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma"));
33073859Sml29623 
33086495Sspeer 	if (!nxgep->rx_buf_pool_p) {
33096495Sspeer 		if (nxge_alloc_rx_mem_pool(nxgep) != NXGE_OK) {
33106495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33116495Sspeer 			    "<== nxge_map_rxdma: buf not allocated"));
33126495Sspeer 			return (NXGE_ERROR);
33136495Sspeer 		}
33143859Sml29623 	}
33153859Sml29623 
33166495Sspeer 	if (nxge_alloc_rxb(nxgep, channel) != NXGE_OK)
33176495Sspeer 		return (NXGE_ERROR);
33183859Sml29623 
33193859Sml29623 	/*
33203859Sml29623 	 * Timeout should be set based on the system clock divider.
33213859Sml29623 	 * The following timeout value of 1 assumes that the
33223859Sml29623 	 * granularity (1000) is 3 microseconds running at 300MHz.
33233859Sml29623 	 */
33243859Sml29623 
33253859Sml29623 	nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
33263859Sml29623 	nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
33273859Sml29623 
33283859Sml29623 	/*
33296495Sspeer 	 * Map descriptors from the buffer pools for each dma channel.
33306495Sspeer 	 */
33316495Sspeer 
33326495Sspeer 	/*
33336495Sspeer 	 * Set up and prepare buffer blocks, descriptors
33346495Sspeer 	 * and mailbox.
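	 *
	 * Added summary of the assignments below: the channel's data
	 * pool backs the RBR, and the control pool is carved into the
	 * RCR descriptors plus the mailbox:
	 *
	 *	rx_buf_pool_p->dma_buf_pool_p[channel]   -> RBR buffers
	 *	rx_cntl_pool_p->dma_buf_pool_p[channel]  -> RCR + mailbox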
33353859Sml29623 	 */
33366495Sspeer 	data = &nxgep->rx_buf_pool_p->dma_buf_pool_p[channel];
33376495Sspeer 	rbr_ring = &nxgep->rx_rbr_rings->rbr_rings[channel];
33386495Sspeer 	chunks = nxgep->rx_buf_pool_p->num_chunks[channel];
33396495Sspeer 
33406495Sspeer 	control = &nxgep->rx_cntl_pool_p->dma_buf_pool_p[channel];
33416495Sspeer 	rcr_ring = &nxgep->rx_rcr_rings->rcr_rings[channel];
33426495Sspeer 
33436495Sspeer 	mailbox = &nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
33446495Sspeer 
33456495Sspeer 	status = nxge_map_rxdma_channel(nxgep, channel, data, rbr_ring,
33466495Sspeer 	    chunks, control, rcr_ring, mailbox);
33476495Sspeer 	if (status != NXGE_OK) {
33486495Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
33496929Smisaki 		    "==> nxge_map_rxdma: nxge_map_rxdma_channel(%d) "
33506929Smisaki 		    "returned 0x%x",
33516929Smisaki 		    channel, status));
33526495Sspeer 		return (status);
33536495Sspeer 	}
33546495Sspeer 	nxgep->rx_rbr_rings->rbr_rings[channel]->index = (uint16_t)channel;
33556495Sspeer 	nxgep->rx_rcr_rings->rcr_rings[channel]->index = (uint16_t)channel;
33566495Sspeer 	nxgep->rx_rcr_rings->rcr_rings[channel]->rdc_stats =
33576495Sspeer 	    &nxgep->statsp->rdc_stats[channel];
33583859Sml29623 
33593859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
33606495Sspeer 	if (!isLDOMguest(nxgep))
33616495Sspeer 		nxge_rdc_hvio_setup(nxgep, channel);
33626495Sspeer #endif
33636495Sspeer 
33643859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
33656495Sspeer 	    "<== nxge_map_rxdma: (status 0x%x channel %d)", status, channel));
33663859Sml29623 
33673859Sml29623 	return (status);
33683859Sml29623 }
33693859Sml29623 
33703859Sml29623 static void
33716495Sspeer nxge_unmap_rxdma(p_nxge_t nxgep, int channel)
33723859Sml29623 {
33736495Sspeer 	rx_rbr_ring_t	*rbr_ring;
33746495Sspeer 	rx_rcr_ring_t	*rcr_ring;
33756495Sspeer 	rx_mbox_t	*mailbox;
33766495Sspeer 
33776495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma(%d)", channel));
33786495Sspeer 
33796495Sspeer 	if (!nxgep->rx_rbr_rings || !nxgep->rx_rcr_rings ||
33806495Sspeer 	    !nxgep->rx_mbox_areas_p)
33813859Sml29623 		return;
33826495Sspeer 
33836495Sspeer 	rbr_ring = nxgep->rx_rbr_rings->rbr_rings[channel];
33846495Sspeer 	rcr_ring = nxgep->rx_rcr_rings->rcr_rings[channel];
33856495Sspeer 	mailbox = nxgep->rx_mbox_areas_p->rxmbox_areas[channel];
33866495Sspeer 
33876495Sspeer 	if (!rbr_ring || !rcr_ring || !mailbox)
33883859Sml29623 		return;
33896495Sspeer 
33906495Sspeer 	(void) nxge_unmap_rxdma_channel(
33916929Smisaki 	    nxgep, channel, rbr_ring, rcr_ring, mailbox);
33926495Sspeer 
33936495Sspeer 	nxge_free_rxb(nxgep, channel);
33946495Sspeer 
33956495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma"));
33963859Sml29623 }
33973859Sml29623 
33983859Sml29623 nxge_status_t
33993859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
34003859Sml29623     p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
34013859Sml29623     uint32_t num_chunks,
34023859Sml29623     p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p,
34033859Sml29623     p_rx_mbox_t *rx_mbox_p)
34043859Sml29623 {
34053859Sml29623 	int	status = NXGE_OK;
34063859Sml29623 
34073859Sml29623 	/*
34083859Sml29623 	 * Set up and prepare buffer blocks, descriptors
34093859Sml29623 	 * and mailbox.
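	 *
	 * (Added note: the error labels below unwind in reverse order
	 * of setup -- a failure mapping the configuration rings falls
	 * into nxge_map_rxdma_channel_fail2, which releases the buffer
	 * ring mapped just before it.  Nothing currently jumps to
	 * nxge_map_rxdma_channel_fail3; it is kept for symmetry.)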
34103859Sml29623 	 */
34113859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
34126929Smisaki 	    "==> nxge_map_rxdma_channel (channel %d)", channel));
34133859Sml29623 	/*
34143859Sml29623 	 * Receive buffer blocks
34153859Sml29623 	 */
34163859Sml29623 	status = nxge_map_rxdma_channel_buf_ring(nxgep, channel,
34176929Smisaki 	    dma_buf_p, rbr_p, num_chunks);
34183859Sml29623 	if (status != NXGE_OK) {
34193859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34206929Smisaki 		    "==> nxge_map_rxdma_channel (channel %d): "
34216929Smisaki 		    "map buffer failed 0x%x", channel, status));
34223859Sml29623 		goto nxge_map_rxdma_channel_exit;
34233859Sml29623 	}
34243859Sml29623 
34253859Sml29623 	/*
34263859Sml29623 	 * Receive block ring, completion ring and mailbox.
34273859Sml29623 	 */
34283859Sml29623 	status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel,
34296929Smisaki 	    dma_cntl_p, rbr_p, rcr_p, rx_mbox_p);
34303859Sml29623 	if (status != NXGE_OK) {
34313859Sml29623 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34326929Smisaki 		    "==> nxge_map_rxdma_channel (channel %d): "
34336929Smisaki 		    "map config failed 0x%x", channel, status));
34343859Sml29623 		goto nxge_map_rxdma_channel_fail2;
34353859Sml29623 	}
34363859Sml29623 
34373859Sml29623 	goto nxge_map_rxdma_channel_exit;
34383859Sml29623 
34393859Sml29623 nxge_map_rxdma_channel_fail3:
34403859Sml29623 	/* Free rbr, rcr */
34413859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34426929Smisaki 	    "==> nxge_map_rxdma_channel: free rbr/rcr "
34436929Smisaki 	    "(status 0x%x channel %d)",
34446929Smisaki 	    status, channel));
34453859Sml29623 	nxge_unmap_rxdma_channel_cfg_ring(nxgep,
34466929Smisaki 	    *rcr_p, *rx_mbox_p);
34473859Sml29623 
34483859Sml29623 nxge_map_rxdma_channel_fail2:
34493859Sml29623 	/* Free buffer blocks */
34503859Sml29623 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
34516929Smisaki 	    "==> nxge_map_rxdma_channel: free rx buffers"
34526929Smisaki 	    "(nxgep 0x%x status 0x%x channel %d)",
34536929Smisaki 	    nxgep, status, channel));
34543859Sml29623 	nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p);
34553859Sml29623 
34564185Sspeer 	status = NXGE_ERROR;
34574185Sspeer 
34583859Sml29623 nxge_map_rxdma_channel_exit:
34593859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
34606929Smisaki 	    "<== nxge_map_rxdma_channel: "
34616929Smisaki 	    "(nxgep 0x%x status 0x%x channel %d)",
34626929Smisaki 	    nxgep, status, channel));
34633859Sml29623 
34643859Sml29623 	return (status);
34653859Sml29623 }
34663859Sml29623 
34673859Sml29623 /*ARGSUSED*/
34683859Sml29623 static void
34693859Sml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel,
34703859Sml29623     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
34713859Sml29623 {
34723859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
34736929Smisaki 	    "==> nxge_unmap_rxdma_channel (channel %d)", channel));
34743859Sml29623 
34753859Sml29623 	/*
34763859Sml29623 	 * unmap receive block ring, completion ring and mailbox.
34773859Sml29623 	 */
34783859Sml29623 	(void) nxge_unmap_rxdma_channel_cfg_ring(nxgep,
34796929Smisaki 	    rcr_p, rx_mbox_p);
34803859Sml29623 
34813859Sml29623 	/* unmap buffer blocks */
34823859Sml29623 	(void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p);
34833859Sml29623 
34843859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel"));
34853859Sml29623 }
34863859Sml29623 
34873859Sml29623 /*ARGSUSED*/
34883859Sml29623 static nxge_status_t
34893859Sml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel,
34903859Sml29623     p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p,
34913859Sml29623     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
34923859Sml29623 {
34933859Sml29623 	p_rx_rbr_ring_t 	rbrp;
34943859Sml29623 	p_rx_rcr_ring_t 	rcrp;
34953859Sml29623 	p_rx_mbox_t 		mboxp;
34963859Sml29623 	p_nxge_dma_common_t 	cntl_dmap;
34973859Sml29623 	p_nxge_dma_common_t 	dmap;
34983859Sml29623 	p_rx_msg_t 		*rx_msg_ring;
34993859Sml29623 	p_rx_msg_t 		rx_msg_p;
35003859Sml29623 	p_rbr_cfig_a_t		rcfga_p;
35013859Sml29623 	p_rbr_cfig_b_t		rcfgb_p;
35023859Sml29623 	p_rcrcfig_a_t		cfga_p;
35033859Sml29623 	p_rcrcfig_b_t		cfgb_p;
35043859Sml29623 	p_rxdma_cfig1_t		cfig1_p;
35053859Sml29623 	p_rxdma_cfig2_t		cfig2_p;
35063859Sml29623 	p_rbr_kick_t		kick_p;
35073859Sml29623 	uint32_t		dmaaddrp;
35083859Sml29623 	uint32_t		*rbr_vaddrp;
35093859Sml29623 	uint32_t		bkaddr;
35103859Sml29623 	nxge_status_t		status = NXGE_OK;
35113859Sml29623 	int			i;
35123859Sml29623 	uint32_t 		nxge_port_rcr_size;
35133859Sml29623 
35143859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
35156929Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring"));
35163859Sml29623 
35173859Sml29623 	cntl_dmap = *dma_cntl_p;
35183859Sml29623 
35193859Sml29623 	/* Map in the receive block ring */
35203859Sml29623 	rbrp = *rbr_p;
35213859Sml29623 	dmap = (p_nxge_dma_common_t)&rbrp->rbr_desc;
35223859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
35233859Sml29623 	/*
35243859Sml29623 	 * Zero out buffer block ring descriptors.
35253859Sml29623 	 */
35263859Sml29623 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
35273859Sml29623 
35283859Sml29623 	rcfga_p = &(rbrp->rbr_cfga);
35293859Sml29623 	rcfgb_p = &(rbrp->rbr_cfgb);
35303859Sml29623 	kick_p = &(rbrp->rbr_kick);
35313859Sml29623 	rcfga_p->value = 0;
35323859Sml29623 	rcfgb_p->value = 0;
35333859Sml29623 	kick_p->value = 0;
35343859Sml29623 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
35353859Sml29623 	rcfga_p->value = (rbrp->rbr_addr &
35366929Smisaki 	    (RBR_CFIG_A_STDADDR_MASK |
35376929Smisaki 	    RBR_CFIG_A_STDADDR_BASE_MASK));
35383859Sml29623 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
35393859Sml29623 
35403859Sml29623 	rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0;
35413859Sml29623 	rcfgb_p->bits.ldw.vld0 = 1;
35423859Sml29623 	rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1;
35433859Sml29623 	rcfgb_p->bits.ldw.vld1 = 1;
35443859Sml29623 	rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2;
35453859Sml29623 	rcfgb_p->bits.ldw.vld2 = 1;
35463859Sml29623 	rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code;
35473859Sml29623 
35483859Sml29623 	/*
35493859Sml29623 	 * For each buffer block, enter receive block address to the ring.
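	 *
	 * Added sketch (assumes the 4 KB block granularity implied by
	 * RBR_BKADDR_SHIFT): each ring entry is the buffer block's DMA
	 * address with the low-order block bits shifted out, e.g.
	 *
	 *	bkaddr = (uint32_t)(0x123458000ULL >> RBR_BKADDR_SHIFT);
	 *
	 * which is 0x123458 if the shift is 12.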
35503859Sml29623 	 */
35513859Sml29623 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
35523859Sml29623 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
35533859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
35546929Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: channel %d "
35556929Smisaki 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
35563859Sml29623 
35573859Sml29623 	rx_msg_ring = rbrp->rx_msg_ring;
35583859Sml29623 	for (i = 0; i < rbrp->tnblocks; i++) {
35593859Sml29623 		rx_msg_p = rx_msg_ring[i];
35603859Sml29623 		rx_msg_p->nxgep = nxgep;
35613859Sml29623 		rx_msg_p->rx_rbr_p = rbrp;
35623859Sml29623 		bkaddr = (uint32_t)
35636929Smisaki 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress
35646929Smisaki 		    >> RBR_BKADDR_SHIFT));
35653859Sml29623 		rx_msg_p->free = B_FALSE;
35663859Sml29623 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
35673859Sml29623 
35683859Sml29623 		*rbr_vaddrp++ = bkaddr;
35693859Sml29623 	}
35703859Sml29623 
35713859Sml29623 	kick_p->bits.ldw.bkadd = rbrp->rbb_max;
35723859Sml29623 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
35733859Sml29623 
35743859Sml29623 	rbrp->rbr_rd_index = 0;
35753859Sml29623 
35763859Sml29623 	rbrp->rbr_consumed = 0;
35773859Sml29623 	rbrp->rbr_use_bcopy = B_TRUE;
35783859Sml29623 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
35793859Sml29623 	/*
35803859Sml29623 	 * Do bcopy on packets greater than bcopy size once
35813859Sml29623 	 * the lo threshold is reached.
35823859Sml29623 	 * This lo threshold should be less than the hi threshold.
35833859Sml29623 	 *
35843859Sml29623 	 * Do bcopy on every packet once the hi threshold is reached.
35853859Sml29623 	 */
35863859Sml29623 	if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) {
35873859Sml29623 		/* default it to use hi */
35883859Sml29623 		nxge_rx_threshold_lo = nxge_rx_threshold_hi;
35893859Sml29623 	}
35903859Sml29623 
35913859Sml29623 	if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) {
35923859Sml29623 		nxge_rx_buf_size_type = NXGE_RBR_TYPE2;
35933859Sml29623 	}
35943859Sml29623 	rbrp->rbr_bufsize_type = nxge_rx_buf_size_type;
35953859Sml29623 
35963859Sml29623 	switch (nxge_rx_threshold_hi) {
35973859Sml29623 	default:
35983859Sml29623 	case NXGE_RX_COPY_NONE:
35993859Sml29623 		/* Do not do bcopy at all */
36003859Sml29623 		rbrp->rbr_use_bcopy = B_FALSE;
36013859Sml29623 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
36023859Sml29623 		break;
36033859Sml29623 
36043859Sml29623 	case NXGE_RX_COPY_1:
36053859Sml29623 	case NXGE_RX_COPY_2:
36063859Sml29623 	case NXGE_RX_COPY_3:
36073859Sml29623 	case NXGE_RX_COPY_4:
36083859Sml29623 	case NXGE_RX_COPY_5:
36093859Sml29623 	case NXGE_RX_COPY_6:
36103859Sml29623 	case NXGE_RX_COPY_7:
36113859Sml29623 		rbrp->rbr_threshold_hi =
36126929Smisaki 		    rbrp->rbb_max *
36136929Smisaki 		    (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE;
36143859Sml29623 		break;
36153859Sml29623 
36163859Sml29623 	case NXGE_RX_COPY_ALL:
36173859Sml29623 		rbrp->rbr_threshold_hi = 0;
36183859Sml29623 		break;
36193859Sml29623 	}
36203859Sml29623 
36213859Sml29623 	switch (nxge_rx_threshold_lo) {
36223859Sml29623 	default:
36233859Sml29623 	case NXGE_RX_COPY_NONE:
36243859Sml29623 		/* Do not do bcopy at all */
36253859Sml29623 		if (rbrp->rbr_use_bcopy) {
36263859Sml29623 			rbrp->rbr_use_bcopy = B_FALSE;
36273859Sml29623 		}
36283859Sml29623 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
36293859Sml29623 		break;
36303859Sml29623 
36313859Sml29623 	case NXGE_RX_COPY_1:
36323859Sml29623 	case NXGE_RX_COPY_2:
36333859Sml29623 	case NXGE_RX_COPY_3:
36343859Sml29623 	case NXGE_RX_COPY_4:
36353859Sml29623 	case NXGE_RX_COPY_5:
36363859Sml29623 	case NXGE_RX_COPY_6:
36373859Sml29623 	case NXGE_RX_COPY_7:
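		/*
		 * Added worked example (assumes NXGE_RX_BCOPY_SCALE is
		 * 8): with rbb_max = 4096 and nxge_rx_threshold_lo =
		 * NXGE_RX_COPY_2, the expression below gives
		 *
		 *	rbr_threshold_lo = 4096 * 2 / 8 = 1024
		 *
		 * i.e. once 1024 of the 4096 blocks are consumed,
		 * packets larger than the bcopy size class start being
		 * copied rather than loaned up the stack.
		 */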
36383859Sml29623 		rbrp->rbr_threshold_lo =
36396929Smisaki 		    rbrp->rbb_max *
36406929Smisaki 		    (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE;
36413859Sml29623 		break;
36423859Sml29623 
36433859Sml29623 	case NXGE_RX_COPY_ALL:
36443859Sml29623 		rbrp->rbr_threshold_lo = 0;
36453859Sml29623 		break;
36463859Sml29623 	}
36473859Sml29623 
36483859Sml29623 	NXGE_DEBUG_MSG((nxgep, RX_CTL,
36496929Smisaki 	    "nxge_map_rxdma_channel_cfg_ring: channel %d "
36506929Smisaki 	    "rbb_max %d "
36516929Smisaki 	    "rbrp->rbr_bufsize_type %d "
36526929Smisaki 	    "rbb_threshold_hi %d "
36536929Smisaki 	    "rbb_threshold_lo %d",
36546929Smisaki 	    dma_channel,
36556929Smisaki 	    rbrp->rbb_max,
36566929Smisaki 	    rbrp->rbr_bufsize_type,
36576929Smisaki 	    rbrp->rbr_threshold_hi,
36586929Smisaki 	    rbrp->rbr_threshold_lo));
36593859Sml29623 
36603859Sml29623 	rbrp->page_valid.value = 0;
36613859Sml29623 	rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0;
36623859Sml29623 	rbrp->page_value_1.value = rbrp->page_value_2.value = 0;
36633859Sml29623 	rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0;
36643859Sml29623 	rbrp->page_hdl.value = 0;
36653859Sml29623 
36663859Sml29623 	rbrp->page_valid.bits.ldw.page0 = 1;
36673859Sml29623 	rbrp->page_valid.bits.ldw.page1 = 1;
36683859Sml29623 
36693859Sml29623 	/* Map in the receive completion ring */
36703859Sml29623 	rcrp = (p_rx_rcr_ring_t)
36716929Smisaki 	    KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
36723859Sml29623 	rcrp->rdc = dma_channel;
36733859Sml29623 
36743859Sml29623 	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;
36753859Sml29623 	rcrp->comp_size = nxge_port_rcr_size;
36763859Sml29623 	rcrp->comp_wrap_mask = nxge_port_rcr_size - 1;
36773859Sml29623 
36783859Sml29623 	rcrp->max_receive_pkts = nxge_max_rx_pkts;
36793859Sml29623 
36803859Sml29623 	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
36813859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
36826929Smisaki 	    sizeof (rcr_entry_t));
36833859Sml29623 	rcrp->comp_rd_index = 0;
36843859Sml29623 	rcrp->comp_wt_index = 0;
36853859Sml29623 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
36866929Smisaki 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
36875125Sjoycey #if defined(__i386)
36886929Smisaki 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
36896929Smisaki 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
36905125Sjoycey #else
36916929Smisaki 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
36926929Smisaki 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
36935125Sjoycey #endif
36943859Sml29623 
36953859Sml29623 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
36966929Smisaki 	    (nxge_port_rcr_size - 1);
36973859Sml29623 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
36986929Smisaki 	    (nxge_port_rcr_size - 1);
36993859Sml29623 
37003859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37016929Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
37026929Smisaki 	    "channel %d "
37036929Smisaki 	    "rbr_vaddrp $%p "
37046929Smisaki 	    "rcr_desc_rd_head_p $%p "
37056929Smisaki 	    "rcr_desc_rd_head_pp $%p "
37066929Smisaki 	    "rcr_desc_rd_last_p $%p "
37076929Smisaki 	    "rcr_desc_rd_last_pp $%p ",
37086929Smisaki 	    dma_channel,
37096929Smisaki 	    rbr_vaddrp,
37106929Smisaki 	    rcrp->rcr_desc_rd_head_p,
37116929Smisaki 	    rcrp->rcr_desc_rd_head_pp,
37126929Smisaki 	    rcrp->rcr_desc_last_p,
37136929Smisaki 	    rcrp->rcr_desc_last_pp));
37143859Sml29623 
37153859Sml29623 	/*
37163859Sml29623 	 * Zero out buffer block ring descriptors.
37173859Sml29623 	 */
37183859Sml29623 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
37193859Sml29623 	rcrp->intr_timeout = nxgep->intr_timeout;
37203859Sml29623 	rcrp->intr_threshold = nxgep->intr_threshold;
37213859Sml29623 	rcrp->full_hdr_flag = B_FALSE;
37223859Sml29623 	rcrp->sw_priv_hdr_len = 0;
37233859Sml29623 
37243859Sml29623 	cfga_p = &(rcrp->rcr_cfga);
37253859Sml29623 	cfgb_p = &(rcrp->rcr_cfgb);
37263859Sml29623 	cfga_p->value = 0;
37273859Sml29623 	cfgb_p->value = 0;
37283859Sml29623 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
37293859Sml29623 	cfga_p->value = (rcrp->rcr_addr &
37306929Smisaki 	    (RCRCFIG_A_STADDR_MASK |
37316929Smisaki 	    RCRCFIG_A_STADDR_BASE_MASK));
37323859Sml29623 
37333859Sml29623 	cfga_p->value |= ((uint64_t)rcrp->comp_size <<
37346929Smisaki 	    RCRCFIG_A_LEN_SHIF);
37353859Sml29623 
37363859Sml29623 	/*
37373859Sml29623 	 * Timeout should be set based on the system clock divider.
37383859Sml29623 	 * The following timeout value of 1 assumes that the
37393859Sml29623 	 * granularity (1000) is 3 microseconds running at 300MHz.
37403859Sml29623 	 */
37413859Sml29623 	cfgb_p->bits.ldw.pthres = rcrp->intr_threshold;
37423859Sml29623 	cfgb_p->bits.ldw.timeout = rcrp->intr_timeout;
37433859Sml29623 	cfgb_p->bits.ldw.entout = 1;
37443859Sml29623 
37453859Sml29623 	/* Map in the mailbox */
37463859Sml29623 	mboxp = (p_rx_mbox_t)
37476929Smisaki 	    KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
37483859Sml29623 	dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox;
37493859Sml29623 	nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
37503859Sml29623 	cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1;
37513859Sml29623 	cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2;
37523859Sml29623 	cfig1_p->value = cfig2_p->value = 0;
37533859Sml29623 
37543859Sml29623 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
37553859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37566929Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
37576929Smisaki 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
37586929Smisaki 	    dma_channel, cfig1_p->value, cfig2_p->value,
37596929Smisaki 	    mboxp->mbox_addr));
37603859Sml29623 
37613859Sml29623 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32
37626929Smisaki 	    & 0xfff);
37633859Sml29623 	cfig1_p->bits.ldw.mbaddr_h = dmaaddrp;
37643859Sml29623 
37653859Sml29623 
37663859Sml29623 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
37673859Sml29623 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
37686929Smisaki 	    RXDMA_CFIG2_MBADDR_L_MASK);
37693859Sml29623 
37703859Sml29623 	cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
37713859Sml29623 
37723859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37736929Smisaki 	    "==> nxge_map_rxdma_channel_cfg_ring: "
37746929Smisaki 	    "channel %d dmaaddrp $%p "
37756929Smisaki 	    "cfg1 0x%016llx cfig2 0x%016llx",
37766929Smisaki 	    dma_channel, dmaaddrp,
37776929Smisaki 	    cfig1_p->value, cfig2_p->value));
37783859Sml29623 
37793859Sml29623 	cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag;
37803859Sml29623 	cfig2_p->bits.ldw.offset = rcrp->sw_priv_hdr_len;
37813859Sml29623 
37823859Sml29623 	rbrp->rx_rcr_p = rcrp;
37833859Sml29623 	rcrp->rx_rbr_p = rbrp;
37843859Sml29623 	*rcr_p = rcrp;
37853859Sml29623 	*rx_mbox_p = mboxp;
37863859Sml29623 
37873859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37886929Smisaki 	    "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
37893859Sml29623 
37903859Sml29623 	return (status);
37913859Sml29623 }
37923859Sml29623 
37933859Sml29623 /*ARGSUSED*/
37943859Sml29623 static void
37953859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep,
37963859Sml29623     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
37973859Sml29623 {
37983859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
37996929Smisaki 	    "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d",
38006929Smisaki 	    rcr_p->rdc));
38013859Sml29623 
38023859Sml29623 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
38033859Sml29623 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
38043859Sml29623 
38053859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
38066929Smisaki 	    "<== nxge_unmap_rxdma_channel_cfg_ring"));
38073859Sml29623 }
38083859Sml29623 
38093859Sml29623 static nxge_status_t
38103859Sml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel,
38113859Sml29623     p_nxge_dma_common_t *dma_buf_p,
38123859Sml29623     p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
38133859Sml29623 {
38143859Sml29623 	p_rx_rbr_ring_t 	rbrp;
38153859Sml29623 	p_nxge_dma_common_t 	dma_bufp, tmp_bufp;
38163859Sml29623 	p_rx_msg_t 		*rx_msg_ring;
38173859Sml29623 	p_rx_msg_t 		rx_msg_p;
38183859Sml29623 	p_mblk_t 		mblk_p;
38193859Sml29623 
38203859Sml29623 	rxring_info_t *ring_info;
38213859Sml29623 	nxge_status_t status = NXGE_OK;
38223859Sml29623 	int i, j, index;
38233859Sml29623 	uint32_t size, bsize, nblocks, nmsgs;
38243859Sml29623 
38253859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
38266929Smisaki 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d",
38276929Smisaki 	    channel));
38283859Sml29623 
38293859Sml29623 	dma_bufp = tmp_bufp = *dma_buf_p;
38303859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
38316929Smisaki 	    " nxge_map_rxdma_channel_buf_ring: channel %d to map %d "
38326929Smisaki 	    "chunks bufp 0x%016llx",
38336929Smisaki 	    channel, num_chunks, dma_bufp));
38343859Sml29623 
38353859Sml29623 	nmsgs = 0;
38363859Sml29623 	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
38373859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
38386929Smisaki 		    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
38396929Smisaki 		    "bufp 0x%016llx nblocks %d nmsgs %d",
38406929Smisaki 		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
38413859Sml29623 		nmsgs += tmp_bufp->nblocks;
38423859Sml29623 	}
38433859Sml29623 	if (!nmsgs) {
38444185Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
38456929Smisaki 		    "<== nxge_map_rxdma_channel_buf_ring: channel %d "
38466929Smisaki 		    "no msg blocks",
38476929Smisaki 		    channel));
38483859Sml29623 		status = NXGE_ERROR;
38493859Sml29623 		goto nxge_map_rxdma_channel_buf_ring_exit;
38503859Sml29623 	}
38513859Sml29623 
38525170Stm144005 	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (*rbrp), KM_SLEEP);
38533859Sml29623 
38543859Sml29623 	size = nmsgs * sizeof (p_rx_msg_t);
38553859Sml29623 	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
38563859Sml29623 	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
38576929Smisaki 	    KM_SLEEP);
38583859Sml29623 
38593859Sml29623 	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
38606929Smisaki 	    (void *)nxgep->interrupt_cookie);
38613859Sml29623 	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
38626929Smisaki 	    (void *)nxgep->interrupt_cookie);
38633859Sml29623 	rbrp->rdc = channel;
38643859Sml29623 	rbrp->num_blocks = num_chunks;
38653859Sml29623 	rbrp->tnblocks = nmsgs;
38663859Sml29623 	rbrp->rbb_max = nmsgs;
38673859Sml29623 	rbrp->rbr_max_size = nmsgs;
38683859Sml29623 	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);
38693859Sml29623 
38703859Sml29623 	/*
38713859Sml29623 	 * Buffer sizes suggested by NIU architect:
38723859Sml29623 	 * 256, 1K and 2K (the 2K class grows to 4K or 8K with jumbo).
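	 *
	 * (Added: these become the RBR's three packet-buffer size
	 * classes, pkt_buf_size0/1/2.  Each RCR completion entry
	 * reports which class its buffer came from; RCR_PKTBUFSZ_0,
	 * the smallest class, is also the default rbr_bufsize_type
	 * used for the bcopy decision in
	 * nxge_map_rxdma_channel_cfg_ring().)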
38733859Sml29623 	 */
38743859Sml29623 
38753859Sml29623 	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
38763859Sml29623 	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
38773859Sml29623 	rbrp->npi_pkt_buf_size0 = SIZE_256B;
38783859Sml29623 
38793859Sml29623 	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
38803859Sml29623 	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
38813859Sml29623 	rbrp->npi_pkt_buf_size1 = SIZE_1KB;
38823859Sml29623 
38833859Sml29623 	rbrp->block_size = nxgep->rx_default_block_size;
38843859Sml29623 
38853859Sml29623 	if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) {
38863859Sml29623 		rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
38873859Sml29623 		rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
38883859Sml29623 		rbrp->npi_pkt_buf_size2 = SIZE_2KB;
38893859Sml29623 	} else {
38903859Sml29623 		if (rbrp->block_size >= 0x2000) {
38913859Sml29623 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K;
38923859Sml29623 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES;
38933859Sml29623 			rbrp->npi_pkt_buf_size2 = SIZE_8KB;
38943859Sml29623 		} else {
38953859Sml29623 			rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K;
38963859Sml29623 			rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES;
38973859Sml29623 			rbrp->npi_pkt_buf_size2 = SIZE_4KB;
38983859Sml29623 		}
38993859Sml29623 	}
39003859Sml29623 
39013859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39026929Smisaki 	    "==> nxge_map_rxdma_channel_buf_ring: channel %d "
39036929Smisaki 	    "actual rbr max %d rbb_max %d nmsgs %d "
39046929Smisaki 	    "rbrp->block_size %d default_block_size %d "
39056929Smisaki 	    "(config nxge_rbr_size %d nxge_rbr_spare_size %d)",
39066929Smisaki 	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
39076929Smisaki 	    rbrp->block_size, nxgep->rx_default_block_size,
39086929Smisaki 	    nxge_rbr_size, nxge_rbr_spare_size));
39093859Sml29623 
39103859Sml29623 	/* Map in buffers from the buffer pool. */
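	/*
	 * (Added note: ring_info->buffer[i] records each chunk's DVMA
	 * address, kernel address, and the index of its first block.
	 * nxge_rxbuf_index_info_init() later prepares this table so a
	 * packet's hardware buffer address can be mapped back to its
	 * rx_msg_t, apparently by binary search.)
	 */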
39113859Sml29623 	index = 0;
39123859Sml29623 	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
39133859Sml29623 		bsize = dma_bufp->block_size;
39143859Sml29623 		nblocks = dma_bufp->nblocks;
39155125Sjoycey #if defined(__i386)
39165125Sjoycey 		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
39175125Sjoycey #else
39183859Sml29623 		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
39195125Sjoycey #endif
39203859Sml29623 		ring_info->buffer[i].buf_index = i;
39213859Sml29623 		ring_info->buffer[i].buf_size = dma_bufp->alength;
39223859Sml29623 		ring_info->buffer[i].start_index = index;
39235125Sjoycey #if defined(__i386)
39245125Sjoycey 		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
39255125Sjoycey #else
39263859Sml29623 		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
39275125Sjoycey #endif
39283859Sml29623 
39293859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39306929Smisaki 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
39316929Smisaki 		    "chunk %d"
39326929Smisaki 		    " nblocks %d chunk_size %x block_size 0x%x "
39336929Smisaki 		    "dma_bufp $%p", channel, i,
39346929Smisaki 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
39356929Smisaki 		    dma_bufp));
39363859Sml29623 
39373859Sml29623 		for (j = 0; j < nblocks; j++) {
39383859Sml29623 			if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO,
39396929Smisaki 			    dma_bufp)) == NULL) {
39404185Sspeer 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
39416929Smisaki 				    "allocb failed (index %d i %d j %d)",
39426929Smisaki 				    index, i, j));
39434185Sspeer 				goto nxge_map_rxdma_channel_buf_ring_fail1;
39443859Sml29623 			}
39453859Sml29623 			rx_msg_ring[index] = rx_msg_p;
39463859Sml29623 			rx_msg_p->block_index = index;
39473859Sml29623 			rx_msg_p->shifted_addr = (uint32_t)
39486929Smisaki 			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
39496929Smisaki 			    RBR_BKADDR_SHIFT));
39503859Sml29623 
39513859Sml29623 			NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39526929Smisaki 			    "index %d j %d rx_msg_p $%p mblk %p",
39536929Smisaki 			    index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
39543859Sml29623 
39553859Sml29623 			mblk_p = rx_msg_p->rx_mblk_p;
39563859Sml29623 			mblk_p->b_wptr = mblk_p->b_rptr + bsize;
39575170Stm144005 
39585170Stm144005 			rbrp->rbr_ref_cnt++;
39593859Sml29623 			index++;
39603859Sml29623 			rx_msg_p->buf_dma.dma_channel = channel;
39613859Sml29623 		}
39626495Sspeer 
39636495Sspeer 		rbrp->rbr_alloc_type = DDI_MEM_ALLOC;
39646495Sspeer 		if (dma_bufp->contig_alloc_type) {
39656495Sspeer 			rbrp->rbr_alloc_type = CONTIG_MEM_ALLOC;
39666495Sspeer 		}
39676495Sspeer 
39686495Sspeer 		if (dma_bufp->kmem_alloc_type) {
39696495Sspeer 			rbrp->rbr_alloc_type = KMEM_ALLOC;
39706495Sspeer 		}
39716495Sspeer 
39726495Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39736495Sspeer 		    " nxge_map_rxdma_channel_buf_ring: map channel %d "
39746495Sspeer 		    "chunk %d"
39756495Sspeer 		    " nblocks %d chunk_size %x block_size 0x%x "
39766495Sspeer 		    "dma_bufp $%p",
39776495Sspeer 		    channel, i,
39786495Sspeer 		    dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize,
39796495Sspeer 		    dma_bufp));
39803859Sml29623 	}
39813859Sml29623 	if (i < rbrp->num_blocks) {
39823859Sml29623 		goto nxge_map_rxdma_channel_buf_ring_fail1;
39833859Sml29623 	}
39843859Sml29623 
39853859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39866929Smisaki 	    "nxge_map_rxdma_channel_buf_ring: done buf init "
39876929Smisaki 	    "channel %d msg block entries %d",
39886929Smisaki 	    channel, index));
39893859Sml29623 	ring_info->block_size_mask = bsize - 1;
39903859Sml29623 	rbrp->rx_msg_ring = rx_msg_ring;
39913859Sml29623 	rbrp->dma_bufp = dma_buf_p;
39923859Sml29623 	rbrp->ring_info = ring_info;
39933859Sml29623 
39943859Sml29623 	status = nxge_rxbuf_index_info_init(nxgep, rbrp);
39953859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
39966929Smisaki 	    " nxge_map_rxdma_channel_buf_ring: "
39976929Smisaki 	    "channel %d done buf info init", channel));
39983859Sml29623 
39995170Stm144005 	/*
40005170Stm144005 	 * Finally, permit nxge_freeb() to call nxge_post_page().
40015170Stm144005 	 */
40025170Stm144005 	rbrp->rbr_state = RBR_POSTING;
40035170Stm144005 
40043859Sml29623 	*rbr_p = rbrp;
40053859Sml29623 	goto nxge_map_rxdma_channel_buf_ring_exit;
40063859Sml29623 
40073859Sml29623 nxge_map_rxdma_channel_buf_ring_fail1:
40083859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40096929Smisaki 	    " nxge_map_rxdma_channel_buf_ring: failed channel %d "
40106929Smisaki 	    "(status 0x%x)", channel, status));
40113859Sml29623 
40123859Sml29623 	index--;
40133859Sml29623 	for (; index >= 0; index--) {
40143859Sml29623 		rx_msg_p = rx_msg_ring[index];
40153859Sml29623 		if (rx_msg_p != NULL) {
40163859Sml29623 			freeb(rx_msg_p->rx_mblk_p);
40173859Sml29623 			rx_msg_ring[index] = NULL;
40183859Sml29623 		}
40193859Sml29623 	}
40203859Sml29623 nxge_map_rxdma_channel_buf_ring_fail:
40213859Sml29623 	MUTEX_DESTROY(&rbrp->post_lock);
40223859Sml29623 	MUTEX_DESTROY(&rbrp->lock);
40233859Sml29623 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
40243859Sml29623 	KMEM_FREE(rx_msg_ring, size);
40253859Sml29623 	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));
40263859Sml29623 
40274185Sspeer 	status = NXGE_ERROR;
40284185Sspeer 
40293859Sml29623 nxge_map_rxdma_channel_buf_ring_exit:
40303859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40316929Smisaki 	    "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status));
40323859Sml29623 
40333859Sml29623 	return (status);
40343859Sml29623 }
40353859Sml29623 
40363859Sml29623 /*ARGSUSED*/
40373859Sml29623 static void
40383859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep,
40393859Sml29623     p_rx_rbr_ring_t rbr_p)
40403859Sml29623 {
40413859Sml29623 	p_rx_msg_t 		*rx_msg_ring;
40423859Sml29623 	p_rx_msg_t 		rx_msg_p;
40433859Sml29623 	rxring_info_t 		*ring_info;
40443859Sml29623 	int			i;
40453859Sml29623 	uint32_t		size;
40463859Sml29623 #ifdef NXGE_DEBUG
40473859Sml29623 	int			num_chunks;
40483859Sml29623 #endif
40493859Sml29623 
40503859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40516929Smisaki 	    "==> nxge_unmap_rxdma_channel_buf_ring"));
40523859Sml29623 	if (rbr_p == NULL) {
40533859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
40546929Smisaki 		    "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
40553859Sml29623 		return;
40563859Sml29623 	}
40573859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40586929Smisaki 	    "==> nxge_unmap_rxdma_channel_buf_ring: channel %d",
40596929Smisaki 	    rbr_p->rdc));
40603859Sml29623 
40613859Sml29623 	rx_msg_ring = rbr_p->rx_msg_ring;
40623859Sml29623 	ring_info = rbr_p->ring_info;
40633859Sml29623 
40643859Sml29623 	if (rx_msg_ring == NULL || ring_info == NULL) {
40656929Smisaki 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40666929Smisaki 		    "<== nxge_unmap_rxdma_channel_buf_ring: "
40676929Smisaki 		    "rx_msg_ring $%p ring_info $%p",
40686929Smisaki 		    rx_msg_ring, ring_info));
40693859Sml29623 		return;
40703859Sml29623 	}
40713859Sml29623 
40723859Sml29623 #ifdef NXGE_DEBUG
40733859Sml29623 	num_chunks = rbr_p->num_blocks;
40743859Sml29623 #endif
40753859Sml29623 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
40763859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40776929Smisaki 	    " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
40786929Smisaki 	    "tnblocks %d (max %d) size ptrs %d ",
40796929Smisaki 	    rbr_p->rdc, num_chunks,
40806929Smisaki 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
40813859Sml29623 
40823859Sml29623 	for (i = 0; i < rbr_p->tnblocks; i++) {
40833859Sml29623 		rx_msg_p = rx_msg_ring[i];
40843859Sml29623 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
40856929Smisaki 		    " nxge_unmap_rxdma_channel_buf_ring: "
40866929Smisaki 		    "rx_msg_p $%p",
40876929Smisaki 		    rx_msg_p));
40883859Sml29623 		if (rx_msg_p != NULL) {
40893859Sml29623 			freeb(rx_msg_p->rx_mblk_p);
40903859Sml29623 			rx_msg_ring[i] = NULL;
40913859Sml29623 		}
40923859Sml29623 	}
40933859Sml29623 
40945170Stm144005 	/*
40955170Stm144005 	 * We no longer may use the mutex <post_lock>. By setting
40965170Stm144005 	 * <rbr_state> to anything but POSTING, we prevent
40975170Stm144005 	 * nxge_post_page() from accessing a dead mutex.
40985170Stm144005 	 */
40995170Stm144005 	rbr_p->rbr_state = RBR_UNMAPPING;
41003859Sml29623 	MUTEX_DESTROY(&rbr_p->post_lock);
41015170Stm144005 
41023859Sml29623 	MUTEX_DESTROY(&rbr_p->lock);
41035170Stm144005 
41045170Stm144005 	if (rbr_p->rbr_ref_cnt == 0) {
41056495Sspeer 		/*
41066495Sspeer 		 * This is the normal state of affairs.
41076495Sspeer 		 * Need to free the following buffers:
41086495Sspeer 		 *  - data buffers
41096495Sspeer 		 *  - rx_msg ring
41106495Sspeer 		 *  - ring_info
41116495Sspeer 		 *  - rbr ring
41126495Sspeer 		 */
41136495Sspeer 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
41146495Sspeer 		    "unmap_rxdma_buf_ring: No outstanding - freeing "));
41156495Sspeer 		nxge_rxdma_databuf_free(rbr_p);
41166495Sspeer 		KMEM_FREE(ring_info, sizeof (rxring_info_t));
41176495Sspeer 		KMEM_FREE(rx_msg_ring, size);
41185170Stm144005 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
41195170Stm144005 	} else {
41205170Stm144005 		/*
41215170Stm144005 		 * Some of our buffers are still being used.
41225170Stm144005 		 * Therefore, tell nxge_freeb() this ring is
41235170Stm144005 		 * unmapped, so it may free <rbr_p> for us.
41245170Stm144005 		 */
41255170Stm144005 		rbr_p->rbr_state = RBR_UNMAPPED;
41265170Stm144005 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
41275170Stm144005 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
41285170Stm144005 		    rbr_p->rbr_ref_cnt,
41295170Stm144005 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
41305170Stm144005 	}
41313859Sml29623 
41323859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
41336929Smisaki 	    "<== nxge_unmap_rxdma_channel_buf_ring"));
41343859Sml29623 }
41353859Sml29623 
41366495Sspeer /*
41376495Sspeer  * nxge_rxdma_hw_start_common
41386495Sspeer  *
41396495Sspeer  * Arguments:
41406495Sspeer  * 	nxgep
41416495Sspeer  *
41426495Sspeer  * Notes:
41436495Sspeer  *
41446495Sspeer  * NPI/NXGE function calls:
41456495Sspeer  * 	nxge_init_fzc_rx_common();
41466495Sspeer  * 	nxge_init_fzc_rxdma_port();
41476495Sspeer  *
41486495Sspeer  * Registers accessed:
41496495Sspeer  *
41506495Sspeer  * Context:
41516495Sspeer  * 	Service domain
41526495Sspeer  */
41533859Sml29623 static nxge_status_t
41543859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep)
41553859Sml29623 {
41563859Sml29623 	nxge_status_t		status = NXGE_OK;
41573859Sml29623 
41583859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common"));
41593859Sml29623 
41603859Sml29623 	/*
41613859Sml29623 	 * Load the sharable parameters by writing to the
41623859Sml29623 	 * function zero control registers. These FZC registers
41633859Sml29623 	 * should be initialized only once for the entire chip.
41643859Sml29623 	 */
41653859Sml29623 	(void) nxge_init_fzc_rx_common(nxgep);
41663859Sml29623 
41673859Sml29623 	/*
41683859Sml29623 	 * Initialize the RXDMA port specific FZC control configurations.
41693859Sml29623 	 * These FZC registers pertain to each port.
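	 *
	 * (Added context: "FZC" appears to stand for the function-zero
	 * control space -- the per-chip and per-port registers that
	 * only the service domain may program, which is why this
	 * routine is documented above as service-domain only.)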
41703859Sml29623 	 */
41713859Sml29623 	(void) nxge_init_fzc_rxdma_port(nxgep);
41723859Sml29623 
41733859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_start_common"));
41743859Sml29623 
41753859Sml29623 	return (status);
41763859Sml29623 }
41773859Sml29623 
41783859Sml29623 static nxge_status_t
41796495Sspeer nxge_rxdma_hw_start(p_nxge_t nxgep, int channel)
41803859Sml29623 {
41813859Sml29623 	int			i, ndmas;
41823859Sml29623 	p_rx_rbr_rings_t 	rx_rbr_rings;
41833859Sml29623 	p_rx_rbr_ring_t		*rbr_rings;
41843859Sml29623 	p_rx_rcr_rings_t 	rx_rcr_rings;
41853859Sml29623 	p_rx_rcr_ring_t		*rcr_rings;
41863859Sml29623 	p_rx_mbox_areas_t 	rx_mbox_areas_p;
41873859Sml29623 	p_rx_mbox_t		*rx_mbox_p;
41883859Sml29623 	nxge_status_t		status = NXGE_OK;
41893859Sml29623 
41903859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start"));
41913859Sml29623 
41923859Sml29623 	rx_rbr_rings = nxgep->rx_rbr_rings;
41933859Sml29623 	rx_rcr_rings = nxgep->rx_rcr_rings;
41943859Sml29623 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
41953859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
41966929Smisaki 		    "<== nxge_rxdma_hw_start: NULL ring pointers"));
41973859Sml29623 		return (NXGE_ERROR);
41983859Sml29623 	}
41993859Sml29623 	ndmas = rx_rbr_rings->ndmas;
42003859Sml29623 	if (ndmas == 0) {
42013859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
42026929Smisaki 		    "<== nxge_rxdma_hw_start: no dma channel allocated"));
42033859Sml29623 		return (NXGE_ERROR);
42043859Sml29623 	}
42053859Sml29623 
42063859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42076929Smisaki 	    "==> nxge_rxdma_hw_start (ndmas %d)", ndmas));
42083859Sml29623 
42093859Sml29623 	rbr_rings = rx_rbr_rings->rbr_rings;
42103859Sml29623 	rcr_rings = rx_rcr_rings->rcr_rings;
42113859Sml29623 	rx_mbox_areas_p = nxgep->rx_mbox_areas_p;
42123859Sml29623 	if (rx_mbox_areas_p) {
42133859Sml29623 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
42143859Sml29623 	}
42153859Sml29623 
42166495Sspeer 	i = channel;
42176495Sspeer 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42186929Smisaki 	    "==> nxge_rxdma_hw_start (ndmas %d) channel %d",
42196929Smisaki 	    ndmas, channel));
42206495Sspeer 	status = nxge_rxdma_start_channel(nxgep, channel,
42216495Sspeer 	    (p_rx_rbr_ring_t)rbr_rings[i],
42226495Sspeer 	    (p_rx_rcr_ring_t)rcr_rings[i],
42236495Sspeer 	    (p_rx_mbox_t)rx_mbox_p[i]);
42246495Sspeer 	if (status != NXGE_OK) {
42256495Sspeer 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42266495Sspeer 		    "==> nxge_rxdma_hw_start: disable "
42276495Sspeer 		    "(status 0x%x channel %d)", status, channel));
42286495Sspeer 		return (status);
42293859Sml29623 	}
42303859Sml29623 
42313859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: "
42326929Smisaki 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
42336929Smisaki 	    rx_rbr_rings, rx_rcr_rings));
42343859Sml29623 
42353859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42366929Smisaki 	    "==> nxge_rxdma_hw_start: (status 0x%x)", status));
42373859Sml29623 
42383859Sml29623 	return (status);
42393859Sml29623 }
42403859Sml29623 
42413859Sml29623 static void
42426495Sspeer nxge_rxdma_hw_stop(p_nxge_t nxgep, int channel)
42433859Sml29623 {
42443859Sml29623 	p_rx_rbr_rings_t 	rx_rbr_rings;
42453859Sml29623 	p_rx_rcr_rings_t 	rx_rcr_rings;
42463859Sml29623 
42473859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop"));
42483859Sml29623 
42493859Sml29623 	rx_rbr_rings = nxgep->rx_rbr_rings;
42503859Sml29623 	rx_rcr_rings = nxgep->rx_rcr_rings;
42513859Sml29623 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
42523859Sml29623 		NXGE_DEBUG_MSG((nxgep, RX_CTL,
42536929Smisaki 		    "<== nxge_rxdma_hw_stop: NULL ring pointers"));
42543859Sml29623 		return;
42553859Sml29623 	}
42563859Sml29623 
42573859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
42586929Smisaki 	    "==> nxge_rxdma_hw_stop(channel %d)",
42596929Smisaki 	    channel));
42606495Sspeer 	(void) nxge_rxdma_stop_channel(nxgep, channel);
42613859Sml29623 
42623859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: "
42636929Smisaki 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
42646929Smisaki 	    rx_rbr_rings, rx_rcr_rings));
42653859Sml29623 
42663859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop"));
42673859Sml29623 }
42683859Sml29623 
42693859Sml29623 
42703859Sml29623 static nxge_status_t
42713859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
42723859Sml29623     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
42733859Sml29623 
42743859Sml29623 {
42753859Sml29623 	npi_handle_t		handle;
42763859Sml29623 	npi_status_t		rs = NPI_SUCCESS;
42773859Sml29623 	rx_dma_ctl_stat_t	cs;
42783859Sml29623 	rx_dma_ent_msk_t	ent_mask;
42793859Sml29623 	nxge_status_t		status = NXGE_OK;
42803859Sml29623 
42813859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));
42823859Sml29623 
42833859Sml29623 	handle = NXGE_DEV_NPI_HANDLE(nxgep);
42843859Sml29623 
42853859Sml29623 	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
42863859Sml29623 	    "npi handle addr $%p acc $%p",
42873859Sml29623 	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));
42883859Sml29623 
42896495Sspeer 	/* Reset RXDMA channel, but not if you're a guest. */
42906495Sspeer 	if (!isLDOMguest(nxgep)) {
42916495Sspeer 		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
42926495Sspeer 		if (rs != NPI_SUCCESS) {
42936495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
42946495Sspeer 			    "==> nxge_rxdma_start_channel: "
42956495Sspeer 			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
42966495Sspeer 			    channel, rs));
42976495Sspeer 			return (NXGE_ERROR | rs);
42986495Sspeer 		}
42996495Sspeer 
43006495Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
43016495Sspeer 		    "==> nxge_rxdma_start_channel: reset done: channel %d",
43026495Sspeer 		    channel));
43033859Sml29623 	}
43043859Sml29623 
43056495Sspeer #if defined(sun4v) && defined(NIU_LP_WORKAROUND)
43066495Sspeer 	if (isLDOMguest(nxgep))
43076495Sspeer 		(void) nxge_rdc_lp_conf(nxgep, channel);
43086495Sspeer #endif
43093859Sml29623 
43103859Sml29623 	/*
43113859Sml29623 	 * Initialize the RXDMA channel specific FZC control
43123859Sml29623 	 * configurations. These FZC registers pertain
43133859Sml29623 	 * to each RX channel (logical pages).
43143859Sml29623 	 */
43156495Sspeer 	if (!isLDOMguest(nxgep)) {
43166495Sspeer 		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
43176495Sspeer 		if (status != NXGE_OK) {
43186495Sspeer 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
43196495Sspeer 			    "==> nxge_rxdma_start_channel: "
43206495Sspeer 			    "init fzc rxdma failed (0x%08x channel %d)",
43216495Sspeer 			    status, channel));
43226495Sspeer 			return (status);
43236495Sspeer 		}
43246495Sspeer 
43256495Sspeer 		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
43266495Sspeer 		    "==> nxge_rxdma_start_channel: fzc done"));
43273859Sml29623 	}
43283859Sml29623 
43293859Sml29623 	/* Set up the interrupt event masks. */
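	/*
	 * (Added note: masking is done in two steps.  Here only
	 * RBREMPTY is masked, so that startup-time errors still fault;
	 * once the channel has been enabled below, WRED_DROP and
	 * PTDROP_PKT are masked as well, presumably because those
	 * events occur in normal operation.)
	 */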
static nxge_status_t
nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	/* Reset the RXDMA channel, but not if we are a guest. */
	if (!isLDOMguest(nxgep)) {
		rs = npi_rxdma_cfg_rdc_reset(handle, channel);
		if (rs != NPI_SUCCESS) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "npi_rxdma_cfg_rdc_reset(%d) returned 0x%08x",
			    channel, rs));
			return (NXGE_ERROR | rs);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: reset done: channel %d",
		    channel));
	}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	if (isLDOMguest(nxgep))
		(void) nxge_rdc_lp_conf(nxgep, channel);
#endif

	/*
	 * Initialize the RXDMA channel-specific FZC control
	 * configurations. These FZC registers pertain to each
	 * RX channel (i.e. logical pages).
	 */
	if (!isLDOMguest(nxgep)) {
		status = nxge_init_fzc_rxdma_channel(nxgep, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "==> nxge_rxdma_start_channel: "
			    "init fzc rxdma failed (0x%08x channel %d)",
			    status, channel));
			return (status);
		}

		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: fzc done"));
	}

	/* Set up the interrupt event masks. */
	ent_mask.value = 0;
	ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed "
		    "(0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: "
	    "event done: channel %d (mask 0x%016llx)",
	    channel, ent_mask.value));

	/* Initialize the receive DMA control and status register. */
	cs.value = 0;
	cs.bits.hdw.mex = 1;
	cs.bits.hdw.rcrthres = 1;
	cs.bits.hdw.rcrto = 1;
	cs.bits.hdw.rbr_empty = 1;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	/*
	 * Load the RXDMA descriptors, buffers and mailbox,
	 * then initialise and enable the DMA channel.
	 */
	status = nxge_enable_rxdma_channel(nxgep,
	    channel, rbr_p, rcr_p, mbox_p);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_start_channel: "
		    " enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
	    "==> nxge_rxdma_start_channel: enabled channel %d", channel));

	if (isLDOMguest(nxgep)) {
		/* Add the interrupt handler for this channel. */
		status = nxge_hio_intr_add(nxgep, VP_BOUND_RX, channel);
		if (status != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    " nxge_rxdma_start_channel: "
			    " nxge_hio_intr_add failed (0x%08x channel %d)",
			    status, channel));
		}
	}

	ent_mask.value = 0;
	ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK |
	    RX_DMA_ENT_MSK_PTDROP_PKT_MASK);
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, MEM2_CTL,
		    "==> nxge_rxdma_start_channel: "
		    "init rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: "
	    "control done - channel %d cs 0x%016llx", channel, cs.value));

	NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel"));

	return (NXGE_OK);
}
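
/*
 * nxge_rxdma_stop_channel
 *
 *	Quiesce one RXDMA channel. In the service domain this first
 *	disables the RxMAC and drains the IPP port (the A.9.2.6 and
 *	A.9.3.6 procedures noted inline), then resets the channel,
 *	masks all of its events, clears the control/status register,
 *	and disables the channel before re-enabling the RxMAC.
 */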
static nxge_status_t
nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	rx_dma_ctl_stat_t	cs;
	rx_dma_ent_msk_t	ent_mask;
	nxge_status_t		status = NXGE_OK;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel"));

	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: "
	    "npi handle addr $%p acc $%p",
	    nxgep->npi_handle.regp, nxgep->npi_handle.regh));

	if (!isLDOMguest(nxgep)) {
		/*
		 * Stop RxMAC = A.9.2.6
		 */
		if (nxge_rx_mac_disable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: "
			    "Failed to disable RxMAC"));
		}

		/*
		 * Drain IPP Port = A.9.3.6
		 */
		(void) nxge_ipp_drain(nxgep);
	}

	/* Reset the RXDMA channel. */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks: mask all events. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel,
	    &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (NXGE_ERROR | rs);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: event done"));

	/*
	 * Initialize the receive DMA control and status register.
	 */
	cs.value = 0;
	status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rxdma_stop_channel: control done"));

	/*
	 * Make sure the channel is disabled.
	 */
	status = nxge_disable_rxdma_channel(nxgep, channel);
	if (status != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_stop_channel: "
		    " disable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	if (!isLDOMguest(nxgep)) {
		/*
		 * Enable RxMAC = A.9.2.10
		 */
		if (nxge_rx_mac_enable(nxgep) != NXGE_OK) {
			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
			    "nxge_rxdma_stop_channel: Rx MAC still disabled"));
		}
	}

	NXGE_DEBUG_MSG((nxgep,
	    RX_CTL, "==> nxge_rxdma_stop_channel: disable done"));

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel"));

	return (NXGE_OK);
}
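
/*
 * nxge_rxdma_handle_sys_errors
 *
 *	Read the RX control/data FIFO status register and dispatch any
 *	system-level RDMC errors (ID mismatch, ZCP/IPP EOP errors) that
 *	belong to this port to nxge_rxdma_handle_port_errors().
 */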
nxge_status_t
nxge_rxdma_handle_sys_errors(p_nxge_t nxgep)
{
	npi_handle_t		handle;
	p_nxge_rdc_sys_stats_t	statsp;
	rx_ctl_dat_fifo_stat_t	stat;
	uint32_t		zcp_err_status;
	uint32_t		ipp_err_status;
	nxge_status_t		status = NXGE_OK;
	npi_status_t		rs = NPI_SUCCESS;
	boolean_t		my_err = B_FALSE;

	handle = nxgep->npi_handle;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat);
	if (rs != NPI_SUCCESS)
		return (NXGE_ERROR | rs);

	if (stat.bits.ldw.id_mismatch) {
		statsp->id_mismatch++;
		NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL,
		    NXGE_FM_EREPORT_RDMC_ID_MISMATCH);
		/* Global fatal error encountered */
	}

	if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) {
		switch (nxgep->mac.portnum) {
		case 0:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 1:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 2:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		case 3:
			if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) ||
			    (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) {
				my_err = B_TRUE;
				zcp_err_status = stat.bits.ldw.zcp_eop_err;
				ipp_err_status = stat.bits.ldw.ipp_eop_err;
			}
			break;
		default:
			return (NXGE_ERROR);
		}
	}

	if (my_err) {
		status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status,
		    zcp_err_status);
		if (status != NXGE_OK)
			return (status);
	}

	return (NXGE_OK);
}
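
/*
 * nxge_rxdma_handle_port_errors
 *
 *	Record IPP/ZCP EOP errors for this port in the RDC system
 *	statistics, report them through FMA, and trigger a full RX
 *	port recovery if either error is fatal to the port.
 */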
static nxge_status_t
nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status,
    uint32_t zcp_status)
{
	boolean_t		rxport_fatal = B_FALSE;
	p_nxge_rdc_sys_stats_t	statsp;
	nxge_status_t		status = NXGE_OK;
	uint8_t			portn;

	portn = nxgep->mac.portnum;
	statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats;

	if (ipp_status & (0x1 << portn)) {
		statsp->ipp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (zcp_status & (0x1 << portn)) {
		statsp->zcp_eop_err++;
		NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
		    NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR);
		rxport_fatal = B_TRUE;
	}

	if (rxport_fatal) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    " nxge_rxdma_handle_port_errors: "
		    " fatal error on Port #%d\n", portn));
		status = nxge_rx_port_fatal_err_recover(nxgep);
		if (status == NXGE_OK) {
			FM_SERVICE_RESTORED(nxgep);
		}
	}

	return (status);
}
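
/*
 * nxge_rxdma_fatal_err_recover
 *
 *	Attempt to recover a single RXDMA channel from a fatal error:
 *	disable the channel, mask its events and reset it, re-arm the
 *	RBR/RCR descriptor rings, reclaim any receive buffers that are
 *	no longer in use, and then restart the channel.
 */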
static nxge_status_t
nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel)
{
	npi_handle_t		handle;
	npi_status_t		rs = NPI_SUCCESS;
	nxge_status_t		status = NXGE_OK;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rx_dma_ent_msk_t	ent_mask;
	p_nxge_dma_common_t	dmap;
	int			ring_idx;
	uint32_t		ref_cnt;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		nxge_port_rcr_size;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxDMAChannel#%d error...", channel));

	/*
	 * Stop the DMA channel and wait for the stop-done bit.
	 * If the stop-done bit is not set, report an error.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop..."));

	ring_idx = nxge_rxdma_get_ring_index(nxgep, channel);
	rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx];
	rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx];

	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);
	MUTEX_ENTER(&rbrp->post_lock);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = npi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_disable_rxdma_channel:failed"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupts. */
	ent_mask.value = RX_DMA_ENT_MSK_ALL;
	rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (channel %d)",
		    channel));
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset the RXDMA channel. */
	rs = npi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != NPI_SUCCESS) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rxdma_fatal_err_recover: "
		    " reset rxdma failed (channel %d)", channel));
		goto fail;
	}

	nxge_port_rcr_size = nxgep->nxge_port_rcr_size;

	mboxp =
	    (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx];

	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (nxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (nxge_port_rcr_size - 1);

	dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size);

	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		ref_cnt = rx_msg_p->ref_cnt;
		if (ref_cnt != 1) {
			if (rx_msg_p->cur_usage_cnt !=
			    rx_msg_p->max_usage_cnt) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "buf[%d]: cur_usage_cnt = %d "
				    "max_usage_cnt = %d\n", i,
				    rx_msg_p->cur_usage_cnt,
				    rx_msg_p->max_usage_cnt));
			} else {
				/* The buffer can be re-posted. */
				rx_msg_p->free = B_TRUE;
				rx_msg_p->cur_usage_cnt = 0;
				rx_msg_p->max_usage_cnt = 0xbaddcafe;
				rx_msg_p->pkt_buf_size = 0;
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start..."));

	status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp);
	if (status != NXGE_OK) {
		goto fail;
	}

	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxDMAChannel#%d Restored",
	    channel));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover"));

	return (NXGE_OK);

fail:
	MUTEX_EXIT(&rbrp->post_lock);
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));

	return (NXGE_ERROR | rs);
}
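
/*
 * nxge_rx_port_fatal_err_recover
 *
 *	Recover the whole RX port: disable the RxMAC, run the fatal
 *	error recovery on every RDC this port owns, then reset and
 *	re-initialize the IPP and the RxMAC before re-enabling them.
 */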
nxge_status_t
nxge_rx_port_fatal_err_recover(p_nxge_t nxgep)
{
	nxge_grp_set_t *set = &nxgep->rx_set;
	nxge_status_t status = NXGE_OK;
	int rdc;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_port_fatal_err_recover"));
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovering from RxPort error..."));
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disabling RxMAC...\n"));

	if (nxge_rx_mac_disable(nxgep) != NXGE_OK)
		goto fail;

	NXGE_DELAY(1000);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stopping all RxDMA channels..."));

	for (rdc = 0; rdc < NXGE_MAX_RDCS; rdc++) {
		if ((1 << rdc) & set->owned.map) {
			if (nxge_rxdma_fatal_err_recover(nxgep, rdc)
			    != NXGE_OK) {
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "Could not recover channel %d", rdc));
			}
		}
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Resetting IPP..."));

	/* Reset IPP */
	if (nxge_ipp_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (nxge_rx_mac_reset(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to reset RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP..."));

	/* Re-Initialize IPP */
	if (nxge_ipp_init(nxgep) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init IPP"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = nxge_rx_mac_init(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to init RxMAC"));
		goto fail;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "nxge_rx_port_fatal_err_recover: "
		    "Failed to enable RxMAC"));
		goto fail;
	}

	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));

	return (NXGE_OK);

fail:
	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed"));
	return (status);
}
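
/*
 * nxge_rxdma_inject_err
 *
 *	Error-injection entry point for exercising the FMA error paths:
 *	sets the requested error bit in the RX_DMA_CTL_STAT or
 *	RX_CTL_DAT_FIFO_STAT debug register so that the hardware raises
 *	the corresponding error on the given channel or port.
 */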
void
nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan)
{
	rx_dma_ctl_stat_t	cs;
	rx_ctl_dat_fifo_stat_t	cdfs;

	switch (err_id) {
	case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR:
	case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR:
	case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR:
	case NXGE_FM_EREPORT_RDMC_RBR_TMOUT:
	case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR:
	case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS:
	case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR:
	case NXGE_FM_EREPORT_RDMC_RCRINCON:
	case NXGE_FM_EREPORT_RDMC_RCRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRFULL:
	case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE:
	case NXGE_FM_EREPORT_RDMC_CONFIG_ERR:
		RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, &cs.value);
		if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR)
			cs.bits.hdw.rcr_ack_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR)
			cs.bits.hdw.dc_fifo_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR)
			cs.bits.hdw.rcr_sha_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR)
			cs.bits.hdw.rbr_pre_par = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT)
			cs.bits.hdw.rbr_tmout = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR)
			cs.bits.hdw.rsp_cnt_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS)
			cs.bits.hdw.byte_en_bus = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR)
			cs.bits.hdw.rsp_dat_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR)
			cs.bits.hdw.config_err = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON)
			cs.bits.hdw.rcrincon = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL)
			cs.bits.hdw.rcrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL)
			cs.bits.hdw.rbrfull = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE)
			cs.bits.hdw.rbrlogpage = 1;
		else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE)
			cs.bits.hdw.cfiglogpage = 1;
#if defined(__i386)
		cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#else
		cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n",
		    cs.value);
#endif
		RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG,
		    chan, cs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_ID_MISMATCH:
	case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR:
	case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR:
		cdfs.value = 0;
		if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH)
			cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR)
			cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum);
		else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR)
			cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum);
#if defined(__i386)
		cmn_err(CE_NOTE,
		    "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#else
		cmn_err(CE_NOTE,
		    "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n",
		    cdfs.value);
#endif
		NXGE_REG_WR64(nxgep->npi_handle,
		    RX_CTL_DAT_FIFO_STAT_DBG_REG, cdfs.value);
		break;
	case NXGE_FM_EREPORT_RDMC_DCF_ERR:
		break;
	case NXGE_FM_EREPORT_RDMC_RCR_ERR:
		break;
	}
}
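
/*
 * nxge_rxdma_databuf_free
 *
 *	Free the receive data buffers attached to an RBR ring.
 *	DDI-allocated rings are not handled here; only kmem or
 *	contig_mem chunks are released through nxge_free_buf(),
 *	one chunk per ring_info buffer entry.
 */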
static void
nxge_rxdma_databuf_free(p_rx_rbr_ring_t rbr_p)
{
	rxring_info_t	*ring_info;
	int		index;
	uint32_t	chunk_size;
	uint64_t	kaddr;
	uint_t		num_blocks;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_databuf_free"));

	if (rbr_p == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL rbr pointer"));
		return;
	}

	if (rbr_p->rbr_alloc_type == DDI_MEM_ALLOC) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: DDI"));
		return;
	}

	ring_info = rbr_p->ring_info;
	if (ring_info == NULL) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_rxdma_databuf_free: NULL ring info"));
		return;
	}

	num_blocks = rbr_p->num_blocks;
	for (index = 0; index < num_blocks; index++) {
		kaddr = ring_info->buffer[index].kaddr;
		chunk_size = ring_info->buffer[index].buf_size;
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_rxdma_databuf_free: free chunk %d "
		    "kaddrp $%p chunk size %d",
		    index, kaddr, chunk_size));
		if (kaddr == NULL)
			continue;
		nxge_free_buf(rbr_p->rbr_alloc_type, kaddr, chunk_size);
		ring_info->buffer[index].kaddr = NULL;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_databuf_free"));
}

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
extern void contig_mem_free(void *, size_t);
#endif
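
/*
 * nxge_free_buf
 *
 *	Free one receive buffer chunk according to how it was
 *	allocated: KMEM_ALLOC chunks via KMEM_FREE(), or
 *	CONTIG_MEM_ALLOC chunks via contig_mem_free() on sun4v
 *	(NIU_LP_WORKAROUND).
 */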
void
nxge_free_buf(buf_alloc_type_t alloc_type, uint64_t kaddr, uint32_t buf_size)
{
	NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_free_buf"));

	if (kaddr == NULL || !buf_size) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "==> nxge_free_buf: invalid kaddr $%p size to free %d",
		    kaddr, buf_size));
		return;
	}

	switch (alloc_type) {
	case KMEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: freeing kmem $%p size %d",
		    kaddr, buf_size));
#if defined(__i386)
		KMEM_FREE((void *)(uint32_t)kaddr, buf_size);
#else
		KMEM_FREE((void *)kaddr, buf_size);
#endif
		break;

#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	case CONTIG_MEM_ALLOC:
		NXGE_DEBUG_MSG((NULL, DMA_CTL,
		    "==> nxge_free_buf: "
		    "freeing contig_mem kaddr $%p size %d",
		    kaddr, buf_size));
		contig_mem_free((void *)kaddr, buf_size);
		break;
#endif

	default:
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_free_buf: unsupported alloc type %d",
		    alloc_type));
		return;
	}

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_free_buf"));
}