/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
243859Sml29623 */ 253859Sml29623 263859Sml29623 #pragma ident "%Z%%M% %I% %E% SMI" 273859Sml29623 283859Sml29623 #include <sys/nxge/nxge_impl.h> 293859Sml29623 #include <sys/nxge/nxge_rxdma.h> 303859Sml29623 313859Sml29623 #define NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp) \ 323859Sml29623 (rdcgrp + nxgep->pt_config.hw_config.start_rdc_grpid) 333859Sml29623 #define NXGE_ACTUAL_RDC(nxgep, rdc) \ 343859Sml29623 (rdc + nxgep->pt_config.hw_config.start_rdc) 353859Sml29623 363859Sml29623 /* 373859Sml29623 * Globals: tunable parameters (/etc/system or adb) 383859Sml29623 * 393859Sml29623 */ 403859Sml29623 extern uint32_t nxge_rbr_size; 413859Sml29623 extern uint32_t nxge_rcr_size; 423859Sml29623 extern uint32_t nxge_rbr_spare_size; 433859Sml29623 443859Sml29623 extern uint32_t nxge_mblks_pending; 453859Sml29623 463859Sml29623 /* 473859Sml29623 * Tunable to reduce the amount of time spent in the 483859Sml29623 * ISR doing Rx Processing. 493859Sml29623 */ 503859Sml29623 extern uint32_t nxge_max_rx_pkts; 513859Sml29623 boolean_t nxge_jumbo_enable; 523859Sml29623 533859Sml29623 /* 543859Sml29623 * Tunables to manage the receive buffer blocks. 553859Sml29623 * 563859Sml29623 * nxge_rx_threshold_hi: copy all buffers. 573859Sml29623 * nxge_rx_bcopy_size_type: receive buffer block size type. 583859Sml29623 * nxge_rx_threshold_lo: copy only up to tunable block size type. 
593859Sml29623 */ 603859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_hi; 613859Sml29623 extern nxge_rxbuf_type_t nxge_rx_buf_size_type; 623859Sml29623 extern nxge_rxbuf_threshold_t nxge_rx_threshold_lo; 633859Sml29623 643859Sml29623 static nxge_status_t nxge_map_rxdma(p_nxge_t); 653859Sml29623 static void nxge_unmap_rxdma(p_nxge_t); 663859Sml29623 673859Sml29623 static nxge_status_t nxge_rxdma_hw_start_common(p_nxge_t); 683859Sml29623 static void nxge_rxdma_hw_stop_common(p_nxge_t); 693859Sml29623 703859Sml29623 static nxge_status_t nxge_rxdma_hw_start(p_nxge_t); 713859Sml29623 static void nxge_rxdma_hw_stop(p_nxge_t); 723859Sml29623 733859Sml29623 static nxge_status_t nxge_map_rxdma_channel(p_nxge_t, uint16_t, 743859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 753859Sml29623 uint32_t, 763859Sml29623 p_nxge_dma_common_t *, p_rx_rcr_ring_t *, 773859Sml29623 p_rx_mbox_t *); 783859Sml29623 static void nxge_unmap_rxdma_channel(p_nxge_t, uint16_t, 793859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 803859Sml29623 813859Sml29623 static nxge_status_t nxge_map_rxdma_channel_cfg_ring(p_nxge_t, 823859Sml29623 uint16_t, 833859Sml29623 p_nxge_dma_common_t *, p_rx_rbr_ring_t *, 843859Sml29623 p_rx_rcr_ring_t *, p_rx_mbox_t *); 853859Sml29623 static void nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t, 863859Sml29623 p_rx_rcr_ring_t, p_rx_mbox_t); 873859Sml29623 883859Sml29623 static nxge_status_t nxge_map_rxdma_channel_buf_ring(p_nxge_t, 893859Sml29623 uint16_t, 903859Sml29623 p_nxge_dma_common_t *, 913859Sml29623 p_rx_rbr_ring_t *, uint32_t); 923859Sml29623 static void nxge_unmap_rxdma_channel_buf_ring(p_nxge_t, 933859Sml29623 p_rx_rbr_ring_t); 943859Sml29623 953859Sml29623 static nxge_status_t nxge_rxdma_start_channel(p_nxge_t, uint16_t, 963859Sml29623 p_rx_rbr_ring_t, p_rx_rcr_ring_t, p_rx_mbox_t); 973859Sml29623 static nxge_status_t nxge_rxdma_stop_channel(p_nxge_t, uint16_t); 983859Sml29623 993859Sml29623 mblk_t * 1003859Sml29623 
nxge_rx_pkts(p_nxge_t, uint_t, p_nxge_ldv_t, 1013859Sml29623 p_rx_rcr_ring_t *, rx_dma_ctl_stat_t); 1023859Sml29623 1033859Sml29623 static void nxge_receive_packet(p_nxge_t, 1043859Sml29623 p_rx_rcr_ring_t, 1053859Sml29623 p_rcr_entry_t, 1063859Sml29623 boolean_t *, 1073859Sml29623 mblk_t **, mblk_t **); 1083859Sml29623 1093859Sml29623 nxge_status_t nxge_disable_rxdma_channel(p_nxge_t, uint16_t); 1103859Sml29623 1113859Sml29623 static p_rx_msg_t nxge_allocb(size_t, uint32_t, p_nxge_dma_common_t); 1123859Sml29623 static void nxge_freeb(p_rx_msg_t); 1133859Sml29623 static void nxge_rx_pkts_vring(p_nxge_t, uint_t, 1143859Sml29623 p_nxge_ldv_t, rx_dma_ctl_stat_t); 1153859Sml29623 static nxge_status_t nxge_rx_err_evnts(p_nxge_t, uint_t, 1163859Sml29623 p_nxge_ldv_t, rx_dma_ctl_stat_t); 1173859Sml29623 1183859Sml29623 static nxge_status_t nxge_rxdma_handle_port_errors(p_nxge_t, 1193859Sml29623 uint32_t, uint32_t); 1203859Sml29623 1213859Sml29623 static nxge_status_t nxge_rxbuf_index_info_init(p_nxge_t, 1223859Sml29623 p_rx_rbr_ring_t); 1233859Sml29623 1243859Sml29623 1253859Sml29623 static nxge_status_t 1263859Sml29623 nxge_rxdma_fatal_err_recover(p_nxge_t, uint16_t); 1273859Sml29623 1283859Sml29623 nxge_status_t 1293859Sml29623 nxge_rx_port_fatal_err_recover(p_nxge_t); 1303859Sml29623 1313859Sml29623 static uint16_t 1323859Sml29623 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb); 1333859Sml29623 1343859Sml29623 nxge_status_t 1353859Sml29623 nxge_init_rxdma_channels(p_nxge_t nxgep) 1363859Sml29623 { 1373859Sml29623 nxge_status_t status = NXGE_OK; 1383859Sml29623 1393859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_rxdma_channels")); 1403859Sml29623 1413859Sml29623 status = nxge_map_rxdma(nxgep); 1423859Sml29623 if (status != NXGE_OK) { 1433859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1443859Sml29623 "<== nxge_init_rxdma: status 0x%x", status)); 1453859Sml29623 return (status); 1463859Sml29623 } 1473859Sml29623 1483859Sml29623 
status = nxge_rxdma_hw_start_common(nxgep); 1493859Sml29623 if (status != NXGE_OK) { 1503859Sml29623 nxge_unmap_rxdma(nxgep); 1513859Sml29623 } 1523859Sml29623 1533859Sml29623 status = nxge_rxdma_hw_start(nxgep); 1543859Sml29623 if (status != NXGE_OK) { 1553859Sml29623 nxge_unmap_rxdma(nxgep); 1563859Sml29623 } 1573859Sml29623 1583859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1593859Sml29623 "<== nxge_init_rxdma_channels: status 0x%x", status)); 1603859Sml29623 1613859Sml29623 return (status); 1623859Sml29623 } 1633859Sml29623 1643859Sml29623 void 1653859Sml29623 nxge_uninit_rxdma_channels(p_nxge_t nxgep) 1663859Sml29623 { 1673859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_rxdma_channels")); 1683859Sml29623 1693859Sml29623 nxge_rxdma_hw_stop(nxgep); 1703859Sml29623 nxge_rxdma_hw_stop_common(nxgep); 1713859Sml29623 nxge_unmap_rxdma(nxgep); 1723859Sml29623 1733859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 1743859Sml29623 "<== nxge_uinit_rxdma_channels")); 1753859Sml29623 } 1763859Sml29623 1773859Sml29623 nxge_status_t 1783859Sml29623 nxge_reset_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 1793859Sml29623 { 1803859Sml29623 npi_handle_t handle; 1813859Sml29623 npi_status_t rs = NPI_SUCCESS; 1823859Sml29623 nxge_status_t status = NXGE_OK; 1833859Sml29623 1843859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_reset_rxdma_channel")); 1853859Sml29623 1863859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1873859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 1883859Sml29623 1893859Sml29623 if (rs != NPI_SUCCESS) { 1903859Sml29623 status = NXGE_ERROR | rs; 1913859Sml29623 } 1923859Sml29623 1933859Sml29623 return (status); 1943859Sml29623 } 1953859Sml29623 1963859Sml29623 void 1973859Sml29623 nxge_rxdma_regs_dump_channels(p_nxge_t nxgep) 1983859Sml29623 { 1993859Sml29623 int i, ndmas; 2003859Sml29623 uint16_t channel; 2013859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 2023859Sml29623 p_rx_rbr_ring_t *rbr_rings; 2033859Sml29623 npi_handle_t handle; 
2043859Sml29623 2053859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_regs_dump_channels")); 2063859Sml29623 2073859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2083859Sml29623 (void) npi_rxdma_dump_fzc_regs(handle); 2093859Sml29623 2103859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 2113859Sml29623 if (rx_rbr_rings == NULL) { 2123859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2133859Sml29623 "<== nxge_rxdma_regs_dump_channels: " 2143859Sml29623 "NULL ring pointer")); 2153859Sml29623 return; 2163859Sml29623 } 2173859Sml29623 if (rx_rbr_rings->rbr_rings == NULL) { 2183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2193859Sml29623 "<== nxge_rxdma_regs_dump_channels: " 2203859Sml29623 " NULL rbr rings pointer")); 2213859Sml29623 return; 2223859Sml29623 } 2233859Sml29623 2243859Sml29623 ndmas = rx_rbr_rings->ndmas; 2253859Sml29623 if (!ndmas) { 2263859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2273859Sml29623 "<== nxge_rxdma_regs_dump_channels: no channel")); 2283859Sml29623 return; 2293859Sml29623 } 2303859Sml29623 2313859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 2323859Sml29623 "==> nxge_rxdma_regs_dump_channels (ndmas %d)", ndmas)); 2333859Sml29623 2343859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 2353859Sml29623 for (i = 0; i < ndmas; i++) { 2363859Sml29623 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 2373859Sml29623 continue; 2383859Sml29623 } 2393859Sml29623 channel = rbr_rings[i]->rdc; 2403859Sml29623 (void) nxge_dump_rxdma_channel(nxgep, channel); 2413859Sml29623 } 2423859Sml29623 2433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_regs_dump")); 2443859Sml29623 2453859Sml29623 } 2463859Sml29623 2473859Sml29623 nxge_status_t 2483859Sml29623 nxge_dump_rxdma_channel(p_nxge_t nxgep, uint8_t channel) 2493859Sml29623 { 2503859Sml29623 npi_handle_t handle; 2513859Sml29623 npi_status_t rs = NPI_SUCCESS; 2523859Sml29623 nxge_status_t status = NXGE_OK; 2533859Sml29623 2543859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_dump_rxdma_channel")); 
2553859Sml29623 2563859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2573859Sml29623 rs = npi_rxdma_dump_rdc_regs(handle, channel); 2583859Sml29623 2593859Sml29623 if (rs != NPI_SUCCESS) { 2603859Sml29623 status = NXGE_ERROR | rs; 2613859Sml29623 } 2623859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_dump_rxdma_channel")); 2633859Sml29623 return (status); 2643859Sml29623 } 2653859Sml29623 2663859Sml29623 nxge_status_t 2673859Sml29623 nxge_init_rxdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 2683859Sml29623 p_rx_dma_ent_msk_t mask_p) 2693859Sml29623 { 2703859Sml29623 npi_handle_t handle; 2713859Sml29623 npi_status_t rs = NPI_SUCCESS; 2723859Sml29623 nxge_status_t status = NXGE_OK; 2733859Sml29623 2743859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2753859Sml29623 "<== nxge_init_rxdma_channel_event_mask")); 2763859Sml29623 2773859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2783859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, mask_p); 2793859Sml29623 if (rs != NPI_SUCCESS) { 2803859Sml29623 status = NXGE_ERROR | rs; 2813859Sml29623 } 2823859Sml29623 2833859Sml29623 return (status); 2843859Sml29623 } 2853859Sml29623 2863859Sml29623 nxge_status_t 2873859Sml29623 nxge_init_rxdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel, 2883859Sml29623 p_rx_dma_ctl_stat_t cs_p) 2893859Sml29623 { 2903859Sml29623 npi_handle_t handle; 2913859Sml29623 npi_status_t rs = NPI_SUCCESS; 2923859Sml29623 nxge_status_t status = NXGE_OK; 2933859Sml29623 2943859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 2953859Sml29623 "<== nxge_init_rxdma_channel_cntl_stat")); 2963859Sml29623 2973859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 2983859Sml29623 rs = npi_rxdma_control_status(handle, OP_SET, channel, cs_p); 2993859Sml29623 3003859Sml29623 if (rs != NPI_SUCCESS) { 3013859Sml29623 status = NXGE_ERROR | rs; 3023859Sml29623 } 3033859Sml29623 3043859Sml29623 return (status); 3053859Sml29623 } 3063859Sml29623 3073859Sml29623 nxge_status_t 3083859Sml29623 
nxge_rxdma_cfg_rdcgrp_default_rdc(p_nxge_t nxgep, uint8_t rdcgrp, 3093859Sml29623 uint8_t rdc) 3103859Sml29623 { 3113859Sml29623 npi_handle_t handle; 3123859Sml29623 npi_status_t rs = NPI_SUCCESS; 3133859Sml29623 p_nxge_dma_pt_cfg_t p_dma_cfgp; 3143859Sml29623 p_nxge_rdc_grp_t rdc_grp_p; 3153859Sml29623 uint8_t actual_rdcgrp, actual_rdc; 3163859Sml29623 3173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 3183859Sml29623 " ==> nxge_rxdma_cfg_rdcgrp_default_rdc")); 3193859Sml29623 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config; 3203859Sml29623 3213859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3223859Sml29623 3233859Sml29623 rdc_grp_p = &p_dma_cfgp->rdc_grps[rdcgrp]; 3243859Sml29623 rdc_grp_p->rdc[0] = rdc; 3253859Sml29623 3263859Sml29623 actual_rdcgrp = NXGE_ACTUAL_RDCGRP(nxgep, rdcgrp); 3273859Sml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 3283859Sml29623 3293859Sml29623 rs = npi_rxdma_cfg_rdc_table_default_rdc(handle, actual_rdcgrp, 3303859Sml29623 actual_rdc); 3313859Sml29623 3323859Sml29623 if (rs != NPI_SUCCESS) { 3333859Sml29623 return (NXGE_ERROR | rs); 3343859Sml29623 } 3353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 3363859Sml29623 " <== nxge_rxdma_cfg_rdcgrp_default_rdc")); 3373859Sml29623 return (NXGE_OK); 3383859Sml29623 } 3393859Sml29623 3403859Sml29623 nxge_status_t 3413859Sml29623 nxge_rxdma_cfg_port_default_rdc(p_nxge_t nxgep, uint8_t port, uint8_t rdc) 3423859Sml29623 { 3433859Sml29623 npi_handle_t handle; 3443859Sml29623 3453859Sml29623 uint8_t actual_rdc; 3463859Sml29623 npi_status_t rs = NPI_SUCCESS; 3473859Sml29623 3483859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 3493859Sml29623 " ==> nxge_rxdma_cfg_port_default_rdc")); 3503859Sml29623 3513859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3523859Sml29623 actual_rdc = NXGE_ACTUAL_RDC(nxgep, rdc); 3533859Sml29623 rs = npi_rxdma_cfg_default_port_rdc(handle, port, actual_rdc); 3543859Sml29623 3553859Sml29623 3563859Sml29623 if (rs != NPI_SUCCESS) { 3573859Sml29623 return (NXGE_ERROR | rs); 
3583859Sml29623 } 3593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 3603859Sml29623 " <== nxge_rxdma_cfg_port_default_rdc")); 3613859Sml29623 3623859Sml29623 return (NXGE_OK); 3633859Sml29623 } 3643859Sml29623 3653859Sml29623 nxge_status_t 3663859Sml29623 nxge_rxdma_cfg_rcr_threshold(p_nxge_t nxgep, uint8_t channel, 3673859Sml29623 uint16_t pkts) 3683859Sml29623 { 3693859Sml29623 npi_status_t rs = NPI_SUCCESS; 3703859Sml29623 npi_handle_t handle; 3713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 3723859Sml29623 " ==> nxge_rxdma_cfg_rcr_threshold")); 3733859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3743859Sml29623 3753859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, pkts); 3763859Sml29623 3773859Sml29623 if (rs != NPI_SUCCESS) { 3783859Sml29623 return (NXGE_ERROR | rs); 3793859Sml29623 } 3803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_threshold")); 3813859Sml29623 return (NXGE_OK); 3823859Sml29623 } 3833859Sml29623 3843859Sml29623 nxge_status_t 3853859Sml29623 nxge_rxdma_cfg_rcr_timeout(p_nxge_t nxgep, uint8_t channel, 3863859Sml29623 uint16_t tout, uint8_t enable) 3873859Sml29623 { 3883859Sml29623 npi_status_t rs = NPI_SUCCESS; 3893859Sml29623 npi_handle_t handle; 3903859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " ==> nxge_rxdma_cfg_rcr_timeout")); 3913859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3923859Sml29623 if (enable == 0) { 3933859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout_disable(handle, channel); 3943859Sml29623 } else { 3953859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 3963859Sml29623 tout); 3973859Sml29623 } 3983859Sml29623 3993859Sml29623 if (rs != NPI_SUCCESS) { 4003859Sml29623 return (NXGE_ERROR | rs); 4013859Sml29623 } 4023859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, " <== nxge_rxdma_cfg_rcr_timeout")); 4033859Sml29623 return (NXGE_OK); 4043859Sml29623 } 4053859Sml29623 4063859Sml29623 nxge_status_t 4073859Sml29623 nxge_enable_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 
4083859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 4093859Sml29623 { 4103859Sml29623 npi_handle_t handle; 4113859Sml29623 rdc_desc_cfg_t rdc_desc; 4123859Sml29623 p_rcrcfig_b_t cfgb_p; 4133859Sml29623 npi_status_t rs = NPI_SUCCESS; 4143859Sml29623 4153859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel")); 4163859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 4173859Sml29623 /* 4183859Sml29623 * Use configuration data composed at init time. 4193859Sml29623 * Write to hardware the receive ring configurations. 4203859Sml29623 */ 4213859Sml29623 rdc_desc.mbox_enable = 1; 4223859Sml29623 rdc_desc.mbox_addr = mbox_p->mbox_addr; 4233859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 4243859Sml29623 "==> nxge_enable_rxdma_channel: mboxp $%p($%p)", 4253859Sml29623 mbox_p->mbox_addr, rdc_desc.mbox_addr)); 4263859Sml29623 4273859Sml29623 rdc_desc.rbr_len = rbr_p->rbb_max; 4283859Sml29623 rdc_desc.rbr_addr = rbr_p->rbr_addr; 4293859Sml29623 4303859Sml29623 switch (nxgep->rx_bksize_code) { 4313859Sml29623 case RBR_BKSIZE_4K: 4323859Sml29623 rdc_desc.page_size = SIZE_4KB; 4333859Sml29623 break; 4343859Sml29623 case RBR_BKSIZE_8K: 4353859Sml29623 rdc_desc.page_size = SIZE_8KB; 4363859Sml29623 break; 4373859Sml29623 case RBR_BKSIZE_16K: 4383859Sml29623 rdc_desc.page_size = SIZE_16KB; 4393859Sml29623 break; 4403859Sml29623 case RBR_BKSIZE_32K: 4413859Sml29623 rdc_desc.page_size = SIZE_32KB; 4423859Sml29623 break; 4433859Sml29623 } 4443859Sml29623 4453859Sml29623 rdc_desc.size0 = rbr_p->npi_pkt_buf_size0; 4463859Sml29623 rdc_desc.valid0 = 1; 4473859Sml29623 4483859Sml29623 rdc_desc.size1 = rbr_p->npi_pkt_buf_size1; 4493859Sml29623 rdc_desc.valid1 = 1; 4503859Sml29623 4513859Sml29623 rdc_desc.size2 = rbr_p->npi_pkt_buf_size2; 4523859Sml29623 rdc_desc.valid2 = 1; 4533859Sml29623 4543859Sml29623 rdc_desc.full_hdr = rcr_p->full_hdr_flag; 4553859Sml29623 rdc_desc.offset = rcr_p->sw_priv_hdr_len; 4563859Sml29623 4573859Sml29623 
rdc_desc.rcr_len = rcr_p->comp_size; 4583859Sml29623 rdc_desc.rcr_addr = rcr_p->rcr_addr; 4593859Sml29623 4603859Sml29623 cfgb_p = &(rcr_p->rcr_cfgb); 4613859Sml29623 rdc_desc.rcr_threshold = cfgb_p->bits.ldw.pthres; 4623859Sml29623 rdc_desc.rcr_timeout = cfgb_p->bits.ldw.timeout; 4633859Sml29623 rdc_desc.rcr_timeout_enable = cfgb_p->bits.ldw.entout; 4643859Sml29623 4653859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 4663859Sml29623 "rbr_len qlen %d pagesize code %d rcr_len %d", 4673859Sml29623 rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len)); 4683859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_enable_rxdma_channel: " 4693859Sml29623 "size 0 %d size 1 %d size 2 %d", 4703859Sml29623 rbr_p->npi_pkt_buf_size0, rbr_p->npi_pkt_buf_size1, 4713859Sml29623 rbr_p->npi_pkt_buf_size2)); 4723859Sml29623 4733859Sml29623 rs = npi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc); 4743859Sml29623 if (rs != NPI_SUCCESS) { 4753859Sml29623 return (NXGE_ERROR | rs); 4763859Sml29623 } 4773859Sml29623 4783859Sml29623 /* 4793859Sml29623 * Enable the timeout and threshold. 4803859Sml29623 */ 4813859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, channel, 4823859Sml29623 rdc_desc.rcr_threshold); 4833859Sml29623 if (rs != NPI_SUCCESS) { 4843859Sml29623 return (NXGE_ERROR | rs); 4853859Sml29623 } 4863859Sml29623 4873859Sml29623 rs = npi_rxdma_cfg_rdc_rcr_timeout(handle, channel, 4883859Sml29623 rdc_desc.rcr_timeout); 4893859Sml29623 if (rs != NPI_SUCCESS) { 4903859Sml29623 return (NXGE_ERROR | rs); 4913859Sml29623 } 4923859Sml29623 4933859Sml29623 /* Enable the DMA */ 4943859Sml29623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 4953859Sml29623 if (rs != NPI_SUCCESS) { 4963859Sml29623 return (NXGE_ERROR | rs); 4973859Sml29623 } 4983859Sml29623 4993859Sml29623 /* Kick the DMA engine. 
*/ 5003859Sml29623 npi_rxdma_rdc_rbr_kick(handle, channel, rbr_p->rbb_max); 5013859Sml29623 /* Clear the rbr empty bit */ 5023859Sml29623 (void) npi_rxdma_channel_rbr_empty_clear(handle, channel); 5033859Sml29623 5043859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_enable_rxdma_channel")); 5053859Sml29623 5063859Sml29623 return (NXGE_OK); 5073859Sml29623 } 5083859Sml29623 5093859Sml29623 nxge_status_t 5103859Sml29623 nxge_disable_rxdma_channel(p_nxge_t nxgep, uint16_t channel) 5113859Sml29623 { 5123859Sml29623 npi_handle_t handle; 5133859Sml29623 npi_status_t rs = NPI_SUCCESS; 5143859Sml29623 5153859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_disable_rxdma_channel")); 5163859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5173859Sml29623 5183859Sml29623 /* disable the DMA */ 5193859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 5203859Sml29623 if (rs != NPI_SUCCESS) { 5213859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 5223859Sml29623 "<== nxge_disable_rxdma_channel:failed (0x%x)", 5233859Sml29623 rs)); 5243859Sml29623 return (NXGE_ERROR | rs); 5253859Sml29623 } 5263859Sml29623 5273859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_disable_rxdma_channel")); 5283859Sml29623 return (NXGE_OK); 5293859Sml29623 } 5303859Sml29623 5313859Sml29623 nxge_status_t 5323859Sml29623 nxge_rxdma_channel_rcrflush(p_nxge_t nxgep, uint8_t channel) 5333859Sml29623 { 5343859Sml29623 npi_handle_t handle; 5353859Sml29623 nxge_status_t status = NXGE_OK; 5363859Sml29623 5373859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 5383859Sml29623 "<== nxge_init_rxdma_channel_rcrflush")); 5393859Sml29623 5403859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 5413859Sml29623 npi_rxdma_rdc_rcr_flush(handle, channel); 5423859Sml29623 5433859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 5443859Sml29623 "<== nxge_init_rxdma_channel_rcrflsh")); 5453859Sml29623 return (status); 5463859Sml29623 5473859Sml29623 } 5483859Sml29623 5493859Sml29623 #define MID_INDEX(l, r) ((r + l + 1) >> 1) 
5503859Sml29623 5513859Sml29623 #define TO_LEFT -1 5523859Sml29623 #define TO_RIGHT 1 5533859Sml29623 #define BOTH_RIGHT (TO_RIGHT + TO_RIGHT) 5543859Sml29623 #define BOTH_LEFT (TO_LEFT + TO_LEFT) 5553859Sml29623 #define IN_MIDDLE (TO_RIGHT + TO_LEFT) 5563859Sml29623 #define NO_HINT 0xffffffff 5573859Sml29623 5583859Sml29623 /*ARGSUSED*/ 5593859Sml29623 nxge_status_t 5603859Sml29623 nxge_rxbuf_pp_to_vp(p_nxge_t nxgep, p_rx_rbr_ring_t rbr_p, 5613859Sml29623 uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp, 5623859Sml29623 uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index) 5633859Sml29623 { 5643859Sml29623 int bufsize; 5653859Sml29623 uint64_t pktbuf_pp; 5663859Sml29623 uint64_t dvma_addr; 5673859Sml29623 rxring_info_t *ring_info; 5683859Sml29623 int base_side, end_side; 5693859Sml29623 int r_index, l_index, anchor_index; 5703859Sml29623 int found, search_done; 5713859Sml29623 uint32_t offset, chunk_size, block_size, page_size_mask; 5723859Sml29623 uint32_t chunk_index, block_index, total_index; 5733859Sml29623 int max_iterations, iteration; 5743859Sml29623 rxbuf_index_info_t *bufinfo; 5753859Sml29623 5763859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_rxbuf_pp_to_vp")); 5773859Sml29623 5783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 5793859Sml29623 "==> nxge_rxbuf_pp_to_vp: buf_pp $%p btype %d", 5803859Sml29623 pkt_buf_addr_pp, 5813859Sml29623 pktbufsz_type)); 5825125Sjoycey #if defined(__i386) 5835125Sjoycey pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp; 5845125Sjoycey #else 5853859Sml29623 pktbuf_pp = (uint64_t)pkt_buf_addr_pp; 5865125Sjoycey #endif 5873859Sml29623 5883859Sml29623 switch (pktbufsz_type) { 5893859Sml29623 case 0: 5903859Sml29623 bufsize = rbr_p->pkt_buf_size0; 5913859Sml29623 break; 5923859Sml29623 case 1: 5933859Sml29623 bufsize = rbr_p->pkt_buf_size1; 5943859Sml29623 break; 5953859Sml29623 case 2: 5963859Sml29623 bufsize = rbr_p->pkt_buf_size2; 5973859Sml29623 break; 5983859Sml29623 case RCR_SINGLE_BLOCK: 5993859Sml29623 
bufsize = 0; 6003859Sml29623 anchor_index = 0; 6013859Sml29623 break; 6023859Sml29623 default: 6033859Sml29623 return (NXGE_ERROR); 6043859Sml29623 } 6053859Sml29623 6063859Sml29623 if (rbr_p->num_blocks == 1) { 6073859Sml29623 anchor_index = 0; 6083859Sml29623 ring_info = rbr_p->ring_info; 6093859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 6103859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 6113859Sml29623 "==> nxge_rxbuf_pp_to_vp: (found, 1 block) " 6123859Sml29623 "buf_pp $%p btype %d anchor_index %d " 6133859Sml29623 "bufinfo $%p", 6143859Sml29623 pkt_buf_addr_pp, 6153859Sml29623 pktbufsz_type, 6163859Sml29623 anchor_index, 6173859Sml29623 bufinfo)); 6183859Sml29623 6193859Sml29623 goto found_index; 6203859Sml29623 } 6213859Sml29623 6223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 6233859Sml29623 "==> nxge_rxbuf_pp_to_vp: " 6243859Sml29623 "buf_pp $%p btype %d anchor_index %d", 6253859Sml29623 pkt_buf_addr_pp, 6263859Sml29623 pktbufsz_type, 6273859Sml29623 anchor_index)); 6283859Sml29623 6293859Sml29623 ring_info = rbr_p->ring_info; 6303859Sml29623 found = B_FALSE; 6313859Sml29623 bufinfo = (rxbuf_index_info_t *)ring_info->buffer; 6323859Sml29623 iteration = 0; 6333859Sml29623 max_iterations = ring_info->max_iterations; 6343859Sml29623 /* 6353859Sml29623 * First check if this block has been seen 6363859Sml29623 * recently. This is indicated by a hint which 6373859Sml29623 * is initialized when the first buffer of the block 6383859Sml29623 * is seen. The hint is reset when the last buffer of 6393859Sml29623 * the block has been processed. 6403859Sml29623 * As three block sizes are supported, three hints 6413859Sml29623 * are kept. The idea behind the hints is that once 6423859Sml29623 * the hardware uses a block for a buffer of that 6433859Sml29623 * size, it will use it exclusively for that size 6443859Sml29623 * and will use it until it is exhausted. 
It is assumed 6453859Sml29623 * that there would a single block being used for the same 6463859Sml29623 * buffer sizes at any given time. 6473859Sml29623 */ 6483859Sml29623 if (ring_info->hint[pktbufsz_type] != NO_HINT) { 6493859Sml29623 anchor_index = ring_info->hint[pktbufsz_type]; 6503859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 6513859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 6523859Sml29623 if ((pktbuf_pp >= dvma_addr) && 6533859Sml29623 (pktbuf_pp < (dvma_addr + chunk_size))) { 6543859Sml29623 found = B_TRUE; 6553859Sml29623 /* 6563859Sml29623 * check if this is the last buffer in the block 6573859Sml29623 * If so, then reset the hint for the size; 6583859Sml29623 */ 6593859Sml29623 6603859Sml29623 if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size)) 6613859Sml29623 ring_info->hint[pktbufsz_type] = NO_HINT; 6623859Sml29623 } 6633859Sml29623 } 6643859Sml29623 6653859Sml29623 if (found == B_FALSE) { 6663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 6673859Sml29623 "==> nxge_rxbuf_pp_to_vp: (!found)" 6683859Sml29623 "buf_pp $%p btype %d anchor_index %d", 6693859Sml29623 pkt_buf_addr_pp, 6703859Sml29623 pktbufsz_type, 6713859Sml29623 anchor_index)); 6723859Sml29623 6733859Sml29623 /* 6743859Sml29623 * This is the first buffer of the block of this 6753859Sml29623 * size. Need to search the whole information 6763859Sml29623 * array. 6773859Sml29623 * the search algorithm uses a binary tree search 6783859Sml29623 * algorithm. It assumes that the information is 6793859Sml29623 * already sorted with increasing order 6803859Sml29623 * info[0] < info[1] < info[2] .... 
< info[n-1] 6813859Sml29623 * where n is the size of the information array 6823859Sml29623 */ 6833859Sml29623 r_index = rbr_p->num_blocks - 1; 6843859Sml29623 l_index = 0; 6853859Sml29623 search_done = B_FALSE; 6863859Sml29623 anchor_index = MID_INDEX(r_index, l_index); 6873859Sml29623 while (search_done == B_FALSE) { 6883859Sml29623 if ((r_index == l_index) || 6893859Sml29623 (iteration >= max_iterations)) 6903859Sml29623 search_done = B_TRUE; 6913859Sml29623 end_side = TO_RIGHT; /* to the right */ 6923859Sml29623 base_side = TO_LEFT; /* to the left */ 6933859Sml29623 /* read the DVMA address information and sort it */ 6943859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 6953859Sml29623 chunk_size = bufinfo[anchor_index].buf_size; 6963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 6973859Sml29623 "==> nxge_rxbuf_pp_to_vp: (searching)" 6983859Sml29623 "buf_pp $%p btype %d " 6993859Sml29623 "anchor_index %d chunk_size %d dvmaaddr $%p", 7003859Sml29623 pkt_buf_addr_pp, 7013859Sml29623 pktbufsz_type, 7023859Sml29623 anchor_index, 7033859Sml29623 chunk_size, 7043859Sml29623 dvma_addr)); 7053859Sml29623 7063859Sml29623 if (pktbuf_pp >= dvma_addr) 7073859Sml29623 base_side = TO_RIGHT; /* to the right */ 7083859Sml29623 if (pktbuf_pp < (dvma_addr + chunk_size)) 7093859Sml29623 end_side = TO_LEFT; /* to the left */ 7103859Sml29623 7113859Sml29623 switch (base_side + end_side) { 7123859Sml29623 case IN_MIDDLE: 7133859Sml29623 /* found */ 7143859Sml29623 found = B_TRUE; 7153859Sml29623 search_done = B_TRUE; 7163859Sml29623 if ((pktbuf_pp + bufsize) < 7173859Sml29623 (dvma_addr + chunk_size)) 7183859Sml29623 ring_info->hint[pktbufsz_type] = 7193859Sml29623 bufinfo[anchor_index].buf_index; 7203859Sml29623 break; 7213859Sml29623 case BOTH_RIGHT: 7223859Sml29623 /* not found: go to the right */ 7233859Sml29623 l_index = anchor_index + 1; 7243859Sml29623 anchor_index = 7253859Sml29623 MID_INDEX(r_index, l_index); 7263859Sml29623 break; 7273859Sml29623 7283859Sml29623 case 
BOTH_LEFT: 7293859Sml29623 /* not found: go to the left */ 7303859Sml29623 r_index = anchor_index - 1; 7313859Sml29623 anchor_index = MID_INDEX(r_index, 7323859Sml29623 l_index); 7333859Sml29623 break; 7343859Sml29623 default: /* should not come here */ 7353859Sml29623 return (NXGE_ERROR); 7363859Sml29623 } 7373859Sml29623 iteration++; 7383859Sml29623 } 7393859Sml29623 7403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7413859Sml29623 "==> nxge_rxbuf_pp_to_vp: (search done)" 7423859Sml29623 "buf_pp $%p btype %d anchor_index %d", 7433859Sml29623 pkt_buf_addr_pp, 7443859Sml29623 pktbufsz_type, 7453859Sml29623 anchor_index)); 7463859Sml29623 } 7473859Sml29623 7483859Sml29623 if (found == B_FALSE) { 7493859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7503859Sml29623 "==> nxge_rxbuf_pp_to_vp: (search failed)" 7513859Sml29623 "buf_pp $%p btype %d anchor_index %d", 7523859Sml29623 pkt_buf_addr_pp, 7533859Sml29623 pktbufsz_type, 7543859Sml29623 anchor_index)); 7553859Sml29623 return (NXGE_ERROR); 7563859Sml29623 } 7573859Sml29623 7583859Sml29623 found_index: 7593859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7603859Sml29623 "==> nxge_rxbuf_pp_to_vp: (FOUND1)" 7613859Sml29623 "buf_pp $%p btype %d bufsize %d anchor_index %d", 7623859Sml29623 pkt_buf_addr_pp, 7633859Sml29623 pktbufsz_type, 7643859Sml29623 bufsize, 7653859Sml29623 anchor_index)); 7663859Sml29623 7673859Sml29623 /* index of the first block in this chunk */ 7683859Sml29623 chunk_index = bufinfo[anchor_index].start_index; 7693859Sml29623 dvma_addr = bufinfo[anchor_index].dvma_addr; 7703859Sml29623 page_size_mask = ring_info->block_size_mask; 7713859Sml29623 7723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7733859Sml29623 "==> nxge_rxbuf_pp_to_vp: (FOUND3), get chunk)" 7743859Sml29623 "buf_pp $%p btype %d bufsize %d " 7753859Sml29623 "anchor_index %d chunk_index %d dvma $%p", 7763859Sml29623 pkt_buf_addr_pp, 7773859Sml29623 pktbufsz_type, 7783859Sml29623 bufsize, 7793859Sml29623 anchor_index, 7803859Sml29623 chunk_index, 
7813859Sml29623 dvma_addr)); 7823859Sml29623 7833859Sml29623 offset = pktbuf_pp - dvma_addr; /* offset within the chunk */ 7843859Sml29623 block_size = rbr_p->block_size; /* System block(page) size */ 7853859Sml29623 7863859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 7873859Sml29623 "==> nxge_rxbuf_pp_to_vp: (FOUND4), get chunk)" 7883859Sml29623 "buf_pp $%p btype %d bufsize %d " 7893859Sml29623 "anchor_index %d chunk_index %d dvma $%p " 7903859Sml29623 "offset %d block_size %d", 7913859Sml29623 pkt_buf_addr_pp, 7923859Sml29623 pktbufsz_type, 7933859Sml29623 bufsize, 7943859Sml29623 anchor_index, 7953859Sml29623 chunk_index, 7963859Sml29623 dvma_addr, 7973859Sml29623 offset, 7983859Sml29623 block_size)); 7993859Sml29623 8003859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> getting total index")); 8013859Sml29623 8023859Sml29623 block_index = (offset / block_size); /* index within chunk */ 8033859Sml29623 total_index = chunk_index + block_index; 8043859Sml29623 8053859Sml29623 8063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8073859Sml29623 "==> nxge_rxbuf_pp_to_vp: " 8083859Sml29623 "total_index %d dvma_addr $%p " 8093859Sml29623 "offset %d block_size %d " 8103859Sml29623 "block_index %d ", 8113859Sml29623 total_index, dvma_addr, 8123859Sml29623 offset, block_size, 8133859Sml29623 block_index)); 8145125Sjoycey #if defined(__i386) 8155125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr + 8165125Sjoycey (uint32_t)offset); 8175125Sjoycey #else 8185125Sjoycey *pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr + 8195125Sjoycey (uint64_t)offset); 8205125Sjoycey #endif 8213859Sml29623 8223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8233859Sml29623 "==> nxge_rxbuf_pp_to_vp: " 8243859Sml29623 "total_index %d dvma_addr $%p " 8253859Sml29623 "offset %d block_size %d " 8263859Sml29623 "block_index %d " 8273859Sml29623 "*pkt_buf_addr_p $%p", 8283859Sml29623 total_index, dvma_addr, 8293859Sml29623 offset, block_size, 8303859Sml29623 
block_index, 8313859Sml29623 *pkt_buf_addr_p)); 8323859Sml29623 8333859Sml29623 8343859Sml29623 *msg_index = total_index; 8353859Sml29623 *bufoffset = (offset & page_size_mask); 8363859Sml29623 8373859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 8383859Sml29623 "==> nxge_rxbuf_pp_to_vp: get msg index: " 8393859Sml29623 "msg_index %d bufoffset_index %d", 8403859Sml29623 *msg_index, 8413859Sml29623 *bufoffset)); 8423859Sml29623 8433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rxbuf_pp_to_vp")); 8443859Sml29623 8453859Sml29623 return (NXGE_OK); 8463859Sml29623 } 8473859Sml29623 8483859Sml29623 /* 8493859Sml29623 * used by quick sort (qsort) function 8503859Sml29623 * to perform comparison 8513859Sml29623 */ 8523859Sml29623 static int 8533859Sml29623 nxge_sort_compare(const void *p1, const void *p2) 8543859Sml29623 { 8553859Sml29623 8563859Sml29623 rxbuf_index_info_t *a, *b; 8573859Sml29623 8583859Sml29623 a = (rxbuf_index_info_t *)p1; 8593859Sml29623 b = (rxbuf_index_info_t *)p2; 8603859Sml29623 8613859Sml29623 if (a->dvma_addr > b->dvma_addr) 8623859Sml29623 return (1); 8633859Sml29623 if (a->dvma_addr < b->dvma_addr) 8643859Sml29623 return (-1); 8653859Sml29623 return (0); 8663859Sml29623 } 8673859Sml29623 8683859Sml29623 8693859Sml29623 8703859Sml29623 /* 8713859Sml29623 * grabbed this sort implementation from common/syscall/avl.c 8723859Sml29623 * 8733859Sml29623 */ 8743859Sml29623 /* 8753859Sml29623 * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified. 
8763859Sml29623 * v = Ptr to array/vector of objs 8773859Sml29623 * n = # objs in the array 8783859Sml29623 * s = size of each obj (must be multiples of a word size) 8793859Sml29623 * f = ptr to function to compare two objs 8803859Sml29623 * returns (-1 = less than, 0 = equal, 1 = greater than 8813859Sml29623 */ 8823859Sml29623 void 8833859Sml29623 nxge_ksort(caddr_t v, int n, int s, int (*f)()) 8843859Sml29623 { 8853859Sml29623 int g, i, j, ii; 8863859Sml29623 unsigned int *p1, *p2; 8873859Sml29623 unsigned int tmp; 8883859Sml29623 8893859Sml29623 /* No work to do */ 8903859Sml29623 if (v == NULL || n <= 1) 8913859Sml29623 return; 8923859Sml29623 /* Sanity check on arguments */ 8933859Sml29623 ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0); 8943859Sml29623 ASSERT(s > 0); 8953859Sml29623 8963859Sml29623 for (g = n / 2; g > 0; g /= 2) { 8973859Sml29623 for (i = g; i < n; i++) { 8983859Sml29623 for (j = i - g; j >= 0 && 8993859Sml29623 (*f)(v + j * s, v + (j + g) * s) == 1; 9003859Sml29623 j -= g) { 9013859Sml29623 p1 = (unsigned *)(v + j * s); 9023859Sml29623 p2 = (unsigned *)(v + (j + g) * s); 9033859Sml29623 for (ii = 0; ii < s / 4; ii++) { 9043859Sml29623 tmp = *p1; 9053859Sml29623 *p1++ = *p2; 9063859Sml29623 *p2++ = tmp; 9073859Sml29623 } 9083859Sml29623 } 9093859Sml29623 } 9103859Sml29623 } 9113859Sml29623 } 9123859Sml29623 9133859Sml29623 /* 9143859Sml29623 * Initialize data structures required for rxdma 9153859Sml29623 * buffer dvma->vmem address lookup 9163859Sml29623 */ 9173859Sml29623 /*ARGSUSED*/ 9183859Sml29623 static nxge_status_t 9193859Sml29623 nxge_rxbuf_index_info_init(p_nxge_t nxgep, p_rx_rbr_ring_t rbrp) 9203859Sml29623 { 9213859Sml29623 9223859Sml29623 int index; 9233859Sml29623 rxring_info_t *ring_info; 9243859Sml29623 int max_iteration = 0, max_index = 0; 9253859Sml29623 9263859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "==> nxge_rxbuf_index_info_init")); 9273859Sml29623 9283859Sml29623 ring_info = rbrp->ring_info; 9293859Sml29623 
ring_info->hint[0] = NO_HINT; 9303859Sml29623 ring_info->hint[1] = NO_HINT; 9313859Sml29623 ring_info->hint[2] = NO_HINT; 9323859Sml29623 max_index = rbrp->num_blocks; 9333859Sml29623 9343859Sml29623 /* read the DVMA address information and sort it */ 9353859Sml29623 /* do init of the information array */ 9363859Sml29623 9373859Sml29623 9383859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 9393859Sml29623 " nxge_rxbuf_index_info_init Sort ptrs")); 9403859Sml29623 9413859Sml29623 /* sort the array */ 9423859Sml29623 nxge_ksort((void *)ring_info->buffer, max_index, 9433859Sml29623 sizeof (rxbuf_index_info_t), nxge_sort_compare); 9443859Sml29623 9453859Sml29623 9463859Sml29623 9473859Sml29623 for (index = 0; index < max_index; index++) { 9483859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 9493859Sml29623 " nxge_rxbuf_index_info_init: sorted chunk %d " 9503859Sml29623 " ioaddr $%p kaddr $%p size %x", 9513859Sml29623 index, ring_info->buffer[index].dvma_addr, 9523859Sml29623 ring_info->buffer[index].kaddr, 9533859Sml29623 ring_info->buffer[index].buf_size)); 9543859Sml29623 } 9553859Sml29623 9563859Sml29623 max_iteration = 0; 9573859Sml29623 while (max_index >= (1ULL << max_iteration)) 9583859Sml29623 max_iteration++; 9593859Sml29623 ring_info->max_iterations = max_iteration + 1; 9603859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA2_CTL, 9613859Sml29623 " nxge_rxbuf_index_info_init Find max iter %d", 9623859Sml29623 ring_info->max_iterations)); 9633859Sml29623 9643859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxbuf_index_info_init")); 9653859Sml29623 return (NXGE_OK); 9663859Sml29623 } 9673859Sml29623 9683859Sml29623 /* ARGSUSED */ 9693859Sml29623 void 9703859Sml29623 nxge_dump_rcr_entry(p_nxge_t nxgep, p_rcr_entry_t entry_p) 9713859Sml29623 { 9723859Sml29623 #ifdef NXGE_DEBUG 9733859Sml29623 9743859Sml29623 uint32_t bptr; 9753859Sml29623 uint64_t pp; 9763859Sml29623 9773859Sml29623 bptr = entry_p->bits.hdw.pkt_buf_addr; 9783859Sml29623 9793859Sml29623 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, 9803859Sml29623 "\trcr entry $%p " 9813859Sml29623 "\trcr entry 0x%0llx " 9823859Sml29623 "\trcr entry 0x%08x " 9833859Sml29623 "\trcr entry 0x%08x " 9843859Sml29623 "\tvalue 0x%0llx\n" 9853859Sml29623 "\tmulti = %d\n" 9863859Sml29623 "\tpkt_type = 0x%x\n" 9873859Sml29623 "\tzero_copy = %d\n" 9883859Sml29623 "\tnoport = %d\n" 9893859Sml29623 "\tpromis = %d\n" 9903859Sml29623 "\terror = 0x%04x\n" 9913859Sml29623 "\tdcf_err = 0x%01x\n" 9923859Sml29623 "\tl2_len = %d\n" 9933859Sml29623 "\tpktbufsize = %d\n" 9943859Sml29623 "\tpkt_buf_addr = $%p\n" 9953859Sml29623 "\tpkt_buf_addr (<< 6) = $%p\n", 9963859Sml29623 entry_p, 9973859Sml29623 *(int64_t *)entry_p, 9983859Sml29623 *(int32_t *)entry_p, 9993859Sml29623 *(int32_t *)((char *)entry_p + 32), 10003859Sml29623 entry_p->value, 10013859Sml29623 entry_p->bits.hdw.multi, 10023859Sml29623 entry_p->bits.hdw.pkt_type, 10033859Sml29623 entry_p->bits.hdw.zero_copy, 10043859Sml29623 entry_p->bits.hdw.noport, 10053859Sml29623 entry_p->bits.hdw.promis, 10063859Sml29623 entry_p->bits.hdw.error, 10073859Sml29623 entry_p->bits.hdw.dcf_err, 10083859Sml29623 entry_p->bits.hdw.l2_len, 10093859Sml29623 entry_p->bits.hdw.pktbufsz, 10103859Sml29623 bptr, 10113859Sml29623 entry_p->bits.ldw.pkt_buf_addr)); 10123859Sml29623 10133859Sml29623 pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) << 10143859Sml29623 RCR_PKT_BUF_ADDR_SHIFT; 10153859Sml29623 10163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "rcr pp 0x%llx l2 len %d", 10173859Sml29623 pp, (*(int64_t *)entry_p >> 40) & 0x3fff)); 10183859Sml29623 #endif 10193859Sml29623 } 10203859Sml29623 10213859Sml29623 void 10223859Sml29623 nxge_rxdma_regs_dump(p_nxge_t nxgep, int rdc) 10233859Sml29623 { 10243859Sml29623 npi_handle_t handle; 10253859Sml29623 rbr_stat_t rbr_stat; 10263859Sml29623 addr44_t hd_addr; 10273859Sml29623 addr44_t tail_addr; 10283859Sml29623 uint16_t qlen; 10293859Sml29623 10303859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 10313859Sml29623 "==> nxge_rxdma_regs_dump: rdc 
channel %d", rdc)); 10323859Sml29623 10333859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 10343859Sml29623 10353859Sml29623 /* RBR head */ 10363859Sml29623 hd_addr.addr = 0; 10373859Sml29623 (void) npi_rxdma_rdc_rbr_head_get(handle, rdc, &hd_addr); 1038*5165Syc148097 #if defined(__i386) 10393859Sml29623 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 10405125Sjoycey (void *)(uint32_t)hd_addr.addr); 10415125Sjoycey #else 1042*5165Syc148097 printf("nxge_rxdma_regs_dump: got hdptr $%p \n", 10433859Sml29623 (void *)hd_addr.addr); 10445125Sjoycey #endif 10453859Sml29623 10463859Sml29623 /* RBR stats */ 10473859Sml29623 (void) npi_rxdma_rdc_rbr_stat_get(handle, rdc, &rbr_stat); 10483859Sml29623 printf("nxge_rxdma_regs_dump: rbr len %d \n", rbr_stat.bits.ldw.qlen); 10493859Sml29623 10503859Sml29623 /* RCR tail */ 10513859Sml29623 tail_addr.addr = 0; 10523859Sml29623 (void) npi_rxdma_rdc_rcr_tail_get(handle, rdc, &tail_addr); 1053*5165Syc148097 #if defined(__i386) 10543859Sml29623 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 10555125Sjoycey (void *)(uint32_t)tail_addr.addr); 10565125Sjoycey #else 1057*5165Syc148097 printf("nxge_rxdma_regs_dump: got tail ptr $%p \n", 10583859Sml29623 (void *)tail_addr.addr); 10595125Sjoycey #endif 10603859Sml29623 10613859Sml29623 /* RCR qlen */ 10623859Sml29623 (void) npi_rxdma_rdc_rcr_qlen_get(handle, rdc, &qlen); 10633859Sml29623 printf("nxge_rxdma_regs_dump: rcr len %x \n", qlen); 10643859Sml29623 10653859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 10663859Sml29623 "<== nxge_rxdma_regs_dump: rdc rdc %d", rdc)); 10673859Sml29623 } 10683859Sml29623 10693859Sml29623 void 10703859Sml29623 nxge_rxdma_stop(p_nxge_t nxgep) 10713859Sml29623 { 10723859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop")); 10733859Sml29623 10743859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 10753859Sml29623 (void) nxge_rx_mac_disable(nxgep); 10763859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP); 10773859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop")); 10783859Sml29623 } 10793859Sml29623 10803859Sml29623 void 10813859Sml29623 nxge_rxdma_stop_reinit(p_nxge_t nxgep) 10823859Sml29623 { 10833859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_reinit")); 10843859Sml29623 10853859Sml29623 (void) nxge_rxdma_stop(nxgep); 10863859Sml29623 (void) nxge_uninit_rxdma_channels(nxgep); 10873859Sml29623 (void) nxge_init_rxdma_channels(nxgep); 10883859Sml29623 10893859Sml29623 #ifndef AXIS_DEBUG_LB 10903859Sml29623 (void) nxge_xcvr_init(nxgep); 10913859Sml29623 (void) nxge_link_monitor(nxgep, LINK_MONITOR_START); 10923859Sml29623 #endif 10933859Sml29623 (void) nxge_rx_mac_enable(nxgep); 10943859Sml29623 10953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_reinit")); 10963859Sml29623 } 10973859Sml29623 10983859Sml29623 nxge_status_t 10993859Sml29623 nxge_rxdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 11003859Sml29623 { 11013859Sml29623 int i, ndmas; 11023859Sml29623 uint16_t channel; 11033859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 11043859Sml29623 p_rx_rbr_ring_t *rbr_rings; 11053859Sml29623 npi_handle_t handle; 11063859Sml29623 npi_status_t rs = NPI_SUCCESS; 11073859Sml29623 nxge_status_t status = NXGE_OK; 11083859Sml29623 11093859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 11103859Sml29623 "==> nxge_rxdma_hw_mode: mode %d", enable)); 11113859Sml29623 11123859Sml29623 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 11133859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11143859Sml29623 "<== nxge_rxdma_mode: not initialized")); 11153859Sml29623 return (NXGE_ERROR); 11163859Sml29623 } 11173859Sml29623 11183859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 11193859Sml29623 if (rx_rbr_rings == NULL) { 11203859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11213859Sml29623 "<== nxge_rxdma_mode: NULL ring pointer")); 11223859Sml29623 return (NXGE_ERROR); 11233859Sml29623 } 11243859Sml29623 if (rx_rbr_rings->rbr_rings == NULL) { 11253859Sml29623 NXGE_DEBUG_MSG((nxgep, 
RX_CTL, 11263859Sml29623 "<== nxge_rxdma_mode: NULL rbr rings pointer")); 11273859Sml29623 return (NXGE_ERROR); 11283859Sml29623 } 11293859Sml29623 11303859Sml29623 ndmas = rx_rbr_rings->ndmas; 11313859Sml29623 if (!ndmas) { 11323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 11333859Sml29623 "<== nxge_rxdma_mode: no channel")); 11343859Sml29623 return (NXGE_ERROR); 11353859Sml29623 } 11363859Sml29623 11373859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 11383859Sml29623 "==> nxge_rxdma_mode (ndmas %d)", ndmas)); 11393859Sml29623 11403859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 11413859Sml29623 11423859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11433859Sml29623 for (i = 0; i < ndmas; i++) { 11443859Sml29623 if (rbr_rings == NULL || rbr_rings[i] == NULL) { 11453859Sml29623 continue; 11463859Sml29623 } 11473859Sml29623 channel = rbr_rings[i]->rdc; 11483859Sml29623 if (enable) { 11493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 11503859Sml29623 "==> nxge_rxdma_hw_mode: channel %d (enable)", 11513859Sml29623 channel)); 11523859Sml29623 rs = npi_rxdma_cfg_rdc_enable(handle, channel); 11533859Sml29623 } else { 11543859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 11553859Sml29623 "==> nxge_rxdma_hw_mode: channel %d (disable)", 11563859Sml29623 channel)); 11573859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 11583859Sml29623 } 11593859Sml29623 } 11603859Sml29623 11613859Sml29623 status = ((rs == NPI_SUCCESS) ? 
NXGE_OK : NXGE_ERROR | rs); 11623859Sml29623 11633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 11643859Sml29623 "<== nxge_rxdma_hw_mode: status 0x%x", status)); 11653859Sml29623 11663859Sml29623 return (status); 11673859Sml29623 } 11683859Sml29623 11693859Sml29623 void 11703859Sml29623 nxge_rxdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 11713859Sml29623 { 11723859Sml29623 npi_handle_t handle; 11733859Sml29623 11743859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 11753859Sml29623 "==> nxge_rxdma_enable_channel: channel %d", channel)); 11763859Sml29623 11773859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11783859Sml29623 (void) npi_rxdma_cfg_rdc_enable(handle, channel); 11793859Sml29623 11803859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_enable_channel")); 11813859Sml29623 } 11823859Sml29623 11833859Sml29623 void 11843859Sml29623 nxge_rxdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 11853859Sml29623 { 11863859Sml29623 npi_handle_t handle; 11873859Sml29623 11883859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 11893859Sml29623 "==> nxge_rxdma_disable_channel: channel %d", channel)); 11903859Sml29623 11913859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 11923859Sml29623 (void) npi_rxdma_cfg_rdc_disable(handle, channel); 11933859Sml29623 11943859Sml29623 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_rxdma_disable_channel")); 11953859Sml29623 } 11963859Sml29623 11973859Sml29623 void 11983859Sml29623 nxge_hw_start_rx(p_nxge_t nxgep) 11993859Sml29623 { 12003859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_start_rx")); 12013859Sml29623 12023859Sml29623 (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_START); 12033859Sml29623 (void) nxge_rx_mac_enable(nxgep); 12043859Sml29623 12053859Sml29623 NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_start_rx")); 12063859Sml29623 } 12073859Sml29623 12083859Sml29623 /*ARGSUSED*/ 12093859Sml29623 void 12103859Sml29623 nxge_fixup_rxdma_rings(p_nxge_t nxgep) 12113859Sml29623 { 12123859Sml29623 int i, ndmas; 12133859Sml29623 
uint16_t rdc; 12143859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 12153859Sml29623 p_rx_rbr_ring_t *rbr_rings; 12163859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 12173859Sml29623 12183859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_fixup_rxdma_rings")); 12193859Sml29623 12203859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 12213859Sml29623 if (rx_rbr_rings == NULL) { 12223859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12233859Sml29623 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 12243859Sml29623 return; 12253859Sml29623 } 12263859Sml29623 ndmas = rx_rbr_rings->ndmas; 12273859Sml29623 if (!ndmas) { 12283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12293859Sml29623 "<== nxge_fixup_rxdma_rings: no channel")); 12303859Sml29623 return; 12313859Sml29623 } 12323859Sml29623 12333859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 12343859Sml29623 if (rx_rcr_rings == NULL) { 12353859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12363859Sml29623 "<== nxge_fixup_rxdma_rings: NULL ring pointer")); 12373859Sml29623 return; 12383859Sml29623 } 12393859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12403859Sml29623 "==> nxge_fixup_rxdma_rings (ndmas %d)", ndmas)); 12413859Sml29623 12423859Sml29623 nxge_rxdma_hw_stop(nxgep); 12433859Sml29623 12443859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 12453859Sml29623 for (i = 0; i < ndmas; i++) { 12463859Sml29623 rdc = rbr_rings[i]->rdc; 12473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12483859Sml29623 "==> nxge_fixup_rxdma_rings: channel %d " 12493859Sml29623 "ring $%px", rdc, rbr_rings[i])); 12503859Sml29623 (void) nxge_rxdma_fixup_channel(nxgep, rdc, i); 12513859Sml29623 } 12523859Sml29623 12533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_fixup_rxdma_rings")); 12543859Sml29623 } 12553859Sml29623 12563859Sml29623 void 12573859Sml29623 nxge_rxdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 12583859Sml29623 { 12593859Sml29623 int i; 12603859Sml29623 12613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fix_channel")); 12623859Sml29623 i = 
nxge_rxdma_get_ring_index(nxgep, channel); 12633859Sml29623 if (i < 0) { 12643859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 12653859Sml29623 "<== nxge_rxdma_fix_channel: no entry found")); 12663859Sml29623 return; 12673859Sml29623 } 12683859Sml29623 12693859Sml29623 nxge_rxdma_fixup_channel(nxgep, channel, i); 12703859Sml29623 12713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_txdma_fix_channel")); 12723859Sml29623 } 12733859Sml29623 12743859Sml29623 void 12753859Sml29623 nxge_rxdma_fixup_channel(p_nxge_t nxgep, uint16_t channel, int entry) 12763859Sml29623 { 12773859Sml29623 int ndmas; 12783859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 12793859Sml29623 p_rx_rbr_ring_t *rbr_rings; 12803859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 12813859Sml29623 p_rx_rcr_ring_t *rcr_rings; 12823859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 12833859Sml29623 p_rx_mbox_t *rx_mbox_p; 12843859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 12853859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 12863859Sml29623 p_rx_rbr_ring_t rbrp; 12873859Sml29623 p_rx_rcr_ring_t rcrp; 12883859Sml29623 p_rx_mbox_t mboxp; 12893859Sml29623 p_nxge_dma_common_t dmap; 12903859Sml29623 nxge_status_t status = NXGE_OK; 12913859Sml29623 12923859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fixup_channel")); 12933859Sml29623 12943859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 12953859Sml29623 12963859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 12973859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 12983859Sml29623 12993859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 13003859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13013859Sml29623 "<== nxge_rxdma_fixup_channel: buf not allocated")); 13023859Sml29623 return; 13033859Sml29623 } 13043859Sml29623 13053859Sml29623 ndmas = dma_buf_poolp->ndmas; 13063859Sml29623 if (!ndmas) { 13073859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 13083859Sml29623 "<== nxge_rxdma_fixup_channel: no dma allocated")); 13093859Sml29623 
return; 13103859Sml29623 } 13113859Sml29623 13123859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 13133859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 13143859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 13153859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 13163859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 13173859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 13183859Sml29623 13193859Sml29623 /* Reinitialize the receive block and completion rings */ 13203859Sml29623 rbrp = (p_rx_rbr_ring_t)rbr_rings[entry], 13213859Sml29623 rcrp = (p_rx_rcr_ring_t)rcr_rings[entry], 13223859Sml29623 mboxp = (p_rx_mbox_t)rx_mbox_p[entry]; 13233859Sml29623 13243859Sml29623 13253859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 13263859Sml29623 rbrp->rbr_rd_index = 0; 13273859Sml29623 rcrp->comp_rd_index = 0; 13283859Sml29623 rcrp->comp_wt_index = 0; 13293859Sml29623 13303859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 13313859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 13323859Sml29623 13333859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 13343859Sml29623 rbrp, rcrp, mboxp); 13353859Sml29623 if (status != NXGE_OK) { 13363859Sml29623 goto nxge_rxdma_fixup_channel_fail; 13373859Sml29623 } 13383859Sml29623 if (status != NXGE_OK) { 13393859Sml29623 goto nxge_rxdma_fixup_channel_fail; 13403859Sml29623 } 13413859Sml29623 13423859Sml29623 nxge_rxdma_fixup_channel_fail: 13433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13443859Sml29623 "==> nxge_rxdma_fixup_channel: failed (0x%08x)", status)); 13453859Sml29623 13463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fixup_channel")); 13473859Sml29623 } 13483859Sml29623 13493859Sml29623 int 13503859Sml29623 nxge_rxdma_get_ring_index(p_nxge_t nxgep, uint16_t channel) 13513859Sml29623 { 13523859Sml29623 int i, ndmas; 13533859Sml29623 uint16_t rdc; 13543859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 13553859Sml29623 p_rx_rbr_ring_t *rbr_rings; 13563859Sml29623 13573859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 13583859Sml29623 "==> nxge_rxdma_get_ring_index: channel %d", channel)); 13593859Sml29623 13603859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 13613859Sml29623 if (rx_rbr_rings == NULL) { 13623859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13633859Sml29623 "<== nxge_rxdma_get_ring_index: NULL ring pointer")); 13643859Sml29623 return (-1); 13653859Sml29623 } 13663859Sml29623 ndmas = rx_rbr_rings->ndmas; 13673859Sml29623 if (!ndmas) { 13683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13693859Sml29623 "<== nxge_rxdma_get_ring_index: no channel")); 13703859Sml29623 return (-1); 13713859Sml29623 } 13723859Sml29623 13733859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13743859Sml29623 "==> nxge_rxdma_get_ring_index (ndmas %d)", ndmas)); 13753859Sml29623 13763859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 13773859Sml29623 for (i = 0; i < ndmas; i++) { 13783859Sml29623 rdc = rbr_rings[i]->rdc; 13793859Sml29623 if (channel == rdc) { 13803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13813859Sml29623 "==> nxge_rxdma_get_rbr_ring: " 13823859Sml29623 "channel %d (index %d) " 13833859Sml29623 "ring %d", channel, i, 13843859Sml29623 rbr_rings[i])); 13853859Sml29623 return (i); 13863859Sml29623 } 13873859Sml29623 } 13883859Sml29623 13893859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 13903859Sml29623 "<== nxge_rxdma_get_rbr_ring_index: not found")); 13913859Sml29623 13923859Sml29623 return (-1); 13933859Sml29623 } 13943859Sml29623 13953859Sml29623 p_rx_rbr_ring_t 13963859Sml29623 nxge_rxdma_get_rbr_ring(p_nxge_t nxgep, uint16_t channel) 13973859Sml29623 { 13983859Sml29623 int i, ndmas; 13993859Sml29623 uint16_t rdc; 14003859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 14013859Sml29623 p_rx_rbr_ring_t *rbr_rings; 14023859Sml29623 14033859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14043859Sml29623 "==> nxge_rxdma_get_rbr_ring: channel %d", channel)); 14053859Sml29623 14063859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 14073859Sml29623 if (rx_rbr_rings == NULL) { 
14083859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14093859Sml29623 "<== nxge_rxdma_get_rbr_ring: NULL ring pointer")); 14103859Sml29623 return (NULL); 14113859Sml29623 } 14123859Sml29623 ndmas = rx_rbr_rings->ndmas; 14133859Sml29623 if (!ndmas) { 14143859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14153859Sml29623 "<== nxge_rxdma_get_rbr_ring: no channel")); 14163859Sml29623 return (NULL); 14173859Sml29623 } 14183859Sml29623 14193859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14203859Sml29623 "==> nxge_rxdma_get_ring (ndmas %d)", ndmas)); 14213859Sml29623 14223859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 14233859Sml29623 for (i = 0; i < ndmas; i++) { 14243859Sml29623 rdc = rbr_rings[i]->rdc; 14253859Sml29623 if (channel == rdc) { 14263859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14273859Sml29623 "==> nxge_rxdma_get_rbr_ring: channel %d " 14283859Sml29623 "ring $%p", channel, rbr_rings[i])); 14293859Sml29623 return (rbr_rings[i]); 14303859Sml29623 } 14313859Sml29623 } 14323859Sml29623 14333859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14343859Sml29623 "<== nxge_rxdma_get_rbr_ring: not found")); 14353859Sml29623 14363859Sml29623 return (NULL); 14373859Sml29623 } 14383859Sml29623 14393859Sml29623 p_rx_rcr_ring_t 14403859Sml29623 nxge_rxdma_get_rcr_ring(p_nxge_t nxgep, uint16_t channel) 14413859Sml29623 { 14423859Sml29623 int i, ndmas; 14433859Sml29623 uint16_t rdc; 14443859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 14453859Sml29623 p_rx_rcr_ring_t *rcr_rings; 14463859Sml29623 14473859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14483859Sml29623 "==> nxge_rxdma_get_rcr_ring: channel %d", channel)); 14493859Sml29623 14503859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 14513859Sml29623 if (rx_rcr_rings == NULL) { 14523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14533859Sml29623 "<== nxge_rxdma_get_rcr_ring: NULL ring pointer")); 14543859Sml29623 return (NULL); 14553859Sml29623 } 14563859Sml29623 ndmas = rx_rcr_rings->ndmas; 14573859Sml29623 if (!ndmas) { 14583859Sml29623 
NXGE_DEBUG_MSG((nxgep, RX_CTL, 14593859Sml29623 "<== nxge_rxdma_get_rcr_ring: no channel")); 14603859Sml29623 return (NULL); 14613859Sml29623 } 14623859Sml29623 14633859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14643859Sml29623 "==> nxge_rxdma_get_rcr_ring (ndmas %d)", ndmas)); 14653859Sml29623 14663859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 14673859Sml29623 for (i = 0; i < ndmas; i++) { 14683859Sml29623 rdc = rcr_rings[i]->rdc; 14693859Sml29623 if (channel == rdc) { 14703859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14713859Sml29623 "==> nxge_rxdma_get_rcr_ring: channel %d " 14723859Sml29623 "ring $%p", channel, rcr_rings[i])); 14733859Sml29623 return (rcr_rings[i]); 14743859Sml29623 } 14753859Sml29623 } 14763859Sml29623 14773859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 14783859Sml29623 "<== nxge_rxdma_get_rcr_ring: not found")); 14793859Sml29623 14803859Sml29623 return (NULL); 14813859Sml29623 } 14823859Sml29623 14833859Sml29623 /* 14843859Sml29623 * Static functions start here. 14853859Sml29623 */ 14863859Sml29623 static p_rx_msg_t 14873859Sml29623 nxge_allocb(size_t size, uint32_t pri, p_nxge_dma_common_t dmabuf_p) 14883859Sml29623 { 14893859Sml29623 p_rx_msg_t nxge_mp = NULL; 14903859Sml29623 p_nxge_dma_common_t dmamsg_p; 14913859Sml29623 uchar_t *buffer; 14923859Sml29623 14933859Sml29623 nxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP); 14943859Sml29623 if (nxge_mp == NULL) { 14954185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 14963859Sml29623 "Allocation of a rx msg failed.")); 14973859Sml29623 goto nxge_allocb_exit; 14983859Sml29623 } 14993859Sml29623 15003859Sml29623 nxge_mp->use_buf_pool = B_FALSE; 15013859Sml29623 if (dmabuf_p) { 15023859Sml29623 nxge_mp->use_buf_pool = B_TRUE; 15033859Sml29623 dmamsg_p = (p_nxge_dma_common_t)&nxge_mp->buf_dma; 15043859Sml29623 *dmamsg_p = *dmabuf_p; 15053859Sml29623 dmamsg_p->nblocks = 1; 15063859Sml29623 dmamsg_p->block_size = size; 15073859Sml29623 dmamsg_p->alength = size; 15083859Sml29623 buffer = (uchar_t 
*)dmabuf_p->kaddrp; 15093859Sml29623 15103859Sml29623 dmabuf_p->kaddrp = (void *) 15113859Sml29623 ((char *)dmabuf_p->kaddrp + size); 15123859Sml29623 dmabuf_p->ioaddr_pp = (void *) 15133859Sml29623 ((char *)dmabuf_p->ioaddr_pp + size); 15143859Sml29623 dmabuf_p->alength -= size; 15153859Sml29623 dmabuf_p->offset += size; 15163859Sml29623 dmabuf_p->dma_cookie.dmac_laddress += size; 15173859Sml29623 dmabuf_p->dma_cookie.dmac_size -= size; 15183859Sml29623 15193859Sml29623 } else { 15203859Sml29623 buffer = KMEM_ALLOC(size, KM_NOSLEEP); 15213859Sml29623 if (buffer == NULL) { 15224185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, 15233859Sml29623 "Allocation of a receive page failed.")); 15243859Sml29623 goto nxge_allocb_fail1; 15253859Sml29623 } 15263859Sml29623 } 15273859Sml29623 15283859Sml29623 nxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &nxge_mp->freeb); 15293859Sml29623 if (nxge_mp->rx_mblk_p == NULL) { 15304185Sspeer NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "desballoc failed.")); 15313859Sml29623 goto nxge_allocb_fail2; 15323859Sml29623 } 15333859Sml29623 15343859Sml29623 nxge_mp->buffer = buffer; 15353859Sml29623 nxge_mp->block_size = size; 15363859Sml29623 nxge_mp->freeb.free_func = (void (*)())nxge_freeb; 15373859Sml29623 nxge_mp->freeb.free_arg = (caddr_t)nxge_mp; 15383859Sml29623 nxge_mp->ref_cnt = 1; 15393859Sml29623 nxge_mp->free = B_TRUE; 15403859Sml29623 nxge_mp->rx_use_bcopy = B_FALSE; 15413859Sml29623 15423859Sml29623 atomic_inc_32(&nxge_mblks_pending); 15433859Sml29623 15443859Sml29623 goto nxge_allocb_exit; 15453859Sml29623 15463859Sml29623 nxge_allocb_fail2: 15473859Sml29623 if (!nxge_mp->use_buf_pool) { 15483859Sml29623 KMEM_FREE(buffer, size); 15493859Sml29623 } 15503859Sml29623 15513859Sml29623 nxge_allocb_fail1: 15523859Sml29623 KMEM_FREE(nxge_mp, sizeof (rx_msg_t)); 15533859Sml29623 nxge_mp = NULL; 15543859Sml29623 15553859Sml29623 nxge_allocb_exit: 15563859Sml29623 return (nxge_mp); 15573859Sml29623 } 15583859Sml29623 15593859Sml29623 p_mblk_t 
15603859Sml29623 nxge_dupb(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 15613859Sml29623 { 15623859Sml29623 p_mblk_t mp; 15633859Sml29623 15643859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "==> nxge_dupb")); 15653859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "nxge_mp = $%p " 15663859Sml29623 "offset = 0x%08X " 15673859Sml29623 "size = 0x%08X", 15683859Sml29623 nxge_mp, offset, size)); 15693859Sml29623 15703859Sml29623 mp = desballoc(&nxge_mp->buffer[offset], size, 15713859Sml29623 0, &nxge_mp->freeb); 15723859Sml29623 if (mp == NULL) { 15733859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 15743859Sml29623 goto nxge_dupb_exit; 15753859Sml29623 } 15763859Sml29623 atomic_inc_32(&nxge_mp->ref_cnt); 15773859Sml29623 atomic_inc_32(&nxge_mblks_pending); 15783859Sml29623 15793859Sml29623 15803859Sml29623 nxge_dupb_exit: 15813859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 15823859Sml29623 nxge_mp)); 15833859Sml29623 return (mp); 15843859Sml29623 } 15853859Sml29623 15863859Sml29623 p_mblk_t 15873859Sml29623 nxge_dupb_bcopy(p_rx_msg_t nxge_mp, uint_t offset, size_t size) 15883859Sml29623 { 15893859Sml29623 p_mblk_t mp; 15903859Sml29623 uchar_t *dp; 15913859Sml29623 15923859Sml29623 mp = allocb(size + NXGE_RXBUF_EXTRA, 0); 15933859Sml29623 if (mp == NULL) { 15943859Sml29623 NXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed")); 15953859Sml29623 goto nxge_dupb_bcopy_exit; 15963859Sml29623 } 15973859Sml29623 dp = mp->b_rptr = mp->b_rptr + NXGE_RXBUF_EXTRA; 15983859Sml29623 bcopy((void *)&nxge_mp->buffer[offset], dp, size); 15993859Sml29623 mp->b_wptr = dp + size; 16003859Sml29623 16013859Sml29623 nxge_dupb_bcopy_exit: 16023859Sml29623 NXGE_DEBUG_MSG((NULL, MEM_CTL, "<== nxge_dupb mp = $%p", 16033859Sml29623 nxge_mp)); 16043859Sml29623 return (mp); 16053859Sml29623 } 16063859Sml29623 16073859Sml29623 void nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, 16083859Sml29623 p_rx_msg_t rx_msg_p); 16093859Sml29623 16103859Sml29623 void 
16113859Sml29623 nxge_post_page(p_nxge_t nxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p) 16123859Sml29623 { 16133859Sml29623 16143859Sml29623 npi_handle_t handle; 16153859Sml29623 16163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_post_page")); 16173859Sml29623 16183859Sml29623 /* Reuse this buffer */ 16193859Sml29623 rx_msg_p->free = B_FALSE; 16203859Sml29623 rx_msg_p->cur_usage_cnt = 0; 16213859Sml29623 rx_msg_p->max_usage_cnt = 0; 16223859Sml29623 rx_msg_p->pkt_buf_size = 0; 16233859Sml29623 16243859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 16253859Sml29623 rx_msg_p->rx_use_bcopy = B_FALSE; 16263859Sml29623 atomic_dec_32(&rx_rbr_p->rbr_consumed); 16273859Sml29623 } 16283859Sml29623 16293859Sml29623 /* 16303859Sml29623 * Get the rbr header pointer and its offset index. 16313859Sml29623 */ 16323859Sml29623 MUTEX_ENTER(&rx_rbr_p->post_lock); 16333859Sml29623 16343859Sml29623 16353859Sml29623 rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) & 16363859Sml29623 rx_rbr_p->rbr_wrap_mask); 16373859Sml29623 rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr; 16383859Sml29623 MUTEX_EXIT(&rx_rbr_p->post_lock); 16393859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 16403859Sml29623 npi_rxdma_rdc_rbr_kick(handle, rx_rbr_p->rdc, 1); 16413859Sml29623 16423859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 16433859Sml29623 "<== nxge_post_page (channel %d post_next_index %d)", 16443859Sml29623 rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index)); 16453859Sml29623 16463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_post_page")); 16473859Sml29623 } 16483859Sml29623 16493859Sml29623 void 16503859Sml29623 nxge_freeb(p_rx_msg_t rx_msg_p) 16513859Sml29623 { 16523859Sml29623 size_t size; 16533859Sml29623 uchar_t *buffer = NULL; 16543859Sml29623 int ref_cnt; 16554874Sml29623 boolean_t free_state = B_FALSE; 16563859Sml29623 16573859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> nxge_freeb")); 16583859Sml29623 NXGE_DEBUG_MSG((NULL, MEM2_CTL, 16593859Sml29623 
	    "nxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, nxge_mblks_pending));

	/* One fewer loaned-up mblk outstanding, whatever happens below. */
	atomic_dec_32(&nxge_mblks_pending);
	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference dropped: tear the buffer block down. */
		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;
		NXGE_DEBUG_MSG((NULL, MEM2_CTL, "nxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, nxge_mblks_pending));

		/*
		 * Pool-backed buffers are reclaimed with their pool;
		 * only privately allocated buffers are freed here.
		 */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
		return;
	}

	/*
	 * Repost buffer.
	 */
	/*
	 * ref_cnt == 1 means only the ring's own reference remains;
	 * if the block was marked free, hand it back to the hardware.
	 */
	if (free_state && (ref_cnt == 1)) {
		NXGE_DEBUG_MSG((NULL, RX_CTL,
		    "nxge_freeb: post page $%p:", rx_msg_p));
		nxge_post_page(rx_msg_p->nxgep, rx_msg_p->rx_rbr_p,
		    rx_msg_p);
	}

	NXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== nxge_freeb"));
}

/*
 * nxge_rx_intr: receive interrupt service routine for one RX DMA
 * channel.  arg1 is the logical device (ldv), arg2 the nxge soft
 * state.  Always returns DDI_INTR_CLAIMED.
 */
uint_t
nxge_rx_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t		ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t		nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t		ldgp;
	uint8_t			channel;
	npi_handle_t		handle;
	rx_dma_ctl_stat_t	cs;

#ifdef	NXGE_DEBUG
	rxdma_cfig1_t		cfg;	/* NOTE(review): unused in this routine */
#endif
	uint_t			serviced = DDI_INTR_UNCLAIMED;

	if (ldvp == NULL) {
		NXGE_DEBUG_MSG((NULL, INT_CTL,
		    "<== nxge_rx_intr: arg2 $%p arg1 $%p",
		    nxgep, ldvp));

		/* No logical device: nothing to service, still claim it. */
		return (DDI_INTR_CLAIMED);
	}

	/* Trust the ldv's back-pointer when arg2 is absent or stale. */
	if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_intr: arg2 $%p arg1 $%p",
	    nxgep, ldvp));

	/*
	 * This interrupt handler is for a specific
	 * receive dma channel.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->channel;
	ldgp = ldvp->ldgp;
	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel, &cs.value);

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_intr:channel %d "
	    "cs 0x%016llx rcrto 0x%x rcrthres %x",
	    channel,
	    cs.value,
	    cs.bits.hdw.rcrto,
	    cs.bits.hdw.rcrthres));

	/* Drain the completion ring and pass packets up the stack. */
	nxge_rx_pkts_vring(nxgep, ldvp->vdma_index, ldvp, cs);
	serviced = DDI_INTR_CLAIMED;

	/* error events. */
	if (cs.value & RX_DMA_CTL_STAT_ERROR) {
		(void) nxge_rx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs);
	}

nxge_intr_exit:	/* NOTE(review): no goto targets this label; candidate for removal */


	/*
	 * Enable the mailbox update interrupt if we want
	 * to use mailbox. We probably don't need to use
	 * mailbox as it only saves us one pio read.
	 * Also write 1 to rcrthres and rcrto to clear
	 * these two edge triggered bits.
	 */

	cs.value &= RX_DMA_CTL_STAT_WR1C;
	cs.bits.hdw.mex = 1;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
	    cs.value);

	/*
	 * Rearm this logical group if this is a single device
	 * group.
	 */
	if (ldgp->nldvs == 1) {
		ldgimgm_t		mgm;
		/* Re-arm the group with its configured timer value. */
		mgm.value = 0;
		mgm.bits.ldw.arm = 1;
		mgm.bits.ldw.timer = ldgp->ldg_timer;
		NXGE_REG_WR64(handle,
		    LDGIMGN_REG + LDSV_OFFSET(ldgp->ldg),
		    mgm.value);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_intr: serviced %d",
	    serviced));
	return (serviced);
}

/*
 * Process the packets received in the specified logical device
 * and pass up a chain of message blocks to the upper layer.
 */
static void
nxge_rx_pkts_vring(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
    rx_dma_ctl_stat_t cs)
{
	p_mblk_t		mp;
	p_rx_rcr_ring_t		rcrp;

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring"));
	/* Nothing received: nothing to hand to the MAC layer. */
	if ((mp = nxge_rx_pkts(nxgep, vindex, ldvp, &rcrp, cs)) == NULL) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts_vring: no mp"));
		return;
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts_vring: $%p",
	    mp));

#ifdef NXGE_DEBUG
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring:calling mac_rx "
	    "LEN %d mp $%p mp->b_cont $%p mp->b_next $%p rcrp $%p "
	    "mac_handle $%p",
	    mp->b_wptr - mp->b_rptr,
	    mp, mp->b_cont, mp->b_next,
	    rcrp, rcrp->rcr_mac_handle));

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts_vring: dump packets "
	    "(mp $%p b_rptr $%p b_wptr $%p):\n %s",
	    mp,
	    mp->b_rptr,
	    mp->b_wptr,
	    nxge_dump_packet((char *)mp->b_rptr,
	    mp->b_wptr - mp->b_rptr)));
	if (mp->b_cont) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump b_cont packets "
		    "(mp->b_cont $%p b_rptr $%p b_wptr $%p):\n %s",
		    mp->b_cont,
		    mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr,
		    nxge_dump_packet((char *)mp->b_cont->b_rptr,
		    mp->b_cont->b_wptr - mp->b_cont->b_rptr)));
	}
	if (mp->b_next) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts_vring: dump next packets "
		    "(b_rptr $%p): %s",
		    mp->b_next->b_rptr,
		    nxge_dump_packet((char *)mp->b_next->b_rptr,
		    mp->b_next->b_wptr - mp->b_next->b_rptr)));
	}
#endif

	/* Deliver the whole chain to the MAC layer in one call. */
	mac_rx(nxgep->mach, rcrp->rcr_mac_handle, mp);
}


/*
 * This routine is the main packet receive processing function.
 * It gets the packet type, error code, and buffer related
 * information from the receive completion entry.
 * How many completion entries to process is based on the number of packets
 * queued by the hardware, a hardware maintained tail pointer
 * and a configurable receive packet count.
 *
 * A chain of message blocks will be created as result of processing
 * the completion entries. This chain of message blocks will be returned and
 * a hardware control status register will be updated with the number of
 * packets were removed from the hardware queue.
 *
 */
mblk_t *
nxge_rx_pkts(p_nxge_t nxgep, uint_t vindex, p_nxge_ldv_t ldvp,
    p_rx_rcr_ring_t *rcrp, rx_dma_ctl_stat_t cs)
{
	npi_handle_t		handle;
	uint8_t			channel;
	p_rx_rcr_rings_t	rx_rcr_rings;
	p_rx_rcr_ring_t		rcr_p;
	uint32_t		comp_rd_index;
	p_rcr_entry_t		rcr_desc_rd_head_p;
	p_rcr_entry_t		rcr_desc_rd_head_pp;
	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
	uint16_t		qlen, nrcr_read, npkt_read;
	uint32_t		qlen_hw;
	boolean_t		multi;
	rcrcfig_b_t		rcr_cfg_b;
#if defined(_BIG_ENDIAN)
	npi_status_t		rs = NPI_SUCCESS;
#endif

	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:vindex %d "
	    "channel %d", vindex, ldvp->channel));

	/* Don't touch the hardware before it has been initialized. */
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		return (NULL);
	}
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	rx_rcr_rings = nxgep->rx_rcr_rings;
	rcr_p = rx_rcr_rings->rcr_rings[vindex];
	channel = rcr_p->rdc;
	/* Sanity check: the ring must belong to this logical device. */
	if (channel != ldvp->channel) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
		    "channel %d, and rcr channel %d not matched.",
		    vindex, ldvp->channel, channel));
		return (NULL);
	}

	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: START: rcr channel %d "
	    "head_p $%p head_pp $%p index %d ",
	    channel, rcr_p->rcr_desc_rd_head_p,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));


	/* Read the number of queued completion entries for this channel. */
#if !defined(_BIG_ENDIAN)
	qlen = RXDMA_REG_READ32(handle, RCRSTAT_A_REG, channel) & 0xffff;
#else
	rs = npi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
	if (rs != NPI_SUCCESS) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:index %d "
		    "channel %d, get qlen failed 0x%08x",
		    vindex, ldvp->channel, rs));
		return (NULL);
	}
#endif
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rx_pkts:rcr channel %d "
	    "qlen %d", channel, qlen));



	if (!qlen) {
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts:rcr channel %d "
		    "qlen %d (no pkts)", channel, qlen));

		return (NULL);
	}

	comp_rd_index = rcr_p->comp_rd_index;

	rcr_desc_rd_head_p = rcr_p->rcr_desc_rd_head_p;
	rcr_desc_rd_head_pp = rcr_p->rcr_desc_rd_head_pp;
	nrcr_read = npkt_read = 0;

	/*
	 * Number of packets queued
	 * (The jumbo or multi packet will be counted as only one
	 *  packets and it may take up more than one completion entry).
	 */
	qlen_hw = (qlen < nxge_max_rx_pkts) ?
	    qlen : nxge_max_rx_pkts;
	head_mp = NULL;
	tail_mp = &head_mp;
	nmp = mp_cont = NULL;
	multi = B_FALSE;

	while (qlen_hw) {

#ifdef NXGE_DEBUG
		nxge_dump_rcr_entry(nxgep, rcr_desc_rd_head_p);
#endif
		/*
		 * Process one completion ring entry.
		 */
		nxge_receive_packet(nxgep,
		    rcr_p, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont);

		/*
		 * message chaining modes:
		 * frames chain via b_next, segments of one jumbo
		 * frame chain via b_cont.
		 */
		if (nmp) {
			nmp->b_next = NULL;
			if (!multi && !mp_cont) { /* frame fits a partition */
				*tail_mp = nmp;
				tail_mp = &nmp->b_next;
				nmp = NULL;
			} else if (multi && !mp_cont) { /* first segment */
				*tail_mp = nmp;
				tail_mp = &nmp->b_cont;
			} else if (multi && mp_cont) { /* mid of multi segs */
				*tail_mp = mp_cont;
				tail_mp = &mp_cont->b_cont;
			} else if (!multi && mp_cont) { /* last segment */
				*tail_mp = mp_cont;
				/*
				 * NOTE(review): the tail advances via the
				 * head segment's b_next here (nmp, not
				 * mp_cont), presumably so the next frame
				 * chains after the whole jumbo — confirm.
				 */
				tail_mp = &nmp->b_next;
				nmp = NULL;
			}
		}
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "before updating: multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

		/* Only count a frame once all of its segments are in. */
		if (!multi) {
			qlen_hw--;
			npkt_read++;
		}

		/*
		 * Update the next read entry.
		 */
		comp_rd_index = NEXT_ENTRY(comp_rd_index,
		    rcr_p->comp_wrap_mask);

		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
		    rcr_p->rcr_desc_first_p,
		    rcr_p->rcr_desc_last_p);

		nrcr_read++;

		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "<== nxge_rx_pkts: (SAM, process one packet) "
		    "nrcr_read %d",
		    nrcr_read));
		NXGE_DEBUG_MSG((nxgep, RX_CTL,
		    "==> nxge_rx_pkts: loop: rcr channel %d "
		    "multi %d "
		    "nrcr_read %d "
		    "npk read %d "
		    "head_pp $%p index %d ",
		    channel,
		    multi,
		    nrcr_read, npkt_read, rcr_desc_rd_head_pp,
		    comp_rd_index));

	}

	/*
	 * Publish ring progress back into the ring structure.
	 * NOTE(review): rcr_desc_rd_head_pp is never advanced in the
	 * loop above, so this stores it back unchanged — confirm
	 * whether that is intentional.
	 */
	rcr_p->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
	rcr_p->comp_rd_index = comp_rd_index;
	rcr_p->rcr_desc_rd_head_p = rcr_desc_rd_head_p;

	/* Propagate any new interrupt pacing settings to the channel. */
	if ((nxgep->intr_timeout != rcr_p->intr_timeout) ||
	    (nxgep->intr_threshold != rcr_p->intr_threshold)) {
		rcr_p->intr_timeout = nxgep->intr_timeout;
		rcr_p->intr_threshold = nxgep->intr_threshold;
		rcr_cfg_b.value = 0x0ULL;
		if (rcr_p->intr_timeout)
			rcr_cfg_b.bits.ldw.entout = 1;
		rcr_cfg_b.bits.ldw.timeout = rcr_p->intr_timeout;
		rcr_cfg_b.bits.ldw.pthres = rcr_p->intr_threshold;
		RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG,
		    channel, rcr_cfg_b.value);
	}

	cs.bits.ldw.pktread = npkt_read;
	cs.bits.ldw.ptrread = nrcr_read;
	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
	    channel, cs.value);
	NXGE_DEBUG_MSG((nxgep, RX_CTL,
	    "==> nxge_rx_pkts: EXIT: rcr channel %d "
	    "head_pp $%p index %016llx ",
	    channel,
	    rcr_p->rcr_desc_rd_head_pp,
	    rcr_p->comp_rd_index));
	/*
	 * Update RCR buffer pointer read and number of packets
	 * read.
	 */

	*rcrp = rcr_p;
	NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_pkts"));
	return (head_mp);
}

/*
 * nxge_receive_packet: process a single RCR completion entry,
 * producing an mblk (*mp / *mp_cont) and setting *multi_p when the
 * frame spans multiple entries.
 */
void
nxge_receive_packet(p_nxge_t nxgep,
    p_rx_rcr_ring_t rcr_p, p_rcr_entry_t rcr_desc_rd_head_p,
    boolean_t *multi_p, mblk_t **mp, mblk_t **mp_cont)
{
	p_mblk_t nmp = NULL;
	uint64_t multi;
	uint64_t dcf_err;
	uint8_t channel;

	boolean_t first_entry = B_TRUE;
	boolean_t is_tcp_udp = B_FALSE;
	boolean_t buffer_free = B_FALSE;
	boolean_t error_send_up = B_FALSE;
	uint8_t error_type;
	uint16_t l2_len;
	uint16_t skip_len;
	uint8_t pktbufsz_type;
	uint64_t rcr_entry;
	uint64_t *pkt_buf_addr_pp;
	uint64_t *pkt_buf_addr_p;
	uint32_t buf_offset;
	uint32_t bsize;
	uint32_t error_disp_cnt;
	uint32_t msg_index;
	p_rx_rbr_ring_t rx_rbr_p;
	p_rx_msg_t *rx_msg_ring_p;
	p_rx_msg_t rx_msg_p;
	uint16_t sw_offset_bytes = 0, hdr_size = 0;
	nxge_status_t status = NXGE_OK;
	boolean_t is_valid = B_FALSE;
	p_nxge_rx_ring_stats_t rdc_stats;
21003859Sml29623 uint32_t bytes_read; 21013859Sml29623 uint64_t pkt_type; 21023859Sml29623 uint64_t frag; 21033859Sml29623 #ifdef NXGE_DEBUG 21043859Sml29623 int dump_len; 21053859Sml29623 #endif 21063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "==> nxge_receive_packet")); 21073859Sml29623 first_entry = (*mp == NULL) ? B_TRUE : B_FALSE; 21083859Sml29623 21093859Sml29623 rcr_entry = *((uint64_t *)rcr_desc_rd_head_p); 21103859Sml29623 21113859Sml29623 multi = (rcr_entry & RCR_MULTI_MASK); 21123859Sml29623 dcf_err = (rcr_entry & RCR_DCF_ERROR_MASK); 21133859Sml29623 pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK); 21143859Sml29623 21153859Sml29623 error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT); 21163859Sml29623 frag = (rcr_entry & RCR_FRAG_MASK); 21173859Sml29623 21183859Sml29623 l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT); 21193859Sml29623 21203859Sml29623 pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >> 21213859Sml29623 RCR_PKTBUFSZ_SHIFT); 21225125Sjoycey #if defined(__i386) 21235125Sjoycey pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry & 21245125Sjoycey RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT); 21255125Sjoycey #else 21263859Sml29623 pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) << 21273859Sml29623 RCR_PKT_BUF_ADDR_SHIFT); 21285125Sjoycey #endif 21293859Sml29623 21303859Sml29623 channel = rcr_p->rdc; 21313859Sml29623 21323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 21333859Sml29623 "==> nxge_receive_packet: entryp $%p entry 0x%0llx " 21343859Sml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 21353859Sml29623 "error_type 0x%x pkt_type 0x%x " 21363859Sml29623 "pktbufsz_type %d ", 21373859Sml29623 rcr_desc_rd_head_p, 21383859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 21393859Sml29623 multi, 21403859Sml29623 error_type, 21413859Sml29623 pkt_type, 21423859Sml29623 pktbufsz_type)); 21433859Sml29623 21443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 21453859Sml29623 "==> nxge_receive_packet: entryp $%p 
entry 0x%0llx " 21463859Sml29623 "pkt_buf_addr_pp $%p l2_len %d multi 0x%llx " 21473859Sml29623 "error_type 0x%x pkt_type 0x%x ", rcr_desc_rd_head_p, 21483859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 21493859Sml29623 multi, 21503859Sml29623 error_type, 21513859Sml29623 pkt_type)); 21523859Sml29623 21533859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 21543859Sml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 21553859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 21563859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 21573859Sml29623 21583859Sml29623 /* get the stats ptr */ 21593859Sml29623 rdc_stats = rcr_p->rdc_stats; 21603859Sml29623 21613859Sml29623 if (!l2_len) { 21623859Sml29623 21633859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21643859Sml29623 "<== nxge_receive_packet: failed: l2 length is 0.")); 21653859Sml29623 return; 21663859Sml29623 } 21673859Sml29623 21684185Sspeer /* Hardware sends us 4 bytes of CRC as no stripping is done. */ 21694185Sspeer l2_len -= ETHERFCSL; 21704185Sspeer 21713859Sml29623 /* shift 6 bits to get the full io address */ 21725125Sjoycey #if defined(__i386) 21735125Sjoycey pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp << 21745125Sjoycey RCR_PKT_BUF_ADDR_SHIFT_FULL); 21755125Sjoycey #else 21763859Sml29623 pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp << 21773859Sml29623 RCR_PKT_BUF_ADDR_SHIFT_FULL); 21785125Sjoycey #endif 21793859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 21803859Sml29623 "==> (rbr) nxge_receive_packet: entry 0x%0llx " 21813859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 21823859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 21833859Sml29623 21843859Sml29623 rx_rbr_p = rcr_p->rx_rbr_p; 21853859Sml29623 rx_msg_ring_p = rx_rbr_p->rx_msg_ring; 21863859Sml29623 21873859Sml29623 if (first_entry) { 21883859Sml29623 hdr_size = (rcr_p->full_hdr_flag ? 
RXDMA_HDR_SIZE_FULL : 21893859Sml29623 RXDMA_HDR_SIZE_DEFAULT); 21903859Sml29623 21913859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 21923859Sml29623 "==> nxge_receive_packet: first entry 0x%016llx " 21933859Sml29623 "pkt_buf_addr_pp $%p l2_len %d hdr %d", 21943859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len, 21953859Sml29623 hdr_size)); 21963859Sml29623 } 21973859Sml29623 21983859Sml29623 MUTEX_ENTER(&rcr_p->lock); 21993859Sml29623 MUTEX_ENTER(&rx_rbr_p->lock); 22003859Sml29623 22013859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22023859Sml29623 "==> (rbr 1) nxge_receive_packet: entry 0x%0llx " 22033859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22043859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 22053859Sml29623 22063859Sml29623 /* 22073859Sml29623 * Packet buffer address in the completion entry points 22083859Sml29623 * to the starting buffer address (offset 0). 22093859Sml29623 * Use the starting buffer address to locate the corresponding 22103859Sml29623 * kernel address. 22113859Sml29623 */ 22123859Sml29623 status = nxge_rxbuf_pp_to_vp(nxgep, rx_rbr_p, 22133859Sml29623 pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p, 22143859Sml29623 &buf_offset, 22153859Sml29623 &msg_index); 22163859Sml29623 22173859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22183859Sml29623 "==> (rbr 2) nxge_receive_packet: entry 0x%0llx " 22193859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22203859Sml29623 rcr_entry, pkt_buf_addr_pp, l2_len)); 22213859Sml29623 22223859Sml29623 if (status != NXGE_OK) { 22233859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 22243859Sml29623 MUTEX_EXIT(&rcr_p->lock); 22253859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22263859Sml29623 "<== nxge_receive_packet: found vaddr failed %d", 22273859Sml29623 status)); 22283859Sml29623 return; 22293859Sml29623 } 22303859Sml29623 22313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22323859Sml29623 "==> (rbr 3) nxge_receive_packet: entry 0x%0llx " 22333859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22343859Sml29623 rcr_entry, 
pkt_buf_addr_pp, l2_len)); 22353859Sml29623 22363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22373859Sml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 22383859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22393859Sml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 22403859Sml29623 22413859Sml29623 rx_msg_p = rx_msg_ring_p[msg_index]; 22423859Sml29623 22433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22443859Sml29623 "==> (rbr 4 msgindex %d) nxge_receive_packet: entry 0x%0llx " 22453859Sml29623 "full pkt_buf_addr_pp $%p l2_len %d", 22463859Sml29623 msg_index, rcr_entry, pkt_buf_addr_pp, l2_len)); 22473859Sml29623 22483859Sml29623 switch (pktbufsz_type) { 22493859Sml29623 case RCR_PKTBUFSZ_0: 22503859Sml29623 bsize = rx_rbr_p->pkt_buf_size0_bytes; 22513859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22523859Sml29623 "==> nxge_receive_packet: 0 buf %d", bsize)); 22533859Sml29623 break; 22543859Sml29623 case RCR_PKTBUFSZ_1: 22553859Sml29623 bsize = rx_rbr_p->pkt_buf_size1_bytes; 22563859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22573859Sml29623 "==> nxge_receive_packet: 1 buf %d", bsize)); 22583859Sml29623 break; 22593859Sml29623 case RCR_PKTBUFSZ_2: 22603859Sml29623 bsize = rx_rbr_p->pkt_buf_size2_bytes; 22613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 22623859Sml29623 "==> nxge_receive_packet: 2 buf %d", bsize)); 22633859Sml29623 break; 22643859Sml29623 case RCR_SINGLE_BLOCK: 22653859Sml29623 bsize = rx_msg_p->block_size; 22663859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22673859Sml29623 "==> nxge_receive_packet: single %d", bsize)); 22683859Sml29623 22693859Sml29623 break; 22703859Sml29623 default: 22713859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 22723859Sml29623 MUTEX_EXIT(&rcr_p->lock); 22733859Sml29623 return; 22743859Sml29623 } 22753859Sml29623 22763859Sml29623 DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma, 22773859Sml29623 (buf_offset + sw_offset_bytes), 22783859Sml29623 (hdr_size + l2_len), 22793859Sml29623 DDI_DMA_SYNC_FORCPU); 22803859Sml29623 
22813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 22823859Sml29623 "==> nxge_receive_packet: after first dump:usage count")); 22833859Sml29623 22843859Sml29623 if (rx_msg_p->cur_usage_cnt == 0) { 22853859Sml29623 if (rx_rbr_p->rbr_use_bcopy) { 22863859Sml29623 atomic_inc_32(&rx_rbr_p->rbr_consumed); 22873859Sml29623 if (rx_rbr_p->rbr_consumed < 22883859Sml29623 rx_rbr_p->rbr_threshold_hi) { 22893859Sml29623 if (rx_rbr_p->rbr_threshold_lo == 0 || 22903859Sml29623 ((rx_rbr_p->rbr_consumed >= 22913859Sml29623 rx_rbr_p->rbr_threshold_lo) && 22923859Sml29623 (rx_rbr_p->rbr_bufsize_type >= 22933859Sml29623 pktbufsz_type))) { 22943859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 22953859Sml29623 } 22963859Sml29623 } else { 22973859Sml29623 rx_msg_p->rx_use_bcopy = B_TRUE; 22983859Sml29623 } 22993859Sml29623 } 23003859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23013859Sml29623 "==> nxge_receive_packet: buf %d (new block) ", 23023859Sml29623 bsize)); 23033859Sml29623 23043859Sml29623 rx_msg_p->pkt_buf_size_code = pktbufsz_type; 23053859Sml29623 rx_msg_p->pkt_buf_size = bsize; 23063859Sml29623 rx_msg_p->cur_usage_cnt = 1; 23073859Sml29623 if (pktbufsz_type == RCR_SINGLE_BLOCK) { 23083859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 23093859Sml29623 "==> nxge_receive_packet: buf %d " 23103859Sml29623 "(single block) ", 23113859Sml29623 bsize)); 23123859Sml29623 /* 23133859Sml29623 * Buffer can be reused once the free function 23143859Sml29623 * is called. 
23153859Sml29623 */ 23163859Sml29623 rx_msg_p->max_usage_cnt = 1; 23173859Sml29623 buffer_free = B_TRUE; 23183859Sml29623 } else { 23193859Sml29623 rx_msg_p->max_usage_cnt = rx_msg_p->block_size/bsize; 23203859Sml29623 if (rx_msg_p->max_usage_cnt == 1) { 23213859Sml29623 buffer_free = B_TRUE; 23223859Sml29623 } 23233859Sml29623 } 23243859Sml29623 } else { 23253859Sml29623 rx_msg_p->cur_usage_cnt++; 23263859Sml29623 if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) { 23273859Sml29623 buffer_free = B_TRUE; 23283859Sml29623 } 23293859Sml29623 } 23303859Sml29623 23313859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23323859Sml29623 "msgbuf index = %d l2len %d bytes usage %d max_usage %d ", 23333859Sml29623 msg_index, l2_len, 23343859Sml29623 rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt)); 23353859Sml29623 23363859Sml29623 if ((error_type) || (dcf_err)) { 23373859Sml29623 rdc_stats->ierrors++; 23383859Sml29623 if (dcf_err) { 23393859Sml29623 rdc_stats->dcf_err++; 23403859Sml29623 #ifdef NXGE_DEBUG 23413859Sml29623 if (!rdc_stats->dcf_err) { 23423859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 23433859Sml29623 "nxge_receive_packet: channel %d dcf_err rcr" 23443859Sml29623 " 0x%llx", channel, rcr_entry)); 23453859Sml29623 } 23463859Sml29623 #endif 23473859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 23483859Sml29623 NXGE_FM_EREPORT_RDMC_DCF_ERR); 23493859Sml29623 } else { 23503859Sml29623 /* Update error stats */ 23513859Sml29623 error_disp_cnt = NXGE_ERROR_SHOW_MAX; 23523859Sml29623 rdc_stats->errlog.compl_err_type = error_type; 23533859Sml29623 23543859Sml29623 switch (error_type) { 2355*5165Syc148097 case RCR_L2_ERROR: 2356*5165Syc148097 rdc_stats->l2_err++; 2357*5165Syc148097 if (rdc_stats->l2_err < 2358*5165Syc148097 error_disp_cnt) { 2359*5165Syc148097 NXGE_FM_REPORT_ERROR(nxgep, 2360*5165Syc148097 nxgep->mac.portnum, NULL, 2361*5165Syc148097 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2362*5165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2363*5165Syc148097 " 
nxge_receive_packet:" 2364*5165Syc148097 " channel %d RCR L2_ERROR", 2365*5165Syc148097 channel)); 2366*5165Syc148097 } 2367*5165Syc148097 break; 2368*5165Syc148097 case RCR_L4_CSUM_ERROR: 2369*5165Syc148097 error_send_up = B_TRUE; 2370*5165Syc148097 rdc_stats->l4_cksum_err++; 2371*5165Syc148097 if (rdc_stats->l4_cksum_err < 2372*5165Syc148097 error_disp_cnt) { 2373*5165Syc148097 NXGE_FM_REPORT_ERROR(nxgep, 2374*5165Syc148097 nxgep->mac.portnum, NULL, 2375*5165Syc148097 NXGE_FM_EREPORT_RDMC_RCR_ERR); 23763859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2377*5165Syc148097 " nxge_receive_packet:" 2378*5165Syc148097 " channel %d" 2379*5165Syc148097 " RCR L4_CSUM_ERROR", channel)); 2380*5165Syc148097 } 2381*5165Syc148097 break; 2382*5165Syc148097 case RCR_FFLP_SOFT_ERROR: 2383*5165Syc148097 error_send_up = B_TRUE; 2384*5165Syc148097 rdc_stats->fflp_soft_err++; 2385*5165Syc148097 if (rdc_stats->fflp_soft_err < 2386*5165Syc148097 error_disp_cnt) { 2387*5165Syc148097 NXGE_FM_REPORT_ERROR(nxgep, 2388*5165Syc148097 nxgep->mac.portnum, NULL, 2389*5165Syc148097 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2390*5165Syc148097 NXGE_ERROR_MSG((nxgep, 2391*5165Syc148097 NXGE_ERR_CTL, 2392*5165Syc148097 " nxge_receive_packet:" 2393*5165Syc148097 " channel %d" 2394*5165Syc148097 " RCR FFLP_SOFT_ERROR", channel)); 2395*5165Syc148097 } 2396*5165Syc148097 break; 2397*5165Syc148097 case RCR_ZCP_SOFT_ERROR: 2398*5165Syc148097 error_send_up = B_TRUE; 2399*5165Syc148097 rdc_stats->fflp_soft_err++; 2400*5165Syc148097 if (rdc_stats->zcp_soft_err < 2401*5165Syc148097 error_disp_cnt) 2402*5165Syc148097 NXGE_FM_REPORT_ERROR(nxgep, 2403*5165Syc148097 nxgep->mac.portnum, NULL, 2404*5165Syc148097 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2405*5165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2406*5165Syc148097 " nxge_receive_packet: Channel %d" 2407*5165Syc148097 " RCR ZCP_SOFT_ERROR", channel)); 2408*5165Syc148097 break; 2409*5165Syc148097 default: 2410*5165Syc148097 rdc_stats->rcr_unknown_err++; 2411*5165Syc148097 if 
(rdc_stats->rcr_unknown_err 2412*5165Syc148097 < error_disp_cnt) { 2413*5165Syc148097 NXGE_FM_REPORT_ERROR(nxgep, 2414*5165Syc148097 nxgep->mac.portnum, NULL, 2415*5165Syc148097 NXGE_FM_EREPORT_RDMC_RCR_ERR); 2416*5165Syc148097 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2417*5165Syc148097 " nxge_receive_packet: Channel %d" 2418*5165Syc148097 " RCR entry 0x%llx error 0x%x", 2419*5165Syc148097 rcr_entry, channel, error_type)); 2420*5165Syc148097 } 2421*5165Syc148097 break; 24223859Sml29623 } 24233859Sml29623 } 24243859Sml29623 24253859Sml29623 /* 24263859Sml29623 * Update and repost buffer block if max usage 24273859Sml29623 * count is reached. 24283859Sml29623 */ 24293859Sml29623 if (error_send_up == B_FALSE) { 24304874Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 24314874Sml29623 atomic_inc_32(&nxge_mblks_pending); 24323859Sml29623 if (buffer_free == B_TRUE) { 24333859Sml29623 rx_msg_p->free = B_TRUE; 24343859Sml29623 } 24353859Sml29623 24363859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 24373859Sml29623 MUTEX_EXIT(&rcr_p->lock); 24383859Sml29623 nxge_freeb(rx_msg_p); 24393859Sml29623 return; 24403859Sml29623 } 24413859Sml29623 } 24423859Sml29623 24433859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 24443859Sml29623 "==> nxge_receive_packet: DMA sync second ")); 24453859Sml29623 2446*5165Syc148097 bytes_read = rcr_p->rcvd_pkt_bytes; 24473859Sml29623 skip_len = sw_offset_bytes + hdr_size; 24483859Sml29623 if (!rx_msg_p->rx_use_bcopy) { 24494874Sml29623 /* 24504874Sml29623 * For loaned up buffers, the driver reference count 24514874Sml29623 * will be incremented first and then the free state. 
24524874Sml29623 */ 2453*5165Syc148097 if ((nmp = nxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) { 2454*5165Syc148097 if (first_entry) { 2455*5165Syc148097 nmp->b_rptr = &nmp->b_rptr[skip_len]; 2456*5165Syc148097 if (l2_len < bsize - skip_len) { 2457*5165Syc148097 nmp->b_wptr = &nmp->b_rptr[l2_len]; 2458*5165Syc148097 } else { 2459*5165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize 2460*5165Syc148097 - skip_len]; 2461*5165Syc148097 } 2462*5165Syc148097 } else { 2463*5165Syc148097 if (l2_len - bytes_read < bsize) { 2464*5165Syc148097 nmp->b_wptr = 2465*5165Syc148097 &nmp->b_rptr[l2_len - bytes_read]; 2466*5165Syc148097 } else { 2467*5165Syc148097 nmp->b_wptr = &nmp->b_rptr[bsize]; 2468*5165Syc148097 } 2469*5165Syc148097 } 2470*5165Syc148097 } 24713859Sml29623 } else { 2472*5165Syc148097 if (first_entry) { 2473*5165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len, 2474*5165Syc148097 l2_len < bsize - skip_len ? 2475*5165Syc148097 l2_len : bsize - skip_len); 2476*5165Syc148097 } else { 2477*5165Syc148097 nmp = nxge_dupb_bcopy(rx_msg_p, buf_offset, 2478*5165Syc148097 l2_len - bytes_read < bsize ? 
2479*5165Syc148097 l2_len - bytes_read : bsize); 2480*5165Syc148097 } 24813859Sml29623 } 24823859Sml29623 if (nmp != NULL) { 2483*5165Syc148097 if (first_entry) 2484*5165Syc148097 bytes_read = nmp->b_wptr - nmp->b_rptr; 2485*5165Syc148097 else 24863859Sml29623 bytes_read += nmp->b_wptr - nmp->b_rptr; 2487*5165Syc148097 2488*5165Syc148097 NXGE_DEBUG_MSG((nxgep, RX_CTL, 2489*5165Syc148097 "==> nxge_receive_packet after dupb: " 2490*5165Syc148097 "rbr consumed %d " 2491*5165Syc148097 "pktbufsz_type %d " 2492*5165Syc148097 "nmp $%p rptr $%p wptr $%p " 2493*5165Syc148097 "buf_offset %d bzise %d l2_len %d skip_len %d", 2494*5165Syc148097 rx_rbr_p->rbr_consumed, 2495*5165Syc148097 pktbufsz_type, 2496*5165Syc148097 nmp, nmp->b_rptr, nmp->b_wptr, 2497*5165Syc148097 buf_offset, bsize, l2_len, skip_len)); 24983859Sml29623 } else { 24993859Sml29623 cmn_err(CE_WARN, "!nxge_receive_packet: " 25003859Sml29623 "update stats (error)"); 25014977Sraghus atomic_inc_32(&rx_msg_p->ref_cnt); 25024977Sraghus atomic_inc_32(&nxge_mblks_pending); 25034977Sraghus if (buffer_free == B_TRUE) { 25044977Sraghus rx_msg_p->free = B_TRUE; 25054977Sraghus } 25064977Sraghus MUTEX_EXIT(&rx_rbr_p->lock); 25074977Sraghus MUTEX_EXIT(&rcr_p->lock); 25084977Sraghus nxge_freeb(rx_msg_p); 25094977Sraghus return; 25103859Sml29623 } 25115060Syc148097 25123859Sml29623 if (buffer_free == B_TRUE) { 25133859Sml29623 rx_msg_p->free = B_TRUE; 25143859Sml29623 } 25153859Sml29623 /* 25163859Sml29623 * ERROR, FRAG and PKT_TYPE are only reported 25173859Sml29623 * in the first entry. 25183859Sml29623 * If a packet is not fragmented and no error bit is set, then 25193859Sml29623 * L4 checksum is OK. 25203859Sml29623 */ 25213859Sml29623 is_valid = (nmp != NULL); 2522*5165Syc148097 if (first_entry) { 2523*5165Syc148097 rdc_stats->ipackets++; /* count only 1st seg for jumbo */ 2524*5165Syc148097 rdc_stats->ibytes += skip_len + l2_len < bsize ? 
2525*5165Syc148097 l2_len : bsize; 2526*5165Syc148097 } else { 2527*5165Syc148097 rdc_stats->ibytes += l2_len - bytes_read < bsize ? 2528*5165Syc148097 l2_len - bytes_read : bsize; 2529*5165Syc148097 } 2530*5165Syc148097 2531*5165Syc148097 rcr_p->rcvd_pkt_bytes = bytes_read; 2532*5165Syc148097 25333859Sml29623 MUTEX_EXIT(&rx_rbr_p->lock); 25343859Sml29623 MUTEX_EXIT(&rcr_p->lock); 25353859Sml29623 25363859Sml29623 if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) { 25373859Sml29623 atomic_inc_32(&rx_msg_p->ref_cnt); 25384185Sspeer atomic_inc_32(&nxge_mblks_pending); 25393859Sml29623 nxge_freeb(rx_msg_p); 25403859Sml29623 } 25413859Sml29623 25423859Sml29623 if (is_valid) { 25433859Sml29623 nmp->b_cont = NULL; 25443859Sml29623 if (first_entry) { 25453859Sml29623 *mp = nmp; 25463859Sml29623 *mp_cont = NULL; 2547*5165Syc148097 } else { 25483859Sml29623 *mp_cont = nmp; 2549*5165Syc148097 } 25503859Sml29623 } 25513859Sml29623 25523859Sml29623 /* 25533859Sml29623 * Update stats and hardware checksuming. 25543859Sml29623 */ 25553859Sml29623 if (is_valid && !multi) { 25563859Sml29623 25573859Sml29623 is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP || 25583859Sml29623 pkt_type == RCR_PKT_IS_UDP) ? 
25593859Sml29623 B_TRUE: B_FALSE); 25603859Sml29623 25613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_receive_packet: " 25623859Sml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d error %d", 25633859Sml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 25643859Sml29623 25653859Sml29623 if (is_tcp_udp && !frag && !error_type) { 25663859Sml29623 (void) hcksum_assoc(nmp, NULL, NULL, 0, 0, 0, 0, 25673859Sml29623 HCK_FULLCKSUM_OK | HCK_FULLCKSUM, 0); 25683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 25693859Sml29623 "==> nxge_receive_packet: Full tcp/udp cksum " 25703859Sml29623 "is_valid 0x%x multi 0x%llx pkt %d frag %d " 25713859Sml29623 "error %d", 25723859Sml29623 is_valid, multi, is_tcp_udp, frag, error_type)); 25733859Sml29623 } 25743859Sml29623 } 25753859Sml29623 25763859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, 25773859Sml29623 "==> nxge_receive_packet: *mp 0x%016llx", *mp)); 25783859Sml29623 25793859Sml29623 *multi_p = (multi == RCR_MULTI_MASK); 25803859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_receive_packet: " 25813859Sml29623 "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx", 25823859Sml29623 *multi_p, nmp, *mp, *mp_cont)); 25833859Sml29623 } 25843859Sml29623 25853859Sml29623 /*ARGSUSED*/ 25863859Sml29623 static nxge_status_t 25873859Sml29623 nxge_rx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, 25883859Sml29623 rx_dma_ctl_stat_t cs) 25893859Sml29623 { 25903859Sml29623 p_nxge_rx_ring_stats_t rdc_stats; 25913859Sml29623 npi_handle_t handle; 25923859Sml29623 npi_status_t rs; 25933859Sml29623 boolean_t rxchan_fatal = B_FALSE; 25943859Sml29623 boolean_t rxport_fatal = B_FALSE; 25953859Sml29623 uint8_t channel; 25963859Sml29623 uint8_t portn; 25973859Sml29623 nxge_status_t status = NXGE_OK; 25983859Sml29623 uint32_t error_disp_cnt = NXGE_ERROR_SHOW_MAX; 25993859Sml29623 NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_err_evnts")); 26003859Sml29623 26013859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 26023859Sml29623 channel = 
ldvp->channel; 26033859Sml29623 portn = nxgep->mac.portnum; 26043859Sml29623 rdc_stats = &nxgep->statsp->rdc_stats[ldvp->vdma_index]; 26053859Sml29623 26063859Sml29623 if (cs.bits.hdw.rbr_tmout) { 26073859Sml29623 rdc_stats->rx_rbr_tmout++; 26083859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26093859Sml29623 NXGE_FM_EREPORT_RDMC_RBR_TMOUT); 26103859Sml29623 rxchan_fatal = B_TRUE; 26113859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26123859Sml29623 "==> nxge_rx_err_evnts: rx_rbr_timeout")); 26133859Sml29623 } 26143859Sml29623 if (cs.bits.hdw.rsp_cnt_err) { 26153859Sml29623 rdc_stats->rsp_cnt_err++; 26163859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26173859Sml29623 NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR); 26183859Sml29623 rxchan_fatal = B_TRUE; 26193859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26203859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26213859Sml29623 "rsp_cnt_err", channel)); 26223859Sml29623 } 26233859Sml29623 if (cs.bits.hdw.byte_en_bus) { 26243859Sml29623 rdc_stats->byte_en_bus++; 26253859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26263859Sml29623 NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS); 26273859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26283859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26293859Sml29623 "fatal error: byte_en_bus", channel)); 26303859Sml29623 rxchan_fatal = B_TRUE; 26313859Sml29623 } 26323859Sml29623 if (cs.bits.hdw.rsp_dat_err) { 26333859Sml29623 rdc_stats->rsp_dat_err++; 26343859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26353859Sml29623 NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR); 26363859Sml29623 rxchan_fatal = B_TRUE; 26373859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26383859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26393859Sml29623 "fatal error: rsp_dat_err", channel)); 26403859Sml29623 } 26413859Sml29623 if (cs.bits.hdw.rcr_ack_err) { 26423859Sml29623 rdc_stats->rcr_ack_err++; 26433859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26443859Sml29623 NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR); 
26453859Sml29623 rxchan_fatal = B_TRUE; 26463859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26473859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26483859Sml29623 "fatal error: rcr_ack_err", channel)); 26493859Sml29623 } 26503859Sml29623 if (cs.bits.hdw.dc_fifo_err) { 26513859Sml29623 rdc_stats->dc_fifo_err++; 26523859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26533859Sml29623 NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR); 26543859Sml29623 /* This is not a fatal error! */ 26553859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26563859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26573859Sml29623 "dc_fifo_err", channel)); 26583859Sml29623 rxport_fatal = B_TRUE; 26593859Sml29623 } 26603859Sml29623 if ((cs.bits.hdw.rcr_sha_par) || (cs.bits.hdw.rbr_pre_par)) { 26613859Sml29623 if ((rs = npi_rxdma_ring_perr_stat_get(handle, 26623859Sml29623 &rdc_stats->errlog.pre_par, 26633859Sml29623 &rdc_stats->errlog.sha_par)) 26643859Sml29623 != NPI_SUCCESS) { 26653859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26663859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26673859Sml29623 "rcr_sha_par: get perr", channel)); 26683859Sml29623 return (NXGE_ERROR | rs); 26693859Sml29623 } 26703859Sml29623 if (cs.bits.hdw.rcr_sha_par) { 26713859Sml29623 rdc_stats->rcr_sha_par++; 26723859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26733859Sml29623 NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR); 26743859Sml29623 rxchan_fatal = B_TRUE; 26753859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26763859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26773859Sml29623 "fatal error: rcr_sha_par", channel)); 26783859Sml29623 } 26793859Sml29623 if (cs.bits.hdw.rbr_pre_par) { 26803859Sml29623 rdc_stats->rbr_pre_par++; 26813859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 26823859Sml29623 NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR); 26833859Sml29623 rxchan_fatal = B_TRUE; 26843859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26853859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 26863859Sml29623 "fatal error: 
rbr_pre_par", channel)); 26873859Sml29623 } 26883859Sml29623 } 26893859Sml29623 if (cs.bits.hdw.port_drop_pkt) { 26903859Sml29623 rdc_stats->port_drop_pkt++; 26913859Sml29623 if (rdc_stats->port_drop_pkt < error_disp_cnt) 26923859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 26933859Sml29623 "==> nxge_rx_err_evnts (channel %d): " 26943859Sml29623 "port_drop_pkt", channel)); 26953859Sml29623 } 26963859Sml29623 if (cs.bits.hdw.wred_drop) { 26973859Sml29623 rdc_stats->wred_drop++; 26983859Sml29623 NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL, 26993859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27003859Sml29623 "wred_drop", channel)); 27013859Sml29623 } 27023859Sml29623 if (cs.bits.hdw.rbr_pre_empty) { 27033859Sml29623 rdc_stats->rbr_pre_empty++; 27043859Sml29623 if (rdc_stats->rbr_pre_empty < error_disp_cnt) 27053859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27063859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27073859Sml29623 "rbr_pre_empty", channel)); 27083859Sml29623 } 27093859Sml29623 if (cs.bits.hdw.rcr_shadow_full) { 27103859Sml29623 rdc_stats->rcr_shadow_full++; 27113859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27123859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27133859Sml29623 "rcr_shadow_full", channel)); 27143859Sml29623 } 27153859Sml29623 if (cs.bits.hdw.config_err) { 27163859Sml29623 rdc_stats->config_err++; 27173859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27183859Sml29623 NXGE_FM_EREPORT_RDMC_CONFIG_ERR); 27193859Sml29623 rxchan_fatal = B_TRUE; 27203859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27213859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27223859Sml29623 "config error", channel)); 27233859Sml29623 } 27243859Sml29623 if (cs.bits.hdw.rcrincon) { 27253859Sml29623 rdc_stats->rcrincon++; 27263859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27273859Sml29623 NXGE_FM_EREPORT_RDMC_RCRINCON); 27283859Sml29623 rxchan_fatal = B_TRUE; 27293859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27303859Sml29623 "==> 
nxge_rx_err_evnts(channel %d): " 27313859Sml29623 "fatal error: rcrincon error", channel)); 27323859Sml29623 } 27333859Sml29623 if (cs.bits.hdw.rcrfull) { 27343859Sml29623 rdc_stats->rcrfull++; 27353859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27363859Sml29623 NXGE_FM_EREPORT_RDMC_RCRFULL); 27373859Sml29623 rxchan_fatal = B_TRUE; 27383859Sml29623 if (rdc_stats->rcrfull < error_disp_cnt) 27393859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27403859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27413859Sml29623 "fatal error: rcrfull error", channel)); 27423859Sml29623 } 27433859Sml29623 if (cs.bits.hdw.rbr_empty) { 27443859Sml29623 rdc_stats->rbr_empty++; 27453859Sml29623 if (rdc_stats->rbr_empty < error_disp_cnt) 27463859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27473859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27483859Sml29623 "rbr empty error", channel)); 27493859Sml29623 } 27503859Sml29623 if (cs.bits.hdw.rbrfull) { 27513859Sml29623 rdc_stats->rbrfull++; 27523859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27533859Sml29623 NXGE_FM_EREPORT_RDMC_RBRFULL); 27543859Sml29623 rxchan_fatal = B_TRUE; 27553859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27563859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27573859Sml29623 "fatal error: rbr_full error", channel)); 27583859Sml29623 } 27593859Sml29623 if (cs.bits.hdw.rbrlogpage) { 27603859Sml29623 rdc_stats->rbrlogpage++; 27613859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27623859Sml29623 NXGE_FM_EREPORT_RDMC_RBRLOGPAGE); 27633859Sml29623 rxchan_fatal = B_TRUE; 27643859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27653859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27663859Sml29623 "fatal error: rbr logical page error", channel)); 27673859Sml29623 } 27683859Sml29623 if (cs.bits.hdw.cfiglogpage) { 27693859Sml29623 rdc_stats->cfiglogpage++; 27703859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, channel, 27713859Sml29623 NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE); 27723859Sml29623 rxchan_fatal = 
B_TRUE; 27733859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27743859Sml29623 "==> nxge_rx_err_evnts(channel %d): " 27753859Sml29623 "fatal error: cfig logical page error", channel)); 27763859Sml29623 } 27773859Sml29623 27783859Sml29623 if (rxport_fatal) { 27793859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27803859Sml29623 " nxge_rx_err_evnts: " 27813859Sml29623 " fatal error on Port #%d\n", 27823859Sml29623 portn)); 27833859Sml29623 status = nxge_ipp_fatal_err_recover(nxgep); 27843859Sml29623 if (status == NXGE_OK) { 27853859Sml29623 FM_SERVICE_RESTORED(nxgep); 27863859Sml29623 } 27873859Sml29623 } 27883859Sml29623 27893859Sml29623 if (rxchan_fatal) { 27903859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 27913859Sml29623 " nxge_rx_err_evnts: " 27923859Sml29623 " fatal error on Channel #%d\n", 27933859Sml29623 channel)); 27943859Sml29623 status = nxge_rxdma_fatal_err_recover(nxgep, channel); 27953859Sml29623 if (status == NXGE_OK) { 27963859Sml29623 FM_SERVICE_RESTORED(nxgep); 27973859Sml29623 } 27983859Sml29623 } 27993859Sml29623 28003859Sml29623 NXGE_DEBUG_MSG((nxgep, RX2_CTL, "<== nxge_rx_err_evnts")); 28013859Sml29623 28023859Sml29623 return (status); 28033859Sml29623 } 28043859Sml29623 28053859Sml29623 static nxge_status_t 28063859Sml29623 nxge_map_rxdma(p_nxge_t nxgep) 28073859Sml29623 { 28083859Sml29623 int i, ndmas; 28093859Sml29623 uint16_t channel; 28103859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 28113859Sml29623 p_rx_rbr_ring_t *rbr_rings; 28123859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 28133859Sml29623 p_rx_rcr_ring_t *rcr_rings; 28143859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 28153859Sml29623 p_rx_mbox_t *rx_mbox_p; 28163859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 28173859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 28183859Sml29623 p_nxge_dma_common_t *dma_buf_p; 28193859Sml29623 p_nxge_dma_common_t *dma_cntl_p; 28203859Sml29623 uint32_t *num_chunks; 28213859Sml29623 nxge_status_t status = NXGE_OK; 28223859Sml29623 #if defined(sun4v) && 
defined(NIU_LP_WORKAROUND) 28233859Sml29623 p_nxge_dma_common_t t_dma_buf_p; 28243859Sml29623 p_nxge_dma_common_t t_dma_cntl_p; 28253859Sml29623 #endif 28263859Sml29623 28273859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_map_rxdma")); 28283859Sml29623 28293859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 28303859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 28313859Sml29623 28323859Sml29623 if (!dma_buf_poolp->buf_allocated || !dma_cntl_poolp->buf_allocated) { 28333859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 28343859Sml29623 "<== nxge_map_rxdma: buf not allocated")); 28353859Sml29623 return (NXGE_ERROR); 28363859Sml29623 } 28373859Sml29623 28383859Sml29623 ndmas = dma_buf_poolp->ndmas; 28393859Sml29623 if (!ndmas) { 28403859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 28413859Sml29623 "<== nxge_map_rxdma: no dma allocated")); 28423859Sml29623 return (NXGE_ERROR); 28433859Sml29623 } 28443859Sml29623 28453859Sml29623 num_chunks = dma_buf_poolp->num_chunks; 28463859Sml29623 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 28473859Sml29623 dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p; 28483859Sml29623 28493859Sml29623 rx_rbr_rings = (p_rx_rbr_rings_t) 28503859Sml29623 KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP); 28513859Sml29623 rbr_rings = (p_rx_rbr_ring_t *) 28523859Sml29623 KMEM_ZALLOC(sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP); 28533859Sml29623 rx_rcr_rings = (p_rx_rcr_rings_t) 28543859Sml29623 KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP); 28553859Sml29623 rcr_rings = (p_rx_rcr_ring_t *) 28563859Sml29623 KMEM_ZALLOC(sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP); 28573859Sml29623 rx_mbox_areas_p = (p_rx_mbox_areas_t) 28583859Sml29623 KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP); 28593859Sml29623 rx_mbox_p = (p_rx_mbox_t *) 28603859Sml29623 KMEM_ZALLOC(sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP); 28613859Sml29623 28623859Sml29623 /* 28633859Sml29623 * Timeout should be set based on the system clock divider. 
28643859Sml29623 * The following timeout value of 1 assumes that the 28653859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 28663859Sml29623 */ 28673859Sml29623 28683859Sml29623 nxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT; 28693859Sml29623 nxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT; 28703859Sml29623 28713859Sml29623 /* 28723859Sml29623 * Map descriptors from the buffer polls for each dam channel. 28733859Sml29623 */ 28743859Sml29623 for (i = 0; i < ndmas; i++) { 28753859Sml29623 /* 28763859Sml29623 * Set up and prepare buffer blocks, descriptors 28773859Sml29623 * and mailbox. 28783859Sml29623 */ 28793859Sml29623 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 28803859Sml29623 status = nxge_map_rxdma_channel(nxgep, channel, 28813859Sml29623 (p_nxge_dma_common_t *)&dma_buf_p[i], 28823859Sml29623 (p_rx_rbr_ring_t *)&rbr_rings[i], 28833859Sml29623 num_chunks[i], 28843859Sml29623 (p_nxge_dma_common_t *)&dma_cntl_p[i], 28853859Sml29623 (p_rx_rcr_ring_t *)&rcr_rings[i], 28863859Sml29623 (p_rx_mbox_t *)&rx_mbox_p[i]); 28873859Sml29623 if (status != NXGE_OK) { 28883859Sml29623 goto nxge_map_rxdma_fail1; 28893859Sml29623 } 28903859Sml29623 rbr_rings[i]->index = (uint16_t)i; 28913859Sml29623 rcr_rings[i]->index = (uint16_t)i; 28923859Sml29623 rcr_rings[i]->rdc_stats = &nxgep->statsp->rdc_stats[i]; 28933859Sml29623 28943859Sml29623 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 28953859Sml29623 if (nxgep->niu_type == N2_NIU && NXGE_DMA_BLOCK == 1) { 28963859Sml29623 rbr_rings[i]->hv_set = B_FALSE; 28973859Sml29623 t_dma_buf_p = (p_nxge_dma_common_t)dma_buf_p[i]; 28983859Sml29623 t_dma_cntl_p = 28993859Sml29623 (p_nxge_dma_common_t)dma_cntl_p[i]; 29003859Sml29623 29013859Sml29623 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp = 29023859Sml29623 (uint64_t)t_dma_buf_p->orig_ioaddr_pp; 29033859Sml29623 rbr_rings[i]->hv_rx_buf_ioaddr_size = 29043859Sml29623 (uint64_t)t_dma_buf_p->orig_alength; 29053859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
29063859Sml29623 "==> nxge_map_rxdma_channel: " 29073859Sml29623 "channel %d " 29083859Sml29623 "data buf base io $%p ($%p) " 29093859Sml29623 "size 0x%llx (%d 0x%x)", 29103859Sml29623 channel, 29113859Sml29623 rbr_rings[i]->hv_rx_buf_base_ioaddr_pp, 29123859Sml29623 t_dma_cntl_p->ioaddr_pp, 29133859Sml29623 rbr_rings[i]->hv_rx_buf_ioaddr_size, 29143859Sml29623 t_dma_buf_p->orig_alength, 29153859Sml29623 t_dma_buf_p->orig_alength)); 29163859Sml29623 29173859Sml29623 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp = 29183859Sml29623 (uint64_t)t_dma_cntl_p->orig_ioaddr_pp; 29193859Sml29623 rbr_rings[i]->hv_rx_cntl_ioaddr_size = 29203859Sml29623 (uint64_t)t_dma_cntl_p->orig_alength; 29213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 29223859Sml29623 "==> nxge_map_rxdma_channel: " 29233859Sml29623 "channel %d " 29243859Sml29623 "cntl base io $%p ($%p) " 29253859Sml29623 "size 0x%llx (%d 0x%x)", 29263859Sml29623 channel, 29273859Sml29623 rbr_rings[i]->hv_rx_cntl_base_ioaddr_pp, 29283859Sml29623 t_dma_cntl_p->ioaddr_pp, 29293859Sml29623 rbr_rings[i]->hv_rx_cntl_ioaddr_size, 29303859Sml29623 t_dma_cntl_p->orig_alength, 29313859Sml29623 t_dma_cntl_p->orig_alength)); 29323859Sml29623 } 29333859Sml29623 29343859Sml29623 #endif /* sun4v and NIU_LP_WORKAROUND */ 29353859Sml29623 } 29363859Sml29623 29373859Sml29623 rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas; 29383859Sml29623 rx_rbr_rings->rbr_rings = rbr_rings; 29393859Sml29623 nxgep->rx_rbr_rings = rx_rbr_rings; 29403859Sml29623 rx_rcr_rings->rcr_rings = rcr_rings; 29413859Sml29623 nxgep->rx_rcr_rings = rx_rcr_rings; 29423859Sml29623 29433859Sml29623 rx_mbox_areas_p->rxmbox_areas = rx_mbox_p; 29443859Sml29623 nxgep->rx_mbox_areas_p = rx_mbox_areas_p; 29453859Sml29623 29463859Sml29623 goto nxge_map_rxdma_exit; 29473859Sml29623 29483859Sml29623 nxge_map_rxdma_fail1: 29493859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 29503859Sml29623 "==> nxge_map_rxdma: unmap rbr,rcr " 29513859Sml29623 "(status 0x%x channel %d i %d)", 
29523859Sml29623 status, channel, i)); 29534185Sspeer i--; 29543859Sml29623 for (; i >= 0; i--) { 29553859Sml29623 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 29563859Sml29623 nxge_unmap_rxdma_channel(nxgep, channel, 29573859Sml29623 rbr_rings[i], 29583859Sml29623 rcr_rings[i], 29593859Sml29623 rx_mbox_p[i]); 29603859Sml29623 } 29613859Sml29623 29623859Sml29623 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 29633859Sml29623 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 29643859Sml29623 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas); 29653859Sml29623 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 29663859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 29673859Sml29623 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 29683859Sml29623 29693859Sml29623 nxge_map_rxdma_exit: 29703859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 29713859Sml29623 "<== nxge_map_rxdma: " 29723859Sml29623 "(status 0x%x channel %d)", 29733859Sml29623 status, channel)); 29743859Sml29623 29753859Sml29623 return (status); 29763859Sml29623 } 29773859Sml29623 29783859Sml29623 static void 29793859Sml29623 nxge_unmap_rxdma(p_nxge_t nxgep) 29803859Sml29623 { 29813859Sml29623 int i, ndmas; 29823859Sml29623 uint16_t channel; 29833859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 29843859Sml29623 p_rx_rbr_ring_t *rbr_rings; 29853859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 29863859Sml29623 p_rx_rcr_ring_t *rcr_rings; 29873859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 29883859Sml29623 p_rx_mbox_t *rx_mbox_p; 29893859Sml29623 p_nxge_dma_pool_t dma_buf_poolp; 29903859Sml29623 p_nxge_dma_pool_t dma_cntl_poolp; 29913859Sml29623 p_nxge_dma_common_t *dma_buf_p; 29923859Sml29623 29933859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_unmap_rxdma")); 29943859Sml29623 29953859Sml29623 dma_buf_poolp = nxgep->rx_buf_pool_p; 29963859Sml29623 dma_cntl_poolp = nxgep->rx_cntl_pool_p; 29973859Sml29623 29983859Sml29623 if (!dma_buf_poolp->buf_allocated || 
!dma_cntl_poolp->buf_allocated) { 29993859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30003859Sml29623 "<== nxge_unmap_rxdma: NULL buf pointers")); 30013859Sml29623 return; 30023859Sml29623 } 30033859Sml29623 30043859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 30053859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 30063859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 30073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30083859Sml29623 "<== nxge_unmap_rxdma: NULL ring pointers")); 30093859Sml29623 return; 30103859Sml29623 } 30113859Sml29623 ndmas = rx_rbr_rings->ndmas; 30123859Sml29623 if (!ndmas) { 30133859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30143859Sml29623 "<== nxge_unmap_rxdma: no channel")); 30153859Sml29623 return; 30163859Sml29623 } 30173859Sml29623 30183859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 30193859Sml29623 "==> nxge_unmap_rxdma (ndmas %d)", ndmas)); 30203859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 30213859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 30223859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 30233859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 30243859Sml29623 dma_buf_p = dma_buf_poolp->dma_buf_pool_p; 30253859Sml29623 30263859Sml29623 for (i = 0; i < ndmas; i++) { 30273859Sml29623 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 30283859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 30293859Sml29623 "==> nxge_unmap_rxdma (ndmas %d) channel %d", 30303859Sml29623 ndmas, channel)); 30313859Sml29623 (void) nxge_unmap_rxdma_channel(nxgep, channel, 30323859Sml29623 (p_rx_rbr_ring_t)rbr_rings[i], 30333859Sml29623 (p_rx_rcr_ring_t)rcr_rings[i], 30343859Sml29623 (p_rx_mbox_t)rx_mbox_p[i]); 30353859Sml29623 } 30363859Sml29623 30373859Sml29623 KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t)); 30383859Sml29623 KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas); 30393859Sml29623 KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t)); 30403859Sml29623 KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * 
ndmas); 30413859Sml29623 KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t)); 30423859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas); 30433859Sml29623 30443859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 30453859Sml29623 "<== nxge_unmap_rxdma")); 30463859Sml29623 } 30473859Sml29623 30483859Sml29623 nxge_status_t 30493859Sml29623 nxge_map_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 30503859Sml29623 p_nxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p, 30513859Sml29623 uint32_t num_chunks, 30523859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rcr_ring_t *rcr_p, 30533859Sml29623 p_rx_mbox_t *rx_mbox_p) 30543859Sml29623 { 30553859Sml29623 int status = NXGE_OK; 30563859Sml29623 30573859Sml29623 /* 30583859Sml29623 * Set up and prepare buffer blocks, descriptors 30593859Sml29623 * and mailbox. 30603859Sml29623 */ 30613859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 30623859Sml29623 "==> nxge_map_rxdma_channel (channel %d)", channel)); 30633859Sml29623 /* 30643859Sml29623 * Receive buffer blocks 30653859Sml29623 */ 30663859Sml29623 status = nxge_map_rxdma_channel_buf_ring(nxgep, channel, 30673859Sml29623 dma_buf_p, rbr_p, num_chunks); 30683859Sml29623 if (status != NXGE_OK) { 30693859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30703859Sml29623 "==> nxge_map_rxdma_channel (channel %d): " 30713859Sml29623 "map buffer failed 0x%x", channel, status)); 30723859Sml29623 goto nxge_map_rxdma_channel_exit; 30733859Sml29623 } 30743859Sml29623 30753859Sml29623 /* 30763859Sml29623 * Receive block ring, completion ring and mailbox. 
30773859Sml29623 */ 30783859Sml29623 status = nxge_map_rxdma_channel_cfg_ring(nxgep, channel, 30793859Sml29623 dma_cntl_p, rbr_p, rcr_p, rx_mbox_p); 30803859Sml29623 if (status != NXGE_OK) { 30813859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30823859Sml29623 "==> nxge_map_rxdma_channel (channel %d): " 30833859Sml29623 "map config failed 0x%x", channel, status)); 30843859Sml29623 goto nxge_map_rxdma_channel_fail2; 30853859Sml29623 } 30863859Sml29623 30873859Sml29623 goto nxge_map_rxdma_channel_exit; 30883859Sml29623 30893859Sml29623 nxge_map_rxdma_channel_fail3: 30903859Sml29623 /* Free rbr, rcr */ 30913859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 30923859Sml29623 "==> nxge_map_rxdma_channel: free rbr/rcr " 30933859Sml29623 "(status 0x%x channel %d)", 30943859Sml29623 status, channel)); 30953859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(nxgep, 30963859Sml29623 *rcr_p, *rx_mbox_p); 30973859Sml29623 30983859Sml29623 nxge_map_rxdma_channel_fail2: 30993859Sml29623 /* Free buffer blocks */ 31003859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 31013859Sml29623 "==> nxge_map_rxdma_channel: free rx buffers" 31023859Sml29623 "(nxgep 0x%x status 0x%x channel %d)", 31033859Sml29623 nxgep, status, channel)); 31043859Sml29623 nxge_unmap_rxdma_channel_buf_ring(nxgep, *rbr_p); 31053859Sml29623 31064185Sspeer status = NXGE_ERROR; 31074185Sspeer 31083859Sml29623 nxge_map_rxdma_channel_exit: 31093859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 31103859Sml29623 "<== nxge_map_rxdma_channel: " 31113859Sml29623 "(nxgep 0x%x status 0x%x channel %d)", 31123859Sml29623 nxgep, status, channel)); 31133859Sml29623 31143859Sml29623 return (status); 31153859Sml29623 } 31163859Sml29623 31173859Sml29623 /*ARGSUSED*/ 31183859Sml29623 static void 31193859Sml29623 nxge_unmap_rxdma_channel(p_nxge_t nxgep, uint16_t channel, 31203859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 31213859Sml29623 { 31223859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 31233859Sml29623 "==> 
nxge_unmap_rxdma_channel (channel %d)", channel)); 31243859Sml29623 31253859Sml29623 /* 31263859Sml29623 * unmap receive block ring, completion ring and mailbox. 31273859Sml29623 */ 31283859Sml29623 (void) nxge_unmap_rxdma_channel_cfg_ring(nxgep, 31293859Sml29623 rcr_p, rx_mbox_p); 31303859Sml29623 31313859Sml29623 /* unmap buffer blocks */ 31323859Sml29623 (void) nxge_unmap_rxdma_channel_buf_ring(nxgep, rbr_p); 31333859Sml29623 31343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_unmap_rxdma_channel")); 31353859Sml29623 } 31363859Sml29623 31373859Sml29623 /*ARGSUSED*/ 31383859Sml29623 static nxge_status_t 31393859Sml29623 nxge_map_rxdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 31403859Sml29623 p_nxge_dma_common_t *dma_cntl_p, p_rx_rbr_ring_t *rbr_p, 31413859Sml29623 p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p) 31423859Sml29623 { 31433859Sml29623 p_rx_rbr_ring_t rbrp; 31443859Sml29623 p_rx_rcr_ring_t rcrp; 31453859Sml29623 p_rx_mbox_t mboxp; 31463859Sml29623 p_nxge_dma_common_t cntl_dmap; 31473859Sml29623 p_nxge_dma_common_t dmap; 31483859Sml29623 p_rx_msg_t *rx_msg_ring; 31493859Sml29623 p_rx_msg_t rx_msg_p; 31503859Sml29623 p_rbr_cfig_a_t rcfga_p; 31513859Sml29623 p_rbr_cfig_b_t rcfgb_p; 31523859Sml29623 p_rcrcfig_a_t cfga_p; 31533859Sml29623 p_rcrcfig_b_t cfgb_p; 31543859Sml29623 p_rxdma_cfig1_t cfig1_p; 31553859Sml29623 p_rxdma_cfig2_t cfig2_p; 31563859Sml29623 p_rbr_kick_t kick_p; 31573859Sml29623 uint32_t dmaaddrp; 31583859Sml29623 uint32_t *rbr_vaddrp; 31593859Sml29623 uint32_t bkaddr; 31603859Sml29623 nxge_status_t status = NXGE_OK; 31613859Sml29623 int i; 31623859Sml29623 uint32_t nxge_port_rcr_size; 31633859Sml29623 31643859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 31653859Sml29623 "==> nxge_map_rxdma_channel_cfg_ring")); 31663859Sml29623 31673859Sml29623 cntl_dmap = *dma_cntl_p; 31683859Sml29623 31693859Sml29623 /* Map in the receive block ring */ 31703859Sml29623 rbrp = *rbr_p; 31713859Sml29623 dmap = 
(p_nxge_dma_common_t)&rbrp->rbr_desc; 31723859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4); 31733859Sml29623 /* 31743859Sml29623 * Zero out buffer block ring descriptors. 31753859Sml29623 */ 31763859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 31773859Sml29623 31783859Sml29623 rcfga_p = &(rbrp->rbr_cfga); 31793859Sml29623 rcfgb_p = &(rbrp->rbr_cfgb); 31803859Sml29623 kick_p = &(rbrp->rbr_kick); 31813859Sml29623 rcfga_p->value = 0; 31823859Sml29623 rcfgb_p->value = 0; 31833859Sml29623 kick_p->value = 0; 31843859Sml29623 rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress; 31853859Sml29623 rcfga_p->value = (rbrp->rbr_addr & 31863859Sml29623 (RBR_CFIG_A_STDADDR_MASK | 31873859Sml29623 RBR_CFIG_A_STDADDR_BASE_MASK)); 31883859Sml29623 rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT); 31893859Sml29623 31903859Sml29623 rcfgb_p->bits.ldw.bufsz0 = rbrp->pkt_buf_size0; 31913859Sml29623 rcfgb_p->bits.ldw.vld0 = 1; 31923859Sml29623 rcfgb_p->bits.ldw.bufsz1 = rbrp->pkt_buf_size1; 31933859Sml29623 rcfgb_p->bits.ldw.vld1 = 1; 31943859Sml29623 rcfgb_p->bits.ldw.bufsz2 = rbrp->pkt_buf_size2; 31953859Sml29623 rcfgb_p->bits.ldw.vld2 = 1; 31963859Sml29623 rcfgb_p->bits.ldw.bksize = nxgep->rx_bksize_code; 31973859Sml29623 31983859Sml29623 /* 31993859Sml29623 * For each buffer block, enter receive block address to the ring. 
32003859Sml29623 */ 32013859Sml29623 rbr_vaddrp = (uint32_t *)dmap->kaddrp; 32023859Sml29623 rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp; 32033859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 32043859Sml29623 "==> nxge_map_rxdma_channel_cfg_ring: channel %d " 32053859Sml29623 "rbr_vaddrp $%p", dma_channel, rbr_vaddrp)); 32063859Sml29623 32073859Sml29623 rx_msg_ring = rbrp->rx_msg_ring; 32083859Sml29623 for (i = 0; i < rbrp->tnblocks; i++) { 32093859Sml29623 rx_msg_p = rx_msg_ring[i]; 32103859Sml29623 rx_msg_p->nxgep = nxgep; 32113859Sml29623 rx_msg_p->rx_rbr_p = rbrp; 32123859Sml29623 bkaddr = (uint32_t) 32133859Sml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress 32143859Sml29623 >> RBR_BKADDR_SHIFT)); 32153859Sml29623 rx_msg_p->free = B_FALSE; 32163859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 32173859Sml29623 32183859Sml29623 *rbr_vaddrp++ = bkaddr; 32193859Sml29623 } 32203859Sml29623 32213859Sml29623 kick_p->bits.ldw.bkadd = rbrp->rbb_max; 32223859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 32233859Sml29623 32243859Sml29623 rbrp->rbr_rd_index = 0; 32253859Sml29623 32263859Sml29623 rbrp->rbr_consumed = 0; 32273859Sml29623 rbrp->rbr_use_bcopy = B_TRUE; 32283859Sml29623 rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0; 32293859Sml29623 /* 32303859Sml29623 * Do bcopy on packets greater than bcopy size once 32313859Sml29623 * the lo threshold is reached. 32323859Sml29623 * This lo threshold should be less than the hi threshold. 32333859Sml29623 * 32343859Sml29623 * Do bcopy on every packet once the hi threshold is reached. 
32353859Sml29623 */ 32363859Sml29623 if (nxge_rx_threshold_lo >= nxge_rx_threshold_hi) { 32373859Sml29623 /* default it to use hi */ 32383859Sml29623 nxge_rx_threshold_lo = nxge_rx_threshold_hi; 32393859Sml29623 } 32403859Sml29623 32413859Sml29623 if (nxge_rx_buf_size_type > NXGE_RBR_TYPE2) { 32423859Sml29623 nxge_rx_buf_size_type = NXGE_RBR_TYPE2; 32433859Sml29623 } 32443859Sml29623 rbrp->rbr_bufsize_type = nxge_rx_buf_size_type; 32453859Sml29623 32463859Sml29623 switch (nxge_rx_threshold_hi) { 32473859Sml29623 default: 32483859Sml29623 case NXGE_RX_COPY_NONE: 32493859Sml29623 /* Do not do bcopy at all */ 32503859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 32513859Sml29623 rbrp->rbr_threshold_hi = rbrp->rbb_max; 32523859Sml29623 break; 32533859Sml29623 32543859Sml29623 case NXGE_RX_COPY_1: 32553859Sml29623 case NXGE_RX_COPY_2: 32563859Sml29623 case NXGE_RX_COPY_3: 32573859Sml29623 case NXGE_RX_COPY_4: 32583859Sml29623 case NXGE_RX_COPY_5: 32593859Sml29623 case NXGE_RX_COPY_6: 32603859Sml29623 case NXGE_RX_COPY_7: 32613859Sml29623 rbrp->rbr_threshold_hi = 32623859Sml29623 rbrp->rbb_max * 32633859Sml29623 (nxge_rx_threshold_hi)/NXGE_RX_BCOPY_SCALE; 32643859Sml29623 break; 32653859Sml29623 32663859Sml29623 case NXGE_RX_COPY_ALL: 32673859Sml29623 rbrp->rbr_threshold_hi = 0; 32683859Sml29623 break; 32693859Sml29623 } 32703859Sml29623 32713859Sml29623 switch (nxge_rx_threshold_lo) { 32723859Sml29623 default: 32733859Sml29623 case NXGE_RX_COPY_NONE: 32743859Sml29623 /* Do not do bcopy at all */ 32753859Sml29623 if (rbrp->rbr_use_bcopy) { 32763859Sml29623 rbrp->rbr_use_bcopy = B_FALSE; 32773859Sml29623 } 32783859Sml29623 rbrp->rbr_threshold_lo = rbrp->rbb_max; 32793859Sml29623 break; 32803859Sml29623 32813859Sml29623 case NXGE_RX_COPY_1: 32823859Sml29623 case NXGE_RX_COPY_2: 32833859Sml29623 case NXGE_RX_COPY_3: 32843859Sml29623 case NXGE_RX_COPY_4: 32853859Sml29623 case NXGE_RX_COPY_5: 32863859Sml29623 case NXGE_RX_COPY_6: 32873859Sml29623 case NXGE_RX_COPY_7: 
32883859Sml29623 rbrp->rbr_threshold_lo = 32893859Sml29623 rbrp->rbb_max * 32903859Sml29623 (nxge_rx_threshold_lo)/NXGE_RX_BCOPY_SCALE; 32913859Sml29623 break; 32923859Sml29623 32933859Sml29623 case NXGE_RX_COPY_ALL: 32943859Sml29623 rbrp->rbr_threshold_lo = 0; 32953859Sml29623 break; 32963859Sml29623 } 32973859Sml29623 32983859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 32993859Sml29623 "nxge_map_rxdma_channel_cfg_ring: channel %d " 33003859Sml29623 "rbb_max %d " 33013859Sml29623 "rbrp->rbr_bufsize_type %d " 33023859Sml29623 "rbb_threshold_hi %d " 33033859Sml29623 "rbb_threshold_lo %d", 33043859Sml29623 dma_channel, 33053859Sml29623 rbrp->rbb_max, 33063859Sml29623 rbrp->rbr_bufsize_type, 33073859Sml29623 rbrp->rbr_threshold_hi, 33083859Sml29623 rbrp->rbr_threshold_lo)); 33093859Sml29623 33103859Sml29623 rbrp->page_valid.value = 0; 33113859Sml29623 rbrp->page_mask_1.value = rbrp->page_mask_2.value = 0; 33123859Sml29623 rbrp->page_value_1.value = rbrp->page_value_2.value = 0; 33133859Sml29623 rbrp->page_reloc_1.value = rbrp->page_reloc_2.value = 0; 33143859Sml29623 rbrp->page_hdl.value = 0; 33153859Sml29623 33163859Sml29623 rbrp->page_valid.bits.ldw.page0 = 1; 33173859Sml29623 rbrp->page_valid.bits.ldw.page1 = 1; 33183859Sml29623 33193859Sml29623 /* Map in the receive completion ring */ 33203859Sml29623 rcrp = (p_rx_rcr_ring_t) 33213859Sml29623 KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP); 33223859Sml29623 rcrp->rdc = dma_channel; 33233859Sml29623 33243859Sml29623 nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 33253859Sml29623 rcrp->comp_size = nxge_port_rcr_size; 33263859Sml29623 rcrp->comp_wrap_mask = nxge_port_rcr_size - 1; 33273859Sml29623 33283859Sml29623 rcrp->max_receive_pkts = nxge_max_rx_pkts; 33293859Sml29623 33303859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 33313859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size, 33323859Sml29623 sizeof (rcr_entry_t)); 33333859Sml29623 rcrp->comp_rd_index = 0; 33343859Sml29623 rcrp->comp_wt_index 
= 0; 33353859Sml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 33363859Sml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 33373859Sml29623 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 33385125Sjoycey #if defined(__i386) 33395125Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 33405125Sjoycey #else 33413859Sml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 33425125Sjoycey #endif 33433859Sml29623 33443859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 33453859Sml29623 (nxge_port_rcr_size - 1); 33463859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 33473859Sml29623 (nxge_port_rcr_size - 1); 33483859Sml29623 33493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 33503859Sml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 33513859Sml29623 "channel %d " 33523859Sml29623 "rbr_vaddrp $%p " 33533859Sml29623 "rcr_desc_rd_head_p $%p " 33543859Sml29623 "rcr_desc_rd_head_pp $%p " 33553859Sml29623 "rcr_desc_rd_last_p $%p " 33563859Sml29623 "rcr_desc_rd_last_pp $%p ", 33573859Sml29623 dma_channel, 33583859Sml29623 rbr_vaddrp, 33593859Sml29623 rcrp->rcr_desc_rd_head_p, 33603859Sml29623 rcrp->rcr_desc_rd_head_pp, 33613859Sml29623 rcrp->rcr_desc_last_p, 33623859Sml29623 rcrp->rcr_desc_last_pp)); 33633859Sml29623 33643859Sml29623 /* 33653859Sml29623 * Zero out buffer block ring descriptors. 
33663859Sml29623 */ 33673859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 33683859Sml29623 rcrp->intr_timeout = nxgep->intr_timeout; 33693859Sml29623 rcrp->intr_threshold = nxgep->intr_threshold; 33703859Sml29623 rcrp->full_hdr_flag = B_FALSE; 33713859Sml29623 rcrp->sw_priv_hdr_len = 0; 33723859Sml29623 33733859Sml29623 cfga_p = &(rcrp->rcr_cfga); 33743859Sml29623 cfgb_p = &(rcrp->rcr_cfgb); 33753859Sml29623 cfga_p->value = 0; 33763859Sml29623 cfgb_p->value = 0; 33773859Sml29623 rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress; 33783859Sml29623 cfga_p->value = (rcrp->rcr_addr & 33793859Sml29623 (RCRCFIG_A_STADDR_MASK | 33803859Sml29623 RCRCFIG_A_STADDR_BASE_MASK)); 33813859Sml29623 33823859Sml29623 rcfga_p->value |= ((uint64_t)rcrp->comp_size << 33833859Sml29623 RCRCFIG_A_LEN_SHIF); 33843859Sml29623 33853859Sml29623 /* 33863859Sml29623 * Timeout should be set based on the system clock divider. 33873859Sml29623 * The following timeout value of 1 assumes that the 33883859Sml29623 * granularity (1000) is 3 microseconds running at 300MHz. 
33893859Sml29623 */ 33903859Sml29623 cfgb_p->bits.ldw.pthres = rcrp->intr_threshold; 33913859Sml29623 cfgb_p->bits.ldw.timeout = rcrp->intr_timeout; 33923859Sml29623 cfgb_p->bits.ldw.entout = 1; 33933859Sml29623 33943859Sml29623 /* Map in the mailbox */ 33953859Sml29623 mboxp = (p_rx_mbox_t) 33963859Sml29623 KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP); 33973859Sml29623 dmap = (p_nxge_dma_common_t)&mboxp->rx_mbox; 33983859Sml29623 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t)); 33993859Sml29623 cfig1_p = (p_rxdma_cfig1_t)&mboxp->rx_cfg1; 34003859Sml29623 cfig2_p = (p_rxdma_cfig2_t)&mboxp->rx_cfg2; 34013859Sml29623 cfig1_p->value = cfig2_p->value = 0; 34023859Sml29623 34033859Sml29623 mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress; 34043859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34053859Sml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 34063859Sml29623 "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx", 34073859Sml29623 dma_channel, cfig1_p->value, cfig2_p->value, 34083859Sml29623 mboxp->mbox_addr)); 34093859Sml29623 34103859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress >> 32 34113859Sml29623 & 0xfff); 34123859Sml29623 cfig1_p->bits.ldw.mbaddr_h = dmaaddrp; 34133859Sml29623 34143859Sml29623 34153859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff); 34163859Sml29623 dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 34173859Sml29623 RXDMA_CFIG2_MBADDR_L_MASK); 34183859Sml29623 34193859Sml29623 cfig2_p->bits.ldw.mbaddr = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT); 34203859Sml29623 34213859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34223859Sml29623 "==> nxge_map_rxdma_channel_cfg_ring: " 34233859Sml29623 "channel %d damaddrp $%p " 34243859Sml29623 "cfg1 0x%016llx cfig2 0x%016llx", 34253859Sml29623 dma_channel, dmaaddrp, 34263859Sml29623 cfig1_p->value, cfig2_p->value)); 34273859Sml29623 34283859Sml29623 cfig2_p->bits.ldw.full_hdr = rcrp->full_hdr_flag; 34293859Sml29623 cfig2_p->bits.ldw.offset = 
rcrp->sw_priv_hdr_len; 34303859Sml29623 34313859Sml29623 rbrp->rx_rcr_p = rcrp; 34323859Sml29623 rcrp->rx_rbr_p = rbrp; 34333859Sml29623 *rcr_p = rcrp; 34343859Sml29623 *rx_mbox_p = mboxp; 34353859Sml29623 34363859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34373859Sml29623 "<== nxge_map_rxdma_channel_cfg_ring status 0x%08x", status)); 34383859Sml29623 34393859Sml29623 return (status); 34403859Sml29623 } 34413859Sml29623 34423859Sml29623 /*ARGSUSED*/ 34433859Sml29623 static void 34443859Sml29623 nxge_unmap_rxdma_channel_cfg_ring(p_nxge_t nxgep, 34453859Sml29623 p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p) 34463859Sml29623 { 34473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34483859Sml29623 "==> nxge_unmap_rxdma_channel_cfg_ring: channel %d", 34493859Sml29623 rcr_p->rdc)); 34503859Sml29623 34513859Sml29623 KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t)); 34523859Sml29623 KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t)); 34533859Sml29623 34543859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34553859Sml29623 "<== nxge_unmap_rxdma_channel_cfg_ring")); 34563859Sml29623 } 34573859Sml29623 34583859Sml29623 static nxge_status_t 34593859Sml29623 nxge_map_rxdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 34603859Sml29623 p_nxge_dma_common_t *dma_buf_p, 34613859Sml29623 p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks) 34623859Sml29623 { 34633859Sml29623 p_rx_rbr_ring_t rbrp; 34643859Sml29623 p_nxge_dma_common_t dma_bufp, tmp_bufp; 34653859Sml29623 p_rx_msg_t *rx_msg_ring; 34663859Sml29623 p_rx_msg_t rx_msg_p; 34673859Sml29623 p_mblk_t mblk_p; 34683859Sml29623 34693859Sml29623 rxring_info_t *ring_info; 34703859Sml29623 nxge_status_t status = NXGE_OK; 34713859Sml29623 int i, j, index; 34723859Sml29623 uint32_t size, bsize, nblocks, nmsgs; 34733859Sml29623 34743859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34753859Sml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d", 34763859Sml29623 channel)); 34773859Sml29623 34783859Sml29623 dma_bufp = tmp_bufp = *dma_buf_p; 34793859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34803859Sml29623 " nxge_map_rxdma_channel_buf_ring: channel %d to map %d " 34813859Sml29623 "chunks bufp 0x%016llx", 34823859Sml29623 channel, num_chunks, dma_bufp)); 34833859Sml29623 34843859Sml29623 nmsgs = 0; 34853859Sml29623 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 34863859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 34873859Sml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 34883859Sml29623 "bufp 0x%016llx nblocks %d nmsgs %d", 34893859Sml29623 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 34903859Sml29623 nmsgs += tmp_bufp->nblocks; 34913859Sml29623 } 34923859Sml29623 if (!nmsgs) { 34934185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 34943859Sml29623 "<== nxge_map_rxdma_channel_buf_ring: channel %d " 34953859Sml29623 "no msg blocks", 34963859Sml29623 channel)); 34973859Sml29623 status = NXGE_ERROR; 34983859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 34993859Sml29623 } 35003859Sml29623 35013859Sml29623 rbrp = (p_rx_rbr_ring_t) 35023859Sml29623 KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP); 35033859Sml29623 35043859Sml29623 size = nmsgs * sizeof (p_rx_msg_t); 35053859Sml29623 rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 35063859Sml29623 ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t), 35073859Sml29623 KM_SLEEP); 35083859Sml29623 35093859Sml29623 MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER, 35103859Sml29623 (void *)nxgep->interrupt_cookie); 35113859Sml29623 MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER, 35123859Sml29623 (void *)nxgep->interrupt_cookie); 35133859Sml29623 rbrp->rdc = channel; 35143859Sml29623 rbrp->num_blocks = num_chunks; 35153859Sml29623 rbrp->tnblocks = nmsgs; 35163859Sml29623 rbrp->rbb_max = nmsgs; 35173859Sml29623 rbrp->rbr_max_size = nmsgs; 35183859Sml29623 rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1); 35193859Sml29623 35203859Sml29623 /* 35213859Sml29623 * Buffer sizes suggested by NIU architect. 35223859Sml29623 * 256, 512 and 2K. 
35233859Sml29623 */ 35243859Sml29623 35253859Sml29623 rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B; 35263859Sml29623 rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES; 35273859Sml29623 rbrp->npi_pkt_buf_size0 = SIZE_256B; 35283859Sml29623 35293859Sml29623 rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K; 35303859Sml29623 rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES; 35313859Sml29623 rbrp->npi_pkt_buf_size1 = SIZE_1KB; 35323859Sml29623 35333859Sml29623 rbrp->block_size = nxgep->rx_default_block_size; 35343859Sml29623 35353859Sml29623 if (!nxge_jumbo_enable && !nxgep->param_arr[param_accept_jumbo].value) { 35363859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K; 35373859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES; 35383859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_2KB; 35393859Sml29623 } else { 35403859Sml29623 if (rbrp->block_size >= 0x2000) { 35413859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_8K; 35423859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_8K_BYTES; 35433859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_8KB; 35443859Sml29623 } else { 35453859Sml29623 rbrp->pkt_buf_size2 = RBR_BUFSZ2_4K; 35463859Sml29623 rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_4K_BYTES; 35473859Sml29623 rbrp->npi_pkt_buf_size2 = SIZE_4KB; 35483859Sml29623 } 35493859Sml29623 } 35503859Sml29623 35513859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35523859Sml29623 "==> nxge_map_rxdma_channel_buf_ring: channel %d " 35533859Sml29623 "actual rbr max %d rbb_max %d nmsgs %d " 35543859Sml29623 "rbrp->block_size %d default_block_size %d " 35553859Sml29623 "(config nxge_rbr_size %d nxge_rbr_spare_size %d)", 35563859Sml29623 channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs, 35573859Sml29623 rbrp->block_size, nxgep->rx_default_block_size, 35583859Sml29623 nxge_rbr_size, nxge_rbr_spare_size)); 35593859Sml29623 35603859Sml29623 /* Map in buffers from the buffer pool. 
*/ 35613859Sml29623 index = 0; 35623859Sml29623 for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) { 35633859Sml29623 bsize = dma_bufp->block_size; 35643859Sml29623 nblocks = dma_bufp->nblocks; 35655125Sjoycey #if defined(__i386) 35665125Sjoycey ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp; 35675125Sjoycey #else 35683859Sml29623 ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp; 35695125Sjoycey #endif 35703859Sml29623 ring_info->buffer[i].buf_index = i; 35713859Sml29623 ring_info->buffer[i].buf_size = dma_bufp->alength; 35723859Sml29623 ring_info->buffer[i].start_index = index; 35735125Sjoycey #if defined(__i386) 35745125Sjoycey ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp; 35755125Sjoycey #else 35763859Sml29623 ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp; 35775125Sjoycey #endif 35783859Sml29623 35793859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 35803859Sml29623 " nxge_map_rxdma_channel_buf_ring: map channel %d " 35813859Sml29623 "chunk %d" 35823859Sml29623 " nblocks %d chunk_size %x block_size 0x%x " 35833859Sml29623 "dma_bufp $%p", channel, i, 35843859Sml29623 dma_bufp->nblocks, ring_info->buffer[i].buf_size, bsize, 35853859Sml29623 dma_bufp)); 35863859Sml29623 35873859Sml29623 for (j = 0; j < nblocks; j++) { 35883859Sml29623 if ((rx_msg_p = nxge_allocb(bsize, BPRI_LO, 35893859Sml29623 dma_bufp)) == NULL) { 35904185Sspeer NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 35914185Sspeer "allocb failed (index %d i %d j %d)", 35924185Sspeer index, i, j)); 35934185Sspeer goto nxge_map_rxdma_channel_buf_ring_fail1; 35943859Sml29623 } 35953859Sml29623 rx_msg_ring[index] = rx_msg_p; 35963859Sml29623 rx_msg_p->block_index = index; 35973859Sml29623 rx_msg_p->shifted_addr = (uint32_t) 35983859Sml29623 ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >> 35993859Sml29623 RBR_BKADDR_SHIFT)); 36003859Sml29623 36013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36024185Sspeer "index %d j %d rx_msg_p $%p mblk %p", 36034185Sspeer index, j, 
rx_msg_p, rx_msg_p->rx_mblk_p)); 36043859Sml29623 36053859Sml29623 mblk_p = rx_msg_p->rx_mblk_p; 36063859Sml29623 mblk_p->b_wptr = mblk_p->b_rptr + bsize; 36073859Sml29623 index++; 36083859Sml29623 rx_msg_p->buf_dma.dma_channel = channel; 36093859Sml29623 } 36103859Sml29623 } 36113859Sml29623 if (i < rbrp->num_blocks) { 36123859Sml29623 goto nxge_map_rxdma_channel_buf_ring_fail1; 36133859Sml29623 } 36143859Sml29623 36153859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36163859Sml29623 "nxge_map_rxdma_channel_buf_ring: done buf init " 36173859Sml29623 "channel %d msg block entries %d", 36183859Sml29623 channel, index)); 36193859Sml29623 ring_info->block_size_mask = bsize - 1; 36203859Sml29623 rbrp->rx_msg_ring = rx_msg_ring; 36213859Sml29623 rbrp->dma_bufp = dma_buf_p; 36223859Sml29623 rbrp->ring_info = ring_info; 36233859Sml29623 36243859Sml29623 status = nxge_rxbuf_index_info_init(nxgep, rbrp); 36253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36263859Sml29623 " nxge_map_rxdma_channel_buf_ring: " 36273859Sml29623 "channel %d done buf info init", channel)); 36283859Sml29623 36293859Sml29623 *rbr_p = rbrp; 36303859Sml29623 goto nxge_map_rxdma_channel_buf_ring_exit; 36313859Sml29623 36323859Sml29623 nxge_map_rxdma_channel_buf_ring_fail1: 36333859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36343859Sml29623 " nxge_map_rxdma_channel_buf_ring: failed channel (0x%x)", 36353859Sml29623 channel, status)); 36363859Sml29623 36373859Sml29623 index--; 36383859Sml29623 for (; index >= 0; index--) { 36393859Sml29623 rx_msg_p = rx_msg_ring[index]; 36403859Sml29623 if (rx_msg_p != NULL) { 36413859Sml29623 freeb(rx_msg_p->rx_mblk_p); 36423859Sml29623 rx_msg_ring[index] = NULL; 36433859Sml29623 } 36443859Sml29623 } 36453859Sml29623 nxge_map_rxdma_channel_buf_ring_fail: 36463859Sml29623 MUTEX_DESTROY(&rbrp->post_lock); 36473859Sml29623 MUTEX_DESTROY(&rbrp->lock); 36483859Sml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 36493859Sml29623 KMEM_FREE(rx_msg_ring, size); 36503859Sml29623 
KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t)); 36513859Sml29623 36524185Sspeer status = NXGE_ERROR; 36534185Sspeer 36543859Sml29623 nxge_map_rxdma_channel_buf_ring_exit: 36553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36563859Sml29623 "<== nxge_map_rxdma_channel_buf_ring status 0x%08x", status)); 36573859Sml29623 36583859Sml29623 return (status); 36593859Sml29623 } 36603859Sml29623 36613859Sml29623 /*ARGSUSED*/ 36623859Sml29623 static void 36633859Sml29623 nxge_unmap_rxdma_channel_buf_ring(p_nxge_t nxgep, 36643859Sml29623 p_rx_rbr_ring_t rbr_p) 36653859Sml29623 { 36663859Sml29623 p_rx_msg_t *rx_msg_ring; 36673859Sml29623 p_rx_msg_t rx_msg_p; 36683859Sml29623 rxring_info_t *ring_info; 36693859Sml29623 int i; 36703859Sml29623 uint32_t size; 36713859Sml29623 #ifdef NXGE_DEBUG 36723859Sml29623 int num_chunks; 36733859Sml29623 #endif 36743859Sml29623 36753859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36763859Sml29623 "==> nxge_unmap_rxdma_channel_buf_ring")); 36773859Sml29623 if (rbr_p == NULL) { 36783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 36793859Sml29623 "<== nxge_unmap_rxdma_channel_buf_ring: NULL rbrp")); 36803859Sml29623 return; 36813859Sml29623 } 36823859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36833859Sml29623 "==> nxge_unmap_rxdma_channel_buf_ring: channel %d", 36843859Sml29623 rbr_p->rdc)); 36853859Sml29623 36863859Sml29623 rx_msg_ring = rbr_p->rx_msg_ring; 36873859Sml29623 ring_info = rbr_p->ring_info; 36883859Sml29623 36893859Sml29623 if (rx_msg_ring == NULL || ring_info == NULL) { 36903859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 36913859Sml29623 "<== nxge_unmap_rxdma_channel_buf_ring: " 36923859Sml29623 "rx_msg_ring $%p ring_info $%p", 36933859Sml29623 rx_msg_p, ring_info)); 36943859Sml29623 return; 36953859Sml29623 } 36963859Sml29623 36973859Sml29623 #ifdef NXGE_DEBUG 36983859Sml29623 num_chunks = rbr_p->num_blocks; 36993859Sml29623 #endif 37003859Sml29623 size = rbr_p->tnblocks * sizeof (p_rx_msg_t); 37013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 
37023859Sml29623 " nxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d " 37033859Sml29623 "tnblocks %d (max %d) size ptrs %d ", 37043859Sml29623 rbr_p->rdc, num_chunks, 37053859Sml29623 rbr_p->tnblocks, rbr_p->rbr_max_size, size)); 37063859Sml29623 37073859Sml29623 for (i = 0; i < rbr_p->tnblocks; i++) { 37083859Sml29623 rx_msg_p = rx_msg_ring[i]; 37093859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37103859Sml29623 " nxge_unmap_rxdma_channel_buf_ring: " 37113859Sml29623 "rx_msg_p $%p", 37123859Sml29623 rx_msg_p)); 37133859Sml29623 if (rx_msg_p != NULL) { 37143859Sml29623 freeb(rx_msg_p->rx_mblk_p); 37153859Sml29623 rx_msg_ring[i] = NULL; 37163859Sml29623 } 37173859Sml29623 } 37183859Sml29623 37193859Sml29623 MUTEX_DESTROY(&rbr_p->post_lock); 37203859Sml29623 MUTEX_DESTROY(&rbr_p->lock); 37213859Sml29623 KMEM_FREE(ring_info, sizeof (rxring_info_t)); 37223859Sml29623 KMEM_FREE(rx_msg_ring, size); 37233859Sml29623 KMEM_FREE(rbr_p, sizeof (rx_rbr_ring_t)); 37243859Sml29623 37253859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37263859Sml29623 "<== nxge_unmap_rxdma_channel_buf_ring")); 37273859Sml29623 } 37283859Sml29623 37293859Sml29623 static nxge_status_t 37303859Sml29623 nxge_rxdma_hw_start_common(p_nxge_t nxgep) 37313859Sml29623 { 37323859Sml29623 nxge_status_t status = NXGE_OK; 37333859Sml29623 37343859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 37353859Sml29623 37363859Sml29623 /* 37373859Sml29623 * Load the sharable parameters by writing to the 37383859Sml29623 * function zero control registers. These FZC registers 37393859Sml29623 * should be initialized only once for the entire chip. 37403859Sml29623 */ 37413859Sml29623 (void) nxge_init_fzc_rx_common(nxgep); 37423859Sml29623 37433859Sml29623 /* 37443859Sml29623 * Initialize the RXDMA port specific FZC control configurations. 37453859Sml29623 * These FZC registers are pertaining to each port. 
37463859Sml29623 */ 37473859Sml29623 (void) nxge_init_fzc_rxdma_port(nxgep); 37483859Sml29623 37493859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start_common")); 37503859Sml29623 37513859Sml29623 return (status); 37523859Sml29623 } 37533859Sml29623 37543859Sml29623 /*ARGSUSED*/ 37553859Sml29623 static void 37563859Sml29623 nxge_rxdma_hw_stop_common(p_nxge_t nxgep) 37573859Sml29623 { 37583859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 37593859Sml29623 37603859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop_common")); 37613859Sml29623 } 37623859Sml29623 37633859Sml29623 static nxge_status_t 37643859Sml29623 nxge_rxdma_hw_start(p_nxge_t nxgep) 37653859Sml29623 { 37663859Sml29623 int i, ndmas; 37673859Sml29623 uint16_t channel; 37683859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 37693859Sml29623 p_rx_rbr_ring_t *rbr_rings; 37703859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 37713859Sml29623 p_rx_rcr_ring_t *rcr_rings; 37723859Sml29623 p_rx_mbox_areas_t rx_mbox_areas_p; 37733859Sml29623 p_rx_mbox_t *rx_mbox_p; 37743859Sml29623 nxge_status_t status = NXGE_OK; 37753859Sml29623 37763859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start")); 37773859Sml29623 37783859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 37793859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 37803859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 37813859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 37823859Sml29623 "<== nxge_rxdma_hw_start: NULL ring pointers")); 37833859Sml29623 return (NXGE_ERROR); 37843859Sml29623 } 37853859Sml29623 ndmas = rx_rbr_rings->ndmas; 37863859Sml29623 if (ndmas == 0) { 37873859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 37883859Sml29623 "<== nxge_rxdma_hw_start: no dma channel allocated")); 37893859Sml29623 return (NXGE_ERROR); 37903859Sml29623 } 37913859Sml29623 37923859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 37933859Sml29623 "==> nxge_rxdma_hw_start (ndmas %d)", ndmas)); 37943859Sml29623 
37953859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 37963859Sml29623 rcr_rings = rx_rcr_rings->rcr_rings; 37973859Sml29623 rx_mbox_areas_p = nxgep->rx_mbox_areas_p; 37983859Sml29623 if (rx_mbox_areas_p) { 37993859Sml29623 rx_mbox_p = rx_mbox_areas_p->rxmbox_areas; 38003859Sml29623 } 38013859Sml29623 38023859Sml29623 for (i = 0; i < ndmas; i++) { 38033859Sml29623 channel = rbr_rings[i]->rdc; 38043859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38053859Sml29623 "==> nxge_rxdma_hw_start (ndmas %d) channel %d", 38063859Sml29623 ndmas, channel)); 38073859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, 38083859Sml29623 (p_rx_rbr_ring_t)rbr_rings[i], 38093859Sml29623 (p_rx_rcr_ring_t)rcr_rings[i], 38103859Sml29623 (p_rx_mbox_t)rx_mbox_p[i]); 38113859Sml29623 if (status != NXGE_OK) { 38123859Sml29623 goto nxge_rxdma_hw_start_fail1; 38133859Sml29623 } 38143859Sml29623 } 38153859Sml29623 38163859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_start: " 38173859Sml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 38183859Sml29623 rx_rbr_rings, rx_rcr_rings)); 38193859Sml29623 38203859Sml29623 goto nxge_rxdma_hw_start_exit; 38213859Sml29623 38223859Sml29623 nxge_rxdma_hw_start_fail1: 38233859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 38243859Sml29623 "==> nxge_rxdma_hw_start: disable " 38253859Sml29623 "(status 0x%x channel %d i %d)", status, channel, i)); 38263859Sml29623 for (; i >= 0; i--) { 38273859Sml29623 channel = rbr_rings[i]->rdc; 38283859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 38293859Sml29623 } 38303859Sml29623 38313859Sml29623 nxge_rxdma_hw_start_exit: 38323859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38333859Sml29623 "==> nxge_rxdma_hw_start: (status 0x%x)", status)); 38343859Sml29623 38353859Sml29623 return (status); 38363859Sml29623 } 38373859Sml29623 38383859Sml29623 static void 38393859Sml29623 nxge_rxdma_hw_stop(p_nxge_t nxgep) 38403859Sml29623 { 38413859Sml29623 int i, ndmas; 38423859Sml29623 uint16_t channel; 
38433859Sml29623 p_rx_rbr_rings_t rx_rbr_rings; 38443859Sml29623 p_rx_rbr_ring_t *rbr_rings; 38453859Sml29623 p_rx_rcr_rings_t rx_rcr_rings; 38463859Sml29623 38473859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop")); 38483859Sml29623 38493859Sml29623 rx_rbr_rings = nxgep->rx_rbr_rings; 38503859Sml29623 rx_rcr_rings = nxgep->rx_rcr_rings; 38513859Sml29623 if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) { 38523859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 38533859Sml29623 "<== nxge_rxdma_hw_stop: NULL ring pointers")); 38543859Sml29623 return; 38553859Sml29623 } 38563859Sml29623 ndmas = rx_rbr_rings->ndmas; 38573859Sml29623 if (!ndmas) { 38583859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 38593859Sml29623 "<== nxge_rxdma_hw_stop: no dma channel allocated")); 38603859Sml29623 return; 38613859Sml29623 } 38623859Sml29623 38633859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38643859Sml29623 "==> nxge_rxdma_hw_stop (ndmas %d)", ndmas)); 38653859Sml29623 38663859Sml29623 rbr_rings = rx_rbr_rings->rbr_rings; 38673859Sml29623 38683859Sml29623 for (i = 0; i < ndmas; i++) { 38693859Sml29623 channel = rbr_rings[i]->rdc; 38703859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 38713859Sml29623 "==> nxge_rxdma_hw_stop (ndmas %d) channel %d", 38723859Sml29623 ndmas, channel)); 38733859Sml29623 (void) nxge_rxdma_stop_channel(nxgep, channel); 38743859Sml29623 } 38753859Sml29623 38763859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_hw_stop: " 38773859Sml29623 "rx_rbr_rings 0x%016llx rings 0x%016llx", 38783859Sml29623 rx_rbr_rings, rx_rcr_rings)); 38793859Sml29623 38803859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_hw_stop")); 38813859Sml29623 } 38823859Sml29623 38833859Sml29623 38843859Sml29623 static nxge_status_t 38853859Sml29623 nxge_rxdma_start_channel(p_nxge_t nxgep, uint16_t channel, 38863859Sml29623 p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p) 38873859Sml29623 38883859Sml29623 { 38893859Sml29623 npi_handle_t handle; 
38903859Sml29623 npi_status_t rs = NPI_SUCCESS; 38913859Sml29623 rx_dma_ctl_stat_t cs; 38923859Sml29623 rx_dma_ent_msk_t ent_mask; 38933859Sml29623 nxge_status_t status = NXGE_OK; 38943859Sml29623 38953859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel")); 38963859Sml29623 38973859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 38983859Sml29623 38993859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "nxge_rxdma_start_channel: " 39003859Sml29623 "npi handle addr $%p acc $%p", 39013859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 39023859Sml29623 39033859Sml29623 /* Reset RXDMA channel */ 39043859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 39053859Sml29623 if (rs != NPI_SUCCESS) { 39063859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39073859Sml29623 "==> nxge_rxdma_start_channel: " 39083859Sml29623 "reset rxdma failed (0x%08x channel %d)", 39093859Sml29623 status, channel)); 39103859Sml29623 return (NXGE_ERROR | rs); 39113859Sml29623 } 39123859Sml29623 39133859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39143859Sml29623 "==> nxge_rxdma_start_channel: reset done: channel %d", 39153859Sml29623 channel)); 39163859Sml29623 39173859Sml29623 /* 39183859Sml29623 * Initialize the RXDMA channel specific FZC control 39193859Sml29623 * configurations. These FZC registers are pertaining 39203859Sml29623 * to each RX channel (logical pages). 
39213859Sml29623 */ 39223859Sml29623 status = nxge_init_fzc_rxdma_channel(nxgep, 39233859Sml29623 channel, rbr_p, rcr_p, mbox_p); 39243859Sml29623 if (status != NXGE_OK) { 39253859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39263859Sml29623 "==> nxge_rxdma_start_channel: " 39273859Sml29623 "init fzc rxdma failed (0x%08x channel %d)", 39283859Sml29623 status, channel)); 39293859Sml29623 return (status); 39303859Sml29623 } 39313859Sml29623 39323859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 39333859Sml29623 "==> nxge_rxdma_start_channel: fzc done")); 39343859Sml29623 39353859Sml29623 /* 39363859Sml29623 * Zero out the shadow and prefetch ram. 39373859Sml29623 */ 39383859Sml29623 39393859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 39403859Sml29623 "ram done")); 39413859Sml29623 39423859Sml29623 /* Set up the interrupt event masks. */ 39433859Sml29623 ent_mask.value = 0; 39443859Sml29623 ent_mask.value |= RX_DMA_ENT_MSK_RBREMPTY_MASK; 39453859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 39463859Sml29623 &ent_mask); 39473859Sml29623 if (rs != NPI_SUCCESS) { 39483859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39493859Sml29623 "==> nxge_rxdma_start_channel: " 39503859Sml29623 "init rxdma event masks failed (0x%08x channel %d)", 39513859Sml29623 status, channel)); 39523859Sml29623 return (NXGE_ERROR | rs); 39533859Sml29623 } 39543859Sml29623 39553859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 39563859Sml29623 "event done: channel %d (mask 0x%016llx)", 39573859Sml29623 channel, ent_mask.value)); 39583859Sml29623 39593859Sml29623 /* Initialize the receive DMA control and status register */ 39603859Sml29623 cs.value = 0; 39613859Sml29623 cs.bits.hdw.mex = 1; 39623859Sml29623 cs.bits.hdw.rcrthres = 1; 39633859Sml29623 cs.bits.hdw.rcrto = 1; 39643859Sml29623 cs.bits.hdw.rbr_empty = 1; 39653859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, &cs); 39663859Sml29623 
NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 39673859Sml29623 "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value)); 39683859Sml29623 if (status != NXGE_OK) { 39693859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39703859Sml29623 "==> nxge_rxdma_start_channel: " 39713859Sml29623 "init rxdma control register failed (0x%08x channel %d", 39723859Sml29623 status, channel)); 39733859Sml29623 return (status); 39743859Sml29623 } 39753859Sml29623 39763859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 39773859Sml29623 "control done - channel %d cs 0x%016llx", channel, cs.value)); 39783859Sml29623 39793859Sml29623 /* 39803859Sml29623 * Load RXDMA descriptors, buffers, mailbox, 39813859Sml29623 * initialise the receive DMA channels and 39823859Sml29623 * enable each DMA channel. 39833859Sml29623 */ 39843859Sml29623 status = nxge_enable_rxdma_channel(nxgep, 39853859Sml29623 channel, rbr_p, rcr_p, mbox_p); 39863859Sml29623 39873859Sml29623 if (status != NXGE_OK) { 39883859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 39893859Sml29623 " nxge_rxdma_start_channel: " 39903859Sml29623 " init enable rxdma failed (0x%08x channel %d)", 39913859Sml29623 status, channel)); 39923859Sml29623 return (status); 39933859Sml29623 } 39943859Sml29623 39953859Sml29623 ent_mask.value = 0; 39963859Sml29623 ent_mask.value |= (RX_DMA_ENT_MSK_WRED_DROP_MASK | 39973859Sml29623 RX_DMA_ENT_MSK_PTDROP_PKT_MASK); 39983859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 39993859Sml29623 &ent_mask); 40003859Sml29623 if (rs != NPI_SUCCESS) { 40013859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40023859Sml29623 "==> nxge_rxdma_start_channel: " 40033859Sml29623 "init rxdma event masks failed (0x%08x channel %d)", 40043859Sml29623 status, channel)); 40053859Sml29623 return (NXGE_ERROR | rs); 40063859Sml29623 } 40073859Sml29623 40083859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_rxdma_start_channel: " 40093859Sml29623 "control done - channel 
%d cs 0x%016llx", channel, cs.value)); 40103859Sml29623 40113859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 40123859Sml29623 "==> nxge_rxdma_start_channel: enable done")); 40133859Sml29623 40143859Sml29623 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_rxdma_start_channel")); 40153859Sml29623 40163859Sml29623 return (NXGE_OK); 40173859Sml29623 } 40183859Sml29623 40193859Sml29623 static nxge_status_t 40203859Sml29623 nxge_rxdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 40213859Sml29623 { 40223859Sml29623 npi_handle_t handle; 40233859Sml29623 npi_status_t rs = NPI_SUCCESS; 40243859Sml29623 rx_dma_ctl_stat_t cs; 40253859Sml29623 rx_dma_ent_msk_t ent_mask; 40263859Sml29623 nxge_status_t status = NXGE_OK; 40273859Sml29623 40283859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel")); 40293859Sml29623 40303859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 40313859Sml29623 40323859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "nxge_rxdma_stop_channel: " 40333859Sml29623 "npi handle addr $%p acc $%p", 40343859Sml29623 nxgep->npi_handle.regp, nxgep->npi_handle.regh)); 40353859Sml29623 40363859Sml29623 /* Reset RXDMA channel */ 40373859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 40383859Sml29623 if (rs != NPI_SUCCESS) { 40393859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40403859Sml29623 " nxge_rxdma_stop_channel: " 40413859Sml29623 " reset rxdma failed (0x%08x channel %d)", 40423859Sml29623 rs, channel)); 40433859Sml29623 return (NXGE_ERROR | rs); 40443859Sml29623 } 40453859Sml29623 40463859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40473859Sml29623 "==> nxge_rxdma_stop_channel: reset done")); 40483859Sml29623 40493859Sml29623 /* Set up the interrupt event masks. 
*/ 40503859Sml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 40513859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, 40523859Sml29623 &ent_mask); 40533859Sml29623 if (rs != NPI_SUCCESS) { 40543859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40553859Sml29623 "==> nxge_rxdma_stop_channel: " 40563859Sml29623 "set rxdma event masks failed (0x%08x channel %d)", 40573859Sml29623 rs, channel)); 40583859Sml29623 return (NXGE_ERROR | rs); 40593859Sml29623 } 40603859Sml29623 40613859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40623859Sml29623 "==> nxge_rxdma_stop_channel: event done")); 40633859Sml29623 40643859Sml29623 /* Initialize the receive DMA control and status register */ 40653859Sml29623 cs.value = 0; 40663859Sml29623 status = nxge_init_rxdma_channel_cntl_stat(nxgep, channel, 40673859Sml29623 &cs); 40683859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_stop_channel: control " 40693859Sml29623 " to default (all 0s) 0x%08x", cs.value)); 40703859Sml29623 if (status != NXGE_OK) { 40713859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40723859Sml29623 " nxge_rxdma_stop_channel: init rxdma" 40733859Sml29623 " control register failed (0x%08x channel %d", 40743859Sml29623 status, channel)); 40753859Sml29623 return (status); 40763859Sml29623 } 40773859Sml29623 40783859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, 40793859Sml29623 "==> nxge_rxdma_stop_channel: control done")); 40803859Sml29623 40813859Sml29623 /* disable dma channel */ 40823859Sml29623 status = nxge_disable_rxdma_channel(nxgep, channel); 40833859Sml29623 40843859Sml29623 if (status != NXGE_OK) { 40853859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 40863859Sml29623 " nxge_rxdma_stop_channel: " 40873859Sml29623 " init enable rxdma failed (0x%08x channel %d)", 40883859Sml29623 status, channel)); 40893859Sml29623 return (status); 40903859Sml29623 } 40913859Sml29623 40923859Sml29623 NXGE_DEBUG_MSG((nxgep, 40933859Sml29623 RX_CTL, "==> nxge_rxdma_stop_channel: disable done")); 40943859Sml29623 
40953859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_stop_channel")); 40963859Sml29623 40973859Sml29623 return (NXGE_OK); 40983859Sml29623 } 40993859Sml29623 41003859Sml29623 nxge_status_t 41013859Sml29623 nxge_rxdma_handle_sys_errors(p_nxge_t nxgep) 41023859Sml29623 { 41033859Sml29623 npi_handle_t handle; 41043859Sml29623 p_nxge_rdc_sys_stats_t statsp; 41053859Sml29623 rx_ctl_dat_fifo_stat_t stat; 41063859Sml29623 uint32_t zcp_err_status; 41073859Sml29623 uint32_t ipp_err_status; 41083859Sml29623 nxge_status_t status = NXGE_OK; 41093859Sml29623 npi_status_t rs = NPI_SUCCESS; 41103859Sml29623 boolean_t my_err = B_FALSE; 41113859Sml29623 41123859Sml29623 handle = nxgep->npi_handle; 41133859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 41143859Sml29623 41153859Sml29623 rs = npi_rxdma_rxctl_fifo_error_intr_get(handle, &stat); 41163859Sml29623 41173859Sml29623 if (rs != NPI_SUCCESS) 41183859Sml29623 return (NXGE_ERROR | rs); 41193859Sml29623 41203859Sml29623 if (stat.bits.ldw.id_mismatch) { 41213859Sml29623 statsp->id_mismatch++; 41223859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, NULL, 41233859Sml29623 NXGE_FM_EREPORT_RDMC_ID_MISMATCH); 41243859Sml29623 /* Global fatal error encountered */ 41253859Sml29623 } 41263859Sml29623 41273859Sml29623 if ((stat.bits.ldw.zcp_eop_err) || (stat.bits.ldw.ipp_eop_err)) { 41283859Sml29623 switch (nxgep->mac.portnum) { 41293859Sml29623 case 0: 41303859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT0) || 41313859Sml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT0)) { 41323859Sml29623 my_err = B_TRUE; 41333859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 41343859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 41353859Sml29623 } 41363859Sml29623 break; 41373859Sml29623 case 1: 41383859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT1) || 41393859Sml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT1)) { 41403859Sml29623 my_err = B_TRUE; 41413859Sml29623 
zcp_err_status = stat.bits.ldw.zcp_eop_err; 41423859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 41433859Sml29623 } 41443859Sml29623 break; 41453859Sml29623 case 2: 41463859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT2) || 41473859Sml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT2)) { 41483859Sml29623 my_err = B_TRUE; 41493859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 41503859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 41513859Sml29623 } 41523859Sml29623 break; 41533859Sml29623 case 3: 41543859Sml29623 if ((stat.bits.ldw.zcp_eop_err & FIFO_EOP_PORT3) || 41553859Sml29623 (stat.bits.ldw.ipp_eop_err & FIFO_EOP_PORT3)) { 41563859Sml29623 my_err = B_TRUE; 41573859Sml29623 zcp_err_status = stat.bits.ldw.zcp_eop_err; 41583859Sml29623 ipp_err_status = stat.bits.ldw.ipp_eop_err; 41593859Sml29623 } 41603859Sml29623 break; 41613859Sml29623 default: 41623859Sml29623 return (NXGE_ERROR); 41633859Sml29623 } 41643859Sml29623 } 41653859Sml29623 41663859Sml29623 if (my_err) { 41673859Sml29623 status = nxge_rxdma_handle_port_errors(nxgep, ipp_err_status, 41683859Sml29623 zcp_err_status); 41693859Sml29623 if (status != NXGE_OK) 41703859Sml29623 return (status); 41713859Sml29623 } 41723859Sml29623 41733859Sml29623 return (NXGE_OK); 41743859Sml29623 } 41753859Sml29623 41763859Sml29623 static nxge_status_t 41773859Sml29623 nxge_rxdma_handle_port_errors(p_nxge_t nxgep, uint32_t ipp_status, 41783859Sml29623 uint32_t zcp_status) 41793859Sml29623 { 41803859Sml29623 boolean_t rxport_fatal = B_FALSE; 41813859Sml29623 p_nxge_rdc_sys_stats_t statsp; 41823859Sml29623 nxge_status_t status = NXGE_OK; 41833859Sml29623 uint8_t portn; 41843859Sml29623 41853859Sml29623 portn = nxgep->mac.portnum; 41863859Sml29623 statsp = (p_nxge_rdc_sys_stats_t)&nxgep->statsp->rdc_sys_stats; 41873859Sml29623 41883859Sml29623 if (ipp_status & (0x1 << portn)) { 41893859Sml29623 statsp->ipp_eop_err++; 41903859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 41913859Sml29623 
NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR); 41923859Sml29623 rxport_fatal = B_TRUE; 41933859Sml29623 } 41943859Sml29623 41953859Sml29623 if (zcp_status & (0x1 << portn)) { 41963859Sml29623 statsp->zcp_eop_err++; 41973859Sml29623 NXGE_FM_REPORT_ERROR(nxgep, portn, NULL, 41983859Sml29623 NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR); 41993859Sml29623 rxport_fatal = B_TRUE; 42003859Sml29623 } 42013859Sml29623 42023859Sml29623 if (rxport_fatal) { 42033859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42043859Sml29623 " nxge_rxdma_handle_port_error: " 42053859Sml29623 " fatal error on Port #%d\n", 42063859Sml29623 portn)); 42073859Sml29623 status = nxge_rx_port_fatal_err_recover(nxgep); 42083859Sml29623 if (status == NXGE_OK) { 42093859Sml29623 FM_SERVICE_RESTORED(nxgep); 42103859Sml29623 } 42113859Sml29623 } 42123859Sml29623 42133859Sml29623 return (status); 42143859Sml29623 } 42153859Sml29623 42163859Sml29623 static nxge_status_t 42173859Sml29623 nxge_rxdma_fatal_err_recover(p_nxge_t nxgep, uint16_t channel) 42183859Sml29623 { 42193859Sml29623 npi_handle_t handle; 42203859Sml29623 npi_status_t rs = NPI_SUCCESS; 42213859Sml29623 nxge_status_t status = NXGE_OK; 42223859Sml29623 p_rx_rbr_ring_t rbrp; 42233859Sml29623 p_rx_rcr_ring_t rcrp; 42243859Sml29623 p_rx_mbox_t mboxp; 42253859Sml29623 rx_dma_ent_msk_t ent_mask; 42263859Sml29623 p_nxge_dma_common_t dmap; 42273859Sml29623 int ring_idx; 42283859Sml29623 uint32_t ref_cnt; 42293859Sml29623 p_rx_msg_t rx_msg_p; 42303859Sml29623 int i; 42313859Sml29623 uint32_t nxge_port_rcr_size; 42323859Sml29623 42333859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rxdma_fatal_err_recover")); 42343859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42353859Sml29623 "Recovering from RxDMAChannel#%d error...", channel)); 42363859Sml29623 42373859Sml29623 /* 42383859Sml29623 * Stop the dma channel waits for the stop done. 42393859Sml29623 * If the stop done bit is not set, then create 42403859Sml29623 * an error. 
42413859Sml29623 */ 42423859Sml29623 42433859Sml29623 handle = NXGE_DEV_NPI_HANDLE(nxgep); 42443859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Rx DMA stop...")); 42453859Sml29623 42463859Sml29623 ring_idx = nxge_rxdma_get_ring_index(nxgep, channel); 42473859Sml29623 rbrp = (p_rx_rbr_ring_t)nxgep->rx_rbr_rings->rbr_rings[ring_idx]; 42483859Sml29623 rcrp = (p_rx_rcr_ring_t)nxgep->rx_rcr_rings->rcr_rings[ring_idx]; 42493859Sml29623 42503859Sml29623 MUTEX_ENTER(&rcrp->lock); 42513859Sml29623 MUTEX_ENTER(&rbrp->lock); 42523859Sml29623 MUTEX_ENTER(&rbrp->post_lock); 42533859Sml29623 42543859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA channel...")); 42553859Sml29623 42563859Sml29623 rs = npi_rxdma_cfg_rdc_disable(handle, channel); 42573859Sml29623 if (rs != NPI_SUCCESS) { 42583859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42593859Sml29623 "nxge_disable_rxdma_channel:failed")); 42603859Sml29623 goto fail; 42613859Sml29623 } 42623859Sml29623 42633859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxDMA interrupt...")); 42643859Sml29623 42653859Sml29623 /* Disable interrupt */ 42663859Sml29623 ent_mask.value = RX_DMA_ENT_MSK_ALL; 42673859Sml29623 rs = npi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask); 42683859Sml29623 if (rs != NPI_SUCCESS) { 42693859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42703859Sml29623 "nxge_rxdma_stop_channel: " 42713859Sml29623 "set rxdma event masks failed (channel %d)", 42723859Sml29623 channel)); 42733859Sml29623 } 42743859Sml29623 42753859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel reset...")); 42763859Sml29623 42773859Sml29623 /* Reset RXDMA channel */ 42783859Sml29623 rs = npi_rxdma_cfg_rdc_reset(handle, channel); 42793859Sml29623 if (rs != NPI_SUCCESS) { 42803859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 42813859Sml29623 "nxge_rxdma_fatal_err_recover: " 42823859Sml29623 " reset rxdma failed (channel %d)", channel)); 42833859Sml29623 goto fail; 42843859Sml29623 } 42853859Sml29623 42863859Sml29623 
nxge_port_rcr_size = nxgep->nxge_port_rcr_size; 42873859Sml29623 42883859Sml29623 mboxp = 42893859Sml29623 (p_rx_mbox_t)nxgep->rx_mbox_areas_p->rxmbox_areas[ring_idx]; 42903859Sml29623 42913859Sml29623 rbrp->rbr_wr_index = (rbrp->rbb_max - 1); 42923859Sml29623 rbrp->rbr_rd_index = 0; 42933859Sml29623 42943859Sml29623 rcrp->comp_rd_index = 0; 42953859Sml29623 rcrp->comp_wt_index = 0; 42963859Sml29623 rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p = 42973859Sml29623 (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc); 42983859Sml29623 rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp = 42995125Sjoycey #if defined(__i386) 43005125Sjoycey (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 43015125Sjoycey #else 43023859Sml29623 (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc); 43035125Sjoycey #endif 43043859Sml29623 43053859Sml29623 rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p + 43063859Sml29623 (nxge_port_rcr_size - 1); 43073859Sml29623 rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp + 43083859Sml29623 (nxge_port_rcr_size - 1); 43093859Sml29623 43103859Sml29623 dmap = (p_nxge_dma_common_t)&rcrp->rcr_desc; 43113859Sml29623 bzero((caddr_t)dmap->kaddrp, dmap->alength); 43123859Sml29623 43133859Sml29623 cmn_err(CE_NOTE, "!rbr entries = %d\n", rbrp->rbr_max_size); 43143859Sml29623 43153859Sml29623 for (i = 0; i < rbrp->rbr_max_size; i++) { 43163859Sml29623 rx_msg_p = rbrp->rx_msg_ring[i]; 43173859Sml29623 ref_cnt = rx_msg_p->ref_cnt; 43183859Sml29623 if (ref_cnt != 1) { 43193859Sml29623 if (rx_msg_p->cur_usage_cnt != 43203859Sml29623 rx_msg_p->max_usage_cnt) { 43213859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43223859Sml29623 "buf[%d]: cur_usage_cnt = %d " 43233859Sml29623 "max_usage_cnt = %d\n", i, 43243859Sml29623 rx_msg_p->cur_usage_cnt, 43253859Sml29623 rx_msg_p->max_usage_cnt)); 43263859Sml29623 } else { 43273859Sml29623 /* Buffer can be re-posted */ 43283859Sml29623 rx_msg_p->free = B_TRUE; 43293859Sml29623 rx_msg_p->cur_usage_cnt = 0; 
43303859Sml29623 rx_msg_p->max_usage_cnt = 0xbaddcafe; 43313859Sml29623 rx_msg_p->pkt_buf_size = 0; 43323859Sml29623 } 43333859Sml29623 } 43343859Sml29623 } 43353859Sml29623 43363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "RxDMA channel re-start...")); 43373859Sml29623 43383859Sml29623 status = nxge_rxdma_start_channel(nxgep, channel, rbrp, rcrp, mboxp); 43393859Sml29623 if (status != NXGE_OK) { 43403859Sml29623 goto fail; 43413859Sml29623 } 43423859Sml29623 43433859Sml29623 MUTEX_EXIT(&rbrp->post_lock); 43443859Sml29623 MUTEX_EXIT(&rbrp->lock); 43453859Sml29623 MUTEX_EXIT(&rcrp->lock); 43463859Sml29623 43473859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43483859Sml29623 "Recovery Successful, RxDMAChannel#%d Restored", 43493859Sml29623 channel)); 43503859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "==> nxge_rxdma_fatal_err_recover")); 43513859Sml29623 43523859Sml29623 return (NXGE_OK); 43533859Sml29623 fail: 43543859Sml29623 MUTEX_EXIT(&rbrp->post_lock); 43553859Sml29623 MUTEX_EXIT(&rbrp->lock); 43563859Sml29623 MUTEX_EXIT(&rcrp->lock); 43573859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 43583859Sml29623 43593859Sml29623 return (NXGE_ERROR | rs); 43603859Sml29623 } 43613859Sml29623 43623859Sml29623 nxge_status_t 43633859Sml29623 nxge_rx_port_fatal_err_recover(p_nxge_t nxgep) 43643859Sml29623 { 43653859Sml29623 nxge_status_t status = NXGE_OK; 43663859Sml29623 p_nxge_dma_common_t *dma_buf_p; 43673859Sml29623 uint16_t channel; 43683859Sml29623 int ndmas; 43693859Sml29623 int i; 43703859Sml29623 43713859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "<== nxge_rx_port_fatal_err_recover")); 43723859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43733859Sml29623 "Recovering from RxPort error...")); 43743859Sml29623 /* Disable RxMAC */ 43753859Sml29623 43763859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Disable RxMAC...\n")); 43773859Sml29623 if (nxge_rx_mac_disable(nxgep) != NXGE_OK) 43783859Sml29623 goto fail; 43793859Sml29623 43803859Sml29623 NXGE_DELAY(1000); 
43813859Sml29623 43823859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Stop all RxDMA channels...")); 43833859Sml29623 43843859Sml29623 ndmas = nxgep->rx_buf_pool_p->ndmas; 43853859Sml29623 dma_buf_p = nxgep->rx_buf_pool_p->dma_buf_pool_p; 43863859Sml29623 43873859Sml29623 for (i = 0; i < ndmas; i++) { 43883859Sml29623 channel = ((p_nxge_dma_common_t)dma_buf_p[i])->dma_channel; 43893859Sml29623 if (nxge_rxdma_fatal_err_recover(nxgep, channel) != NXGE_OK) { 43903859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 43913859Sml29623 "Could not recover channel %d", 43923859Sml29623 channel)); 43933859Sml29623 } 43943859Sml29623 } 43953859Sml29623 43963859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset IPP...")); 43973859Sml29623 43983859Sml29623 /* Reset IPP */ 43993859Sml29623 if (nxge_ipp_reset(nxgep) != NXGE_OK) { 44003859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44013859Sml29623 "nxge_rx_port_fatal_err_recover: " 44023859Sml29623 "Failed to reset IPP")); 44033859Sml29623 goto fail; 44043859Sml29623 } 44053859Sml29623 44063859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Reset RxMAC...")); 44073859Sml29623 44083859Sml29623 /* Reset RxMAC */ 44093859Sml29623 if (nxge_rx_mac_reset(nxgep) != NXGE_OK) { 44103859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44113859Sml29623 "nxge_rx_port_fatal_err_recover: " 44123859Sml29623 "Failed to reset RxMAC")); 44133859Sml29623 goto fail; 44143859Sml29623 } 44153859Sml29623 44163859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize IPP...")); 44173859Sml29623 44183859Sml29623 /* Re-Initialize IPP */ 44193859Sml29623 if (nxge_ipp_init(nxgep) != NXGE_OK) { 44203859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44213859Sml29623 "nxge_rx_port_fatal_err_recover: " 44223859Sml29623 "Failed to init IPP")); 44233859Sml29623 goto fail; 44243859Sml29623 } 44253859Sml29623 44263859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-initialize RxMAC...")); 44273859Sml29623 44283859Sml29623 /* Re-Initialize RxMAC */ 44293859Sml29623 if ((status = 
nxge_rx_mac_init(nxgep)) != NXGE_OK) { 44303859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44313859Sml29623 "nxge_rx_port_fatal_err_recover: " 44323859Sml29623 "Failed to reset RxMAC")); 44333859Sml29623 goto fail; 44343859Sml29623 } 44353859Sml29623 44363859Sml29623 NXGE_DEBUG_MSG((nxgep, RX_CTL, "Re-enable RxMAC...")); 44373859Sml29623 44383859Sml29623 /* Re-enable RxMAC */ 44393859Sml29623 if ((status = nxge_rx_mac_enable(nxgep)) != NXGE_OK) { 44403859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44413859Sml29623 "nxge_rx_port_fatal_err_recover: " 44423859Sml29623 "Failed to enable RxMAC")); 44433859Sml29623 goto fail; 44443859Sml29623 } 44453859Sml29623 44463859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 44473859Sml29623 "Recovery Successful, RxPort Restored")); 44483859Sml29623 44493859Sml29623 return (NXGE_OK); 44503859Sml29623 fail: 44513859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 44523859Sml29623 return (status); 44533859Sml29623 } 44543859Sml29623 44553859Sml29623 void 44563859Sml29623 nxge_rxdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 44573859Sml29623 { 44583859Sml29623 rx_dma_ctl_stat_t cs; 44593859Sml29623 rx_ctl_dat_fifo_stat_t cdfs; 44603859Sml29623 44613859Sml29623 switch (err_id) { 44623859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR: 44633859Sml29623 case NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR: 44643859Sml29623 case NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR: 44653859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR: 44663859Sml29623 case NXGE_FM_EREPORT_RDMC_RBR_TMOUT: 44673859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR: 44683859Sml29623 case NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS: 44693859Sml29623 case NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR: 44703859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRINCON: 44713859Sml29623 case NXGE_FM_EREPORT_RDMC_RCRFULL: 44723859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRFULL: 44733859Sml29623 case NXGE_FM_EREPORT_RDMC_RBRLOGPAGE: 44743859Sml29623 case NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE: 44753859Sml29623 
case NXGE_FM_EREPORT_RDMC_CONFIG_ERR: 44763859Sml29623 RXDMA_REG_READ64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 44773859Sml29623 chan, &cs.value); 44783859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_RCR_ACK_ERR) 44793859Sml29623 cs.bits.hdw.rcr_ack_err = 1; 44803859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_DC_FIFO_ERR) 44813859Sml29623 cs.bits.hdw.dc_fifo_err = 1; 44823859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCR_SHA_PAR) 44833859Sml29623 cs.bits.hdw.rcr_sha_par = 1; 44843859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_PRE_PAR) 44853859Sml29623 cs.bits.hdw.rbr_pre_par = 1; 44863859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBR_TMOUT) 44873859Sml29623 cs.bits.hdw.rbr_tmout = 1; 44883859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_CNT_ERR) 44893859Sml29623 cs.bits.hdw.rsp_cnt_err = 1; 44903859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_BYTE_EN_BUS) 44913859Sml29623 cs.bits.hdw.byte_en_bus = 1; 44923859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RSP_DAT_ERR) 44933859Sml29623 cs.bits.hdw.rsp_dat_err = 1; 44943859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CONFIG_ERR) 44953859Sml29623 cs.bits.hdw.config_err = 1; 44963859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRINCON) 44973859Sml29623 cs.bits.hdw.rcrincon = 1; 44983859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RCRFULL) 44993859Sml29623 cs.bits.hdw.rcrfull = 1; 45003859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRFULL) 45013859Sml29623 cs.bits.hdw.rbrfull = 1; 45023859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_RBRLOGPAGE) 45033859Sml29623 cs.bits.hdw.rbrlogpage = 1; 45043859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_CFIGLOGPAGE) 45053859Sml29623 cs.bits.hdw.cfiglogpage = 1; 45065125Sjoycey #if defined(__i386) 45075125Sjoycey cmn_err(CE_NOTE, "!Write 0x%llx to RX_DMA_CTL_STAT_DBG_REG\n", 45085125Sjoycey cs.value); 45095125Sjoycey #else 45103859Sml29623 cmn_err(CE_NOTE, "!Write 0x%lx to RX_DMA_CTL_STAT_DBG_REG\n", 45113859Sml29623 
cs.value); 45125125Sjoycey #endif 45133859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, RX_DMA_CTL_STAT_DBG_REG, 45143859Sml29623 chan, cs.value); 45153859Sml29623 break; 45163859Sml29623 case NXGE_FM_EREPORT_RDMC_ID_MISMATCH: 45173859Sml29623 case NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR: 45183859Sml29623 case NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR: 45193859Sml29623 cdfs.value = 0; 45203859Sml29623 if (err_id == NXGE_FM_EREPORT_RDMC_ID_MISMATCH) 45213859Sml29623 cdfs.bits.ldw.id_mismatch = (1 << nxgep->mac.portnum); 45223859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_ZCP_EOP_ERR) 45233859Sml29623 cdfs.bits.ldw.zcp_eop_err = (1 << nxgep->mac.portnum); 45243859Sml29623 else if (err_id == NXGE_FM_EREPORT_RDMC_IPP_EOP_ERR) 45253859Sml29623 cdfs.bits.ldw.ipp_eop_err = (1 << nxgep->mac.portnum); 45265125Sjoycey #if defined(__i386) 45275125Sjoycey cmn_err(CE_NOTE, 45285125Sjoycey "!Write 0x%llx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 45295125Sjoycey cdfs.value); 45305125Sjoycey #else 45313859Sml29623 cmn_err(CE_NOTE, 45323859Sml29623 "!Write 0x%lx to RX_CTL_DAT_FIFO_STAT_DBG_REG\n", 45333859Sml29623 cdfs.value); 45345125Sjoycey #endif 45353859Sml29623 RXDMA_REG_WRITE64(nxgep->npi_handle, 45363859Sml29623 RX_CTL_DAT_FIFO_STAT_DBG_REG, chan, cdfs.value); 45373859Sml29623 break; 45383859Sml29623 case NXGE_FM_EREPORT_RDMC_DCF_ERR: 45393859Sml29623 break; 4540*5165Syc148097 case NXGE_FM_EREPORT_RDMC_RCR_ERR: 45413859Sml29623 break; 45423859Sml29623 } 45433859Sml29623 } 45443859Sml29623 45453859Sml29623 45463859Sml29623 static uint16_t 45473859Sml29623 nxge_get_pktbuf_size(p_nxge_t nxgep, int bufsz_type, rbr_cfig_b_t rbr_cfgb) 45483859Sml29623 { 45493859Sml29623 uint16_t sz = RBR_BKSIZE_8K_BYTES; 45503859Sml29623 45513859Sml29623 switch (bufsz_type) { 45523859Sml29623 case RCR_PKTBUFSZ_0: 45533859Sml29623 switch (rbr_cfgb.bits.ldw.bufsz0) { 45543859Sml29623 case RBR_BUFSZ0_256B: 45553859Sml29623 sz = RBR_BUFSZ0_256_BYTES; 45563859Sml29623 break; 45573859Sml29623 case RBR_BUFSZ0_512B: 
45583859Sml29623 sz = RBR_BUFSZ0_512B_BYTES; 45593859Sml29623 break; 45603859Sml29623 case RBR_BUFSZ0_1K: 45613859Sml29623 sz = RBR_BUFSZ0_1K_BYTES; 45623859Sml29623 break; 45633859Sml29623 case RBR_BUFSZ0_2K: 45643859Sml29623 sz = RBR_BUFSZ0_2K_BYTES; 45653859Sml29623 break; 45663859Sml29623 default: 45673859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45683859Sml29623 "nxge_get_pktbug_size: bad bufsz0")); 45693859Sml29623 break; 45703859Sml29623 } 45713859Sml29623 break; 45723859Sml29623 case RCR_PKTBUFSZ_1: 45733859Sml29623 switch (rbr_cfgb.bits.ldw.bufsz1) { 45743859Sml29623 case RBR_BUFSZ1_1K: 45753859Sml29623 sz = RBR_BUFSZ1_1K_BYTES; 45763859Sml29623 break; 45773859Sml29623 case RBR_BUFSZ1_2K: 45783859Sml29623 sz = RBR_BUFSZ1_2K_BYTES; 45793859Sml29623 break; 45803859Sml29623 case RBR_BUFSZ1_4K: 45813859Sml29623 sz = RBR_BUFSZ1_4K_BYTES; 45823859Sml29623 break; 45833859Sml29623 case RBR_BUFSZ1_8K: 45843859Sml29623 sz = RBR_BUFSZ1_8K_BYTES; 45853859Sml29623 break; 45863859Sml29623 default: 45873859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 45883859Sml29623 "nxge_get_pktbug_size: bad bufsz1")); 45893859Sml29623 break; 45903859Sml29623 } 45913859Sml29623 break; 45923859Sml29623 case RCR_PKTBUFSZ_2: 45933859Sml29623 switch (rbr_cfgb.bits.ldw.bufsz2) { 45943859Sml29623 case RBR_BUFSZ2_2K: 45953859Sml29623 sz = RBR_BUFSZ2_2K_BYTES; 45963859Sml29623 break; 45973859Sml29623 case RBR_BUFSZ2_4K: 45983859Sml29623 sz = RBR_BUFSZ2_4K_BYTES; 45993859Sml29623 break; 46003859Sml29623 case RBR_BUFSZ2_8K: 46013859Sml29623 sz = RBR_BUFSZ2_8K_BYTES; 46023859Sml29623 break; 46033859Sml29623 case RBR_BUFSZ2_16K: 46043859Sml29623 sz = RBR_BUFSZ2_16K_BYTES; 46053859Sml29623 break; 46063859Sml29623 default: 46073859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46083859Sml29623 "nxge_get_pktbug_size: bad bufsz2")); 46093859Sml29623 break; 46103859Sml29623 } 46113859Sml29623 break; 46123859Sml29623 case RCR_SINGLE_BLOCK: 46133859Sml29623 switch (rbr_cfgb.bits.ldw.bksize) { 
46143859Sml29623 case BKSIZE_4K: 46153859Sml29623 sz = RBR_BKSIZE_4K_BYTES; 46163859Sml29623 break; 46173859Sml29623 case BKSIZE_8K: 46183859Sml29623 sz = RBR_BKSIZE_8K_BYTES; 46193859Sml29623 break; 46203859Sml29623 case BKSIZE_16K: 46213859Sml29623 sz = RBR_BKSIZE_16K_BYTES; 46223859Sml29623 break; 46233859Sml29623 case BKSIZE_32K: 46243859Sml29623 sz = RBR_BKSIZE_32K_BYTES; 46253859Sml29623 break; 46263859Sml29623 default: 46273859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46283859Sml29623 "nxge_get_pktbug_size: bad bksize")); 46293859Sml29623 break; 46303859Sml29623 } 46313859Sml29623 break; 46323859Sml29623 default: 46333859Sml29623 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 46343859Sml29623 "nxge_get_pktbug_size: bad bufsz_type")); 46353859Sml29623 break; 46363859Sml29623 } 46373859Sml29623 return (sz); 46383859Sml29623 } 4639