13859Sml29623 /* 23859Sml29623 * CDDL HEADER START 33859Sml29623 * 43859Sml29623 * The contents of this file are subject to the terms of the 53859Sml29623 * Common Development and Distribution License (the "License"). 63859Sml29623 * You may not use this file except in compliance with the License. 73859Sml29623 * 83859Sml29623 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 93859Sml29623 * or http://www.opensolaris.org/os/licensing. 103859Sml29623 * See the License for the specific language governing permissions 113859Sml29623 * and limitations under the License. 123859Sml29623 * 133859Sml29623 * When distributing Covered Code, include this CDDL HEADER in each 143859Sml29623 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 153859Sml29623 * If applicable, add the following below this CDDL HEADER, with the 163859Sml29623 * fields enclosed by brackets "[]" replaced with your own identifying 173859Sml29623 * information: Portions Copyright [yyyy] [name of copyright owner] 183859Sml29623 * 193859Sml29623 * CDDL HEADER END 203859Sml29623 */ 2110218SMichael.Speer@Sun.COM 223859Sml29623 /* 23*11878SVenu.Iyer@Sun.COM * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 243859Sml29623 * Use is subject to license terms. 253859Sml29623 */ 263859Sml29623 273859Sml29623 #ifndef _SYS_NXGE_NXGE_RXDMA_H 283859Sml29623 #define _SYS_NXGE_NXGE_RXDMA_H 293859Sml29623 303859Sml29623 #ifdef __cplusplus 313859Sml29623 extern "C" { 323859Sml29623 #endif 333859Sml29623 343859Sml29623 #include <sys/nxge/nxge_rxdma_hw.h> 353859Sml29623 #include <npi_rxdma.h> 363859Sml29623 373859Sml29623 #define RXDMA_CK_DIV_DEFAULT 7500 /* 25 usec */ 383859Sml29623 /* 393859Sml29623 * Hardware RDC designer: 8 cache lines during Atlas bringup. 
403859Sml29623 */ 413859Sml29623 #define RXDMA_RED_LESS_BYTES (8 * 64) /* 8 cache line */ 423859Sml29623 #define RXDMA_RED_LESS_ENTRIES (RXDMA_RED_LESS_BYTES/8) 433859Sml29623 #define RXDMA_RED_WINDOW_DEFAULT 0 443859Sml29623 #define RXDMA_RED_THRES_DEFAULT 0 453859Sml29623 463859Sml29623 #define RXDMA_RCR_PTHRES_DEFAULT 0x20 473859Sml29623 #define RXDMA_RCR_TO_DEFAULT 0x8 483859Sml29623 493859Sml29623 /* 503859Sml29623 * hardware workarounds: kick 16 (was 8 before) 513859Sml29623 */ 523859Sml29623 #define NXGE_RXDMA_POST_BATCH 16 533859Sml29623 543859Sml29623 #define RXBUF_START_ADDR(a, index, bsize) ((a & (index * bsize)) 553859Sml29623 #define RXBUF_OFFSET_FROM_START(a, start) (start - a) 563859Sml29623 #define RXBUF_64B_ALIGNED 64 573859Sml29623 583859Sml29623 #define NXGE_RXBUF_EXTRA 34 593859Sml29623 /* 603859Sml29623 * Receive buffer thresholds and buffer types 613859Sml29623 */ 623859Sml29623 #define NXGE_RX_BCOPY_SCALE 8 /* use 1/8 as lowest granularity */ 633859Sml29623 typedef enum { 643859Sml29623 NXGE_RX_COPY_ALL = 0, /* do bcopy on every packet */ 653859Sml29623 NXGE_RX_COPY_1, /* bcopy on 1/8 of buffer posted */ 663859Sml29623 NXGE_RX_COPY_2, /* bcopy on 2/8 of buffer posted */ 673859Sml29623 NXGE_RX_COPY_3, /* bcopy on 3/8 of buffer posted */ 683859Sml29623 NXGE_RX_COPY_4, /* bcopy on 4/8 of buffer posted */ 693859Sml29623 NXGE_RX_COPY_5, /* bcopy on 5/8 of buffer posted */ 703859Sml29623 NXGE_RX_COPY_6, /* bcopy on 6/8 of buffer posted */ 713859Sml29623 NXGE_RX_COPY_7, /* bcopy on 7/8 of buffer posted */ 723859Sml29623 NXGE_RX_COPY_NONE /* don't do bcopy at all */ 733859Sml29623 } nxge_rxbuf_threshold_t; 743859Sml29623 753859Sml29623 typedef enum { 763859Sml29623 NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0, /* bcopy buffer size 0 (small) */ 773859Sml29623 NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1, /* bcopy buffer size 1 (medium) */ 783859Sml29623 NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2 /* bcopy buffer size 2 (large) */ 793859Sml29623 } nxge_rxbuf_type_t; 803859Sml29623 
/*
 * Snapshot of the RDMC hardware error-log registers for one channel:
 * prefetch and shadow parity error logs plus the last completion
 * error type seen.
 */
typedef struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;	/* prefetch parity error log */
	rdmc_par_err_log_t	sha_par;	/* shadow parity error log */
	uint8_t			compl_err_type;	/* last completion error type */
} rdc_errlog_t;

/*
 * Receive Statistics.
 * Per-ring counters; exported via kstats elsewhere in the driver
 * (presumably -- confirm against nxge_kstats.c).
 */
typedef struct _nxge_rx_ring_stats_t {
	uint64_t	ipackets;
	uint64_t	ibytes;
	uint32_t	ierrors;
	uint32_t	multircv;
	uint32_t	brdcstrcv;
	uint32_t	norcvbuf;

	uint32_t	rx_inits;
	uint32_t	rx_jumbo_pkts;
	uint32_t	rx_multi_pkts;
	uint32_t	rx_mtu_pkts;
	uint32_t	rx_no_buf;

	/*
	 * Receive buffer management statistics.
	 */
	uint32_t	rx_new_pages;
	uint32_t	rx_new_mtu_pgs;
	uint32_t	rx_new_nxt_pgs;
	uint32_t	rx_reused_pgs;
	uint32_t	rx_mtu_drops;
	uint32_t	rx_nxt_drops;

	/*
	 * Error event stats.
	 */
	uint32_t	rx_rbr_tmout;
	uint32_t	pkt_too_long_err;
	uint32_t	l2_err;
	uint32_t	l4_cksum_err;
	uint32_t	fflp_soft_err;
	uint32_t	zcp_soft_err;
	uint32_t	rcr_unknown_err;
	uint32_t	dcf_err;
	uint32_t	rbr_tmout;
	uint32_t	rsp_cnt_err;
	uint32_t	byte_en_err;
	uint32_t	byte_en_bus;
	uint32_t	rsp_dat_err;
	uint32_t	rcr_ack_err;
	uint32_t	dc_fifo_err;
	uint32_t	rcr_sha_par;
	uint32_t	rbr_pre_par;
	uint32_t	port_drop_pkt;
	uint32_t	wred_drop;
	uint32_t	rbr_pre_empty;
	uint32_t	rcr_shadow_full;
	uint32_t	config_err;
	uint32_t	rcrincon;
	uint32_t	rcrfull;
	uint32_t	rbr_empty;
	uint32_t	rbrfull;
	uint32_t	rbrlogpage;
	uint32_t	cfiglogpage;
	uint32_t	rcrto;
	uint32_t	rcrthres;
	uint32_t	mex;
	rdc_errlog_t	errlog;		/* last logged hardware error state */
} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;

/* System-level (per-device, not per-ring) RDC error counters. */
typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;
	uint32_t	sha_par;
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;
	uint32_t	zcp_eop_err;
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;

/*
 * Software reserved buffer offset
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	uint32_t		index;
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;


/*
 * Software state for one receive buffer block posted to the RBR:
 * its DMA mapping, geometry (block size/index, packet buffer size
 * code), usage counters, and bcopy bookkeeping.
 * NOTE(review): ref_cnt/freeb appear to manage buffers loaned
 * upstream (cf. the <rbr_ref_cnt> comment on _rx_rbr_ring_t) --
 * confirm against nxge_rxdma.c.
 */
typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;
	nxge_os_mutex_t 	lock;
	struct _nxge_t		*nxgep;
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* owning buffer ring */
	boolean_t		spare_in_use;
	boolean_t		free;
	uint32_t 		ref_cnt;
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t 		pass_up_cnt;
	boolean_t 		release;
#endif
	nxge_os_frtn_t 		freeb;		/* free routine descriptor */
	size_t 			bytes_arrived;
	size_t 			bytes_expected;
	size_t 			block_size;
	uint32_t		block_index;
	uint32_t 		pkt_buf_size;
	uint32_t 		pkt_buf_size_code;
	uint32_t 		max_pkt_bufs;
	uint32_t		cur_usage_cnt;
	uint32_t 		max_usage_cnt;
	uchar_t 		*buffer;	/* kernel virtual address */
	uint32_t 		pri;
	uint32_t 		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t 		rx_mblk_p;
	boolean_t		rx_use_bcopy;
} rx_msg_t, *p_rx_msg_t;

/* DMA access/handle pair plus the NPI handle used for register access. */
typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle */
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;
} rx_dma_handle_t, *p_rx_dma_handle_t;


/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;	/* RCR descriptor memory */

	struct _nxge_t		*nxgep;

	p_nxge_rx_ring_stats_t	rdc_stats;	/* per-ring statistics */

	boolean_t		poll_flag;	/* B_TRUE, if polling mode */

	rcrcfig_a_t		rcr_cfga;
	rcrcfig_b_t		rcr_cfgb;

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	uint16_t		rdc;		/* RX DMA channel number */
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t 		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t 			comp_wrap_mask;
	uint_t 			comp_rd_index;	/* software read index */
	uint_t 			comp_wt_index;	/* software write index */

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;	/* companion buffer ring */
	uint32_t		intr_timeout;
	uint32_t		intr_threshold;
	uint64_t		max_receive_pkts;
	mac_ring_handle_t	rcr_mac_handle;	/* GLDv3 ring handle */
	uint64_t		rcr_gen_num;
	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
	p_nxge_ldv_t		ldvp;		/* logical device vector */
	p_nxge_ldg_t		ldgp;		/* logical device group */
	boolean_t		started;
} rx_rcr_ring_t, *p_rx_rcr_ring_t;



/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t buf_index;
	uint32_t start_index;
	uint32_t buf_size;
	uint64_t dvma_addr;	/* DMA (device virtual) address */
	uint64_t kaddr;		/* kernel virtual address */
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/*
 * Buffer index information
 */
typedef struct _rxring_info_t {
	uint32_t hint[RCR_N_PKTBUF_SZ];	/* search hint per buffer size */
	uint32_t block_size_mask;
	uint16_t max_iterations;
	rxbuf_index_info_t
	    buffer[NXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;


/* Lifecycle state of a receive buffer ring (see rbr_state below). */
typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;


/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;	/* RBR descriptor memory */
	p_rx_msg_t 		*rx_msg_ring;	/* per-buffer software state */
	p_nxge_dma_common_t 	*dma_bufp;
	rbr_cfig_a_t		rbr_cfga;
	rbr_cfig_b_t		rbr_cfgb;
	rbr_kick_t		rbr_kick;
	/* Logical page (address translation) register shadows. */
	log_page_vld_t		page_valid;
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;	/* serializes buffer posting */
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;		/* RX DMA channel number */
	uint16_t		rdc_grp_id;
	uint_t 			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t 			rbr_wrap_mask;
	uint_t 			rbb_max;
	uint_t 			rbb_added;
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;
	/* Geometry for the three packet buffer size classes (0/1/2). */
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint32_t		*rbr_desc_vp;	/* descriptor virtual address */

	p_rx_rcr_ring_t 	rx_rcr_p;	/* companion completion ring */

	uint_t 			rbr_wr_index;
	uint_t 			rbr_rd_index;

	rxring_info_t		*ring_info;
#if defined(sun4v) && defined(NIU_LP_WORKAROUND)
	/* Hypervisor logical-page addresses for the NIU workaround. */
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t 			rbr_consumed;
	uint_t 			rbr_threshold_hi;
	uint_t 			rbr_threshold_lo;
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state;	/* POSTING, etc */
	/*
	 * Receive buffer allocation types:
	 *   ddi_dma_mem_alloc(), contig_mem_alloc(), kmem_alloc()
	 */
	buf_alloc_type_t	rbr_alloc_type;
} rx_rbr_ring_t, *p_rx_rbr_ring_t;

/* Receive Mailbox */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;	/* mailbox DMA memory */
	rxdma_cfig1_t		rx_cfg1;
	rxdma_cfig2_t		rx_cfg2;
	uint64_t		mbox_addr;
	boolean_t		cfg_set;

	nxge_os_mutex_t 	lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;		/* RX DMA channel number */
} rx_mbox_t, *p_rx_mbox_t;


/* Collection of all RBRs for the device (one per DMA channel). */
typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t 	*rbr_rings;
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

/* Collection of all RCRs for the device (one per DMA channel). */
typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t 	*rcr_rings;
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

/* Collection of all receive mailboxes (one per DMA channel). */
typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t 		*rxmbox_areas;
	uint32_t		ndmas;		/* number of DMA channels */
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;

/*
 * Global register definitions per chip and they are initialized
 * using the function zero control registers.
 */

typedef struct _rxdma_globals {
	boolean_t		mode32;		/* 32-bit addressing mode */
	uint16_t		rxdma_ck_div_cnt;
	uint16_t		rxdma_red_ran_init;
	uint32_t		rxdma_eing_timeout;
} rxdma_globals_t, *p_rxdma_globals;


/*
 * Receive DMA Prototypes.
 * Implemented in nxge_rxdma.c (presumably -- the definitions are
 * outside this header).
 */
/* Bring-up / tear-down of all channels, and of a single channel. */
nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
void nxge_uninit_rxdma_channels(p_nxge_t);

nxge_status_t nxge_init_rxdma_channel(p_nxge_t, int);
void nxge_uninit_rxdma_channel(p_nxge_t, int);

nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
	uint16_t, p_rx_dma_ctl_stat_t);
nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
	p_rx_mbox_t);
nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
	uint16_t, p_rx_dma_ent_msk_t);

nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_rx(p_nxge_t);
void nxge_fixup_rxdma_rings(p_nxge_t);
nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);

void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);

/* GLDv3 polling entry points. */
mblk_t *nxge_rx_poll(void *, int);
int nxge_enable_poll(void *);
int nxge_disable_poll(void *);

/* Diagnostics and error handling. */
void nxge_rxdma_regs_dump_channels(p_nxge_t);
nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);

/* Receive buffer pool allocation (defined in nxge_main/buf code). */
extern nxge_status_t nxge_alloc_rx_mem_pool(p_nxge_t);
extern nxge_status_t nxge_alloc_rxb(p_nxge_t nxgep, int channel);
extern void nxge_free_rxb(p_nxge_t nxgep, int channel);

int nxge_get_rxring_index(p_nxge_t, int, int);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_NXGE_NXGE_RXDMA_H */