/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_NXGE_NXGE_RXDMA_H
#define	_SYS_NXGE_NXGE_RXDMA_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/nxge/nxge_rxdma_hw.h>
#include <npi_rxdma.h>

#define	RXDMA_CK_DIV_DEFAULT		7500	/* 25 usec */
/*
 * Per the hardware RDC designer: 8 cache lines (value chosen during
 * Atlas bringup).
 */
#define	RXDMA_RED_LESS_BYTES		(8 * 64) /* 8 cache lines */
#define	RXDMA_RED_LESS_ENTRIES		(RXDMA_RED_LESS_BYTES/8)
#define	RXDMA_RED_WINDOW_DEFAULT	0
#define	RXDMA_RED_THRES_DEFAULT		0

#define	RXDMA_RCR_PTHRES_DEFAULT	0x20
#define	RXDMA_RCR_TO_DEFAULT		0x8
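/*
 * Sizing note (illustrative; derived only from the comments above, not
 * from the hardware spec): a clock-divider count of 7500 matching the
 * "25 usec" annotation implies a reference clock near 300 MHz
 * (7500 / 300e6 = 25 usec).  The RCR defaults then request a completion
 * interrupt after 0x20 (32) packets or 0x8 ticks of that divided clock,
 * presumably whichever occurs first; the register types these values
 * feed (e.g. rcrcfig_b_t) come from the included nxge_rxdma_hw.h.
 */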

/*
 * Hardware workaround: post (kick) receive buffers in batches of 16
 * (previously 8).
 */
#define	NXGE_RXDMA_POST_BATCH		16

#define	RXBUF_START_ADDR(a, index, bsize)	((a) + ((index) * (bsize)))
#define	RXBUF_OFFSET_FROM_START(a, start)	(start - a)
#define	RXBUF_64B_ALIGNED		64
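/*
 * Example use of the buffer address macros (a sketch, assuming the
 * start of the index-th packet buffer is the block base plus
 * index * bsize; this code is not part of the driver):
 *
 *	uint64_t start  = RXBUF_START_ADDR(base, index, bsize);
 *	uint64_t offset = RXBUF_OFFSET_FROM_START(base, start);
 *
 * so that offset == index * bsize.  RXBUF_64B_ALIGNED records the
 * 64-byte alignment assumed for posted buffer addresses.
 */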

#define	NXGE_RXBUF_EXTRA		34
/*
 * Receive buffer thresholds and buffer types
 */
#define	NXGE_RX_BCOPY_SCALE	8	/* use 1/8 as lowest granularity */
typedef enum {
	NXGE_RX_COPY_ALL = 0,		/* do bcopy on every packet	 */
	NXGE_RX_COPY_1,			/* bcopy on 1/8 of buffer posted */
	NXGE_RX_COPY_2,			/* bcopy on 2/8 of buffer posted */
	NXGE_RX_COPY_3,			/* bcopy on 3/8 of buffer posted */
	NXGE_RX_COPY_4,			/* bcopy on 4/8 of buffer posted */
	NXGE_RX_COPY_5,			/* bcopy on 5/8 of buffer posted */
	NXGE_RX_COPY_6,			/* bcopy on 6/8 of buffer posted */
	NXGE_RX_COPY_7,			/* bcopy on 7/8 of buffer posted */
	NXGE_RX_COPY_NONE		/* don't do bcopy at all	 */
} nxge_rxbuf_threshold_t;
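/*
 * How the threshold levels are typically applied (a sketch; rbb_max,
 * rbr_consumed and rbr_threshold_lo refer to rx_rbr_ring_t fields
 * declared later in this file, and the formula itself is an assumption):
 * a level N from the enum above maps to a buffer-consumption watermark
 * of roughly
 *
 *	watermark = (rbb_max * N) / NXGE_RX_BCOPY_SCALE;
 *
 * Once rbr_consumed crosses the watermark the driver stops loaning
 * buffers upstream and bcopy's packets instead, so that buffers can be
 * reposted to the RBR immediately.
 */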

typedef enum {
	NXGE_RBR_TYPE0 = RCR_PKTBUFSZ_0,  /* bcopy buffer size 0 (small) */
	NXGE_RBR_TYPE1 = RCR_PKTBUFSZ_1,  /* bcopy buffer size 1 (medium) */
	NXGE_RBR_TYPE2 = RCR_PKTBUFSZ_2	  /* bcopy buffer size 2 (large) */
} nxge_rxbuf_type_t;

typedef	struct _rdc_errlog {
	rdmc_par_err_log_t	pre_par;
	rdmc_par_err_log_t	sha_par;
	uint8_t			compl_err_type;
} rdc_errlog_t;

/*
 * Receive Statistics.
 */
typedef struct _nxge_rx_ring_stats_t {
	uint64_t	ipackets;
	uint64_t	ibytes;
	uint32_t	ierrors;
	uint32_t	multircv;
	uint32_t	brdcstrcv;
	uint32_t	norcvbuf;

	uint32_t	rx_inits;
	uint32_t	rx_jumbo_pkts;
	uint32_t	rx_multi_pkts;
	uint32_t	rx_mtu_pkts;
	uint32_t	rx_no_buf;

	/*
	 * Receive buffer management statistics.
	 */
	uint32_t	rx_new_pages;
	uint32_t	rx_new_mtu_pgs;
	uint32_t	rx_new_nxt_pgs;
	uint32_t	rx_reused_pgs;
	uint32_t	rx_mtu_drops;
	uint32_t	rx_nxt_drops;

	/*
	 * Error event stats.
	 */
	uint32_t	rx_rbr_tmout;
	uint32_t	pkt_too_long_err;
	uint32_t	l2_err;
	uint32_t	l4_cksum_err;
	uint32_t	fflp_soft_err;
	uint32_t	zcp_soft_err;
	uint32_t	rcr_unknown_err;
	uint32_t	dcf_err;
	uint32_t	rbr_tmout;
	uint32_t	rsp_cnt_err;
	uint32_t	byte_en_err;
	uint32_t	byte_en_bus;
	uint32_t	rsp_dat_err;
	uint32_t	rcr_ack_err;
	uint32_t	dc_fifo_err;
	uint32_t	rcr_sha_par;
	uint32_t	rbr_pre_par;
	uint32_t	port_drop_pkt;
	uint32_t	wred_drop;
	uint32_t	rbr_pre_empty;
	uint32_t	rcr_shadow_full;
	uint32_t	config_err;
	uint32_t	rcrincon;
	uint32_t	rcrfull;
	uint32_t	rbr_empty;
	uint32_t	rbrfull;
	uint32_t	rbrlogpage;
	uint32_t	cfiglogpage;
	uint32_t	rcrto;
	uint32_t	rcrthres;
	uint32_t	mex;
	rdc_errlog_t	errlog;
} nxge_rx_ring_stats_t, *p_nxge_rx_ring_stats_t;

typedef struct _nxge_rdc_sys_stats {
	uint32_t	pre_par;
	uint32_t	sha_par;
	uint32_t	id_mismatch;
	uint32_t	ipp_eop_err;
	uint32_t	zcp_eop_err;
} nxge_rdc_sys_stats_t, *p_nxge_rdc_sys_stats_t;

/*
 * Software reserved buffer offset
 */
typedef struct _nxge_rxbuf_off_hdr_t {
	uint32_t		index;
} nxge_rxbuf_off_hdr_t, *p_nxge_rxbuf_off_hdr_t;

/*
 * Definitions for each receive buffer block.
 */
typedef struct _nxge_rbb_t {
	nxge_os_dma_common_t	dma_buf_info;
	uint8_t			rbr_page_num;
	uint32_t		block_size;
	uint16_t		dma_channel;
	uint32_t		bytes_received;
	uint32_t		ref_cnt;
	uint_t			pkt_buf_size;
	uint_t			max_pkt_bufs;
	uint32_t		cur_usage_cnt;
} nxge_rbb_t, *p_nxge_rbb_t;


typedef struct _rx_tx_param_t {
	nxge_logical_page_t logical_pages[NXGE_MAX_LOGICAL_PAGES];
} rx_tx_param_t, *p_rx_tx_param_t;

typedef struct _rx_tx_params {
	struct _tx_param_t	*tx_param_p;
} rx_tx_params_t, *p_rx_tx_params_t;


typedef struct _rx_msg_t {
	nxge_os_dma_common_t	buf_dma;
	nxge_os_mutex_t		lock;
	struct _nxge_t		*nxgep;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	boolean_t		spare_in_use;
	boolean_t		free;
	uint32_t		ref_cnt;
#ifdef RXBUFF_USE_SEPARATE_UP_CNTR
	uint32_t		pass_up_cnt;
	boolean_t		release;
#endif
	nxge_os_frtn_t		freeb;
	size_t			bytes_arrived;
	size_t			bytes_expected;
	size_t			block_size;
	uint32_t		block_index;
	uint32_t		pkt_buf_size;
	uint32_t		pkt_buf_size_code;
	uint32_t		max_pkt_bufs;
	uint32_t		cur_usage_cnt;
	uint32_t		max_usage_cnt;
	uchar_t			*buffer;
	uint32_t		pri;
	uint32_t		shifted_addr;
	boolean_t		use_buf_pool;
	p_mblk_t		rx_mblk_p;
	boolean_t		rx_use_bcopy;
} rx_msg_t, *p_rx_msg_t;
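/*
 * Buffer loaning sketch (illustrative; the actual call sites live in the
 * RXDMA source, not in this header): <freeb> is the free routine
 * intended for desballoc(9F) when <buffer> is loaned upstream as an
 * mblk, and <ref_cnt> tracks the driver's own hold plus any outstanding
 * loans.  A buffer only becomes eligible for reuse in the RBR once every
 * loaned mblk has been freed back and ref_cnt returns to the driver's
 * baseline.
 */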

typedef struct _rx_dma_handle_t {
	nxge_os_dma_handle_t	dma_handle;	/* DMA handle	*/
	nxge_os_acc_handle_t	acc_handle;	/* DMA memory handle */
	npi_handle_t		npi_handle;
} rx_dma_handle_t, *p_rx_dma_handle_t;

#define	RXCOMP_HIST_ELEMENTS 100000

typedef struct _nxge_rxcomphist_t {
	uint_t			comp_cnt;
	uint64_t		rx_comp_entry;
} nxge_rxcomphist_t, *p_nxge_rxcomphist_t;

/* Receive Completion Ring */
typedef struct _rx_rcr_ring_t {
	nxge_os_dma_common_t	rcr_desc;
	uint8_t			rcr_page_num;
	uint8_t			rcr_buf_page_num;

	struct _nxge_t		*nxgep;

	p_nxge_rx_ring_stats_t	rdc_stats;

	rcrcfig_a_t		rcr_cfga;
	rcrcfig_b_t		rcr_cfgb;
	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	uint16_t		index;
	uint16_t		rdc;
	uint16_t		rdc_grp_id;
	uint16_t		ldg_group_id;
	boolean_t		full_hdr_flag;	 /* 1: 18 bytes header */
	uint16_t		sw_priv_hdr_len; /* 0 - 192 bytes (SW) */
	uint32_t		comp_size;	 /* # of RCR entries */
	uint64_t		rcr_addr;
	uint_t			comp_wrap_mask;
	uint_t			comp_rd_index;
	uint_t			comp_wt_index;

	p_rcr_entry_t		rcr_desc_first_p;
	p_rcr_entry_t		rcr_desc_first_pp;
	p_rcr_entry_t		rcr_desc_last_p;
	p_rcr_entry_t		rcr_desc_last_pp;

	p_rcr_entry_t		rcr_desc_rd_head_p;	/* software next read */
	p_rcr_entry_t		rcr_desc_rd_head_pp;

	p_rcr_entry_t		rcr_desc_wt_tail_p;	/* hardware write */
	p_rcr_entry_t		rcr_desc_wt_tail_pp;

	uint64_t		rcr_tail_pp;
	uint64_t		rcr_head_pp;
	struct _rx_rbr_ring_t	*rx_rbr_p;
	uint32_t		intr_timeout;
	uint32_t		intr_threshold;
	uint64_t		max_receive_pkts;
	p_mblk_t		rx_first_mp;
	mac_resource_handle_t	rcr_mac_handle;
	uint32_t		rcvd_pkt_bytes; /* Received bytes of a packet */
} rx_rcr_ring_t, *p_rx_rcr_ring_t;
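/*
 * Completion-ring index arithmetic (an illustrative sketch, assuming
 * comp_size is a power of two and comp_wrap_mask == comp_size - 1):
 * the software read index chases the hardware write index,
 *
 *	rcrp->comp_rd_index = (rcrp->comp_rd_index + 1) &
 *	    rcrp->comp_wrap_mask;
 *
 * and the number of unprocessed entries at any instant is roughly
 * (comp_wt_index - comp_rd_index) & comp_wrap_mask.
 */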



/* Buffer index information */
typedef struct _rxbuf_index_info_t {
	uint32_t buf_index;
	uint32_t start_index;
	uint32_t buf_size;
	uint64_t dvma_addr;
	uint64_t kaddr;
} rxbuf_index_info_t, *p_rxbuf_index_info_t;

/* Receive ring information (buffer block table and search hints) */

typedef struct _rxring_info_t {
	uint32_t hint[3];
	uint32_t block_size_mask;
	uint16_t max_iterations;
	rxbuf_index_info_t buffer[NXGE_DMA_BLOCK];
} rxring_info_t, *p_rxring_info_t;
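/*
 * Address-to-buffer lookup sketch (illustrative; the loop below is not
 * driver code and its local variable names are hypothetical): buffer[]
 * is expected to be sorted by dvma_addr, hint[] caches a starting guess
 * per packet buffer size, and max_iterations bounds a binary search that
 * maps a packet's DMA address back to its owning block:
 *
 *	lo = 0; hi = nblocks - 1;
 *	while (iter++ < ringp->max_iterations && lo <= hi) {
 *		mid = (lo + hi) / 2;
 *		if (pkt_addr < ringp->buffer[mid].dvma_addr)
 *			hi = mid - 1;
 *		else if (pkt_addr >= ringp->buffer[mid].dvma_addr +
 *		    ringp->buffer[mid].buf_size)
 *			lo = mid + 1;
 *		else
 *			break;		/* buffer[mid] owns pkt_addr */
 *	}
 */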


typedef enum {
	RBR_POSTING = 1,	/* We may post rx buffers. */
	RBR_UNMAPPING,		/* We are in the process of unmapping. */
	RBR_UNMAPPED		/* The ring is unmapped. */
} rbr_state_t;


/* Receive Buffer Block Ring */
typedef struct _rx_rbr_ring_t {
	nxge_os_dma_common_t	rbr_desc;
	p_rx_msg_t		*rx_msg_ring;
	p_nxge_dma_common_t	*dma_bufp;
	rbr_cfig_a_t		rbr_cfga;
	rbr_cfig_b_t		rbr_cfgb;
	rbr_kick_t		rbr_kick;
	log_page_vld_t		page_valid;
	log_page_mask_t		page_mask_1;
	log_page_mask_t		page_mask_2;
	log_page_value_t	page_value_1;
	log_page_value_t	page_value_2;
	log_page_relo_t		page_reloc_1;
	log_page_relo_t		page_reloc_2;
	log_page_hdl_t		page_hdl;

	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	nxge_os_mutex_t		post_lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;
	uint16_t		rdc_grp_id;
	uint_t			rbr_max_size;
	uint64_t		rbr_addr;
	uint_t			rbr_wrap_mask;
	uint_t			rbb_max;
	uint_t			rbb_added;
	uint_t			block_size;
	uint_t			num_blocks;
	uint_t			tnblocks;
	uint_t			pkt_buf_size0;
	uint_t			pkt_buf_size0_bytes;
	uint_t			npi_pkt_buf_size0;
	uint_t			pkt_buf_size1;
	uint_t			pkt_buf_size1_bytes;
	uint_t			npi_pkt_buf_size1;
	uint_t			pkt_buf_size2;
	uint_t			pkt_buf_size2_bytes;
	uint_t			npi_pkt_buf_size2;

	uint64_t		rbr_head_pp;
	uint64_t		rbr_tail_pp;
	uint32_t		*rbr_desc_vp;

	p_rx_rcr_ring_t		rx_rcr_p;

	rx_dma_ent_msk_t	rx_dma_ent_mask;

	rbr_hdh_t		rbr_head;
	rbr_hdl_t		rbr_tail;
	uint_t			rbr_wr_index;
	uint_t			rbr_rd_index;
	uint_t			rbr_hw_head_index;
	uint64_t		rbr_hw_head_ptr;

	/* may not be needed */
	p_nxge_rbb_t		rbb_p;

	rxring_info_t  *ring_info;
#ifdef RX_USE_RECLAIM_POST
	uint32_t hw_freed;
	uint32_t sw_freed;
	uint32_t msg_rd_index;
	uint32_t msg_cnt;
#endif
#if	defined(sun4v) && defined(NIU_LP_WORKAROUND)
	uint64_t		hv_rx_buf_base_ioaddr_pp;
	uint64_t		hv_rx_buf_ioaddr_size;
	uint64_t		hv_rx_cntl_base_ioaddr_pp;
	uint64_t		hv_rx_cntl_ioaddr_size;
	boolean_t		hv_set;
#endif
	uint_t			rbr_consumed;
	uint_t			rbr_threshold_hi;
	uint_t			rbr_threshold_lo;
	nxge_rxbuf_type_t	rbr_bufsize_type;
	boolean_t		rbr_use_bcopy;

	/*
	 * <rbr_ref_cnt> is a count of those receive buffers which
	 * have been loaned to the kernel.  We will not free this
	 * ring until the reference count reaches zero (0).
	 */
	uint32_t		rbr_ref_cnt;
	rbr_state_t		rbr_state; /* POSTING, etc */

} rx_rbr_ring_t, *p_rx_rbr_ring_t;
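/*
 * Teardown discipline for <rbr_ref_cnt> and <rbr_state> (a sketch of the
 * intent described in the comment above, not a quote of driver code):
 * the ring's buffer memory may only be released once unmapping has
 * completed and every loaned buffer has come back, i.e. roughly
 *
 *	if (rbrp->rbr_state == RBR_UNMAPPED && rbrp->rbr_ref_cnt == 0)
 *		(free the ring's buffer memory);
 */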

/* Receive Mailbox */
typedef struct _rx_mbox_t {
	nxge_os_dma_common_t	rx_mbox;
	rxdma_cfig1_t		rx_cfg1;
	rxdma_cfig2_t		rx_cfg2;
	uint64_t		mbox_addr;
	boolean_t		cfg_set;

	nxge_os_mutex_t		lock;
	uint16_t		index;
	struct _nxge_t		*nxgep;
	uint16_t		rdc;
} rx_mbox_t, *p_rx_mbox_t;


typedef struct _rx_rbr_rings_t {
	p_rx_rbr_ring_t		*rbr_rings;
	uint32_t		ndmas;
	boolean_t		rxbuf_allocated;
} rx_rbr_rings_t, *p_rx_rbr_rings_t;

typedef struct _rx_rcr_rings_t {
	p_rx_rcr_ring_t		*rcr_rings;
	uint32_t		ndmas;
	boolean_t		cntl_buf_allocated;
} rx_rcr_rings_t, *p_rx_rcr_rings_t;

typedef struct _rx_mbox_areas_t {
	p_rx_mbox_t		*rxmbox_areas;
	uint32_t		ndmas;
	boolean_t		mbox_allocated;
} rx_mbox_areas_t, *p_rx_mbox_areas_t;

/*
 * Global register definitions per chip; they are initialized
 * through the function-zero control registers.
 */

typedef struct _rxdma_globals {
	boolean_t		mode32;
	uint16_t		rxdma_ck_div_cnt;
	uint16_t		rxdma_red_ran_init;
	uint32_t		rxdma_eing_timeout;
} rxdma_globals_t, *p_rxdma_globals;


/*
 * Receive DMA Prototypes.
 */
nxge_status_t nxge_init_rxdma_channel_rcrflush(p_nxge_t, uint8_t);
nxge_status_t nxge_init_rxdma_channels(p_nxge_t);
void nxge_uninit_rxdma_channels(p_nxge_t);
nxge_status_t nxge_reset_rxdma_channel(p_nxge_t, uint16_t);
nxge_status_t nxge_init_rxdma_channel_cntl_stat(p_nxge_t,
	uint16_t, p_rx_dma_ctl_stat_t);
nxge_status_t nxge_enable_rxdma_channel(p_nxge_t,
	uint16_t, p_rx_rbr_ring_t, p_rx_rcr_ring_t,
	p_rx_mbox_t);
nxge_status_t nxge_init_rxdma_channel_event_mask(p_nxge_t,
		uint16_t, p_rx_dma_ent_msk_t);

nxge_status_t nxge_rxdma_hw_mode(p_nxge_t, boolean_t);
void nxge_hw_start_rx(p_nxge_t);
void nxge_fixup_rxdma_rings(p_nxge_t);
nxge_status_t nxge_dump_rxdma_channel(p_nxge_t, uint8_t);

void nxge_rxdma_fix_channel(p_nxge_t, uint16_t);
void nxge_rxdma_fixup_channel(p_nxge_t, uint16_t, int);
int nxge_rxdma_get_ring_index(p_nxge_t, uint16_t);

void nxge_rxdma_regs_dump_channels(p_nxge_t);
nxge_status_t nxge_rxdma_handle_sys_errors(p_nxge_t);
void nxge_rxdma_inject_err(p_nxge_t, uint32_t, uint8_t);


#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_NXGE_NXGE_RXDMA_H */