xref: /onnv-gate/usr/src/uts/common/io/hxge/hxge_rxdma.c (revision 11878:ac93462db6d7)
16349Sqs148142 /*
26349Sqs148142  * CDDL HEADER START
36349Sqs148142  *
46349Sqs148142  * The contents of this file are subject to the terms of the
56349Sqs148142  * Common Development and Distribution License (the "License").
66349Sqs148142  * You may not use this file except in compliance with the License.
76349Sqs148142  *
86349Sqs148142  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
96349Sqs148142  * or http://www.opensolaris.org/os/licensing.
106349Sqs148142  * See the License for the specific language governing permissions
116349Sqs148142  * and limitations under the License.
126349Sqs148142  *
136349Sqs148142  * When distributing Covered Code, include this CDDL HEADER in each
146349Sqs148142  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
156349Sqs148142  * If applicable, add the following below this CDDL HEADER, with the
166349Sqs148142  * fields enclosed by brackets "[]" replaced with your own identifying
176349Sqs148142  * information: Portions Copyright [yyyy] [name of copyright owner]
186349Sqs148142  *
196349Sqs148142  * CDDL HEADER END
206349Sqs148142  */
216349Sqs148142 /*
22*11878SVenu.Iyer@Sun.COM  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
236349Sqs148142  * Use is subject to license terms.
246349Sqs148142  */
256349Sqs148142 
266349Sqs148142 #include <hxge_impl.h>
276349Sqs148142 #include <hxge_rxdma.h>
2810091SMichael.Speer@Sun.COM #include <hpi.h>
2910091SMichael.Speer@Sun.COM #include <hpi_vir.h>
306349Sqs148142 
316349Sqs148142 /*
328141SMichael.Speer@Sun.COM  * Number of blocks to accumulate before re-enabling DMA
338141SMichael.Speer@Sun.COM  * when we get RBR empty.
348141SMichael.Speer@Sun.COM  */
358422SMichael.Speer@Sun.COM #define	HXGE_RBR_EMPTY_THRESHOLD	64
368141SMichael.Speer@Sun.COM 
378141SMichael.Speer@Sun.COM /*
386349Sqs148142  * Globals: tunable parameters (/etc/system or adb)
396349Sqs148142  *
406349Sqs148142  */
416349Sqs148142 extern uint32_t hxge_rbr_size;
426349Sqs148142 extern uint32_t hxge_rcr_size;
436349Sqs148142 extern uint32_t hxge_rbr_spare_size;
446349Sqs148142 extern uint32_t hxge_mblks_pending;
456349Sqs148142 
466349Sqs148142 /*
476349Sqs148142  * Tunables to manage the receive buffer blocks.
486349Sqs148142  *
496349Sqs148142  * hxge_rx_threshold_hi: copy all buffers.
506349Sqs148142  * hxge_rx_bcopy_size_type: receive buffer block size type.
516349Sqs148142  * hxge_rx_threshold_lo: copy only up to tunable block size type.
526349Sqs148142  */
536349Sqs148142 extern hxge_rxbuf_threshold_t hxge_rx_threshold_hi;
546349Sqs148142 extern hxge_rxbuf_type_t hxge_rx_buf_size_type;
556349Sqs148142 extern hxge_rxbuf_threshold_t hxge_rx_threshold_lo;
566349Sqs148142 
578141SMichael.Speer@Sun.COM /*
588141SMichael.Speer@Sun.COM  * Static local functions.
598141SMichael.Speer@Sun.COM  */
606349Sqs148142 static hxge_status_t hxge_map_rxdma(p_hxge_t hxgep);
616349Sqs148142 static void hxge_unmap_rxdma(p_hxge_t hxgep);
626349Sqs148142 static hxge_status_t hxge_rxdma_hw_start_common(p_hxge_t hxgep);
636349Sqs148142 static hxge_status_t hxge_rxdma_hw_start(p_hxge_t hxgep);
646349Sqs148142 static void hxge_rxdma_hw_stop(p_hxge_t hxgep);
656349Sqs148142 static hxge_status_t hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
667618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
677618SMichael.Speer@Sun.COM     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
687618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
697618SMichael.Speer@Sun.COM     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
706349Sqs148142 static void hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
716349Sqs148142 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
726349Sqs148142 static hxge_status_t hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,
737618SMichael.Speer@Sun.COM     uint16_t dma_channel, p_hxge_dma_common_t *dma_rbr_cntl_p,
747618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
757618SMichael.Speer@Sun.COM     p_rx_rbr_ring_t *rbr_p, p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p);
766349Sqs148142 static void hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
776349Sqs148142 	p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p);
786349Sqs148142 static hxge_status_t hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep,
796349Sqs148142 	uint16_t channel, p_hxge_dma_common_t *dma_buf_p,
806349Sqs148142 	p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks);
816349Sqs148142 static void hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
826349Sqs148142 	p_rx_rbr_ring_t rbr_p);
836349Sqs148142 static hxge_status_t hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
848103SQiyan.Sun@Sun.COM 	p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
858103SQiyan.Sun@Sun.COM 	int n_init_kick);
866349Sqs148142 static hxge_status_t hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel);
876349Sqs148142 static mblk_t *hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
888718SMichael.Speer@Sun.COM 	p_rx_rcr_ring_t	rcr_p, rdc_stat_t cs, int bytes_to_read);
898544SQiyan.Sun@Sun.COM static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcr_p,
908544SQiyan.Sun@Sun.COM     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs);
916349Sqs148142 static void hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
926349Sqs148142 	p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p,
936864Sqs148142 	mblk_t ** mp, mblk_t ** mp_cont, uint32_t *invalid_rcr_entry);
946349Sqs148142 static hxge_status_t hxge_disable_rxdma_channel(p_hxge_t hxgep,
956349Sqs148142 	uint16_t channel);
966349Sqs148142 static p_rx_msg_t hxge_allocb(size_t, uint32_t, p_hxge_dma_common_t);
976349Sqs148142 static void hxge_freeb(p_rx_msg_t);
986349Sqs148142 static hxge_status_t hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index,
996349Sqs148142 	p_hxge_ldv_t ldvp, rdc_stat_t cs);
1006349Sqs148142 static hxge_status_t hxge_rxbuf_index_info_init(p_hxge_t hxgep,
1016349Sqs148142 	p_rx_rbr_ring_t rx_dmap);
1026349Sqs148142 static hxge_status_t hxge_rxdma_fatal_err_recover(p_hxge_t hxgep,
1036349Sqs148142 	uint16_t channel);
1046349Sqs148142 static hxge_status_t hxge_rx_port_fatal_err_recover(p_hxge_t hxgep);
1058236SQiyan.Sun@Sun.COM static void hxge_rbr_empty_restore(p_hxge_t hxgep,
1068236SQiyan.Sun@Sun.COM 	p_rx_rbr_ring_t rx_rbr_p);
1076349Sqs148142 
1086349Sqs148142 hxge_status_t
hxge_init_rxdma_channels(p_hxge_t hxgep)1096349Sqs148142 hxge_init_rxdma_channels(p_hxge_t hxgep)
1106349Sqs148142 {
1116864Sqs148142 	hxge_status_t		status = HXGE_OK;
1126864Sqs148142 	block_reset_t		reset_reg;
1138422SMichael.Speer@Sun.COM 	int			i;
1146349Sqs148142 
1156349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_init_rxdma_channels"));
1166349Sqs148142 
1178422SMichael.Speer@Sun.COM 	for (i = 0; i < HXGE_MAX_RDCS; i++)
1188422SMichael.Speer@Sun.COM 		hxgep->rdc_first_intr[i] = B_TRUE;
1198422SMichael.Speer@Sun.COM 
1206864Sqs148142 	/* Reset RDC block from PEU to clear any previous state */
1216864Sqs148142 	reset_reg.value = 0;
1226864Sqs148142 	reset_reg.bits.rdc_rst = 1;
1236864Sqs148142 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);
1246864Sqs148142 	HXGE_DELAY(1000);
1256864Sqs148142 
1266349Sqs148142 	status = hxge_map_rxdma(hxgep);
1276349Sqs148142 	if (status != HXGE_OK) {
1286349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1296349Sqs148142 		    "<== hxge_init_rxdma: status 0x%x", status));
1306349Sqs148142 		return (status);
1316349Sqs148142 	}
1326349Sqs148142 
1336349Sqs148142 	status = hxge_rxdma_hw_start_common(hxgep);
1346349Sqs148142 	if (status != HXGE_OK) {
1356349Sqs148142 		hxge_unmap_rxdma(hxgep);
1366349Sqs148142 	}
1376349Sqs148142 
1386349Sqs148142 	status = hxge_rxdma_hw_start(hxgep);
1396349Sqs148142 	if (status != HXGE_OK) {
1406349Sqs148142 		hxge_unmap_rxdma(hxgep);
1416349Sqs148142 	}
1426349Sqs148142 
1436349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1446349Sqs148142 	    "<== hxge_init_rxdma_channels: status 0x%x", status));
1456349Sqs148142 	return (status);
1466349Sqs148142 }
1476349Sqs148142 
1486349Sqs148142 void
hxge_uninit_rxdma_channels(p_hxge_t hxgep)1496349Sqs148142 hxge_uninit_rxdma_channels(p_hxge_t hxgep)
1506349Sqs148142 {
1516349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_uninit_rxdma_channels"));
1526349Sqs148142 
1536349Sqs148142 	hxge_rxdma_hw_stop(hxgep);
1546349Sqs148142 	hxge_unmap_rxdma(hxgep);
1556349Sqs148142 
1566349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_uinit_rxdma_channels"));
1576349Sqs148142 }
1586349Sqs148142 
1596349Sqs148142 hxge_status_t
hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep,uint16_t channel,rdc_stat_t * cs_p)1606349Sqs148142 hxge_init_rxdma_channel_cntl_stat(p_hxge_t hxgep, uint16_t channel,
1616349Sqs148142     rdc_stat_t *cs_p)
1626349Sqs148142 {
1636349Sqs148142 	hpi_handle_t	handle;
1646349Sqs148142 	hpi_status_t	rs = HPI_SUCCESS;
1656349Sqs148142 	hxge_status_t	status = HXGE_OK;
1666349Sqs148142 
1676349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1686349Sqs148142 	    "<== hxge_init_rxdma_channel_cntl_stat"));
1696349Sqs148142 
1706349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
1716349Sqs148142 	rs = hpi_rxdma_control_status(handle, OP_SET, channel, cs_p);
1726349Sqs148142 
1736349Sqs148142 	if (rs != HPI_SUCCESS) {
1746349Sqs148142 		status = HXGE_ERROR | rs;
1756349Sqs148142 	}
1766349Sqs148142 	return (status);
1776349Sqs148142 }
1786349Sqs148142 
1796349Sqs148142 
/*
 * hxge_enable_rxdma_channel
 *
 *	Program one receive DMA channel from configuration composed at init
 *	time, prime its buffer ring, and enable it.  The ordering below is
 *	deliberate: ring configuration, then RCR threshold/timeout, then the
 *	initial RBR kick, then clearing the rbr-empty condition, and only
 *	then enabling the DMA.
 *
 * Arguments:
 *	hxgep		- per-instance soft state pointer
 *	channel		- RDC channel number to enable
 *	rbr_p		- receive block ring state for this channel
 *	rcr_p		- receive completion ring state for this channel
 *	mbox_p		- mailbox state for this channel
 *	n_init_kick	- number of RBR descriptors to post initially
 *
 * Returns:
 *	HXGE_OK on success, else HXGE_ERROR or'ed with the HPI status code.
 */
hxge_status_t
hxge_enable_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
    int n_init_kick)
{
	hpi_handle_t		handle;
	rdc_desc_cfg_t 		rdc_desc;
	rdc_rcr_cfg_b_t		*cfgb_p;
	hpi_status_t		rs = HPI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel"));
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Use configuration data composed at init time. Write to hardware the
	 * receive ring configurations.
	 */
	rdc_desc.mbox_enable = 1;
	rdc_desc.mbox_addr = mbox_p->mbox_addr;
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_enable_rxdma_channel: mboxp $%p($%p)",
	    mbox_p->mbox_addr, rdc_desc.mbox_addr));

	rdc_desc.rbr_len = rbr_p->rbb_max;
	rdc_desc.rbr_addr = rbr_p->rbr_addr;

	/*
	 * NOTE(review): no default case here, so rdc_desc.page_size is left
	 * uninitialized for any other rx_bksize_code value.  Presumably the
	 * config code restricts it to 4K/8K -- confirm with the attach path.
	 */
	switch (hxgep->rx_bksize_code) {
	case RBR_BKSIZE_4K:
		rdc_desc.page_size = SIZE_4KB;
		break;
	case RBR_BKSIZE_8K:
		rdc_desc.page_size = SIZE_8KB;
		break;
	}

	/* The three per-channel packet buffer sizes, all marked valid. */
	rdc_desc.size0 = rbr_p->hpi_pkt_buf_size0;
	rdc_desc.valid0 = 1;

	rdc_desc.size1 = rbr_p->hpi_pkt_buf_size1;
	rdc_desc.valid1 = 1;

	rdc_desc.size2 = rbr_p->hpi_pkt_buf_size2;
	rdc_desc.valid2 = 1;

	/* Header handling: full-header mode and software private header. */
	rdc_desc.full_hdr = rcr_p->full_hdr_flag;
	rdc_desc.offset = rcr_p->sw_priv_hdr_len;

	rdc_desc.rcr_len = rcr_p->comp_size;
	rdc_desc.rcr_addr = rcr_p->rcr_addr;

	/* Interrupt coalescing parameters from the cached RCR config B. */
	cfgb_p = &(rcr_p->rcr_cfgb);
	rdc_desc.rcr_threshold = cfgb_p->bits.pthres;
	rdc_desc.rcr_timeout = cfgb_p->bits.timeout;
	rdc_desc.rcr_timeout_enable = cfgb_p->bits.entout;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "rbr_len qlen %d pagesize code %d rcr_len %d",
	    rdc_desc.rbr_len, rdc_desc.page_size, rdc_desc.rcr_len));
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_enable_rxdma_channel: "
	    "size 0 %d size 1 %d size 2 %d",
	    rbr_p->hpi_pkt_buf_size0, rbr_p->hpi_pkt_buf_size1,
	    rbr_p->hpi_pkt_buf_size2));

	/* Write the assembled ring configuration to the hardware. */
	rs = hpi_rxdma_cfg_rdc_ring(handle, rbr_p->rdc, &rdc_desc);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/*
	 * Enable the timeout and threshold.
	 */
	rs = hpi_rxdma_cfg_rdc_rcr_threshold(handle, channel,
	    rdc_desc.rcr_threshold);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	rs = hpi_rxdma_cfg_rdc_rcr_timeout(handle, channel,
	    rdc_desc.rcr_timeout);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	/* Kick the DMA engine */
	hpi_rxdma_rdc_rbr_kick(handle, channel, n_init_kick);

	/* Clear the rbr empty bit */
	(void) hpi_rxdma_channel_rbr_empty_clear(handle, channel);

	/*
	 * Enable the DMA
	 */
	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != HPI_SUCCESS) {
		return (HXGE_ERROR | rs);
	}

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_enable_rxdma_channel"));

	return (HXGE_OK);
}
2816349Sqs148142 
2826349Sqs148142 static hxge_status_t
hxge_disable_rxdma_channel(p_hxge_t hxgep,uint16_t channel)2836349Sqs148142 hxge_disable_rxdma_channel(p_hxge_t hxgep, uint16_t channel)
2846349Sqs148142 {
2856349Sqs148142 	hpi_handle_t handle;
2866349Sqs148142 	hpi_status_t rs = HPI_SUCCESS;
2876349Sqs148142 
2886349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_disable_rxdma_channel"));
2896349Sqs148142 
2906349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
2916349Sqs148142 
2926349Sqs148142 	/* disable the DMA */
2936349Sqs148142 	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
2946349Sqs148142 	if (rs != HPI_SUCCESS) {
2956349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2966349Sqs148142 		    "<== hxge_disable_rxdma_channel:failed (0x%x)", rs));
2976349Sqs148142 		return (HXGE_ERROR | rs);
2986349Sqs148142 	}
2996349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_disable_rxdma_channel"));
3006349Sqs148142 	return (HXGE_OK);
3016349Sqs148142 }
3026349Sqs148142 
3036349Sqs148142 hxge_status_t
hxge_rxdma_channel_rcrflush(p_hxge_t hxgep,uint8_t channel)3046349Sqs148142 hxge_rxdma_channel_rcrflush(p_hxge_t hxgep, uint8_t channel)
3056349Sqs148142 {
3066349Sqs148142 	hpi_handle_t	handle;
3076349Sqs148142 	hxge_status_t	status = HXGE_OK;
3086349Sqs148142 
3096349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
3106349Sqs148142 	    "==> hxge_rxdma_channel_rcrflush"));
3116349Sqs148142 
3126349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
3136349Sqs148142 	hpi_rxdma_rdc_rcr_flush(handle, channel);
3146349Sqs148142 
3156349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
3166349Sqs148142 	    "<== hxge_rxdma_channel_rcrflush"));
3176349Sqs148142 	return (status);
3186349Sqs148142 
3196349Sqs148142 }
3206349Sqs148142 
3216349Sqs148142 #define	MID_INDEX(l, r) ((r + l + 1) >> 1)
3226349Sqs148142 
3236349Sqs148142 #define	TO_LEFT -1
3246349Sqs148142 #define	TO_RIGHT 1
3256349Sqs148142 #define	BOTH_RIGHT (TO_RIGHT + TO_RIGHT)
3266349Sqs148142 #define	BOTH_LEFT (TO_LEFT + TO_LEFT)
3276349Sqs148142 #define	IN_MIDDLE (TO_RIGHT + TO_LEFT)
3286349Sqs148142 #define	NO_HINT 0xffffffff
3296349Sqs148142 
/*
 * hxge_rxbuf_pp_to_vp
 *
 *	Translate the packet buffer DVMA (physical-side) address reported by
 *	the hardware into the corresponding kernel virtual address, the index
 *	of the rx message block, and the offset within that block.
 *
 *	The lookup first consults a per-buffer-size "hint" (the chunk most
 *	recently used for that size); on a hint miss it binary-searches the
 *	sorted chunk table built by hxge_rxbuf_index_info_init().
 *
 * Arguments:
 *	hxgep		- per-instance soft state pointer (debug only)
 *	rbr_p		- receive block ring owning the buffer
 *	pktbufsz_type	- buffer size class 0/1/2, or RCR_SINGLE_BLOCK
 *	pkt_buf_addr_pp	- DVMA address from the RCR entry (input)
 *	pkt_buf_addr_p	- out: kernel virtual address of the buffer
 *	bufoffset	- out: offset of the buffer within its block
 *	msg_index	- out: global rx message block index
 *
 * Returns:
 *	HXGE_OK on success; HXGE_ERROR on a bad size type or if the address
 *	is not found in the chunk table.
 */
/*ARGSUSED*/
hxge_status_t
hxge_rxbuf_pp_to_vp(p_hxge_t hxgep, p_rx_rbr_ring_t rbr_p,
    uint8_t pktbufsz_type, uint64_t *pkt_buf_addr_pp,
    uint64_t **pkt_buf_addr_p, uint32_t *bufoffset, uint32_t *msg_index)
{
	int			bufsize;
	uint64_t		pktbuf_pp;
	uint64_t		dvma_addr;
	rxring_info_t		*ring_info;
	int			base_side, end_side;
	int			r_index, l_index, anchor_index;
	int			found, search_done;
	uint32_t		offset, chunk_size, block_size, page_size_mask;
	uint32_t		chunk_index, block_index, total_index;
	int			max_iterations, iteration;
	rxbuf_index_info_t	*bufinfo;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_rxbuf_pp_to_vp"));

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d",
	    pkt_buf_addr_pp, pktbufsz_type));

	/* The DVMA address arrives as a pointer; widen it to 64 bits. */
#if defined(__i386)
	pktbuf_pp = (uint64_t)(uint32_t)pkt_buf_addr_pp;
#else
	pktbuf_pp = (uint64_t)pkt_buf_addr_pp;
#endif

	switch (pktbufsz_type) {
	case 0:
		bufsize = rbr_p->pkt_buf_size0;
		break;
	case 1:
		bufsize = rbr_p->pkt_buf_size1;
		break;
	case 2:
		bufsize = rbr_p->pkt_buf_size2;
		break;
	case RCR_SINGLE_BLOCK:
		/* Jumbo case: the buffer is the whole block. */
		bufsize = 0;
		anchor_index = 0;
		break;
	default:
		return (HXGE_ERROR);
	}

	/* With a single chunk there is nothing to search. */
	if (rbr_p->num_blocks == 1) {
		anchor_index = 0;
		ring_info = rbr_p->ring_info;
		bufinfo = (rxbuf_index_info_t *)ring_info->buffer;

		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (found, 1 block) "
		    "buf_pp $%p btype %d anchor_index %d bufinfo $%p",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index, bufinfo));

		goto found_index;
	}

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: buf_pp $%p btype %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

	ring_info = rbr_p->ring_info;
	found = B_FALSE;
	bufinfo = (rxbuf_index_info_t *)ring_info->buffer;
	iteration = 0;
	max_iterations = ring_info->max_iterations;

	/*
	 * First check if this block has been seen recently.  This is
	 * indicated by a hint which is initialized when the first buffer of
	 * the block is seen.  The hint is reset when the last buffer of the
	 * block has been processed.  As three block sizes are supported,
	 * three hints are kept.  The idea behind the hints is that once the
	 * hardware uses a block for a buffer of that size, it will use it
	 * exclusively for that size and will use it until it is exhausted.
	 * It is assumed that there would be a single block being used for
	 * the same buffer sizes at any given time.
	 */
	if (ring_info->hint[pktbufsz_type] != NO_HINT) {
		anchor_index = ring_info->hint[pktbufsz_type];
		dvma_addr = bufinfo[anchor_index].dvma_addr;
		chunk_size = bufinfo[anchor_index].buf_size;
		if ((pktbuf_pp >= dvma_addr) &&
		    (pktbuf_pp < (dvma_addr + chunk_size))) {
			found = B_TRUE;
			/*
			 * check if this is the last buffer in the block If so,
			 * then reset the hint for the size;
			 */

			if ((pktbuf_pp + bufsize) >= (dvma_addr + chunk_size))
				ring_info->hint[pktbufsz_type] = NO_HINT;
		}
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (!found)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));

		/*
		 * This is the first buffer of the block of this size. Need to
		 * search the whole information array. the search algorithm
		 * uses a binary tree search algorithm. It assumes that the
		 * information is already sorted with increasing order info[0]
		 * < info[1] < info[2]  .... < info[n-1] where n is the size of
		 * the information array
		 */
		r_index = rbr_p->num_blocks - 1;
		l_index = 0;
		search_done = B_FALSE;
		anchor_index = MID_INDEX(r_index, l_index);
		while (search_done == B_FALSE) {
			/*
			 * max_iterations bounds the loop even if the table
			 * were somehow inconsistent.
			 */
			if ((r_index == l_index) ||
			    (iteration >= max_iterations))
				search_done = B_TRUE;

			end_side = TO_RIGHT;	/* to the right */
			base_side = TO_LEFT;	/* to the left */
			/* read the DVMA address information and sort it */
			dvma_addr = bufinfo[anchor_index].dvma_addr;
			chunk_size = bufinfo[anchor_index].buf_size;

			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
			    "==> hxge_rxbuf_pp_to_vp: (searching)"
			    "buf_pp $%p btype %d "
			    "anchor_index %d chunk_size %d dvmaaddr $%p",
			    pkt_buf_addr_pp, pktbufsz_type, anchor_index,
			    chunk_size, dvma_addr));

			if (pktbuf_pp >= dvma_addr)
				base_side = TO_RIGHT;	/* to the right */
			if (pktbuf_pp < (dvma_addr + chunk_size))
				end_side = TO_LEFT;	/* to the left */

			switch (base_side + end_side) {
			case IN_MIDDLE:
				/* found */
				found = B_TRUE;
				search_done = B_TRUE;
				/* Set the hint unless this buffer is last. */
				if ((pktbuf_pp + bufsize) <
				    (dvma_addr + chunk_size))
					ring_info->hint[pktbufsz_type] =
					    bufinfo[anchor_index].buf_index;
				break;
			case BOTH_RIGHT:
				/* not found: go to the right */
				l_index = anchor_index + 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;

			case BOTH_LEFT:
				/* not found: go to the left */
				r_index = anchor_index - 1;
				anchor_index = MID_INDEX(r_index, l_index);
				break;
			default:	/* should not come here */
				return (HXGE_ERROR);
			}
			iteration++;
		}

		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (search done)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
	}

	if (found == B_FALSE) {
		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
		    "==> hxge_rxbuf_pp_to_vp: (search failed)"
		    "buf_pp $%p btype %d anchor_index %d",
		    pkt_buf_addr_pp, pktbufsz_type, anchor_index));
		return (HXGE_ERROR);
	}

found_index:
	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND1)"
	    "buf_pp $%p btype %d bufsize %d anchor_index %d",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index));

	/* index of the first block in this chunk */
	chunk_index = bufinfo[anchor_index].start_index;
	dvma_addr = bufinfo[anchor_index].dvma_addr;
	page_size_mask = ring_info->block_size_mask;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND3), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize,
	    anchor_index, chunk_index, dvma_addr));

	offset = pktbuf_pp - dvma_addr;	/* offset within the chunk */
	block_size = rbr_p->block_size;	/* System  block(page) size */

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: (FOUND4), get chunk)"
	    "buf_pp $%p btype %d bufsize %d "
	    "anchor_index %d chunk_index %d dvma $%p "
	    "offset %d block_size %d",
	    pkt_buf_addr_pp, pktbufsz_type, bufsize, anchor_index,
	    chunk_index, dvma_addr, offset, block_size));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> getting total index"));

	block_index = (offset / block_size);	/* index within chunk */
	total_index = chunk_index + block_index;

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d ",
	    total_index, dvma_addr, offset, block_size, block_index));

	/* Kernel virtual address = chunk kaddr + offset within chunk. */
#if defined(__i386)
	*pkt_buf_addr_p = (uint64_t *)((uint32_t)bufinfo[anchor_index].kaddr +
	    (uint32_t)offset);
#else
	*pkt_buf_addr_p = (uint64_t *)((uint64_t)bufinfo[anchor_index].kaddr +
	    offset);
#endif

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: "
	    "total_index %d dvma_addr $%p "
	    "offset %d block_size %d "
	    "block_index %d "
	    "*pkt_buf_addr_p $%p",
	    total_index, dvma_addr, offset, block_size,
	    block_index, *pkt_buf_addr_p));

	*msg_index = total_index;
	*bufoffset = (offset & page_size_mask);

	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
	    "==> hxge_rxbuf_pp_to_vp: get msg index: "
	    "msg_index %d bufoffset_index %d",
	    *msg_index, *bufoffset));
	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "<== hxge_rxbuf_pp_to_vp"));

	return (HXGE_OK);
}
5796349Sqs148142 
5806349Sqs148142 
5816349Sqs148142 /*
5826349Sqs148142  * used by quick sort (qsort) function
5836349Sqs148142  * to perform comparison
5846349Sqs148142  */
5856349Sqs148142 static int
hxge_sort_compare(const void * p1,const void * p2)5866349Sqs148142 hxge_sort_compare(const void *p1, const void *p2)
5876349Sqs148142 {
5886349Sqs148142 
5896349Sqs148142 	rxbuf_index_info_t *a, *b;
5906349Sqs148142 
5916349Sqs148142 	a = (rxbuf_index_info_t *)p1;
5926349Sqs148142 	b = (rxbuf_index_info_t *)p2;
5936349Sqs148142 
5946349Sqs148142 	if (a->dvma_addr > b->dvma_addr)
5956349Sqs148142 		return (1);
5966349Sqs148142 	if (a->dvma_addr < b->dvma_addr)
5976349Sqs148142 		return (-1);
5986349Sqs148142 	return (0);
5996349Sqs148142 }
6006349Sqs148142 
6016349Sqs148142 /*
6026349Sqs148142  * Grabbed this sort implementation from common/syscall/avl.c
6036349Sqs148142  *
6046349Sqs148142  * Generic shellsort, from K&R (1st ed, p 58.), somewhat modified.
6056349Sqs148142  * v = Ptr to array/vector of objs
6066349Sqs148142  * n = # objs in the array
6076349Sqs148142  * s = size of each obj (must be multiples of a word size)
6086349Sqs148142  * f = ptr to function to compare two objs
6096349Sqs148142  *	returns (-1 = less than, 0 = equal, 1 = greater than
6106349Sqs148142  */
6116349Sqs148142 void
hxge_ksort(caddr_t v,int n,int s,int (* f)())6126349Sqs148142 hxge_ksort(caddr_t v, int n, int s, int (*f) ())
6136349Sqs148142 {
6146349Sqs148142 	int		g, i, j, ii;
6156349Sqs148142 	unsigned int	*p1, *p2;
6166349Sqs148142 	unsigned int	tmp;
6176349Sqs148142 
6186349Sqs148142 	/* No work to do */
6196349Sqs148142 	if (v == NULL || n <= 1)
6206349Sqs148142 		return;
6216349Sqs148142 	/* Sanity check on arguments */
6226349Sqs148142 	ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
6236349Sqs148142 	ASSERT(s > 0);
6246349Sqs148142 
6256349Sqs148142 	for (g = n / 2; g > 0; g /= 2) {
6266349Sqs148142 		for (i = g; i < n; i++) {
6276349Sqs148142 			for (j = i - g; j >= 0 &&
6286349Sqs148142 			    (*f) (v + j * s, v + (j + g) * s) == 1; j -= g) {
6296349Sqs148142 				p1 = (unsigned *)(v + j * s);
6306349Sqs148142 				p2 = (unsigned *)(v + (j + g) * s);
6316349Sqs148142 				for (ii = 0; ii < s / 4; ii++) {
6326349Sqs148142 					tmp = *p1;
6336349Sqs148142 					*p1++ = *p2;
6346349Sqs148142 					*p2++ = tmp;
6356349Sqs148142 				}
6366349Sqs148142 			}
6376349Sqs148142 		}
6386349Sqs148142 	}
6396349Sqs148142 }
6406349Sqs148142 
6416349Sqs148142 /*
6426349Sqs148142  * Initialize data structures required for rxdma
6436349Sqs148142  * buffer dvma->vmem address lookup
6446349Sqs148142  */
6456349Sqs148142 /*ARGSUSED*/
6466349Sqs148142 static hxge_status_t
hxge_rxbuf_index_info_init(p_hxge_t hxgep,p_rx_rbr_ring_t rbrp)6476349Sqs148142 hxge_rxbuf_index_info_init(p_hxge_t hxgep, p_rx_rbr_ring_t rbrp)
6486349Sqs148142 {
6496349Sqs148142 	int		index;
6506349Sqs148142 	rxring_info_t	*ring_info;
6516349Sqs148142 	int		max_iteration = 0, max_index = 0;
6526349Sqs148142 
6536349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_rxbuf_index_info_init"));
6546349Sqs148142 
6556349Sqs148142 	ring_info = rbrp->ring_info;
6566349Sqs148142 	ring_info->hint[0] = NO_HINT;
6576349Sqs148142 	ring_info->hint[1] = NO_HINT;
6586349Sqs148142 	ring_info->hint[2] = NO_HINT;
65911257SMichael.Speer@Sun.COM 	ring_info->hint[3] = NO_HINT;
6606349Sqs148142 	max_index = rbrp->num_blocks;
6616349Sqs148142 
6626349Sqs148142 	/* read the DVMA address information and sort it */
6636349Sqs148142 	/* do init of the information array */
6646349Sqs148142 
6656349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
6666349Sqs148142 	    " hxge_rxbuf_index_info_init Sort ptrs"));
6676349Sqs148142 
6686349Sqs148142 	/* sort the array */
6696349Sqs148142 	hxge_ksort((void *) ring_info->buffer, max_index,
6706349Sqs148142 	    sizeof (rxbuf_index_info_t), hxge_sort_compare);
6716349Sqs148142 
6726349Sqs148142 	for (index = 0; index < max_index; index++) {
6736349Sqs148142 		HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
6746349Sqs148142 		    " hxge_rxbuf_index_info_init: sorted chunk %d "
6756349Sqs148142 		    " ioaddr $%p kaddr $%p size %x",
6766349Sqs148142 		    index, ring_info->buffer[index].dvma_addr,
6776349Sqs148142 		    ring_info->buffer[index].kaddr,
6786349Sqs148142 		    ring_info->buffer[index].buf_size));
6796349Sqs148142 	}
6806349Sqs148142 
6816349Sqs148142 	max_iteration = 0;
6826349Sqs148142 	while (max_index >= (1ULL << max_iteration))
6836349Sqs148142 		max_iteration++;
6846349Sqs148142 	ring_info->max_iterations = max_iteration + 1;
6856349Sqs148142 
6866349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA2_CTL,
6876349Sqs148142 	    " hxge_rxbuf_index_info_init Find max iter %d",
6886349Sqs148142 	    ring_info->max_iterations));
6896349Sqs148142 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_rxbuf_index_info_init"));
6906349Sqs148142 
6916349Sqs148142 	return (HXGE_OK);
6926349Sqs148142 }
6936349Sqs148142 
/*
 * Debug-only helper: dump one receive completion ring (RCR) entry,
 * both as raw 64/32-bit words and broken out per bit field, and then
 * the reconstructed physical packet buffer address.  Compiles to an
 * empty function unless HXGE_DEBUG is defined.
 */
/*ARGSUSED*/
void
hxge_dump_rcr_entry(p_hxge_t hxgep, p_rcr_entry_t entry_p)
{
#ifdef	HXGE_DEBUG

	uint32_t bptr;
	uint64_t pp;

	/* Raw (unshifted) packet buffer address field of the entry */
	bptr = entry_p->bits.pkt_buf_addr;

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "\trcr entry $%p "
	    "\trcr entry 0x%0llx "
	    "\trcr entry 0x%08x "
	    "\trcr entry 0x%08x "
	    "\tvalue 0x%0llx\n"
	    "\tmulti = %d\n"
	    "\tpkt_type = 0x%x\n"
	    "\terror = 0x%04x\n"
	    "\tl2_len = %d\n"
	    "\tpktbufsize = %d\n"
	    "\tpkt_buf_addr = $%p\n"
	    "\tpkt_buf_addr (<< 6) = $%p\n",
	    entry_p,
	    *(int64_t *)entry_p,
	    *(int32_t *)entry_p,
	    /*
	     * NOTE(review): '+ 32' is a byte offset; for dumping the
	     * upper half of an 8-byte entry one would expect '+ 4'.
	     * Looks like a bit-vs-byte offset mixup -- confirm against
	     * the RCR entry layout before relying on this output.
	     */
	    *(int32_t *)((char *)entry_p + 32),
	    entry_p->value,
	    entry_p->bits.multi,
	    entry_p->bits.pkt_type,
	    entry_p->bits.error,
	    entry_p->bits.l2_len,
	    entry_p->bits.pktbufsz,
	    bptr,
	    entry_p->bits.pkt_buf_addr_l));

	/* Reconstruct the full packet buffer address from the entry */
	pp = (entry_p->value & RCR_PKT_BUF_ADDR_MASK) <<
	    RCR_PKT_BUF_ADDR_SHIFT;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rcr pp 0x%llx l2 len %d",
	    pp, (*(int64_t *)entry_p >> 40) & 0x3fff));
#endif
}
7386349Sqs148142 
/*
 * Quiesce the receive path: disable the receive VMAC first so no new
 * traffic arrives, then stop all RX DMA channels.  vmac_lock serializes
 * this against other VMAC enable/disable paths.  The ordering of the
 * two calls matters; do not reorder.
 */
/*ARGSUSED*/
void
hxge_rxdma_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop"));

	MUTEX_ENTER(&hxgep->vmac_lock);
	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
	MUTEX_EXIT(&hxgep->vmac_lock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop"));
}
7526349Sqs148142 
/*
 * Stop the receive path, tear down and rebuild all RX DMA channels,
 * then re-enable the receive VMAC.  The stop/uninit/init sequence must
 * run before the VMAC is turned back on; vmac_lock guards the enable.
 */
void
hxge_rxdma_stop_reinit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_reinit"));

	(void) hxge_rxdma_stop(hxgep);
	(void) hxge_uninit_rxdma_channels(hxgep);
	(void) hxge_init_rxdma_channels(hxgep);

	MUTEX_ENTER(&hxgep->vmac_lock);
	(void) hxge_rx_vmac_enable(hxgep);
	MUTEX_EXIT(&hxgep->vmac_lock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_reinit"));
}
7686349Sqs148142 
7696349Sqs148142 hxge_status_t
hxge_rxdma_hw_mode(p_hxge_t hxgep,boolean_t enable)7706349Sqs148142 hxge_rxdma_hw_mode(p_hxge_t hxgep, boolean_t enable)
7716349Sqs148142 {
7726349Sqs148142 	int			i, ndmas;
7736349Sqs148142 	uint16_t		channel;
7746349Sqs148142 	p_rx_rbr_rings_t	rx_rbr_rings;
7756349Sqs148142 	p_rx_rbr_ring_t		*rbr_rings;
7766349Sqs148142 	hpi_handle_t		handle;
7776349Sqs148142 	hpi_status_t		rs = HPI_SUCCESS;
7786349Sqs148142 	hxge_status_t		status = HXGE_OK;
7796349Sqs148142 
7806349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
7816349Sqs148142 	    "==> hxge_rxdma_hw_mode: mode %d", enable));
7826349Sqs148142 
7836349Sqs148142 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
7846349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
7856349Sqs148142 		    "<== hxge_rxdma_mode: not initialized"));
7866349Sqs148142 		return (HXGE_ERROR);
7876349Sqs148142 	}
7886349Sqs148142 
7896349Sqs148142 	rx_rbr_rings = hxgep->rx_rbr_rings;
7906349Sqs148142 	if (rx_rbr_rings == NULL) {
7916349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
7926349Sqs148142 		    "<== hxge_rxdma_mode: NULL ring pointer"));
7936349Sqs148142 		return (HXGE_ERROR);
7946349Sqs148142 	}
7956349Sqs148142 
7966349Sqs148142 	if (rx_rbr_rings->rbr_rings == NULL) {
7976349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
7986349Sqs148142 		    "<== hxge_rxdma_mode: NULL rbr rings pointer"));
7996349Sqs148142 		return (HXGE_ERROR);
8006349Sqs148142 	}
8016349Sqs148142 
8026349Sqs148142 	ndmas = rx_rbr_rings->ndmas;
8036349Sqs148142 	if (!ndmas) {
8046349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
8056349Sqs148142 		    "<== hxge_rxdma_mode: no channel"));
8066349Sqs148142 		return (HXGE_ERROR);
8076349Sqs148142 	}
8086349Sqs148142 
8096349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
8106349Sqs148142 	    "==> hxge_rxdma_mode (ndmas %d)", ndmas));
8116349Sqs148142 
8126349Sqs148142 	rbr_rings = rx_rbr_rings->rbr_rings;
8136349Sqs148142 
8146349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
8156349Sqs148142 
8166349Sqs148142 	for (i = 0; i < ndmas; i++) {
8176349Sqs148142 		if (rbr_rings == NULL || rbr_rings[i] == NULL) {
8186349Sqs148142 			continue;
8196349Sqs148142 		}
8206349Sqs148142 		channel = rbr_rings[i]->rdc;
8216349Sqs148142 		if (enable) {
8226349Sqs148142 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
8236349Sqs148142 			    "==> hxge_rxdma_hw_mode: channel %d (enable)",
8246349Sqs148142 			    channel));
8256349Sqs148142 			rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
8266349Sqs148142 		} else {
8276349Sqs148142 			HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
8286349Sqs148142 			    "==> hxge_rxdma_hw_mode: channel %d (disable)",
8296349Sqs148142 			    channel));
8306349Sqs148142 			rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
8316349Sqs148142 		}
8326349Sqs148142 	}
8336349Sqs148142 
8346349Sqs148142 	status = ((rs == HPI_SUCCESS) ? HXGE_OK : HXGE_ERROR | rs);
8356349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
8366349Sqs148142 	    "<== hxge_rxdma_hw_mode: status 0x%x", status));
8376349Sqs148142 
8386349Sqs148142 	return (status);
8396349Sqs148142 }
8406349Sqs148142 
8416349Sqs148142 /*
8426349Sqs148142  * Static functions start here.
8436349Sqs148142  */
8446349Sqs148142 static p_rx_msg_t
hxge_allocb(size_t size,uint32_t pri,p_hxge_dma_common_t dmabuf_p)8456349Sqs148142 hxge_allocb(size_t size, uint32_t pri, p_hxge_dma_common_t dmabuf_p)
8466349Sqs148142 {
8476349Sqs148142 	p_rx_msg_t		hxge_mp = NULL;
8486349Sqs148142 	p_hxge_dma_common_t	dmamsg_p;
8496349Sqs148142 	uchar_t			*buffer;
8506349Sqs148142 
8516349Sqs148142 	hxge_mp = KMEM_ZALLOC(sizeof (rx_msg_t), KM_NOSLEEP);
8526349Sqs148142 	if (hxge_mp == NULL) {
8536349Sqs148142 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
8546349Sqs148142 		    "Allocation of a rx msg failed."));
8556349Sqs148142 		goto hxge_allocb_exit;
8566349Sqs148142 	}
8576349Sqs148142 
8586349Sqs148142 	hxge_mp->use_buf_pool = B_FALSE;
8596349Sqs148142 	if (dmabuf_p) {
8606349Sqs148142 		hxge_mp->use_buf_pool = B_TRUE;
8616349Sqs148142 
8626349Sqs148142 		dmamsg_p = (p_hxge_dma_common_t)&hxge_mp->buf_dma;
8636349Sqs148142 		*dmamsg_p = *dmabuf_p;
8646349Sqs148142 		dmamsg_p->nblocks = 1;
8656349Sqs148142 		dmamsg_p->block_size = size;
8666349Sqs148142 		dmamsg_p->alength = size;
8676349Sqs148142 		buffer = (uchar_t *)dmabuf_p->kaddrp;
8686349Sqs148142 
8696349Sqs148142 		dmabuf_p->kaddrp = (void *)((char *)dmabuf_p->kaddrp + size);
8706349Sqs148142 		dmabuf_p->ioaddr_pp = (void *)
8716349Sqs148142 		    ((char *)dmabuf_p->ioaddr_pp + size);
8726349Sqs148142 
8736349Sqs148142 		dmabuf_p->alength -= size;
8746349Sqs148142 		dmabuf_p->offset += size;
8756349Sqs148142 		dmabuf_p->dma_cookie.dmac_laddress += size;
8766349Sqs148142 		dmabuf_p->dma_cookie.dmac_size -= size;
8776349Sqs148142 	} else {
8786349Sqs148142 		buffer = KMEM_ALLOC(size, KM_NOSLEEP);
8796349Sqs148142 		if (buffer == NULL) {
8806349Sqs148142 			HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
8816349Sqs148142 			    "Allocation of a receive page failed."));
8826349Sqs148142 			goto hxge_allocb_fail1;
8836349Sqs148142 		}
8846349Sqs148142 	}
8856349Sqs148142 
8866349Sqs148142 	hxge_mp->rx_mblk_p = desballoc(buffer, size, pri, &hxge_mp->freeb);
8876349Sqs148142 	if (hxge_mp->rx_mblk_p == NULL) {
8886349Sqs148142 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "desballoc failed."));
8896349Sqs148142 		goto hxge_allocb_fail2;
8906349Sqs148142 	}
8916349Sqs148142 	hxge_mp->buffer = buffer;
8926349Sqs148142 	hxge_mp->block_size = size;
8936349Sqs148142 	hxge_mp->freeb.free_func = (void (*) ()) hxge_freeb;
8946349Sqs148142 	hxge_mp->freeb.free_arg = (caddr_t)hxge_mp;
8956349Sqs148142 	hxge_mp->ref_cnt = 1;
8966349Sqs148142 	hxge_mp->free = B_TRUE;
8976349Sqs148142 	hxge_mp->rx_use_bcopy = B_FALSE;
8986349Sqs148142 
8997465SMichael.Speer@Sun.COM 	atomic_inc_32(&hxge_mblks_pending);
9006349Sqs148142 
9016349Sqs148142 	goto hxge_allocb_exit;
9026349Sqs148142 
9036349Sqs148142 hxge_allocb_fail2:
9046349Sqs148142 	if (!hxge_mp->use_buf_pool) {
9056349Sqs148142 		KMEM_FREE(buffer, size);
9066349Sqs148142 	}
9076349Sqs148142 hxge_allocb_fail1:
9086349Sqs148142 	KMEM_FREE(hxge_mp, sizeof (rx_msg_t));
9096349Sqs148142 	hxge_mp = NULL;
9106349Sqs148142 
9116349Sqs148142 hxge_allocb_exit:
9126349Sqs148142 	return (hxge_mp);
9136349Sqs148142 }
9146349Sqs148142 
9156349Sqs148142 p_mblk_t
hxge_dupb(p_rx_msg_t hxge_mp,uint_t offset,size_t size)9166349Sqs148142 hxge_dupb(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
9176349Sqs148142 {
9186349Sqs148142 	p_mblk_t mp;
9196349Sqs148142 
9206349Sqs148142 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "==> hxge_dupb"));
9216349Sqs148142 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "hxge_mp = $%p "
9226349Sqs148142 	    "offset = 0x%08X " "size = 0x%08X", hxge_mp, offset, size));
9236349Sqs148142 
9246349Sqs148142 	mp = desballoc(&hxge_mp->buffer[offset], size, 0, &hxge_mp->freeb);
9256349Sqs148142 	if (mp == NULL) {
9266349Sqs148142 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
9276349Sqs148142 		goto hxge_dupb_exit;
9286349Sqs148142 	}
9296349Sqs148142 
9306349Sqs148142 	atomic_inc_32(&hxge_mp->ref_cnt);
9316349Sqs148142 
9326349Sqs148142 hxge_dupb_exit:
9336349Sqs148142 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
9346349Sqs148142 	return (mp);
9356349Sqs148142 }
9366349Sqs148142 
9376349Sqs148142 p_mblk_t
hxge_dupb_bcopy(p_rx_msg_t hxge_mp,uint_t offset,size_t size)9386349Sqs148142 hxge_dupb_bcopy(p_rx_msg_t hxge_mp, uint_t offset, size_t size)
9396349Sqs148142 {
9406349Sqs148142 	p_mblk_t	mp;
9416349Sqs148142 	uchar_t		*dp;
9426349Sqs148142 
9436349Sqs148142 	mp = allocb(size + HXGE_RXBUF_EXTRA, 0);
9446349Sqs148142 	if (mp == NULL) {
9456349Sqs148142 		HXGE_DEBUG_MSG((NULL, RX_CTL, "desballoc failed"));
9466349Sqs148142 		goto hxge_dupb_bcopy_exit;
9476349Sqs148142 	}
9486349Sqs148142 	dp = mp->b_rptr = mp->b_rptr + HXGE_RXBUF_EXTRA;
9496349Sqs148142 	bcopy((void *) &hxge_mp->buffer[offset], dp, size);
9506349Sqs148142 	mp->b_wptr = dp + size;
9516349Sqs148142 
9526349Sqs148142 hxge_dupb_bcopy_exit:
9536349Sqs148142 
9546349Sqs148142 	HXGE_DEBUG_MSG((NULL, MEM_CTL, "<== hxge_dupb mp = $%p", hxge_mp));
9556349Sqs148142 
9566349Sqs148142 	return (mp);
9576349Sqs148142 }
9586349Sqs148142 
void hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p,
    p_rx_msg_t rx_msg_p);

/*
 * Return a receive buffer block to the hardware RBR: reset the block's
 * usage state, write its shifted DMA address into the next descriptor
 * slot and kick the ring by one entry.  If the channel previously
 * signaled RBR empty, restore it once enough free buffers accumulate.
 *
 * NOTE(review): callers appear to serialize this via the ring's
 * post_lock (see hxge_freeb) -- confirm before calling from a new path.
 */
void
hxge_post_page(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p, p_rx_msg_t rx_msg_p)
{
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_post_page"));

	/* Reuse this buffer */
	rx_msg_p->free = B_FALSE;
	rx_msg_p->cur_usage_cnt = 0;
	rx_msg_p->max_usage_cnt = 0;
	rx_msg_p->pkt_buf_size = 0;

	if (rx_rbr_p->rbr_use_bcopy) {
		rx_msg_p->rx_use_bcopy = B_FALSE;
		atomic_dec_32(&rx_rbr_p->rbr_consumed);
	}
	/* One fewer buffer is outstanding with the stack/hardware */
	atomic_dec_32(&rx_rbr_p->rbr_used);

	/*
	 * Get the rbr header pointer and its offset index.
	 */
	rx_rbr_p->rbr_wr_index = ((rx_rbr_p->rbr_wr_index + 1) &
	    rx_rbr_p->rbr_wrap_mask);
	rx_rbr_p->rbr_desc_vp[rx_rbr_p->rbr_wr_index] = rx_msg_p->shifted_addr;

	/*
	 * Accumulate some buffers in the ring before re-enabling the
	 * DMA channel, if rbr empty was signaled.
	 */
	hpi_rxdma_rdc_rbr_kick(HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc, 1);
	if (rx_rbr_p->rbr_is_empty && (rx_rbr_p->rbb_max -
	    rx_rbr_p->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
		hxge_rbr_empty_restore(hxgep, rx_rbr_p);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "<== hxge_post_page (channel %d post_next_index %d)",
	    rx_rbr_p->rdc, rx_rbr_p->rbr_wr_index));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_post_page"));
}
10016349Sqs148142 
/*
 * Free routine installed by hxge_allocb() (frtn_t free_func): runs when
 * an mblk referencing this receive buffer is freed.  Drops one reference
 * on rx_msg_p; when the last reference goes away the buffer and the
 * rx_msg_t are destroyed (and possibly the ring itself, if it is
 * unmapped and this was its last buffer).  Otherwise, if the buffer was
 * marked free and this was the next-to-last reference, the buffer is
 * reposted to the hardware RBR.
 */
void
hxge_freeb(p_rx_msg_t rx_msg_p)
{
	size_t		size;
	uchar_t		*buffer = NULL;
	int		ref_cnt;
	boolean_t	free_state = B_FALSE;
	rx_rbr_ring_t	*ring = rx_msg_p->rx_rbr_p;

	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "==> hxge_freeb"));
	HXGE_DEBUG_MSG((NULL, MEM2_CTL,
	    "hxge_freeb:rx_msg_p = $%p (block pending %d)",
	    rx_msg_p, hxge_mblks_pending));

	if (ring == NULL)
		return;

	/*
	 * This is to prevent posting activities while we are recovering
	 * from fatal errors. This should not be a performance drag since
	 * ref_cnt != 0 most times.
	 */
	if (ring->rbr_state == RBR_POSTING)
		MUTEX_ENTER(&ring->post_lock);

	/*
	 * First we need to get the free state, then
	 * atomic decrement the reference count to prevent
	 * the race condition with the interrupt thread that
	 * is processing a loaned up buffer block.
	 */
	free_state = rx_msg_p->free;
	ref_cnt = atomic_add_32_nv(&rx_msg_p->ref_cnt, -1);
	if (!ref_cnt) {
		/* Last reference: tear the buffer down */
		atomic_dec_32(&hxge_mblks_pending);

		buffer = rx_msg_p->buffer;
		size = rx_msg_p->block_size;

		HXGE_DEBUG_MSG((NULL, MEM2_CTL, "hxge_freeb: "
		    "will free: rx_msg_p = $%p (block pending %d)",
		    rx_msg_p, hxge_mblks_pending));

		/* Pool-carved buffers are freed with the pool, not here */
		if (!rx_msg_p->use_buf_pool) {
			KMEM_FREE(buffer, size);
		}

		KMEM_FREE(rx_msg_p, sizeof (rx_msg_t));
		/*
		 * Decrement the receive buffer ring's reference
		 * count, too.
		 */
		atomic_dec_32(&ring->rbr_ref_cnt);

		/*
		 * Free the receive buffer ring, iff
		 * 1. all the receive buffers have been freed
		 * 2. and we are in the proper state (that is,
		 *    we are not UNMAPPING).
		 */
		if (ring->rbr_ref_cnt == 0 &&
		    ring->rbr_state == RBR_UNMAPPED) {
			KMEM_FREE(ring, sizeof (*ring));
			/* post_lock has been destroyed already */
			return;
		}
	}

	/*
	 * Repost buffer.
	 */
	if (free_state && (ref_cnt == 1)) {
		HXGE_DEBUG_MSG((NULL, RX_CTL,
		    "hxge_freeb: post page $%p:", rx_msg_p));
		if (ring->rbr_state == RBR_POSTING)
			hxge_post_page(rx_msg_p->hxgep, ring, rx_msg_p);
	}

	if (ring->rbr_state == RBR_POSTING)
		MUTEX_EXIT(&ring->post_lock);

	HXGE_DEBUG_MSG((NULL, MEM2_CTL, "<== hxge_freeb"));
}
10856349Sqs148142 
/*
 * Interrupt handler for one receive DMA channel.  arg1 is the logical
 * device (ldv) for the channel, arg2 the device soft state.  In
 * interrupt mode it acks the channel status, processes packets and
 * error events, re-enables the mailbox interrupt and re-arms the
 * logical group; in polling mode it instead disarms the group (or masks
 * just this device when the group is shared) and lets hxge_rx_poll()
 * drain the ring.  Returns DDI_INTR_CLAIMED/UNCLAIMED.
 */
uint_t
hxge_rx_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ring_handle_t	rhp;
	p_hxge_ldv_t		ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t		hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t		ldgp;
	uint8_t			channel;
	hpi_handle_t		handle;
	rdc_stat_t		cs;
	p_rx_rcr_ring_t		ring;
	p_rx_rbr_ring_t		rbrp;
	mblk_t			*mp = NULL;

	if (ldvp == NULL) {
		HXGE_DEBUG_MSG((NULL, RX_INT_CTL,
		    "<== hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));
		return (DDI_INTR_UNCLAIMED);
	}

	/* Trust the ldv's back-pointer if arg2 disagrees */
	if (arg2 == NULL || (void *) ldvp->hxgep != arg2) {
		hxgep = ldvp->hxgep;
	}

	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
	    "==> hxge_rx_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

	/*
	 * This interrupt handler is for a specific receive dma channel.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	/*
	 * Get the control and status for this channel.
	 */
	channel = ldvp->vdma_index;
	ring = hxgep->rx_rcr_rings->rcr_rings[channel];
	rhp = &hxgep->rx_ring_handles[channel];
	ldgp = ldvp->ldgp;

	ASSERT(ring != NULL);
#if defined(DEBUG)
	if (rhp->started) {
		ASSERT(ring->ldgp == ldgp);
		ASSERT(ring->ldvp == ldvp);
	}
#endif

	MUTEX_ENTER(&ring->lock);

	if (!ring->poll_flag) {
		/* Read-and-ack the channel status */
		RXDMA_REG_READ64(handle, RDC_STAT, channel, &cs.value);
		cs.bits.ptrread = 0;
		cs.bits.pktread = 0;
		RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);

		/*
		 * Process packets, if we are not in polling mode, the ring is
		 * started and the interface is started. The MAC layer under
		 * load will be operating in polling mode for RX traffic.
		 */
		if ((rhp->started) &&
		    (hxgep->hxge_mac_state == HXGE_MAC_STARTED)) {
			mp = hxge_rx_pkts(hxgep, ldvp->vdma_index,
			    ldvp, ring, cs, -1);
		}

		/* Process error events (ring lock dropped across the call). */
		if (cs.value & RDC_STAT_ERROR) {
			MUTEX_EXIT(&ring->lock);
			(void) hxge_rx_err_evnts(hxgep, channel, ldvp, cs);
			MUTEX_ENTER(&ring->lock);
		}

		/*
		 * Enable the mailbox update interrupt if we want to use
		 * mailbox. We probably don't need to use mailbox as it only
		 * saves us one pio read.  Also write 1 to rcrthres and
		 * rcrto to clear these two edge triggered bits.
		 */
		rbrp = hxgep->rx_rbr_rings->rbr_rings[channel];
		MUTEX_ENTER(&rbrp->post_lock);
		if (!rbrp->rbr_is_empty) {
			cs.value = 0;
			cs.bits.mex = 1;
			cs.bits.ptrread = 0;
			cs.bits.pktread = 0;
			RXDMA_REG_WRITE64(handle, RDC_STAT, channel, cs.value);
		}
		MUTEX_EXIT(&rbrp->post_lock);

		if (ldgp->nldvs == 1) {
			/*
			 * Re-arm the group.
			 */
			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
			    ldgp->ldg_timer);
		}
	} else if ((ldgp->nldvs == 1) && (ring->poll_flag)) {
		/*
		 * Disarm the group, if we are not a shared interrupt.
		 */
		(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_FALSE, 0);
	} else if (ring->poll_flag) {
		/*
		 * Mask-off this device from the group.
		 */
		(void) hpi_intr_mask_set(handle, ldvp->ldv, 1);
	}

	MUTEX_EXIT(&ring->lock);

	/*
	 * Send the packets up the stack.
	 */
	if (mp != NULL) {
		mac_rx_ring(hxgep->mach, ring->rcr_mac_handle, mp,
		    ring->rcr_gen_num);
	}

	HXGE_DEBUG_MSG((NULL, RX_INT_CTL, "<== hxge_rx_intr"));
	return (DDI_INTR_CLAIMED);
}
12096349Sqs148142 
12108718SMichael.Speer@Sun.COM /*
12118718SMichael.Speer@Sun.COM  * Enable polling for a ring. Interrupt for the ring is disabled when
12128718SMichael.Speer@Sun.COM  * the hxge interrupt comes (see hxge_rx_intr).
12138718SMichael.Speer@Sun.COM  */
12148718SMichael.Speer@Sun.COM int
hxge_enable_poll(void * arg)12158718SMichael.Speer@Sun.COM hxge_enable_poll(void *arg)
12166349Sqs148142 {
12178718SMichael.Speer@Sun.COM 	p_hxge_ring_handle_t	ring_handle = (p_hxge_ring_handle_t)arg;
12188718SMichael.Speer@Sun.COM 	p_rx_rcr_ring_t		ringp;
12198718SMichael.Speer@Sun.COM 	p_hxge_t		hxgep;
12208718SMichael.Speer@Sun.COM 	p_hxge_ldg_t		ldgp;
12218718SMichael.Speer@Sun.COM 
12228718SMichael.Speer@Sun.COM 	if (ring_handle == NULL) {
122310091SMichael.Speer@Sun.COM 		ASSERT(ring_handle != NULL);
122410091SMichael.Speer@Sun.COM 		return (1);
12258718SMichael.Speer@Sun.COM 	}
12268718SMichael.Speer@Sun.COM 
122710091SMichael.Speer@Sun.COM 
12288718SMichael.Speer@Sun.COM 	hxgep = ring_handle->hxgep;
12298718SMichael.Speer@Sun.COM 	ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];
12308718SMichael.Speer@Sun.COM 
12318718SMichael.Speer@Sun.COM 	MUTEX_ENTER(&ringp->lock);
12328718SMichael.Speer@Sun.COM 
123310091SMichael.Speer@Sun.COM 	/*
123410091SMichael.Speer@Sun.COM 	 * Are we already polling ?
123510091SMichael.Speer@Sun.COM 	 */
123610091SMichael.Speer@Sun.COM 	if (ringp->poll_flag) {
123710091SMichael.Speer@Sun.COM 		MUTEX_EXIT(&ringp->lock);
123810091SMichael.Speer@Sun.COM 		return (1);
123910091SMichael.Speer@Sun.COM 	}
124010091SMichael.Speer@Sun.COM 
12418718SMichael.Speer@Sun.COM 	ldgp = ringp->ldgp;
12428718SMichael.Speer@Sun.COM 	if (ldgp == NULL) {
12438718SMichael.Speer@Sun.COM 		MUTEX_EXIT(&ringp->lock);
124410091SMichael.Speer@Sun.COM 		return (1);
12458718SMichael.Speer@Sun.COM 	}
12468718SMichael.Speer@Sun.COM 
12478718SMichael.Speer@Sun.COM 	/*
12488718SMichael.Speer@Sun.COM 	 * Enable polling
12498718SMichael.Speer@Sun.COM 	 */
125010091SMichael.Speer@Sun.COM 	ringp->poll_flag = B_TRUE;
12518718SMichael.Speer@Sun.COM 
12528718SMichael.Speer@Sun.COM 	MUTEX_EXIT(&ringp->lock);
12538718SMichael.Speer@Sun.COM 	return (0);
12548718SMichael.Speer@Sun.COM }
12558718SMichael.Speer@Sun.COM 
/*
 * Disable polling for a ring and enable its interrupt.
 *
 * Returns 0 on success (or a NULL handle -- NOTE(review): this differs
 * from hxge_enable_poll, which returns 1 for a NULL handle; confirm the
 * callers' expectations), and 1 if the ring has no logical group.
 */
int
hxge_disable_poll(void *arg)
{
	p_hxge_ring_handle_t	ring_handle = (p_hxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		ringp;
	p_hxge_t		hxgep;

	if (ring_handle == NULL) {
		ASSERT(ring_handle != NULL);
		return (0);
	}

	hxgep = ring_handle->hxgep;
	ringp = hxgep->rx_rcr_rings->rcr_rings[ring_handle->index];

	MUTEX_ENTER(&ringp->lock);

	/*
	 * Disable polling: enable interrupt
	 */
	if (ringp->poll_flag) {
		hpi_handle_t		handle;
		rdc_stat_t		cs;
		p_hxge_ldg_t		ldgp;

		/*
		 * Get the control and status for this channel.
		 */
		handle = HXGE_DEV_HPI_HANDLE(hxgep);

		/*
		 * Rearm this logical group if this is a single device
		 * group.
		 */
		ldgp = ringp->ldgp;
		if (ldgp == NULL) {
			MUTEX_EXIT(&ringp->lock);
			return (1);
		}

		ringp->poll_flag = B_FALSE;

		/*
		 * Enable mailbox update, to start interrupts again.
		 */
		cs.value = 0ULL;
		cs.bits.mex = 1;
		cs.bits.pktread = 0;
		cs.bits.ptrread = 0;
		RXDMA_REG_WRITE64(handle, RDC_STAT, ringp->rdc, cs.value);

		if (ldgp->nldvs == 1) {
			/*
			 * Re-arm the group, since it is the only member
			 * of the group.
			 */
			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
			    ldgp->ldg_timer);
		} else {
			/*
			 * Mask-on interrupts for the device and re-arm
			 * the group.
			 */
			(void) hpi_intr_mask_set(handle, ringp->ldvp->ldv, 0);
			(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, B_TRUE,
			    ldgp->ldg_timer);
		}
	}
	MUTEX_EXIT(&ringp->lock);
	return (0);
}
13308718SMichael.Speer@Sun.COM 
13318718SMichael.Speer@Sun.COM /*
13328718SMichael.Speer@Sun.COM  * Poll 'bytes_to_pickup' bytes of message from the rx ring.
13338718SMichael.Speer@Sun.COM  */
mblk_t *
hxge_rx_poll(void *arg, int bytes_to_pickup)
{
	p_hxge_ring_handle_t	rhp = (p_hxge_ring_handle_t)arg;
	p_rx_rcr_ring_t		ring;
	p_hxge_t		hxgep;
	hpi_handle_t		handle;
	rdc_stat_t		cs;
	mblk_t			*mblk;
	p_hxge_ldv_t		ldvp;

	hxgep = rhp->hxgep;

	/*
	 * Get the control and status for this channel.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	ring = hxgep->rx_rcr_rings->rcr_rings[rhp->index];

	MUTEX_ENTER(&ring->lock);
	ASSERT(ring->poll_flag == B_TRUE);
	ASSERT(rhp->started);

	/*
	 * Defensive re-check for non-DEBUG builds (the ASSERTs above
	 * compile away): if the ring is not in polling mode there is
	 * nothing for us to pick up.
	 */
	if (!ring->poll_flag) {
		MUTEX_EXIT(&ring->lock);
		return ((mblk_t *)NULL);
	}

	/*
	 * Get the control and status bits for the ring.
	 */
	RXDMA_REG_READ64(handle, RDC_STAT, rhp->index, &cs.value);
	cs.bits.ptrread = 0;
	cs.bits.pktread = 0;
	RXDMA_REG_WRITE64(handle, RDC_STAT, rhp->index, cs.value);

	/*
	 * Process packets.  Pass -1 semantics aside, hxge_rx_pkts stops
	 * chaining once at least bytes_to_pickup bytes have been gathered.
	 */
	mblk = hxge_rx_pkts(hxgep, ring->ldvp->vdma_index,
	    ring->ldvp, ring, cs, bytes_to_pickup);
	ldvp = ring->ldvp;

	/*
	 * Process Error Events.
	 */
	if (ldvp && (cs.value & RDC_STAT_ERROR)) {
		/*
		 * Recovery routines will grab the RCR ring lock.
		 */
		MUTEX_EXIT(&ring->lock);
		(void) hxge_rx_err_evnts(hxgep, ldvp->vdma_index, ldvp, cs);
		MUTEX_ENTER(&ring->lock);
	}

	MUTEX_EXIT(&ring->lock);
	return (mblk);
}
13926349Sqs148142 
13936349Sqs148142 /*ARGSUSED*/
13946349Sqs148142 mblk_t *
hxge_rx_pkts(p_hxge_t hxgep,uint_t vindex,p_hxge_ldv_t ldvp,p_rx_rcr_ring_t rcrp,rdc_stat_t cs,int bytes_to_read)13956349Sqs148142 hxge_rx_pkts(p_hxge_t hxgep, uint_t vindex, p_hxge_ldv_t ldvp,
13968718SMichael.Speer@Sun.COM     p_rx_rcr_ring_t rcrp, rdc_stat_t cs, int bytes_to_read)
13976349Sqs148142 {
13986349Sqs148142 	hpi_handle_t		handle;
13996349Sqs148142 	uint8_t			channel;
14006349Sqs148142 	uint32_t		comp_rd_index;
14016349Sqs148142 	p_rcr_entry_t		rcr_desc_rd_head_p;
14026349Sqs148142 	p_rcr_entry_t		rcr_desc_rd_head_pp;
14036349Sqs148142 	p_mblk_t		nmp, mp_cont, head_mp, *tail_mp;
14046349Sqs148142 	uint16_t		qlen, nrcr_read, npkt_read;
140511257SMichael.Speer@Sun.COM 	uint32_t		qlen_hw, npkts, num_rcrs;
14066864Sqs148142 	uint32_t		invalid_rcr_entry;
14076349Sqs148142 	boolean_t		multi;
140811257SMichael.Speer@Sun.COM 	rdc_stat_t		pktcs;
14096349Sqs148142 	rdc_rcr_cfg_b_t		rcr_cfg_b;
14107584SQiyan.Sun@Sun.COM 	uint64_t		rcr_head_index, rcr_tail_index;
14117584SQiyan.Sun@Sun.COM 	uint64_t		rcr_tail;
14127584SQiyan.Sun@Sun.COM 	rdc_rcr_tail_t		rcr_tail_reg;
14137618SMichael.Speer@Sun.COM 	p_hxge_rx_ring_stats_t	rdc_stats;
14148718SMichael.Speer@Sun.COM 	int			totallen = 0;
14156349Sqs148142 
14166349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:vindex %d "
14176349Sqs148142 	    "channel %d", vindex, ldvp->channel));
14186349Sqs148142 
14196349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
14208718SMichael.Speer@Sun.COM 	channel = rcrp->rdc;
14216349Sqs148142 	if (channel != ldvp->channel) {
14226349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "==> hxge_rx_pkts:index %d "
14236349Sqs148142 		    "channel %d, and rcr channel %d not matched.",
14246349Sqs148142 		    vindex, ldvp->channel, channel));
14256349Sqs148142 		return (NULL);
14266349Sqs148142 	}
14276349Sqs148142 
14286349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
14296349Sqs148142 	    "==> hxge_rx_pkts: START: rcr channel %d "
14306349Sqs148142 	    "head_p $%p head_pp $%p  index %d ",
14318718SMichael.Speer@Sun.COM 	    channel, rcrp->rcr_desc_rd_head_p,
14328718SMichael.Speer@Sun.COM 	    rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
14336349Sqs148142 
14348544SQiyan.Sun@Sun.COM 	(void) hpi_rxdma_rdc_rcr_qlen_get(handle, channel, &qlen);
14358544SQiyan.Sun@Sun.COM 	RXDMA_REG_READ64(handle, RDC_RCR_TAIL, channel, &rcr_tail_reg.value);
14368544SQiyan.Sun@Sun.COM 	rcr_tail = rcr_tail_reg.bits.tail;
14376864Sqs148142 
14386349Sqs148142 	if (!qlen) {
14396349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
14406349Sqs148142 		    "<== hxge_rx_pkts:rcr channel %d qlen %d (no pkts)",
14416349Sqs148142 		    channel, qlen));
14426349Sqs148142 		return (NULL);
14436349Sqs148142 	}
14446349Sqs148142 
14456349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_pkts:rcr channel %d "
14466349Sqs148142 	    "qlen %d", channel, qlen));
14476349Sqs148142 
14488718SMichael.Speer@Sun.COM 	comp_rd_index = rcrp->comp_rd_index;
14498718SMichael.Speer@Sun.COM 
14508718SMichael.Speer@Sun.COM 	rcr_desc_rd_head_p = rcrp->rcr_desc_rd_head_p;
14518718SMichael.Speer@Sun.COM 	rcr_desc_rd_head_pp = rcrp->rcr_desc_rd_head_pp;
14526349Sqs148142 	nrcr_read = npkt_read = 0;
14536349Sqs148142 
14548544SQiyan.Sun@Sun.COM 	if (hxgep->rdc_first_intr[channel])
14558366SQiyan.Sun@Sun.COM 		qlen_hw = qlen;
14568366SQiyan.Sun@Sun.COM 	else
14578544SQiyan.Sun@Sun.COM 		qlen_hw = qlen - 1;
14588366SQiyan.Sun@Sun.COM 
14596349Sqs148142 	head_mp = NULL;
14606349Sqs148142 	tail_mp = &head_mp;
14616349Sqs148142 	nmp = mp_cont = NULL;
14626349Sqs148142 	multi = B_FALSE;
14636349Sqs148142 
14648718SMichael.Speer@Sun.COM 	rcr_head_index = rcrp->rcr_desc_rd_head_p - rcrp->rcr_desc_first_p;
14658718SMichael.Speer@Sun.COM 	rcr_tail_index = rcr_tail - rcrp->rcr_tail_begin;
14667584SQiyan.Sun@Sun.COM 
14677584SQiyan.Sun@Sun.COM 	if (rcr_tail_index >= rcr_head_index) {
14688544SQiyan.Sun@Sun.COM 		num_rcrs = rcr_tail_index - rcr_head_index;
14697584SQiyan.Sun@Sun.COM 	} else {
14707584SQiyan.Sun@Sun.COM 		/* rcr_tail has wrapped around */
14718718SMichael.Speer@Sun.COM 		num_rcrs = (rcrp->comp_size - rcr_head_index) + rcr_tail_index;
14727584SQiyan.Sun@Sun.COM 	}
14737584SQiyan.Sun@Sun.COM 
147411257SMichael.Speer@Sun.COM 	npkts = hxge_scan_for_last_eop(rcrp, rcr_desc_rd_head_p, num_rcrs);
147511257SMichael.Speer@Sun.COM 	if (!npkts)
14768544SQiyan.Sun@Sun.COM 		return (NULL);
14778544SQiyan.Sun@Sun.COM 
147811257SMichael.Speer@Sun.COM 	if (qlen_hw > npkts) {
14797584SQiyan.Sun@Sun.COM 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
14807584SQiyan.Sun@Sun.COM 		    "Channel %d, rcr_qlen from reg %d and from rcr_tail %d\n",
14817584SQiyan.Sun@Sun.COM 		    channel, qlen_hw, qlen_sw));
148211257SMichael.Speer@Sun.COM 		qlen_hw = npkts;
14837584SQiyan.Sun@Sun.COM 	}
14847584SQiyan.Sun@Sun.COM 
14856349Sqs148142 	while (qlen_hw) {
14866349Sqs148142 #ifdef HXGE_DEBUG
14876349Sqs148142 		hxge_dump_rcr_entry(hxgep, rcr_desc_rd_head_p);
14886349Sqs148142 #endif
14896349Sqs148142 		/*
14906349Sqs148142 		 * Process one completion ring entry.
14916349Sqs148142 		 */
14926864Sqs148142 		invalid_rcr_entry = 0;
14936349Sqs148142 		hxge_receive_packet(hxgep,
14948718SMichael.Speer@Sun.COM 		    rcrp, rcr_desc_rd_head_p, &multi, &nmp, &mp_cont,
14956864Sqs148142 		    &invalid_rcr_entry);
14966864Sqs148142 		if (invalid_rcr_entry != 0) {
14978718SMichael.Speer@Sun.COM 			rdc_stats = rcrp->rdc_stats;
14987618SMichael.Speer@Sun.COM 			rdc_stats->rcr_invalids++;
14996864Sqs148142 			HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
15006864Sqs148142 			    "Channel %d could only read 0x%x packets, "
15016864Sqs148142 			    "but 0x%x pending\n", channel, npkt_read, qlen_hw));
15026864Sqs148142 			break;
15036864Sqs148142 		}
15046349Sqs148142 
15056349Sqs148142 		/*
15066349Sqs148142 		 * message chaining modes (nemo msg chaining)
15076349Sqs148142 		 */
15086349Sqs148142 		if (nmp) {
15096349Sqs148142 			nmp->b_next = NULL;
15106349Sqs148142 			if (!multi && !mp_cont) { /* frame fits a partition */
15116349Sqs148142 				*tail_mp = nmp;
15126349Sqs148142 				tail_mp = &nmp->b_next;
15136349Sqs148142 				nmp = NULL;
15146349Sqs148142 			} else if (multi && !mp_cont) { /* first segment */
15156349Sqs148142 				*tail_mp = nmp;
15166349Sqs148142 				tail_mp = &nmp->b_cont;
15176349Sqs148142 			} else if (multi && mp_cont) {	/* mid of multi segs */
15186349Sqs148142 				*tail_mp = mp_cont;
15196349Sqs148142 				tail_mp = &mp_cont->b_cont;
15206349Sqs148142 			} else if (!multi && mp_cont) { /* last segment */
15216349Sqs148142 				*tail_mp = mp_cont;
15226349Sqs148142 				tail_mp = &nmp->b_next;
15238718SMichael.Speer@Sun.COM 				totallen += MBLKL(mp_cont);
15246349Sqs148142 				nmp = NULL;
15256349Sqs148142 			}
15266349Sqs148142 		}
15276349Sqs148142 
15286349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
15296349Sqs148142 		    "==> hxge_rx_pkts: loop: rcr channel %d "
15306349Sqs148142 		    "before updating: multi %d "
15316349Sqs148142 		    "nrcr_read %d "
15326349Sqs148142 		    "npk read %d "
15336349Sqs148142 		    "head_pp $%p  index %d ",
15346349Sqs148142 		    channel, multi,
15356349Sqs148142 		    nrcr_read, npkt_read, rcr_desc_rd_head_pp, comp_rd_index));
15366349Sqs148142 
15376349Sqs148142 		if (!multi) {
15386349Sqs148142 			qlen_hw--;
15396349Sqs148142 			npkt_read++;
15406349Sqs148142 		}
15416349Sqs148142 
15426349Sqs148142 		/*
15436349Sqs148142 		 * Update the next read entry.
15446349Sqs148142 		 */
15456349Sqs148142 		comp_rd_index = NEXT_ENTRY(comp_rd_index,
15468718SMichael.Speer@Sun.COM 		    rcrp->comp_wrap_mask);
15476349Sqs148142 
15486349Sqs148142 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
15498718SMichael.Speer@Sun.COM 		    rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
15506349Sqs148142 
15516349Sqs148142 		nrcr_read++;
15526349Sqs148142 
15536349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
15546349Sqs148142 		    "<== hxge_rx_pkts: (SAM, process one packet) "
15556349Sqs148142 		    "nrcr_read %d", nrcr_read));
15566349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
15576349Sqs148142 		    "==> hxge_rx_pkts: loop: rcr channel %d "
15586349Sqs148142 		    "multi %d nrcr_read %d npk read %d head_pp $%p  index %d ",
15596349Sqs148142 		    channel, multi, nrcr_read, npkt_read, rcr_desc_rd_head_pp,
15606349Sqs148142 		    comp_rd_index));
15618718SMichael.Speer@Sun.COM 
15628718SMichael.Speer@Sun.COM 		if ((bytes_to_read != -1) &&
15638718SMichael.Speer@Sun.COM 		    (totallen >= bytes_to_read)) {
15648718SMichael.Speer@Sun.COM 			break;
15658718SMichael.Speer@Sun.COM 		}
15666349Sqs148142 	}
15676349Sqs148142 
15688718SMichael.Speer@Sun.COM 	rcrp->rcr_desc_rd_head_pp = rcr_desc_rd_head_pp;
15698718SMichael.Speer@Sun.COM 	rcrp->comp_rd_index = comp_rd_index;
15708718SMichael.Speer@Sun.COM 	rcrp->rcr_desc_rd_head_p = rcr_desc_rd_head_p;
15718718SMichael.Speer@Sun.COM 
15728718SMichael.Speer@Sun.COM 	if ((hxgep->intr_timeout != rcrp->intr_timeout) ||
15738718SMichael.Speer@Sun.COM 	    (hxgep->intr_threshold != rcrp->intr_threshold)) {
15748718SMichael.Speer@Sun.COM 		rcrp->intr_timeout = hxgep->intr_timeout;
15758718SMichael.Speer@Sun.COM 		rcrp->intr_threshold = hxgep->intr_threshold;
15766349Sqs148142 		rcr_cfg_b.value = 0x0ULL;
15778718SMichael.Speer@Sun.COM 		if (rcrp->intr_timeout)
15786349Sqs148142 			rcr_cfg_b.bits.entout = 1;
15798718SMichael.Speer@Sun.COM 		rcr_cfg_b.bits.timeout = rcrp->intr_timeout;
15808718SMichael.Speer@Sun.COM 		rcr_cfg_b.bits.pthres = rcrp->intr_threshold;
15816349Sqs148142 		RXDMA_REG_WRITE64(handle, RDC_RCR_CFG_B,
15826349Sqs148142 		    channel, rcr_cfg_b.value);
15836349Sqs148142 	}
15846349Sqs148142 
158511257SMichael.Speer@Sun.COM 	pktcs.value = 0;
15868422SMichael.Speer@Sun.COM 	if (hxgep->rdc_first_intr[channel] && (npkt_read > 0)) {
15878422SMichael.Speer@Sun.COM 		hxgep->rdc_first_intr[channel] = B_FALSE;
158811257SMichael.Speer@Sun.COM 		pktcs.bits.pktread = npkt_read - 1;
15898422SMichael.Speer@Sun.COM 	} else
159011257SMichael.Speer@Sun.COM 		pktcs.bits.pktread = npkt_read;
159111257SMichael.Speer@Sun.COM 	pktcs.bits.ptrread = nrcr_read;
159211257SMichael.Speer@Sun.COM 	RXDMA_REG_WRITE64(handle, RDC_STAT, channel, pktcs.value);
15938366SQiyan.Sun@Sun.COM 
15946349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL,
15956349Sqs148142 	    "==> hxge_rx_pkts: EXIT: rcr channel %d "
15966349Sqs148142 	    "head_pp $%p  index %016llx ",
15978718SMichael.Speer@Sun.COM 	    channel, rcrp->rcr_desc_rd_head_pp, rcrp->comp_rd_index));
15986349Sqs148142 
15996349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_INT_CTL, "<== hxge_rx_pkts"));
16006349Sqs148142 	return (head_mp);
16016349Sqs148142 }
16026349Sqs148142 
16036864Sqs148142 #define	RCR_ENTRY_PATTERN	0x5a5a6b6b7c7c8d8dULL
16048177SQiyan.Sun@Sun.COM #define	NO_PORT_BIT		0x20
16058366SQiyan.Sun@Sun.COM #define	L4_CS_EQ_BIT		0x40
16066864Sqs148142 
hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,p_rcr_entry_t rcr_desc_rd_head_p,uint32_t num_rcrs)16078718SMichael.Speer@Sun.COM static uint32_t hxge_scan_for_last_eop(p_rx_rcr_ring_t rcrp,
16088544SQiyan.Sun@Sun.COM     p_rcr_entry_t rcr_desc_rd_head_p, uint32_t num_rcrs)
16098544SQiyan.Sun@Sun.COM {
16108544SQiyan.Sun@Sun.COM 	uint64_t	rcr_entry;
16118544SQiyan.Sun@Sun.COM 	uint32_t	rcrs = 0;
16128544SQiyan.Sun@Sun.COM 	uint32_t	pkts = 0;
16138544SQiyan.Sun@Sun.COM 
161411257SMichael.Speer@Sun.COM 	while (rcrs < num_rcrs) {
16158544SQiyan.Sun@Sun.COM 		rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
16168544SQiyan.Sun@Sun.COM 
16178544SQiyan.Sun@Sun.COM 		if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN))
16188544SQiyan.Sun@Sun.COM 			break;
16198544SQiyan.Sun@Sun.COM 
16208544SQiyan.Sun@Sun.COM 		if (!(rcr_entry & RCR_MULTI_MASK))
16218544SQiyan.Sun@Sun.COM 			pkts++;
16228544SQiyan.Sun@Sun.COM 
16238544SQiyan.Sun@Sun.COM 		rcr_desc_rd_head_p = NEXT_ENTRY_PTR(rcr_desc_rd_head_p,
16248718SMichael.Speer@Sun.COM 		    rcrp->rcr_desc_first_p, rcrp->rcr_desc_last_p);
162511257SMichael.Speer@Sun.COM 
162611257SMichael.Speer@Sun.COM 		rcrs++;
16278544SQiyan.Sun@Sun.COM 	}
16288544SQiyan.Sun@Sun.COM 
16298544SQiyan.Sun@Sun.COM 	return (pkts);
16308544SQiyan.Sun@Sun.COM }
16318544SQiyan.Sun@Sun.COM 
16326349Sqs148142 /*ARGSUSED*/
16336349Sqs148142 void
hxge_receive_packet(p_hxge_t hxgep,p_rx_rcr_ring_t rcr_p,p_rcr_entry_t rcr_desc_rd_head_p,boolean_t * multi_p,mblk_t ** mp,mblk_t ** mp_cont,uint32_t * invalid_rcr_entry)16348718SMichael.Speer@Sun.COM hxge_receive_packet(p_hxge_t hxgep, p_rx_rcr_ring_t rcr_p,
16358718SMichael.Speer@Sun.COM     p_rcr_entry_t rcr_desc_rd_head_p, boolean_t *multi_p, mblk_t **mp,
16368718SMichael.Speer@Sun.COM     mblk_t **mp_cont, uint32_t *invalid_rcr_entry)
16376349Sqs148142 {
16388718SMichael.Speer@Sun.COM 	p_mblk_t nmp = NULL;
16398718SMichael.Speer@Sun.COM 	uint64_t multi;
16408718SMichael.Speer@Sun.COM 	uint8_t channel;
16416349Sqs148142 	boolean_t first_entry = B_TRUE;
16428366SQiyan.Sun@Sun.COM 	boolean_t is_tcp_udp = B_FALSE;
16436349Sqs148142 	boolean_t buffer_free = B_FALSE;
16446349Sqs148142 	boolean_t error_send_up = B_FALSE;
16456349Sqs148142 	uint8_t error_type;
16466349Sqs148142 	uint16_t l2_len;
16476349Sqs148142 	uint16_t skip_len;
16486349Sqs148142 	uint8_t pktbufsz_type;
16496349Sqs148142 	uint64_t rcr_entry;
16506349Sqs148142 	uint64_t *pkt_buf_addr_pp;
16516349Sqs148142 	uint64_t *pkt_buf_addr_p;
16526349Sqs148142 	uint32_t buf_offset;
16536349Sqs148142 	uint32_t bsize;
16546349Sqs148142 	uint32_t msg_index;
16556349Sqs148142 	p_rx_rbr_ring_t rx_rbr_p;
16566349Sqs148142 	p_rx_msg_t *rx_msg_ring_p;
16576349Sqs148142 	p_rx_msg_t rx_msg_p;
16586349Sqs148142 	uint16_t sw_offset_bytes = 0, hdr_size = 0;
16596349Sqs148142 	hxge_status_t status = HXGE_OK;
16606349Sqs148142 	boolean_t is_valid = B_FALSE;
16616349Sqs148142 	p_hxge_rx_ring_stats_t rdc_stats;
16626349Sqs148142 	uint32_t bytes_read;
16638366SQiyan.Sun@Sun.COM 	uint8_t header0 = 0;
16648366SQiyan.Sun@Sun.COM 	uint8_t header1 = 0;
16658366SQiyan.Sun@Sun.COM 	uint64_t pkt_type;
16668366SQiyan.Sun@Sun.COM 	uint8_t no_port_bit = 0;
16678366SQiyan.Sun@Sun.COM 	uint8_t l4_cs_eq_bit = 0;
16686349Sqs148142 
16696864Sqs148142 	channel = rcr_p->rdc;
16706864Sqs148142 
16716349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL, "==> hxge_receive_packet"));
16726349Sqs148142 
16736349Sqs148142 	first_entry = (*mp == NULL) ? B_TRUE : B_FALSE;
16746349Sqs148142 	rcr_entry = *((uint64_t *)rcr_desc_rd_head_p);
16756349Sqs148142 
16766864Sqs148142 	/* Verify the content of the rcr_entry for a hardware bug workaround */
16776864Sqs148142 	if ((rcr_entry == 0x0) || (rcr_entry == RCR_ENTRY_PATTERN)) {
16786864Sqs148142 		*invalid_rcr_entry = 1;
16796864Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL, "hxge_receive_packet "
16806864Sqs148142 		    "Channel %d invalid RCR entry 0x%llx found, returning\n",
16816864Sqs148142 		    channel, (long long) rcr_entry));
16826864Sqs148142 		return;
16836864Sqs148142 	}
16846864Sqs148142 	*((uint64_t *)rcr_desc_rd_head_p) = RCR_ENTRY_PATTERN;
16856864Sqs148142 
16866349Sqs148142 	multi = (rcr_entry & RCR_MULTI_MASK);
16878366SQiyan.Sun@Sun.COM 	pkt_type = (rcr_entry & RCR_PKT_TYPE_MASK);
16886349Sqs148142 
16896349Sqs148142 	error_type = ((rcr_entry & RCR_ERROR_MASK) >> RCR_ERROR_SHIFT);
16906349Sqs148142 	l2_len = ((rcr_entry & RCR_L2_LEN_MASK) >> RCR_L2_LEN_SHIFT);
16916349Sqs148142 
16926349Sqs148142 	/*
16936349Sqs148142 	 * Hardware does not strip the CRC due bug ID 11451 where
16946349Sqs148142 	 * the hardware mis handles minimum size packets.
16956349Sqs148142 	 */
16966349Sqs148142 	l2_len -= ETHERFCSL;
16976349Sqs148142 
16986349Sqs148142 	pktbufsz_type = ((rcr_entry & RCR_PKTBUFSZ_MASK) >>
16996349Sqs148142 	    RCR_PKTBUFSZ_SHIFT);
17006864Sqs148142 #if defined(__i386)
17016864Sqs148142 	pkt_buf_addr_pp = (uint64_t *)(uint32_t)((rcr_entry &
17026864Sqs148142 	    RCR_PKT_BUF_ADDR_MASK) << RCR_PKT_BUF_ADDR_SHIFT);
17036864Sqs148142 #else
17046349Sqs148142 	pkt_buf_addr_pp = (uint64_t *)((rcr_entry & RCR_PKT_BUF_ADDR_MASK) <<
17056349Sqs148142 	    RCR_PKT_BUF_ADDR_SHIFT);
17066864Sqs148142 #endif
17076349Sqs148142 
17086349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17096349Sqs148142 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
17106349Sqs148142 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
17118177SQiyan.Sun@Sun.COM 	    "error_type 0x%x pktbufsz_type %d ",
17126349Sqs148142 	    rcr_desc_rd_head_p, rcr_entry, pkt_buf_addr_pp, l2_len,
17138177SQiyan.Sun@Sun.COM 	    multi, error_type, pktbufsz_type));
17146349Sqs148142 
17156349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17166349Sqs148142 	    "==> hxge_receive_packet: entryp $%p entry 0x%0llx "
17176349Sqs148142 	    "pkt_buf_addr_pp $%p l2_len %d multi %d "
17188177SQiyan.Sun@Sun.COM 	    "error_type 0x%x ", rcr_desc_rd_head_p,
17198177SQiyan.Sun@Sun.COM 	    rcr_entry, pkt_buf_addr_pp, l2_len, multi, error_type));
17206349Sqs148142 
17216349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17226349Sqs148142 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
17236349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17246349Sqs148142 	    rcr_entry, pkt_buf_addr_pp, l2_len));
17256349Sqs148142 
17266349Sqs148142 	/* get the stats ptr */
17276349Sqs148142 	rdc_stats = rcr_p->rdc_stats;
17286349Sqs148142 
17296349Sqs148142 	if (!l2_len) {
17306349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
17316349Sqs148142 		    "<== hxge_receive_packet: failed: l2 length is 0."));
17326349Sqs148142 		return;
17336349Sqs148142 	}
17346349Sqs148142 
17356349Sqs148142 	/* shift 6 bits to get the full io address */
17366864Sqs148142 #if defined(__i386)
17376864Sqs148142 	pkt_buf_addr_pp = (uint64_t *)((uint32_t)pkt_buf_addr_pp <<
17386864Sqs148142 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
17396864Sqs148142 #else
17406349Sqs148142 	pkt_buf_addr_pp = (uint64_t *)((uint64_t)pkt_buf_addr_pp <<
17416349Sqs148142 	    RCR_PKT_BUF_ADDR_SHIFT_FULL);
17426864Sqs148142 #endif
17436349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17446349Sqs148142 	    "==> (rbr) hxge_receive_packet: entry 0x%0llx "
17456349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17466349Sqs148142 	    rcr_entry, pkt_buf_addr_pp, l2_len));
17476349Sqs148142 
17486349Sqs148142 	rx_rbr_p = rcr_p->rx_rbr_p;
17496349Sqs148142 	rx_msg_ring_p = rx_rbr_p->rx_msg_ring;
17506349Sqs148142 
17516349Sqs148142 	if (first_entry) {
17526349Sqs148142 		hdr_size = (rcr_p->full_hdr_flag ? RXDMA_HDR_SIZE_FULL :
17536349Sqs148142 		    RXDMA_HDR_SIZE_DEFAULT);
17546349Sqs148142 
17556349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
17566349Sqs148142 		    "==> hxge_receive_packet: first entry 0x%016llx "
17576349Sqs148142 		    "pkt_buf_addr_pp $%p l2_len %d hdr %d",
17586349Sqs148142 		    rcr_entry, pkt_buf_addr_pp, l2_len, hdr_size));
17596349Sqs148142 	}
17606349Sqs148142 
17616349Sqs148142 	MUTEX_ENTER(&rx_rbr_p->lock);
17626349Sqs148142 
17636349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
17646349Sqs148142 	    "==> (rbr 1) hxge_receive_packet: entry 0x%0llx "
17656349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17666349Sqs148142 	    rcr_entry, pkt_buf_addr_pp, l2_len));
17676349Sqs148142 
17686349Sqs148142 	/*
17696349Sqs148142 	 * Packet buffer address in the completion entry points to the starting
17706349Sqs148142 	 * buffer address (offset 0). Use the starting buffer address to locate
17716349Sqs148142 	 * the corresponding kernel address.
17726349Sqs148142 	 */
17736349Sqs148142 	status = hxge_rxbuf_pp_to_vp(hxgep, rx_rbr_p,
17746349Sqs148142 	    pktbufsz_type, pkt_buf_addr_pp, &pkt_buf_addr_p,
17756349Sqs148142 	    &buf_offset, &msg_index);
17766349Sqs148142 
17776349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
17786349Sqs148142 	    "==> (rbr 2) hxge_receive_packet: entry 0x%0llx "
17796349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17806349Sqs148142 	    rcr_entry, pkt_buf_addr_pp, l2_len));
17816349Sqs148142 
17826349Sqs148142 	if (status != HXGE_OK) {
17836349Sqs148142 		MUTEX_EXIT(&rx_rbr_p->lock);
17846349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
17856349Sqs148142 		    "<== hxge_receive_packet: found vaddr failed %d", status));
17866349Sqs148142 		return;
17876349Sqs148142 	}
17886349Sqs148142 
17896349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17906349Sqs148142 	    "==> (rbr 3) hxge_receive_packet: entry 0x%0llx "
17916349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17926349Sqs148142 	    rcr_entry, pkt_buf_addr_pp, l2_len));
17936349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
17946349Sqs148142 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
17956349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
17966349Sqs148142 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
17976349Sqs148142 
17986349Sqs148142 	if (msg_index >= rx_rbr_p->tnblocks) {
17996349Sqs148142 		MUTEX_EXIT(&rx_rbr_p->lock);
18006349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18016349Sqs148142 		    "==> hxge_receive_packet: FATAL msg_index (%d) "
18026349Sqs148142 		    "should be smaller than tnblocks (%d)\n",
18036349Sqs148142 		    msg_index, rx_rbr_p->tnblocks));
18046349Sqs148142 		return;
18056349Sqs148142 	}
18066349Sqs148142 
18076349Sqs148142 	rx_msg_p = rx_msg_ring_p[msg_index];
18086349Sqs148142 
18096349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18106349Sqs148142 	    "==> (rbr 4 msgindex %d) hxge_receive_packet: entry 0x%0llx "
18116349Sqs148142 	    "full pkt_buf_addr_pp $%p l2_len %d",
18126349Sqs148142 	    msg_index, rcr_entry, pkt_buf_addr_pp, l2_len));
18136349Sqs148142 
18146349Sqs148142 	switch (pktbufsz_type) {
18156349Sqs148142 	case RCR_PKTBUFSZ_0:
18166349Sqs148142 		bsize = rx_rbr_p->pkt_buf_size0_bytes;
18176349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18186349Sqs148142 		    "==> hxge_receive_packet: 0 buf %d", bsize));
18196349Sqs148142 		break;
18206349Sqs148142 	case RCR_PKTBUFSZ_1:
18216349Sqs148142 		bsize = rx_rbr_p->pkt_buf_size1_bytes;
18226349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18236349Sqs148142 		    "==> hxge_receive_packet: 1 buf %d", bsize));
18246349Sqs148142 		break;
18256349Sqs148142 	case RCR_PKTBUFSZ_2:
18266349Sqs148142 		bsize = rx_rbr_p->pkt_buf_size2_bytes;
18276349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
18286349Sqs148142 		    "==> hxge_receive_packet: 2 buf %d", bsize));
18296349Sqs148142 		break;
18306349Sqs148142 	case RCR_SINGLE_BLOCK:
18316349Sqs148142 		bsize = rx_msg_p->block_size;
18326349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18336349Sqs148142 		    "==> hxge_receive_packet: single %d", bsize));
18346349Sqs148142 
18356349Sqs148142 		break;
18366349Sqs148142 	default:
18376349Sqs148142 		MUTEX_EXIT(&rx_rbr_p->lock);
18386349Sqs148142 		return;
18396349Sqs148142 	}
18406349Sqs148142 
18416349Sqs148142 	DMA_COMMON_SYNC_OFFSET(rx_msg_p->buf_dma,
18426349Sqs148142 	    (buf_offset + sw_offset_bytes), (hdr_size + l2_len),
18436349Sqs148142 	    DDI_DMA_SYNC_FORCPU);
18446349Sqs148142 
18456349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18466349Sqs148142 	    "==> hxge_receive_packet: after first dump:usage count"));
18476349Sqs148142 
18486349Sqs148142 	if (rx_msg_p->cur_usage_cnt == 0) {
18498366SQiyan.Sun@Sun.COM 		atomic_inc_32(&rx_rbr_p->rbr_used);
18506349Sqs148142 		if (rx_rbr_p->rbr_use_bcopy) {
18516349Sqs148142 			atomic_inc_32(&rx_rbr_p->rbr_consumed);
18528366SQiyan.Sun@Sun.COM 			if (rx_rbr_p->rbr_consumed <
18536349Sqs148142 			    rx_rbr_p->rbr_threshold_hi) {
18548366SQiyan.Sun@Sun.COM 				if (rx_rbr_p->rbr_threshold_lo == 0 ||
18558366SQiyan.Sun@Sun.COM 				    ((rx_rbr_p->rbr_consumed >=
18568366SQiyan.Sun@Sun.COM 				    rx_rbr_p->rbr_threshold_lo) &&
18578366SQiyan.Sun@Sun.COM 				    (rx_rbr_p->rbr_bufsize_type >=
18588366SQiyan.Sun@Sun.COM 				    pktbufsz_type))) {
18598366SQiyan.Sun@Sun.COM 					rx_msg_p->rx_use_bcopy = B_TRUE;
18608366SQiyan.Sun@Sun.COM 				}
18618366SQiyan.Sun@Sun.COM 			} else {
18626349Sqs148142 				rx_msg_p->rx_use_bcopy = B_TRUE;
18636349Sqs148142 			}
18646349Sqs148142 		}
18656349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18666349Sqs148142 		    "==> hxge_receive_packet: buf %d (new block) ", bsize));
18676349Sqs148142 
18686349Sqs148142 		rx_msg_p->pkt_buf_size_code = pktbufsz_type;
18696349Sqs148142 		rx_msg_p->pkt_buf_size = bsize;
18706349Sqs148142 		rx_msg_p->cur_usage_cnt = 1;
18716349Sqs148142 		if (pktbufsz_type == RCR_SINGLE_BLOCK) {
18726349Sqs148142 			HXGE_DEBUG_MSG((hxgep, RX2_CTL,
18736349Sqs148142 			    "==> hxge_receive_packet: buf %d (single block) ",
18746349Sqs148142 			    bsize));
18756349Sqs148142 			/*
18766349Sqs148142 			 * Buffer can be reused once the free function is
18776349Sqs148142 			 * called.
18786349Sqs148142 			 */
18796349Sqs148142 			rx_msg_p->max_usage_cnt = 1;
18806349Sqs148142 			buffer_free = B_TRUE;
18816349Sqs148142 		} else {
18826349Sqs148142 			rx_msg_p->max_usage_cnt = rx_msg_p->block_size / bsize;
18836349Sqs148142 			if (rx_msg_p->max_usage_cnt == 1) {
18846349Sqs148142 				buffer_free = B_TRUE;
18856349Sqs148142 			}
18866349Sqs148142 		}
18876349Sqs148142 	} else {
18886349Sqs148142 		rx_msg_p->cur_usage_cnt++;
18896349Sqs148142 		if (rx_msg_p->cur_usage_cnt == rx_msg_p->max_usage_cnt) {
18906349Sqs148142 			buffer_free = B_TRUE;
18916349Sqs148142 		}
18926349Sqs148142 	}
18936349Sqs148142 
18946349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
18956349Sqs148142 	    "msgbuf index = %d l2len %d bytes usage %d max_usage %d ",
18966349Sqs148142 	    msg_index, l2_len,
18976349Sqs148142 	    rx_msg_p->cur_usage_cnt, rx_msg_p->max_usage_cnt));
18986349Sqs148142 
18996349Sqs148142 	if (error_type) {
19006349Sqs148142 		rdc_stats->ierrors++;
19016349Sqs148142 		/* Update error stats */
19026349Sqs148142 		rdc_stats->errlog.compl_err_type = error_type;
19036349Sqs148142 		HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_RDMC_RCR_ERR);
19046349Sqs148142 
19056349Sqs148142 		if (error_type & RCR_CTRL_FIFO_DED) {
19066349Sqs148142 			rdc_stats->ctrl_fifo_ecc_err++;
19076349Sqs148142 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
19086349Sqs148142 			    " hxge_receive_packet: "
19096349Sqs148142 			    " channel %d RCR ctrl_fifo_ded error", channel));
19106349Sqs148142 		} else if (error_type & RCR_DATA_FIFO_DED) {
19116349Sqs148142 			rdc_stats->data_fifo_ecc_err++;
19126349Sqs148142 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
19136349Sqs148142 			    " hxge_receive_packet: channel %d"
19146349Sqs148142 			    " RCR data_fifo_ded error", channel));
19156349Sqs148142 		}
19166349Sqs148142 
19176349Sqs148142 		/*
19186349Sqs148142 		 * Update and repost buffer block if max usage count is
19196349Sqs148142 		 * reached.
19206349Sqs148142 		 */
19216349Sqs148142 		if (error_send_up == B_FALSE) {
19226349Sqs148142 			atomic_inc_32(&rx_msg_p->ref_cnt);
19236349Sqs148142 			if (buffer_free == B_TRUE) {
19246349Sqs148142 				rx_msg_p->free = B_TRUE;
19256349Sqs148142 			}
19266349Sqs148142 
19276349Sqs148142 			MUTEX_EXIT(&rx_rbr_p->lock);
19286349Sqs148142 			hxge_freeb(rx_msg_p);
19296349Sqs148142 			return;
19306349Sqs148142 		}
19316349Sqs148142 	}
19326349Sqs148142 
19336349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
19346349Sqs148142 	    "==> hxge_receive_packet: DMA sync second "));
19356349Sqs148142 
19366349Sqs148142 	bytes_read = rcr_p->rcvd_pkt_bytes;
19376349Sqs148142 	skip_len = sw_offset_bytes + hdr_size;
19388177SQiyan.Sun@Sun.COM 
19398177SQiyan.Sun@Sun.COM 	if (first_entry) {
19408366SQiyan.Sun@Sun.COM 		header0 = rx_msg_p->buffer[buf_offset];
19418366SQiyan.Sun@Sun.COM 		no_port_bit = header0 & NO_PORT_BIT;
19428366SQiyan.Sun@Sun.COM 		header1 = rx_msg_p->buffer[buf_offset + 1];
19438366SQiyan.Sun@Sun.COM 		l4_cs_eq_bit = header1 & L4_CS_EQ_BIT;
19448177SQiyan.Sun@Sun.COM 	}
19458177SQiyan.Sun@Sun.COM 
19466349Sqs148142 	if (!rx_msg_p->rx_use_bcopy) {
19476349Sqs148142 		/*
19486349Sqs148142 		 * For loaned up buffers, the driver reference count
19496349Sqs148142 		 * will be incremented first and then the free state.
19506349Sqs148142 		 */
19516349Sqs148142 		if ((nmp = hxge_dupb(rx_msg_p, buf_offset, bsize)) != NULL) {
19526349Sqs148142 			if (first_entry) {
19536349Sqs148142 				nmp->b_rptr = &nmp->b_rptr[skip_len];
19546349Sqs148142 				if (l2_len < bsize - skip_len) {
19556349Sqs148142 					nmp->b_wptr = &nmp->b_rptr[l2_len];
19566349Sqs148142 				} else {
19576349Sqs148142 					nmp->b_wptr = &nmp->b_rptr[bsize
19586349Sqs148142 					    - skip_len];
19596349Sqs148142 				}
19606349Sqs148142 			} else {
19616349Sqs148142 				if (l2_len - bytes_read < bsize) {
19626349Sqs148142 					nmp->b_wptr =
19636349Sqs148142 					    &nmp->b_rptr[l2_len - bytes_read];
19646349Sqs148142 				} else {
19656349Sqs148142 					nmp->b_wptr = &nmp->b_rptr[bsize];
19666349Sqs148142 				}
19676349Sqs148142 			}
19686349Sqs148142 		}
19696349Sqs148142 	} else {
19706349Sqs148142 		if (first_entry) {
19716349Sqs148142 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset + skip_len,
19726349Sqs148142 			    l2_len < bsize - skip_len ?
19736349Sqs148142 			    l2_len : bsize - skip_len);
19746349Sqs148142 		} else {
19756349Sqs148142 			nmp = hxge_dupb_bcopy(rx_msg_p, buf_offset,
19766349Sqs148142 			    l2_len - bytes_read < bsize ?
19776349Sqs148142 			    l2_len - bytes_read : bsize);
19786349Sqs148142 		}
19796349Sqs148142 	}
19806349Sqs148142 
19816349Sqs148142 	if (nmp != NULL) {
19826349Sqs148142 		if (first_entry)
19836349Sqs148142 			bytes_read  = nmp->b_wptr - nmp->b_rptr;
19846349Sqs148142 		else
19856349Sqs148142 			bytes_read += nmp->b_wptr - nmp->b_rptr;
19866349Sqs148142 
19876349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
19886349Sqs148142 		    "==> hxge_receive_packet after dupb: "
19896349Sqs148142 		    "rbr consumed %d "
19906349Sqs148142 		    "pktbufsz_type %d "
19916349Sqs148142 		    "nmp $%p rptr $%p wptr $%p "
19926349Sqs148142 		    "buf_offset %d bzise %d l2_len %d skip_len %d",
19936349Sqs148142 		    rx_rbr_p->rbr_consumed,
19946349Sqs148142 		    pktbufsz_type,
19956349Sqs148142 		    nmp, nmp->b_rptr, nmp->b_wptr,
19966349Sqs148142 		    buf_offset, bsize, l2_len, skip_len));
19976349Sqs148142 	} else {
19986349Sqs148142 		cmn_err(CE_WARN, "!hxge_receive_packet: update stats (error)");
19996349Sqs148142 
20006349Sqs148142 		atomic_inc_32(&rx_msg_p->ref_cnt);
20016349Sqs148142 		if (buffer_free == B_TRUE) {
20026349Sqs148142 			rx_msg_p->free = B_TRUE;
20036349Sqs148142 		}
20046349Sqs148142 
20056349Sqs148142 		MUTEX_EXIT(&rx_rbr_p->lock);
20066349Sqs148142 		hxge_freeb(rx_msg_p);
20076349Sqs148142 		return;
20086349Sqs148142 	}
20096349Sqs148142 
20106349Sqs148142 	if (buffer_free == B_TRUE) {
20116349Sqs148142 		rx_msg_p->free = B_TRUE;
20126349Sqs148142 	}
20136349Sqs148142 
20146349Sqs148142 	/*
20156349Sqs148142 	 * ERROR, FRAG and PKT_TYPE are only reported in the first entry. If a
20166349Sqs148142 	 * packet is not fragmented and no error bit is set, then L4 checksum
20176349Sqs148142 	 * is OK.
20186349Sqs148142 	 */
20196349Sqs148142 	is_valid = (nmp != NULL);
20206349Sqs148142 	if (first_entry) {
20216349Sqs148142 		rdc_stats->ipackets++; /* count only 1st seg for jumbo */
20227584SQiyan.Sun@Sun.COM 		if (l2_len > (STD_FRAME_SIZE - ETHERFCSL))
20237584SQiyan.Sun@Sun.COM 			rdc_stats->jumbo_pkts++;
20246349Sqs148142 		rdc_stats->ibytes += skip_len + l2_len < bsize ?
20256349Sqs148142 		    l2_len : bsize;
20266349Sqs148142 	} else {
20277584SQiyan.Sun@Sun.COM 		/*
20287584SQiyan.Sun@Sun.COM 		 * Add the current portion of the packet to the kstats.
20297584SQiyan.Sun@Sun.COM 		 * The current portion of the packet is calculated by using
20307584SQiyan.Sun@Sun.COM 		 * length of the packet and the previously received portion.
20317584SQiyan.Sun@Sun.COM 		 */
20327584SQiyan.Sun@Sun.COM 		rdc_stats->ibytes += l2_len - rcr_p->rcvd_pkt_bytes < bsize ?
20337584SQiyan.Sun@Sun.COM 		    l2_len - rcr_p->rcvd_pkt_bytes : bsize;
20346349Sqs148142 	}
20356349Sqs148142 
20366349Sqs148142 	rcr_p->rcvd_pkt_bytes = bytes_read;
20376349Sqs148142 
20386349Sqs148142 	if (rx_msg_p->free && rx_msg_p->rx_use_bcopy) {
20396349Sqs148142 		atomic_inc_32(&rx_msg_p->ref_cnt);
20407465SMichael.Speer@Sun.COM 		MUTEX_EXIT(&rx_rbr_p->lock);
20416349Sqs148142 		hxge_freeb(rx_msg_p);
20428718SMichael.Speer@Sun.COM 	} else
20437465SMichael.Speer@Sun.COM 		MUTEX_EXIT(&rx_rbr_p->lock);
20446349Sqs148142 
20456349Sqs148142 	if (is_valid) {
20466349Sqs148142 		nmp->b_cont = NULL;
20476349Sqs148142 		if (first_entry) {
20486349Sqs148142 			*mp = nmp;
20496349Sqs148142 			*mp_cont = NULL;
20506349Sqs148142 		} else {
20516349Sqs148142 			*mp_cont = nmp;
20526349Sqs148142 		}
20536349Sqs148142 	}
20546349Sqs148142 
20556349Sqs148142 	/*
20566349Sqs148142 	 * Update stats and hardware checksuming.
20576349Sqs148142 	 */
20586349Sqs148142 	if (is_valid && !multi) {
20598366SQiyan.Sun@Sun.COM 		is_tcp_udp = ((pkt_type == RCR_PKT_IS_TCP ||
20608366SQiyan.Sun@Sun.COM 		    pkt_type == RCR_PKT_IS_UDP) ? B_TRUE : B_FALSE);
20618366SQiyan.Sun@Sun.COM 
20628366SQiyan.Sun@Sun.COM 		if (!no_port_bit && l4_cs_eq_bit && is_tcp_udp && !error_type) {
2063*11878SVenu.Iyer@Sun.COM 			mac_hcksum_set(nmp, 0, 0, 0, 0, HCK_FULLCKSUM_OK);
20646349Sqs148142 
20656349Sqs148142 			HXGE_DEBUG_MSG((hxgep, RX_CTL,
20666349Sqs148142 			    "==> hxge_receive_packet: Full tcp/udp cksum "
20678177SQiyan.Sun@Sun.COM 			    "is_valid 0x%x multi %d error %d",
20688177SQiyan.Sun@Sun.COM 			    is_valid, multi, error_type));
20696349Sqs148142 		}
20706349Sqs148142 	}
20716349Sqs148142 
20726349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX2_CTL,
20736349Sqs148142 	    "==> hxge_receive_packet: *mp 0x%016llx", *mp));
20746349Sqs148142 
20756349Sqs148142 	*multi_p = (multi == RCR_MULTI_MASK);
20766349Sqs148142 
20776349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_receive_packet: "
20786349Sqs148142 	    "multi %d nmp 0x%016llx *mp 0x%016llx *mp_cont 0x%016llx",
20796349Sqs148142 	    *multi_p, nmp, *mp, *mp_cont));
20806349Sqs148142 }
20816349Sqs148142 
20828141SMichael.Speer@Sun.COM static void
hxge_rx_rbr_empty_recover(p_hxge_t hxgep,uint8_t channel)20838141SMichael.Speer@Sun.COM hxge_rx_rbr_empty_recover(p_hxge_t hxgep, uint8_t channel)
20848141SMichael.Speer@Sun.COM {
20858141SMichael.Speer@Sun.COM 	hpi_handle_t	handle;
20868141SMichael.Speer@Sun.COM 	p_rx_rcr_ring_t	rcrp;
20878141SMichael.Speer@Sun.COM 	p_rx_rbr_ring_t	rbrp;
20888141SMichael.Speer@Sun.COM 
20898141SMichael.Speer@Sun.COM 	rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
20908141SMichael.Speer@Sun.COM 	rbrp = rcrp->rx_rbr_p;
20918141SMichael.Speer@Sun.COM 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
20928141SMichael.Speer@Sun.COM 
20938141SMichael.Speer@Sun.COM 	/*
20948141SMichael.Speer@Sun.COM 	 * Wait for the channel to be quiet
20958141SMichael.Speer@Sun.COM 	 */
20968141SMichael.Speer@Sun.COM 	(void) hpi_rxdma_cfg_rdc_wait_for_qst(handle, channel);
20978141SMichael.Speer@Sun.COM 
20988141SMichael.Speer@Sun.COM 	/*
20998141SMichael.Speer@Sun.COM 	 * Post page will accumulate some buffers before re-enabling
21008141SMichael.Speer@Sun.COM 	 * the DMA channel.
21018141SMichael.Speer@Sun.COM 	 */
21028366SQiyan.Sun@Sun.COM 
21038141SMichael.Speer@Sun.COM 	MUTEX_ENTER(&rbrp->post_lock);
21048366SQiyan.Sun@Sun.COM 	if ((rbrp->rbb_max - rbrp->rbr_used) >= HXGE_RBR_EMPTY_THRESHOLD) {
21058236SQiyan.Sun@Sun.COM 		hxge_rbr_empty_restore(hxgep, rbrp);
21068236SQiyan.Sun@Sun.COM 	} else {
21078236SQiyan.Sun@Sun.COM 		rbrp->rbr_is_empty = B_TRUE;
21088236SQiyan.Sun@Sun.COM 	}
21098141SMichael.Speer@Sun.COM 	MUTEX_EXIT(&rbrp->post_lock);
21108141SMichael.Speer@Sun.COM }
21118141SMichael.Speer@Sun.COM 
21128718SMichael.Speer@Sun.COM 
/*ARGSUSED*/
/*
 * hxge_rx_err_evnts
 *
 * Decode the error bits of an RDC control/status word (cs), bump the
 * matching kstat counters and post FMA ereports.  rcr_thres, rcr_to and
 * rbr_empty are non-fatal and are only counted (rbr_empty additionally
 * runs the RBR-empty recovery path).  Every other asserted bit is fatal
 * and causes a full channel reset via hxge_rxdma_fatal_err_recover().
 *
 * Returns the status of the fatal-error recovery, or HXGE_OK when no
 * fatal error was seen.
 */
static hxge_status_t
hxge_rx_err_evnts(p_hxge_t hxgep, uint_t index, p_hxge_ldv_t ldvp,
    rdc_stat_t cs)
{
	p_hxge_rx_ring_stats_t	rdc_stats;
	hpi_handle_t		handle;
	boolean_t		rxchan_fatal = B_FALSE;
	uint8_t			channel;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_err_evnts"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	channel = ldvp->channel;

	/* Per-channel statistics are indexed by the virtual DMA index. */
	rdc_stats = &hxgep->statsp->rdc_stats[ldvp->vdma_index];

	/* RBR completion timeout: fatal. */
	if (cs.bits.rbr_cpl_to) {
		rdc_stats->rbr_tmout++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RBR_CPL_TO);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rx_rbr_timeout", channel));
	}

	/*
	 * Either parity-error bit means the hardware parity error log
	 * registers are valid; capture them into the error log.
	 */
	if ((cs.bits.rcr_shadow_par_err) || (cs.bits.rbr_prefetch_par_err)) {
		(void) hpi_rxdma_ring_perr_stat_get(handle,
		    &rdc_stats->errlog.pre_par, &rdc_stats->errlog.sha_par);
	}

	/* RCR shadow-area parity error: fatal. */
	if (cs.bits.rcr_shadow_par_err) {
		rdc_stats->rcr_sha_par++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RCR_SHA_PAR);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_shadow_par_err", channel));
	}

	/* RBR prefetch parity error: fatal. */
	if (cs.bits.rbr_prefetch_par_err) {
		rdc_stats->rbr_pre_par++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RBR_PRE_PAR);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_prefetch_par_err", channel));
	}

	/* RBR prefetch ran dry: fatal. */
	if (cs.bits.rbr_pre_empty) {
		rdc_stats->rbr_pre_empty++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RBR_PRE_EMPTY);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_pre_empty", channel));
	}

	/* PEU (PCI Express Unit) response error: fatal. */
	if (cs.bits.peu_resp_err) {
		rdc_stats->peu_resp_err++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_PEU_RESP_ERR);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: peu_resp_err", channel));
	}

	/* RCR threshold / timeout interrupts: normal events, count only. */
	if (cs.bits.rcr_thres) {
		rdc_stats->rcr_thres++;
	}

	if (cs.bits.rcr_to) {
		rdc_stats->rcr_to++;
	}

	/* RCR shadow area full: fatal. */
	if (cs.bits.rcr_shadow_full) {
		rdc_stats->rcr_shadow_full++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RCR_SHA_FULL);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rcr_shadow_full", channel));
	}

	/* RCR full: fatal. */
	if (cs.bits.rcr_full) {
		rdc_stats->rcrfull++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RCRFULL);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rcrfull error", channel));
	}

	/* RBR empty: non-fatal, handled by the dedicated recovery path. */
	if (cs.bits.rbr_empty) {
		rdc_stats->rbr_empty++;
		hxge_rx_rbr_empty_recover(hxgep, channel);
	}

	/* RBR full: fatal. */
	if (cs.bits.rbr_full) {
		rdc_stats->rbrfull++;
		HXGE_FM_REPORT_ERROR(hxgep, channel,
		    HXGE_FM_EREPORT_RDMC_RBRFULL);
		rxchan_fatal = B_TRUE;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rx_err_evnts(channel %d): "
		    "fatal error: rbr_full error", channel));
	}

	if (rxchan_fatal) {
		p_rx_rcr_ring_t	rcrp;
		p_rx_rbr_ring_t rbrp;

		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
		rbrp = rcrp->rx_rbr_p;

		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_rx_err_evnts: fatal error on Channel #%d\n",
		    channel));

		/*
		 * Recovery must be serialized against the buffer-post
		 * path, hence the post_lock around the call.
		 */
		MUTEX_ENTER(&rbrp->post_lock);
		/* This function needs to be inside the post_lock */
		status = hxge_rxdma_fatal_err_recover(hxgep, channel);
		MUTEX_EXIT(&rbrp->post_lock);
		if (status == HXGE_OK) {
			FM_SERVICE_RESTORED(hxgep);
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_err_evnts"));
	return (status);
}
22526349Sqs148142 
22536349Sqs148142 static hxge_status_t
hxge_map_rxdma(p_hxge_t hxgep)22546349Sqs148142 hxge_map_rxdma(p_hxge_t hxgep)
22556349Sqs148142 {
22566349Sqs148142 	int			i, ndmas;
22576349Sqs148142 	uint16_t		channel;
22586349Sqs148142 	p_rx_rbr_rings_t	rx_rbr_rings;
22596349Sqs148142 	p_rx_rbr_ring_t		*rbr_rings;
22606349Sqs148142 	p_rx_rcr_rings_t	rx_rcr_rings;
22616349Sqs148142 	p_rx_rcr_ring_t		*rcr_rings;
22626349Sqs148142 	p_rx_mbox_areas_t	rx_mbox_areas_p;
22636349Sqs148142 	p_rx_mbox_t		*rx_mbox_p;
22646349Sqs148142 	p_hxge_dma_pool_t	dma_buf_poolp;
22656349Sqs148142 	p_hxge_dma_common_t	*dma_buf_p;
22667618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
22677618SMichael.Speer@Sun.COM 	p_hxge_dma_common_t	*dma_rbr_cntl_p;
22687618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
22697618SMichael.Speer@Sun.COM 	p_hxge_dma_common_t	*dma_rcr_cntl_p;
22707618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
22717618SMichael.Speer@Sun.COM 	p_hxge_dma_common_t	*dma_mbox_cntl_p;
22726349Sqs148142 	uint32_t		*num_chunks;
22736349Sqs148142 	hxge_status_t		status = HXGE_OK;
22746349Sqs148142 
22756349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_map_rxdma"));
22766349Sqs148142 
22776349Sqs148142 	dma_buf_poolp = hxgep->rx_buf_pool_p;
22787618SMichael.Speer@Sun.COM 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
22797618SMichael.Speer@Sun.COM 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
22807618SMichael.Speer@Sun.COM 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
22817618SMichael.Speer@Sun.COM 
22827618SMichael.Speer@Sun.COM 	if (!dma_buf_poolp->buf_allocated ||
22837618SMichael.Speer@Sun.COM 	    !dma_rbr_cntl_poolp->buf_allocated ||
22847618SMichael.Speer@Sun.COM 	    !dma_rcr_cntl_poolp->buf_allocated ||
22857618SMichael.Speer@Sun.COM 	    !dma_mbox_cntl_poolp->buf_allocated) {
22866349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
22876349Sqs148142 		    "<== hxge_map_rxdma: buf not allocated"));
22886349Sqs148142 		return (HXGE_ERROR);
22896349Sqs148142 	}
22906349Sqs148142 
22916349Sqs148142 	ndmas = dma_buf_poolp->ndmas;
22926349Sqs148142 	if (!ndmas) {
22936349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
22946349Sqs148142 		    "<== hxge_map_rxdma: no dma allocated"));
22956349Sqs148142 		return (HXGE_ERROR);
22966349Sqs148142 	}
22976349Sqs148142 
22986349Sqs148142 	num_chunks = dma_buf_poolp->num_chunks;
22996349Sqs148142 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
23007618SMichael.Speer@Sun.COM 	dma_rbr_cntl_p = dma_rbr_cntl_poolp->dma_buf_pool_p;
23017618SMichael.Speer@Sun.COM 	dma_rcr_cntl_p = dma_rcr_cntl_poolp->dma_buf_pool_p;
23027618SMichael.Speer@Sun.COM 	dma_mbox_cntl_p = dma_mbox_cntl_poolp->dma_buf_pool_p;
23037618SMichael.Speer@Sun.COM 
23046349Sqs148142 	rx_rbr_rings = (p_rx_rbr_rings_t)
23056349Sqs148142 	    KMEM_ZALLOC(sizeof (rx_rbr_rings_t), KM_SLEEP);
23066349Sqs148142 	rbr_rings = (p_rx_rbr_ring_t *)KMEM_ZALLOC(
23076349Sqs148142 	    sizeof (p_rx_rbr_ring_t) * ndmas, KM_SLEEP);
23086349Sqs148142 
23096349Sqs148142 	rx_rcr_rings = (p_rx_rcr_rings_t)
23106349Sqs148142 	    KMEM_ZALLOC(sizeof (rx_rcr_rings_t), KM_SLEEP);
23116349Sqs148142 	rcr_rings = (p_rx_rcr_ring_t *)KMEM_ZALLOC(
23126349Sqs148142 	    sizeof (p_rx_rcr_ring_t) * ndmas, KM_SLEEP);
23136349Sqs148142 
23146349Sqs148142 	rx_mbox_areas_p = (p_rx_mbox_areas_t)
23156349Sqs148142 	    KMEM_ZALLOC(sizeof (rx_mbox_areas_t), KM_SLEEP);
23166349Sqs148142 	rx_mbox_p = (p_rx_mbox_t *)KMEM_ZALLOC(
23176349Sqs148142 	    sizeof (p_rx_mbox_t) * ndmas, KM_SLEEP);
23186349Sqs148142 
23196349Sqs148142 	/*
23206349Sqs148142 	 * Timeout should be set based on the system clock divider.
23216349Sqs148142 	 * The following timeout value of 1 assumes that the
23226349Sqs148142 	 * granularity (1000) is 3 microseconds running at 300MHz.
23236349Sqs148142 	 */
23246349Sqs148142 
23256349Sqs148142 	hxgep->intr_threshold = RXDMA_RCR_PTHRES_DEFAULT;
23266349Sqs148142 	hxgep->intr_timeout = RXDMA_RCR_TO_DEFAULT;
23276349Sqs148142 
23286349Sqs148142 	/*
23296349Sqs148142 	 * Map descriptors from the buffer polls for each dam channel.
23306349Sqs148142 	 */
23316349Sqs148142 	for (i = 0; i < ndmas; i++) {
233211257SMichael.Speer@Sun.COM 		if (((p_hxge_dma_common_t)dma_buf_p[i]) == NULL) {
233311257SMichael.Speer@Sun.COM 			status = HXGE_ERROR;
233411257SMichael.Speer@Sun.COM 			goto hxge_map_rxdma_fail1;
233511257SMichael.Speer@Sun.COM 		}
233611257SMichael.Speer@Sun.COM 
23376349Sqs148142 		/*
23386349Sqs148142 		 * Set up and prepare buffer blocks, descriptors and mailbox.
23396349Sqs148142 		 */
23406349Sqs148142 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
23416349Sqs148142 		status = hxge_map_rxdma_channel(hxgep, channel,
23426349Sqs148142 		    (p_hxge_dma_common_t *)&dma_buf_p[i],
23436349Sqs148142 		    (p_rx_rbr_ring_t *)&rbr_rings[i],
23447618SMichael.Speer@Sun.COM 		    num_chunks[i],
23457618SMichael.Speer@Sun.COM 		    (p_hxge_dma_common_t *)&dma_rbr_cntl_p[i],
23467618SMichael.Speer@Sun.COM 		    (p_hxge_dma_common_t *)&dma_rcr_cntl_p[i],
23477618SMichael.Speer@Sun.COM 		    (p_hxge_dma_common_t *)&dma_mbox_cntl_p[i],
23486349Sqs148142 		    (p_rx_rcr_ring_t *)&rcr_rings[i],
23496349Sqs148142 		    (p_rx_mbox_t *)&rx_mbox_p[i]);
23506349Sqs148142 		if (status != HXGE_OK) {
23516349Sqs148142 			goto hxge_map_rxdma_fail1;
23526349Sqs148142 		}
23536349Sqs148142 		rbr_rings[i]->index = (uint16_t)i;
23546349Sqs148142 		rcr_rings[i]->index = (uint16_t)i;
23556349Sqs148142 		rcr_rings[i]->rdc_stats = &hxgep->statsp->rdc_stats[i];
23566349Sqs148142 	}
23576349Sqs148142 
23586349Sqs148142 	rx_rbr_rings->ndmas = rx_rcr_rings->ndmas = ndmas;
23596349Sqs148142 	rx_rbr_rings->rbr_rings = rbr_rings;
23606349Sqs148142 	hxgep->rx_rbr_rings = rx_rbr_rings;
23616349Sqs148142 	rx_rcr_rings->rcr_rings = rcr_rings;
23626349Sqs148142 	hxgep->rx_rcr_rings = rx_rcr_rings;
23636349Sqs148142 
23646349Sqs148142 	rx_mbox_areas_p->rxmbox_areas = rx_mbox_p;
23656349Sqs148142 	hxgep->rx_mbox_areas_p = rx_mbox_areas_p;
23666349Sqs148142 
23676349Sqs148142 	goto hxge_map_rxdma_exit;
23686349Sqs148142 
23696349Sqs148142 hxge_map_rxdma_fail1:
23706349Sqs148142 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23716349Sqs148142 	    "==> hxge_map_rxdma: unmap rbr,rcr (status 0x%x channel %d i %d)",
23726349Sqs148142 	    status, channel, i));
23736349Sqs148142 	i--;
23746349Sqs148142 	for (; i >= 0; i--) {
23756349Sqs148142 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
23766349Sqs148142 		hxge_unmap_rxdma_channel(hxgep, channel,
23776349Sqs148142 		    rbr_rings[i], rcr_rings[i], rx_mbox_p[i]);
23786349Sqs148142 	}
23796349Sqs148142 
23806349Sqs148142 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
23816349Sqs148142 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
23826349Sqs148142 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
23836349Sqs148142 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
23846349Sqs148142 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
23856349Sqs148142 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
23866349Sqs148142 
23876349Sqs148142 hxge_map_rxdma_exit:
23886349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
23896349Sqs148142 	    "<== hxge_map_rxdma: (status 0x%x channel %d)", status, channel));
23906349Sqs148142 
23916349Sqs148142 	return (status);
23926349Sqs148142 }
23936349Sqs148142 
23946349Sqs148142 static void
hxge_unmap_rxdma(p_hxge_t hxgep)23956349Sqs148142 hxge_unmap_rxdma(p_hxge_t hxgep)
23966349Sqs148142 {
23976349Sqs148142 	int			i, ndmas;
23986349Sqs148142 	uint16_t		channel;
23996349Sqs148142 	p_rx_rbr_rings_t	rx_rbr_rings;
24006349Sqs148142 	p_rx_rbr_ring_t		*rbr_rings;
24016349Sqs148142 	p_rx_rcr_rings_t	rx_rcr_rings;
24026349Sqs148142 	p_rx_rcr_ring_t		*rcr_rings;
24036349Sqs148142 	p_rx_mbox_areas_t	rx_mbox_areas_p;
24046349Sqs148142 	p_rx_mbox_t		*rx_mbox_p;
24056349Sqs148142 	p_hxge_dma_pool_t	dma_buf_poolp;
24067618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_rbr_cntl_poolp;
24077618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_rcr_cntl_poolp;
24087618SMichael.Speer@Sun.COM 	p_hxge_dma_pool_t	dma_mbox_cntl_poolp;
24096349Sqs148142 	p_hxge_dma_common_t	*dma_buf_p;
24106349Sqs148142 
24116349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_unmap_rxdma"));
24126349Sqs148142 
24136349Sqs148142 	dma_buf_poolp = hxgep->rx_buf_pool_p;
24147618SMichael.Speer@Sun.COM 	dma_rbr_cntl_poolp = hxgep->rx_rbr_cntl_pool_p;
24157618SMichael.Speer@Sun.COM 	dma_rcr_cntl_poolp = hxgep->rx_rcr_cntl_pool_p;
24167618SMichael.Speer@Sun.COM 	dma_mbox_cntl_poolp = hxgep->rx_mbox_cntl_pool_p;
24177618SMichael.Speer@Sun.COM 
24187618SMichael.Speer@Sun.COM 	if (!dma_buf_poolp->buf_allocated ||
24197618SMichael.Speer@Sun.COM 	    !dma_rbr_cntl_poolp->buf_allocated ||
24207618SMichael.Speer@Sun.COM 	    !dma_rcr_cntl_poolp->buf_allocated ||
24217618SMichael.Speer@Sun.COM 	    !dma_mbox_cntl_poolp->buf_allocated) {
24226349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
24236349Sqs148142 		    "<== hxge_unmap_rxdma: NULL buf pointers"));
24246349Sqs148142 		return;
24256349Sqs148142 	}
24266349Sqs148142 
24276349Sqs148142 	rx_rbr_rings = hxgep->rx_rbr_rings;
24286349Sqs148142 	rx_rcr_rings = hxgep->rx_rcr_rings;
24296349Sqs148142 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
24306349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
24317618SMichael.Speer@Sun.COM 		    "<== hxge_unmap_rxdma: NULL pointers"));
24326349Sqs148142 		return;
24336349Sqs148142 	}
24346349Sqs148142 
24356349Sqs148142 	ndmas = rx_rbr_rings->ndmas;
24366349Sqs148142 	if (!ndmas) {
24376349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
24386349Sqs148142 		    "<== hxge_unmap_rxdma: no channel"));
24396349Sqs148142 		return;
24406349Sqs148142 	}
24416349Sqs148142 
24426349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
24436349Sqs148142 	    "==> hxge_unmap_rxdma (ndmas %d)", ndmas));
24446349Sqs148142 
24456349Sqs148142 	rbr_rings = rx_rbr_rings->rbr_rings;
24466349Sqs148142 	rcr_rings = rx_rcr_rings->rcr_rings;
24476349Sqs148142 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
24486349Sqs148142 	rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
24496349Sqs148142 	dma_buf_p = dma_buf_poolp->dma_buf_pool_p;
24506349Sqs148142 
24516349Sqs148142 	for (i = 0; i < ndmas; i++) {
24526349Sqs148142 		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
24536349Sqs148142 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
24546349Sqs148142 		    "==> hxge_unmap_rxdma (ndmas %d) channel %d",
24556349Sqs148142 		    ndmas, channel));
24566349Sqs148142 		(void) hxge_unmap_rxdma_channel(hxgep, channel,
24576349Sqs148142 		    (p_rx_rbr_ring_t)rbr_rings[i],
24586349Sqs148142 		    (p_rx_rcr_ring_t)rcr_rings[i],
24596349Sqs148142 		    (p_rx_mbox_t)rx_mbox_p[i]);
24606349Sqs148142 	}
24616349Sqs148142 
24626349Sqs148142 	KMEM_FREE(rx_rbr_rings, sizeof (rx_rbr_rings_t));
24636349Sqs148142 	KMEM_FREE(rbr_rings, sizeof (p_rx_rbr_ring_t) * ndmas);
24646349Sqs148142 	KMEM_FREE(rx_rcr_rings, sizeof (rx_rcr_rings_t));
24656349Sqs148142 	KMEM_FREE(rcr_rings, sizeof (p_rx_rcr_ring_t) * ndmas);
24666349Sqs148142 	KMEM_FREE(rx_mbox_areas_p, sizeof (rx_mbox_areas_t));
24676349Sqs148142 	KMEM_FREE(rx_mbox_p, sizeof (p_rx_mbox_t) * ndmas);
24686349Sqs148142 
24696349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma"));
24706349Sqs148142 }
24716349Sqs148142 
24726349Sqs148142 hxge_status_t
hxge_map_rxdma_channel(p_hxge_t hxgep,uint16_t channel,p_hxge_dma_common_t * dma_buf_p,p_rx_rbr_ring_t * rbr_p,uint32_t num_chunks,p_hxge_dma_common_t * dma_rbr_cntl_p,p_hxge_dma_common_t * dma_rcr_cntl_p,p_hxge_dma_common_t * dma_mbox_cntl_p,p_rx_rcr_ring_t * rcr_p,p_rx_mbox_t * rx_mbox_p)24736349Sqs148142 hxge_map_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
24746349Sqs148142     p_hxge_dma_common_t *dma_buf_p, p_rx_rbr_ring_t *rbr_p,
24757618SMichael.Speer@Sun.COM     uint32_t num_chunks, p_hxge_dma_common_t *dma_rbr_cntl_p,
24767618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_rcr_cntl_p, p_hxge_dma_common_t *dma_mbox_cntl_p,
24776349Sqs148142     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
24786349Sqs148142 {
24796349Sqs148142 	int status = HXGE_OK;
24806349Sqs148142 
24816349Sqs148142 	/*
24826349Sqs148142 	 * Set up and prepare buffer blocks, descriptors and mailbox.
24836349Sqs148142 	 */
24846349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
24856349Sqs148142 	    "==> hxge_map_rxdma_channel (channel %d)", channel));
24866349Sqs148142 
24876349Sqs148142 	/*
24886349Sqs148142 	 * Receive buffer blocks
24896349Sqs148142 	 */
24906349Sqs148142 	status = hxge_map_rxdma_channel_buf_ring(hxgep, channel,
24916349Sqs148142 	    dma_buf_p, rbr_p, num_chunks);
24926349Sqs148142 	if (status != HXGE_OK) {
24936349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
24946349Sqs148142 		    "==> hxge_map_rxdma_channel (channel %d): "
24956349Sqs148142 		    "map buffer failed 0x%x", channel, status));
24966349Sqs148142 		goto hxge_map_rxdma_channel_exit;
24976349Sqs148142 	}
24986349Sqs148142 
24996349Sqs148142 	/*
25006349Sqs148142 	 * Receive block ring, completion ring and mailbox.
25016349Sqs148142 	 */
25026349Sqs148142 	status = hxge_map_rxdma_channel_cfg_ring(hxgep, channel,
25037618SMichael.Speer@Sun.COM 	    dma_rbr_cntl_p, dma_rcr_cntl_p, dma_mbox_cntl_p,
25047618SMichael.Speer@Sun.COM 	    rbr_p, rcr_p, rx_mbox_p);
25056349Sqs148142 	if (status != HXGE_OK) {
25066349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25076349Sqs148142 		    "==> hxge_map_rxdma_channel (channel %d): "
25086349Sqs148142 		    "map config failed 0x%x", channel, status));
25096349Sqs148142 		goto hxge_map_rxdma_channel_fail2;
25106349Sqs148142 	}
25116349Sqs148142 	goto hxge_map_rxdma_channel_exit;
25126349Sqs148142 
25136349Sqs148142 hxge_map_rxdma_channel_fail3:
25146349Sqs148142 	/* Free rbr, rcr */
25156349Sqs148142 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25166349Sqs148142 	    "==> hxge_map_rxdma_channel: free rbr/rcr (status 0x%x channel %d)",
25176349Sqs148142 	    status, channel));
25186349Sqs148142 	hxge_unmap_rxdma_channel_cfg_ring(hxgep, *rcr_p, *rx_mbox_p);
25196349Sqs148142 
25206349Sqs148142 hxge_map_rxdma_channel_fail2:
25216349Sqs148142 	/* Free buffer blocks */
25226349Sqs148142 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
25236349Sqs148142 	    "==> hxge_map_rxdma_channel: free rx buffers"
25246349Sqs148142 	    "(hxgep 0x%x status 0x%x channel %d)",
25256349Sqs148142 	    hxgep, status, channel));
25266349Sqs148142 	hxge_unmap_rxdma_channel_buf_ring(hxgep, *rbr_p);
25276349Sqs148142 
25286349Sqs148142 	status = HXGE_ERROR;
25296349Sqs148142 
25306349Sqs148142 hxge_map_rxdma_channel_exit:
25316349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
25326349Sqs148142 	    "<== hxge_map_rxdma_channel: (hxgep 0x%x status 0x%x channel %d)",
25336349Sqs148142 	    hxgep, status, channel));
25346349Sqs148142 
25356349Sqs148142 	return (status);
25366349Sqs148142 }
25376349Sqs148142 
/*ARGSUSED*/
/*
 * Unmap one receive DMA channel.  Inverse of hxge_map_rxdma_channel():
 * releases the block ring, completion ring and mailbox first, then the
 * receive buffer blocks.
 */
static void
hxge_unmap_rxdma_channel(p_hxge_t hxgep, uint16_t channel,
    p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
{
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "==> hxge_unmap_rxdma_channel (channel %d)", channel));

	/*
	 * unmap receive block ring, completion ring and mailbox.
	 */
	(void) hxge_unmap_rxdma_channel_cfg_ring(hxgep, rcr_p, rx_mbox_p);

	/* unmap buffer blocks */
	(void) hxge_unmap_rxdma_channel_buf_ring(hxgep, rbr_p);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_unmap_rxdma_channel"));
}
25566349Sqs148142 
25576349Sqs148142 /*ARGSUSED*/
25586349Sqs148142 static hxge_status_t
hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep,uint16_t dma_channel,p_hxge_dma_common_t * dma_rbr_cntl_p,p_hxge_dma_common_t * dma_rcr_cntl_p,p_hxge_dma_common_t * dma_mbox_cntl_p,p_rx_rbr_ring_t * rbr_p,p_rx_rcr_ring_t * rcr_p,p_rx_mbox_t * rx_mbox_p)25596349Sqs148142 hxge_map_rxdma_channel_cfg_ring(p_hxge_t hxgep, uint16_t dma_channel,
25607618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_rbr_cntl_p, p_hxge_dma_common_t *dma_rcr_cntl_p,
25617618SMichael.Speer@Sun.COM     p_hxge_dma_common_t *dma_mbox_cntl_p, p_rx_rbr_ring_t *rbr_p,
25626349Sqs148142     p_rx_rcr_ring_t *rcr_p, p_rx_mbox_t *rx_mbox_p)
25636349Sqs148142 {
25646349Sqs148142 	p_rx_rbr_ring_t 	rbrp;
25656349Sqs148142 	p_rx_rcr_ring_t 	rcrp;
25666349Sqs148142 	p_rx_mbox_t 		mboxp;
25676349Sqs148142 	p_hxge_dma_common_t 	cntl_dmap;
25686349Sqs148142 	p_hxge_dma_common_t 	dmap;
25696349Sqs148142 	p_rx_msg_t 		*rx_msg_ring;
25706349Sqs148142 	p_rx_msg_t 		rx_msg_p;
25716349Sqs148142 	rdc_rbr_cfg_a_t		*rcfga_p;
25726349Sqs148142 	rdc_rbr_cfg_b_t		*rcfgb_p;
25736349Sqs148142 	rdc_rcr_cfg_a_t		*cfga_p;
25746349Sqs148142 	rdc_rcr_cfg_b_t		*cfgb_p;
25756349Sqs148142 	rdc_rx_cfg1_t		*cfig1_p;
25766349Sqs148142 	rdc_rx_cfg2_t		*cfig2_p;
25776349Sqs148142 	rdc_rbr_kick_t		*kick_p;
25786349Sqs148142 	uint32_t		dmaaddrp;
25796349Sqs148142 	uint32_t		*rbr_vaddrp;
25806349Sqs148142 	uint32_t		bkaddr;
25816349Sqs148142 	hxge_status_t		status = HXGE_OK;
25826349Sqs148142 	int			i;
25836349Sqs148142 	uint32_t 		hxge_port_rcr_size;
25846349Sqs148142 
25856349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
25866349Sqs148142 	    "==> hxge_map_rxdma_channel_cfg_ring"));
25876349Sqs148142 
25887618SMichael.Speer@Sun.COM 	cntl_dmap = *dma_rbr_cntl_p;
25897618SMichael.Speer@Sun.COM 
25907618SMichael.Speer@Sun.COM 	/*
25917618SMichael.Speer@Sun.COM 	 * Map in the receive block ring
25927618SMichael.Speer@Sun.COM 	 */
25936349Sqs148142 	rbrp = *rbr_p;
25946349Sqs148142 	dmap = (p_hxge_dma_common_t)&rbrp->rbr_desc;
25956349Sqs148142 	hxge_setup_dma_common(dmap, cntl_dmap, rbrp->rbb_max, 4);
25966349Sqs148142 
25976349Sqs148142 	/*
25986349Sqs148142 	 * Zero out buffer block ring descriptors.
25996349Sqs148142 	 */
26006349Sqs148142 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
26016349Sqs148142 
26026349Sqs148142 	rcfga_p = &(rbrp->rbr_cfga);
26036349Sqs148142 	rcfgb_p = &(rbrp->rbr_cfgb);
26046349Sqs148142 	kick_p = &(rbrp->rbr_kick);
26056349Sqs148142 	rcfga_p->value = 0;
26066349Sqs148142 	rcfgb_p->value = 0;
26076349Sqs148142 	kick_p->value = 0;
26086349Sqs148142 	rbrp->rbr_addr = dmap->dma_cookie.dmac_laddress;
26096349Sqs148142 	rcfga_p->value = (rbrp->rbr_addr &
26106349Sqs148142 	    (RBR_CFIG_A_STDADDR_MASK | RBR_CFIG_A_STDADDR_BASE_MASK));
26116349Sqs148142 	rcfga_p->value |= ((uint64_t)rbrp->rbb_max << RBR_CFIG_A_LEN_SHIFT);
26126349Sqs148142 
26136349Sqs148142 	/* XXXX: how to choose packet buffer sizes */
26146349Sqs148142 	rcfgb_p->bits.bufsz0 = rbrp->pkt_buf_size0;
26156349Sqs148142 	rcfgb_p->bits.vld0 = 1;
26166349Sqs148142 	rcfgb_p->bits.bufsz1 = rbrp->pkt_buf_size1;
26176349Sqs148142 	rcfgb_p->bits.vld1 = 1;
26186349Sqs148142 	rcfgb_p->bits.bufsz2 = rbrp->pkt_buf_size2;
26196349Sqs148142 	rcfgb_p->bits.vld2 = 1;
26206349Sqs148142 	rcfgb_p->bits.bksize = hxgep->rx_bksize_code;
26216349Sqs148142 
26226349Sqs148142 	/*
26236349Sqs148142 	 * For each buffer block, enter receive block address to the ring.
26246349Sqs148142 	 */
26256349Sqs148142 	rbr_vaddrp = (uint32_t *)dmap->kaddrp;
26266349Sqs148142 	rbrp->rbr_desc_vp = (uint32_t *)dmap->kaddrp;
26276349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
26286349Sqs148142 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
26296349Sqs148142 	    "rbr_vaddrp $%p", dma_channel, rbr_vaddrp));
26306349Sqs148142 
26316349Sqs148142 	rx_msg_ring = rbrp->rx_msg_ring;
26326349Sqs148142 	for (i = 0; i < rbrp->tnblocks; i++) {
26336349Sqs148142 		rx_msg_p = rx_msg_ring[i];
26346349Sqs148142 		rx_msg_p->hxgep = hxgep;
26356349Sqs148142 		rx_msg_p->rx_rbr_p = rbrp;
26366349Sqs148142 		bkaddr = (uint32_t)
26376349Sqs148142 		    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
26386349Sqs148142 		    RBR_BKADDR_SHIFT));
26396349Sqs148142 		rx_msg_p->free = B_FALSE;
26406349Sqs148142 		rx_msg_p->max_usage_cnt = 0xbaddcafe;
26416349Sqs148142 
26426349Sqs148142 		*rbr_vaddrp++ = bkaddr;
26436349Sqs148142 	}
26446349Sqs148142 
26456349Sqs148142 	kick_p->bits.bkadd = rbrp->rbb_max;
26466349Sqs148142 	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
26476349Sqs148142 
26486349Sqs148142 	rbrp->rbr_rd_index = 0;
26496349Sqs148142 
26506349Sqs148142 	rbrp->rbr_consumed = 0;
26518366SQiyan.Sun@Sun.COM 	rbrp->rbr_used = 0;
26526349Sqs148142 	rbrp->rbr_use_bcopy = B_TRUE;
26536349Sqs148142 	rbrp->rbr_bufsize_type = RCR_PKTBUFSZ_0;
26546349Sqs148142 
26556349Sqs148142 	/*
26566349Sqs148142 	 * Do bcopy on packets greater than bcopy size once the lo threshold is
26576349Sqs148142 	 * reached. This lo threshold should be less than the hi threshold.
26586349Sqs148142 	 *
26596349Sqs148142 	 * Do bcopy on every packet once the hi threshold is reached.
26606349Sqs148142 	 */
26616349Sqs148142 	if (hxge_rx_threshold_lo >= hxge_rx_threshold_hi) {
26626349Sqs148142 		/* default it to use hi */
26636349Sqs148142 		hxge_rx_threshold_lo = hxge_rx_threshold_hi;
26646349Sqs148142 	}
26656349Sqs148142 	if (hxge_rx_buf_size_type > HXGE_RBR_TYPE2) {
26666349Sqs148142 		hxge_rx_buf_size_type = HXGE_RBR_TYPE2;
26676349Sqs148142 	}
26686349Sqs148142 	rbrp->rbr_bufsize_type = hxge_rx_buf_size_type;
26696349Sqs148142 
26706349Sqs148142 	switch (hxge_rx_threshold_hi) {
26716349Sqs148142 	default:
26726349Sqs148142 	case HXGE_RX_COPY_NONE:
26736349Sqs148142 		/* Do not do bcopy at all */
26746349Sqs148142 		rbrp->rbr_use_bcopy = B_FALSE;
26756349Sqs148142 		rbrp->rbr_threshold_hi = rbrp->rbb_max;
26766349Sqs148142 		break;
26776349Sqs148142 
26786349Sqs148142 	case HXGE_RX_COPY_1:
26796349Sqs148142 	case HXGE_RX_COPY_2:
26806349Sqs148142 	case HXGE_RX_COPY_3:
26816349Sqs148142 	case HXGE_RX_COPY_4:
26826349Sqs148142 	case HXGE_RX_COPY_5:
26836349Sqs148142 	case HXGE_RX_COPY_6:
26846349Sqs148142 	case HXGE_RX_COPY_7:
26856349Sqs148142 		rbrp->rbr_threshold_hi =
26866349Sqs148142 		    rbrp->rbb_max * (hxge_rx_threshold_hi) /
26876349Sqs148142 		    HXGE_RX_BCOPY_SCALE;
26886349Sqs148142 		break;
26896349Sqs148142 
26906349Sqs148142 	case HXGE_RX_COPY_ALL:
26916349Sqs148142 		rbrp->rbr_threshold_hi = 0;
26926349Sqs148142 		break;
26936349Sqs148142 	}
26946349Sqs148142 
26956349Sqs148142 	switch (hxge_rx_threshold_lo) {
26966349Sqs148142 	default:
26976349Sqs148142 	case HXGE_RX_COPY_NONE:
26986349Sqs148142 		/* Do not do bcopy at all */
26996349Sqs148142 		if (rbrp->rbr_use_bcopy) {
27006349Sqs148142 			rbrp->rbr_use_bcopy = B_FALSE;
27016349Sqs148142 		}
27026349Sqs148142 		rbrp->rbr_threshold_lo = rbrp->rbb_max;
27036349Sqs148142 		break;
27046349Sqs148142 
27056349Sqs148142 	case HXGE_RX_COPY_1:
27066349Sqs148142 	case HXGE_RX_COPY_2:
27076349Sqs148142 	case HXGE_RX_COPY_3:
27086349Sqs148142 	case HXGE_RX_COPY_4:
27096349Sqs148142 	case HXGE_RX_COPY_5:
27106349Sqs148142 	case HXGE_RX_COPY_6:
27116349Sqs148142 	case HXGE_RX_COPY_7:
27126349Sqs148142 		rbrp->rbr_threshold_lo =
27136349Sqs148142 		    rbrp->rbb_max * (hxge_rx_threshold_lo) /
27146349Sqs148142 		    HXGE_RX_BCOPY_SCALE;
27156349Sqs148142 		break;
27166349Sqs148142 
27176349Sqs148142 	case HXGE_RX_COPY_ALL:
27186349Sqs148142 		rbrp->rbr_threshold_lo = 0;
27196349Sqs148142 		break;
27206349Sqs148142 	}
27216349Sqs148142 
27226349Sqs148142 	HXGE_DEBUG_MSG((hxgep, RX_CTL,
27236349Sqs148142 	    "hxge_map_rxdma_channel_cfg_ring: channel %d rbb_max %d "
27246349Sqs148142 	    "rbrp->rbr_bufsize_type %d rbb_threshold_hi %d "
27256349Sqs148142 	    "rbb_threshold_lo %d",
27266349Sqs148142 	    dma_channel, rbrp->rbb_max, rbrp->rbr_bufsize_type,
27276349Sqs148142 	    rbrp->rbr_threshold_hi, rbrp->rbr_threshold_lo));
27286349Sqs148142 
27296349Sqs148142 	/* Map in the receive completion ring */
27306349Sqs148142 	rcrp = (p_rx_rcr_ring_t)KMEM_ZALLOC(sizeof (rx_rcr_ring_t), KM_SLEEP);
273110091SMichael.Speer@Sun.COM 	MUTEX_INIT(&rcrp->lock, NULL, MUTEX_DRIVER,
273210091SMichael.Speer@Sun.COM 	    (void *) hxgep->interrupt_cookie);
27336349Sqs148142 	rcrp->rdc = dma_channel;
27347959SMichael.Speer@Sun.COM 	rcrp->hxgep = hxgep;
27356349Sqs148142 
27366349Sqs148142 	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
27376349Sqs148142 	rcrp->comp_size = hxge_port_rcr_size;
27386349Sqs148142 	rcrp->comp_wrap_mask = hxge_port_rcr_size - 1;
27396349Sqs148142 
27407618SMichael.Speer@Sun.COM 	cntl_dmap = *dma_rcr_cntl_p;
27417618SMichael.Speer@Sun.COM 
27426349Sqs148142 	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
27436349Sqs148142 	hxge_setup_dma_common(dmap, cntl_dmap, rcrp->comp_size,
27446349Sqs148142 	    sizeof (rcr_entry_t));
27456349Sqs148142 	rcrp->comp_rd_index = 0;
27466349Sqs148142 	rcrp->comp_wt_index = 0;
27476349Sqs148142 	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
27486349Sqs148142 	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
27496864Sqs148142 #if defined(__i386)
27506864Sqs148142 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
27516864Sqs148142 	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
27526864Sqs148142 #else
27536349Sqs148142 	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
27546349Sqs148142 	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
27556864Sqs148142 #endif
27566349Sqs148142 	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
27576349Sqs148142 	    (hxge_port_rcr_size - 1);
27586349Sqs148142 	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
27596349Sqs148142 	    (hxge_port_rcr_size - 1);
27606349Sqs148142 
27617584SQiyan.Sun@Sun.COM 	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
27627584SQiyan.Sun@Sun.COM 	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;
27637584SQiyan.Sun@Sun.COM 
27646349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
27656349Sqs148142 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d "
27666349Sqs148142 	    "rbr_vaddrp $%p rcr_desc_rd_head_p $%p "
27676349Sqs148142 	    "rcr_desc_rd_head_pp $%p rcr_desc_rd_last_p $%p "
27686349Sqs148142 	    "rcr_desc_rd_last_pp $%p ",
27696349Sqs148142 	    dma_channel, rbr_vaddrp, rcrp->rcr_desc_rd_head_p,
27706349Sqs148142 	    rcrp->rcr_desc_rd_head_pp, rcrp->rcr_desc_last_p,
27716349Sqs148142 	    rcrp->rcr_desc_last_pp));
27726349Sqs148142 
27736349Sqs148142 	/*
27746349Sqs148142 	 * Zero out buffer block ring descriptors.
27756349Sqs148142 	 */
27766349Sqs148142 	bzero((caddr_t)dmap->kaddrp, dmap->alength);
27776349Sqs148142 	rcrp->intr_timeout = hxgep->intr_timeout;
27786349Sqs148142 	rcrp->intr_threshold = hxgep->intr_threshold;
27796349Sqs148142 	rcrp->full_hdr_flag = B_FALSE;
27806349Sqs148142 	rcrp->sw_priv_hdr_len = 0;
27816349Sqs148142 
27826349Sqs148142 	cfga_p = &(rcrp->rcr_cfga);
27836349Sqs148142 	cfgb_p = &(rcrp->rcr_cfgb);
27846349Sqs148142 	cfga_p->value = 0;
27856349Sqs148142 	cfgb_p->value = 0;
27866349Sqs148142 	rcrp->rcr_addr = dmap->dma_cookie.dmac_laddress;
27876349Sqs148142 
27886349Sqs148142 	cfga_p->value = (rcrp->rcr_addr &
27896349Sqs148142 	    (RCRCFIG_A_STADDR_MASK | RCRCFIG_A_STADDR_BASE_MASK));
27906349Sqs148142 
27916349Sqs148142 	cfga_p->value |= ((uint64_t)rcrp->comp_size << RCRCFIG_A_LEN_SHIF);
27926349Sqs148142 
27936349Sqs148142 	/*
27946349Sqs148142 	 * Timeout should be set based on the system clock divider. The
27956349Sqs148142 	 * following timeout value of 1 assumes that the granularity (1000) is
27966349Sqs148142 	 * 3 microseconds running at 300MHz.
27976349Sqs148142 	 */
27986349Sqs148142 	cfgb_p->bits.pthres = rcrp->intr_threshold;
27996349Sqs148142 	cfgb_p->bits.timeout = rcrp->intr_timeout;
28006349Sqs148142 	cfgb_p->bits.entout = 1;
28016349Sqs148142 
28026349Sqs148142 	/* Map in the mailbox */
28037618SMichael.Speer@Sun.COM 	cntl_dmap = *dma_mbox_cntl_p;
28046349Sqs148142 	mboxp = (p_rx_mbox_t)KMEM_ZALLOC(sizeof (rx_mbox_t), KM_SLEEP);
28056349Sqs148142 	dmap = (p_hxge_dma_common_t)&mboxp->rx_mbox;
28066349Sqs148142 	hxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (rxdma_mailbox_t));
28076349Sqs148142 	cfig1_p = (rdc_rx_cfg1_t *)&mboxp->rx_cfg1;
28086349Sqs148142 	cfig2_p = (rdc_rx_cfg2_t *)&mboxp->rx_cfg2;
28096349Sqs148142 	cfig1_p->value = cfig2_p->value = 0;
28106349Sqs148142 
28116349Sqs148142 	mboxp->mbox_addr = dmap->dma_cookie.dmac_laddress;
28126349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
28136349Sqs148142 	    "==> hxge_map_rxdma_channel_cfg_ring: "
28146349Sqs148142 	    "channel %d cfg1 0x%016llx cfig2 0x%016llx cookie 0x%016llx",
28156349Sqs148142 	    dma_channel, cfig1_p->value, cfig2_p->value,
28166349Sqs148142 	    mboxp->mbox_addr));
28176349Sqs148142 
28186349Sqs148142 	dmaaddrp = (uint32_t)((dmap->dma_cookie.dmac_laddress >> 32) & 0xfff);
28196349Sqs148142 	cfig1_p->bits.mbaddr_h = dmaaddrp;
28206349Sqs148142 
28216349Sqs148142 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress & 0xffffffff);
28226349Sqs148142 	dmaaddrp = (uint32_t)(dmap->dma_cookie.dmac_laddress &
28236349Sqs148142 	    RXDMA_CFIG2_MBADDR_L_MASK);
28246349Sqs148142 
28256349Sqs148142 	cfig2_p->bits.mbaddr_l = (dmaaddrp >> RXDMA_CFIG2_MBADDR_L_SHIFT);
28266349Sqs148142 
28276349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
28286349Sqs148142 	    "==> hxge_map_rxdma_channel_cfg_ring: channel %d damaddrp $%p "
28296349Sqs148142 	    "cfg1 0x%016llx cfig2 0x%016llx",
28306349Sqs148142 	    dma_channel, dmaaddrp, cfig1_p->value, cfig2_p->value));
28316349Sqs148142 
28326349Sqs148142 	cfig2_p->bits.full_hdr = rcrp->full_hdr_flag;
28336349Sqs148142 	cfig2_p->bits.offset = rcrp->sw_priv_hdr_len;
28346349Sqs148142 
28356349Sqs148142 	rbrp->rx_rcr_p = rcrp;
28366349Sqs148142 	rcrp->rx_rbr_p = rbrp;
28376349Sqs148142 	*rcr_p = rcrp;
28386349Sqs148142 	*rx_mbox_p = mboxp;
28396349Sqs148142 
28406349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
28416349Sqs148142 	    "<== hxge_map_rxdma_channel_cfg_ring status 0x%08x", status));
28426349Sqs148142 	return (status);
28436349Sqs148142 }
28446349Sqs148142 
28456349Sqs148142 /*ARGSUSED*/
28466349Sqs148142 static void
hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t rx_mbox_p)28476349Sqs148142 hxge_unmap_rxdma_channel_cfg_ring(p_hxge_t hxgep,
28486349Sqs148142     p_rx_rcr_ring_t rcr_p, p_rx_mbox_t rx_mbox_p)
28496349Sqs148142 {
28506349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
28516349Sqs148142 	    "==> hxge_unmap_rxdma_channel_cfg_ring: channel %d", rcr_p->rdc));
28526349Sqs148142 
285310091SMichael.Speer@Sun.COM 	MUTEX_DESTROY(&rcr_p->lock);
28546349Sqs148142 	KMEM_FREE(rcr_p, sizeof (rx_rcr_ring_t));
28556349Sqs148142 	KMEM_FREE(rx_mbox_p, sizeof (rx_mbox_t));
28566349Sqs148142 
28576349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
28586349Sqs148142 	    "<== hxge_unmap_rxdma_channel_cfg_ring"));
28596349Sqs148142 }
28606349Sqs148142 
/*
 * hxge_map_rxdma_channel_buf_ring()
 *
 * Build the software receive-buffer ring (RBR) state for one RDC channel
 * from the pre-allocated DMA buffer chunks in <dma_buf_p>:
 *  - allocate the rx_rbr_ring_t, the rx_msg_t pointer ring and ring_info,
 *  - allocate an mblk (hxge_allocb) for every block of every chunk and
 *    record its shifted DMA address,
 *  - hand the finished ring back through <rbr_p>.
 *
 * Returns HXGE_OK on success; HXGE_ERROR if there are no message blocks
 * or any allocation fails (everything built so far is unwound).
 */
static hxge_status_t
hxge_map_rxdma_channel_buf_ring(p_hxge_t hxgep, uint16_t channel,
    p_hxge_dma_common_t *dma_buf_p,
    p_rx_rbr_ring_t *rbr_p, uint32_t num_chunks)
{
	p_rx_rbr_ring_t		rbrp;
	p_hxge_dma_common_t	dma_bufp, tmp_bufp;
	p_rx_msg_t		*rx_msg_ring;
	p_rx_msg_t		rx_msg_p;
	p_mblk_t		mblk_p;

	rxring_info_t *ring_info;
	hxge_status_t status = HXGE_OK;
	int i, j, index;
	uint32_t size, bsize, nblocks, nmsgs;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "==> hxge_map_rxdma_channel_buf_ring: channel %d", channel));

	dma_bufp = tmp_bufp = *dma_buf_p;
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " hxge_map_rxdma_channel_buf_ring: channel %d to map %d "
	    "chunks bufp 0x%016llx", channel, num_chunks, dma_bufp));

	/* Total message blocks across all chunks = ring size. */
	nmsgs = 0;
	for (i = 0; i < num_chunks; i++, tmp_bufp++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
		    "bufp 0x%016llx nblocks %d nmsgs %d",
		    channel, tmp_bufp, tmp_bufp->nblocks, nmsgs));
		nmsgs += tmp_bufp->nblocks;
	}
	if (!nmsgs) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_map_rxdma_channel_buf_ring: channel %d "
		    "no msg blocks", channel));
		status = HXGE_ERROR;
		goto hxge_map_rxdma_channel_buf_ring_exit;
	}
	rbrp = (p_rx_rbr_ring_t)KMEM_ZALLOC(sizeof (rx_rbr_ring_t), KM_SLEEP);

	size = nmsgs * sizeof (p_rx_msg_t);
	rx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP);
	ring_info = (rxring_info_t *)KMEM_ZALLOC(sizeof (rxring_info_t),
	    KM_SLEEP);

	/* Both locks are taken from interrupt context; pass the cookie. */
	MUTEX_INIT(&rbrp->lock, NULL, MUTEX_DRIVER,
	    (void *) hxgep->interrupt_cookie);
	MUTEX_INIT(&rbrp->post_lock, NULL, MUTEX_DRIVER,
	    (void *) hxgep->interrupt_cookie);

	rbrp->rdc = channel;
	rbrp->num_blocks = num_chunks;
	rbrp->tnblocks = nmsgs;
	rbrp->rbb_max = nmsgs;
	rbrp->rbr_max_size = nmsgs;
	/* nmsgs is expected to be a power of two for this mask to work. */
	rbrp->rbr_wrap_mask = (rbrp->rbb_max - 1);

	/*
	 * Buffer sizes: 256, 1K, and 2K.
	 *
	 * Blk 0 size.
	 */
	rbrp->pkt_buf_size0 = RBR_BUFSZ0_256B;
	rbrp->pkt_buf_size0_bytes = RBR_BUFSZ0_256_BYTES;
	rbrp->hpi_pkt_buf_size0 = SIZE_256B;

	/*
	 * Blk 1 size.
	 */
	rbrp->pkt_buf_size1 = RBR_BUFSZ1_1K;
	rbrp->pkt_buf_size1_bytes = RBR_BUFSZ1_1K_BYTES;
	rbrp->hpi_pkt_buf_size1 = SIZE_1KB;

	/*
	 * Blk 2 size.
	 */
	rbrp->pkt_buf_size2 = RBR_BUFSZ2_2K;
	rbrp->pkt_buf_size2_bytes = RBR_BUFSZ2_2K_BYTES;
	rbrp->hpi_pkt_buf_size2 = SIZE_2KB;

	rbrp->block_size = hxgep->rx_default_block_size;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "==> hxge_map_rxdma_channel_buf_ring: channel %d "
	    "actual rbr max %d rbb_max %d nmsgs %d "
	    "rbrp->block_size %d default_block_size %d "
	    "(config hxge_rbr_size %d hxge_rbr_spare_size %d)",
	    channel, rbrp->rbr_max_size, rbrp->rbb_max, nmsgs,
	    rbrp->block_size, hxgep->rx_default_block_size,
	    hxge_rbr_size, hxge_rbr_spare_size));

	/*
	 * Map in buffers from the buffer pool.
	 * Note that num_blocks is the num_chunks. For Sparc, there is likely
	 * only one chunk. For x86, there will be many chunks.
	 * Loop over chunks.
	 */
	index = 0;
	for (i = 0; i < rbrp->num_blocks; i++, dma_bufp++) {
		bsize = dma_bufp->block_size;
		nblocks = dma_bufp->nblocks;
#if defined(__i386)
		ring_info->buffer[i].dvma_addr = (uint32_t)dma_bufp->ioaddr_pp;
#else
		ring_info->buffer[i].dvma_addr = (uint64_t)dma_bufp->ioaddr_pp;
#endif
		ring_info->buffer[i].buf_index = i;
		ring_info->buffer[i].buf_size = dma_bufp->alength;
		/* First rx_msg_ring slot covered by this chunk. */
		ring_info->buffer[i].start_index = index;
#if defined(__i386)
		ring_info->buffer[i].kaddr = (uint32_t)dma_bufp->kaddrp;
#else
		ring_info->buffer[i].kaddr = (uint64_t)dma_bufp->kaddrp;
#endif

		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_map_rxdma_channel_buf_ring: map channel %d "
		    "chunk %d nblocks %d chunk_size %x block_size 0x%x "
		    "dma_bufp $%p dvma_addr $%p", channel, i,
		    dma_bufp->nblocks,
		    ring_info->buffer[i].buf_size, bsize, dma_bufp,
		    ring_info->buffer[i].dvma_addr));

		/* loop over blocks within a chunk */
		for (j = 0; j < nblocks; j++) {
			if ((rx_msg_p = hxge_allocb(bsize, BPRI_LO,
			    dma_bufp)) == NULL) {
				HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
				    "allocb failed (index %d i %d j %d)",
				    index, i, j));
				goto hxge_map_rxdma_channel_buf_ring_fail1;
			}
			rx_msg_ring[index] = rx_msg_p;
			rx_msg_p->block_index = index;
			/*
			 * The hardware ring holds block addresses shifted
			 * right by RBR_BKADDR_SHIFT; precompute that form.
			 */
			rx_msg_p->shifted_addr = (uint32_t)
			    ((rx_msg_p->buf_dma.dma_cookie.dmac_laddress >>
			    RBR_BKADDR_SHIFT));
			/*
			 * Too much output
			 * HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
			 *	"index %d j %d rx_msg_p $%p mblk %p",
			 *	index, j, rx_msg_p, rx_msg_p->rx_mblk_p));
			 */
			mblk_p = rx_msg_p->rx_mblk_p;
			mblk_p->b_wptr = mblk_p->b_rptr + bsize;

			/* One reference per outstanding buffer block. */
			rbrp->rbr_ref_cnt++;
			index++;
			rx_msg_p->buf_dma.dma_channel = channel;
		}
	}
	/* Defensive: should be unreachable when the loop above completes. */
	if (i < rbrp->num_blocks) {
		goto hxge_map_rxdma_channel_buf_ring_fail1;
	}
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "hxge_map_rxdma_channel_buf_ring: done buf init "
	    "channel %d msg block entries %d", channel, index));
	ring_info->block_size_mask = bsize - 1;
	rbrp->rx_msg_ring = rx_msg_ring;
	rbrp->dma_bufp = dma_buf_p;
	rbrp->ring_info = ring_info;

	status = hxge_rxbuf_index_info_init(hxgep, rbrp);
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, " hxge_map_rxdma_channel_buf_ring: "
	    "channel %d done buf info init", channel));

	/*
	 * Finally, permit hxge_freeb() to call hxge_post_page().
	 */
	rbrp->rbr_state = RBR_POSTING;

	*rbr_p = rbrp;

	goto hxge_map_rxdma_channel_buf_ring_exit;

hxge_map_rxdma_channel_buf_ring_fail1:
	/*
	 * NOTE(review): format has one %x but is passed both channel and
	 * status — confirm; debug-only, harmless at runtime.
	 */
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    " hxge_map_rxdma_channel_buf_ring: failed channel (0x%x)",
	    channel, status));

	/* Unwind: free every mblk allocated so far, newest first. */
	index--;
	for (; index >= 0; index--) {
		rx_msg_p = rx_msg_ring[index];
		if (rx_msg_p != NULL) {
			freeb(rx_msg_p->rx_mblk_p);
			rx_msg_ring[index] = NULL;
		}
	}

hxge_map_rxdma_channel_buf_ring_fail:
	MUTEX_DESTROY(&rbrp->post_lock);
	MUTEX_DESTROY(&rbrp->lock);
	KMEM_FREE(ring_info, sizeof (rxring_info_t));
	KMEM_FREE(rx_msg_ring, size);
	KMEM_FREE(rbrp, sizeof (rx_rbr_ring_t));

	status = HXGE_ERROR;

hxge_map_rxdma_channel_buf_ring_exit:
	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
	    "<== hxge_map_rxdma_channel_buf_ring status 0x%08x", status));

	return (status);
}
30666349Sqs148142 
30676349Sqs148142 /*ARGSUSED*/
30686349Sqs148142 static void
hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,p_rx_rbr_ring_t rbr_p)30696349Sqs148142 hxge_unmap_rxdma_channel_buf_ring(p_hxge_t hxgep,
30706349Sqs148142     p_rx_rbr_ring_t rbr_p)
30716349Sqs148142 {
30726349Sqs148142 	p_rx_msg_t	*rx_msg_ring;
30736349Sqs148142 	p_rx_msg_t	rx_msg_p;
30746349Sqs148142 	rxring_info_t	*ring_info;
30756349Sqs148142 	int		i;
30766349Sqs148142 	uint32_t	size;
30776349Sqs148142 
30786349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
30796349Sqs148142 	    "==> hxge_unmap_rxdma_channel_buf_ring"));
30806349Sqs148142 	if (rbr_p == NULL) {
30816349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
30826349Sqs148142 		    "<== hxge_unmap_rxdma_channel_buf_ring: NULL rbrp"));
30836349Sqs148142 		return;
30846349Sqs148142 	}
30856349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
30866349Sqs148142 	    "==> hxge_unmap_rxdma_channel_buf_ring: channel %d", rbr_p->rdc));
30876349Sqs148142 
30886349Sqs148142 	rx_msg_ring = rbr_p->rx_msg_ring;
30896349Sqs148142 	ring_info = rbr_p->ring_info;
30906349Sqs148142 
30916349Sqs148142 	if (rx_msg_ring == NULL || ring_info == NULL) {
30926349Sqs148142 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
30936349Sqs148142 		    "<== hxge_unmap_rxdma_channel_buf_ring: "
30946349Sqs148142 		    "rx_msg_ring $%p ring_info $%p", rx_msg_p, ring_info));
30956349Sqs148142 		return;
30966349Sqs148142 	}
30976349Sqs148142 
30986349Sqs148142 	size = rbr_p->tnblocks * sizeof (p_rx_msg_t);
30996349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
31006349Sqs148142 	    " hxge_unmap_rxdma_channel_buf_ring: channel %d chunks %d "
31016349Sqs148142 	    "tnblocks %d (max %d) size ptrs %d ", rbr_p->rdc, rbr_p->num_blocks,
31026349Sqs148142 	    rbr_p->tnblocks, rbr_p->rbr_max_size, size));
31036349Sqs148142 
31046349Sqs148142 	for (i = 0; i < rbr_p->tnblocks; i++) {
31056349Sqs148142 		rx_msg_p = rx_msg_ring[i];
31066349Sqs148142 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
31076349Sqs148142 		    " hxge_unmap_rxdma_channel_buf_ring: "
31086349Sqs148142 		    "rx_msg_p $%p", rx_msg_p));
31096349Sqs148142 		if (rx_msg_p != NULL) {
31108177SQiyan.Sun@Sun.COM 			freeb(rx_msg_p->rx_mblk_p);
31116349Sqs148142 			rx_msg_ring[i] = NULL;
31126349Sqs148142 		}
31136349Sqs148142 	}
31146349Sqs148142 
31156349Sqs148142 	/*
31166349Sqs148142 	 * We no longer may use the mutex <post_lock>. By setting
31176349Sqs148142 	 * <rbr_state> to anything but POSTING, we prevent
31186349Sqs148142 	 * hxge_post_page() from accessing a dead mutex.
31196349Sqs148142 	 */
31206349Sqs148142 	rbr_p->rbr_state = RBR_UNMAPPING;
31216349Sqs148142 	MUTEX_DESTROY(&rbr_p->post_lock);
31226349Sqs148142 
31236349Sqs148142 	MUTEX_DESTROY(&rbr_p->lock);
31246349Sqs148142 	KMEM_FREE(ring_info, sizeof (rxring_info_t));
31256349Sqs148142 	KMEM_FREE(rx_msg_ring, size);
31266349Sqs148142 
31276349Sqs148142 	if (rbr_p->rbr_ref_cnt == 0) {
31286349Sqs148142 		/* This is the normal state of affairs. */
31296349Sqs148142 		KMEM_FREE(rbr_p, sizeof (*rbr_p));
31306349Sqs148142 	} else {
31316349Sqs148142 		/*
31326349Sqs148142 		 * Some of our buffers are still being used.
31336349Sqs148142 		 * Therefore, tell hxge_freeb() this ring is
31346349Sqs148142 		 * unmapped, so it may free <rbr_p> for us.
31356349Sqs148142 		 */
31366349Sqs148142 		rbr_p->rbr_state = RBR_UNMAPPED;
313710091SMichael.Speer@Sun.COM 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
31386349Sqs148142 		    "unmap_rxdma_buf_ring: %d %s outstanding.",
31396349Sqs148142 		    rbr_p->rbr_ref_cnt,
31406349Sqs148142 		    rbr_p->rbr_ref_cnt == 1 ? "msg" : "msgs"));
31416349Sqs148142 	}
31426349Sqs148142 
31436349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
31446349Sqs148142 	    "<== hxge_unmap_rxdma_channel_buf_ring"));
31456349Sqs148142 }
31466349Sqs148142 
31476349Sqs148142 static hxge_status_t
hxge_rxdma_hw_start_common(p_hxge_t hxgep)31486349Sqs148142 hxge_rxdma_hw_start_common(p_hxge_t hxgep)
31496349Sqs148142 {
31506349Sqs148142 	hxge_status_t status = HXGE_OK;
31516349Sqs148142 
31526349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
31536349Sqs148142 
31546349Sqs148142 	/*
31556349Sqs148142 	 * Load the sharable parameters by writing to the function zero control
31566349Sqs148142 	 * registers. These FZC registers should be initialized only once for
31576349Sqs148142 	 * the entire chip.
31586349Sqs148142 	 */
31596349Sqs148142 	(void) hxge_init_fzc_rx_common(hxgep);
31606349Sqs148142 
31616349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start_common"));
31626349Sqs148142 
31636349Sqs148142 	return (status);
31646349Sqs148142 }
31656349Sqs148142 
31666349Sqs148142 static hxge_status_t
hxge_rxdma_hw_start(p_hxge_t hxgep)31676349Sqs148142 hxge_rxdma_hw_start(p_hxge_t hxgep)
31686349Sqs148142 {
31696349Sqs148142 	int			i, ndmas;
31706349Sqs148142 	uint16_t		channel;
31716349Sqs148142 	p_rx_rbr_rings_t	rx_rbr_rings;
31726349Sqs148142 	p_rx_rbr_ring_t		*rbr_rings;
31736349Sqs148142 	p_rx_rcr_rings_t	rx_rcr_rings;
31746349Sqs148142 	p_rx_rcr_ring_t		*rcr_rings;
31756349Sqs148142 	p_rx_mbox_areas_t	rx_mbox_areas_p;
31766349Sqs148142 	p_rx_mbox_t		*rx_mbox_p;
31776349Sqs148142 	hxge_status_t		status = HXGE_OK;
31786349Sqs148142 
31796349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start"));
31806349Sqs148142 
31816349Sqs148142 	rx_rbr_rings = hxgep->rx_rbr_rings;
31826349Sqs148142 	rx_rcr_rings = hxgep->rx_rcr_rings;
31836349Sqs148142 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
31846349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
31856349Sqs148142 		    "<== hxge_rxdma_hw_start: NULL ring pointers"));
31866349Sqs148142 		return (HXGE_ERROR);
31876349Sqs148142 	}
31886349Sqs148142 
31896349Sqs148142 	ndmas = rx_rbr_rings->ndmas;
31906349Sqs148142 	if (ndmas == 0) {
31916349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
31926349Sqs148142 		    "<== hxge_rxdma_hw_start: no dma channel allocated"));
31936349Sqs148142 		return (HXGE_ERROR);
31946349Sqs148142 	}
31956349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
31966349Sqs148142 	    "==> hxge_rxdma_hw_start (ndmas %d)", ndmas));
31976349Sqs148142 
31986349Sqs148142 	/*
31996349Sqs148142 	 * Scrub the RDC Rx DMA Prefetch Buffer Command.
32006349Sqs148142 	 */
32016349Sqs148142 	for (i = 0; i < 128; i++) {
32026349Sqs148142 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_PREF_CMD, i);
32036349Sqs148142 	}
32046349Sqs148142 
32056349Sqs148142 	/*
32066349Sqs148142 	 * Scrub Rx DMA Shadow Tail Command.
32076349Sqs148142 	 */
32086349Sqs148142 	for (i = 0; i < 64; i++) {
32096349Sqs148142 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_SHADOW_CMD, i);
32106349Sqs148142 	}
32116349Sqs148142 
32126349Sqs148142 	/*
32136349Sqs148142 	 * Scrub Rx DMA Control Fifo Command.
32146349Sqs148142 	 */
32156349Sqs148142 	for (i = 0; i < 512; i++) {
32166349Sqs148142 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_CTRL_FIFO_CMD, i);
32176349Sqs148142 	}
32186349Sqs148142 
32196349Sqs148142 	/*
32206349Sqs148142 	 * Scrub Rx DMA Data Fifo Command.
32216349Sqs148142 	 */
32226349Sqs148142 	for (i = 0; i < 1536; i++) {
32236349Sqs148142 		HXGE_REG_WR64(hxgep->hpi_handle, RDC_DATA_FIFO_CMD, i);
32246349Sqs148142 	}
32256349Sqs148142 
32266349Sqs148142 	/*
32276349Sqs148142 	 * Reset the FIFO Error Stat.
32286349Sqs148142 	 */
32296349Sqs148142 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_STAT, 0xFF);
32306349Sqs148142 
32316349Sqs148142 	/* Set the error mask to receive interrupts */
32326349Sqs148142 	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);
32336349Sqs148142 
32346349Sqs148142 	rbr_rings = rx_rbr_rings->rbr_rings;
32356349Sqs148142 	rcr_rings = rx_rcr_rings->rcr_rings;
32366349Sqs148142 	rx_mbox_areas_p = hxgep->rx_mbox_areas_p;
32376349Sqs148142 	if (rx_mbox_areas_p) {
32386349Sqs148142 		rx_mbox_p = rx_mbox_areas_p->rxmbox_areas;
32396349Sqs148142 	}
32406349Sqs148142 
32416349Sqs148142 	for (i = 0; i < ndmas; i++) {
32426349Sqs148142 		channel = rbr_rings[i]->rdc;
32436349Sqs148142 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
32446349Sqs148142 		    "==> hxge_rxdma_hw_start (ndmas %d) channel %d",
32456349Sqs148142 		    ndmas, channel));
32466349Sqs148142 		status = hxge_rxdma_start_channel(hxgep, channel,
32476349Sqs148142 		    (p_rx_rbr_ring_t)rbr_rings[i],
32486349Sqs148142 		    (p_rx_rcr_ring_t)rcr_rings[i],
32498103SQiyan.Sun@Sun.COM 		    (p_rx_mbox_t)rx_mbox_p[i], rbr_rings[i]->rbb_max);
32506349Sqs148142 		if (status != HXGE_OK) {
32516349Sqs148142 			goto hxge_rxdma_hw_start_fail1;
32526349Sqs148142 		}
32536349Sqs148142 	}
32546349Sqs148142 
32556349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_start: "
32566349Sqs148142 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
32576349Sqs148142 	    rx_rbr_rings, rx_rcr_rings));
32586349Sqs148142 	goto hxge_rxdma_hw_start_exit;
32596349Sqs148142 
32606349Sqs148142 hxge_rxdma_hw_start_fail1:
32616349Sqs148142 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
32626349Sqs148142 	    "==> hxge_rxdma_hw_start: disable "
32636349Sqs148142 	    "(status 0x%x channel %d i %d)", status, channel, i));
32646349Sqs148142 	for (; i >= 0; i--) {
32656349Sqs148142 		channel = rbr_rings[i]->rdc;
32666349Sqs148142 		(void) hxge_rxdma_stop_channel(hxgep, channel);
32676349Sqs148142 	}
32686349Sqs148142 
32696349Sqs148142 hxge_rxdma_hw_start_exit:
32706349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
32716349Sqs148142 	    "==> hxge_rxdma_hw_start: (status 0x%x)", status));
32726349Sqs148142 	return (status);
32736349Sqs148142 }
32746349Sqs148142 
32756349Sqs148142 static void
hxge_rxdma_hw_stop(p_hxge_t hxgep)32766349Sqs148142 hxge_rxdma_hw_stop(p_hxge_t hxgep)
32776349Sqs148142 {
32786349Sqs148142 	int			i, ndmas;
32796349Sqs148142 	uint16_t		channel;
32806349Sqs148142 	p_rx_rbr_rings_t	rx_rbr_rings;
32816349Sqs148142 	p_rx_rbr_ring_t		*rbr_rings;
32826349Sqs148142 	p_rx_rcr_rings_t	rx_rcr_rings;
32836349Sqs148142 
32846349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop"));
32856349Sqs148142 
32866349Sqs148142 	rx_rbr_rings = hxgep->rx_rbr_rings;
32876349Sqs148142 	rx_rcr_rings = hxgep->rx_rcr_rings;
32886349Sqs148142 
32896349Sqs148142 	if (rx_rbr_rings == NULL || rx_rcr_rings == NULL) {
32906349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
32916349Sqs148142 		    "<== hxge_rxdma_hw_stop: NULL ring pointers"));
32926349Sqs148142 		return;
32936349Sqs148142 	}
32946349Sqs148142 
32956349Sqs148142 	ndmas = rx_rbr_rings->ndmas;
32966349Sqs148142 	if (!ndmas) {
32976349Sqs148142 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
32986349Sqs148142 		    "<== hxge_rxdma_hw_stop: no dma channel allocated"));
32996349Sqs148142 		return;
33006349Sqs148142 	}
33016349Sqs148142 
33026349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
33036349Sqs148142 	    "==> hxge_rxdma_hw_stop (ndmas %d)", ndmas));
33046349Sqs148142 
33056349Sqs148142 	rbr_rings = rx_rbr_rings->rbr_rings;
33066349Sqs148142 	for (i = 0; i < ndmas; i++) {
33076349Sqs148142 		channel = rbr_rings[i]->rdc;
33086349Sqs148142 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
33096349Sqs148142 		    "==> hxge_rxdma_hw_stop (ndmas %d) channel %d",
33106349Sqs148142 		    ndmas, channel));
33116349Sqs148142 		(void) hxge_rxdma_stop_channel(hxgep, channel);
33126349Sqs148142 	}
33136349Sqs148142 
33146349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_hw_stop: "
33156349Sqs148142 	    "rx_rbr_rings 0x%016llx rings 0x%016llx",
33166349Sqs148142 	    rx_rbr_rings, rx_rcr_rings));
33176349Sqs148142 
33186349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_hw_stop"));
33196349Sqs148142 }
33206349Sqs148142 
33216349Sqs148142 static hxge_status_t
hxge_rxdma_start_channel(p_hxge_t hxgep,uint16_t channel,p_rx_rbr_ring_t rbr_p,p_rx_rcr_ring_t rcr_p,p_rx_mbox_t mbox_p,int n_init_kick)33226349Sqs148142 hxge_rxdma_start_channel(p_hxge_t hxgep, uint16_t channel,
33238103SQiyan.Sun@Sun.COM     p_rx_rbr_ring_t rbr_p, p_rx_rcr_ring_t rcr_p, p_rx_mbox_t mbox_p,
33248103SQiyan.Sun@Sun.COM     int n_init_kick)
33256349Sqs148142 {
33266349Sqs148142 	hpi_handle_t		handle;
33276349Sqs148142 	hpi_status_t		rs = HPI_SUCCESS;
33286349Sqs148142 	rdc_stat_t		cs;
33296349Sqs148142 	rdc_int_mask_t		ent_mask;
33306349Sqs148142 	hxge_status_t		status = HXGE_OK;
33316349Sqs148142 
33326349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel"));
33336349Sqs148142 
33346349Sqs148142 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
33356349Sqs148142 
33366349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "hxge_rxdma_start_channel: "
33376349Sqs148142 	    "hpi handle addr $%p acc $%p",
33386349Sqs148142 	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));
33396349Sqs148142 
33406349Sqs148142 	/* Reset RXDMA channel */
33416349Sqs148142 	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
33426349Sqs148142 	if (rs != HPI_SUCCESS) {
33436349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33446349Sqs148142 		    "==> hxge_rxdma_start_channel: "
33456349Sqs148142 		    "reset rxdma failed (0x%08x channel %d)",
33466349Sqs148142 		    status, channel));
33476349Sqs148142 		return (HXGE_ERROR | rs);
33486349Sqs148142 	}
33496349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
33506349Sqs148142 	    "==> hxge_rxdma_start_channel: reset done: channel %d", channel));
33516349Sqs148142 
33526349Sqs148142 	/*
33536349Sqs148142 	 * Initialize the RXDMA channel specific FZC control configurations.
33546349Sqs148142 	 * These FZC registers are pertaining to each RX channel (logical
33556349Sqs148142 	 * pages).
33566349Sqs148142 	 */
33576349Sqs148142 	status = hxge_init_fzc_rxdma_channel(hxgep,
33586349Sqs148142 	    channel, rbr_p, rcr_p, mbox_p);
33596349Sqs148142 	if (status != HXGE_OK) {
33606349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33616349Sqs148142 		    "==> hxge_rxdma_start_channel: "
33626349Sqs148142 		    "init fzc rxdma failed (0x%08x channel %d)",
33636349Sqs148142 		    status, channel));
33646349Sqs148142 		return (status);
33656349Sqs148142 	}
33666349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
33676349Sqs148142 	    "==> hxge_rxdma_start_channel: fzc done"));
33686349Sqs148142 
33696349Sqs148142 	/*
33706349Sqs148142 	 * Zero out the shadow  and prefetch ram.
33716349Sqs148142 	 */
33726349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
33736349Sqs148142 	    "==> hxge_rxdma_start_channel: ram done"));
33746349Sqs148142 
33756349Sqs148142 	/* Set up the interrupt event masks. */
33766349Sqs148142 	ent_mask.value = 0;
33776349Sqs148142 	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
33786349Sqs148142 	if (rs != HPI_SUCCESS) {
33796349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33806349Sqs148142 		    "==> hxge_rxdma_start_channel: "
33816349Sqs148142 		    "init rxdma event masks failed (0x%08x channel %d)",
33826349Sqs148142 		    status, channel));
33836349Sqs148142 		return (HXGE_ERROR | rs);
33846349Sqs148142 	}
33856349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
33866349Sqs148142 	    "event done: channel %d (mask 0x%016llx)",
33876349Sqs148142 	    channel, ent_mask.value));
33886349Sqs148142 
33896349Sqs148142 	/*
33906349Sqs148142 	 * Load RXDMA descriptors, buffers, mailbox, initialise the receive DMA
33916349Sqs148142 	 * channels and enable each DMA channel.
33926349Sqs148142 	 */
33936349Sqs148142 	status = hxge_enable_rxdma_channel(hxgep,
33948103SQiyan.Sun@Sun.COM 	    channel, rbr_p, rcr_p, mbox_p, n_init_kick);
33956349Sqs148142 	if (status != HXGE_OK) {
33966349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33976349Sqs148142 		    " hxge_rxdma_start_channel: "
33986349Sqs148142 		    " init enable rxdma failed (0x%08x channel %d)",
33996349Sqs148142 		    status, channel));
34006349Sqs148142 		return (status);
34016349Sqs148142 	}
34026349Sqs148142 
34036349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
34046349Sqs148142 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
34056349Sqs148142 
34066349Sqs148142 	/*
34076349Sqs148142 	 * Initialize the receive DMA control and status register
34086349Sqs148142 	 * Note that rdc_stat HAS to be set after RBR and RCR rings are set
34096349Sqs148142 	 */
34106349Sqs148142 	cs.value = 0;
34116349Sqs148142 	cs.bits.mex = 1;
34126349Sqs148142 	cs.bits.rcr_thres = 1;
34136349Sqs148142 	cs.bits.rcr_to = 1;
34146349Sqs148142 	cs.bits.rbr_empty = 1;
34156349Sqs148142 	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);
34166349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
34176349Sqs148142 	    "channel %d rx_dma_cntl_stat 0x%0016llx", channel, cs.value));
34186349Sqs148142 	if (status != HXGE_OK) {
34196349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
34206349Sqs148142 		    "==> hxge_rxdma_start_channel: "
34216349Sqs148142 		    "init rxdma control register failed (0x%08x channel %d",
34226349Sqs148142 		    status, channel));
34236349Sqs148142 		return (status);
34246349Sqs148142 	}
34256349Sqs148142 
34266349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_rxdma_start_channel: "
34276349Sqs148142 	    "control done - channel %d cs 0x%016llx", channel, cs.value));
34286349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
34296349Sqs148142 	    "==> hxge_rxdma_start_channel: enable done"));
34306349Sqs148142 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_rxdma_start_channel"));
34316349Sqs148142 	return (HXGE_OK);
34326349Sqs148142 }
34336349Sqs148142 
/*
 * hxge_rxdma_stop_channel
 *
 *	Quiesce a single RX DMA channel: reset the channel, mask all of
 *	its interrupt events, clear the control/status register and then
 *	disable the DMA engine.  The order of these steps mirrors the
 *	hardware programming sequence and should not be rearranged.
 *
 * Arguments:
 *	hxgep	- device soft state
 *	channel	- RDC channel number to stop
 *
 * Returns:
 *	HXGE_OK on success; (HXGE_ERROR | rs) when an HPI call fails, or
 *	the hxge_status_t error from a failing register-init step.
 */
static hxge_status_t
hxge_rxdma_stop_channel(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t		handle;
	hpi_status_t		rs = HPI_SUCCESS;
	rdc_stat_t		cs;
	rdc_int_mask_t		ent_mask;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel"));

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "hxge_rxdma_stop_channel: "
	    "hpi handle addr $%p acc $%p",
	    hxgep->hpi_handle.regp, hxgep->hpi_handle.regh));

	/* Reset RXDMA channel */
	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_rxdma_stop_channel: "
		    " reset rxdma failed (0x%08x channel %d)",
		    rs, channel));
		return (HXGE_ERROR | rs);
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_stop_channel: reset done"));

	/* Set up the interrupt event masks: mask every event source. */
	ent_mask.value = RDC_INT_MASK_ALL;
	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_rxdma_stop_channel: "
		    "set rxdma event masks failed (0x%08x channel %d)",
		    rs, channel));
		return (HXGE_ERROR | rs);
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_stop_channel: event done"));

	/* Initialize the receive DMA control and status register to all 0s */
	cs.value = 0;
	status = hxge_init_rxdma_channel_cntl_stat(hxgep, channel, &cs);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_stop_channel: control "
	    " to default (all 0s) 0x%08x", cs.value));

	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_rxdma_stop_channel: init rxdma"
		    " control register failed (0x%08x channel %d",
		    status, channel));
		return (status);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_stop_channel: control done"));

	/* disable dma channel */
	status = hxge_disable_rxdma_channel(hxgep, channel);

	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    " hxge_rxdma_stop_channel: "
		    " init enable rxdma failed (0x%08x channel %d)",
		    status, channel));
		return (status);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL,
	    "==> hxge_rxdma_stop_channel: disable done"));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_stop_channel"));

	return (HXGE_OK);
}
35116349Sqs148142 
35126349Sqs148142 hxge_status_t
hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)35136349Sqs148142 hxge_rxdma_handle_sys_errors(p_hxge_t hxgep)
35146349Sqs148142 {
35156349Sqs148142 	hpi_handle_t		handle;
35166349Sqs148142 	p_hxge_rdc_sys_stats_t	statsp;
35176349Sqs148142 	rdc_fifo_err_stat_t	stat;
35186349Sqs148142 	hxge_status_t		status = HXGE_OK;
35196349Sqs148142 
35206349Sqs148142 	handle = hxgep->hpi_handle;
35216349Sqs148142 	statsp = (p_hxge_rdc_sys_stats_t)&hxgep->statsp->rdc_sys_stats;
35226349Sqs148142 
35236349Sqs148142 	/* Get the error status and clear the register */
35246349Sqs148142 	HXGE_REG_RD64(handle, RDC_FIFO_ERR_STAT, &stat.value);
35256349Sqs148142 	HXGE_REG_WR64(handle, RDC_FIFO_ERR_STAT, stat.value);
35266349Sqs148142 
35276349Sqs148142 	if (stat.bits.rx_ctrl_fifo_sec) {
35286349Sqs148142 		statsp->ctrl_fifo_sec++;
35296349Sqs148142 		if (statsp->ctrl_fifo_sec == 1)
35306349Sqs148142 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
35316349Sqs148142 			    "==> hxge_rxdma_handle_sys_errors: "
35326349Sqs148142 			    "rx_ctrl_fifo_sec"));
35336349Sqs148142 	}
35346349Sqs148142 
35356349Sqs148142 	if (stat.bits.rx_ctrl_fifo_ded) {
35366349Sqs148142 		/* Global fatal error encountered */
35376349Sqs148142 		statsp->ctrl_fifo_ded++;
35386349Sqs148142 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
35396349Sqs148142 		    HXGE_FM_EREPORT_RDMC_CTRL_FIFO_DED);
35406349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
35416349Sqs148142 		    "==> hxge_rxdma_handle_sys_errors: "
35426349Sqs148142 		    "fatal error: rx_ctrl_fifo_ded error"));
35436349Sqs148142 	}
35446349Sqs148142 
35456349Sqs148142 	if (stat.bits.rx_data_fifo_sec) {
35466349Sqs148142 		statsp->data_fifo_sec++;
35476349Sqs148142 		if (statsp->data_fifo_sec == 1)
35486349Sqs148142 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
35496349Sqs148142 			    "==> hxge_rxdma_handle_sys_errors: "
35506349Sqs148142 			    "rx_data_fifo_sec"));
35516349Sqs148142 	}
35526349Sqs148142 
35536349Sqs148142 	if (stat.bits.rx_data_fifo_ded) {
35546349Sqs148142 		/* Global fatal error encountered */
35556349Sqs148142 		statsp->data_fifo_ded++;
35566349Sqs148142 		HXGE_FM_REPORT_ERROR(hxgep, NULL,
35576349Sqs148142 		    HXGE_FM_EREPORT_RDMC_DATA_FIFO_DED);
35586349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
35596349Sqs148142 		    "==> hxge_rxdma_handle_sys_errors: "
35606349Sqs148142 		    "fatal error: rx_data_fifo_ded error"));
35616349Sqs148142 	}
35626349Sqs148142 
35636349Sqs148142 	if (stat.bits.rx_ctrl_fifo_ded || stat.bits.rx_data_fifo_ded) {
35646349Sqs148142 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
35656349Sqs148142 		    " hxge_rxdma_handle_sys_errors: fatal error\n"));
35666349Sqs148142 		status = hxge_rx_port_fatal_err_recover(hxgep);
35676349Sqs148142 		if (status == HXGE_OK) {
35686349Sqs148142 			FM_SERVICE_RESTORED(hxgep);
35696349Sqs148142 		}
35706349Sqs148142 	}
35716349Sqs148142 
35726349Sqs148142 	return (HXGE_OK);
35736349Sqs148142 }
35746349Sqs148142 
/*
 * hxge_rxdma_fatal_err_recover
 *
 *	Recover a single RX DMA channel from a fatal error: disable and
 *	reset the channel, re-initialize the software RBR/RCR ring state
 *	to match the freshly-reset hardware, and restart the channel,
 *	kicking back only the buffers currently owned by the hardware.
 *
 *	Locking: acquires rcrp->lock then rbrp->lock (in that order) and
 *	holds both for the whole sequence.  The caller is expected to
 *	already hold the RBR post_lock (see hxge_rx_port_fatal_err_recover
 *	and hxge_rbr_empty_restore).
 *
 * Arguments:
 *	hxgep	- device soft state
 *	channel	- RDC channel number to recover
 *
 * Returns:
 *	HXGE_OK on success; (HXGE_ERROR | rs) if the disable or reset
 *	HPI call fails.
 */
static hxge_status_t
hxge_rxdma_fatal_err_recover(p_hxge_t hxgep, uint16_t channel)
{
	hpi_handle_t		handle;
	hpi_status_t 		rs = HPI_SUCCESS;
	p_rx_rbr_ring_t		rbrp;
	p_rx_rcr_ring_t		rcrp;
	p_rx_mbox_t		mboxp;
	rdc_int_mask_t		ent_mask;
	p_hxge_dma_common_t	dmap;
	p_rx_msg_t		rx_msg_p;
	int			i;
	uint32_t		hxge_port_rcr_size;
	uint64_t		tmp;
	int			n_init_kick = 0;	/* buffers to kick back to hw */

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rxdma_fatal_err_recover"));

	/*
	 * Stop the dma channel waits for the stop done. If the stop done bit
	 * is not set, then create an error.
	 */

	handle = HXGE_DEV_HPI_HANDLE(hxgep);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Rx DMA stop..."));

	rbrp = (p_rx_rbr_ring_t)hxgep->rx_rbr_rings->rbr_rings[channel];
	rcrp = (p_rx_rcr_ring_t)hxgep->rx_rcr_rings->rcr_rings[channel];

	/* Lock order: RCR ring lock first, then RBR ring lock. */
	MUTEX_ENTER(&rcrp->lock);
	MUTEX_ENTER(&rbrp->lock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA channel..."));

	rs = hpi_rxdma_cfg_rdc_disable(handle, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_disable_rxdma_channel:failed"));
		goto fail;
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxDMA interrupt..."));

	/* Disable interrupt */
	ent_mask.value = RDC_INT_MASK_ALL;
	rs = hpi_rxdma_event_mask(handle, OP_SET, channel, &ent_mask);
	if (rs != HPI_SUCCESS) {
		/* Non-fatal: log and continue with the reset. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Set rxdma event masks failed (channel %d)", channel));
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel reset..."));

	/* Reset RXDMA channel */
	rs = hpi_rxdma_cfg_rdc_reset(handle, channel);
	if (rs != HPI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Reset rxdma failed (channel %d)", channel));
		goto fail;
	}
	hxge_port_rcr_size = hxgep->hxge_port_rcr_size;
	mboxp = (p_rx_mbox_t)hxgep->rx_mbox_areas_p->rxmbox_areas[channel];

	/*
	 * Reset the software ring indices to the post-reset hardware
	 * state: RBR write index wraps to the last block, everything
	 * else starts from 0.
	 */
	rbrp->rbr_wr_index = (rbrp->rbb_max - 1);
	rbrp->rbr_rd_index = 0;

	rcrp->comp_rd_index = 0;
	rcrp->comp_wt_index = 0;
	rcrp->rcr_desc_rd_head_p = rcrp->rcr_desc_first_p =
	    (p_rcr_entry_t)DMA_COMMON_VPTR(rcrp->rcr_desc);
#if defined(__i386)
	/* 32-bit kernel: IO address must be narrowed before the cast. */
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)(uint32_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#else
	rcrp->rcr_desc_rd_head_pp = rcrp->rcr_desc_first_pp =
	    (p_rcr_entry_t)DMA_COMMON_IOADDR(rcrp->rcr_desc);
#endif

	rcrp->rcr_desc_last_p = rcrp->rcr_desc_rd_head_p +
	    (hxge_port_rcr_size - 1);
	rcrp->rcr_desc_last_pp = rcrp->rcr_desc_rd_head_pp +
	    (hxge_port_rcr_size - 1);

	/* Recompute the RCR tail offset in qword units from the IO addr. */
	rcrp->rcr_tail_begin = DMA_COMMON_IOADDR(rcrp->rcr_desc);
	rcrp->rcr_tail_begin = (rcrp->rcr_tail_begin & 0x7ffffULL) >> 3;

	/* Clear the RCR descriptor area itself. */
	dmap = (p_hxge_dma_common_t)&rcrp->rcr_desc;
	bzero((caddr_t)dmap->kaddrp, dmap->alength);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "rbr entries = %d\n",
	    rbrp->rbr_max_size));

	/* Count the number of buffers owned by the hardware at this moment */
	for (i = 0; i < rbrp->rbr_max_size; i++) {
		rx_msg_p = rbrp->rx_msg_ring[i];
		/* ref_cnt == 1 means only the driver/hw holds the buffer */
		if (rx_msg_p->ref_cnt == 1) {
			n_init_kick++;
		}
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "RxDMA channel re-start..."));

	/*
	 * This is error recover! Some buffers are owned by the hardware and
	 * the rest are owned by the apps. We should only kick in those
	 * owned by the hardware initially. The apps will post theirs
	 * eventually.
	 */
	(void) hxge_rxdma_start_channel(hxgep, channel, rbrp, rcrp, mboxp,
	    n_init_kick);

	/*
	 * The DMA channel may disable itself automatically.
	 * The following is a work-around.
	 */
	HXGE_REG_RD64(handle, RDC_RX_CFG1, &tmp);
	rs = hpi_rxdma_cfg_rdc_enable(handle, channel);
	if (rs != HPI_SUCCESS) {
		/* Non-fatal here: log and let the restart stand. */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hpi_rxdma_cfg_rdc_enable (channel %d)", channel));
	}

	/*
	 * Delay a bit of time by doing reads.
	 */
	for (i = 0; i < 1024; i++) {
		uint64_t value;
		RXDMA_REG_READ64(HXGE_DEV_HPI_HANDLE(hxgep),
		    RDC_INT_MASK, i & 3, &value);
	}

	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rxdma_fatal_err_recover"));
	return (HXGE_OK);

fail:
	/* Drop locks in reverse acquisition order before returning. */
	MUTEX_EXIT(&rbrp->lock);
	MUTEX_EXIT(&rcrp->lock);
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Error Recovery failed for channel(%d)", channel));
	return (HXGE_ERROR | rs);
}
37186349Sqs148142 
/*
 * hxge_rx_port_fatal_err_recover
 *
 *	Recover the whole RX port from a fatal RDC error: disable the RX
 *	VMAC, reset the RDC block from the PEU, restore the common RDC
 *	settings, run per-channel fatal error recovery for every DMA
 *	channel, then reset/re-init/re-enable the RX VMAC and restore the
 *	RDC FIFO error interrupt mask (which the PEU reset cleared).
 *
 *	Locking: holds hxgep->vmac_lock for the entire sequence and takes
 *	each channel's RBR post_lock around the per-channel recovery
 *	(hxge_rxdma_fatal_err_recover must run inside the post_lock).
 *
 * Arguments:
 *	hxgep	- device soft state
 *
 * Returns:
 *	HXGE_OK on success, or the failing step's status.  Note that a
 *	failed per-channel recovery is only logged, not treated as fatal
 *	for the port recovery as a whole.
 */
static hxge_status_t
hxge_rx_port_fatal_err_recover(p_hxge_t hxgep)
{
	hxge_status_t		status = HXGE_OK;
	p_hxge_dma_common_t	*dma_buf_p;
	uint16_t		channel;
	int			ndmas;
	int			i;
	block_reset_t		reset_reg;
	p_rx_rcr_ring_t	rcrp;
	p_rx_rbr_ring_t rbrp;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_rx_port_fatal_err_recover"));
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "Recovering from RDC error ..."));

	/* Disable RxMAC */
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Disable RxMAC...\n"));
	MUTEX_ENTER(&hxgep->vmac_lock);
	if (hxge_rx_vmac_disable(hxgep) != HXGE_OK)
		goto fail;

	HXGE_DELAY(1000);

	/*
	 * Reset RDC block from PEU for this fatal error
	 */
	reset_reg.value = 0;
	reset_reg.bits.rdc_rst = 1;
	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, reset_reg.value);

	/* Let the block reset settle before reprogramming. */
	HXGE_DELAY(1000);

	/* Restore any common settings after PEU reset */
	if (hxge_rxdma_hw_start_common(hxgep) != HXGE_OK)
		goto fail;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Stop all RxDMA channels..."));

	ndmas = hxgep->rx_buf_pool_p->ndmas;
	dma_buf_p = hxgep->rx_buf_pool_p->dma_buf_pool_p;

	/* Recover every configured RX DMA channel individually. */
	for (i = 0; i < ndmas; i++) {
		channel = ((p_hxge_dma_common_t)dma_buf_p[i])->dma_channel;
		rcrp = hxgep->rx_rcr_rings->rcr_rings[channel];
		rbrp = rcrp->rx_rbr_p;

		MUTEX_ENTER(&rbrp->post_lock);

		/*
		 * This function needs to be inside the post_lock
		 */
		if (hxge_rxdma_fatal_err_recover(hxgep, channel) != HXGE_OK) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "Could not recover channel %d", channel));
		}
		MUTEX_EXIT(&rbrp->post_lock);
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Reset RxMAC..."));

	/* Reset RxMAC */
	if (hxge_rx_vmac_reset(hxgep) != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
		goto fail;
	}

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-initialize RxMAC..."));

	/* Re-Initialize RxMAC */
	if ((status = hxge_rx_vmac_init(hxgep)) != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: Failed to reset RxMAC"));
		goto fail;
	}
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "Re-enable RxMAC..."));

	/* Re-enable RxMAC */
	if ((status = hxge_rx_vmac_enable(hxgep)) != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_rx_port_fatal_err_recover: Failed to enable RxMAC"));
		goto fail;
	}
	MUTEX_EXIT(&hxgep->vmac_lock);

	/* Reset the error mask since PEU reset cleared it */
	HXGE_REG_WR64(hxgep->hpi_handle, RDC_FIFO_ERR_INT_MASK, 0x0);

	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Recovery Successful, RxPort Restored"));
	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_rx_port_fatal_err_recover"));
	return (HXGE_OK);

fail:
	/* vmac_lock is held on every path that reaches here. */
	MUTEX_EXIT(&hxgep->vmac_lock);
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "Error Recovery failed for hxge(%d)", hxgep->instance));
	return (status);
}
38188236SQiyan.Sun@Sun.COM 
/*
 * hxge_rbr_empty_restore
 *
 *	Bring a channel back to life after an RBR Empty condition:
 *	temporarily disable the RX VMAC, re-arm the interrupt mex bit,
 *	re-enable the DMA channel, and re-enable the VMAC.  If the
 *	channel refuses to re-enable, fall back to the full per-channel
 *	fatal error recovery.
 *
 *	Caller is assumed to already hold the ring's post_lock (see the
 *	comment at the recovery fallback below).
 *
 * Arguments:
 *	hxgep	 - device soft state
 *	rx_rbr_p - RBR ring of the channel that reported empty
 */
static void
hxge_rbr_empty_restore(p_hxge_t hxgep, p_rx_rbr_ring_t rx_rbr_p)
{
	hpi_status_t		hpi_status;
	hxge_status_t		status;
	rdc_stat_t		cs;
	p_hxge_rx_ring_stats_t	rdc_stats;

	/* Per-channel statistics for this RDC. */
	rdc_stats = &hxgep->statsp->rdc_stats[rx_rbr_p->rdc];

	/*
	 * Complete the processing for the RBR Empty by:
	 *	0) kicking back HXGE_RBR_EMPTY_THRESHOLD
	 *	   packets.
	 *	1) Disable the RX vmac.
	 *	2) Re-enable the affected DMA channel.
	 *	3) Re-enable the RX vmac.
	 */

	/*
	 * Disable the RX VMAC, but setting the framelength
	 * to 0, since there is a hardware bug when disabling
	 * the vmac.
	 */
	MUTEX_ENTER(&hxgep->vmac_lock);
	(void) hxge_rx_vmac_disable(hxgep);

	/*
	 * Re-arm the mex bit for interrupts to be enabled.
	 */
	cs.value = 0;
	cs.bits.mex = 1;
	RXDMA_REG_WRITE64(HXGE_DEV_HPI_HANDLE(hxgep), RDC_STAT,
	    rx_rbr_p->rdc, cs.value);

	hpi_status = hpi_rxdma_cfg_rdc_enable(
	    HXGE_DEV_HPI_HANDLE(hxgep), rx_rbr_p->rdc);
	if (hpi_status != HPI_SUCCESS) {
		rdc_stats->rbr_empty_fail++;

		/* Assume we are already inside the post_lock */
		status = hxge_rxdma_fatal_err_recover(hxgep, rx_rbr_p->rdc);
		if (status != HXGE_OK) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    "hxge(%d): channel(%d) is empty.",
			    hxgep->instance, rx_rbr_p->rdc));
		}
	}

	/*
	 * Re-enable the RX VMAC.
	 */
	(void) hxge_rx_vmac_enable(hxgep);
	MUTEX_EXIT(&hxgep->vmac_lock);

	/* Channel is flowing again; clear the empty flag. */
	rdc_stats->rbr_empty_restore++;
	rx_rbr_p->rbr_is_empty = B_FALSE;
}
3877