xref: /dpdk/drivers/net/axgbe/axgbe_dev.c (revision b4b24f3e80f9b1bd2c56e1b56b8b8337748e15e6)
1572890efSRavi Kumar /*   SPDX-License-Identifier: BSD-3-Clause
2572890efSRavi Kumar  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3572890efSRavi Kumar  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4572890efSRavi Kumar  */
5572890efSRavi Kumar 
6572890efSRavi Kumar #include "axgbe_ethdev.h"
7572890efSRavi Kumar #include "axgbe_common.h"
8572890efSRavi Kumar #include "axgbe_phy.h"
97c4158a5SRavi Kumar #include "axgbe_rxtx.h"
107c4158a5SRavi Kumar 
/* Reverse the bit order of a 32-bit word (bit 0 <-> bit 31). */
static uint32_t bitrev32(uint32_t x)
{
	uint32_t out = 0;
	int i;

	/* Shift bits out of 'x' LSB-first and into 'out' MSB-first */
	for (i = 0; i < 32; i++) {
		out <<= 1;
		out |= (x >> i) & 1u;
	}
	return out;
}
2086578516SGirish Nandibasappa 
/* Return the position (1..32) of the most-significant set bit of x,
 * or 0 when x is 0 (i.e. the classic fls()).
 *
 * The shifting is done on an unsigned copy: left-shifting a signed int
 * so that set bits reach or cross the sign bit is undefined behavior
 * in C, while unsigned shifts are fully defined.
 */
static int get_lastbit_set(int x)
{
	unsigned int v = (unsigned int)x;
	int r = 32;

	if (!v)
		return 0;
	if (!(v & 0xffff0000u)) {
		v <<= 16;
		r -= 16;
	}
	if (!(v & 0xff000000u)) {
		v <<= 8;
		r -= 8;
	}
	if (!(v & 0xf0000000u)) {
		v <<= 4;
		r -= 4;
	}
	if (!(v & 0xc0000000u)) {
		v <<= 2;
		r -= 2;
	}
	if (!(v & 0x80000000u)) {
		v <<= 1;
		r -= 1;
	}
	return r;
}
5086578516SGirish Nandibasappa 
517c4158a5SRavi Kumar static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
527c4158a5SRavi Kumar {
5335b2d13fSOlivier Matz 	return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
5425cf2630SFerruh Yigit 		RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN;
557c4158a5SRavi Kumar }
56572890efSRavi Kumar 
574ac7516bSRavi Kumar /* query busy bit */
584ac7516bSRavi Kumar static int mdio_complete(struct axgbe_port *pdata)
594ac7516bSRavi Kumar {
604ac7516bSRavi Kumar 	if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
614ac7516bSRavi Kumar 		return 1;
624ac7516bSRavi Kumar 
634ac7516bSRavi Kumar 	return 0;
644ac7516bSRavi Kumar }
654ac7516bSRavi Kumar 
66d06394d2SVenkat Kumar Ande static unsigned int axgbe_create_mdio_sca(int port, int reg)
67d06394d2SVenkat Kumar Ande {
68d06394d2SVenkat Kumar Ande 	unsigned int mdio_sca, da;
69d06394d2SVenkat Kumar Ande 
70d06394d2SVenkat Kumar Ande 	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
71d06394d2SVenkat Kumar Ande 
72d06394d2SVenkat Kumar Ande 	mdio_sca = 0;
73d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
74d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
75d06394d2SVenkat Kumar Ande 	AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
76d06394d2SVenkat Kumar Ande 
77d06394d2SVenkat Kumar Ande 	return mdio_sca;
78d06394d2SVenkat Kumar Ande }
79d06394d2SVenkat Kumar Ande 
804ac7516bSRavi Kumar static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
814ac7516bSRavi Kumar 				    int reg, u16 val)
824ac7516bSRavi Kumar {
834ac7516bSRavi Kumar 	unsigned int mdio_sca, mdio_sccd;
844ac7516bSRavi Kumar 	uint64_t timeout;
854ac7516bSRavi Kumar 
86d06394d2SVenkat Kumar Ande 	mdio_sca = axgbe_create_mdio_sca(addr, reg);
874ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
884ac7516bSRavi Kumar 
894ac7516bSRavi Kumar 	mdio_sccd = 0;
904ac7516bSRavi Kumar 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
914ac7516bSRavi Kumar 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
924ac7516bSRavi Kumar 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
934ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
944ac7516bSRavi Kumar 
954ac7516bSRavi Kumar 	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
964ac7516bSRavi Kumar 	while (time_before(rte_get_timer_cycles(), timeout)) {
974ac7516bSRavi Kumar 		rte_delay_us(100);
984ac7516bSRavi Kumar 		if (mdio_complete(pdata))
994ac7516bSRavi Kumar 			return 0;
1004ac7516bSRavi Kumar 	}
1014ac7516bSRavi Kumar 
1024ac7516bSRavi Kumar 	PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
1034ac7516bSRavi Kumar 	return -ETIMEDOUT;
1044ac7516bSRavi Kumar }
1054ac7516bSRavi Kumar 
1064ac7516bSRavi Kumar static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
1074ac7516bSRavi Kumar 				   int reg)
1084ac7516bSRavi Kumar {
1094ac7516bSRavi Kumar 	unsigned int mdio_sca, mdio_sccd;
1104ac7516bSRavi Kumar 	uint64_t timeout;
1114ac7516bSRavi Kumar 
112d06394d2SVenkat Kumar Ande 	mdio_sca = axgbe_create_mdio_sca(addr, reg);
1134ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1144ac7516bSRavi Kumar 
1154ac7516bSRavi Kumar 	mdio_sccd = 0;
1164ac7516bSRavi Kumar 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
1174ac7516bSRavi Kumar 	AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
1184ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
1194ac7516bSRavi Kumar 
1204ac7516bSRavi Kumar 	timeout = rte_get_timer_cycles() + rte_get_timer_hz();
1214ac7516bSRavi Kumar 
1224ac7516bSRavi Kumar 	while (time_before(rte_get_timer_cycles(), timeout)) {
1234ac7516bSRavi Kumar 		rte_delay_us(100);
1244ac7516bSRavi Kumar 		if (mdio_complete(pdata))
1254ac7516bSRavi Kumar 			goto success;
1264ac7516bSRavi Kumar 	}
1274ac7516bSRavi Kumar 
1284ac7516bSRavi Kumar 	PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
1294ac7516bSRavi Kumar 	return -ETIMEDOUT;
1304ac7516bSRavi Kumar 
1314ac7516bSRavi Kumar success:
1324ac7516bSRavi Kumar 	return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
1334ac7516bSRavi Kumar }
1344ac7516bSRavi Kumar 
1354ac7516bSRavi Kumar static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
1364ac7516bSRavi Kumar 				  enum axgbe_mdio_mode mode)
1374ac7516bSRavi Kumar {
1384ac7516bSRavi Kumar 	unsigned int reg_val = 0;
1394ac7516bSRavi Kumar 
1404ac7516bSRavi Kumar 	switch (mode) {
1414ac7516bSRavi Kumar 	case AXGBE_MDIO_MODE_CL22:
1424ac7516bSRavi Kumar 		if (port > AXGMAC_MAX_C22_PORT)
1434ac7516bSRavi Kumar 			return -EINVAL;
1444ac7516bSRavi Kumar 		reg_val |= (1 << port);
1454ac7516bSRavi Kumar 		break;
1464ac7516bSRavi Kumar 	case AXGBE_MDIO_MODE_CL45:
1474ac7516bSRavi Kumar 		break;
1484ac7516bSRavi Kumar 	default:
1494ac7516bSRavi Kumar 		return -EINVAL;
1504ac7516bSRavi Kumar 	}
1514ac7516bSRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
1524ac7516bSRavi Kumar 
1534ac7516bSRavi Kumar 	return 0;
1544ac7516bSRavi Kumar }
1554ac7516bSRavi Kumar 
/* Read an MMD (PCS) register via the memory-mapped XPCS window (V2
 * access method).  The window-select write and the data read must not
 * be interleaved with another access, hence the xpcs_mutex.
 */
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	/* Clause-45 register numbers already carry the device address in
	 * their upper bits; otherwise prepend the port's configured MMD.
	 */
	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	/* Upper bits select the window, lower bits index within it */
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}
1894ac7516bSRavi Kumar 
/* Write an MMD (PCS) register via the memory-mapped XPCS window (V2
 * access method).  Mirror of axgbe_read_mmd_regs_v2(): window select
 * and data write happen under xpcs_mutex.
 */
static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	/* Clause-45 register numbers already carry the device address in
	 * their upper bits; otherwise prepend the port's configured MMD.
	 */
	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	/* Upper bits select the window, lower bits index within it */
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}
2214ac7516bSRavi Kumar 
2224ac7516bSRavi Kumar static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
2234ac7516bSRavi Kumar 			       int mmd_reg)
2244ac7516bSRavi Kumar {
2254ac7516bSRavi Kumar 	switch (pdata->vdata->xpcs_access) {
2264ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V1:
2274ac7516bSRavi Kumar 		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
2284ac7516bSRavi Kumar 		return -1;
2294ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V2:
2304ac7516bSRavi Kumar 	default:
2314ac7516bSRavi Kumar 		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
2324ac7516bSRavi Kumar 	}
2334ac7516bSRavi Kumar }
2344ac7516bSRavi Kumar 
2354ac7516bSRavi Kumar static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
2364ac7516bSRavi Kumar 				 int mmd_reg, int mmd_data)
2374ac7516bSRavi Kumar {
2384ac7516bSRavi Kumar 	switch (pdata->vdata->xpcs_access) {
2394ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V1:
2404ac7516bSRavi Kumar 		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
2414ac7516bSRavi Kumar 		return;
2424ac7516bSRavi Kumar 	case AXGBE_XPCS_ACCESS_V2:
2434ac7516bSRavi Kumar 	default:
2444ac7516bSRavi Kumar 		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
2454ac7516bSRavi Kumar 	}
2464ac7516bSRavi Kumar }
2474ac7516bSRavi Kumar 
248a5c72737SRavi Kumar static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
249a5c72737SRavi Kumar {
250a5c72737SRavi Kumar 	unsigned int ss;
251a5c72737SRavi Kumar 
252a5c72737SRavi Kumar 	switch (speed) {
253a5c72737SRavi Kumar 	case SPEED_1000:
254a5c72737SRavi Kumar 		ss = 0x03;
255a5c72737SRavi Kumar 		break;
256a5c72737SRavi Kumar 	case SPEED_2500:
257a5c72737SRavi Kumar 		ss = 0x02;
258a5c72737SRavi Kumar 		break;
259a5c72737SRavi Kumar 	case SPEED_10000:
260a5c72737SRavi Kumar 		ss = 0x00;
261a5c72737SRavi Kumar 		break;
262a5c72737SRavi Kumar 	default:
263a5c72737SRavi Kumar 		return -EINVAL;
264a5c72737SRavi Kumar 	}
265a5c72737SRavi Kumar 
266a5c72737SRavi Kumar 	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
267a5c72737SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
268a5c72737SRavi Kumar 
269a5c72737SRavi Kumar 	return 0;
270a5c72737SRavi Kumar }
271a5c72737SRavi Kumar 
272*b4b24f3eSVenkat Kumar Ande static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata)
273*b4b24f3eSVenkat Kumar Ande {
274*b4b24f3eSVenkat Kumar Ande 	unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
275*b4b24f3eSVenkat Kumar Ande 
276*b4b24f3eSVenkat Kumar Ande 	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
277*b4b24f3eSVenkat Kumar Ande 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
278*b4b24f3eSVenkat Kumar Ande 		return max_q_count;
279*b4b24f3eSVenkat Kumar Ande 	else
280*b4b24f3eSVenkat Kumar Ande 		return (RTE_MIN(pdata->tx_q_count, max_q_count));
281*b4b24f3eSVenkat Kumar Ande }
282*b4b24f3eSVenkat Kumar Ande 
2837c4158a5SRavi Kumar static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
2847c4158a5SRavi Kumar {
2857c4158a5SRavi Kumar 	unsigned int reg, reg_val;
286*b4b24f3eSVenkat Kumar Ande 	unsigned int i, q_count;
2877c4158a5SRavi Kumar 
2887c4158a5SRavi Kumar 	/* Clear MTL flow control */
2897c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
2907c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
2917c4158a5SRavi Kumar 
2927c4158a5SRavi Kumar 	/* Clear MAC flow control */
293*b4b24f3eSVenkat Kumar Ande 	q_count = axgbe_get_fc_queue_count(pdata);
2947c4158a5SRavi Kumar 	reg = MAC_Q0TFCR;
2957c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
2967c4158a5SRavi Kumar 		reg_val = AXGMAC_IOREAD(pdata, reg);
2977c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
2987c4158a5SRavi Kumar 		AXGMAC_IOWRITE(pdata, reg, reg_val);
2997c4158a5SRavi Kumar 
3007c4158a5SRavi Kumar 		reg += MAC_QTFCR_INC;
3017c4158a5SRavi Kumar 	}
3027c4158a5SRavi Kumar 
3037c4158a5SRavi Kumar 	return 0;
3047c4158a5SRavi Kumar }
3057c4158a5SRavi Kumar 
3067c4158a5SRavi Kumar static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
3077c4158a5SRavi Kumar {
3087c4158a5SRavi Kumar 	unsigned int reg, reg_val;
309*b4b24f3eSVenkat Kumar Ande 	unsigned int i, q_count;
3107c4158a5SRavi Kumar 
3117c4158a5SRavi Kumar 	/* Set MTL flow control */
3127c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
3137c4158a5SRavi Kumar 		unsigned int ehfc = 0;
3147c4158a5SRavi Kumar 
3157c4158a5SRavi Kumar 		/* Flow control thresholds are established */
3167c4158a5SRavi Kumar 		if (pdata->rx_rfd[i])
3177c4158a5SRavi Kumar 			ehfc = 1;
3187c4158a5SRavi Kumar 
3197c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
3204216cdc0SChandu Babu N 
3214216cdc0SChandu Babu N 		PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
3224216cdc0SChandu Babu N 			    ehfc ? "enabled" : "disabled", i);
3237c4158a5SRavi Kumar 	}
3247c4158a5SRavi Kumar 
3257c4158a5SRavi Kumar 	/* Set MAC flow control */
326*b4b24f3eSVenkat Kumar Ande 	q_count = axgbe_get_fc_queue_count(pdata);
3277c4158a5SRavi Kumar 	reg = MAC_Q0TFCR;
3287c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
3297c4158a5SRavi Kumar 		reg_val = AXGMAC_IOREAD(pdata, reg);
3307c4158a5SRavi Kumar 
3317c4158a5SRavi Kumar 		/* Enable transmit flow control */
3327c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
3337c4158a5SRavi Kumar 		/* Set pause time */
3347c4158a5SRavi Kumar 		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
3357c4158a5SRavi Kumar 
3367c4158a5SRavi Kumar 		AXGMAC_IOWRITE(pdata, reg, reg_val);
3377c4158a5SRavi Kumar 
3387c4158a5SRavi Kumar 		reg += MAC_QTFCR_INC;
3397c4158a5SRavi Kumar 	}
3407c4158a5SRavi Kumar 
3417c4158a5SRavi Kumar 	return 0;
3427c4158a5SRavi Kumar }
3437c4158a5SRavi Kumar 
/* Clear RFE so received pause frames no longer stop transmission. */
static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}
3507c4158a5SRavi Kumar 
/* Set RFE so received pause frames are honored. */
static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}
3577c4158a5SRavi Kumar 
3587c4158a5SRavi Kumar static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
3597c4158a5SRavi Kumar {
3607c4158a5SRavi Kumar 	if (pdata->tx_pause)
3617c4158a5SRavi Kumar 		axgbe_enable_tx_flow_control(pdata);
3627c4158a5SRavi Kumar 	else
3637c4158a5SRavi Kumar 		axgbe_disable_tx_flow_control(pdata);
3647c4158a5SRavi Kumar 
3657c4158a5SRavi Kumar 	return 0;
3667c4158a5SRavi Kumar }
3677c4158a5SRavi Kumar 
3687c4158a5SRavi Kumar static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
3697c4158a5SRavi Kumar {
3707c4158a5SRavi Kumar 	if (pdata->rx_pause)
3717c4158a5SRavi Kumar 		axgbe_enable_rx_flow_control(pdata);
3727c4158a5SRavi Kumar 	else
3737c4158a5SRavi Kumar 		axgbe_disable_rx_flow_control(pdata);
3747c4158a5SRavi Kumar 
3757c4158a5SRavi Kumar 	return 0;
3767c4158a5SRavi Kumar }
3777c4158a5SRavi Kumar 
/* Apply the configured Tx and Rx pause settings and clear PFCE (the
 * priority-based flow-control enable bit) in MAC_RFCR.
 */
static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}
3857c4158a5SRavi Kumar 
3867c4158a5SRavi Kumar static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
3877c4158a5SRavi Kumar 					       unsigned int queue,
3887c4158a5SRavi Kumar 					       unsigned int q_fifo_size)
3897c4158a5SRavi Kumar {
3907c4158a5SRavi Kumar 	unsigned int frame_fifo_size;
3917c4158a5SRavi Kumar 	unsigned int rfa, rfd;
3927c4158a5SRavi Kumar 
3937c4158a5SRavi Kumar 	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
3947c4158a5SRavi Kumar 
3957c4158a5SRavi Kumar 	/* This path deals with just maximum frame sizes which are
3967c4158a5SRavi Kumar 	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
3977c4158a5SRavi Kumar 	 * so we can never exceed the maximum allowable RFA/RFD
3987c4158a5SRavi Kumar 	 * values.
3997c4158a5SRavi Kumar 	 */
4007c4158a5SRavi Kumar 	if (q_fifo_size <= 2048) {
4017c4158a5SRavi Kumar 		/* rx_rfd to zero to signal no flow control */
4027c4158a5SRavi Kumar 		pdata->rx_rfa[queue] = 0;
4037c4158a5SRavi Kumar 		pdata->rx_rfd[queue] = 0;
4047c4158a5SRavi Kumar 		return;
4057c4158a5SRavi Kumar 	}
4067c4158a5SRavi Kumar 
4077c4158a5SRavi Kumar 	if (q_fifo_size <= 4096) {
4087c4158a5SRavi Kumar 		/* Between 2048 and 4096 */
4097c4158a5SRavi Kumar 		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
4107c4158a5SRavi Kumar 		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
4117c4158a5SRavi Kumar 		return;
4127c4158a5SRavi Kumar 	}
4137c4158a5SRavi Kumar 
4147c4158a5SRavi Kumar 	if (q_fifo_size <= frame_fifo_size) {
4157c4158a5SRavi Kumar 		/* Between 4096 and max-frame */
4167c4158a5SRavi Kumar 		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
4177c4158a5SRavi Kumar 		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
4187c4158a5SRavi Kumar 		return;
4197c4158a5SRavi Kumar 	}
4207c4158a5SRavi Kumar 
4217c4158a5SRavi Kumar 	if (q_fifo_size <= (frame_fifo_size * 3)) {
4227c4158a5SRavi Kumar 		/* Between max-frame and 3 max-frames,
4237c4158a5SRavi Kumar 		 * trigger if we get just over a frame of data and
4247c4158a5SRavi Kumar 		 * resume when we have just under half a frame left.
4257c4158a5SRavi Kumar 		 */
4267c4158a5SRavi Kumar 		rfa = q_fifo_size - frame_fifo_size;
4277c4158a5SRavi Kumar 		rfd = rfa + (frame_fifo_size / 2);
4287c4158a5SRavi Kumar 	} else {
4297c4158a5SRavi Kumar 		/* Above 3 max-frames - trigger when just over
4307c4158a5SRavi Kumar 		 * 2 frames of space available
4317c4158a5SRavi Kumar 		 */
4327c4158a5SRavi Kumar 		rfa = frame_fifo_size * 2;
4337c4158a5SRavi Kumar 		rfa += AXGMAC_FLOW_CONTROL_UNIT;
4347c4158a5SRavi Kumar 		rfd = rfa + frame_fifo_size;
4357c4158a5SRavi Kumar 	}
4367c4158a5SRavi Kumar 
4377c4158a5SRavi Kumar 	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
4387c4158a5SRavi Kumar 	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
4397c4158a5SRavi Kumar }
4407c4158a5SRavi Kumar 
4417c4158a5SRavi Kumar static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
4427c4158a5SRavi Kumar {
4437c4158a5SRavi Kumar 	unsigned int q_fifo_size;
4447c4158a5SRavi Kumar 	unsigned int i;
4457c4158a5SRavi Kumar 
4467c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
4477c4158a5SRavi Kumar 		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
4487c4158a5SRavi Kumar 
4497c4158a5SRavi Kumar 		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
4507c4158a5SRavi Kumar 	}
4517c4158a5SRavi Kumar }
4527c4158a5SRavi Kumar 
4537c4158a5SRavi Kumar static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
4547c4158a5SRavi Kumar {
4557c4158a5SRavi Kumar 	unsigned int i;
4567c4158a5SRavi Kumar 
4577c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++) {
4587c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
4597c4158a5SRavi Kumar 					pdata->rx_rfa[i]);
4607c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
4617c4158a5SRavi Kumar 					pdata->rx_rfd[i]);
4627c4158a5SRavi Kumar 	}
4637c4158a5SRavi Kumar }
4647c4158a5SRavi Kumar 
/* Enable hardware stripping of the outer C-TAG on received frames;
 * the stripped tag is reported in the Rx descriptor.  Returns 0.
 */
static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping (EVLS = 0x3: always strip) */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
	return 0;
}
48386578516SGirish Nandibasappa 
/* Disable Rx VLAN tag stripping (EVLS = 0: never strip).  Returns 0. */
static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
	return 0;
}
48986578516SGirish Nandibasappa 
/* Enable hash-table based Rx VLAN filtering on the lower 12 VID bits.
 * The table itself is programmed by axgbe_update_vlan_hash_table().
 * Returns 0.
 */
static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Enable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
	return 0;
}
51386578516SGirish Nandibasappa 
/* Disable Rx VLAN filtering by clearing VTFE in the packet filter
 * register.  Returns 0.
 */
static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Disable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
	return 0;
}
52086578516SGirish Nandibasappa 
52186578516SGirish Nandibasappa static u32 axgbe_vid_crc32_le(__le16 vid_le)
52286578516SGirish Nandibasappa {
52386578516SGirish Nandibasappa 	u32 poly = 0xedb88320;  /* CRCPOLY_LE */
52486578516SGirish Nandibasappa 	u32 crc = ~0;
52586578516SGirish Nandibasappa 	u32 temp = 0;
52686578516SGirish Nandibasappa 	unsigned char *data = (unsigned char *)&vid_le;
52786578516SGirish Nandibasappa 	unsigned char data_byte = 0;
52886578516SGirish Nandibasappa 	int i, bits;
52986578516SGirish Nandibasappa 
53086578516SGirish Nandibasappa 	bits = get_lastbit_set(VLAN_VID_MASK);
53186578516SGirish Nandibasappa 	for (i = 0; i < bits; i++) {
53286578516SGirish Nandibasappa 		if ((i % 8) == 0)
53386578516SGirish Nandibasappa 			data_byte = data[i / 8];
53486578516SGirish Nandibasappa 
53586578516SGirish Nandibasappa 		temp = ((crc & 1) ^ data_byte) & 1;
53686578516SGirish Nandibasappa 		crc >>= 1;
53786578516SGirish Nandibasappa 		data_byte >>= 1;
53886578516SGirish Nandibasappa 
53986578516SGirish Nandibasappa 		if (temp)
54086578516SGirish Nandibasappa 			crc ^= poly;
54186578516SGirish Nandibasappa 	}
54286578516SGirish Nandibasappa 	return crc;
54386578516SGirish Nandibasappa }
54486578516SGirish Nandibasappa 
54586578516SGirish Nandibasappa static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
54686578516SGirish Nandibasappa {
54786578516SGirish Nandibasappa 	u32 crc = 0;
54886578516SGirish Nandibasappa 	u16 vid;
54986578516SGirish Nandibasappa 	__le16 vid_le = 0;
55086578516SGirish Nandibasappa 	u16 vlan_hash_table = 0;
55186578516SGirish Nandibasappa 	unsigned int reg = 0;
55286578516SGirish Nandibasappa 	unsigned long vid_idx, vid_valid;
55386578516SGirish Nandibasappa 
55486578516SGirish Nandibasappa 	/* Generate the VLAN Hash Table value */
55586578516SGirish Nandibasappa 	for (vid = 0; vid < VLAN_N_VID; vid++) {
55686578516SGirish Nandibasappa 		vid_idx = VLAN_TABLE_IDX(vid);
55786578516SGirish Nandibasappa 		vid_valid = pdata->active_vlans[vid_idx];
55886578516SGirish Nandibasappa 		vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
55986578516SGirish Nandibasappa 		if (vid_valid & 1)
56086578516SGirish Nandibasappa 			PMD_DRV_LOG(DEBUG,
56186578516SGirish Nandibasappa 				    "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
56286578516SGirish Nandibasappa 				    vid, vid_idx, pdata->active_vlans[vid_idx]);
56386578516SGirish Nandibasappa 		else
56486578516SGirish Nandibasappa 			continue;
56586578516SGirish Nandibasappa 
56686578516SGirish Nandibasappa 		vid_le = rte_cpu_to_le_16(vid);
56786578516SGirish Nandibasappa 		crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
56886578516SGirish Nandibasappa 		vlan_hash_table |= (1 << crc);
56986578516SGirish Nandibasappa 		PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
57086578516SGirish Nandibasappa 			    crc, vlan_hash_table);
57186578516SGirish Nandibasappa 	}
57286578516SGirish Nandibasappa 	/* Set the VLAN Hash Table filtering register */
57386578516SGirish Nandibasappa 	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
57486578516SGirish Nandibasappa 	reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
57586578516SGirish Nandibasappa 	PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
57686578516SGirish Nandibasappa 	return 0;
57786578516SGirish Nandibasappa }
57886578516SGirish Nandibasappa 
579572890efSRavi Kumar static int __axgbe_exit(struct axgbe_port *pdata)
580572890efSRavi Kumar {
581572890efSRavi Kumar 	unsigned int count = 2000;
582572890efSRavi Kumar 
583572890efSRavi Kumar 	/* Issue a software reset */
584572890efSRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
585572890efSRavi Kumar 	rte_delay_us(10);
586572890efSRavi Kumar 
587572890efSRavi Kumar 	/* Poll Until Poll Condition */
588572890efSRavi Kumar 	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
589572890efSRavi Kumar 		rte_delay_us(500);
590572890efSRavi Kumar 
591572890efSRavi Kumar 	if (!count)
592572890efSRavi Kumar 		return -EBUSY;
593572890efSRavi Kumar 
594572890efSRavi Kumar 	return 0;
595572890efSRavi Kumar }
596572890efSRavi Kumar 
/* Reset the device.  The software reset is issued twice to guard
 * against possible incorrectly generated interrupts; the first
 * failure is returned immediately.
 */
static int axgbe_exit(struct axgbe_port *pdata)
{
	int rc;

	rc = __axgbe_exit(pdata);

	return rc ? rc : __axgbe_exit(pdata);
}
610572890efSRavi Kumar 
6117c4158a5SRavi Kumar static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
6127c4158a5SRavi Kumar {
6137c4158a5SRavi Kumar 	unsigned int i, count;
6147c4158a5SRavi Kumar 
6157c4158a5SRavi Kumar 	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
6167c4158a5SRavi Kumar 		return 0;
6177c4158a5SRavi Kumar 
6187c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
6197c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
6207c4158a5SRavi Kumar 
6217c4158a5SRavi Kumar 	/* Poll Until Poll Condition */
6227c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++) {
6237c4158a5SRavi Kumar 		count = 2000;
6247c4158a5SRavi Kumar 		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
6257c4158a5SRavi Kumar 							 MTL_Q_TQOMR, FTQ))
6267c4158a5SRavi Kumar 			rte_delay_us(500);
6277c4158a5SRavi Kumar 
6287c4158a5SRavi Kumar 		if (!count)
6297c4158a5SRavi Kumar 			return -EBUSY;
6307c4158a5SRavi Kumar 	}
6317c4158a5SRavi Kumar 
6327c4158a5SRavi Kumar 	return 0;
6337c4158a5SRavi Kumar }
6347c4158a5SRavi Kumar 
/* Configure the DMA system-bus (AXI) master: enhanced (>32-bit)
 * addressing, 0x3f outstanding read/write requests, undefined burst
 * length with 32-beat bursts, and address-aligned beats.
 */
static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Out standing read/write requests*/
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}
6497c4158a5SRavi Kumar 
/* Program the AXI cache attributes for DMA reads and writes.
 *
 * Each field is set to 0xf; per the register layout these are per-traffic
 * cache-control nibbles (descriptor, TSO/header, data paths).
 * NOTE(review): the exact meaning of value 0xf for each field comes from
 * the hardware databook -- confirm against the register definitions in
 * axgbe_common.h.
 */
static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	/* AXI read cache control */
	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	/* AXI write cache control */
	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	/* AXI combined read/write cache control */
	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}
6727c4158a5SRavi Kumar 
/* Program the enhanced DMA Tx/Rx control registers.
 * NOTE(review): 0x5 is a hardware-specific control value -- its bit
 * meaning is not visible here; confirm against the EDMA register
 * definitions before changing.
 */
static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}
6787c4158a5SRavi Kumar 
6797c4158a5SRavi Kumar static int axgbe_config_osp_mode(struct axgbe_port *pdata)
6807c4158a5SRavi Kumar {
6817c4158a5SRavi Kumar 	/* Force DMA to operate on second packet before closing descriptors
6827c4158a5SRavi Kumar 	 *  of first packet
6837c4158a5SRavi Kumar 	 */
6847c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
6857c4158a5SRavi Kumar 	unsigned int i;
6867c4158a5SRavi Kumar 
6877c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
6887c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
6897c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
6907c4158a5SRavi Kumar 					pdata->tx_osp_mode);
6917c4158a5SRavi Kumar 	}
6927c4158a5SRavi Kumar 
6937c4158a5SRavi Kumar 	return 0;
6947c4158a5SRavi Kumar }
6957c4158a5SRavi Kumar 
6967c4158a5SRavi Kumar static int axgbe_config_pblx8(struct axgbe_port *pdata)
6977c4158a5SRavi Kumar {
6987c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
6997c4158a5SRavi Kumar 	unsigned int i;
7007c4158a5SRavi Kumar 
7017c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
7027c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
7037c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
7047c4158a5SRavi Kumar 					pdata->pblx8);
7057c4158a5SRavi Kumar 	}
7067c4158a5SRavi Kumar 	return 0;
7077c4158a5SRavi Kumar }
7087c4158a5SRavi Kumar 
7097c4158a5SRavi Kumar static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
7107c4158a5SRavi Kumar {
7117c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
7127c4158a5SRavi Kumar 	unsigned int i;
7137c4158a5SRavi Kumar 
7147c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
7157c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
7167c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
7177c4158a5SRavi Kumar 				pdata->tx_pbl);
7187c4158a5SRavi Kumar 	}
7197c4158a5SRavi Kumar 
7207c4158a5SRavi Kumar 	return 0;
7217c4158a5SRavi Kumar }
7227c4158a5SRavi Kumar 
7237c4158a5SRavi Kumar static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
7247c4158a5SRavi Kumar {
7257c4158a5SRavi Kumar 	struct axgbe_rx_queue *rxq;
7267c4158a5SRavi Kumar 	unsigned int i;
7277c4158a5SRavi Kumar 
7287c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
7297c4158a5SRavi Kumar 		rxq = pdata->eth_dev->data->rx_queues[i];
7307c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
7317c4158a5SRavi Kumar 				pdata->rx_pbl);
7327c4158a5SRavi Kumar 	}
7337c4158a5SRavi Kumar 
7347c4158a5SRavi Kumar 	return 0;
7357c4158a5SRavi Kumar }
7367c4158a5SRavi Kumar 
7377c4158a5SRavi Kumar static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
7387c4158a5SRavi Kumar {
7397c4158a5SRavi Kumar 	struct axgbe_rx_queue *rxq;
7407c4158a5SRavi Kumar 	unsigned int i;
7417c4158a5SRavi Kumar 
7427c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
7437c4158a5SRavi Kumar 		rxq = pdata->eth_dev->data->rx_queues[i];
7447c4158a5SRavi Kumar 
7457c4158a5SRavi Kumar 		rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
7467c4158a5SRavi Kumar 			RTE_PKTMBUF_HEADROOM;
7477c4158a5SRavi Kumar 		rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
7487c4158a5SRavi Kumar 			~(AXGBE_RX_BUF_ALIGN - 1);
7497c4158a5SRavi Kumar 
7507c4158a5SRavi Kumar 		if (rxq->buf_size > pdata->rx_buf_size)
7517c4158a5SRavi Kumar 			pdata->rx_buf_size = rxq->buf_size;
7527c4158a5SRavi Kumar 
7537c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
7547c4158a5SRavi Kumar 					rxq->buf_size);
7557c4158a5SRavi Kumar 	}
7567c4158a5SRavi Kumar }
7577c4158a5SRavi Kumar 
7587c4158a5SRavi Kumar static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
7597c4158a5SRavi Kumar 			       unsigned int index, unsigned int val)
7607c4158a5SRavi Kumar {
7617c4158a5SRavi Kumar 	unsigned int wait;
7627c4158a5SRavi Kumar 
7637c4158a5SRavi Kumar 	if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
7647c4158a5SRavi Kumar 		return -EBUSY;
7657c4158a5SRavi Kumar 
7667c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
7677c4158a5SRavi Kumar 
7687c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
7697c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
7707c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
7717c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
7727c4158a5SRavi Kumar 
7737c4158a5SRavi Kumar 	wait = 1000;
7747c4158a5SRavi Kumar 	while (wait--) {
7757c4158a5SRavi Kumar 		if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
7767c4158a5SRavi Kumar 			return 0;
7777c4158a5SRavi Kumar 
7787c4158a5SRavi Kumar 		rte_delay_us(1500);
7797c4158a5SRavi Kumar 	}
7807c4158a5SRavi Kumar 
7817c4158a5SRavi Kumar 	return -EBUSY;
7827c4158a5SRavi Kumar }
7837c4158a5SRavi Kumar 
78476d7664dSChandu Babu N int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
7857c4158a5SRavi Kumar {
7867c4158a5SRavi Kumar 	struct rte_eth_rss_conf *rss_conf;
7877c4158a5SRavi Kumar 	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
7887c4158a5SRavi Kumar 	unsigned int *key;
7897c4158a5SRavi Kumar 	int ret;
7907c4158a5SRavi Kumar 
7917c4158a5SRavi Kumar 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
7927c4158a5SRavi Kumar 
7937c4158a5SRavi Kumar 	if (!rss_conf->rss_key)
7947c4158a5SRavi Kumar 		key = (unsigned int *)&pdata->rss_key;
7957c4158a5SRavi Kumar 	else
7967c4158a5SRavi Kumar 		key = (unsigned int *)&rss_conf->rss_key;
7977c4158a5SRavi Kumar 
7987c4158a5SRavi Kumar 	while (key_regs--) {
7997c4158a5SRavi Kumar 		ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
8007c4158a5SRavi Kumar 					  key_regs, *key++);
8017c4158a5SRavi Kumar 		if (ret)
8027c4158a5SRavi Kumar 			return ret;
8037c4158a5SRavi Kumar 	}
8047c4158a5SRavi Kumar 
8057c4158a5SRavi Kumar 	return 0;
8067c4158a5SRavi Kumar }
8077c4158a5SRavi Kumar 
80876d7664dSChandu Babu N int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
8097c4158a5SRavi Kumar {
8107c4158a5SRavi Kumar 	unsigned int i;
8117c4158a5SRavi Kumar 	int ret;
8127c4158a5SRavi Kumar 
8137c4158a5SRavi Kumar 	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
8147c4158a5SRavi Kumar 		ret = axgbe_write_rss_reg(pdata,
8157c4158a5SRavi Kumar 					  AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
8167c4158a5SRavi Kumar 					  pdata->rss_table[i]);
8177c4158a5SRavi Kumar 		if (ret)
8187c4158a5SRavi Kumar 			return ret;
8197c4158a5SRavi Kumar 	}
8207c4158a5SRavi Kumar 
8217c4158a5SRavi Kumar 	return 0;
8227c4158a5SRavi Kumar }
8237c4158a5SRavi Kumar 
8247c4158a5SRavi Kumar static int axgbe_enable_rss(struct axgbe_port *pdata)
8257c4158a5SRavi Kumar {
8267c4158a5SRavi Kumar 	int ret;
8277c4158a5SRavi Kumar 
8287c4158a5SRavi Kumar 	/* Program the hash key */
8297c4158a5SRavi Kumar 	ret = axgbe_write_rss_hash_key(pdata);
8307c4158a5SRavi Kumar 	if (ret)
8317c4158a5SRavi Kumar 		return ret;
8327c4158a5SRavi Kumar 
8337c4158a5SRavi Kumar 	/* Program the lookup table */
8347c4158a5SRavi Kumar 	ret = axgbe_write_rss_lookup_table(pdata);
8357c4158a5SRavi Kumar 	if (ret)
8367c4158a5SRavi Kumar 		return ret;
8377c4158a5SRavi Kumar 
8387c4158a5SRavi Kumar 	/* Set the RSS options */
8397c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
8407c4158a5SRavi Kumar 
8417c4158a5SRavi Kumar 	/* Enable RSS */
8427c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
8437c4158a5SRavi Kumar 
8447c4158a5SRavi Kumar 	return 0;
8457c4158a5SRavi Kumar }
8467c4158a5SRavi Kumar 
8477c4158a5SRavi Kumar static void axgbe_rss_options(struct axgbe_port *pdata)
8487c4158a5SRavi Kumar {
8497c4158a5SRavi Kumar 	struct rte_eth_rss_conf *rss_conf;
8507c4158a5SRavi Kumar 	uint64_t rss_hf;
8517c4158a5SRavi Kumar 
8527c4158a5SRavi Kumar 	rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
85376d7664dSChandu Babu N 	pdata->rss_hf = rss_conf->rss_hf;
8547c4158a5SRavi Kumar 	rss_hf = rss_conf->rss_hf;
8557c4158a5SRavi Kumar 
856295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
8577c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
858295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
8597c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
860295968d1SFerruh Yigit 	if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
8617c4158a5SRavi Kumar 		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
8627c4158a5SRavi Kumar }
8637c4158a5SRavi Kumar 
8647c4158a5SRavi Kumar static int axgbe_config_rss(struct axgbe_port *pdata)
8657c4158a5SRavi Kumar {
8667c4158a5SRavi Kumar 	uint32_t i;
8677c4158a5SRavi Kumar 
8687c4158a5SRavi Kumar 	if (pdata->rss_enable) {
8697c4158a5SRavi Kumar 		/* Initialize RSS hash key and lookup table */
8707c4158a5SRavi Kumar 		uint32_t *key = (uint32_t *)pdata->rss_key;
8717c4158a5SRavi Kumar 
8727c4158a5SRavi Kumar 		for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
8737c4158a5SRavi Kumar 			*key++ = (uint32_t)rte_rand();
8747c4158a5SRavi Kumar 		for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
8757c4158a5SRavi Kumar 			AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
8767c4158a5SRavi Kumar 					i % pdata->eth_dev->data->nb_rx_queues);
8777c4158a5SRavi Kumar 		axgbe_rss_options(pdata);
8787c4158a5SRavi Kumar 		if (axgbe_enable_rss(pdata)) {
8797c4158a5SRavi Kumar 			PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
8807c4158a5SRavi Kumar 			return -1;
8817c4158a5SRavi Kumar 		}
8827c4158a5SRavi Kumar 	} else {
8837c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
8847c4158a5SRavi Kumar 	}
8857c4158a5SRavi Kumar 
8867c4158a5SRavi Kumar 	return 0;
8877c4158a5SRavi Kumar }
8887c4158a5SRavi Kumar 
/* Initialize per-channel DMA interrupt state for each Tx queue's channel.
 *
 * Pending channel interrupts are acknowledged, then the enable register
 * is programmed so only abnormal-summary (AIE) and fatal-bus-error
 * (FBEE) interrupts are armed.  Normal-summary (NIE) and receive-
 * buffer-unavailable (RBUE) are explicitly written as 0: the PMD uses
 * polling for normal Tx/Rx completion, not interrupts.
 */
static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int dma_ch_isr, dma_ch_ier;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];

		/* Clear all the interrupts which are set */
		dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
		AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);

		/* Clear all interrupt enable bits */
		dma_ch_ier = 0;

		/* NIE  - Normal Interrupt Summary: left DISABLED (0),
		 *        completion is handled by polling
		 * AIE  - Abnormal Interrupt Summary Enable
		 * FBEE - Fatal Bus Error Enable
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

		/* RBUE - Receive Buffer Unavailable: also left DISABLED
		 *        (0); Rx is serviced by the polled datapath
		 */
		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);

		AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
	}
}
9257c4158a5SRavi Kumar 
9267c4158a5SRavi Kumar static void wrapper_tx_desc_init(struct axgbe_port *pdata)
9277c4158a5SRavi Kumar {
9287c4158a5SRavi Kumar 	struct axgbe_tx_queue *txq;
9297c4158a5SRavi Kumar 	unsigned int i;
9307c4158a5SRavi Kumar 
9317c4158a5SRavi Kumar 	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
9327c4158a5SRavi Kumar 		txq = pdata->eth_dev->data->tx_queues[i];
9337c4158a5SRavi Kumar 		txq->cur = 0;
9347c4158a5SRavi Kumar 		txq->dirty = 0;
9357c4158a5SRavi Kumar 		/* Update the total number of Tx descriptors */
9367c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
9377c4158a5SRavi Kumar 		/* Update the starting address of descriptor ring */
9387c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
9397c4158a5SRavi Kumar 					high32_value(txq->ring_phys_addr));
9407c4158a5SRavi Kumar 		AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
9417c4158a5SRavi Kumar 					low32_value(txq->ring_phys_addr));
9427c4158a5SRavi Kumar 	}
9437c4158a5SRavi Kumar }
9447c4158a5SRavi Kumar 
/* Populate every Rx queue's descriptor ring with freshly allocated mbufs
 * and program the ring registers.
 *
 * For each descriptor an mbuf is allocated, its buffer address written
 * to the descriptor, and the OWN bit handed to hardware -- with write
 * barriers ensuring the address is visible before OWN is set.  On any
 * allocation failure the queue's resources are released and -ENOMEM is
 * returned.  Returns 0 on success.
 */
static int wrapper_rx_desc_init(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	struct rte_mbuf *mbuf;
	volatile union axgbe_rx_desc *desc;
	unsigned int i, j;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq = pdata->eth_dev->data->rx_queues[i];

		/* Initialize software ring entries */
		rxq->mbuf_alloc = 0;
		rxq->cur = 0;
		rxq->dirty = 0;
		desc = AXGBE_GET_DESC_PT(rxq, 0);

		for (j = 0; j < rxq->nb_desc; j++) {
			mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (mbuf == NULL) {
				PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
					    (unsigned int)rxq->queue_id, j);
				/* Release whatever this queue set up so far */
				axgbe_dev_rx_queue_release(pdata->eth_dev, i);
				return -ENOMEM;
			}
			rxq->sw_ring[j] = mbuf;
			/* Mbuf populate */
			mbuf->next = NULL;
			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			mbuf->nb_segs = 1;
			mbuf->port = rxq->port_id;
			desc->read.baddr =
				rte_cpu_to_le_64(
					rte_mbuf_data_iova_default(mbuf));
			/* Buffer address must be visible before OWN is set */
			rte_wmb();
			AXGMAC_SET_BITS_LE(desc->read.desc3,
						RX_NORMAL_DESC3, OWN, 1);
			rte_wmb();
			rxq->mbuf_alloc++;
			desc++;
		}
		/* Update the total number of Rx descriptors */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
					rxq->nb_desc - 1);
		/* Update the starting address of descriptor ring */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
					high32_value(rxq->ring_phys_addr));
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
					low32_value(rxq->ring_phys_addr));
		/* Update the Rx Descriptor Tail Pointer (last descriptor) */
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (rxq->nb_desc - 1) *
				   sizeof(union axgbe_rx_desc)));
	}
	return 0;
}
10017c4158a5SRavi Kumar 
10027c4158a5SRavi Kumar static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
10037c4158a5SRavi Kumar {
10047c4158a5SRavi Kumar 	unsigned int i;
10057c4158a5SRavi Kumar 
10067c4158a5SRavi Kumar 	/* Set Tx to weighted round robin scheduling algorithm */
10077c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
10087c4158a5SRavi Kumar 
10097c4158a5SRavi Kumar 	/* Set Tx traffic classes to use WRR algorithm with equal weights */
10107c4158a5SRavi Kumar 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
10117c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
10127c4158a5SRavi Kumar 				MTL_TSA_ETS);
10137c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
10147c4158a5SRavi Kumar 	}
10157c4158a5SRavi Kumar 
10167c4158a5SRavi Kumar 	/* Set Rx to strict priority algorithm */
10177c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
10187c4158a5SRavi Kumar }
10197c4158a5SRavi Kumar 
10207c4158a5SRavi Kumar static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
10217c4158a5SRavi Kumar {
10227c4158a5SRavi Kumar 	unsigned int i;
10237c4158a5SRavi Kumar 
10247c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
10257c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
10267c4158a5SRavi Kumar 
10277c4158a5SRavi Kumar 	return 0;
10287c4158a5SRavi Kumar }
10297c4158a5SRavi Kumar 
10307c4158a5SRavi Kumar static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
10317c4158a5SRavi Kumar {
10327c4158a5SRavi Kumar 	unsigned int i;
10337c4158a5SRavi Kumar 
10347c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
10357c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
10367c4158a5SRavi Kumar 
10377c4158a5SRavi Kumar 	return 0;
10387c4158a5SRavi Kumar }
10397c4158a5SRavi Kumar 
10407c4158a5SRavi Kumar static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
10417c4158a5SRavi Kumar 				     unsigned int val)
10427c4158a5SRavi Kumar {
10437c4158a5SRavi Kumar 	unsigned int i;
10447c4158a5SRavi Kumar 
10457c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
10467c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
10477c4158a5SRavi Kumar 
10487c4158a5SRavi Kumar 	return 0;
10497c4158a5SRavi Kumar }
10507c4158a5SRavi Kumar 
10517c4158a5SRavi Kumar static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
10527c4158a5SRavi Kumar 				     unsigned int val)
10537c4158a5SRavi Kumar {
10547c4158a5SRavi Kumar 	unsigned int i;
10557c4158a5SRavi Kumar 
10567c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
10577c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
10587c4158a5SRavi Kumar 
10597c4158a5SRavi Kumar 	return 0;
10607c4158a5SRavi Kumar }
10617c4158a5SRavi Kumar 
10627be78d02SJosh Soref /* Distributing FIFO size */
10637c4158a5SRavi Kumar static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
10647c4158a5SRavi Kumar {
10657c4158a5SRavi Kumar 	unsigned int fifo_size;
10667c4158a5SRavi Kumar 	unsigned int q_fifo_size;
10677c4158a5SRavi Kumar 	unsigned int p_fifo, i;
10687c4158a5SRavi Kumar 
10697c4158a5SRavi Kumar 	fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
10707c4158a5SRavi Kumar 			  pdata->hw_feat.rx_fifo_size);
10717c4158a5SRavi Kumar 	q_fifo_size = fifo_size / pdata->rx_q_count;
10727c4158a5SRavi Kumar 
10737c4158a5SRavi Kumar 	/* Calculate the fifo setting by dividing the queue's fifo size
10747c4158a5SRavi Kumar 	 * by the fifo allocation increment (with 0 representing the
10757c4158a5SRavi Kumar 	 * base allocation increment so decrement the result
10767c4158a5SRavi Kumar 	 * by 1).
10777c4158a5SRavi Kumar 	 */
10787c4158a5SRavi Kumar 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
10797c4158a5SRavi Kumar 	if (p_fifo)
10807c4158a5SRavi Kumar 		p_fifo--;
10817c4158a5SRavi Kumar 
10827c4158a5SRavi Kumar 	for (i = 0; i < pdata->rx_q_count; i++)
10837c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
10847c4158a5SRavi Kumar 	pdata->fifo = p_fifo;
10857c4158a5SRavi Kumar 
10867c4158a5SRavi Kumar 	/*Calculate and config Flow control threshold*/
10877c4158a5SRavi Kumar 	axgbe_calculate_flow_control_threshold(pdata);
10887c4158a5SRavi Kumar 	axgbe_config_flow_control_threshold(pdata);
10894216cdc0SChandu Babu N 
10904216cdc0SChandu Babu N 	PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n",
10914216cdc0SChandu Babu N 		    pdata->rx_q_count, q_fifo_size);
10927c4158a5SRavi Kumar }
10937c4158a5SRavi Kumar 
10947c4158a5SRavi Kumar static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
10957c4158a5SRavi Kumar {
10967c4158a5SRavi Kumar 	unsigned int fifo_size;
10977c4158a5SRavi Kumar 	unsigned int q_fifo_size;
10987c4158a5SRavi Kumar 	unsigned int p_fifo, i;
10997c4158a5SRavi Kumar 
11007c4158a5SRavi Kumar 	fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
11017c4158a5SRavi Kumar 				pdata->hw_feat.tx_fifo_size);
11027c4158a5SRavi Kumar 	q_fifo_size = fifo_size / pdata->tx_q_count;
11037c4158a5SRavi Kumar 
11047c4158a5SRavi Kumar 	/* Calculate the fifo setting by dividing the queue's fifo size
11057c4158a5SRavi Kumar 	 * by the fifo allocation increment (with 0 representing the
11067c4158a5SRavi Kumar 	 * base allocation increment so decrement the result
11077c4158a5SRavi Kumar 	 * by 1).
11087c4158a5SRavi Kumar 	 */
11097c4158a5SRavi Kumar 	p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
11107c4158a5SRavi Kumar 	if (p_fifo)
11117c4158a5SRavi Kumar 		p_fifo--;
11127c4158a5SRavi Kumar 
11137c4158a5SRavi Kumar 	for (i = 0; i < pdata->tx_q_count; i++)
11147c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
11154216cdc0SChandu Babu N 
11164216cdc0SChandu Babu N 	PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n",
11174216cdc0SChandu Babu N 		    pdata->tx_q_count, q_fifo_size);
11187c4158a5SRavi Kumar }
11197c4158a5SRavi Kumar 
11207c4158a5SRavi Kumar static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
11217c4158a5SRavi Kumar {
11227c4158a5SRavi Kumar 	unsigned int qptc, qptc_extra, queue;
11237c4158a5SRavi Kumar 	unsigned int i, j, reg, reg_val;
11247c4158a5SRavi Kumar 
11257c4158a5SRavi Kumar 	/* Map the MTL Tx Queues to Traffic Classes
11267c4158a5SRavi Kumar 	 *   Note: Tx Queues >= Traffic Classes
11277c4158a5SRavi Kumar 	 */
11287c4158a5SRavi Kumar 	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
11297c4158a5SRavi Kumar 	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
11307c4158a5SRavi Kumar 
11317c4158a5SRavi Kumar 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
11324216cdc0SChandu Babu N 		for (j = 0; j < qptc; j++) {
11334216cdc0SChandu Babu N 			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
11347c4158a5SRavi Kumar 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
11357c4158a5SRavi Kumar 						Q2TCMAP, i);
11364216cdc0SChandu Babu N 		}
11374216cdc0SChandu Babu N 		if (i < qptc_extra) {
11384216cdc0SChandu Babu N 			PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i);
11397c4158a5SRavi Kumar 			AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
11407c4158a5SRavi Kumar 						Q2TCMAP, i);
11417c4158a5SRavi Kumar 		}
11424216cdc0SChandu Babu N 	}
11437c4158a5SRavi Kumar 
11447c4158a5SRavi Kumar 	if (pdata->rss_enable) {
11457c4158a5SRavi Kumar 		/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
11467c4158a5SRavi Kumar 		reg = MTL_RQDCM0R;
11477c4158a5SRavi Kumar 		reg_val = 0;
11487c4158a5SRavi Kumar 		for (i = 0; i < pdata->rx_q_count;) {
11497c4158a5SRavi Kumar 			reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
11507c4158a5SRavi Kumar 
11517c4158a5SRavi Kumar 			if ((i % MTL_RQDCM_Q_PER_REG) &&
11527c4158a5SRavi Kumar 			    (i != pdata->rx_q_count))
11537c4158a5SRavi Kumar 				continue;
11547c4158a5SRavi Kumar 
11557c4158a5SRavi Kumar 			AXGMAC_IOWRITE(pdata, reg, reg_val);
11567c4158a5SRavi Kumar 
11577c4158a5SRavi Kumar 			reg += MTL_RQDCM_INC;
11587c4158a5SRavi Kumar 			reg_val = 0;
11597c4158a5SRavi Kumar 		}
11607c4158a5SRavi Kumar 	}
11617c4158a5SRavi Kumar }
11627c4158a5SRavi Kumar 
11637c4158a5SRavi Kumar static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
11647c4158a5SRavi Kumar {
11657c4158a5SRavi Kumar 	unsigned int mtl_q_isr;
11667c4158a5SRavi Kumar 	unsigned int q_count, i;
11677c4158a5SRavi Kumar 
11687c4158a5SRavi Kumar 	q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
11697c4158a5SRavi Kumar 	for (i = 0; i < q_count; i++) {
11707c4158a5SRavi Kumar 		/* Clear all the interrupts which are set */
11717c4158a5SRavi Kumar 		mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
11727c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
11737c4158a5SRavi Kumar 
11747c4158a5SRavi Kumar 		/* No MTL interrupts to be enabled */
11757c4158a5SRavi Kumar 		AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
11767c4158a5SRavi Kumar 	}
11777c4158a5SRavi Kumar }
11787c4158a5SRavi Kumar 
/* Bitwise (table-less) little-endian CRC-32 over 'len' bytes of 'p',
 * starting from 'crc', using the reflected polynomial 0xedb88320.
 */
static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len)
{
	uint32_t byte;
	int bit;

	for (byte = 0; byte < len; byte++) {
		crc ^= p[byte];
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320
					: (crc >> 1);
	}
	return crc;
}
1189e01d9b2eSChandu Babu N 
1190e01d9b2eSChandu Babu N void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add)
1191e01d9b2eSChandu Babu N {
1192e01d9b2eSChandu Babu N 	uint32_t crc, htable_index, htable_bitmask;
1193e01d9b2eSChandu Babu N 
1194e01d9b2eSChandu Babu N 	crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN));
1195e01d9b2eSChandu Babu N 	crc >>= pdata->hash_table_shift;
1196e01d9b2eSChandu Babu N 	htable_index = crc >> 5;
1197e01d9b2eSChandu Babu N 	htable_bitmask = 1 << (crc & 0x1f);
1198e01d9b2eSChandu Babu N 
1199e01d9b2eSChandu Babu N 	if (add) {
1200e01d9b2eSChandu Babu N 		pdata->uc_hash_table[htable_index] |= htable_bitmask;
1201e01d9b2eSChandu Babu N 		pdata->uc_hash_mac_addr++;
1202e01d9b2eSChandu Babu N 	} else {
1203e01d9b2eSChandu Babu N 		pdata->uc_hash_table[htable_index] &= ~htable_bitmask;
1204e01d9b2eSChandu Babu N 		pdata->uc_hash_mac_addr--;
1205e01d9b2eSChandu Babu N 	}
1206e01d9b2eSChandu Babu N 	PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n",
1207e01d9b2eSChandu Babu N 		    add ? "set" : "clear", (crc & 0x1f), htable_index);
1208e01d9b2eSChandu Babu N 
1209e01d9b2eSChandu Babu N 	AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index),
1210e01d9b2eSChandu Babu N 		       pdata->uc_hash_table[htable_index]);
1211e01d9b2eSChandu Babu N }
1212e01d9b2eSChandu Babu N 
/* Program additional MAC address register pair 'index'.
 *
 * When addr is non-NULL, the 6-byte address is packed byte-by-byte into
 * the low (bytes 0-3) and high (bytes 4-5) register words and the AE
 * (Address Enable) bit is set so the entry takes part in perfect
 * filtering.  A NULL addr clears the entry (both words written as 0).
 *
 * NOTE(review): the byte-pointer packing assumes a little-endian host
 * (it must match the shift-based packing in axgbe_set_mac_address) --
 * confirm before reusing on big-endian targets.
 */
void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	u8 *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (addr) {
		/* Bytes 0-3 into the low word, 4-5 into the high word */
		mac_addr = (u8 *)&mac_addr_lo;
		mac_addr[0] = addr[0];
		mac_addr[1] = addr[1];
		mac_addr[2] = addr[2];
		mac_addr[3] = addr[3];
		mac_addr = (u8 *)&mac_addr_hi;
		mac_addr[0] = addr[4];
		mac_addr[1] = addr[5];

		/*Address Enable: Use this Addr for Perfect Filtering */
		AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n",
		    addr ? "set" : "clear", index);

	AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi);
	AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo);
}
124149a5e622SChandu Babu N 
12427c4158a5SRavi Kumar static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
12437c4158a5SRavi Kumar {
12447c4158a5SRavi Kumar 	unsigned int mac_addr_hi, mac_addr_lo;
12457c4158a5SRavi Kumar 
12467c4158a5SRavi Kumar 	mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
12477c4158a5SRavi Kumar 	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
12487c4158a5SRavi Kumar 		(addr[1] <<  8) | (addr[0] <<  0);
12497c4158a5SRavi Kumar 
12507c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
12517c4158a5SRavi Kumar 	AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
12527c4158a5SRavi Kumar 
12537c4158a5SRavi Kumar 	return 0;
12547c4158a5SRavi Kumar }
12557c4158a5SRavi Kumar 
1256e01d9b2eSChandu Babu N static void axgbe_config_mac_hash_table(struct axgbe_port *pdata)
1257e01d9b2eSChandu Babu N {
1258e01d9b2eSChandu Babu N 	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1259e01d9b2eSChandu Babu N 
1260e01d9b2eSChandu Babu N 	pdata->hash_table_shift = 0;
1261e01d9b2eSChandu Babu N 	pdata->hash_table_count = 0;
1262e01d9b2eSChandu Babu N 	pdata->uc_hash_mac_addr = 0;
1263e01d9b2eSChandu Babu N 	memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table));
1264e01d9b2eSChandu Babu N 
1265e01d9b2eSChandu Babu N 	if (hw_feat->hash_table_size) {
1266e01d9b2eSChandu Babu N 		pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7);
1267e01d9b2eSChandu Babu N 		pdata->hash_table_count = hw_feat->hash_table_size / 32;
1268e01d9b2eSChandu Babu N 	}
1269e01d9b2eSChandu Babu N }
1270e01d9b2eSChandu Babu N 
/* Program the port's configured MAC address into the hardware. */
static void axgbe_config_mac_address(struct axgbe_port *pdata)
{
	axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
}
12757c4158a5SRavi Kumar 
12767c4158a5SRavi Kumar static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
12777c4158a5SRavi Kumar {
12787c4158a5SRavi Kumar 	unsigned int val;
12797c4158a5SRavi Kumar 
12807c4158a5SRavi Kumar 	val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
12817c4158a5SRavi Kumar 
12827c4158a5SRavi Kumar 	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
12837c4158a5SRavi Kumar }
12847c4158a5SRavi Kumar 
/* Apply the currently negotiated PHY speed to the MAC. */
static void axgbe_config_mac_speed(struct axgbe_port *pdata)
{
	axgbe_set_speed(pdata, pdata->phy_speed);
}
12897c4158a5SRavi Kumar 
12907c4158a5SRavi Kumar static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
12917c4158a5SRavi Kumar {
12927c4158a5SRavi Kumar 	if (pdata->rx_csum_enable)
12937c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
12947c4158a5SRavi Kumar 	else
12957c4158a5SRavi Kumar 		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
12967c4158a5SRavi Kumar }
12977c4158a5SRavi Kumar 
12989d1ef6b2SChandu Babu N static void axgbe_config_mmc(struct axgbe_port *pdata)
12999d1ef6b2SChandu Babu N {
13009d1ef6b2SChandu Babu N 	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
13019d1ef6b2SChandu Babu N 
13029d1ef6b2SChandu Babu N 	/* Reset stats */
13039d1ef6b2SChandu Babu N 	memset(stats, 0, sizeof(*stats));
13049d1ef6b2SChandu Babu N 
13059d1ef6b2SChandu Babu N 	/* Set counters to reset on read */
13069d1ef6b2SChandu Babu N 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
13079d1ef6b2SChandu Babu N 
13089d1ef6b2SChandu Babu N 	/* Reset the counters */
13099d1ef6b2SChandu Babu N 	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
13109d1ef6b2SChandu Babu N }
13119d1ef6b2SChandu Babu N 
/* Perform full hardware initialization of the port: DMA, MTL (queue)
 * and MAC layers, in that order.  The sequence mirrors the hardware
 * bring-up requirements, so the call order below must be preserved.
 *
 * Returns 0 on success, or a negative errno from the Tx queue flush or
 * the Rx descriptor ring setup.
 */
static int axgbe_init(struct axgbe_port *pdata)
{
	int ret;

	/* Flush Tx queues before reprogramming DMA */
	ret = axgbe_flush_tx_queues(pdata);
	if (ret)
		return ret;
	/* Initialize DMA related features */
	axgbe_config_dma_bus(pdata);
	axgbe_config_dma_cache(pdata);
	axgbe_config_edma_control(pdata);
	axgbe_config_osp_mode(pdata);
	axgbe_config_pblx8(pdata);
	axgbe_config_tx_pbl_val(pdata);
	axgbe_config_rx_pbl_val(pdata);
	axgbe_config_rx_buffer_size(pdata);
	axgbe_config_rss(pdata);
	/* Descriptor ring setup; Rx setup can fail on mbuf allocation */
	wrapper_tx_desc_init(pdata);
	ret = wrapper_rx_desc_init(pdata);
	if (ret)
		return ret;
	axgbe_enable_dma_interrupts(pdata);

	/* Initialize MTL related features (per-queue FIFOs/thresholds) */
	axgbe_config_mtl_mode(pdata);
	axgbe_config_queue_mapping(pdata);
	axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	axgbe_config_tx_fifo_size(pdata);
	axgbe_config_rx_fifo_size(pdata);

	axgbe_enable_mtl_interrupts(pdata);

	/* Initialize MAC related features (addressing, offloads, stats) */
	axgbe_config_mac_hash_table(pdata);
	axgbe_config_mac_address(pdata);
	axgbe_config_jumbo_enable(pdata);
	axgbe_config_flow_control(pdata);
	axgbe_config_mac_speed(pdata);
	axgbe_config_checksum_offload(pdata);
	axgbe_config_mmc(pdata);

	return 0;
}
13597c4158a5SRavi Kumar 
1360572890efSRavi Kumar void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
1361572890efSRavi Kumar {
1362572890efSRavi Kumar 	hw_if->exit = axgbe_exit;
13637c4158a5SRavi Kumar 	hw_if->config_flow_control = axgbe_config_flow_control;
13644ac7516bSRavi Kumar 
13657c4158a5SRavi Kumar 	hw_if->init = axgbe_init;
1366a5c72737SRavi Kumar 
13674ac7516bSRavi Kumar 	hw_if->read_mmd_regs = axgbe_read_mmd_regs;
13684ac7516bSRavi Kumar 	hw_if->write_mmd_regs = axgbe_write_mmd_regs;
13694ac7516bSRavi Kumar 
1370a5c72737SRavi Kumar 	hw_if->set_speed = axgbe_set_speed;
1371a5c72737SRavi Kumar 
13724ac7516bSRavi Kumar 	hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
13734ac7516bSRavi Kumar 	hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
13744ac7516bSRavi Kumar 	hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
13757c4158a5SRavi Kumar 	/* For FLOW ctrl */
13767c4158a5SRavi Kumar 	hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
13777c4158a5SRavi Kumar 	hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
137886578516SGirish Nandibasappa 
137986578516SGirish Nandibasappa 	/*vlan*/
138086578516SGirish Nandibasappa 	hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping;
138186578516SGirish Nandibasappa 	hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping;
138286578516SGirish Nandibasappa 	hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering;
138386578516SGirish Nandibasappa 	hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering;
138486578516SGirish Nandibasappa 	hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table;
1385572890efSRavi Kumar }
1386