xref: /dpdk/drivers/net/enetfec/enet_ethdev.c (revision 191128d7f6a02b816deaa86d761fbde4483724e9)
1fc0ec740SApeksha Gupta /* SPDX-License-Identifier: BSD-3-Clause
2fc0ec740SApeksha Gupta  * Copyright 2020-2021 NXP
3fc0ec740SApeksha Gupta  */
4fc0ec740SApeksha Gupta 
57c3c0d0fSDavid Marchand #include <inttypes.h>
67c3c0d0fSDavid Marchand 
7fc0ec740SApeksha Gupta #include <ethdev_vdev.h>
8fc0ec740SApeksha Gupta #include <ethdev_driver.h>
9*191128d7SDavid Marchand #include <rte_bitops.h>
10b84fdd39SApeksha Gupta #include <rte_io.h>
117c3c0d0fSDavid Marchand 
12fc0ec740SApeksha Gupta #include "enet_pmd_logs.h"
13fc0ec740SApeksha Gupta #include "enet_ethdev.h"
14b84fdd39SApeksha Gupta #include "enet_regs.h"
15b84fdd39SApeksha Gupta #include "enet_uio.h"
16fc0ec740SApeksha Gupta 
17fc0ec740SApeksha Gupta #define ENETFEC_NAME_PMD                net_enetfec
18fc0ec740SApeksha Gupta 
19b84fdd39SApeksha Gupta /* FEC receive acceleration */
20b84fdd39SApeksha Gupta #define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
21b84fdd39SApeksha Gupta #define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
22b84fdd39SApeksha Gupta #define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
23b84fdd39SApeksha Gupta #define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
24b84fdd39SApeksha Gupta 						ENETFEC_RACC_PRODIS)
25b84fdd39SApeksha Gupta 
26b84fdd39SApeksha Gupta #define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
27b84fdd39SApeksha Gupta #define ENETFEC_PAUSE_FLAG_ENABLE	0x2
28b84fdd39SApeksha Gupta 
29b84fdd39SApeksha Gupta /* Pause frame field and FIFO threshold */
30b84fdd39SApeksha Gupta #define ENETFEC_FCE			RTE_BIT32(5)
31b84fdd39SApeksha Gupta #define ENETFEC_RSEM_V			0x84
32b84fdd39SApeksha Gupta #define ENETFEC_RSFL_V			16
33b84fdd39SApeksha Gupta #define ENETFEC_RAEM_V			0x8
34b84fdd39SApeksha Gupta #define ENETFEC_RAFL_V			0x8
35b84fdd39SApeksha Gupta #define ENETFEC_OPD_V			0xFFF0
36b84fdd39SApeksha Gupta 
37ecae7157SApeksha Gupta /* Extended buffer descriptor */
38ecae7157SApeksha Gupta #define ENETFEC_EXTENDED_BD		0
39b84fdd39SApeksha Gupta #define NUM_OF_BD_QUEUES		6
40b84fdd39SApeksha Gupta 
41bb5b5bf1SApeksha Gupta /* Supported Rx offloads */
42bb5b5bf1SApeksha Gupta static uint64_t dev_rx_offloads_sup =
43bb5b5bf1SApeksha Gupta 		RTE_ETH_RX_OFFLOAD_CHECKSUM |
44bb5b5bf1SApeksha Gupta 		RTE_ETH_RX_OFFLOAD_VLAN;
45bb5b5bf1SApeksha Gupta 
46b84fdd39SApeksha Gupta /*
47b84fdd39SApeksha Gupta  * This function is called to start or restart the ENETFEC during a link
48b84fdd39SApeksha Gupta  * change, transmit timeout, or to reconfigure the ENETFEC. The network
49b84fdd39SApeksha Gupta  * packet processing for this device must be stopped before this call.
50b84fdd39SApeksha Gupta  */
51b84fdd39SApeksha Gupta static void
52b84fdd39SApeksha Gupta enetfec_restart(struct rte_eth_dev *dev)
53b84fdd39SApeksha Gupta {
54b84fdd39SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
55b84fdd39SApeksha Gupta 	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
56b84fdd39SApeksha Gupta 	uint32_t ecntl = ENETFEC_ETHEREN;
57b84fdd39SApeksha Gupta 	uint32_t val;
58d64e9cfeSApeksha Gupta 	int i;
59b84fdd39SApeksha Gupta 
60b84fdd39SApeksha Gupta 	/* Clear any outstanding interrupt. */
61b84fdd39SApeksha Gupta 	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);
62b84fdd39SApeksha Gupta 
63b84fdd39SApeksha Gupta 	/* Configure the duplex mode */
64b84fdd39SApeksha Gupta 	if (fep->full_duplex == FULL_DUPLEX) {
65b84fdd39SApeksha Gupta 		/* FD enable */
66b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(0x04),
67b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
68b84fdd39SApeksha Gupta 	} else {
69b84fdd39SApeksha Gupta 		/* No Rcv on Xmit */
70b84fdd39SApeksha Gupta 		rcntl |= 0x02;
71b84fdd39SApeksha Gupta 		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
72b84fdd39SApeksha Gupta 	}
73b84fdd39SApeksha Gupta 
74b84fdd39SApeksha Gupta 	if (fep->quirks & QUIRK_RACC) {
75b84fdd39SApeksha Gupta 		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
76b84fdd39SApeksha Gupta 		/* align IP header */
77b84fdd39SApeksha Gupta 		val |= ENETFEC_RACC_SHIFT16;
78c75b9c3aSApeksha Gupta 		if (fep->flag_csum & RX_FLAG_CSUM_EN)
79c75b9c3aSApeksha Gupta 			/* set RX checksum */
80c75b9c3aSApeksha Gupta 			val |= ENETFEC_RACC_OPTIONS;
81c75b9c3aSApeksha Gupta 		else
82b84fdd39SApeksha Gupta 			val &= ~ENETFEC_RACC_OPTIONS;
83b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(val),
84b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
85b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
86b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
87b84fdd39SApeksha Gupta 	}
88b84fdd39SApeksha Gupta 
89b84fdd39SApeksha Gupta 	/*
90b84fdd39SApeksha Gupta 	 * The PHY interface and speed must be configured
91b84fdd39SApeksha Gupta 	 * differently on the enet-mac.
92b84fdd39SApeksha Gupta 	 */
93b84fdd39SApeksha Gupta 	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
94b84fdd39SApeksha Gupta 		/* Enable flow control and length check */
95b84fdd39SApeksha Gupta 		rcntl |= 0x40000000 | 0x00000020;
96b84fdd39SApeksha Gupta 
97b84fdd39SApeksha Gupta 		/* RGMII, RMII or MII */
98b84fdd39SApeksha Gupta 		rcntl |= RTE_BIT32(6);
99b84fdd39SApeksha Gupta 		ecntl |= RTE_BIT32(5);
100b84fdd39SApeksha Gupta 	}
101b84fdd39SApeksha Gupta 
102b84fdd39SApeksha Gupta 	/* Enable pause frames */
103b84fdd39SApeksha Gupta 	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
104b84fdd39SApeksha Gupta 		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
105b84fdd39SApeksha Gupta 		/*&& ndev->phydev && ndev->phydev->pause*/)) {
106b84fdd39SApeksha Gupta 		rcntl |= ENETFEC_FCE;
107b84fdd39SApeksha Gupta 
108b84fdd39SApeksha Gupta 		/* set FIFO threshold parameter to reduce overrun */
109b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
110b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
111b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
112b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
113b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
114b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
115b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
116b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);
117b84fdd39SApeksha Gupta 
118b84fdd39SApeksha Gupta 		/* OPD */
119b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
120b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
121b84fdd39SApeksha Gupta 	} else {
122b84fdd39SApeksha Gupta 		rcntl &= ~ENETFEC_FCE;
123b84fdd39SApeksha Gupta 	}
124b84fdd39SApeksha Gupta 
125b84fdd39SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(rcntl),
126b84fdd39SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
127b84fdd39SApeksha Gupta 
128b84fdd39SApeksha Gupta 	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
129b84fdd39SApeksha Gupta 	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);
130b84fdd39SApeksha Gupta 
131b84fdd39SApeksha Gupta 	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
132b84fdd39SApeksha Gupta 		/* enable ENETFEC endian swap */
133b84fdd39SApeksha Gupta 		ecntl |= (1 << 8);
134b84fdd39SApeksha Gupta 		/* enable ENETFEC store and forward mode */
135b84fdd39SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(1 << 8),
136b84fdd39SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
137b84fdd39SApeksha Gupta 	}
138b84fdd39SApeksha Gupta 	if (fep->bufdesc_ex)
139b84fdd39SApeksha Gupta 		ecntl |= (1 << 4);
140b84fdd39SApeksha Gupta 	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
141b84fdd39SApeksha Gupta 		fep->rgmii_txc_delay)
142b84fdd39SApeksha Gupta 		ecntl |= ENETFEC_TXC_DLY;
143b84fdd39SApeksha Gupta 	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
144b84fdd39SApeksha Gupta 		fep->rgmii_rxc_delay)
145b84fdd39SApeksha Gupta 		ecntl |= ENETFEC_RXC_DLY;
146b84fdd39SApeksha Gupta 	/* Enable the MIB statistic event counters */
147b84fdd39SApeksha Gupta 	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);
148b84fdd39SApeksha Gupta 
149b84fdd39SApeksha Gupta 	ecntl |= 0x70000000;
150b84fdd39SApeksha Gupta 	fep->enetfec_e_cntl = ecntl;
151b84fdd39SApeksha Gupta 	/* And last, enable the transmit and receive processing */
152b84fdd39SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(ecntl),
153b84fdd39SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
154d64e9cfeSApeksha Gupta 
155d64e9cfeSApeksha Gupta 	for (i = 0; i < fep->max_rx_queues; i++)
156d64e9cfeSApeksha Gupta 		rte_write32(0, fep->rx_queues[i]->bd.active_reg_desc);
157b84fdd39SApeksha Gupta 	rte_delay_us(10);
158b84fdd39SApeksha Gupta }
159b84fdd39SApeksha Gupta 
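/* Free all mbufs currently held in the Rx and Tx buffer descriptor rings. */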
160ecae7157SApeksha Gupta static void
161ecae7157SApeksha Gupta enet_free_buffers(struct rte_eth_dev *dev)
162ecae7157SApeksha Gupta {
163ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
164ecae7157SApeksha Gupta 	unsigned int i, q;
165ecae7157SApeksha Gupta 	struct rte_mbuf *mbuf;
166ecae7157SApeksha Gupta 	struct bufdesc  *bdp;
167ecae7157SApeksha Gupta 	struct enetfec_priv_rx_q *rxq;
168ecae7157SApeksha Gupta 	struct enetfec_priv_tx_q *txq;
169ecae7157SApeksha Gupta 
170ecae7157SApeksha Gupta 	for (q = 0; q < dev->data->nb_rx_queues; q++) {
171ecae7157SApeksha Gupta 		rxq = fep->rx_queues[q];
172ecae7157SApeksha Gupta 		bdp = rxq->bd.base;
173ecae7157SApeksha Gupta 		for (i = 0; i < rxq->bd.ring_size; i++) {
174ecae7157SApeksha Gupta 			mbuf = rxq->rx_mbuf[i];
175ecae7157SApeksha Gupta 			rxq->rx_mbuf[i] = NULL;
176ecae7157SApeksha Gupta 			rte_pktmbuf_free(mbuf);
177ecae7157SApeksha Gupta 			bdp = enet_get_nextdesc(bdp, &rxq->bd);
178ecae7157SApeksha Gupta 		}
179ecae7157SApeksha Gupta 	}
180ecae7157SApeksha Gupta 
181ecae7157SApeksha Gupta 	for (q = 0; q < dev->data->nb_tx_queues; q++) {
182ecae7157SApeksha Gupta 		txq = fep->tx_queues[q];
183ecae7157SApeksha Gupta 		bdp = txq->bd.base;
184ecae7157SApeksha Gupta 		for (i = 0; i < txq->bd.ring_size; i++) {
185ecae7157SApeksha Gupta 			mbuf = txq->tx_mbuf[i];
186ecae7157SApeksha Gupta 			txq->tx_mbuf[i] = NULL;
187ecae7157SApeksha Gupta 			rte_pktmbuf_free(mbuf);
188ecae7157SApeksha Gupta 		}
189ecae7157SApeksha Gupta 	}
190ecae7157SApeksha Gupta }
191ecae7157SApeksha Gupta 
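/*
 * Record the requested Rx offloads; checksum offload is latched into
 * fep->flag_csum and applied to the RACC register by enetfec_restart().
 */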
192b84fdd39SApeksha Gupta static int
193b84fdd39SApeksha Gupta enetfec_eth_configure(struct rte_eth_dev *dev)
194b84fdd39SApeksha Gupta {
195c75b9c3aSApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
196c75b9c3aSApeksha Gupta 
197c75b9c3aSApeksha Gupta 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
198c75b9c3aSApeksha Gupta 		fep->flag_csum |= RX_FLAG_CSUM_EN;
199c75b9c3aSApeksha Gupta 
200b84fdd39SApeksha Gupta 	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
201b84fdd39SApeksha Gupta 		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");
202b84fdd39SApeksha Gupta 
203b84fdd39SApeksha Gupta 	return 0;
204b84fdd39SApeksha Gupta }
205b84fdd39SApeksha Gupta 
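/* Start the controller and attach the Rx/Tx burst handlers. */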
206b84fdd39SApeksha Gupta static int
207b84fdd39SApeksha Gupta enetfec_eth_start(struct rte_eth_dev *dev)
208b84fdd39SApeksha Gupta {
209b84fdd39SApeksha Gupta 	enetfec_restart(dev);
210ecae7157SApeksha Gupta 	dev->rx_pkt_burst = &enetfec_recv_pkts;
211ecae7157SApeksha Gupta 	dev->tx_pkt_burst = &enetfec_xmit_pkts;
212b84fdd39SApeksha Gupta 
213b84fdd39SApeksha Gupta 	return 0;
214b84fdd39SApeksha Gupta }
215b84fdd39SApeksha Gupta 
216b84fdd39SApeksha Gupta /* ENETFEC disable function.
217b84fdd39SApeksha Gupta  * @param[in] fep       ENETFEC private data holding the mapped register base
218b84fdd39SApeksha Gupta  */
219b84fdd39SApeksha Gupta static void
220b84fdd39SApeksha Gupta enetfec_disable(struct enetfec_private *fep)
221b84fdd39SApeksha Gupta {
222b84fdd39SApeksha Gupta 	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
223b84fdd39SApeksha Gupta 		    & ~(fep->enetfec_e_cntl),
224b84fdd39SApeksha Gupta 		    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
225b84fdd39SApeksha Gupta }
226b84fdd39SApeksha Gupta 
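/* Stop packet processing by clearing the enable bits programmed at start. */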
227b84fdd39SApeksha Gupta static int
228b84fdd39SApeksha Gupta enetfec_eth_stop(struct rte_eth_dev *dev)
229b84fdd39SApeksha Gupta {
230b84fdd39SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
231b84fdd39SApeksha Gupta 
232b84fdd39SApeksha Gupta 	dev->data->dev_started = 0;
233b84fdd39SApeksha Gupta 	enetfec_disable(fep);
234b84fdd39SApeksha Gupta 
235b84fdd39SApeksha Gupta 	return 0;
236b84fdd39SApeksha Gupta }
237b84fdd39SApeksha Gupta 
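/* Release the mbufs still attached to the descriptor rings on close. */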
238bb5b5bf1SApeksha Gupta static int
239ecae7157SApeksha Gupta enetfec_eth_close(struct rte_eth_dev *dev)
240ecae7157SApeksha Gupta {
241ecae7157SApeksha Gupta 	enet_free_buffers(dev);
242ecae7157SApeksha Gupta 	return 0;
243ecae7157SApeksha Gupta }
244ecae7157SApeksha Gupta 
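/*
 * No PHY state is tracked by this PMD, so the link is always reported
 * as up at 1 Gb/s.
 */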
245ecae7157SApeksha Gupta static int
246ecae7157SApeksha Gupta enetfec_eth_link_update(struct rte_eth_dev *dev,
247ecae7157SApeksha Gupta 			int wait_to_complete __rte_unused)
248ecae7157SApeksha Gupta {
249ecae7157SApeksha Gupta 	struct rte_eth_link link;
250ecae7157SApeksha Gupta 	unsigned int lstatus = 1;
251ecae7157SApeksha Gupta 
252ecae7157SApeksha Gupta 	memset(&link, 0, sizeof(struct rte_eth_link));
253ecae7157SApeksha Gupta 
254ecae7157SApeksha Gupta 	link.link_status = lstatus;
255ecae7157SApeksha Gupta 	link.link_speed = RTE_ETH_SPEED_NUM_1G;
256ecae7157SApeksha Gupta 
257f665790aSDavid Marchand 	ENETFEC_PMD_INFO("Port (%d) link is %s", dev->data->port_id,
258ecae7157SApeksha Gupta 			 "Up");
259ecae7157SApeksha Gupta 
260ecae7157SApeksha Gupta 	return rte_eth_linkstatus_set(dev, &link);
261ecae7157SApeksha Gupta }
262ecae7157SApeksha Gupta 
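/* Enable promiscuous mode by setting the PROM bit in the Rx control register. */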
263ecae7157SApeksha Gupta static int
264ecae7157SApeksha Gupta enetfec_promiscuous_enable(struct rte_eth_dev *dev)
265ecae7157SApeksha Gupta {
266ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
267ecae7157SApeksha Gupta 	uint32_t tmp;
268ecae7157SApeksha Gupta 
269ecae7157SApeksha Gupta 	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
270ecae7157SApeksha Gupta 	tmp |= 0x8;
271ecae7157SApeksha Gupta 	tmp &= ~0x2;
272ecae7157SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(tmp),
273ecae7157SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
274ecae7157SApeksha Gupta 
275ecae7157SApeksha Gupta 	return 0;
276ecae7157SApeksha Gupta }
277ecae7157SApeksha Gupta 
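/* Enable all-multicast mode by programming the group hash registers (GAUR/GALR). */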
278ecae7157SApeksha Gupta static int
279ecae7157SApeksha Gupta enetfec_multicast_enable(struct rte_eth_dev *dev)
280ecae7157SApeksha Gupta {
281ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
282ecae7157SApeksha Gupta 
283ecae7157SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(0xffffffff),
284ecae7157SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
285ecae7157SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(0xffffffff),
286ecae7157SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
287ecae7157SApeksha Gupta 	dev->data->all_multicast = 1;
288ecae7157SApeksha Gupta 
289ecae7157SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(0x04400002),
290ecae7157SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
291ecae7157SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(0x10800049),
292ecae7157SApeksha Gupta 			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
293ecae7157SApeksha Gupta 
294ecae7157SApeksha Gupta 	return 0;
295ecae7157SApeksha Gupta }
296ecae7157SApeksha Gupta 
297ecae7157SApeksha Gupta /* Program a new MAC address into the hardware. */
298ecae7157SApeksha Gupta static int
299ecae7157SApeksha Gupta enetfec_set_mac_address(struct rte_eth_dev *dev,
300ecae7157SApeksha Gupta 		    struct rte_ether_addr *addr)
301ecae7157SApeksha Gupta {
302ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
303ecae7157SApeksha Gupta 
304ecae7157SApeksha Gupta 	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
305ecae7157SApeksha Gupta 		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
306ecae7157SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
307ecae7157SApeksha Gupta 	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
308ecae7157SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);
309ecae7157SApeksha Gupta 
310ecae7157SApeksha Gupta 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
311ecae7157SApeksha Gupta 
312ecae7157SApeksha Gupta 	return 0;
313ecae7157SApeksha Gupta }
314ecae7157SApeksha Gupta 
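/* Return the counters maintained in software by the Rx/Tx burst functions. */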
315ecae7157SApeksha Gupta static int
316ecae7157SApeksha Gupta enetfec_stats_get(struct rte_eth_dev *dev,
317ecae7157SApeksha Gupta 	      struct rte_eth_stats *stats)
318ecae7157SApeksha Gupta {
319ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
320ecae7157SApeksha Gupta 	struct rte_eth_stats *eth_stats = &fep->stats;
321ecae7157SApeksha Gupta 
322ecae7157SApeksha Gupta 	stats->ipackets = eth_stats->ipackets;
323ecae7157SApeksha Gupta 	stats->ibytes = eth_stats->ibytes;
324ecae7157SApeksha Gupta 	stats->ierrors = eth_stats->ierrors;
325ecae7157SApeksha Gupta 	stats->opackets = eth_stats->opackets;
326ecae7157SApeksha Gupta 	stats->obytes = eth_stats->obytes;
327ecae7157SApeksha Gupta 	stats->oerrors = eth_stats->oerrors;
328ecae7157SApeksha Gupta 	stats->rx_nombuf = eth_stats->rx_nombuf;
329ecae7157SApeksha Gupta 
330ecae7157SApeksha Gupta 	return 0;
331ecae7157SApeksha Gupta }
332ecae7157SApeksha Gupta 
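/* Report the fixed device capabilities: queue counts, max frame length and Rx offloads. */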
333ecae7157SApeksha Gupta static int
334bb5b5bf1SApeksha Gupta enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
335bb5b5bf1SApeksha Gupta 	struct rte_eth_dev_info *dev_info)
336bb5b5bf1SApeksha Gupta {
337bb5b5bf1SApeksha Gupta 	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
338bb5b5bf1SApeksha Gupta 	dev_info->max_rx_queues = ENETFEC_MAX_Q;
339bb5b5bf1SApeksha Gupta 	dev_info->max_tx_queues = ENETFEC_MAX_Q;
340bb5b5bf1SApeksha Gupta 	dev_info->rx_offload_capa = dev_rx_offloads_sup;
341bb5b5bf1SApeksha Gupta 	return 0;
342bb5b5bf1SApeksha Gupta }
343bb5b5bf1SApeksha Gupta 
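/* Free the per-queue structures allocated in the queue setup routines. */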
344ecae7157SApeksha Gupta static void
345ecae7157SApeksha Gupta enet_free_queue(struct rte_eth_dev *dev)
346ecae7157SApeksha Gupta {
347ecae7157SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
348ecae7157SApeksha Gupta 	unsigned int i;
349ecae7157SApeksha Gupta 
350ecae7157SApeksha Gupta 	for (i = 0; i < dev->data->nb_rx_queues; i++)
351ecae7157SApeksha Gupta 		rte_free(fep->rx_queues[i]);
352ecae7157SApeksha Gupta 	for (i = 0; i < dev->data->nb_tx_queues; i++)
353ecae7157SApeksha Gupta 		rte_free(fep->tx_queues[i]);
354ecae7157SApeksha Gupta }
355ecae7157SApeksha Gupta 
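/* Per-queue "descriptor active" register offsets (RDAR/TDAR). */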
356bb5b5bf1SApeksha Gupta static const unsigned short offset_des_active_rxq[] = {
357bb5b5bf1SApeksha Gupta 	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
358bb5b5bf1SApeksha Gupta };
359bb5b5bf1SApeksha Gupta 
360bb5b5bf1SApeksha Gupta static const unsigned short offset_des_active_txq[] = {
361bb5b5bf1SApeksha Gupta 	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
362bb5b5bf1SApeksha Gupta };
363bb5b5bf1SApeksha Gupta 
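/*
 * Set up a Tx queue: the buffer descriptor ring lives in the UIO-mapped BD
 * region carved out per queue at probe time (fep->dma_baseaddr_t[]).
 */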
364bb5b5bf1SApeksha Gupta static int
365bb5b5bf1SApeksha Gupta enetfec_tx_queue_setup(struct rte_eth_dev *dev,
366bb5b5bf1SApeksha Gupta 			uint16_t queue_idx,
367bb5b5bf1SApeksha Gupta 			uint16_t nb_desc,
368bb5b5bf1SApeksha Gupta 			unsigned int socket_id __rte_unused,
369bb5b5bf1SApeksha Gupta 			const struct rte_eth_txconf *tx_conf)
370bb5b5bf1SApeksha Gupta {
371bb5b5bf1SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
372bb5b5bf1SApeksha Gupta 	unsigned int i;
373bb5b5bf1SApeksha Gupta 	struct bufdesc *bdp, *bd_base;
374bb5b5bf1SApeksha Gupta 	struct enetfec_priv_tx_q *txq;
375bb5b5bf1SApeksha Gupta 	unsigned int size;
376bb5b5bf1SApeksha Gupta 	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
377bb5b5bf1SApeksha Gupta 		sizeof(struct bufdesc);
378*191128d7SDavid Marchand 	unsigned int dsize_log2 = rte_fls_u64(dsize) - 1;
379bb5b5bf1SApeksha Gupta 
380bb5b5bf1SApeksha Gupta 	/* Tx deferred start is not supported */
381bb5b5bf1SApeksha Gupta 	if (tx_conf->tx_deferred_start) {
382bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_ERR("Tx deferred start not supported");
383bb5b5bf1SApeksha Gupta 		return -EINVAL;
384bb5b5bf1SApeksha Gupta 	}
385bb5b5bf1SApeksha Gupta 
386bb5b5bf1SApeksha Gupta 	/* allocate transmit queue */
387bb5b5bf1SApeksha Gupta 	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
388bb5b5bf1SApeksha Gupta 	if (txq == NULL) {
389bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_ERR("transmit queue allocation failed");
390bb5b5bf1SApeksha Gupta 		return -ENOMEM;
391bb5b5bf1SApeksha Gupta 	}
392bb5b5bf1SApeksha Gupta 
393bb5b5bf1SApeksha Gupta 	if (nb_desc > MAX_TX_BD_RING_SIZE) {
394bb5b5bf1SApeksha Gupta 		nb_desc = MAX_TX_BD_RING_SIZE;
395bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_WARN("nb_desc clamped to MAX_TX_BD_RING_SIZE");
396bb5b5bf1SApeksha Gupta 	}
397bb5b5bf1SApeksha Gupta 	txq->bd.ring_size = nb_desc;
398bb5b5bf1SApeksha Gupta 	fep->total_tx_ring_size += txq->bd.ring_size;
399bb5b5bf1SApeksha Gupta 	fep->tx_queues[queue_idx] = txq;
400bb5b5bf1SApeksha Gupta 
401bb5b5bf1SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
402bb5b5bf1SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));
403bb5b5bf1SApeksha Gupta 
404bb5b5bf1SApeksha Gupta 	/* Set transmit descriptor base. */
405bb5b5bf1SApeksha Gupta 	txq = fep->tx_queues[queue_idx];
406bb5b5bf1SApeksha Gupta 	txq->fep = fep;
407bb5b5bf1SApeksha Gupta 	size = dsize * txq->bd.ring_size;
408bb5b5bf1SApeksha Gupta 	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
409bb5b5bf1SApeksha Gupta 	txq->bd.queue_id = queue_idx;
410bb5b5bf1SApeksha Gupta 	txq->bd.base = bd_base;
411bb5b5bf1SApeksha Gupta 	txq->bd.cur = bd_base;
412bb5b5bf1SApeksha Gupta 	txq->bd.d_size = dsize;
413bb5b5bf1SApeksha Gupta 	txq->bd.d_size_log2 = dsize_log2;
414bb5b5bf1SApeksha Gupta 	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
415bb5b5bf1SApeksha Gupta 			offset_des_active_txq[queue_idx];
416bb5b5bf1SApeksha Gupta 	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
417bb5b5bf1SApeksha Gupta 	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
418bb5b5bf1SApeksha Gupta 	bdp = txq->bd.cur;
420bb5b5bf1SApeksha Gupta 
421bb5b5bf1SApeksha Gupta 	for (i = 0; i < txq->bd.ring_size; i++) {
422bb5b5bf1SApeksha Gupta 		/* Initialize the BD for every fragment in the page. */
423bb5b5bf1SApeksha Gupta 		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
424bb5b5bf1SApeksha Gupta 		if (txq->tx_mbuf[i] != NULL) {
425bb5b5bf1SApeksha Gupta 			rte_pktmbuf_free(txq->tx_mbuf[i]);
426bb5b5bf1SApeksha Gupta 			txq->tx_mbuf[i] = NULL;
427bb5b5bf1SApeksha Gupta 		}
428bb5b5bf1SApeksha Gupta 		rte_write32(0, &bdp->bd_bufaddr);
429bb5b5bf1SApeksha Gupta 		bdp = enet_get_nextdesc(bdp, &txq->bd);
430bb5b5bf1SApeksha Gupta 	}
431bb5b5bf1SApeksha Gupta 
432bb5b5bf1SApeksha Gupta 	/* Set the last buffer to wrap */
433bb5b5bf1SApeksha Gupta 	bdp = enet_get_prevdesc(bdp, &txq->bd);
434bb5b5bf1SApeksha Gupta 	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
435bb5b5bf1SApeksha Gupta 		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
436bb5b5bf1SApeksha Gupta 	txq->dirty_tx = bdp;
437bb5b5bf1SApeksha Gupta 	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
438bb5b5bf1SApeksha Gupta 	return 0;
439bb5b5bf1SApeksha Gupta }
440bb5b5bf1SApeksha Gupta 
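/*
 * Set up an Rx queue: initialize the descriptor ring from the UIO-mapped BD
 * region (fep->dma_baseaddr_r[]) and pre-populate it with mbufs from mb_pool.
 */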
441bb5b5bf1SApeksha Gupta static int
442bb5b5bf1SApeksha Gupta enetfec_rx_queue_setup(struct rte_eth_dev *dev,
443bb5b5bf1SApeksha Gupta 			uint16_t queue_idx,
444bb5b5bf1SApeksha Gupta 			uint16_t nb_rx_desc,
445bb5b5bf1SApeksha Gupta 			unsigned int socket_id __rte_unused,
446bb5b5bf1SApeksha Gupta 			const struct rte_eth_rxconf *rx_conf,
447bb5b5bf1SApeksha Gupta 			struct rte_mempool *mb_pool)
448bb5b5bf1SApeksha Gupta {
449bb5b5bf1SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
450bb5b5bf1SApeksha Gupta 	unsigned int i;
451bb5b5bf1SApeksha Gupta 	struct bufdesc *bd_base;
452bb5b5bf1SApeksha Gupta 	struct bufdesc *bdp;
453bb5b5bf1SApeksha Gupta 	struct enetfec_priv_rx_q *rxq;
454bb5b5bf1SApeksha Gupta 	unsigned int size;
455bb5b5bf1SApeksha Gupta 	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
456bb5b5bf1SApeksha Gupta 			sizeof(struct bufdesc);
457*191128d7SDavid Marchand 	unsigned int dsize_log2 = rte_fls_u64(dsize) - 1;
458bb5b5bf1SApeksha Gupta 
459bb5b5bf1SApeksha Gupta 	/* Rx deferred start is not supported */
460bb5b5bf1SApeksha Gupta 	if (rx_conf->rx_deferred_start) {
461bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_ERR("Rx deferred start not supported");
462bb5b5bf1SApeksha Gupta 		return -EINVAL;
463bb5b5bf1SApeksha Gupta 	}
464bb5b5bf1SApeksha Gupta 
4657c3c0d0fSDavid Marchand 	if (queue_idx >= ENETFEC_MAX_Q) {
466f665790aSDavid Marchand 		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d",
4677c3c0d0fSDavid Marchand 			queue_idx, ENETFEC_MAX_Q);
4687c3c0d0fSDavid Marchand 		return -EINVAL;
4697c3c0d0fSDavid Marchand 	}
4707c3c0d0fSDavid Marchand 
471bb5b5bf1SApeksha Gupta 	/* allocate receive queue */
472bb5b5bf1SApeksha Gupta 	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
473bb5b5bf1SApeksha Gupta 	if (rxq == NULL) {
474bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_ERR("receive queue allocation failed");
475bb5b5bf1SApeksha Gupta 		return -ENOMEM;
476bb5b5bf1SApeksha Gupta 	}
477bb5b5bf1SApeksha Gupta 
478bb5b5bf1SApeksha Gupta 	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
479bb5b5bf1SApeksha Gupta 		nb_rx_desc = MAX_RX_BD_RING_SIZE;
480bb5b5bf1SApeksha Gupta 		ENETFEC_PMD_WARN("nb_desc clamped to MAX_RX_BD_RING_SIZE");
481bb5b5bf1SApeksha Gupta 	}
482bb5b5bf1SApeksha Gupta 
483bb5b5bf1SApeksha Gupta 	rxq->bd.ring_size = nb_rx_desc;
484bb5b5bf1SApeksha Gupta 	fep->total_rx_ring_size += rxq->bd.ring_size;
485bb5b5bf1SApeksha Gupta 	fep->rx_queues[queue_idx] = rxq;
486bb5b5bf1SApeksha Gupta 
487bb5b5bf1SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
488bb5b5bf1SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
489bb5b5bf1SApeksha Gupta 	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
490bb5b5bf1SApeksha Gupta 		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));
491bb5b5bf1SApeksha Gupta 
492bb5b5bf1SApeksha Gupta 	/* Set receive descriptor base. */
493bb5b5bf1SApeksha Gupta 	rxq = fep->rx_queues[queue_idx];
494bb5b5bf1SApeksha Gupta 	rxq->pool = mb_pool;
495bb5b5bf1SApeksha Gupta 	size = dsize * rxq->bd.ring_size;
496bb5b5bf1SApeksha Gupta 	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
497bb5b5bf1SApeksha Gupta 	rxq->bd.queue_id = queue_idx;
498bb5b5bf1SApeksha Gupta 	rxq->bd.base = bd_base;
499bb5b5bf1SApeksha Gupta 	rxq->bd.cur = bd_base;
500bb5b5bf1SApeksha Gupta 	rxq->bd.d_size = dsize;
501bb5b5bf1SApeksha Gupta 	rxq->bd.d_size_log2 = dsize_log2;
502bb5b5bf1SApeksha Gupta 	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
503bb5b5bf1SApeksha Gupta 			offset_des_active_rxq[queue_idx];
504bb5b5bf1SApeksha Gupta 	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
505bb5b5bf1SApeksha Gupta 	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
506bb5b5bf1SApeksha Gupta 
507bb5b5bf1SApeksha Gupta 	rxq->fep = fep;
508bb5b5bf1SApeksha Gupta 	bdp = rxq->bd.base;
509bb5b5bf1SApeksha Gupta 	rxq->bd.cur = bdp;
510bb5b5bf1SApeksha Gupta 
511bb5b5bf1SApeksha Gupta 	for (i = 0; i < nb_rx_desc; i++) {
512bb5b5bf1SApeksha Gupta 		/* Initialize Rx buffers from pktmbuf pool */
513bb5b5bf1SApeksha Gupta 		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
514bb5b5bf1SApeksha Gupta 		if (mbuf == NULL) {
515bb5b5bf1SApeksha Gupta 			ENETFEC_PMD_ERR("mbuf allocation failed");
516bb5b5bf1SApeksha Gupta 			goto err_alloc;
517bb5b5bf1SApeksha Gupta 		}
518bb5b5bf1SApeksha Gupta 
519bb5b5bf1SApeksha Gupta 		/* Program the mbuf IOVA (bus address) into the descriptor */
520bb5b5bf1SApeksha Gupta 		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
521bb5b5bf1SApeksha Gupta 			&bdp->bd_bufaddr);
522bb5b5bf1SApeksha Gupta 
523bb5b5bf1SApeksha Gupta 		rxq->rx_mbuf[i] = mbuf;
524bb5b5bf1SApeksha Gupta 		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);
525bb5b5bf1SApeksha Gupta 
526bb5b5bf1SApeksha Gupta 		bdp = enet_get_nextdesc(bdp, &rxq->bd);
527bb5b5bf1SApeksha Gupta 	}
528bb5b5bf1SApeksha Gupta 
529bb5b5bf1SApeksha Gupta 	/* Initialize the receive buffer descriptors. */
530bb5b5bf1SApeksha Gupta 	bdp = rxq->bd.cur;
531bb5b5bf1SApeksha Gupta 	for (i = 0; i < rxq->bd.ring_size; i++) {
532bb5b5bf1SApeksha Gupta 		/* Initialize the BD for every fragment in the page. */
533bb5b5bf1SApeksha Gupta 		if (rte_read32(&bdp->bd_bufaddr) > 0)
534bb5b5bf1SApeksha Gupta 			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
535bb5b5bf1SApeksha Gupta 				&bdp->bd_sc);
536bb5b5bf1SApeksha Gupta 		else
537bb5b5bf1SApeksha Gupta 			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
538bb5b5bf1SApeksha Gupta 
539bb5b5bf1SApeksha Gupta 		bdp = enet_get_nextdesc(bdp, &rxq->bd);
540bb5b5bf1SApeksha Gupta 	}
541bb5b5bf1SApeksha Gupta 
542bb5b5bf1SApeksha Gupta 	/* Set the last buffer to wrap */
543bb5b5bf1SApeksha Gupta 	bdp = enet_get_prevdesc(bdp, &rxq->bd);
544bb5b5bf1SApeksha Gupta 	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
545bb5b5bf1SApeksha Gupta 		rte_read16(&bdp->bd_sc)),  &bdp->bd_sc);
546bb5b5bf1SApeksha Gupta 	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
547bb5b5bf1SApeksha Gupta 	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
548bb5b5bf1SApeksha Gupta 	return 0;
549bb5b5bf1SApeksha Gupta 
550bb5b5bf1SApeksha Gupta err_alloc:
551bb5b5bf1SApeksha Gupta 	for (i = 0; i < nb_rx_desc; i++) {
552bb5b5bf1SApeksha Gupta 		if (rxq->rx_mbuf[i] != NULL) {
553bb5b5bf1SApeksha Gupta 			rte_pktmbuf_free(rxq->rx_mbuf[i]);
554bb5b5bf1SApeksha Gupta 			rxq->rx_mbuf[i] = NULL;
555bb5b5bf1SApeksha Gupta 		}
556bb5b5bf1SApeksha Gupta 	}
557bb5b5bf1SApeksha Gupta 	rte_free(rxq);
558bb5b5bf1SApeksha Gupta 	return -ENOMEM;
559bb5b5bf1SApeksha Gupta }
560bb5b5bf1SApeksha Gupta 
561b84fdd39SApeksha Gupta static const struct eth_dev_ops enetfec_ops = {
562b84fdd39SApeksha Gupta 	.dev_configure          = enetfec_eth_configure,
563b84fdd39SApeksha Gupta 	.dev_start              = enetfec_eth_start,
564bb5b5bf1SApeksha Gupta 	.dev_stop               = enetfec_eth_stop,
565ecae7157SApeksha Gupta 	.dev_close              = enetfec_eth_close,
566ecae7157SApeksha Gupta 	.link_update            = enetfec_eth_link_update,
567ecae7157SApeksha Gupta 	.promiscuous_enable     = enetfec_promiscuous_enable,
568ecae7157SApeksha Gupta 	.allmulticast_enable    = enetfec_multicast_enable,
569ecae7157SApeksha Gupta 	.mac_addr_set           = enetfec_set_mac_address,
570ecae7157SApeksha Gupta 	.stats_get              = enetfec_stats_get,
571bb5b5bf1SApeksha Gupta 	.dev_infos_get          = enetfec_eth_info,
572bb5b5bf1SApeksha Gupta 	.rx_queue_setup         = enetfec_rx_queue_setup,
573bb5b5bf1SApeksha Gupta 	.tx_queue_setup         = enetfec_tx_queue_setup
574b84fdd39SApeksha Gupta };
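/*
 * Minimal application-side usage sketch for this PMD (illustrative only;
 * port_id, ring sizes and the mempool name are placeholders, and the port is
 * assumed to have been created with the --vdev=net_enetfec EAL option):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	struct rte_mbuf *pkts[32];
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("enetfec_rx_pool",
 *			512, 0, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 */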
575b84fdd39SApeksha Gupta 
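/* Final per-port initialization: set defaults and install the ops table. */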
576fc0ec740SApeksha Gupta static int
577fc0ec740SApeksha Gupta enetfec_eth_init(struct rte_eth_dev *dev)
578fc0ec740SApeksha Gupta {
579b84fdd39SApeksha Gupta 	struct enetfec_private *fep = dev->data->dev_private;
580b84fdd39SApeksha Gupta 
581b84fdd39SApeksha Gupta 	fep->full_duplex = FULL_DUPLEX;
582b84fdd39SApeksha Gupta 	dev->dev_ops = &enetfec_ops;
583fc0ec740SApeksha Gupta 	rte_eth_dev_probing_finish(dev);
584c75b9c3aSApeksha Gupta 
585fc0ec740SApeksha Gupta 	return 0;
586fc0ec740SApeksha Gupta }
587fc0ec740SApeksha Gupta 
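/*
 * Probe the virtual device: map the ENETFEC registers and buffer descriptor
 * memory through UIO, split the BD area evenly across the Tx and Rx rings,
 * and finish initializing the ethdev.
 */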
588fc0ec740SApeksha Gupta static int
589fc0ec740SApeksha Gupta pmd_enetfec_probe(struct rte_vdev_device *vdev)
590fc0ec740SApeksha Gupta {
591fc0ec740SApeksha Gupta 	struct rte_eth_dev *dev = NULL;
592fc0ec740SApeksha Gupta 	struct enetfec_private *fep;
593fc0ec740SApeksha Gupta 	const char *name;
594fc0ec740SApeksha Gupta 	int rc;
595b84fdd39SApeksha Gupta 	int i;
596b84fdd39SApeksha Gupta 	unsigned int bdsize;
597ecae7157SApeksha Gupta 	struct rte_ether_addr macaddr = {
598ecae7157SApeksha Gupta 		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
599ecae7157SApeksha Gupta 	};
600fc0ec740SApeksha Gupta 
601fc0ec740SApeksha Gupta 	name = rte_vdev_device_name(vdev);
602fc0ec740SApeksha Gupta 	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);
603fc0ec740SApeksha Gupta 
604fc0ec740SApeksha Gupta 	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
605fc0ec740SApeksha Gupta 	if (dev == NULL)
606fc0ec740SApeksha Gupta 		return -ENOMEM;
607fc0ec740SApeksha Gupta 
608fc0ec740SApeksha Gupta 	/* setup board info structure */
609fc0ec740SApeksha Gupta 	fep = dev->data->dev_private;
610fc0ec740SApeksha Gupta 	fep->dev = dev;
611b84fdd39SApeksha Gupta 
612b84fdd39SApeksha Gupta 	fep->max_rx_queues = ENETFEC_MAX_Q;
613b84fdd39SApeksha Gupta 	fep->max_tx_queues = ENETFEC_MAX_Q;
614b84fdd39SApeksha Gupta 	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
615b84fdd39SApeksha Gupta 		| QUIRK_RACC;
616b84fdd39SApeksha Gupta 
617b84fdd39SApeksha Gupta 	rc = enetfec_configure();
618b84fdd39SApeksha Gupta 	if (rc != 0)
619b84fdd39SApeksha Gupta 		return -ENOMEM;
620b84fdd39SApeksha Gupta 	rc = config_enetfec_uio(fep);
621b84fdd39SApeksha Gupta 	if (rc != 0)
622b84fdd39SApeksha Gupta 		return -ENOMEM;
623b84fdd39SApeksha Gupta 
624b84fdd39SApeksha Gupta 	/* Get the BD size for distributing among six queues */
625b84fdd39SApeksha Gupta 	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;
626b84fdd39SApeksha Gupta 
627b84fdd39SApeksha Gupta 	for (i = 0; i < fep->max_tx_queues; i++) {
628b84fdd39SApeksha Gupta 		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
629b84fdd39SApeksha Gupta 		fep->bd_addr_p_t[i] = fep->bd_addr_p;
630b84fdd39SApeksha Gupta 		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
631b84fdd39SApeksha Gupta 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
632b84fdd39SApeksha Gupta 	}
633b84fdd39SApeksha Gupta 	for (i = 0; i < fep->max_rx_queues; i++) {
634b84fdd39SApeksha Gupta 		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
635b84fdd39SApeksha Gupta 		fep->bd_addr_p_r[i] = fep->bd_addr_p;
636b84fdd39SApeksha Gupta 		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
637b84fdd39SApeksha Gupta 		fep->bd_addr_p = fep->bd_addr_p + bdsize;
638b84fdd39SApeksha Gupta 	}
639b84fdd39SApeksha Gupta 
640ecae7157SApeksha Gupta 	/* Allocate storage for the station (MAC) address in the dev structure */
641ecae7157SApeksha Gupta 	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
642ecae7157SApeksha Gupta 	if (dev->data->mac_addrs == NULL) {
643ecae7157SApeksha Gupta 		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
644ecae7157SApeksha Gupta 			RTE_ETHER_ADDR_LEN);
645ecae7157SApeksha Gupta 		rc = -ENOMEM;
646ecae7157SApeksha Gupta 		goto err;
647ecae7157SApeksha Gupta 	}
648ecae7157SApeksha Gupta 
649ecae7157SApeksha Gupta 	/*
650ecae7157SApeksha Gupta 	 * Set default mac address
651ecae7157SApeksha Gupta 	 */
652ecae7157SApeksha Gupta 	enetfec_set_mac_address(dev, &macaddr);
653ecae7157SApeksha Gupta 
654ecae7157SApeksha Gupta 	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
655fc0ec740SApeksha Gupta 	rc = enetfec_eth_init(dev);
656fc0ec740SApeksha Gupta 	if (rc)
657fc0ec740SApeksha Gupta 		goto failed_init;
658fc0ec740SApeksha Gupta 
659fc0ec740SApeksha Gupta 	return 0;
660fc0ec740SApeksha Gupta 
661fc0ec740SApeksha Gupta failed_init:
662fc0ec740SApeksha Gupta 	ENETFEC_PMD_ERR("Failed to init");
663ecae7157SApeksha Gupta err:
664ecae7157SApeksha Gupta 	rte_eth_dev_release_port(dev);
665fc0ec740SApeksha Gupta 	return rc;
666fc0ec740SApeksha Gupta }
667fc0ec740SApeksha Gupta 
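/* Remove the virtual device: free the queues, release the port and clean up UIO state. */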
668fc0ec740SApeksha Gupta static int
669fc0ec740SApeksha Gupta pmd_enetfec_remove(struct rte_vdev_device *vdev)
670fc0ec740SApeksha Gupta {
671fc0ec740SApeksha Gupta 	struct rte_eth_dev *eth_dev = NULL;
672ecae7157SApeksha Gupta 	struct enetfec_private *fep;
673ecae7157SApeksha Gupta 	struct enetfec_priv_rx_q *rxq;
674fc0ec740SApeksha Gupta 	int ret;
675fc0ec740SApeksha Gupta 
676fc0ec740SApeksha Gupta 	/* find the ethdev entry */
677fc0ec740SApeksha Gupta 	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
678fc0ec740SApeksha Gupta 	if (eth_dev == NULL)
679fc0ec740SApeksha Gupta 		return -ENODEV;
680fc0ec740SApeksha Gupta 
681ecae7157SApeksha Gupta 	fep = eth_dev->data->dev_private;
682ecae7157SApeksha Gupta 	/* Free descriptor base of first RX queue as it was configured
683ecae7157SApeksha Gupta 	 * first in enetfec_eth_init().
684ecae7157SApeksha Gupta 	 */
685ecae7157SApeksha Gupta 	rxq = fep->rx_queues[0];
686ecae7157SApeksha Gupta 	rte_free(rxq->bd.base);
687ecae7157SApeksha Gupta 	enet_free_queue(eth_dev);
688ecae7157SApeksha Gupta 	enetfec_eth_stop(eth_dev);
689ecae7157SApeksha Gupta 
690fc0ec740SApeksha Gupta 	ret = rte_eth_dev_release_port(eth_dev);
691fc0ec740SApeksha Gupta 	if (ret != 0)
692fc0ec740SApeksha Gupta 		return -EINVAL;
693fc0ec740SApeksha Gupta 
694fc0ec740SApeksha Gupta 	ENETFEC_PMD_INFO("Release enetfec sw device");
695ecae7157SApeksha Gupta 	enetfec_cleanup(fep);
696ecae7157SApeksha Gupta 
697fc0ec740SApeksha Gupta 	return 0;
698fc0ec740SApeksha Gupta }
699fc0ec740SApeksha Gupta 
700fc0ec740SApeksha Gupta static struct rte_vdev_driver pmd_enetfec_drv = {
701fc0ec740SApeksha Gupta 	.probe = pmd_enetfec_probe,
702fc0ec740SApeksha Gupta 	.remove = pmd_enetfec_remove,
703fc0ec740SApeksha Gupta };
704fc0ec740SApeksha Gupta 
705fc0ec740SApeksha Gupta RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
706fc0ec740SApeksha Gupta RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);
707