/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <inttypes.h>

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_bitops.h>
#include <rte_io.h>

#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD                net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

/* Extended buffer descriptor */
#define ENETFEC_EXTENDED_BD		0
#define NUM_OF_BD_QUEUES		6

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;
	int i;

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	/* Enable MII mode */
	if (fep->full_duplex == FULL_DUPLEX) {
		/* FD enable */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}

	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* align IP header */
		val |= ENETFEC_RACC_SHIFT16;
		if (fep->flag_csum & RX_FLAG_CSUM_EN)
			/* set RX checksum */
			val |= ENETFEC_RACC_OPTIONS;
		else
			val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* OPD */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;
	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	ecntl |= 0x70000000;
	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);

	for (i = 0; i < fep->max_rx_queues; i++)
		rte_write32(0, fep->rx_queues[i]->bd.active_reg_desc);
	rte_delay_us(10);
}

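/* Release all mbufs currently attached to the Rx and Tx descriptor rings. */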
static void
enet_free_buffers(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i, q;
	struct rte_mbuf *mbuf;
	struct bufdesc  *bdp;
	struct enetfec_priv_rx_q *rxq;
	struct enetfec_priv_tx_q *txq;

	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		rxq = fep->rx_queues[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			mbuf = rxq->rx_mbuf[i];
			rxq->rx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
			bdp = enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		txq = fep->tx_queues[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			mbuf = txq->tx_mbuf[i];
			txq->tx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
		}
	}
}

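/* Apply the device configuration: latch the Rx checksum offload request
 * and report that KEEP_CRC is not supported.
 */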
static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		fep->flag_csum |= RX_FLAG_CSUM_EN;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

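/* Start the device: program the MAC through enetfec_restart() and install
 * the Rx/Tx burst handlers.
 */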
static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);
	dev->rx_pkt_burst = &enetfec_recv_pkts;
	dev->tx_pkt_burst = &enetfec_xmit_pkts;

	return 0;
}

/* ENETFEC disable function.
 * @param[in] fep       ENETFEC private data, holding the mapped base address
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		    & ~(fep->enetfec_e_cntl),
		    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

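/* Stop packet processing by clearing the control bits that were set in
 * ENETFEC_ECR when the device was started.
 */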
static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

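/* Close the device and release the mbufs held by the descriptor rings. */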
static int
enetfec_eth_close(struct rte_eth_dev *dev)
{
	enet_free_buffers(dev);
	return 0;
}

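/* Report the link status. The PMD does not track PHY state, so the link is
 * always reported as up at 1 Gbps.
 */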
static int
enetfec_eth_link_update(struct rte_eth_dev *dev,
			int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	unsigned int lstatus = 1;

	memset(&link, 0, sizeof(struct rte_eth_link));

	link.link_status = lstatus;
	link.link_speed = RTE_ETH_SPEED_NUM_1G;

	ENETFEC_PMD_INFO("Port (%d) link is %s", dev->data->port_id,
			 "Up");

	return rte_eth_linkstatus_set(dev, &link);
}

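/* Enable promiscuous mode by updating the receive control register (RCR). */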
static int
enetfec_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t tmp;

	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
	tmp |= 0x8;
	tmp &= ~0x2;
	rte_write32(rte_cpu_to_le_32(tmp),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	return 0;
}

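/* All-multicast mode: program the group address hash registers (GAUR/GALR). */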
static int
enetfec_multicast_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
	dev->data->all_multicast = 1;

	rte_write32(rte_cpu_to_le_32(0x04400002),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0x10800049),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);

	return 0;
}

/* Program a new MAC address into the hardware. */
static int
enetfec_set_mac_address(struct rte_eth_dev *dev,
		    struct rte_ether_addr *addr)
{
	struct enetfec_private *fep = dev->data->dev_private;

	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);

	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

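/* Return the basic statistics accumulated by the driver in fep->stats. */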
static int
enetfec_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct enetfec_private *fep = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &fep->stats;

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->ierrors = eth_stats->ierrors;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;
	stats->oerrors = eth_stats->oerrors;
	stats->rx_nombuf = eth_stats->rx_nombuf;

	return 0;
}

static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	return 0;
}

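/* Free the per-queue structures allocated by the Rx/Tx queue setup routines. */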
static void
enet_free_queue(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(fep->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(fep->tx_queues[i]);
}

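/* Per-queue offsets of the Rx/Tx descriptor active registers. */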
static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

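/* Set up a Tx queue: allocate the queue structure, attach it to its slice of
 * the mapped buffer descriptor memory and reset every descriptor in the ring.
 */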
static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
		sizeof(struct bufdesc);
	unsigned int dsize_log2 = rte_fls_u64(dsize) - 1;

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	txq->fep = fep;
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
	bdp = txq->bd.base;

	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	txq->dirty_tx = bdp;
	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
	return 0;
}

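/* Set up an Rx queue: allocate the queue structure, fill the buffer
 * descriptor ring with mbufs from the given mempool and mark the descriptors
 * empty so the hardware can use them.
 */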
static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = rte_fls_u64(dsize) - 1;

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	if (queue_idx >= ENETFEC_MAX_Q) {
		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d",
			queue_idx, ENETFEC_MAX_Q);
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	rxq->pool = mb_pool;
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	rxq->fep = fep;
	bdp = rxq->bd.base;
	rxq->bd.cur = bdp;

	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf allocation failed");
			goto err_alloc;
		}

		/* Get the virtual address & physical address */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.cur;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure          = enetfec_eth_configure,
	.dev_start              = enetfec_eth_start,
	.dev_stop               = enetfec_eth_stop,
	.dev_close              = enetfec_eth_close,
	.link_update            = enetfec_eth_link_update,
	.promiscuous_enable     = enetfec_promiscuous_enable,
	.allmulticast_enable    = enetfec_multicast_enable,
	.mac_addr_set           = enetfec_set_mac_address,
	.stats_get              = enetfec_stats_get,
	.dev_infos_get          = enetfec_eth_info,
	.rx_queue_setup         = enetfec_rx_queue_setup,
	.tx_queue_setup         = enetfec_tx_queue_setup
};

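/* Finish ethdev initialization: set defaults and register the device ops. */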
static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);

	return 0;
}

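/* Probe the virtual ENETFEC device: map the hardware through UIO, split the
 * buffer descriptor memory across the Rx/Tx queues and program a default
 * MAC address.
 */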
static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;
	struct rte_ether_addr macaddr = {
		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
	};

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;
	fep->dev = dev;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC;

	rc = enetfec_configure();
	if (rc != 0) {
		rc = -ENOMEM;
		goto err;
	}
	rc = config_enetfec_uio(fep);
	if (rc != 0) {
		rc = -ENOMEM;
		goto err;
	}

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}

	/* Copy the station address into the dev structure. */
	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			RTE_ETHER_ADDR_LEN);
		rc = -ENOMEM;
		goto err;
	}

	/* Set the default MAC address */
	enetfec_set_mac_address(dev, &macaddr);

	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
err:
	rte_eth_dev_release_port(dev);
	return rc;
}

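/* Remove the virtual device: stop the hardware, free the queues and release
 * the ethdev port.
 */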
static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct enetfec_private *fep;
	struct enetfec_priv_rx_q *rxq;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	fep = eth_dev->data->dev_private;

	/* Stop the hardware before releasing the queue resources. */
	enetfec_eth_stop(eth_dev);

	/* Free descriptor base of first RX queue as it was configured
	 * first in enetfec_eth_init().
	 */
	rxq = fep->rx_queues[0];
	rte_free(rxq->bd.base);
	enet_free_queue(eth_dev);

	ENETFEC_PMD_INFO("Release enetfec sw device");
	enetfec_cleanup(fep);

	/* Release the port only after fep is no longer needed. */
	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);