/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <inttypes.h>

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_io.h>

#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD                net_enetfec
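/*
 * ENETFEC_NAME_PMD is the name registered with RTE_PMD_REGISTER_VDEV()
 * at the bottom of this file; an application would typically instantiate
 * the PMD with the EAL option --vdev=net_enetfec (illustrative usage,
 * not mandated by this file).
 */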

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

/* Extended buffer descriptor */
#define ENETFEC_EXTENDED_BD		0
#define NUM_OF_BD_QUEUES		6

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	/* Set the duplex mode */
	if (fep->full_duplex == FULL_DUPLEX) {
		/* FD enable */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}

	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* align IP header */
		val |= ENETFEC_RACC_SHIFT16;
		if (fep->flag_csum & RX_FLAG_CSUM_EN)
			/* set RX checksum */
			val |= ENETFEC_RACC_OPTIONS;
		else
			val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* OPD */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;
	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	ecntl |= 0x70000000;
	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
	rte_delay_us(10);
}

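/*
 * Free every mbuf still attached to the Rx and Tx buffer descriptor
 * rings; called from enetfec_eth_close().
 */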
static void
enet_free_buffers(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i, q;
	struct rte_mbuf *mbuf;
	struct bufdesc  *bdp;
	struct enetfec_priv_rx_q *rxq;
	struct enetfec_priv_tx_q *txq;

	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		rxq = fep->rx_queues[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			mbuf = rxq->rx_mbuf[i];
			rxq->rx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
			bdp = enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		txq = fep->tx_queues[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			mbuf = txq->tx_mbuf[i];
			txq->tx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
		}
	}
}

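/*
 * Apply the requested Rx offload configuration. Only the checksum
 * offload toggles a driver flag here; KEEP_CRC is not supported and
 * is only reported as an error.
 */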
static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		fep->flag_csum |= RX_FLAG_CSUM_EN;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);
	dev->rx_pkt_burst = &enetfec_recv_pkts;
	dev->tx_pkt_burst = &enetfec_xmit_pkts;

	return 0;
}

/* ENETFEC disable function.
 * @param[in] fep	ENETFEC private data, including the mapped base address
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		    & ~(fep->enetfec_e_cntl),
		    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

static int
enetfec_eth_close(struct rte_eth_dev *dev)
{
	enet_free_buffers(dev);
	return 0;
}

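/*
 * Link status is always reported as up at 1 Gb/s; no PHY is queried
 * by this routine.
 */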
static int
enetfec_eth_link_update(struct rte_eth_dev *dev,
			int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	unsigned int lstatus = 1;

	memset(&link, 0, sizeof(struct rte_eth_link));

	link.link_status = lstatus;
	link.link_speed = RTE_ETH_SPEED_NUM_1G;

	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
			 "Up");

	return rte_eth_linkstatus_set(dev, &link);
}

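/* Enable promiscuous reception by updating the receive control register (RCR). */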
static int
enetfec_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t tmp;

	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
	tmp |= 0x8;
	tmp &= ~0x2;
	rte_write32(rte_cpu_to_le_32(tmp),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	return 0;
}

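/*
 * Accept all multicast frames by programming the group address hash
 * registers (GAUR/GALR).
 */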
static int
enetfec_multicast_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
	dev->data->all_multicast = 1;

	rte_write32(rte_cpu_to_le_32(0x04400002),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0x10800049),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);

	return 0;
}

/* Program a new MAC address into the hardware. */
static int
enetfec_set_mac_address(struct rte_eth_dev *dev,
		    struct rte_ether_addr *addr)
{
	struct enetfec_private *fep = dev->data->dev_private;

	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);

	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

static int
enetfec_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct enetfec_private *fep = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &fep->stats;

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->ierrors = eth_stats->ierrors;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;
	stats->oerrors = eth_stats->oerrors;
	stats->rx_nombuf = eth_stats->rx_nombuf;

	return 0;
}

static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	return 0;
}

static void
enet_free_queue(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(fep->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(fep->tx_queues[i]);
}

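/*
 * Per-queue receive/transmit descriptor active registers; writing to
 * one of these offsets tells the MAC that the corresponding ring has
 * been updated (register semantics assumed from the i.MX ENET manual).
 */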
static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

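/*
 * Set up a Tx queue. The descriptor ring lives in the UIO-mapped
 * buffer-descriptor memory carved out per queue in pmd_enetfec_probe(),
 * so only the ring bookkeeping is initialized here.
 */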
static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
		sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	txq->fep = fep;
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
	bdp = txq->bd.base;

	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	txq->dirty_tx = bdp;
	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
	return 0;
}

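/*
 * Set up an Rx queue: initialize the descriptor ring in the UIO-mapped
 * memory and attach one mbuf from mb_pool to every descriptor.
 */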
static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	if (queue_idx >= ENETFEC_MAX_Q) {
		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d\n",
			queue_idx, ENETFEC_MAX_Q);
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	rxq->pool = mb_pool;
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	rxq->fep = fep;
	bdp = rxq->bd.base;
	rxq->bd.cur = bdp;

	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf failed");
			goto err_alloc;
		}

		/* Get the virtual address & physical address */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.cur;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)),  &bdp->bd_sc);
	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure          = enetfec_eth_configure,
	.dev_start              = enetfec_eth_start,
	.dev_stop               = enetfec_eth_stop,
	.dev_close              = enetfec_eth_close,
	.link_update            = enetfec_eth_link_update,
	.promiscuous_enable     = enetfec_promiscuous_enable,
	.allmulticast_enable    = enetfec_multicast_enable,
	.mac_addr_set           = enetfec_set_mac_address,
	.stats_get              = enetfec_stats_get,
	.dev_infos_get          = enetfec_eth_info,
	.rx_queue_setup         = enetfec_rx_queue_setup,
	.tx_queue_setup         = enetfec_tx_queue_setup
};

static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);

	return 0;
}

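/*
 * Probe the virtual device: map the ENETFEC register and buffer
 * descriptor regions through UIO, split the descriptor memory evenly
 * across the Tx and Rx rings, and register the ethdev.
 */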
static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;
	struct rte_ether_addr macaddr = {
		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
	};

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;
	fep->dev = dev;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC;

	rc = enetfec_configure();
	if (rc != 0) {
		rc = -ENOMEM;
		goto err;
	}
	rc = config_enetfec_uio(fep);
	if (rc != 0) {
		rc = -ENOMEM;
		goto err;
	}

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}

	/* Copy the station address into the dev structure. */
	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			RTE_ETHER_ADDR_LEN);
		rc = -ENOMEM;
		goto err;
	}

	/*
	 * Set default mac address
	 */
	enetfec_set_mac_address(dev, &macaddr);

	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
err:
	rte_eth_dev_release_port(dev);
	return rc;
}

static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct enetfec_private *fep;
	struct enetfec_priv_rx_q *rxq;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	fep = eth_dev->data->dev_private;
	/* Free descriptor base of first RX queue as it was configured
	 * first in enetfec_eth_init().
	 */
	rxq = fep->rx_queues[0];
	rte_free(rxq->bd.base);
	enet_free_queue(eth_dev);
	enetfec_eth_stop(eth_dev);

	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	ENETFEC_PMD_INFO("Release enetfec sw device");
	enetfec_cleanup(fep);

	return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);
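
/*
 * Example usage (illustrative only, not taken from this file): since the
 * driver registers itself as the vdev "net_enetfec", it can be brought up
 * with a command line such as
 *     dpdk-testpmd --vdev=net_enetfec -- -i
 * provided the ENETFEC UIO device has been prepared on the target board.
 */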