xref: /dpdk/drivers/net/enetfec/enet_ethdev.c (revision d64e9cfe97f42226abc6d145f4f62b74b5fdc04a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <inttypes.h>

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_io.h>

#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD                net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

/* Extended buffer descriptor */
#define ENETFEC_EXTENDED_BD		0
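/* The UIO-mapped BD memory region is split evenly among the Tx and Rx rings */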
#define NUM_OF_BD_QUEUES		6

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;
	int i;

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	/* Configure the duplex mode */
	if (fep->full_duplex == FULL_DUPLEX) {
		/* Full duplex enable */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* No receive on transmit (half duplex) */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}
	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* align IP header */
		val |= ENETFEC_RACC_SHIFT16;
		if (fep->flag_csum & RX_FLAG_CSUM_EN)
			/* set RX checksum */
			val |= ENETFEC_RACC_OPTIONS;
		else
			val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* OPD */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
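	/*
	 * Per the i.MX ENET ECR layout, bit 4 (EN1588) selects the
	 * enhanced (extended) buffer descriptor format.
	 */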
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;
	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	ecntl |= 0x70000000;
	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);

	for (i = 0; i < fep->max_rx_queues; i++)
		rte_write32(0, fep->rx_queues[i]->bd.active_reg_desc);
	rte_delay_us(10);
}

static void
enet_free_buffers(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i, q;
	struct rte_mbuf *mbuf;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	struct enetfec_priv_tx_q *txq;

	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		rxq = fep->rx_queues[q];
		bdp = rxq->bd.base;
		for (i = 0; i < rxq->bd.ring_size; i++) {
			mbuf = rxq->rx_mbuf[i];
			rxq->rx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
			bdp = enet_get_nextdesc(bdp, &rxq->bd);
		}
	}

	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		txq = fep->tx_queues[q];
		bdp = txq->bd.base;
		for (i = 0; i < txq->bd.ring_size; i++) {
			mbuf = txq->tx_mbuf[i];
			txq->tx_mbuf[i] = NULL;
			rte_pktmbuf_free(mbuf);
		}
	}
}

static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		fep->flag_csum |= RX_FLAG_CSUM_EN;

	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);
	dev->rx_pkt_burst = &enetfec_recv_pkts;
	dev->tx_pkt_burst = &enetfec_xmit_pkts;

	return 0;
}

/* ENETFEC disable function.
 * @param[in] fep       ENETFEC private data holding the mapped base address
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		    & ~(fep->enetfec_e_cntl),
		    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

static int
enetfec_eth_close(struct rte_eth_dev *dev)
{
	enet_free_buffers(dev);
	return 0;
}

static int
enetfec_eth_link_update(struct rte_eth_dev *dev,
			int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	unsigned int lstatus = 1;

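	/*
	 * The PMD does not manage a PHY, so the link is unconditionally
	 * reported as up at 1 Gb/s.
	 */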
	memset(&link, 0, sizeof(struct rte_eth_link));

	link.link_status = lstatus;
	link.link_speed = RTE_ETH_SPEED_NUM_1G;

	ENETFEC_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
			 "Up");

	return rte_eth_linkstatus_set(dev, &link);
}

static int
enetfec_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t tmp;

	tmp = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);
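	/*
	 * Per the i.MX ENET RCR layout: set PROM (bit 3) to accept all
	 * frames and clear DRT (bit 1) so reception stays enabled while
	 * transmitting.
	 */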
	tmp |= 0x8;
	tmp &= ~0x2;
	rte_write32(rte_cpu_to_le_32(tmp),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	return 0;
}

static int
enetfec_multicast_enable(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

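	/* Set every bit of the group address hash (GAUR/GALR) */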
	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0xffffffff),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);
	dev->data->all_multicast = 1;

	rte_write32(rte_cpu_to_le_32(0x04400002),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GAUR);
	rte_write32(rte_cpu_to_le_32(0x10800049),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_GALR);

	return 0;
}

/* Set a MAC change in hardware. */
static int
enetfec_set_mac_address(struct rte_eth_dev *dev,
		    struct rte_ether_addr *addr)
{
	struct enetfec_private *fep = dev->data->dev_private;

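	/* PALR holds MAC bytes 0-3; PAUR holds bytes 4-5 in its upper half */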
	writel(addr->addr_bytes[3] | (addr->addr_bytes[2] << 8) |
		(addr->addr_bytes[1] << 16) | (addr->addr_bytes[0] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PALR);
	writel((addr->addr_bytes[5] << 16) | (addr->addr_bytes[4] << 24),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_PAUR);

	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	return 0;
}

static int
enetfec_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct enetfec_private *fep = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &fep->stats;

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->ierrors = eth_stats->ierrors;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;
	stats->oerrors = eth_stats->oerrors;
	stats->rx_nombuf = eth_stats->rx_nombuf;

	return 0;
}

static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	return 0;
}

static void
enet_free_queue(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		rte_free(fep->rx_queues[i]);
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_free(fep->tx_queues[i]);
}

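/* Per-queue receive/transmit descriptor active register (RDAR/TDAR) offsets */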
static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
		sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	txq->fep = fep;
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
	bdp = txq->bd.base;
	bdp = txq->bd.cur;

	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	txq->dirty_tx = bdp;
	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
	return 0;
}

static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	if (queue_idx >= ENETFEC_MAX_Q) {
		ENETFEC_PMD_ERR("Invalid queue id %" PRIu16 ", max %d\n",
			queue_idx, ENETFEC_MAX_Q);
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	rxq->pool = mb_pool;
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	rxq->fep = fep;
	bdp = rxq->bd.base;
	rxq->bd.cur = bdp;

	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf failed");
			goto err_alloc;
		}

		/* Get the virtual address & physical address */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.cur;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure          = enetfec_eth_configure,
	.dev_start              = enetfec_eth_start,
	.dev_stop               = enetfec_eth_stop,
	.dev_close              = enetfec_eth_close,
	.link_update            = enetfec_eth_link_update,
	.promiscuous_enable     = enetfec_promiscuous_enable,
	.allmulticast_enable    = enetfec_multicast_enable,
	.mac_addr_set           = enetfec_set_mac_address,
	.stats_get              = enetfec_stats_get,
	.dev_infos_get          = enetfec_eth_info,
	.rx_queue_setup         = enetfec_rx_queue_setup,
	.tx_queue_setup         = enetfec_tx_queue_setup
};

static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);

	return 0;
}

static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;
	struct rte_ether_addr macaddr = {
		.addr_bytes = { 0x1, 0x1, 0x1, 0x1, 0x1, 0x1 }
	};

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;
	fep->dev = dev;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC;

	rc = enetfec_configure();
	if (rc != 0)
		return -ENOMEM;
	rc = config_enetfec_uio(fep);
	if (rc != 0)
		return -ENOMEM;

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}

	/* Set up storage for the station (MAC) address in the dev structure. */
	dev->data->mac_addrs = rte_zmalloc("mac_addr", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		ENETFEC_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			RTE_ETHER_ADDR_LEN);
		rc = -ENOMEM;
		goto err;
	}

	/*
	 * Set default mac address
	 */
	enetfec_set_mac_address(dev, &macaddr);

	fep->bufdesc_ex = ENETFEC_EXTENDED_BD;
	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
err:
	rte_eth_dev_release_port(dev);
	return rc;
}

static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct enetfec_private *fep;
	struct enetfec_priv_rx_q *rxq;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	fep = eth_dev->data->dev_private;
	/* Free descriptor base of first RX queue as it was configured
	 * first in enetfec_eth_init().
	 */
	rxq = fep->rx_queues[0];
	rte_free(rxq->bd.base);
	enet_free_queue(eth_dev);
	enetfec_eth_stop(eth_dev);

	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	ENETFEC_PMD_INFO("Release enetfec sw device");
	enetfec_cleanup(fep);

	return 0;
}

static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

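/*
 * The driver registers as a virtual device; it is typically instantiated
 * with the EAL argument --vdev=net_enetfec.
 */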
RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);