/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020-2021 NXP
 */

#include <ethdev_vdev.h>
#include <ethdev_driver.h>
#include <rte_io.h>
#include "enet_pmd_logs.h"
#include "enet_ethdev.h"
#include "enet_regs.h"
#include "enet_uio.h"

#define ENETFEC_NAME_PMD                net_enetfec

/* FEC receive acceleration */
#define ENETFEC_RACC_IPDIS		RTE_BIT32(1)
#define ENETFEC_RACC_PRODIS		RTE_BIT32(2)
#define ENETFEC_RACC_SHIFT16		RTE_BIT32(7)
#define ENETFEC_RACC_OPTIONS		(ENETFEC_RACC_IPDIS | \
						ENETFEC_RACC_PRODIS)

#define ENETFEC_PAUSE_FLAG_AUTONEG	0x1
#define ENETFEC_PAUSE_FLAG_ENABLE	0x2

/* Pause frame field and FIFO threshold */
#define ENETFEC_FCE			RTE_BIT32(5)
#define ENETFEC_RSEM_V			0x84
#define ENETFEC_RSFL_V			16
#define ENETFEC_RAEM_V			0x8
#define ENETFEC_RAFL_V			0x8
#define ENETFEC_OPD_V			0xFFF0

#define NUM_OF_BD_QUEUES		6

/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		RTE_ETH_RX_OFFLOAD_CHECKSUM |
		RTE_ETH_RX_OFFLOAD_VLAN;

/*
 * This function is called to start or restart the ENETFEC during a link
 * change, transmit timeout, or to reconfigure the ENETFEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
enetfec_restart(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;
	uint32_t rcntl = OPT_FRAME_SIZE | 0x04;
	uint32_t ecntl = ENETFEC_ETHEREN;
	uint32_t val;

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_EIR);

	/* Configure duplex mode */
	if (fep->full_duplex == FULL_DUPLEX) {
		/* FD enable */
		rte_write32(rte_cpu_to_le_32(0x04),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_TCR);
	}

	if (fep->quirks & QUIRK_RACC) {
		val = rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		/* align IP header */
		val |= ENETFEC_RACC_SHIFT16;
		val &= ~ENETFEC_RACC_OPTIONS;
		rte_write32(rte_cpu_to_le_32(val),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RACC);
		rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_FRAME_TRL);
	}

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		rcntl |= RTE_BIT32(6);
		ecntl |= RTE_BIT32(5);
	}

	/* enable pause frame */
	if ((fep->flag_pause & ENETFEC_PAUSE_FLAG_ENABLE) ||
		((fep->flag_pause & ENETFEC_PAUSE_FLAG_AUTONEG)
		/*&& ndev->phydev && ndev->phydev->pause*/)) {
		rcntl |= ENETFEC_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RSFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_SFL);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAEM_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AEM);
		rte_write32(rte_cpu_to_le_32(ENETFEC_RAFL_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_R_FIFO_AFL);

		/* OPD */
		rte_write32(rte_cpu_to_le_32(ENETFEC_OPD_V),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_OPD);
	} else {
		rcntl &= ~ENETFEC_FCE;
	}

	rte_write32(rte_cpu_to_le_32(rcntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RCR);

	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IAUR);
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_IALR);

	if (fep->quirks & QUIRK_HAS_ENETFEC_MAC) {
		/* enable ENETFEC endian swap */
		ecntl |= (1 << 8);
		/* enable ENETFEC store and forward mode */
		rte_write32(rte_cpu_to_le_32(1 << 8),
			(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TFWR);
	}
	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_txc_delay)
		ecntl |= ENETFEC_TXC_DLY;
	if (fep->quirks & QUIRK_SUPPORT_DELAYED_CLKS &&
		fep->rgmii_rxc_delay)
		ecntl |= ENETFEC_RXC_DLY;
	/* Enable the MIB statistic event counters */
	rte_write32(0, (uint8_t *)fep->hw_baseaddr_v + ENETFEC_MIBC);

	ecntl |= 0x70000000;
	fep->enetfec_e_cntl = ecntl;
	/* And last, enable the transmit and receive processing */
	rte_write32(rte_cpu_to_le_32(ecntl),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
	rte_delay_us(10);
}

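/*
 * Check the requested device configuration. The only check is for the
 * unsupported KEEP_CRC Rx offload, which is reported but does not fail
 * the call.
 */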
static int
enetfec_eth_configure(struct rte_eth_dev *dev)
{
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		ENETFEC_PMD_ERR("PMD does not support KEEP_CRC offload");

	return 0;
}

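/*
 * Start the device: program the MAC through enetfec_restart() so that the
 * receiver and transmitter are enabled.
 */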
static int
enetfec_eth_start(struct rte_eth_dev *dev)
{
	enetfec_restart(dev);

	return 0;
}

/* ENETFEC disable function.
 * @param[in] fep	ENETFEC private data
 */
static void
enetfec_disable(struct enetfec_private *fep)
{
	rte_write32(rte_read32((uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR)
		    & ~(fep->enetfec_e_cntl),
		    (uint8_t *)fep->hw_baseaddr_v + ENETFEC_ECR);
}

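/*
 * Stop the device: mark it stopped and clear the enable bits that
 * enetfec_restart() set in the ECR register.
 */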
static int
enetfec_eth_stop(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	dev->data->dev_started = 0;
	enetfec_disable(fep);

	return 0;
}

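/* Report device capabilities: maximum frame length, queue counts and the
 * supported Rx offloads.
 */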
static int
enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = ENETFEC_MAX_RX_PKT_LEN;
	dev_info->max_rx_queues = ENETFEC_MAX_Q;
	dev_info->max_tx_queues = ENETFEC_MAX_Q;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	return 0;
}

/* Per-queue offsets of the Rx/Tx descriptor-active registers */
static const unsigned short offset_des_active_rxq[] = {
	ENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2
};

static const unsigned short offset_des_active_txq[] = {
	ENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2
};

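/*
 * Set up a Tx queue: allocate the queue structure, clamp the ring size to
 * MAX_TX_BD_RING_SIZE, program the descriptor ring base address and
 * initialize every buffer descriptor, with the wrap bit set on the last one.
 */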
static int
enetfec_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bdp, *bd_base;
	struct enetfec_priv_tx_q *txq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
		sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Tx deferred start is not supported */
	if (tx_conf->tx_deferred_start) {
		ENETFEC_PMD_ERR("Tx deferred start not supported");
		return -EINVAL;
	}

	/* allocate transmit queue */
	txq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		ENETFEC_PMD_ERR("transmit queue allocation failed");
		return -ENOMEM;
	}

	if (nb_desc > MAX_TX_BD_RING_SIZE) {
		nb_desc = MAX_TX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_TX_BD_RING_SIZE");
	}
	txq->bd.ring_size = nb_desc;
	fep->total_tx_ring_size += txq->bd.ring_size;
	fep->tx_queues[queue_idx] = txq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));

	/* Set transmit descriptor base. */
	txq = fep->tx_queues[queue_idx];
	txq->fep = fep;
	size = dsize * txq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];
	txq->bd.queue_id = queue_idx;
	txq->bd.base = bd_base;
	txq->bd.cur = bd_base;
	txq->bd.d_size = dsize;
	txq->bd.d_size_log2 = dsize_log2;
	txq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_txq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	txq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);
	bdp = txq->bd.cur;

	for (i = 0; i < txq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);
		if (txq->tx_mbuf[i] != NULL) {
			rte_pktmbuf_free(txq->tx_mbuf[i]);
			txq->tx_mbuf[i] = NULL;
		}
		rte_write32(0, &bdp->bd_bufaddr);
		bdp = enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &txq->bd);
	rte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	txq->dirty_tx = bdp;
	dev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];
	return 0;
}

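/*
 * Set up an Rx queue: allocate the queue structure, clamp the ring size to
 * MAX_RX_BD_RING_SIZE, program the ring base and maximum buffer size, and
 * populate every descriptor with an mbuf from the given mempool before
 * marking it empty (ready for hardware).
 */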
static int
enetfec_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_rx_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mb_pool)
{
	struct enetfec_private *fep = dev->data->dev_private;
	unsigned int i;
	struct bufdesc *bd_base;
	struct bufdesc *bdp;
	struct enetfec_priv_rx_q *rxq;
	unsigned int size;
	unsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned int dsize_log2 = fls64(dsize);

	/* Rx deferred start is not supported */
	if (rx_conf->rx_deferred_start) {
		ENETFEC_PMD_ERR("Rx deferred start not supported");
		return -EINVAL;
	}

	/* allocate receive queue */
	rxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		ENETFEC_PMD_ERR("receive queue allocation failed");
		return -ENOMEM;
	}

	if (nb_rx_desc > MAX_RX_BD_RING_SIZE) {
		nb_rx_desc = MAX_RX_BD_RING_SIZE;
		ENETFEC_PMD_WARN("modified the nb_desc to MAX_RX_BD_RING_SIZE");
	}

	rxq->bd.ring_size = nb_rx_desc;
	fep->total_rx_ring_size += rxq->bd.ring_size;
	fep->rx_queues[queue_idx] = rxq;

	rte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));
	rte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),
		(uint8_t *)fep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));

	/* Set receive descriptor base. */
	rxq = fep->rx_queues[queue_idx];
	rxq->pool = mb_pool;
	size = dsize * rxq->bd.ring_size;
	bd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];
	rxq->bd.queue_id = queue_idx;
	rxq->bd.base = bd_base;
	rxq->bd.cur = bd_base;
	rxq->bd.d_size = dsize;
	rxq->bd.d_size_log2 = dsize_log2;
	rxq->bd.active_reg_desc = (uint8_t *)fep->hw_baseaddr_v +
			offset_des_active_rxq[queue_idx];
	bd_base = (struct bufdesc *)(((uintptr_t)bd_base) + size);
	rxq->bd.last = (struct bufdesc *)(((uintptr_t)bd_base) - dsize);

	rxq->fep = fep;
	bdp = rxq->bd.base;
	rxq->bd.cur = bdp;

	for (i = 0; i < nb_rx_desc; i++) {
		/* Initialize Rx buffers from pktmbuf pool */
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);
		if (mbuf == NULL) {
			ENETFEC_PMD_ERR("mbuf alloc failed");
			goto err_alloc;
		}

		/* Set the buffer physical address (IOVA) in the descriptor */
		rte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),
			&bdp->bd_bufaddr);

		rxq->rx_mbuf[i] = mbuf;
		rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = rxq->bd.cur;
	for (i = 0; i < rxq->bd.ring_size; i++) {
		/* Initialize the BD for every fragment in the page. */
		if (rte_read32(&bdp->bd_bufaddr) > 0)
			rte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),
				&bdp->bd_sc);
		else
			rte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);

		bdp = enet_get_nextdesc(bdp, &rxq->bd);
	}

	/* Set the last buffer to wrap */
	bdp = enet_get_prevdesc(bdp, &rxq->bd);
	rte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |
		rte_read16(&bdp->bd_sc)), &bdp->bd_sc);
	dev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];
	rte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);
	return 0;

err_alloc:
	for (i = 0; i < nb_rx_desc; i++) {
		if (rxq->rx_mbuf[i] != NULL) {
			rte_pktmbuf_free(rxq->rx_mbuf[i]);
			rxq->rx_mbuf[i] = NULL;
		}
	}
	rte_free(rxq);
	return -ENOMEM;
}

static const struct eth_dev_ops enetfec_ops = {
	.dev_configure          = enetfec_eth_configure,
	.dev_start              = enetfec_eth_start,
	.dev_stop               = enetfec_eth_stop,
	.dev_infos_get          = enetfec_eth_info,
	.rx_queue_setup         = enetfec_rx_queue_setup,
	.tx_queue_setup         = enetfec_tx_queue_setup
};

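/*
 * Finish ethdev initialization: set defaults, hook up the ops table and
 * announce the port to the ethdev layer.
 */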
static int
enetfec_eth_init(struct rte_eth_dev *dev)
{
	struct enetfec_private *fep = dev->data->dev_private;

	fep->full_duplex = FULL_DUPLEX;
	dev->dev_ops = &enetfec_ops;
	rte_eth_dev_probing_finish(dev);
	return 0;
}

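/*
 * Probe the virtual device: allocate the ethdev, map the ENETFEC registers
 * and buffer descriptor memory through UIO, split the BD area across the
 * Tx and Rx queues and finish ethdev initialization.
 */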
static int
pmd_enetfec_probe(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct enetfec_private *fep;
	const char *name;
	int rc;
	int i;
	unsigned int bdsize;

	name = rte_vdev_device_name(vdev);
	ENETFEC_PMD_LOG(INFO, "Initializing pmd_fec for %s", name);

	dev = rte_eth_vdev_allocate(vdev, sizeof(*fep));
	if (dev == NULL)
		return -ENOMEM;

	/* setup board info structure */
	fep = dev->data->dev_private;
	fep->dev = dev;

	fep->max_rx_queues = ENETFEC_MAX_Q;
	fep->max_tx_queues = ENETFEC_MAX_Q;
	fep->quirks = QUIRK_HAS_ENETFEC_MAC | QUIRK_GBIT
		| QUIRK_RACC;

	rc = enetfec_configure();
	if (rc != 0)
		return -ENOMEM;
	rc = config_enetfec_uio(fep);
	if (rc != 0)
		return -ENOMEM;

	/* Get the BD size for distributing among six queues */
	bdsize = (fep->bd_size) / NUM_OF_BD_QUEUES;

	for (i = 0; i < fep->max_tx_queues; i++) {
		fep->dma_baseaddr_t[i] = fep->bd_addr_v;
		fep->bd_addr_p_t[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}
	for (i = 0; i < fep->max_rx_queues; i++) {
		fep->dma_baseaddr_r[i] = fep->bd_addr_v;
		fep->bd_addr_p_r[i] = fep->bd_addr_p;
		fep->bd_addr_v = (uint8_t *)fep->bd_addr_v + bdsize;
		fep->bd_addr_p = fep->bd_addr_p + bdsize;
	}

	rc = enetfec_eth_init(dev);
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	ENETFEC_PMD_ERR("Failed to init");
	return rc;
}

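/*
 * Remove the virtual device: look up the ethdev created at probe time and
 * release the port.
 */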
static int
pmd_enetfec_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (eth_dev == NULL)
		return -ENODEV;

	ret = rte_eth_dev_release_port(eth_dev);
	if (ret != 0)
		return -EINVAL;

	ENETFEC_PMD_INFO("Release enetfec sw device");
	return 0;
}

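/*
 * Register the driver as a vdev PMD; the device is typically instantiated
 * with an EAL argument such as --vdev=net_enetfec.
 */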
static struct rte_vdev_driver pmd_enetfec_drv = {
	.probe = pmd_enetfec_probe,
	.remove = pmd_enetfec_remove,
};

RTE_PMD_REGISTER_VDEV(ENETFEC_NAME_PMD, pmd_enetfec_drv);
RTE_LOG_REGISTER_DEFAULT(enetfec_logtype_pmd, NOTICE);