/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

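/*
 * Device start: enable the MAC Tx/Rx paths and the port, then select the
 * interface mode (RGMII auto-speed or XGMII). The port and global register
 * windows are PF-only, so the VF returns early without touching them.
 */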
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	if (hw->device_id == ENETC_DEV_ID_VF)
		return 0;

	/* Enable the MAC transmit and receive paths */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable the port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* Set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	/* Select XGMII mode when the global EPFBLPR register reports it */
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

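/*
 * Device stop: the mirror of enetc_dev_start() - disable the port first,
 * then the MAC Tx/Rx paths, and mark all queues stopped (PF only; the VF
 * just clears dev_started).
 */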
static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	if (hw->device_id == ENETC_DEV_ID_VF)
		return 0;

	/* Disable the port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	/* Disable the MAC transmit and receive paths */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

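/* Packet types this PMD can report; the array is terminated with
 * RTE_PTYPE_UNKNOWN, as the ethdev API expects.
 */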
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* Return 0 when the link status changed, -1 when it did not */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = RTE_ETH_LINK_UP;
	else
		link.link_status = RTE_ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

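/*
 * One-time hardware init: derive the port and global register windows from
 * the BAR mapping, apply the Rx FIFO erratum workaround, program coherent
 * DMA transactions, enable the station interface, and read the primary MAC
 * from SIPMAR0/1 (VF) or PSIPMAR0/1 (PF). If the SI has no MAC provisioned,
 * a random locally administered address is generated and written back.
 */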
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculate and store the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Workaround for the Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* Set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enable the station interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	if (hw->device_id == ENETC_DEV_ID_VF) {
		*mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
		high_mac = (uint32_t)*mac;
		mac++;
		*mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
		low_mac = (uint16_t)*mac;
	} else {
		*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
		high_mac = (uint32_t)*mac;
		mac++;
		*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
		low_mac = (uint16_t)*mac;
	}

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				"set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);

	return 0;
}

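/*
 * Each BD ring is backed by two parallel arrays: a software ring of
 * struct enetc_swbd entries that tracks the mbuf attached to each slot,
 * and the hardware descriptor ring itself that is handed to the device.
 */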
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

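/*
 * Program a Tx BD ring into the hardware: the ring's IOVA is split across
 * the 32-bit TBBAR0 (low) and TBBAR1 (high) registers, the producer and
 * consumer indices are zeroed, and pointers to the consumer index
 * registers are cached for use on the Tx fast path.
 */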
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

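/*
 * Honouring tx_deferred_start: the ring-enable bit in TBMR is set here
 * only when deferred start is not requested; otherwise the queue stays
 * stopped until enetc_tx_queue_start() is called.
 */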
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* Enable the ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

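/*
 * Program an Rx BD ring: besides the base address and length, the ring is
 * pre-filled with mbufs from the mempool, and RBBSR is set to the usable
 * buffer size, i.e. the mempool data room minus RTE_PKTMBUF_HEADROOM.
 */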
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

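/*
 * Rx queue setup mirrors the Tx path, with two extras: the queue keeps a
 * reference to the mempool used for buffer refills, and crc_len records
 * whether the 4-byte FCS is kept (RTE_ETH_RX_OFFLOAD_KEEP_CRC) so the Rx
 * burst routine can account for it.
 */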
448 enetc_rx_queue_setup(struct rte_eth_dev *dev,
449 		     uint16_t rx_queue_id,
450 		     uint16_t nb_rx_desc,
451 		     unsigned int socket_id __rte_unused,
452 		     const struct rte_eth_rxconf *rx_conf,
453 		     struct rte_mempool *mb_pool)
454 {
455 	int err = 0;
456 	struct enetc_bdr *rx_ring;
457 	struct rte_eth_dev_data *data =  dev->data;
458 	struct enetc_eth_adapter *adapter =
459 			ENETC_DEV_PRIVATE(data->dev_private);
460 	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;
461 
462 	PMD_INIT_FUNC_TRACE();
463 	if (nb_rx_desc > MAX_BD_COUNT)
464 		return -1;
465 
466 	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
467 	if (rx_ring == NULL) {
468 		ENETC_PMD_ERR("Failed to allocate RX ring memory");
469 		err = -ENOMEM;
470 		return err;
471 	}
472 
473 	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
474 	if (err)
475 		goto fail;
476 
477 	rx_ring->index = rx_queue_id;
478 	rx_ring->ndev = dev;
479 	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
480 	data->rx_queues[rx_queue_id] = rx_ring;
481 
482 	if (!rx_conf->rx_deferred_start) {
483 		/* enable ring */
484 		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
485 			       ENETC_RBMR_EN);
486 		dev->data->rx_queue_state[rx_ring->index] =
487 			       RTE_ETH_QUEUE_STATE_STARTED;
488 	} else {
489 		dev->data->rx_queue_state[rx_ring->index] =
490 			       RTE_ETH_QUEUE_STATE_STOPPED;
491 	}
492 
493 	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
494 				     RTE_ETHER_CRC_LEN : 0);
495 
496 	return 0;
497 fail:
498 	rte_free(rx_ring);
499 
500 	return err;
501 }
502 
static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

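/*
 * Basic statistics are read from the port-level MAC counters; a single
 * write to ENETC_PM0_STAT_CONFIG clears the whole counter block (see
 * enetc_stats_reset()).
 */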
static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total packets received/transmitted, bad + good. To count only
	 * good frames, read the ENETC_PM0_RFRM and ENETC_PM0_TFRM
	 * registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return ret;
}

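/*
 * Promiscuous and all-multicast modes are both controlled through the
 * PSIPMR register: ENETC_PSIPMR_SET_UP(n) enables unicast promiscuity and
 * ENETC_PSIPMR_SET_MP(n) multicast promiscuity for station interface n
 * (SI0 here).
 */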
static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable unicast and multicast promiscuous mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Disable unicast promiscuous mode for SI0; keep multicast
	 * promiscuity if all-multicast is still enabled.
	 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable all-multicast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all-multicast mode */

	/* Disable all-multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

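/*
 * The hardware is programmed with the full frame size rather than the MTU,
 * e.g. an MTU of 1500 becomes 1500 + 14 (Ethernet header) + 4 (CRC) = 1518
 * bytes in ENETC_PM0_MAXFRM.
 */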
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/*
	 * Refuse an MTU that requires scattered-packet support when that
	 * feature has not been enabled beforehand.
	 */
	if (dev->data->min_rx_buf_size &&
		!dev->data->scattered_rx && frame_size >
		dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	/* Set the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;
	uint32_t max_len;

	PMD_INIT_FUNC_TRACE();

	max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN;
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

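/*
 * Per-queue start/stop just toggles the ring-enable bit (ENETC_RBMR_EN /
 * ENETC_TBMR_EN) in the ring's mode register and updates the ethdev queue
 * state accordingly.
 */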
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data |= ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data &= ~ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data |= ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data &= ~ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieve and store the HW base address of the device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			   struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);