/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

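/*
 * Start the port: enable MAC Tx/Rx and the port itself, then latch the
 * PHY interface mode (RGMII auto-speed or XGMII). VFs skip the port/MAC
 * programming and return early.
 */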
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	if (hw->device_id == ENETC_DEV_ID_VF)
		return 0;

	/* Enable MAC Tx and Rx */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* Set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	if (hw->device_id == ENETC_DEV_ID_VF)
		return 0;

	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	/* Disable MAC Tx and Rx */
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
			   size_t *no_of_elements)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/* Return 0 when the link status changed, -1 when it did not */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = RTE_ETH_LINK_UP;
	else
		link.link_status = RTE_ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = RTE_ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = RTE_ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = RTE_ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s", name, buf);
}

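/*
 * One-time hardware bring-up: derive the port and global register bases,
 * apply the Rx FIFO erratum workaround, set coherent DMA transactions,
 * enable the station interface, and read the primary MAC address. If the
 * SI has no MAC provisioned, a locally administered random address is
 * generated and programmed instead.
 */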
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculate and store the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Workaround for the Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* Set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enable the Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	if (hw->device_id == ENETC_DEV_ID_VF) {
		*mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
		high_mac = (uint32_t)*mac;
		mac++;
		*mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
		low_mac = (uint16_t)*mac;
	} else {
		*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
		high_mac = (uint32_t)*mac;
		mac++;
		*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
		low_mac = (uint16_t)*mac;
	}

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
		 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
		 RTE_ETH_RX_OFFLOAD_KEEP_CRC);

	return 0;
}

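/*
 * Allocate the software bookkeeping ring and the hardware BD ring for a
 * Tx queue; both are released together in enetc_free_bdr().
 */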
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

/* Free both the software and hardware BD rings of a queue */
static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

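/*
 * Program the Tx BD ring registers (IOVA base, length) and zero the
 * initial consumer indices, then cache pointers to the CIR/CISR
 * registers so the datapath need not recompute their offsets.
 */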
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -EINVAL;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* Enable the ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

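/*
 * Release a Tx queue: disable the ring in hardware first, then free any
 * mbufs still awaiting completion before releasing the BD rings.
 */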
static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *txq = dev->data->tx_queues[qid];

	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

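/*
 * Program the Rx BD ring registers (IOVA base, length), attach the mbuf
 * pool, pre-fill the ring with buffers, and advertise to hardware the
 * buffer size available past the mbuf headroom.
 */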
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -EINVAL;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* Enable the ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

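/*
 * Release an Rx queue: disable the ring, free the mbufs still posted
 * between next_to_clean and next_to_use, then free the BD rings.
 */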
static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	void *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

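/* Fill the ethdev basic stats from the ENETC port-level MAC counters. */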
static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total packets received/transmitted, good + bad. To count only
	 * good frames, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM
	 * registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	ret = enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return ret;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable promiscuous mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable all-multicast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all-multicast mode */

	/* Disable all-multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

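/*
 * Set the MTU. The resulting frame size must fit in a single Rx buffer
 * unless scattered Rx was enabled at configure time.
 */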
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/*
	 * Refuse an MTU that requires scattered-packet support when that
	 * feature has not been enabled beforehand.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	/* Set the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

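/*
 * Apply the configure-time settings: maximum frame length derived from
 * the MTU, optional CRC keeping, and the parser's L3/L4 checksum
 * configuration.
 */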
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;
	uint32_t max_len;

	PMD_INIT_FUNC_TRACE();

	max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
		RTE_ETHER_CRC_LEN;
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

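/*
 * Per-queue start/stop hooks: toggle the ring enable bit in RBMR/TBMR
 * and keep the ethdev queue-state array in sync.
 */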
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data |= ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data &= ~ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data |= ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data &= ~ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieve and store the HW base address of the device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return error;
	}

	/* Allocate memory for storing the MAC address */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store the MAC address",
			      RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);