xref: /dpdk/drivers/net/enetc/enetc_ethdev.c (revision 0ecc27f28d202a3356a8601e6762b601ea822c4c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

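/*
 * Enable the MAC transmitter/receiver and the port, then select the
 * interface mode: auto-negotiated speed for RGMII, or XGMII when the
 * port function block reports an XGMII lane.
 */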
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* Set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

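/* Disable the port and the MAC transmitter/receiver, undoing dev_start. */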
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

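/*
 * Compute the port and global register block addresses from BAR0, enable
 * the station interface and read the primary MAC address of SI0 from the
 * PSIPMAR0/1 registers.
 */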
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

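/* Allocate the software and hardware buffer descriptor arrays of a TX ring. */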
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

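/*
 * Program the TX BD ring base address and length into the hardware and
 * cache pointers to the consumer index registers for the datapath.
 */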
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

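/* Allocate, initialize and (unless deferred) enable one TX BD ring. */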
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -EINVAL;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

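/*
 * Disable the TX ring, free any mbufs still pending completion, then
 * release the descriptor arrays and the ring structure itself.
 */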
static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

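/* Allocate the software and hardware buffer descriptor arrays of an RX ring. */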
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

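/*
 * Program the RX BD ring base address and length, attach the mempool,
 * pre-fill the ring with buffers and set the hardware buffer size.
 */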
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

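/* Allocate, initialize and (unless deferred) enable one RX BD ring. */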
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -EINVAL;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

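/*
 * Disable the RX ring, free the mbufs still attached to it, then release
 * the descriptor arrays and the ring structure itself.
 */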
static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good. To count only good frames,
	 * read the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

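/* Stop the port and release all RX and TX queues. */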
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable unicast and multicast promiscuous mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Disable unicast promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable multicast promiscuous (allmulticast) mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must stay in multicast promiscuous mode */

	/* Disable multicast promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

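/*
 * Validate the requested MTU against the MAC frame-size limits and the
 * single-buffer constraint, toggle the jumbo-frame offload flag and
 * program the new maximum frame size into the MAC.
 */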
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* check that mtu is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
		!dev->data->scattered_rx && frame_size >
		dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Set the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

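/*
 * Apply the configured RX offloads: jumbo frame size, CRC keeping and
 * the L3/L4 checksum validation flags in the parser port configuration.
 */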
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

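/*
 * Per-queue start/stop hooks: toggle the ring-enable bit in the BD ring
 * mode register and track the queue state for the ethdev layer.
 */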
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Operations supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return error;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}