/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

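/* Start the device: enable the MAC Tx/Rx data paths and the port itself,
 * then program the interface mode, selecting auto-speed when the PHY
 * interface is RGMII and XGMII when the block capability register reports
 * an XGMII link. Both PM0 and PM1 interface-mode registers are updated.
 */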
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

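/* Stop the device: clear the port enable bit, then the MAC Tx/Rx enables,
 * reversing what enetc_dev_start() programmed.
 */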
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

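/* One-time hardware bring-up: derive the port and global register block
 * addresses from BAR0, enable station interface 0, and read back SI0's
 * primary MAC address from the PSIPMAR0/1 registers.
 */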
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	/* Read the 6-byte MAC address: the low 4 bytes from PSIPMAR0, the
	 * high 2 bytes from PSIPMAR1. The second store must go through a
	 * uint16_t pointer; a 32-bit store would write past the end of the
	 * 6-byte mac.addr array.
	 */
	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	mac++;
	*(uint16_t *)mac = (uint16_t)enetc_port_rd(enetc_hw,
						   ENETC_PSIPMAR1(0));

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

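/* Allocate the software bookkeeping array and the hardware BD array for a
 * Tx ring; both are sized by nb_desc and cache-line aligned.
 */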
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

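/* Program a Tx BD ring into hardware: write the ring's IOVA base address
 * and length, zero the consumer indices, and cache pointers to the TBCIR/
 * TBCISR registers for fast access on the transmit path.
 */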
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -EINVAL;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

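/* Disable the Tx ring in hardware, free any mbufs still awaiting
 * completion, then release the BD memory and the ring structure itself.
 */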
282 
283 static void
284 enetc_tx_queue_release(void *txq)
285 {
286 	if (txq == NULL)
287 		return;
288 
289 	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
290 	struct enetc_eth_hw *eth_hw =
291 		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
292 	struct enetc_hw *hw;
293 	struct enetc_swbd *tx_swbd;
294 	int i;
295 	uint32_t val;
296 
297 	/* Disable the ring */
298 	hw = &eth_hw->hw;
299 	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
300 	val &= (~ENETC_TBMR_EN);
301 	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);
302 
303 	/* clean the ring*/
304 	i = tx_ring->next_to_clean;
305 	tx_swbd = &tx_ring->q_swbd[i];
306 	while (tx_swbd->buffer_addr != NULL) {
307 		rte_pktmbuf_free(tx_swbd->buffer_addr);
308 		tx_swbd->buffer_addr = NULL;
309 		tx_swbd++;
310 		i++;
311 		if (unlikely(i == tx_ring->bd_count)) {
312 			i = 0;
313 			tx_swbd = &tx_ring->q_swbd[i];
314 		}
315 	}
316 
317 	enetc_free_bdr(tx_ring);
318 	rte_free(tx_ring);
319 }
320 
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

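/* Program an Rx BD ring into hardware: write the IOVA base address and
 * length, attach the mbuf pool, pre-fill the ring with buffers, and set
 * the per-buffer data size (mbuf room size minus headroom).
 */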
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -EINVAL;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

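/* Disable the Rx ring in hardware, free the mbufs still posted between
 * next_to_clean and next_to_use, then release the BD memory and the ring
 * structure itself.
 */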
static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

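/* Copy the port MAC (PM0) hardware counters into the generic ethdev
 * statistics.
 */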
static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* RPKT/TPKT count all received/transmitted frames, bad and good;
	 * to count only good frames, read the ENETC_PM0_RFRM and
	 * ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* RDRP counts dropped plus truncated packets; use ENETC_PM0_RDRNTP
	 * to exclude the truncated ones.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}

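/* Close the device: stop the port and release every Rx and Tx queue. */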
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static void
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable unicast and multicast promiscuous mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Disable unicast promiscuous mode for SI0; keep multicast
	 * promiscuity if allmulticast is still enabled.
	 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Enable multicast promiscuous (allmulticast) mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return; /* must remain in all_multicast mode */

	/* Disable multicast promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* check that mtu is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse mtu that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
		!dev->data->scattered_rx && frame_size >
		dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	/* Set, rather than clear, the jumbo frame flag for large frames */
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* set the new maximum frame size */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

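/* Apply the requested Rx offloads: jumbo frame limits, CRC retention, and
 * L3/L4 checksum handling. The checksum word starts with both L3_CKSUM and
 * L4_CKSUM set; each bit is cleared when the application requests the
 * corresponding offload, and the result is written to ENETC_PAR_PORT_CFG.
 */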
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

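/* Per-queue start/stop hooks: each toggles the ring enable bit in the
 * Rx/Tx BD ring mode register and mirrors the new state in
 * rte_eth_dev_data, so deferred-start queues can be driven at runtime.
 */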
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieve and store the HW base address of the device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}