xref: /dpdk/drivers/net/octeontx/octeontx_ethdev.c (revision f06125c07d6203a84e9b242c62d6a8e532a5c51d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4 
5 #include <stdio.h>
6 #include <stdarg.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 
12 #include <rte_alarm.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_debug.h>
15 #include <rte_devargs.h>
16 #include <rte_dev.h>
17 #include <rte_kvargs.h>
18 #include <rte_malloc.h>
19 #include <rte_prefetch.h>
20 #include <rte_bus_vdev.h>
21 
22 #include "octeontx_ethdev.h"
23 #include "octeontx_rxtx.h"
24 #include "octeontx_logs.h"
25 
26 struct octeontx_vdev_init_params {
27 	int nr_port; /* parse_integer_arg() stores an int through this field */
28 };
29 
30 enum octeontx_link_speed {
31 	OCTEONTX_LINK_SPEED_SGMII,
32 	OCTEONTX_LINK_SPEED_XAUI,
33 	OCTEONTX_LINK_SPEED_RXAUI,
34 	OCTEONTX_LINK_SPEED_10G_R,
35 	OCTEONTX_LINK_SPEED_40G_R,
36 	OCTEONTX_LINK_SPEED_RESERVE1,
37 	OCTEONTX_LINK_SPEED_QSGMII,
38 	OCTEONTX_LINK_SPEED_RESERVE2
39 };
40 
41 /* Parse an integer from a devargs key=value pair */
42 static int
43 parse_integer_arg(const char *key __rte_unused,
44 		const char *value, void *extra_args)
45 {
46 	int *i = (int *)extra_args;
47 
48 	*i = atoi(value);
49 	if (*i < 0) {
50 		octeontx_log_err("argument must not be negative");
51 		return -1;
52 	}
53 
54 	return 0;
55 }
56 
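/*
 * Parse the vdev device arguments (currently only "nr_port") into the
 * init params structure; unknown keys are rejected by rte_kvargs_parse()
 * against octeontx_vdev_valid_params.
 */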
57 static int
58 octeontx_parse_vdev_init_params(struct octeontx_vdev_init_params *params,
59 				struct rte_vdev_device *dev)
60 {
61 	struct rte_kvargs *kvlist = NULL;
62 	int ret = 0;
63 
64 	static const char * const octeontx_vdev_valid_params[] = {
65 		OCTEONTX_VDEV_NR_PORT_ARG,
66 		NULL
67 	};
68 
69 	const char *input_args = rte_vdev_device_args(dev);
70 	if (params == NULL)
71 		return -EINVAL;
72 
74 	if (input_args) {
75 		kvlist = rte_kvargs_parse(input_args,
76 				octeontx_vdev_valid_params);
77 		if (kvlist == NULL)
78 			return -1;
79 
80 		ret = rte_kvargs_process(kvlist,
81 					OCTEONTX_VDEV_NR_PORT_ARG,
82 					&parse_integer_arg,
83 					&params->nr_port);
84 		if (ret < 0)
85 			goto free_kvlist;
86 	}
87 
88 free_kvlist:
89 	rte_kvargs_free(kvlist);
90 	return ret;
91 }
92 
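/*
 * Query the BGX port configuration over the mailbox and cache the
 * returned channels, MTU, MAC address and link mode in the nic struct.
 */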
93 static int
94 octeontx_port_open(struct octeontx_nic *nic)
95 {
96 	octeontx_mbox_bgx_port_conf_t bgx_port_conf;
97 	int res;
98 
99 	res = 0;
100 
101 	PMD_INIT_FUNC_TRACE();
102 
103 	res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
104 	if (res < 0) {
105 		octeontx_log_err("failed to open port %d: err=%d", nic->port_id, res);
106 		return res;
107 	}
108 
109 	nic->node = bgx_port_conf.node;
110 	nic->port_ena = bgx_port_conf.enable;
111 	nic->base_ichan = bgx_port_conf.base_chan;
112 	nic->base_ochan = bgx_port_conf.base_chan;
113 	nic->num_ichans = bgx_port_conf.num_chans;
114 	nic->num_ochans = bgx_port_conf.num_chans;
115 	nic->mtu = bgx_port_conf.mtu;
116 	nic->bpen = bgx_port_conf.bpen;
117 	nic->fcs_strip = bgx_port_conf.fcs_strip;
118 	nic->bcast_mode = bgx_port_conf.bcast_mode;
119 	nic->mcast_mode = bgx_port_conf.mcast_mode;
120 	nic->speed	= bgx_port_conf.mode;
121 
122 	memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
123 
124 	octeontx_log_dbg("port opened %d", nic->port_id);
125 	return res;
126 }
127 
128 static void
129 octeontx_port_close(struct octeontx_nic *nic)
130 {
131 	PMD_INIT_FUNC_TRACE();
132 
133 	octeontx_bgx_port_close(nic->port_id);
134 	octeontx_log_dbg("port closed %d", nic->port_id);
135 }
136 
137 static int
138 octeontx_port_start(struct octeontx_nic *nic)
139 {
140 	PMD_INIT_FUNC_TRACE();
141 
142 	return octeontx_bgx_port_start(nic->port_id);
143 }
144 
145 static int
146 octeontx_port_stop(struct octeontx_nic *nic)
147 {
148 	PMD_INIT_FUNC_TRACE();
149 
150 	return octeontx_bgx_port_stop(nic->port_id);
151 }
152 
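/* Set promiscuous mode in BGX and mirror the state into ethdev data. */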
153 static void
154 octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
155 {
156 	struct rte_eth_dev *dev;
157 	int res;
158 
159 	res = 0;
160 	PMD_INIT_FUNC_TRACE();
161 	dev = nic->dev;
162 
163 	res = octeontx_bgx_port_promisc_set(nic->port_id, en);
164 	if (res < 0)
165 		octeontx_log_err("failed to set promiscuous mode on port %d",
166 				nic->port_id);
167 
168 	/* Set proper flag for the mode */
169 	dev->data->promiscuous = (en != 0) ? 1 : 0;
170 
171 	octeontx_log_dbg("port %d : promiscuous mode %s",
172 			nic->port_id, en ? "set" : "unset");
173 }
174 
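/* Translate the BGX per-port counters into the generic rte_eth_stats layout. */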
175 static int
176 octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats)
177 {
178 	octeontx_mbox_bgx_port_stats_t bgx_stats;
179 	int res;
180 
181 	PMD_INIT_FUNC_TRACE();
182 
183 	res = octeontx_bgx_port_stats(nic->port_id, &bgx_stats);
184 	if (res < 0) {
185 		octeontx_log_err("failed to get port stats %d", nic->port_id);
186 		return res;
187 	}
188 
189 	stats->ipackets = bgx_stats.rx_packets;
190 	stats->ibytes = bgx_stats.rx_bytes;
191 	stats->imissed = bgx_stats.rx_dropped;
192 	stats->ierrors = bgx_stats.rx_errors;
193 	stats->opackets = bgx_stats.tx_packets;
194 	stats->obytes = bgx_stats.tx_bytes;
195 	stats->oerrors = bgx_stats.tx_errors;
196 
197 	octeontx_log_dbg("port%d stats inpkts=%" PRIu64 " outpkts=%" PRIu64 "",
198 			nic->port_id, stats->ipackets, stats->opackets);
199 
200 	return 0;
201 }
202 
203 static void
204 octeontx_port_stats_clr(struct octeontx_nic *nic)
205 {
206 	PMD_INIT_FUNC_TRACE();
207 
208 	octeontx_bgx_port_stats_clr(nic->port_id);
209 }
210 
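/*
 * Build an event device configuration that simply requests the maximum
 * resources (queues, ports, flows, depths, events) advertised by the
 * eventdev in rte_event_dev_info.
 */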
211 static inline void
212 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
213 				struct rte_event_dev_info *info)
214 {
215 	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
216 	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
217 
218 	dev_conf->nb_event_ports = info->max_event_ports;
219 	dev_conf->nb_event_queues = info->max_event_queues;
220 
221 	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
222 	dev_conf->nb_event_port_dequeue_depth =
223 			info->max_event_port_dequeue_depth;
224 	dev_conf->nb_event_port_enqueue_depth =
225 			info->max_event_port_enqueue_depth;
228 	dev_conf->nb_events_limit =
229 			info->max_num_events;
230 }
231 
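/*
 * Validate the requested ethdev configuration (most offloads and
 * multi-queue modes are unsupported) and open the PKO channel that
 * backs this port's Tx queues.
 */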
232 static int
233 octeontx_dev_configure(struct rte_eth_dev *dev)
234 {
235 	struct rte_eth_dev_data *data = dev->data;
236 	struct rte_eth_conf *conf = &data->dev_conf;
237 	struct rte_eth_rxmode *rxmode = &conf->rxmode;
238 	struct rte_eth_txmode *txmode = &conf->txmode;
239 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
240 	int ret;
241 
242 	PMD_INIT_FUNC_TRACE();
243 	RTE_SET_USED(conf);
244 
245 	if (!rte_eal_has_hugepages()) {
246 		octeontx_log_err("huge page is not configured");
247 		return -EINVAL;
248 	}
249 
250 	if (txmode->mq_mode) {
251 		octeontx_log_err("tx mq_mode DCB or VMDq not supported");
252 		return -EINVAL;
253 	}
254 
255 	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
256 		rxmode->mq_mode != ETH_MQ_RX_RSS) {
257 		octeontx_log_err("unsupported rx qmode %d", rxmode->mq_mode);
258 		return -EINVAL;
259 	}
260 
261 	if (!rxmode->hw_strip_crc) {
262 		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
263 		rxmode->hw_strip_crc = 1;
264 	}
265 
266 	if (rxmode->hw_ip_checksum) {
267 		PMD_INIT_LOG(NOTICE, "rxcksum not supported");
268 		rxmode->hw_ip_checksum = 0;
269 	}
270 
271 	if (rxmode->split_hdr_size) {
272 		octeontx_log_err("rxmode does not support split header");
273 		return -EINVAL;
274 	}
275 
276 	if (rxmode->hw_vlan_filter) {
277 		octeontx_log_err("VLAN filter not supported");
278 		return -EINVAL;
279 	}
280 
281 	if (rxmode->hw_vlan_extend) {
282 		octeontx_log_err("VLAN extended not supported");
283 		return -EINVAL;
284 	}
285 
286 	if (rxmode->enable_lro) {
287 		octeontx_log_err("LRO not supported");
288 		return -EINVAL;
289 	}
290 
291 	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
292 		octeontx_log_err("setting link speed/duplex not supported");
293 		return -EINVAL;
294 	}
295 
296 	if (conf->dcb_capability_en) {
297 		octeontx_log_err("DCB enable not supported");
298 		return -EINVAL;
299 	}
300 
301 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
302 		octeontx_log_err("flow director not supported");
303 		return -EINVAL;
304 	}
305 
306 	nic->num_tx_queues = dev->data->nb_tx_queues;
307 
308 	ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
309 					nic->num_tx_queues,
310 					nic->base_ochan);
311 	if (ret) {
312 		octeontx_log_err("failed to open channel %d no-of-txq %d",
313 			   nic->base_ochan, nic->num_tx_queues);
314 		return -EFAULT;
315 	}
316 
317 	nic->pki.classifier_enable = false;
318 	nic->pki.hash_enable = true;
319 	nic->pki.initialized = false;
320 
321 	return 0;
322 }
323 
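/* Close the event device and the PKO channel, then free every Tx queue. */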
324 static void
325 octeontx_dev_close(struct rte_eth_dev *dev)
326 {
327 	struct octeontx_txq *txq = NULL;
328 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
329 	unsigned int i;
330 	int ret;
331 
332 	PMD_INIT_FUNC_TRACE();
333 
334 	rte_event_dev_close(nic->evdev);
335 
336 	ret = octeontx_pko_channel_close(nic->base_ochan);
337 	if (ret < 0) {
338 		octeontx_log_err("failed to close channel %d VF%d %d %d",
339 			     nic->base_ochan, nic->port_id, nic->num_tx_queues,
340 			     ret);
341 	}
342 	/* Free txq resources for this port */
343 	for (i = 0; i < nic->num_tx_queues; i++) {
344 		txq = dev->data->tx_queues[i];
345 		if (!txq)
346 			continue;
347 
348 		rte_free(txq);
349 	}
350 }
351 
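/*
 * Start order: install the Tx/Rx burst handlers, start the PKO channel,
 * then the PKI port, then the BGX port and finally the event device.
 * Each failure unwinds the steps already completed.
 */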
352 static int
353 octeontx_dev_start(struct rte_eth_dev *dev)
354 {
355 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
356 	int ret;
357 
358 	ret = 0;
359 
360 	PMD_INIT_FUNC_TRACE();
361 	/*
362 	 * Tx start
363 	 */
364 	dev->tx_pkt_burst = octeontx_xmit_pkts;
365 	ret = octeontx_pko_channel_start(nic->base_ochan);
366 	if (ret < 0) {
367 		octeontx_log_err("fail to conf VF%d no. txq %d chan %d ret %d",
368 			   nic->port_id, nic->num_tx_queues, nic->base_ochan,
369 			   ret);
370 		goto error;
371 	}
372 
373 	/*
374 	 * Rx start
375 	 */
376 	dev->rx_pkt_burst = octeontx_recv_pkts;
377 	ret = octeontx_pki_port_start(nic->port_id);
378 	if (ret < 0) {
379 		octeontx_log_err("fail to start Rx on port %d", nic->port_id);
380 		goto channel_stop_error;
381 	}
382 
383 	/*
384 	 * Start port
385 	 */
386 	ret = octeontx_port_start(nic);
387 	if (ret < 0) {
388 		octeontx_log_err("failed to start port %d: err=%d", nic->port_id, ret);
389 		goto pki_port_stop_error;
390 	}
391 
392 	PMD_TX_LOG(DEBUG, "pko: start channel %d no.of txq %d port %d",
393 			nic->base_ochan, nic->num_tx_queues, nic->port_id);
394 
395 	ret = rte_event_dev_start(nic->evdev);
396 	if (ret < 0) {
397 		octeontx_log_err("failed to start evdev: ret (%d)", ret);
398 		goto pki_port_stop_error;
399 	}
400 
401 	/* Success */
402 	return ret;
403 
404 pki_port_stop_error:
405 	octeontx_pki_port_stop(nic->port_id);
406 channel_stop_error:
407 	octeontx_pko_channel_stop(nic->base_ochan);
408 error:
409 	return ret;
410 }
411 
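/*
 * Stop in reverse order of start: event device, BGX port, PKI port and
 * finally the PKO channel; the burst handlers are cleared last.
 */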
412 static void
413 octeontx_dev_stop(struct rte_eth_dev *dev)
414 {
415 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
416 	int ret;
417 
418 	PMD_INIT_FUNC_TRACE();
419 
420 	rte_event_dev_stop(nic->evdev);
421 
422 	ret = octeontx_port_stop(nic);
423 	if (ret < 0) {
424 		octeontx_log_err("failed to req stop port %d res=%d",
425 					nic->port_id, ret);
426 		return;
427 	}
428 
429 	ret = octeontx_pki_port_stop(nic->port_id);
430 	if (ret < 0) {
431 		octeontx_log_err("failed to stop pki port %d res=%d",
432 					nic->port_id, ret);
433 		return;
434 	}
435 
436 	ret = octeontx_pko_channel_stop(nic->base_ochan);
437 	if (ret < 0) {
438 		octeontx_log_err("failed to stop channel %d VF%d %d %d",
439 			     nic->base_ochan, nic->port_id, nic->num_tx_queues,
440 			     ret);
441 		return;
442 	}
443 
444 	dev->tx_pkt_burst = NULL;
445 	dev->rx_pkt_burst = NULL;
446 }
447 
448 static void
449 octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
450 {
451 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
452 
453 	PMD_INIT_FUNC_TRACE();
454 	octeontx_port_promisc_set(nic, 1);
455 }
456 
457 static void
458 octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
459 {
460 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
461 
462 	PMD_INIT_FUNC_TRACE();
463 	octeontx_port_promisc_set(nic, 0);
464 }
465 
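/*
 * Publish the new link state with a single 64-bit compare-and-set so
 * readers never observe a partially updated rte_eth_link.
 */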
466 static inline int
467 octeontx_atomic_write_link_status(struct rte_eth_dev *dev,
468 				  struct rte_eth_link *link)
469 {
470 	struct rte_eth_link *dst = &dev->data->dev_link;
471 	struct rte_eth_link *src = link;
472 
473 	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
474 		*(uint64_t *)src) == 0)
475 		return -1;
476 
477 	return 0;
478 }
479 
480 static int
481 octeontx_port_link_status(struct octeontx_nic *nic)
482 {
483 	int res;
484 
485 	PMD_INIT_FUNC_TRACE();
486 	res = octeontx_bgx_port_link_status(nic->port_id);
487 	if (res < 0) {
488 		octeontx_log_err("failed to get port %d link status",
489 				nic->port_id);
490 		return res;
491 	}
492 
493 	nic->link_up = (uint8_t)res;
494 	octeontx_log_dbg("port %d link status %d", nic->port_id, nic->link_up);
495 
496 	return res;
497 }
498 
499 /*
500  * Return 0 means link status changed, -1 means not changed
501  */
502 static int
503 octeontx_dev_link_update(struct rte_eth_dev *dev,
504 			 int wait_to_complete __rte_unused)
505 {
506 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
507 	struct rte_eth_link link;
508 	int res;
509 
510 	res = 0;
511 	PMD_INIT_FUNC_TRACE();
512 
513 	res = octeontx_port_link_status(nic);
514 	if (res < 0) {
515 		octeontx_log_err("failed to request link status %d", res);
516 		return res;
517 	}
518 
519 	link.link_status = nic->link_up;
520 
521 	switch (nic->speed) {
522 	case OCTEONTX_LINK_SPEED_SGMII:
523 		link.link_speed = ETH_SPEED_NUM_1G;
524 		break;
525 
526 	case OCTEONTX_LINK_SPEED_XAUI:
527 		link.link_speed = ETH_SPEED_NUM_10G;
528 		break;
529 
530 	case OCTEONTX_LINK_SPEED_RXAUI:
531 	case OCTEONTX_LINK_SPEED_10G_R:
532 		link.link_speed = ETH_SPEED_NUM_10G;
533 		break;
534 	case OCTEONTX_LINK_SPEED_QSGMII:
535 		link.link_speed = ETH_SPEED_NUM_5G;
536 		break;
537 	case OCTEONTX_LINK_SPEED_40G_R:
538 		link.link_speed = ETH_SPEED_NUM_40G;
539 		break;
540 
541 	case OCTEONTX_LINK_SPEED_RESERVE1:
542 	case OCTEONTX_LINK_SPEED_RESERVE2:
543 	default:
544 		octeontx_log_err("incorrect link speed %d", nic->speed);
545 		break;
546 	}
547 
548 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
549 	link.link_autoneg = ETH_LINK_AUTONEG;
550 
551 	return octeontx_atomic_write_link_status(dev, &link);
552 }
553 
554 static int
555 octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
556 {
557 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
558 
559 	PMD_INIT_FUNC_TRACE();
560 	return octeontx_port_stats(nic, stats);
561 }
562 
563 static void
564 octeontx_dev_stats_reset(struct rte_eth_dev *dev)
565 {
566 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
567 
568 	PMD_INIT_FUNC_TRACE();
569 	octeontx_port_stats_clr(nic);
570 }
571 
572 static void
573 octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
574 					struct ether_addr *addr)
575 {
576 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
577 	int ret;
578 
579 	ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
580 	if (ret != 0)
581 		octeontx_log_err("failed to set MAC address on port %d",
582 				nic->port_id);
583 }
584 
585 static void
586 octeontx_dev_info(struct rte_eth_dev *dev,
587 		struct rte_eth_dev_info *dev_info)
588 {
589 	RTE_SET_USED(dev);
590 
591 	/* Autonegotiation may be disabled */
592 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
593 	dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
594 			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
595 			ETH_LINK_SPEED_40G;
596 
597 	dev_info->driver_name = RTE_STR(rte_octeontx_pmd);
598 	dev_info->max_mac_addrs = 1;
599 	dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
600 	dev_info->max_rx_queues = 1;
601 	dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
602 	dev_info->min_rx_bufsize = 0;
603 	dev_info->pci_dev = NULL;
604 
605 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
606 		.rx_free_thresh = 0,
607 		.rx_drop_en = 0,
608 	};
609 
610 	dev_info->default_txconf = (struct rte_eth_txconf) {
611 		.tx_free_thresh = 0,
612 		.txq_flags =
613 			ETH_TXQ_FLAGS_NOMULTSEGS |
614 			ETH_TXQ_FLAGS_NOOFFLOADS |
615 			ETH_TXQ_FLAGS_NOXSUMS,
616 	};
617 
618 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE;
619 }
620 
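/*
 * Callback for octeontx_pko_channel_query_dqs(): copy out the DQ LMT
 * line, I/O register and flow-control status addresses.
 */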
621 static void
622 octeontx_dq_info_getter(octeontx_dq_t *dq, void *out)
623 {
624 	((octeontx_dq_t *)out)->lmtline_va = dq->lmtline_va;
625 	((octeontx_dq_t *)out)->ioreg_va = dq->ioreg_va;
626 	((octeontx_dq_t *)out)->fc_status_va = dq->fc_status_va;
627 }
628 
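/*
 * Query the PKO DQ backing this Tx queue and mark the queue started;
 * on failure the port, channel and queue state are rolled back.
 */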
629 static int
630 octeontx_vf_start_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
631 				uint16_t qidx)
632 {
633 	struct octeontx_txq *txq;
634 	int res;
635 
636 	PMD_INIT_FUNC_TRACE();
637 
638 	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
639 		return 0;
640 
641 	txq = dev->data->tx_queues[qidx];
642 
643 	res = octeontx_pko_channel_query_dqs(nic->base_ochan,
644 						&txq->dq,
645 						sizeof(octeontx_dq_t),
646 						txq->queue_id,
647 						octeontx_dq_info_getter);
648 	if (res < 0) {
649 		res = -EFAULT;
650 		goto close_port;
651 	}
652 
653 	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
654 	return res;
655 
656 close_port:
657 	(void)octeontx_port_stop(nic);
658 	octeontx_pko_channel_stop(nic->base_ochan);
659 	octeontx_pko_channel_close(nic->base_ochan);
660 	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
661 	return res;
662 }
663 
664 static int
665 octeontx_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
666 {
667 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
668 
669 	PMD_INIT_FUNC_TRACE();
670 	qidx = qidx % PKO_VF_NUM_DQ;
671 	return octeontx_vf_start_tx_queue(dev, nic, qidx);
672 }
673 
674 static inline int
675 octeontx_vf_stop_tx_queue(struct rte_eth_dev *dev, struct octeontx_nic *nic,
676 			  uint16_t qidx)
677 {
678 	int ret = 0;
679 
680 	RTE_SET_USED(nic);
681 	PMD_INIT_FUNC_TRACE();
682 
683 	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
684 		return 0;
685 
686 	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
687 	return ret;
688 }
689 
690 static int
691 octeontx_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
692 {
693 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
694 
695 	PMD_INIT_FUNC_TRACE();
696 	qidx = qidx % PKO_VF_NUM_DQ;
697 
698 	return octeontx_vf_stop_tx_queue(dev, nic, qidx);
699 }
700 
701 static void
702 octeontx_dev_tx_queue_release(void *tx_queue)
703 {
704 	struct octeontx_txq *txq = tx_queue;
705 	int res;
706 
707 	PMD_INIT_FUNC_TRACE();
708 
709 	if (txq) {
710 		res = octeontx_dev_tx_queue_stop(txq->eth_dev, txq->queue_id);
711 		if (res < 0)
712 			octeontx_log_err("failed to stop tx_queue(%d)",
713 				   txq->queue_id);
714 
715 		rte_free(txq);
716 	}
717 }
718 
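/*
 * Allocate a Tx queue and bind it to PKO descriptor queue
 * (port_id * PKO_VF_NUM_DQ) + qidx, caching the DQ access addresses.
 */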
719 static int
720 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
721 			    uint16_t nb_desc, unsigned int socket_id,
722 			    const struct rte_eth_txconf *tx_conf)
723 {
724 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
725 	struct octeontx_txq *txq = NULL;
726 	uint16_t dq_num;
727 	int res = 0;
728 
729 	RTE_SET_USED(nb_desc);
730 	RTE_SET_USED(socket_id);
731 	RTE_SET_USED(tx_conf);
732 
733 	dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
734 
735 	/* Socket id check */
736 	if (socket_id != (unsigned int)SOCKET_ID_ANY &&
737 			socket_id != (unsigned int)nic->node)
738 		PMD_TX_LOG(INFO, "socket_id expected %d, configured %d",
739 						socket_id, nic->node);
740 
741 	/* Free memory prior to re-allocation if needed. */
742 	if (dev->data->tx_queues[qidx] != NULL) {
743 		PMD_TX_LOG(DEBUG, "freeing memory prior to re-allocation %d",
744 				qidx);
745 		octeontx_dev_tx_queue_release(dev->data->tx_queues[qidx]);
746 		dev->data->tx_queues[qidx] = NULL;
747 	}
748 
749 	/* Allocating tx queue data structure */
750 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
751 				 RTE_CACHE_LINE_SIZE, nic->node);
752 	if (txq == NULL) {
753 		octeontx_log_err("failed to allocate txq=%d", qidx);
754 		res = -ENOMEM;
755 		goto err;
756 	}
757 
758 	txq->eth_dev = dev;
759 	txq->queue_id = dq_num;
760 	dev->data->tx_queues[qidx] = txq;
761 	dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
762 
763 	res = octeontx_pko_channel_query_dqs(nic->base_ochan,
764 						&txq->dq,
765 						sizeof(octeontx_dq_t),
766 						txq->queue_id,
767 						octeontx_dq_info_getter);
768 	if (res < 0) {
769 		res = -EFAULT;
770 		goto err;
771 	}
772 
773 	PMD_TX_LOG(DEBUG, "[%d]:[%d] txq=%p nb_desc=%d lmtline=%p ioreg_va=%p fc_status_va=%p",
774 			qidx, txq->queue_id, txq, nb_desc, txq->dq.lmtline_va,
775 			txq->dq.ioreg_va,
776 			txq->dq.fc_status_va);
777 
778 	return res;
779 
780 err:
781 	if (txq)
782 		rte_free(txq);
783 
784 	return res;
785 }
786 
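/*
 * Allocate an Rx queue. The mempool must use the octeontx_fpavf ops.
 * On the first queue of a port the PKI pktbuf layout, hash fields and
 * a QoS entry (pointing at the FPA gaura and the port's event queue)
 * are programmed, so received packets are delivered via the eventdev.
 */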
787 static int
788 octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
789 				uint16_t nb_desc, unsigned int socket_id,
790 				const struct rte_eth_rxconf *rx_conf,
791 				struct rte_mempool *mb_pool)
792 {
793 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
794 	struct rte_mempool_ops *mp_ops = NULL;
795 	struct octeontx_rxq *rxq = NULL;
796 	pki_pktbuf_cfg_t pktbuf_conf;
797 	pki_hash_cfg_t pki_hash;
798 	pki_qos_cfg_t pki_qos;
799 	uintptr_t pool;
800 	int ret, port;
801 	uint8_t gaura;
802 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
803 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
804 
805 	RTE_SET_USED(nb_desc);
806 
807 	memset(&pktbuf_conf, 0, sizeof(pktbuf_conf));
808 	memset(&pki_hash, 0, sizeof(pki_hash));
809 	memset(&pki_qos, 0, sizeof(pki_qos));
810 
811 	mp_ops = rte_mempool_get_ops(mb_pool->ops_index);
812 	if (strcmp(mp_ops->name, "octeontx_fpavf")) {
813 		octeontx_log_err("failed to find octeontx_fpavf mempool");
814 		return -ENOTSUP;
815 	}
816 
817 	/* Handle forbidden configurations */
818 	if (nic->pki.classifier_enable) {
819 		octeontx_log_err("cannot setup queue %d. "
820 					"Classifier option unsupported", qidx);
821 		return -EINVAL;
822 	}
823 
824 	port = nic->port_id;
825 
826 	/* Rx deferred start is not supported */
827 	if (rx_conf->rx_deferred_start) {
828 		octeontx_log_err("rx deferred start not supported");
829 		return -EINVAL;
830 	}
831 
832 	/* Verify queue index */
833 	if (qidx >= dev->data->nb_rx_queues) {
834 		octeontx_log_err("QID %d not supported (0 - %d available)",
835 				qidx, (dev->data->nb_rx_queues - 1));
836 		return -ENOTSUP;
837 	}
838 
839 	/* Socket id check */
840 	if (socket_id != (unsigned int)SOCKET_ID_ANY &&
841 			socket_id != (unsigned int)nic->node)
842 		PMD_RX_LOG(INFO, "socket_id expected %d, configured %d",
843 						socket_id, nic->node);
844 
845 	/* Allocating rx queue data structure */
846 	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct octeontx_rxq),
847 				 RTE_CACHE_LINE_SIZE, nic->node);
848 	if (rxq == NULL) {
849 		octeontx_log_err("failed to allocate rxq=%d", qidx);
850 		return -ENOMEM;
851 	}
852 
853 	if (!nic->pki.initialized) {
854 		pktbuf_conf.port_type = 0;
855 		pki_hash.port_type = 0;
856 		pki_qos.port_type = 0;
857 
858 		pktbuf_conf.mmask.f_wqe_skip = 1;
859 		pktbuf_conf.mmask.f_first_skip = 1;
860 		pktbuf_conf.mmask.f_later_skip = 1;
861 		pktbuf_conf.mmask.f_mbuff_size = 1;
862 		pktbuf_conf.mmask.f_cache_mode = 1;
863 
864 		pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
865 		pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
866 		pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
867 		pktbuf_conf.mbuff_size = (mb_pool->elt_size -
868 					RTE_PKTMBUF_HEADROOM -
869 					sizeof(struct rte_mbuf));
870 
871 		pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
872 
873 		ret = octeontx_pki_port_pktbuf_config(port, &pktbuf_conf);
874 		if (ret != 0) {
875 			octeontx_log_err("fail to configure pktbuf for port %d",
876 					port);
877 			rte_free(rxq);
878 			return ret;
879 		}
880 		PMD_RX_LOG(DEBUG, "Port %d Rx pktbuf configured:\n"
881 				"\tmbuf_size:\t0x%0x\n"
882 				"\twqe_skip:\t0x%0x\n"
883 				"\tfirst_skip:\t0x%0x\n"
884 				"\tlater_skip:\t0x%0x\n"
885 				"\tcache_mode:\t%s\n",
886 				port,
887 				pktbuf_conf.mbuff_size,
888 				pktbuf_conf.wqe_skip,
889 				pktbuf_conf.first_skip,
890 				pktbuf_conf.later_skip,
891 				(pktbuf_conf.cache_mode ==
892 						PKI_OPC_MODE_STT) ?
893 				"STT" :
894 				(pktbuf_conf.cache_mode ==
895 						PKI_OPC_MODE_STF) ?
896 				"STF" :
897 				(pktbuf_conf.cache_mode ==
898 						PKI_OPC_MODE_STF1_STT) ?
899 				"STF1_STT" : "STF2_STT");
900 
901 		if (nic->pki.hash_enable) {
902 			pki_hash.tag_dlc = 1;
903 			pki_hash.tag_slc = 1;
904 			pki_hash.tag_dlf = 1;
905 			pki_hash.tag_slf = 1;
906 			pki_hash.tag_prt = 1;
907 			octeontx_pki_port_hash_config(port, &pki_hash);
908 		}
909 
910 		pool = (uintptr_t)mb_pool->pool_id;
911 
912 		/* Get the gpool Id */
913 		gaura = octeontx_fpa_bufpool_gpool(pool);
914 
915 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
916 		pki_qos.num_entry = 1;
917 		pki_qos.drop_policy = 0;
918 		pki_qos.tag_type = 0L;
919 		pki_qos.qos_entry[0].port_add = 0;
920 		pki_qos.qos_entry[0].gaura = gaura;
921 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
922 		pki_qos.qos_entry[0].ggrp_bad = ev_queues;
923 		pki_qos.qos_entry[0].grptag_bad = 0;
924 		pki_qos.qos_entry[0].grptag_ok = 0;
925 
926 		ret = octeontx_pki_port_create_qos(port, &pki_qos);
927 		if (ret < 0) {
928 			octeontx_log_err("failed to create QOS port=%d, q=%d",
929 					port, qidx);
930 			rte_free(rxq);
931 			return ret;
932 		}
933 		nic->pki.initialized = true;
934 	}
935 
936 	rxq->port_id = nic->port_id;
937 	rxq->eth_dev = dev;
938 	rxq->queue_id = qidx;
939 	rxq->evdev = nic->evdev;
940 	rxq->ev_queues = ev_queues;
941 	rxq->ev_ports = ev_ports;
942 
943 	dev->data->rx_queues[qidx] = rxq;
944 	dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
945 	return 0;
946 }
947 
948 static void
949 octeontx_dev_rx_queue_release(void *rxq)
950 {
951 	rte_free(rxq);
952 }
953 
954 static const uint32_t *
955 octeontx_dev_supported_ptypes_get(struct rte_eth_dev *dev)
956 {
957 	static const uint32_t ptypes[] = {
958 		RTE_PTYPE_L3_IPV4,
959 		RTE_PTYPE_L3_IPV4_EXT,
960 		RTE_PTYPE_L3_IPV6,
961 		RTE_PTYPE_L3_IPV6_EXT,
962 		RTE_PTYPE_L4_TCP,
963 		RTE_PTYPE_L4_UDP,
964 		RTE_PTYPE_L4_FRAG,
965 		RTE_PTYPE_UNKNOWN
966 	};
967 
968 	if (dev->rx_pkt_burst == octeontx_recv_pkts)
969 		return ptypes;
970 
971 	return NULL;
972 }
973 
974 /* Initialize and register driver with DPDK Application */
975 static const struct eth_dev_ops octeontx_dev_ops = {
976 	.dev_configure		 = octeontx_dev_configure,
977 	.dev_infos_get		 = octeontx_dev_info,
978 	.dev_close		 = octeontx_dev_close,
979 	.dev_start		 = octeontx_dev_start,
980 	.dev_stop		 = octeontx_dev_stop,
981 	.promiscuous_enable	 = octeontx_dev_promisc_enable,
982 	.promiscuous_disable	 = octeontx_dev_promisc_disable,
983 	.link_update		 = octeontx_dev_link_update,
984 	.stats_get		 = octeontx_dev_stats_get,
985 	.stats_reset		 = octeontx_dev_stats_reset,
986 	.mac_addr_set		 = octeontx_dev_default_mac_addr_set,
987 	.tx_queue_start		 = octeontx_dev_tx_queue_start,
988 	.tx_queue_stop		 = octeontx_dev_tx_queue_stop,
989 	.tx_queue_setup		 = octeontx_dev_tx_queue_setup,
990 	.tx_queue_release	 = octeontx_dev_tx_queue_release,
991 	.rx_queue_setup		 = octeontx_dev_rx_queue_setup,
992 	.rx_queue_release	 = octeontx_dev_rx_queue_release,
993 	.dev_supported_ptypes_get = octeontx_dev_supported_ptypes_get,
994 };
995 
996 /* Create Ethdev interface per BGX LMAC ports */
997 static int
998 octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
999 			int socket_id)
1000 {
1001 	int res;
1002 	char octtx_name[OCTEONTX_MAX_NAME_LEN];
1003 	struct octeontx_nic *nic = NULL;
1004 	struct rte_eth_dev *eth_dev = NULL;
1005 	struct rte_eth_dev_data *data = NULL;
1006 	const char *name = rte_vdev_device_name(dev);
1007 
1008 	PMD_INIT_FUNC_TRACE();
1009 
1010 	sprintf(octtx_name, "%s_%d", name, port);
1011 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1012 		eth_dev = rte_eth_dev_attach_secondary(octtx_name);
1013 		if (eth_dev == NULL)
1014 			return -ENODEV;
1015 
1016 		eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
1017 		eth_dev->rx_pkt_burst = octeontx_recv_pkts;
1018 		return 0;
1019 	}
1020 
1021 	data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
1022 	if (data == NULL) {
1023 		octeontx_log_err("failed to allocate devdata");
1024 		res = -ENOMEM;
1025 		goto err;
1026 	}
1027 
1028 	nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
1029 	if (nic == NULL) {
1030 		octeontx_log_err("failed to allocate nic structure");
1031 		res = -ENOMEM;
1032 		goto err;
1033 	}
1034 
1035 	nic->port_id = port;
1036 	nic->evdev = evdev;
1037 
1038 	res = octeontx_port_open(nic);
1039 	if (res < 0)
1040 		goto err;
1041 
1042 	/* Rx side port configuration */
1043 	res = octeontx_pki_port_open(port);
1044 	if (res != 0) {
1045 		octeontx_log_err("failed to open PKI port %d", port);
1046 		res = -ENODEV;
1047 		goto err;
1048 	}
1049 
1050 	/* Reserve an ethdev entry */
1051 	eth_dev = rte_eth_dev_allocate(octtx_name);
1052 	if (eth_dev == NULL) {
1053 		octeontx_log_err("failed to allocate rte_eth_dev");
1054 		res = -ENOMEM;
1055 		goto err;
1056 	}
1057 
1058 	eth_dev->device = &dev->device;
1059 	eth_dev->intr_handle = NULL;
1060 	eth_dev->data->kdrv = RTE_KDRV_NONE;
1061 	eth_dev->data->numa_node = dev->device.numa_node;
1062 
1063 	rte_memcpy(data, (eth_dev)->data, sizeof(*data));
1064 	data->dev_private = nic;
1065 
1066 	data->port_id = eth_dev->data->port_id;
1067 	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
1068 
1069 	nic->ev_queues = 1;
1070 	nic->ev_ports = 1;
1071 
1072 	data->dev_link.link_status = ETH_LINK_DOWN;
1073 	data->dev_started = 0;
1074 	data->promiscuous = 0;
1075 	data->all_multicast = 0;
1076 	data->scattered_rx = 0;
1077 
1078 	data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
1079 							socket_id);
1080 	if (data->mac_addrs == NULL) {
1081 		octeontx_log_err("failed to allocate memory for mac_addrs");
1082 		res = -ENOMEM;
1083 		goto err;
1084 	}
1085 
1086 	eth_dev->data = data;
1087 	eth_dev->dev_ops = &octeontx_dev_ops;
1088 
1089 	/* Finally save ethdev pointer to the NIC structure */
1090 	nic->dev = eth_dev;
1091 
1092 	if (nic->port_id != data->port_id) {
1093 		octeontx_log_err("eth_dev port_id (%d) differs from original port_id (%d)",
1094 				data->port_id, nic->port_id);
1095 		res = -EINVAL;
1096 		goto err;
1097 	}
1098 
1099 	/* Update port_id mac to eth_dev */
1100 	memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
1101 
1102 	PMD_INIT_LOG(DEBUG, "ethdev info: ");
1103 	PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
1104 				nic->port_id, nic->port_ena,
1105 				nic->base_ochan, nic->num_ochans,
1106 				nic->num_tx_queues);
1107 	PMD_INIT_LOG(DEBUG, "speed %d mtu %d", nic->speed, nic->mtu);
1108 
1109 	return data->port_id;
1110 
1111 err:
1112 	if (nic)
1113 		octeontx_port_close(nic);
1114 
1115 	if (eth_dev != NULL) {
1116 		rte_free(eth_dev->data->mac_addrs);
1117 		rte_free(data);
1118 		rte_free(nic);
1119 		rte_eth_dev_release_port(eth_dev);
1120 	}
1121 
1122 	return res;
1123 }
1124 
1125 /* Uninitialize the octeontx device */
1126 static int
1127 octeontx_remove(struct rte_vdev_device *dev)
1128 {
1129 	char octtx_name[OCTEONTX_MAX_NAME_LEN];
1130 	struct rte_eth_dev *eth_dev = NULL;
1131 	struct octeontx_nic *nic = NULL;
1132 	int i;
1133 
1134 	if (dev == NULL)
1135 		return -EINVAL;
1136 
1137 	for (i = 0; i < OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT; i++) {
1138 		sprintf(octtx_name, "eth_octeontx_%d", i);
1139 
1140 		/* reserve an ethdev entry */
1141 		eth_dev = rte_eth_dev_allocated(octtx_name);
1142 		if (eth_dev == NULL)
1143 			return -ENODEV;
1144 
1145 		nic = octeontx_pmd_priv(eth_dev);
1146 		rte_event_dev_stop(nic->evdev);
1147 		PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
1148 
1149 		rte_event_dev_close(nic->evdev);
1150 		rte_free(eth_dev->data->mac_addrs);
1151 		rte_free(eth_dev->data->dev_private);
1152 		rte_free(eth_dev->data);
1153 		rte_eth_dev_release_port(eth_dev);
1154 	}
1155 
1156 	/* Free FC resource */
1157 	octeontx_pko_fc_free();
1158 
1159 	return 0;
1160 }
1161 
1162 /* Initialize octeontx device */
1163 static int
1164 octeontx_probe(struct rte_vdev_device *dev)
1165 {
1166 	const char *dev_name;
1167 	static int probe_once;
1168 	uint8_t socket_id, qlist;
1169 	int tx_vfcnt, port_id, evdev, qnum, pnum, res, i;
1170 	struct rte_event_dev_config dev_conf;
1171 	const char *eventdev_name = "event_octeontx";
1172 	struct rte_event_dev_info info;
1173 
1174 	struct octeontx_vdev_init_params init_params = {
1175 		OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
1176 	};
1177 
1178 	dev_name = rte_vdev_device_name(dev);
1179 	res = octeontx_parse_vdev_init_params(&init_params, dev);
1180 	if (res < 0)
1181 		return -EINVAL;
1182 
1183 	if (init_params.nr_port > OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT) {
1184 		octeontx_log_err("nr_port (%d) > max (%d)", init_params.nr_port,
1185 				OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT);
1186 		return -ENOTSUP;
1187 	}
1188 
1189 	PMD_INIT_LOG(DEBUG, "initializing %s pmd", dev_name);
1190 
1191 	socket_id = rte_socket_id();
1192 
1193 	tx_vfcnt = octeontx_pko_vf_count();
1194 
1195 	if (tx_vfcnt < init_params.nr_port) {
1196 		octeontx_log_err("not enough PKO VFs (%d) for the requested ports (%d)",
1197 				tx_vfcnt, init_params.nr_port);
1198 		return -EINVAL;
1199 	}
1200 	evdev = rte_event_dev_get_dev_id(eventdev_name);
1201 	if (evdev < 0) {
1202 		octeontx_log_err("eventdev %s not found", eventdev_name);
1203 		return -ENODEV;
1204 	}
1205 
1206 	res = rte_event_dev_info_get(evdev, &info);
1207 	if (res < 0) {
1208 		octeontx_log_err("failed to get eventdev info: err=%d", res);
1209 		return -EINVAL;
1210 	}
1211 
1212 	PMD_INIT_LOG(DEBUG, "max_queue %d max_port %d",
1213 			info.max_event_queues, info.max_event_ports);
1214 
1215 	if (octeontx_pko_init_fc(tx_vfcnt))
1216 		return -ENOMEM;
1217 
1218 	devconf_set_default_sane_values(&dev_conf, &info);
1219 	res = rte_event_dev_configure(evdev, &dev_conf);
1220 	if (res < 0)
1221 		goto parse_error;
1222 
1223 	rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
1224 			(uint32_t *)&pnum);
1225 	rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
1226 			(uint32_t *)&qnum);
1227 	if (pnum < qnum) {
1228 		octeontx_log_err("too few event ports (%d) for event_q(%d)",
1229 				pnum, qnum);
1230 		res = -EINVAL;
1231 		goto parse_error;
1232 	}
1233 	if (pnum > qnum) {
1234 		/*
1235 		 * We don't poll on event ports
1236 		 * that do not have any queues assigned.
1237 		 */
1238 		pnum = qnum;
1239 		PMD_INIT_LOG(INFO,
1240 			"reducing number of active event ports to %d", pnum);
1241 	}
1242 	for (i = 0; i < qnum; i++) {
1243 		res = rte_event_queue_setup(evdev, i, NULL);
1244 		if (res < 0) {
1245 			octeontx_log_err("failed to setup event_q(%d): res %d",
1246 					i, res);
1247 			goto parse_error;
1248 		}
1249 	}
1250 
1251 	for (i = 0; i < pnum; i++) {
1252 		res = rte_event_port_setup(evdev, i, NULL);
1253 		if (res < 0) {
1254 			res = -ENODEV;
1255 			octeontx_log_err("failed to setup ev port(%d) res=%d",
1256 						i, res);
1257 			goto parse_error;
1258 		}
1259 		/* Link one queue to one event port */
1260 		qlist = i;
1261 		res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
1262 		if (res < 0) {
1263 			res = -ENODEV;
1264 			octeontx_log_err("failed to link port (%d): res=%d",
1265 					i, res);
1266 			goto parse_error;
1267 		}
1268 	}
1269 
1270 	/* Create ethdev interface */
1271 	for (i = 0; i < init_params.nr_port; i++) {
1272 		port_id = octeontx_create(dev, i, evdev, socket_id);
1273 		if (port_id < 0) {
1274 			octeontx_log_err("failed to create device %s",
1275 					dev_name);
1276 			res = -ENODEV;
1277 			goto parse_error;
1278 		}
1279 
1280 		PMD_INIT_LOG(INFO, "created ethdev %s for port %d", dev_name,
1281 					port_id);
1282 	}
1283 
1284 	if (probe_once) {
1285 		octeontx_log_err("probing %s more than once is not supported", dev_name);
1286 		octeontx_remove(dev);
1287 		res = -ENOTSUP;
1288 		goto parse_error;
1289 	}
1290 	probe_once = 1;
1291 
1292 	return 0;
1293 
1294 parse_error:
1295 	octeontx_pko_fc_free();
1296 	return res;
1297 }
1298 
1299 static struct rte_vdev_driver octeontx_pmd_drv = {
1300 	.probe = octeontx_probe,
1301 	.remove = octeontx_remove,
1302 };
1303 
1304 RTE_PMD_REGISTER_VDEV(OCTEONTX_PMD, octeontx_pmd_drv);
1305 RTE_PMD_REGISTER_ALIAS(OCTEONTX_PMD, eth_octeontx);
1306 RTE_PMD_REGISTER_PARAM_STRING(OCTEONTX_PMD, "nr_port=<int> ");
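/*
 * Illustrative usage (not taken from this file): the PMD is a vdev, so
 * ports are created from EAL arguments, e.g.
 *
 *   testpmd --vdev='event_octeontx' --vdev='eth_octeontx,nr_port=2' -- -i
 *
 * octeontx_probe() looks up the "event_octeontx" eventdev and caps
 * nr_port at OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT.
 */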
1307