xref: /dpdk/lib/ethdev/rte_ethdev.c (revision 1f37cb2bb46b1fd403faa7c3bf8884e6a4dfde66)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <ctype.h>
6 #include <errno.h>
7 #include <inttypes.h>
8 #include <stdbool.h>
9 #include <stdint.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13 
14 #include <rte_log.h>
15 #include <rte_interrupts.h>
16 #include <rte_memcpy.h>
17 #include <rte_common.h>
18 #include <rte_mempool.h>
19 #include <rte_malloc.h>
20 #include <rte_mbuf.h>
21 #include <rte_errno.h>
22 #include <rte_spinlock.h>
23 #include <rte_string_fns.h>
24 #include <rte_class.h>
25 #include <rte_ether.h>
26 #include <rte_telemetry.h>
27 
28 #include "rte_ethdev_trace.h"
29 #include "rte_ethdev.h"
30 #include "ethdev_driver.h"
31 #include "ethdev_profile.h"
32 #include "ethdev_private.h"
33 #include "sff_telemetry.h"
34 
35 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
36 
37 /* public fast-path API */
38 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
39 
40 /* spinlock for add/remove Rx callbacks */
41 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
42 
43 /* spinlock for add/remove Tx callbacks */
44 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
45 
46 /* store statistics names and its offset in stats structure  */
47 struct rte_eth_xstats_name_off {
48 	char name[RTE_ETH_XSTATS_NAME_SIZE];
49 	unsigned offset;
50 };
51 
52 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
53 	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
54 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
55 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
56 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
57 	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
58 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
59 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
60 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
61 		rx_nombuf)},
62 };
63 
64 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
65 
66 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
67 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
68 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
69 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
70 };
71 
72 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
73 
74 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
75 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
76 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
77 };
78 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
79 
80 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
81 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
82 
83 static const struct {
84 	uint64_t offload;
85 	const char *name;
86 } eth_dev_rx_offload_names[] = {
87 	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
88 	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
89 	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
90 	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
91 	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
92 	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
93 	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
94 	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
95 	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
96 	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
97 	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
98 	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
99 	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
100 	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
101 	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
102 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
103 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
104 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
105 	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
106 };
107 
108 #undef RTE_RX_OFFLOAD_BIT2STR
109 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
110 
111 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
112 	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
113 
114 static const struct {
115 	uint64_t offload;
116 	const char *name;
117 } eth_dev_tx_offload_names[] = {
118 	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
119 	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
120 	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
121 	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
122 	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
123 	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
124 	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
125 	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
126 	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
127 	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
128 	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
129 	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
130 	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
131 	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
132 	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
133 	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
134 	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
135 	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
136 	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
137 	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
138 	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
139 	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
140 };
141 
142 #undef RTE_TX_OFFLOAD_BIT2STR
143 
144 static const struct {
145 	uint64_t offload;
146 	const char *name;
147 } rte_eth_dev_capa_names[] = {
148 	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
149 	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
150 	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
151 	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
152 	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
153 };
154 
155 enum {
156 	STAT_QMAP_TX = 0,
157 	STAT_QMAP_RX
158 };
159 
160 int
161 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
162 {
163 	int ret;
164 	struct rte_devargs devargs;
165 	const char *bus_param_key;
166 	char *bus_str = NULL;
167 	char *cls_str = NULL;
168 	int str_size;
169 
170 	if (iter == NULL) {
171 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
172 		return -EINVAL;
173 	}
174 
175 	if (devargs_str == NULL) {
176 		RTE_ETHDEV_LOG(ERR,
177 			"Cannot initialize iterator from NULL device description string\n");
178 		return -EINVAL;
179 	}
180 
181 	memset(iter, 0, sizeof(*iter));
182 	memset(&devargs, 0, sizeof(devargs));
183 
184 	/*
185 	 * The devargs string may use various syntaxes:
186 	 *   - 0000:08:00.0,representor=[1-3]
187 	 *   - pci:0000:06:00.0,representor=[0,5]
188 	 *   - class=eth,mac=00:11:22:33:44:55
189 	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
190 	 */
191 
192 	/*
193 	 * Handle pure class filter (i.e. without any bus-level argument),
194 	 * from future new syntax.
195 	 * rte_devargs_parse() is not yet supporting the new syntax,
196 	 * that's why this simple case is temporarily parsed here.
197 	 */
198 #define iter_anybus_str "class=eth,"
199 	if (strncmp(devargs_str, iter_anybus_str,
200 			strlen(iter_anybus_str)) == 0) {
201 		iter->cls_str = devargs_str + strlen(iter_anybus_str);
202 		goto end;
203 	}
204 
205 	/* Split bus, device and parameters. */
206 	ret = rte_devargs_parse(&devargs, devargs_str);
207 	if (ret != 0)
208 		goto error;
209 
210 	/*
211 	 * Assume parameters of old syntax can match only at ethdev level.
212 	 * Extra parameters will be ignored, thanks to "+" prefix.
213 	 */
214 	str_size = strlen(devargs.args) + 2;
215 	cls_str = malloc(str_size);
216 	if (cls_str == NULL) {
217 		ret = -ENOMEM;
218 		goto error;
219 	}
220 	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
221 	if (ret != str_size - 1) {
222 		ret = -EINVAL;
223 		goto error;
224 	}
225 	iter->cls_str = cls_str;
226 
227 	iter->bus = devargs.bus;
228 	if (iter->bus->dev_iterate == NULL) {
229 		ret = -ENOTSUP;
230 		goto error;
231 	}
232 
233 	/* Convert bus args to new syntax for use with new API dev_iterate. */
234 	if ((strcmp(iter->bus->name, "vdev") == 0) ||
235 		(strcmp(iter->bus->name, "fslmc") == 0) ||
236 		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
237 		bus_param_key = "name";
238 	} else if (strcmp(iter->bus->name, "pci") == 0) {
239 		bus_param_key = "addr";
240 	} else {
241 		ret = -ENOTSUP;
242 		goto error;
243 	}
244 	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
245 	bus_str = malloc(str_size);
246 	if (bus_str == NULL) {
247 		ret = -ENOMEM;
248 		goto error;
249 	}
250 	ret = snprintf(bus_str, str_size, "%s=%s",
251 			bus_param_key, devargs.name);
252 	if (ret != str_size - 1) {
253 		ret = -EINVAL;
254 		goto error;
255 	}
256 	iter->bus_str = bus_str;
257 
258 end:
259 	iter->cls = rte_class_find_by_name("eth");
260 	rte_devargs_reset(&devargs);
261 	return 0;
262 
263 error:
264 	if (ret == -ENOTSUP)
265 		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
266 				iter->bus->name);
267 	rte_devargs_reset(&devargs);
268 	free(bus_str);
269 	free(cls_str);
270 	return ret;
271 }
272 
273 uint16_t
274 rte_eth_iterator_next(struct rte_dev_iterator *iter)
275 {
276 	if (iter == NULL) {
277 		RTE_ETHDEV_LOG(ERR,
278 			"Cannot get next device from NULL iterator\n");
279 		return RTE_MAX_ETHPORTS;
280 	}
281 
282 	if (iter->cls == NULL) /* invalid ethdev iterator */
283 		return RTE_MAX_ETHPORTS;
284 
285 	do { /* loop to try all matching rte_device */
286 		/* If not pure ethdev filter and */
287 		if (iter->bus != NULL &&
288 				/* not in middle of rte_eth_dev iteration, */
289 				iter->class_device == NULL) {
290 			/* get next rte_device to try. */
291 			iter->device = iter->bus->dev_iterate(
292 					iter->device, iter->bus_str, iter);
293 			if (iter->device == NULL)
294 				break; /* no more rte_device candidate */
295 		}
296 		/* A device is matching bus part, need to check ethdev part. */
297 		iter->class_device = iter->cls->dev_iterate(
298 				iter->class_device, iter->cls_str, iter);
299 		if (iter->class_device != NULL)
300 			return eth_dev_to_id(iter->class_device); /* match */
301 	} while (iter->bus != NULL); /* need to try next rte_device */
302 
303 	/* No more ethdev port to iterate. */
304 	rte_eth_iterator_cleanup(iter);
305 	return RTE_MAX_ETHPORTS;
306 }
307 
308 void
309 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
310 {
311 	if (iter == NULL) {
312 		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
313 		return;
314 	}
315 
316 	if (iter->bus_str == NULL)
317 		return; /* nothing to free in pure class filter */
318 	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
319 	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
320 	memset(iter, 0, sizeof(*iter));
321 }
322 
323 uint16_t
324 rte_eth_find_next(uint16_t port_id)
325 {
326 	while (port_id < RTE_MAX_ETHPORTS &&
327 			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
328 		port_id++;
329 
330 	if (port_id >= RTE_MAX_ETHPORTS)
331 		return RTE_MAX_ETHPORTS;
332 
333 	return port_id;
334 }
335 
336 /*
337  * Macro to iterate over all valid ports for internal usage.
338  * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
339  */
340 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
341 	for (port_id = rte_eth_find_next(0); \
342 	     port_id < RTE_MAX_ETHPORTS; \
343 	     port_id = rte_eth_find_next(port_id + 1))
344 
345 uint16_t
346 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
347 {
348 	port_id = rte_eth_find_next(port_id);
349 	while (port_id < RTE_MAX_ETHPORTS &&
350 			rte_eth_devices[port_id].device != parent)
351 		port_id = rte_eth_find_next(port_id + 1);
352 
353 	return port_id;
354 }
355 
356 uint16_t
357 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
358 {
359 	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
360 	return rte_eth_find_next_of(port_id,
361 			rte_eth_devices[ref_port_id].device);
362 }
363 
364 static bool
365 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
366 {
367 	return ethdev->data->name[0] != '\0';
368 }
369 
370 int
371 rte_eth_dev_is_valid_port(uint16_t port_id)
372 {
373 	if (port_id >= RTE_MAX_ETHPORTS ||
374 	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
375 		return 0;
376 	else
377 		return 1;
378 }
379 
380 static int
381 eth_is_valid_owner_id(uint64_t owner_id)
382 {
383 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
384 	    eth_dev_shared_data->next_owner_id <= owner_id)
385 		return 0;
386 	return 1;
387 }
388 
389 uint64_t
390 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
391 {
392 	port_id = rte_eth_find_next(port_id);
393 	while (port_id < RTE_MAX_ETHPORTS &&
394 			rte_eth_devices[port_id].data->owner.id != owner_id)
395 		port_id = rte_eth_find_next(port_id + 1);
396 
397 	return port_id;
398 }
399 
400 int
401 rte_eth_dev_owner_new(uint64_t *owner_id)
402 {
403 	if (owner_id == NULL) {
404 		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
405 		return -EINVAL;
406 	}
407 
408 	eth_dev_shared_data_prepare();
409 
410 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
411 
412 	*owner_id = eth_dev_shared_data->next_owner_id++;
413 
414 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
415 	return 0;
416 }
417 
418 static int
419 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
420 		       const struct rte_eth_dev_owner *new_owner)
421 {
422 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
423 	struct rte_eth_dev_owner *port_owner;
424 
425 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
426 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
427 			port_id);
428 		return -ENODEV;
429 	}
430 
431 	if (new_owner == NULL) {
432 		RTE_ETHDEV_LOG(ERR,
433 			"Cannot set ethdev port %u owner from NULL owner\n",
434 			port_id);
435 		return -EINVAL;
436 	}
437 
438 	if (!eth_is_valid_owner_id(new_owner->id) &&
439 	    !eth_is_valid_owner_id(old_owner_id)) {
440 		RTE_ETHDEV_LOG(ERR,
441 			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
442 		       old_owner_id, new_owner->id);
443 		return -EINVAL;
444 	}
445 
446 	port_owner = &rte_eth_devices[port_id].data->owner;
447 	if (port_owner->id != old_owner_id) {
448 		RTE_ETHDEV_LOG(ERR,
449 			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
450 			port_id, port_owner->name, port_owner->id);
451 		return -EPERM;
452 	}
453 
454 	/* can not truncate (same structure) */
455 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
456 
457 	port_owner->id = new_owner->id;
458 
459 	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
460 		port_id, new_owner->name, new_owner->id);
461 
462 	return 0;
463 }
464 
465 int
466 rte_eth_dev_owner_set(const uint16_t port_id,
467 		      const struct rte_eth_dev_owner *owner)
468 {
469 	int ret;
470 
471 	eth_dev_shared_data_prepare();
472 
473 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
474 
475 	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
476 
477 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
478 	return ret;
479 }
480 
481 int
482 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
483 {
484 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
485 			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
486 	int ret;
487 
488 	eth_dev_shared_data_prepare();
489 
490 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
491 
492 	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
493 
494 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
495 	return ret;
496 }
497 
498 int
499 rte_eth_dev_owner_delete(const uint64_t owner_id)
500 {
501 	uint16_t port_id;
502 	int ret = 0;
503 
504 	eth_dev_shared_data_prepare();
505 
506 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
507 
508 	if (eth_is_valid_owner_id(owner_id)) {
509 		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
510 			struct rte_eth_dev_data *data =
511 				rte_eth_devices[port_id].data;
512 			if (data != NULL && data->owner.id == owner_id)
513 				memset(&data->owner, 0,
514 				       sizeof(struct rte_eth_dev_owner));
515 		}
516 		RTE_ETHDEV_LOG(NOTICE,
517 			"All port owners owned by %016"PRIx64" identifier have removed\n",
518 			owner_id);
519 	} else {
520 		RTE_ETHDEV_LOG(ERR,
521 			       "Invalid owner ID=%016"PRIx64"\n",
522 			       owner_id);
523 		ret = -EINVAL;
524 	}
525 
526 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
527 
528 	return ret;
529 }
530 
531 int
532 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
533 {
534 	struct rte_eth_dev *ethdev;
535 
536 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
537 	ethdev = &rte_eth_devices[port_id];
538 
539 	if (!eth_dev_is_allocated(ethdev)) {
540 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
541 			port_id);
542 		return -ENODEV;
543 	}
544 
545 	if (owner == NULL) {
546 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
547 			port_id);
548 		return -EINVAL;
549 	}
550 
551 	eth_dev_shared_data_prepare();
552 
553 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
554 	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
555 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
556 
557 	return 0;
558 }
559 
560 int
561 rte_eth_dev_socket_id(uint16_t port_id)
562 {
563 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
564 	return rte_eth_devices[port_id].data->numa_node;
565 }
566 
567 void *
568 rte_eth_dev_get_sec_ctx(uint16_t port_id)
569 {
570 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
571 	return rte_eth_devices[port_id].security_ctx;
572 }
573 
574 uint16_t
575 rte_eth_dev_count_avail(void)
576 {
577 	uint16_t p;
578 	uint16_t count;
579 
580 	count = 0;
581 
582 	RTE_ETH_FOREACH_DEV(p)
583 		count++;
584 
585 	return count;
586 }
587 
588 uint16_t
589 rte_eth_dev_count_total(void)
590 {
591 	uint16_t port, count = 0;
592 
593 	RTE_ETH_FOREACH_VALID_DEV(port)
594 		count++;
595 
596 	return count;
597 }
598 
599 int
600 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
601 {
602 	char *tmp;
603 
604 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
605 
606 	if (name == NULL) {
607 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
608 			port_id);
609 		return -EINVAL;
610 	}
611 
612 	/* shouldn't check 'rte_eth_devices[i].data',
613 	 * because it might be overwritten by VDEV PMD */
614 	tmp = eth_dev_shared_data->data[port_id].name;
615 	strcpy(name, tmp);
616 	return 0;
617 }
618 
619 int
620 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
621 {
622 	uint16_t pid;
623 
624 	if (name == NULL) {
625 		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
626 		return -EINVAL;
627 	}
628 
629 	if (port_id == NULL) {
630 		RTE_ETHDEV_LOG(ERR,
631 			"Cannot get port ID to NULL for %s\n", name);
632 		return -EINVAL;
633 	}
634 
635 	RTE_ETH_FOREACH_VALID_DEV(pid)
636 		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
637 			*port_id = pid;
638 			return 0;
639 		}
640 
641 	return -ENODEV;
642 }
643 
644 static int
645 eth_err(uint16_t port_id, int ret)
646 {
647 	if (ret == 0)
648 		return 0;
649 	if (rte_eth_dev_is_removed(port_id))
650 		return -EIO;
651 	return ret;
652 }
653 
654 static int
655 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
656 {
657 	uint16_t port_id;
658 
659 	if (rx_queue_id >= dev->data->nb_rx_queues) {
660 		port_id = dev->data->port_id;
661 		RTE_ETHDEV_LOG(ERR,
662 			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
663 			       rx_queue_id, port_id);
664 		return -EINVAL;
665 	}
666 
667 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
668 		port_id = dev->data->port_id;
669 		RTE_ETHDEV_LOG(ERR,
670 			       "Queue %u of device with port_id=%u has not been setup\n",
671 			       rx_queue_id, port_id);
672 		return -EINVAL;
673 	}
674 
675 	return 0;
676 }
677 
678 static int
679 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
680 {
681 	uint16_t port_id;
682 
683 	if (tx_queue_id >= dev->data->nb_tx_queues) {
684 		port_id = dev->data->port_id;
685 		RTE_ETHDEV_LOG(ERR,
686 			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
687 			       tx_queue_id, port_id);
688 		return -EINVAL;
689 	}
690 
691 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
692 		port_id = dev->data->port_id;
693 		RTE_ETHDEV_LOG(ERR,
694 			       "Queue %u of device with port_id=%u has not been setup\n",
695 			       tx_queue_id, port_id);
696 		return -EINVAL;
697 	}
698 
699 	return 0;
700 }
701 
702 int
703 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
704 {
705 	struct rte_eth_dev *dev;
706 	int ret;
707 
708 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
709 	dev = &rte_eth_devices[port_id];
710 
711 	if (!dev->data->dev_started) {
712 		RTE_ETHDEV_LOG(ERR,
713 			"Port %u must be started before start any queue\n",
714 			port_id);
715 		return -EINVAL;
716 	}
717 
718 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
719 	if (ret != 0)
720 		return ret;
721 
722 	if (*dev->dev_ops->rx_queue_start == NULL)
723 		return -ENOTSUP;
724 
725 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
726 		RTE_ETHDEV_LOG(INFO,
727 			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
728 			rx_queue_id, port_id);
729 		return -EINVAL;
730 	}
731 
732 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
733 		RTE_ETHDEV_LOG(INFO,
734 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
735 			rx_queue_id, port_id);
736 		return 0;
737 	}
738 
739 	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
740 }
741 
742 int
743 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
744 {
745 	struct rte_eth_dev *dev;
746 	int ret;
747 
748 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
749 	dev = &rte_eth_devices[port_id];
750 
751 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
752 	if (ret != 0)
753 		return ret;
754 
755 	if (*dev->dev_ops->rx_queue_stop == NULL)
756 		return -ENOTSUP;
757 
758 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
759 		RTE_ETHDEV_LOG(INFO,
760 			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
761 			rx_queue_id, port_id);
762 		return -EINVAL;
763 	}
764 
765 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
766 		RTE_ETHDEV_LOG(INFO,
767 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
768 			rx_queue_id, port_id);
769 		return 0;
770 	}
771 
772 	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
773 }
774 
775 int
776 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
777 {
778 	struct rte_eth_dev *dev;
779 	int ret;
780 
781 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
782 	dev = &rte_eth_devices[port_id];
783 
784 	if (!dev->data->dev_started) {
785 		RTE_ETHDEV_LOG(ERR,
786 			"Port %u must be started before start any queue\n",
787 			port_id);
788 		return -EINVAL;
789 	}
790 
791 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
792 	if (ret != 0)
793 		return ret;
794 
795 	if (*dev->dev_ops->tx_queue_start == NULL)
796 		return -ENOTSUP;
797 
798 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
799 		RTE_ETHDEV_LOG(INFO,
800 			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
801 			tx_queue_id, port_id);
802 		return -EINVAL;
803 	}
804 
805 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
806 		RTE_ETHDEV_LOG(INFO,
807 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
808 			tx_queue_id, port_id);
809 		return 0;
810 	}
811 
812 	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
813 }
814 
815 int
816 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
817 {
818 	struct rte_eth_dev *dev;
819 	int ret;
820 
821 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
822 	dev = &rte_eth_devices[port_id];
823 
824 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
825 	if (ret != 0)
826 		return ret;
827 
828 	if (*dev->dev_ops->tx_queue_stop == NULL)
829 		return -ENOTSUP;
830 
831 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
832 		RTE_ETHDEV_LOG(INFO,
833 			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
834 			tx_queue_id, port_id);
835 		return -EINVAL;
836 	}
837 
838 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
839 		RTE_ETHDEV_LOG(INFO,
840 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
841 			tx_queue_id, port_id);
842 		return 0;
843 	}
844 
845 	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
846 }
847 
848 uint32_t
849 rte_eth_speed_bitflag(uint32_t speed, int duplex)
850 {
851 	switch (speed) {
852 	case RTE_ETH_SPEED_NUM_10M:
853 		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
854 	case RTE_ETH_SPEED_NUM_100M:
855 		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
856 	case RTE_ETH_SPEED_NUM_1G:
857 		return RTE_ETH_LINK_SPEED_1G;
858 	case RTE_ETH_SPEED_NUM_2_5G:
859 		return RTE_ETH_LINK_SPEED_2_5G;
860 	case RTE_ETH_SPEED_NUM_5G:
861 		return RTE_ETH_LINK_SPEED_5G;
862 	case RTE_ETH_SPEED_NUM_10G:
863 		return RTE_ETH_LINK_SPEED_10G;
864 	case RTE_ETH_SPEED_NUM_20G:
865 		return RTE_ETH_LINK_SPEED_20G;
866 	case RTE_ETH_SPEED_NUM_25G:
867 		return RTE_ETH_LINK_SPEED_25G;
868 	case RTE_ETH_SPEED_NUM_40G:
869 		return RTE_ETH_LINK_SPEED_40G;
870 	case RTE_ETH_SPEED_NUM_50G:
871 		return RTE_ETH_LINK_SPEED_50G;
872 	case RTE_ETH_SPEED_NUM_56G:
873 		return RTE_ETH_LINK_SPEED_56G;
874 	case RTE_ETH_SPEED_NUM_100G:
875 		return RTE_ETH_LINK_SPEED_100G;
876 	case RTE_ETH_SPEED_NUM_200G:
877 		return RTE_ETH_LINK_SPEED_200G;
878 	default:
879 		return 0;
880 	}
881 }
882 
883 const char *
884 rte_eth_dev_rx_offload_name(uint64_t offload)
885 {
886 	const char *name = "UNKNOWN";
887 	unsigned int i;
888 
889 	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
890 		if (offload == eth_dev_rx_offload_names[i].offload) {
891 			name = eth_dev_rx_offload_names[i].name;
892 			break;
893 		}
894 	}
895 
896 	return name;
897 }
898 
899 const char *
900 rte_eth_dev_tx_offload_name(uint64_t offload)
901 {
902 	const char *name = "UNKNOWN";
903 	unsigned int i;
904 
905 	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
906 		if (offload == eth_dev_tx_offload_names[i].offload) {
907 			name = eth_dev_tx_offload_names[i].name;
908 			break;
909 		}
910 	}
911 
912 	return name;
913 }
914 
915 const char *
916 rte_eth_dev_capability_name(uint64_t capability)
917 {
918 	const char *name = "UNKNOWN";
919 	unsigned int i;
920 
921 	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
922 		if (capability == rte_eth_dev_capa_names[i].offload) {
923 			name = rte_eth_dev_capa_names[i].name;
924 			break;
925 		}
926 	}
927 
928 	return name;
929 }
930 
931 static inline int
932 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
933 		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
934 {
935 	int ret = 0;
936 
937 	if (dev_info_size == 0) {
938 		if (config_size != max_rx_pkt_len) {
939 			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
940 				       " %u != %u is not allowed\n",
941 				       port_id, config_size, max_rx_pkt_len);
942 			ret = -EINVAL;
943 		}
944 	} else if (config_size > dev_info_size) {
945 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
946 			       "> max allowed value %u\n", port_id, config_size,
947 			       dev_info_size);
948 		ret = -EINVAL;
949 	} else if (config_size < RTE_ETHER_MIN_LEN) {
950 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
951 			       "< min allowed value %u\n", port_id, config_size,
952 			       (unsigned int)RTE_ETHER_MIN_LEN);
953 		ret = -EINVAL;
954 	}
955 	return ret;
956 }
957 
958 /*
959  * Validate offloads that are requested through rte_eth_dev_configure against
960  * the offloads successfully set by the Ethernet device.
961  *
962  * @param port_id
963  *   The port identifier of the Ethernet device.
964  * @param req_offloads
965  *   The offloads that have been requested through `rte_eth_dev_configure`.
966  * @param set_offloads
967  *   The offloads successfully set by the Ethernet device.
968  * @param offload_type
969  *   The offload type i.e. Rx/Tx string.
970  * @param offload_name
971  *   The function that prints the offload name.
972  * @return
973  *   - (0) if validation successful.
974  *   - (-EINVAL) if requested offload has been silently disabled.
975  *
976  */
977 static int
978 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
979 		  uint64_t set_offloads, const char *offload_type,
980 		  const char *(*offload_name)(uint64_t))
981 {
982 	uint64_t offloads_diff = req_offloads ^ set_offloads;
983 	uint64_t offload;
984 	int ret = 0;
985 
986 	while (offloads_diff != 0) {
987 		/* Check if any offload is requested but not enabled. */
988 		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
989 		if (offload & req_offloads) {
990 			RTE_ETHDEV_LOG(ERR,
991 				"Port %u failed to enable %s offload %s\n",
992 				port_id, offload_type, offload_name(offload));
993 			ret = -EINVAL;
994 		}
995 
996 		/* Check if offload couldn't be disabled. */
997 		if (offload & set_offloads) {
998 			RTE_ETHDEV_LOG(DEBUG,
999 				"Port %u %s offload %s is not requested but enabled\n",
1000 				port_id, offload_type, offload_name(offload));
1001 		}
1002 
1003 		offloads_diff &= ~offload;
1004 	}
1005 
1006 	return ret;
1007 }
1008 
1009 static uint32_t
1010 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1011 {
1012 	uint32_t overhead_len;
1013 
1014 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1015 		overhead_len = max_rx_pktlen - max_mtu;
1016 	else
1017 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1018 
1019 	return overhead_len;
1020 }
1021 
1022 /* rte_eth_dev_info_get() should be called prior to this function */
1023 static int
1024 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
1025 		uint16_t mtu)
1026 {
1027 	uint32_t overhead_len;
1028 	uint32_t frame_size;
1029 
1030 	if (mtu < dev_info->min_mtu) {
1031 		RTE_ETHDEV_LOG(ERR,
1032 			"MTU (%u) < device min MTU (%u) for port_id %u\n",
1033 			mtu, dev_info->min_mtu, port_id);
1034 		return -EINVAL;
1035 	}
1036 	if (mtu > dev_info->max_mtu) {
1037 		RTE_ETHDEV_LOG(ERR,
1038 			"MTU (%u) > device max MTU (%u) for port_id %u\n",
1039 			mtu, dev_info->max_mtu, port_id);
1040 		return -EINVAL;
1041 	}
1042 
1043 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1044 			dev_info->max_mtu);
1045 	frame_size = mtu + overhead_len;
1046 	if (frame_size < RTE_ETHER_MIN_LEN) {
1047 		RTE_ETHDEV_LOG(ERR,
1048 			"Frame size (%u) < min frame size (%u) for port_id %u\n",
1049 			frame_size, RTE_ETHER_MIN_LEN, port_id);
1050 		return -EINVAL;
1051 	}
1052 
1053 	if (frame_size > dev_info->max_rx_pktlen) {
1054 		RTE_ETHDEV_LOG(ERR,
1055 			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
1056 			frame_size, dev_info->max_rx_pktlen, port_id);
1057 		return -EINVAL;
1058 	}
1059 
1060 	return 0;
1061 }
1062 
1063 int
1064 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1065 		      const struct rte_eth_conf *dev_conf)
1066 {
1067 	struct rte_eth_dev *dev;
1068 	struct rte_eth_dev_info dev_info;
1069 	struct rte_eth_conf orig_conf;
1070 	int diag;
1071 	int ret;
1072 	uint16_t old_mtu;
1073 
1074 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1075 	dev = &rte_eth_devices[port_id];
1076 
1077 	if (dev_conf == NULL) {
1078 		RTE_ETHDEV_LOG(ERR,
1079 			"Cannot configure ethdev port %u from NULL config\n",
1080 			port_id);
1081 		return -EINVAL;
1082 	}
1083 
1084 	if (*dev->dev_ops->dev_configure == NULL)
1085 		return -ENOTSUP;
1086 
1087 	if (dev->data->dev_started) {
1088 		RTE_ETHDEV_LOG(ERR,
1089 			"Port %u must be stopped to allow configuration\n",
1090 			port_id);
1091 		return -EBUSY;
1092 	}
1093 
1094 	/*
1095 	 * Ensure that "dev_configured" is always 0 each time prepare to do
1096 	 * dev_configure() to avoid any non-anticipated behaviour.
1097 	 * And set to 1 when dev_configure() is executed successfully.
1098 	 */
1099 	dev->data->dev_configured = 0;
1100 
1101 	 /* Store original config, as rollback required on failure */
1102 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1103 
1104 	/*
1105 	 * Copy the dev_conf parameter into the dev structure.
1106 	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1107 	 */
1108 	if (dev_conf != &dev->data->dev_conf)
1109 		memcpy(&dev->data->dev_conf, dev_conf,
1110 		       sizeof(dev->data->dev_conf));
1111 
1112 	/* Backup mtu for rollback */
1113 	old_mtu = dev->data->mtu;
1114 
1115 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1116 	if (ret != 0)
1117 		goto rollback;
1118 
1119 	/* If number of queues specified by application for both Rx and Tx is
1120 	 * zero, use driver preferred values. This cannot be done individually
1121 	 * as it is valid for either Tx or Rx (but not both) to be zero.
1122 	 * If driver does not provide any preferred valued, fall back on
1123 	 * EAL defaults.
1124 	 */
1125 	if (nb_rx_q == 0 && nb_tx_q == 0) {
1126 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
1127 		if (nb_rx_q == 0)
1128 			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1129 		nb_tx_q = dev_info.default_txportconf.nb_queues;
1130 		if (nb_tx_q == 0)
1131 			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1132 	}
1133 
1134 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1135 		RTE_ETHDEV_LOG(ERR,
1136 			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
1137 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1138 		ret = -EINVAL;
1139 		goto rollback;
1140 	}
1141 
1142 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1143 		RTE_ETHDEV_LOG(ERR,
1144 			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
1145 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1146 		ret = -EINVAL;
1147 		goto rollback;
1148 	}
1149 
1150 	/*
1151 	 * Check that the numbers of Rx and Tx queues are not greater
1152 	 * than the maximum number of Rx and Tx queues supported by the
1153 	 * configured device.
1154 	 */
1155 	if (nb_rx_q > dev_info.max_rx_queues) {
1156 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1157 			port_id, nb_rx_q, dev_info.max_rx_queues);
1158 		ret = -EINVAL;
1159 		goto rollback;
1160 	}
1161 
1162 	if (nb_tx_q > dev_info.max_tx_queues) {
1163 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1164 			port_id, nb_tx_q, dev_info.max_tx_queues);
1165 		ret = -EINVAL;
1166 		goto rollback;
1167 	}
1168 
1169 	/* Check that the device supports requested interrupts */
1170 	if ((dev_conf->intr_conf.lsc == 1) &&
1171 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1172 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1173 			dev->device->driver->name);
1174 		ret = -EINVAL;
1175 		goto rollback;
1176 	}
1177 	if ((dev_conf->intr_conf.rmv == 1) &&
1178 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1179 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1180 			dev->device->driver->name);
1181 		ret = -EINVAL;
1182 		goto rollback;
1183 	}
1184 
1185 	if (dev_conf->rxmode.mtu == 0)
1186 		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
1187 
1188 	ret = eth_dev_validate_mtu(port_id, &dev_info,
1189 			dev->data->dev_conf.rxmode.mtu);
1190 	if (ret != 0)
1191 		goto rollback;
1192 
1193 	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1194 
1195 	/*
1196 	 * If LRO is enabled, check that the maximum aggregated packet
1197 	 * size is supported by the configured device.
1198 	 */
1199 	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1200 		uint32_t max_rx_pktlen;
1201 		uint32_t overhead_len;
1202 
1203 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1204 				dev_info.max_mtu);
1205 		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1206 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
1207 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1208 		ret = eth_dev_check_lro_pkt_size(port_id,
1209 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1210 				max_rx_pktlen,
1211 				dev_info.max_lro_pkt_size);
1212 		if (ret != 0)
1213 			goto rollback;
1214 	}
1215 
1216 	/* Any requested offloading must be within its device capabilities */
1217 	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1218 	     dev_conf->rxmode.offloads) {
1219 		RTE_ETHDEV_LOG(ERR,
1220 			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1221 			"capabilities 0x%"PRIx64" in %s()\n",
1222 			port_id, dev_conf->rxmode.offloads,
1223 			dev_info.rx_offload_capa,
1224 			__func__);
1225 		ret = -EINVAL;
1226 		goto rollback;
1227 	}
1228 	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1229 	     dev_conf->txmode.offloads) {
1230 		RTE_ETHDEV_LOG(ERR,
1231 			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1232 			"capabilities 0x%"PRIx64" in %s()\n",
1233 			port_id, dev_conf->txmode.offloads,
1234 			dev_info.tx_offload_capa,
1235 			__func__);
1236 		ret = -EINVAL;
1237 		goto rollback;
1238 	}
1239 
1240 	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1241 		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1242 
1243 	/* Check that device supports requested rss hash functions. */
1244 	if ((dev_info.flow_type_rss_offloads |
1245 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1246 	    dev_info.flow_type_rss_offloads) {
1247 		RTE_ETHDEV_LOG(ERR,
1248 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1249 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1250 			dev_info.flow_type_rss_offloads);
1251 		ret = -EINVAL;
1252 		goto rollback;
1253 	}
1254 
1255 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1256 	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
1257 	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
1258 		RTE_ETHDEV_LOG(ERR,
1259 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1260 			port_id,
1261 			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
1262 		ret = -EINVAL;
1263 		goto rollback;
1264 	}
1265 
1266 	/*
1267 	 * Setup new number of Rx/Tx queues and reconfigure device.
1268 	 */
1269 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1270 	if (diag != 0) {
1271 		RTE_ETHDEV_LOG(ERR,
1272 			"Port%u eth_dev_rx_queue_config = %d\n",
1273 			port_id, diag);
1274 		ret = diag;
1275 		goto rollback;
1276 	}
1277 
1278 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1279 	if (diag != 0) {
1280 		RTE_ETHDEV_LOG(ERR,
1281 			"Port%u eth_dev_tx_queue_config = %d\n",
1282 			port_id, diag);
1283 		eth_dev_rx_queue_config(dev, 0);
1284 		ret = diag;
1285 		goto rollback;
1286 	}
1287 
1288 	diag = (*dev->dev_ops->dev_configure)(dev);
1289 	if (diag != 0) {
1290 		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1291 			port_id, diag);
1292 		ret = eth_err(port_id, diag);
1293 		goto reset_queues;
1294 	}
1295 
1296 	/* Initialize Rx profiling if enabled at compilation time. */
1297 	diag = __rte_eth_dev_profile_init(port_id, dev);
1298 	if (diag != 0) {
1299 		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1300 			port_id, diag);
1301 		ret = eth_err(port_id, diag);
1302 		goto reset_queues;
1303 	}
1304 
1305 	/* Validate Rx offloads. */
1306 	diag = eth_dev_validate_offloads(port_id,
1307 			dev_conf->rxmode.offloads,
1308 			dev->data->dev_conf.rxmode.offloads, "Rx",
1309 			rte_eth_dev_rx_offload_name);
1310 	if (diag != 0) {
1311 		ret = diag;
1312 		goto reset_queues;
1313 	}
1314 
1315 	/* Validate Tx offloads. */
1316 	diag = eth_dev_validate_offloads(port_id,
1317 			dev_conf->txmode.offloads,
1318 			dev->data->dev_conf.txmode.offloads, "Tx",
1319 			rte_eth_dev_tx_offload_name);
1320 	if (diag != 0) {
1321 		ret = diag;
1322 		goto reset_queues;
1323 	}
1324 
1325 	dev->data->dev_configured = 1;
1326 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1327 	return 0;
1328 reset_queues:
1329 	eth_dev_rx_queue_config(dev, 0);
1330 	eth_dev_tx_queue_config(dev, 0);
1331 rollback:
1332 	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1333 	if (old_mtu != dev->data->mtu)
1334 		dev->data->mtu = old_mtu;
1335 
1336 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1337 	return ret;
1338 }
1339 
1340 static void
1341 eth_dev_mac_restore(struct rte_eth_dev *dev,
1342 			struct rte_eth_dev_info *dev_info)
1343 {
1344 	struct rte_ether_addr *addr;
1345 	uint16_t i;
1346 	uint32_t pool = 0;
1347 	uint64_t pool_mask;
1348 
1349 	/* replay MAC address configuration including default MAC */
1350 	addr = &dev->data->mac_addrs[0];
1351 	if (*dev->dev_ops->mac_addr_set != NULL)
1352 		(*dev->dev_ops->mac_addr_set)(dev, addr);
1353 	else if (*dev->dev_ops->mac_addr_add != NULL)
1354 		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1355 
1356 	if (*dev->dev_ops->mac_addr_add != NULL) {
1357 		for (i = 1; i < dev_info->max_mac_addrs; i++) {
1358 			addr = &dev->data->mac_addrs[i];
1359 
1360 			/* skip zero address */
1361 			if (rte_is_zero_ether_addr(addr))
1362 				continue;
1363 
1364 			pool = 0;
1365 			pool_mask = dev->data->mac_pool_sel[i];
1366 
1367 			do {
1368 				if (pool_mask & UINT64_C(1))
1369 					(*dev->dev_ops->mac_addr_add)(dev,
1370 						addr, i, pool);
1371 				pool_mask >>= 1;
1372 				pool++;
1373 			} while (pool_mask);
1374 		}
1375 	}
1376 }
1377 
1378 static int
1379 eth_dev_config_restore(struct rte_eth_dev *dev,
1380 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
1381 {
1382 	int ret;
1383 
1384 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1385 		eth_dev_mac_restore(dev, dev_info);
1386 
1387 	/* replay promiscuous configuration */
1388 	/*
1389 	 * use callbacks directly since we don't need port_id check and
1390 	 * would like to bypass the same value set
1391 	 */
1392 	if (rte_eth_promiscuous_get(port_id) == 1 &&
1393 	    *dev->dev_ops->promiscuous_enable != NULL) {
1394 		ret = eth_err(port_id,
1395 			      (*dev->dev_ops->promiscuous_enable)(dev));
1396 		if (ret != 0 && ret != -ENOTSUP) {
1397 			RTE_ETHDEV_LOG(ERR,
1398 				"Failed to enable promiscuous mode for device (port %u): %s\n",
1399 				port_id, rte_strerror(-ret));
1400 			return ret;
1401 		}
1402 	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
1403 		   *dev->dev_ops->promiscuous_disable != NULL) {
1404 		ret = eth_err(port_id,
1405 			      (*dev->dev_ops->promiscuous_disable)(dev));
1406 		if (ret != 0 && ret != -ENOTSUP) {
1407 			RTE_ETHDEV_LOG(ERR,
1408 				"Failed to disable promiscuous mode for device (port %u): %s\n",
1409 				port_id, rte_strerror(-ret));
1410 			return ret;
1411 		}
1412 	}
1413 
1414 	/* replay all multicast configuration */
1415 	/*
1416 	 * use callbacks directly since we don't need port_id check and
1417 	 * would like to bypass the same value set
1418 	 */
1419 	if (rte_eth_allmulticast_get(port_id) == 1 &&
1420 	    *dev->dev_ops->allmulticast_enable != NULL) {
1421 		ret = eth_err(port_id,
1422 			      (*dev->dev_ops->allmulticast_enable)(dev));
1423 		if (ret != 0 && ret != -ENOTSUP) {
1424 			RTE_ETHDEV_LOG(ERR,
1425 				"Failed to enable allmulticast mode for device (port %u): %s\n",
1426 				port_id, rte_strerror(-ret));
1427 			return ret;
1428 		}
1429 	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
1430 		   *dev->dev_ops->allmulticast_disable != NULL) {
1431 		ret = eth_err(port_id,
1432 			      (*dev->dev_ops->allmulticast_disable)(dev));
1433 		if (ret != 0 && ret != -ENOTSUP) {
1434 			RTE_ETHDEV_LOG(ERR,
1435 				"Failed to disable allmulticast mode for device (port %u): %s\n",
1436 				port_id, rte_strerror(-ret));
1437 			return ret;
1438 		}
1439 	}
1440 
1441 	return 0;
1442 }
1443 
1444 int
1445 rte_eth_dev_start(uint16_t port_id)
1446 {
1447 	struct rte_eth_dev *dev;
1448 	struct rte_eth_dev_info dev_info;
1449 	int diag;
1450 	int ret, ret_stop;
1451 
1452 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1453 	dev = &rte_eth_devices[port_id];
1454 
1455 	if (*dev->dev_ops->dev_start == NULL)
1456 		return -ENOTSUP;
1457 
1458 	if (dev->data->dev_configured == 0) {
1459 		RTE_ETHDEV_LOG(INFO,
1460 			"Device with port_id=%"PRIu16" is not configured.\n",
1461 			port_id);
1462 		return -EINVAL;
1463 	}
1464 
1465 	if (dev->data->dev_started != 0) {
1466 		RTE_ETHDEV_LOG(INFO,
1467 			"Device with port_id=%"PRIu16" already started\n",
1468 			port_id);
1469 		return 0;
1470 	}
1471 
1472 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1473 	if (ret != 0)
1474 		return ret;
1475 
1476 	/* Lets restore MAC now if device does not support live change */
1477 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1478 		eth_dev_mac_restore(dev, &dev_info);
1479 
1480 	diag = (*dev->dev_ops->dev_start)(dev);
1481 	if (diag == 0)
1482 		dev->data->dev_started = 1;
1483 	else
1484 		return eth_err(port_id, diag);
1485 
1486 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
1487 	if (ret != 0) {
1488 		RTE_ETHDEV_LOG(ERR,
1489 			"Error during restoring configuration for device (port %u): %s\n",
1490 			port_id, rte_strerror(-ret));
1491 		ret_stop = rte_eth_dev_stop(port_id);
1492 		if (ret_stop != 0) {
1493 			RTE_ETHDEV_LOG(ERR,
1494 				"Failed to stop device (port %u): %s\n",
1495 				port_id, rte_strerror(-ret_stop));
1496 		}
1497 
1498 		return ret;
1499 	}
1500 
1501 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
1502 		if (*dev->dev_ops->link_update == NULL)
1503 			return -ENOTSUP;
1504 		(*dev->dev_ops->link_update)(dev, 0);
1505 	}
1506 
1507 	/* expose selection of PMD fast-path functions */
1508 	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1509 
1510 	rte_ethdev_trace_start(port_id);
1511 	return 0;
1512 }
1513 
1514 int
1515 rte_eth_dev_stop(uint16_t port_id)
1516 {
1517 	struct rte_eth_dev *dev;
1518 	int ret;
1519 
1520 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1521 	dev = &rte_eth_devices[port_id];
1522 
1523 	if (*dev->dev_ops->dev_stop == NULL)
1524 		return -ENOTSUP;
1525 
1526 	if (dev->data->dev_started == 0) {
1527 		RTE_ETHDEV_LOG(INFO,
1528 			"Device with port_id=%"PRIu16" already stopped\n",
1529 			port_id);
1530 		return 0;
1531 	}
1532 
1533 	/* point fast-path functions to dummy ones */
1534 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1535 
1536 	ret = (*dev->dev_ops->dev_stop)(dev);
1537 	if (ret == 0)
1538 		dev->data->dev_started = 0;
1539 	rte_ethdev_trace_stop(port_id, ret);
1540 
1541 	return ret;
1542 }
1543 
1544 int
1545 rte_eth_dev_set_link_up(uint16_t port_id)
1546 {
1547 	struct rte_eth_dev *dev;
1548 
1549 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1550 	dev = &rte_eth_devices[port_id];
1551 
1552 	if (*dev->dev_ops->dev_set_link_up == NULL)
1553 		return -ENOTSUP;
1554 	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1555 }
1556 
1557 int
1558 rte_eth_dev_set_link_down(uint16_t port_id)
1559 {
1560 	struct rte_eth_dev *dev;
1561 
1562 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1563 	dev = &rte_eth_devices[port_id];
1564 
1565 	if (*dev->dev_ops->dev_set_link_down == NULL)
1566 		return -ENOTSUP;
1567 	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1568 }
1569 
1570 int
1571 rte_eth_dev_close(uint16_t port_id)
1572 {
1573 	struct rte_eth_dev *dev;
1574 	int firsterr, binerr;
1575 	int *lasterr = &firsterr;
1576 
1577 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1578 	dev = &rte_eth_devices[port_id];
1579 
1580 	/*
1581 	 * Secondary process needs to close device to release process private
1582 	 * resources. But secondary process should not be obliged to wait
1583 	 * for device stop before closing ethdev.
1584 	 */
1585 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1586 			dev->data->dev_started) {
1587 		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
1588 			       port_id);
1589 		return -EINVAL;
1590 	}
1591 
1592 	if (*dev->dev_ops->dev_close == NULL)
1593 		return -ENOTSUP;
1594 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1595 	if (*lasterr != 0)
1596 		lasterr = &binerr;
1597 
1598 	rte_ethdev_trace_close(port_id);
1599 	*lasterr = rte_eth_dev_release_port(dev);
1600 
1601 	return firsterr;
1602 }
1603 
1604 int
1605 rte_eth_dev_reset(uint16_t port_id)
1606 {
1607 	struct rte_eth_dev *dev;
1608 	int ret;
1609 
1610 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1611 	dev = &rte_eth_devices[port_id];
1612 
1613 	if (*dev->dev_ops->dev_reset == NULL)
1614 		return -ENOTSUP;
1615 
1616 	ret = rte_eth_dev_stop(port_id);
1617 	if (ret != 0) {
1618 		RTE_ETHDEV_LOG(ERR,
1619 			"Failed to stop device (port %u) before reset: %s - ignore\n",
1620 			port_id, rte_strerror(-ret));
1621 	}
1622 	ret = dev->dev_ops->dev_reset(dev);
1623 
1624 	return eth_err(port_id, ret);
1625 }
1626 
1627 int
1628 rte_eth_dev_is_removed(uint16_t port_id)
1629 {
1630 	struct rte_eth_dev *dev;
1631 	int ret;
1632 
1633 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1634 	dev = &rte_eth_devices[port_id];
1635 
1636 	if (dev->state == RTE_ETH_DEV_REMOVED)
1637 		return 1;
1638 
1639 	if (*dev->dev_ops->is_removed == NULL)
1640 		return 0;
1641 
1642 	ret = dev->dev_ops->is_removed(dev);
1643 	if (ret != 0)
1644 		/* Device is physically removed. */
1645 		dev->state = RTE_ETH_DEV_REMOVED;
1646 
1647 	return ret;
1648 }
1649 
1650 static int
1651 rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
1652 			     uint16_t n_seg, uint32_t *mbp_buf_size,
1653 			     const struct rte_eth_dev_info *dev_info)
1654 {
1655 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1656 	struct rte_mempool *mp_first;
1657 	uint32_t offset_mask;
1658 	uint16_t seg_idx;
1659 
1660 	if (n_seg > seg_capa->max_nseg) {
1661 		RTE_ETHDEV_LOG(ERR,
1662 			       "Requested Rx segments %u exceed supported %u\n",
1663 			       n_seg, seg_capa->max_nseg);
1664 		return -EINVAL;
1665 	}
1666 	/*
1667 	 * Check the sizes and offsets against buffer sizes
1668 	 * for each segment specified in extended configuration.
1669 	 */
1670 	mp_first = rx_seg[0].mp;
1671 	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
1672 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1673 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1674 		uint32_t length = rx_seg[seg_idx].length;
1675 		uint32_t offset = rx_seg[seg_idx].offset;
1676 
1677 		if (mpl == NULL) {
1678 			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1679 			return -EINVAL;
1680 		}
1681 		if (seg_idx != 0 && mp_first != mpl &&
1682 		    seg_capa->multi_pools == 0) {
1683 			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1684 			return -ENOTSUP;
1685 		}
1686 		if (offset != 0) {
1687 			if (seg_capa->offset_allowed == 0) {
1688 				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1689 				return -ENOTSUP;
1690 			}
1691 			if (offset & offset_mask) {
1692 				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1693 					       offset,
1694 					       seg_capa->offset_align_log2);
1695 				return -EINVAL;
1696 			}
1697 		}
1698 		if (mpl->private_data_size <
1699 			sizeof(struct rte_pktmbuf_pool_private)) {
1700 			RTE_ETHDEV_LOG(ERR,
1701 				       "%s private_data_size %u < %u\n",
1702 				       mpl->name, mpl->private_data_size,
1703 				       (unsigned int)sizeof
1704 					(struct rte_pktmbuf_pool_private));
1705 			return -ENOSPC;
1706 		}
1707 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1708 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1709 		length = length != 0 ? length : *mbp_buf_size;
1710 		if (*mbp_buf_size < length + offset) {
1711 			RTE_ETHDEV_LOG(ERR,
1712 				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
1713 				       mpl->name, *mbp_buf_size,
1714 				       length + offset, length, offset);
1715 			return -EINVAL;
1716 		}
1717 	}
1718 	return 0;
1719 }
1720 
1721 int
1722 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1723 		       uint16_t nb_rx_desc, unsigned int socket_id,
1724 		       const struct rte_eth_rxconf *rx_conf,
1725 		       struct rte_mempool *mp)
1726 {
1727 	int ret;
1728 	uint32_t mbp_buf_size;
1729 	struct rte_eth_dev *dev;
1730 	struct rte_eth_dev_info dev_info;
1731 	struct rte_eth_rxconf local_conf;
1732 
1733 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1734 	dev = &rte_eth_devices[port_id];
1735 
1736 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1737 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1738 		return -EINVAL;
1739 	}
1740 
1741 	if (*dev->dev_ops->rx_queue_setup == NULL)
1742 		return -ENOTSUP;
1743 
1744 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1745 	if (ret != 0)
1746 		return ret;
1747 
1748 	if (mp != NULL) {
1749 		/* Single pool configuration check. */
1750 		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1751 			RTE_ETHDEV_LOG(ERR,
1752 				       "Ambiguous segment configuration\n");
1753 			return -EINVAL;
1754 		}
1755 		/*
1756 		 * Check the size of the mbuf data buffer, this value
1757 		 * must be provided in the private data of the memory pool.
1758 		 * First check that the memory pool(s) has a valid private data.
1759 		 */
1760 		if (mp->private_data_size <
1761 				sizeof(struct rte_pktmbuf_pool_private)) {
1762 			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1763 				mp->name, mp->private_data_size,
1764 				(unsigned int)
1765 				sizeof(struct rte_pktmbuf_pool_private));
1766 			return -ENOSPC;
1767 		}
1768 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1769 		if (mbp_buf_size < dev_info.min_rx_bufsize +
1770 				   RTE_PKTMBUF_HEADROOM) {
1771 			RTE_ETHDEV_LOG(ERR,
1772 				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1773 				       mp->name, mbp_buf_size,
1774 				       RTE_PKTMBUF_HEADROOM +
1775 				       dev_info.min_rx_bufsize,
1776 				       RTE_PKTMBUF_HEADROOM,
1777 				       dev_info.min_rx_bufsize);
1778 			return -EINVAL;
1779 		}
1780 	} else {
1781 		const struct rte_eth_rxseg_split *rx_seg;
1782 		uint16_t n_seg;
1783 
1784 		/* Extended multi-segment configuration check. */
1785 		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1786 			RTE_ETHDEV_LOG(ERR,
1787 				       "Memory pool is null and no extended configuration provided\n");
1788 			return -EINVAL;
1789 		}
1790 
1791 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1792 		n_seg = rx_conf->rx_nseg;
1793 
1794 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1795 			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1796 							   &mbp_buf_size,
1797 							   &dev_info);
1798 			if (ret != 0)
1799 				return ret;
1800 		} else {
1801 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1802 			return -EINVAL;
1803 		}
1804 	}
1805 
1806 	/* Use default specified by driver, if nb_rx_desc is zero */
1807 	if (nb_rx_desc == 0) {
1808 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
1809 		/* If driver default is also zero, fall back on EAL default */
1810 		if (nb_rx_desc == 0)
1811 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1812 	}
1813 
1814 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1815 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1816 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1817 
1818 		RTE_ETHDEV_LOG(ERR,
1819 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1820 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1821 			dev_info.rx_desc_lim.nb_min,
1822 			dev_info.rx_desc_lim.nb_align);
1823 		return -EINVAL;
1824 	}
1825 
1826 	if (dev->data->dev_started &&
1827 		!(dev_info.dev_capa &
1828 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1829 		return -EBUSY;
1830 
1831 	if (dev->data->dev_started &&
1832 		(dev->data->rx_queue_state[rx_queue_id] !=
1833 			RTE_ETH_QUEUE_STATE_STOPPED))
1834 		return -EBUSY;
1835 
1836 	eth_dev_rxq_release(dev, rx_queue_id);
1837 
1838 	if (rx_conf == NULL)
1839 		rx_conf = &dev_info.default_rxconf;
1840 
1841 	local_conf = *rx_conf;
1842 
1843 	/*
1844 	 * If an offloading has already been enabled in
1845 	 * rte_eth_dev_configure(), it has been enabled on all queues,
1846 	 * so there is no need to enable it in this queue again.
1847 	 * The local_conf.offloads input to underlying PMD only carries
1848 	 * those offloadings which are only enabled on this queue and
1849 	 * not enabled on all queues.
1850 	 */
1851 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1852 
1853 	/*
1854 	 * New added offloadings for this queue are those not enabled in
1855 	 * rte_eth_dev_configure() and they must be per-queue type.
1856 	 * A pure per-port offloading can't be enabled on a queue while
1857 	 * disabled on another queue. A pure per-port offloading can't
1858 	 * be enabled for any queue as new added one if it hasn't been
1859 	 * enabled in rte_eth_dev_configure().
1860 	 */
1861 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1862 	     local_conf.offloads) {
1863 		RTE_ETHDEV_LOG(ERR,
1864 			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1865 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1866 			port_id, rx_queue_id, local_conf.offloads,
1867 			dev_info.rx_queue_offload_capa,
1868 			__func__);
1869 		return -EINVAL;
1870 	}
1871 
1872 	if (local_conf.share_group > 0 &&
1873 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1874 		RTE_ETHDEV_LOG(ERR,
1875 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1876 			port_id, rx_queue_id, local_conf.share_group);
1877 		return -EINVAL;
1878 	}
1879 
1880 	/*
1881 	 * If LRO is enabled, check that the maximum aggregated packet
1882 	 * size is supported by the configured device.
1883 	 */
1884 	/* Get the real Ethernet overhead length */
1885 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1886 		uint32_t overhead_len;
1887 		uint32_t max_rx_pktlen;
1888 		int ret;
1889 
1890 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1891 				dev_info.max_mtu);
1892 		max_rx_pktlen = dev->data->mtu + overhead_len;
1893 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1894 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1895 		ret = eth_dev_check_lro_pkt_size(port_id,
1896 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1897 				max_rx_pktlen,
1898 				dev_info.max_lro_pkt_size);
1899 		if (ret != 0)
1900 			return ret;
1901 	}
1902 
1903 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1904 					      socket_id, &local_conf, mp);
1905 	if (!ret) {
1906 		if (!dev->data->min_rx_buf_size ||
1907 		    dev->data->min_rx_buf_size > mbp_buf_size)
1908 			dev->data->min_rx_buf_size = mbp_buf_size;
1909 	}
1910 
1911 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1912 		rx_conf, ret);
1913 	return eth_err(port_id, ret);
1914 }
1915 
1916 int
1917 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1918 			       uint16_t nb_rx_desc,
1919 			       const struct rte_eth_hairpin_conf *conf)
1920 {
1921 	int ret;
1922 	struct rte_eth_dev *dev;
1923 	struct rte_eth_hairpin_cap cap;
1924 	int i;
1925 	int count;
1926 
1927 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1928 	dev = &rte_eth_devices[port_id];
1929 
1930 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1931 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1932 		return -EINVAL;
1933 	}
1934 
1935 	if (conf == NULL) {
1936 		RTE_ETHDEV_LOG(ERR,
1937 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1938 			port_id);
1939 		return -EINVAL;
1940 	}
1941 
1942 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1943 	if (ret != 0)
1944 		return ret;
1945 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
1946 		return -ENOTSUP;
1947 	/* if nb_rx_desc is zero use max number of desc from the driver. */
1948 	if (nb_rx_desc == 0)
1949 		nb_rx_desc = cap.max_nb_desc;
1950 	if (nb_rx_desc > cap.max_nb_desc) {
1951 		RTE_ETHDEV_LOG(ERR,
1952 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1953 			nb_rx_desc, cap.max_nb_desc);
1954 		return -EINVAL;
1955 	}
1956 	if (conf->peer_count > cap.max_rx_2_tx) {
1957 		RTE_ETHDEV_LOG(ERR,
1958 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
1959 			conf->peer_count, cap.max_rx_2_tx);
1960 		return -EINVAL;
1961 	}
1962 	if (conf->peer_count == 0) {
1963 		RTE_ETHDEV_LOG(ERR,
1964 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
1965 			conf->peer_count);
1966 		return -EINVAL;
1967 	}
1968 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1969 	     cap.max_nb_queues != UINT16_MAX; i++) {
1970 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1971 			count++;
1972 	}
1973 	if (count > cap.max_nb_queues) {
1974 		RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
1975 		cap.max_nb_queues);
1976 		return -EINVAL;
1977 	}
1978 	if (dev->data->dev_started)
1979 		return -EBUSY;
1980 	eth_dev_rxq_release(dev, rx_queue_id);
1981 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1982 						      nb_rx_desc, conf);
1983 	if (ret == 0)
1984 		dev->data->rx_queue_state[rx_queue_id] =
1985 			RTE_ETH_QUEUE_STATE_HAIRPIN;
1986 	return eth_err(port_id, ret);
1987 }
1988 
1989 int
1990 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1991 		       uint16_t nb_tx_desc, unsigned int socket_id,
1992 		       const struct rte_eth_txconf *tx_conf)
1993 {
1994 	struct rte_eth_dev *dev;
1995 	struct rte_eth_dev_info dev_info;
1996 	struct rte_eth_txconf local_conf;
1997 	int ret;
1998 
1999 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2000 	dev = &rte_eth_devices[port_id];
2001 
2002 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2003 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2004 		return -EINVAL;
2005 	}
2006 
2007 	if (*dev->dev_ops->tx_queue_setup == NULL)
2008 		return -ENOTSUP;
2009 
2010 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2011 	if (ret != 0)
2012 		return ret;
2013 
2014 	/* Use default specified by driver, if nb_tx_desc is zero */
2015 	if (nb_tx_desc == 0) {
2016 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2017 		/* If driver default is zero, fall back on EAL default */
2018 		if (nb_tx_desc == 0)
2019 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2020 	}
2021 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2022 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2023 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2024 		RTE_ETHDEV_LOG(ERR,
2025 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2026 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2027 			dev_info.tx_desc_lim.nb_min,
2028 			dev_info.tx_desc_lim.nb_align);
2029 		return -EINVAL;
2030 	}
2031 
2032 	if (dev->data->dev_started &&
2033 		!(dev_info.dev_capa &
2034 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2035 		return -EBUSY;
2036 
2037 	if (dev->data->dev_started &&
2038 		(dev->data->tx_queue_state[tx_queue_id] !=
2039 			RTE_ETH_QUEUE_STATE_STOPPED))
2040 		return -EBUSY;
2041 
2042 	eth_dev_txq_release(dev, tx_queue_id);
2043 
2044 	if (tx_conf == NULL)
2045 		tx_conf = &dev_info.default_txconf;
2046 
2047 	local_conf = *tx_conf;
2048 
2049 	/*
2050 	 * If an offloading has already been enabled in
2051 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2052 	 * so there is no need to enable it in this queue again.
2053 	 * The local_conf.offloads input to underlying PMD only carries
2054 	 * those offloadings which are only enabled on this queue and
2055 	 * not enabled on all queues.
2056 	 */
2057 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2058 
2059 	/*
2060 	 * New added offloadings for this queue are those not enabled in
2061 	 * rte_eth_dev_configure() and they must be per-queue type.
2062 	 * A pure per-port offloading can't be enabled on a queue while
2063 	 * disabled on another queue. A pure per-port offloading can't
2064 	 * be enabled for any queue as new added one if it hasn't been
2065 	 * enabled in rte_eth_dev_configure().
2066 	 */
2067 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2068 	     local_conf.offloads) {
2069 		RTE_ETHDEV_LOG(ERR,
2070 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2071 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2072 			port_id, tx_queue_id, local_conf.offloads,
2073 			dev_info.tx_queue_offload_capa,
2074 			__func__);
2075 		return -EINVAL;
2076 	}
2077 
2078 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2079 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2080 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2081 }
2082 
2083 int
2084 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2085 			       uint16_t nb_tx_desc,
2086 			       const struct rte_eth_hairpin_conf *conf)
2087 {
2088 	struct rte_eth_dev *dev;
2089 	struct rte_eth_hairpin_cap cap;
2090 	int i;
2091 	int count;
2092 	int ret;
2093 
2094 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2095 	dev = &rte_eth_devices[port_id];
2096 
2097 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2098 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2099 		return -EINVAL;
2100 	}
2101 
2102 	if (conf == NULL) {
2103 		RTE_ETHDEV_LOG(ERR,
2104 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2105 			port_id);
2106 		return -EINVAL;
2107 	}
2108 
2109 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2110 	if (ret != 0)
2111 		return ret;
2112 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2113 		return -ENOTSUP;
2114 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2115 	if (nb_tx_desc == 0)
2116 		nb_tx_desc = cap.max_nb_desc;
2117 	if (nb_tx_desc > cap.max_nb_desc) {
2118 		RTE_ETHDEV_LOG(ERR,
2119 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2120 			nb_tx_desc, cap.max_nb_desc);
2121 		return -EINVAL;
2122 	}
2123 	if (conf->peer_count > cap.max_tx_2_rx) {
2124 		RTE_ETHDEV_LOG(ERR,
2125 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2126 			conf->peer_count, cap.max_tx_2_rx);
2127 		return -EINVAL;
2128 	}
2129 	if (conf->peer_count == 0) {
2130 		RTE_ETHDEV_LOG(ERR,
2131 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2132 			conf->peer_count);
2133 		return -EINVAL;
2134 	}
2135 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2136 	     cap.max_nb_queues != UINT16_MAX; i++) {
2137 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2138 			count++;
2139 	}
2140 	if (count > cap.max_nb_queues) {
2141 		RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2142 		cap.max_nb_queues);
2143 		return -EINVAL;
2144 	}
2145 	if (dev->data->dev_started)
2146 		return -EBUSY;
2147 	eth_dev_txq_release(dev, tx_queue_id);
2148 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2149 		(dev, tx_queue_id, nb_tx_desc, conf);
2150 	if (ret == 0)
2151 		dev->data->tx_queue_state[tx_queue_id] =
2152 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2153 	return eth_err(port_id, ret);
2154 }
2155 
2156 int
2157 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2158 {
2159 	struct rte_eth_dev *dev;
2160 	int ret;
2161 
2162 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2163 	dev = &rte_eth_devices[tx_port];
2164 
2165 	if (dev->data->dev_started == 0) {
2166 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2167 		return -EBUSY;
2168 	}
2169 
2170 	if (*dev->dev_ops->hairpin_bind == NULL)
2171 		return -ENOTSUP;
2172 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2173 	if (ret != 0)
2174 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2175 			       " to Rx %d (%d - all ports)\n",
2176 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2177 
2178 	return ret;
2179 }
2180 
2181 int
2182 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2183 {
2184 	struct rte_eth_dev *dev;
2185 	int ret;
2186 
2187 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2188 	dev = &rte_eth_devices[tx_port];
2189 
2190 	if (dev->data->dev_started == 0) {
2191 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2192 		return -EBUSY;
2193 	}
2194 
2195 	if (*dev->dev_ops->hairpin_unbind == NULL)
2196 		return -ENOTSUP;
2197 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2198 	if (ret != 0)
2199 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2200 			       " from Rx %d (%d - all ports)\n",
2201 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2202 
2203 	return ret;
2204 }
2205 
2206 int
2207 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2208 			       size_t len, uint32_t direction)
2209 {
2210 	struct rte_eth_dev *dev;
2211 	int ret;
2212 
2213 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2214 	dev = &rte_eth_devices[port_id];
2215 
2216 	if (peer_ports == NULL) {
2217 		RTE_ETHDEV_LOG(ERR,
2218 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2219 			port_id);
2220 		return -EINVAL;
2221 	}
2222 
2223 	if (len == 0) {
2224 		RTE_ETHDEV_LOG(ERR,
2225 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2226 			port_id);
2227 		return -EINVAL;
2228 	}
2229 
2230 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2231 		return -ENOTSUP;
2232 
2233 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2234 						      len, direction);
2235 	if (ret < 0)
2236 		RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2237 			       port_id, direction ? "Rx" : "Tx");
2238 
2239 	return ret;
2240 }
2241 
2242 void
2243 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2244 		void *userdata __rte_unused)
2245 {
2246 	rte_pktmbuf_free_bulk(pkts, unsent);
2247 }
2248 
2249 void
2250 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2251 		void *userdata)
2252 {
2253 	uint64_t *count = userdata;
2254 
2255 	rte_pktmbuf_free_bulk(pkts, unsent);
2256 	*count += unsent;
2257 }
2258 
2259 int
2260 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2261 		buffer_tx_error_fn cbfn, void *userdata)
2262 {
2263 	if (buffer == NULL) {
2264 		RTE_ETHDEV_LOG(ERR,
2265 			"Cannot set Tx buffer error callback to NULL buffer\n");
2266 		return -EINVAL;
2267 	}
2268 
2269 	buffer->error_callback = cbfn;
2270 	buffer->error_userdata = userdata;
2271 	return 0;
2272 }
2273 
2274 int
2275 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2276 {
2277 	int ret = 0;
2278 
2279 	if (buffer == NULL) {
2280 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2281 		return -EINVAL;
2282 	}
2283 
2284 	buffer->size = size;
2285 	if (buffer->error_callback == NULL) {
2286 		ret = rte_eth_tx_buffer_set_err_callback(
2287 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2288 	}
2289 
2290 	return ret;
2291 }
2292 
2293 int
2294 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2295 {
2296 	struct rte_eth_dev *dev;
2297 	int ret;
2298 
2299 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2300 	dev = &rte_eth_devices[port_id];
2301 
2302 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2303 		return -ENOTSUP;
2304 
2305 	/* Call driver to free pending mbufs. */
2306 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2307 					       free_cnt);
2308 	return eth_err(port_id, ret);
2309 }
2310 
2311 int
2312 rte_eth_promiscuous_enable(uint16_t port_id)
2313 {
2314 	struct rte_eth_dev *dev;
2315 	int diag = 0;
2316 
2317 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2318 	dev = &rte_eth_devices[port_id];
2319 
2320 	if (dev->data->promiscuous == 1)
2321 		return 0;
2322 
2323 	if (*dev->dev_ops->promiscuous_enable == NULL)
2324 		return -ENOTSUP;
2325 
2326 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2327 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2328 
2329 	return eth_err(port_id, diag);
2330 }
2331 
2332 int
2333 rte_eth_promiscuous_disable(uint16_t port_id)
2334 {
2335 	struct rte_eth_dev *dev;
2336 	int diag = 0;
2337 
2338 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2339 	dev = &rte_eth_devices[port_id];
2340 
2341 	if (dev->data->promiscuous == 0)
2342 		return 0;
2343 
2344 	if (*dev->dev_ops->promiscuous_disable == NULL)
2345 		return -ENOTSUP;
2346 
2347 	dev->data->promiscuous = 0;
2348 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2349 	if (diag != 0)
2350 		dev->data->promiscuous = 1;
2351 
2352 	return eth_err(port_id, diag);
2353 }
2354 
2355 int
2356 rte_eth_promiscuous_get(uint16_t port_id)
2357 {
2358 	struct rte_eth_dev *dev;
2359 
2360 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2361 	dev = &rte_eth_devices[port_id];
2362 
2363 	return dev->data->promiscuous;
2364 }
2365 
2366 int
2367 rte_eth_allmulticast_enable(uint16_t port_id)
2368 {
2369 	struct rte_eth_dev *dev;
2370 	int diag;
2371 
2372 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373 	dev = &rte_eth_devices[port_id];
2374 
2375 	if (dev->data->all_multicast == 1)
2376 		return 0;
2377 
2378 	if (*dev->dev_ops->allmulticast_enable == NULL)
2379 		return -ENOTSUP;
2380 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2381 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2382 
2383 	return eth_err(port_id, diag);
2384 }
2385 
2386 int
2387 rte_eth_allmulticast_disable(uint16_t port_id)
2388 {
2389 	struct rte_eth_dev *dev;
2390 	int diag;
2391 
2392 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2393 	dev = &rte_eth_devices[port_id];
2394 
2395 	if (dev->data->all_multicast == 0)
2396 		return 0;
2397 
2398 	if (*dev->dev_ops->allmulticast_disable == NULL)
2399 		return -ENOTSUP;
2400 	dev->data->all_multicast = 0;
2401 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2402 	if (diag != 0)
2403 		dev->data->all_multicast = 1;
2404 
2405 	return eth_err(port_id, diag);
2406 }
2407 
2408 int
2409 rte_eth_allmulticast_get(uint16_t port_id)
2410 {
2411 	struct rte_eth_dev *dev;
2412 
2413 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2414 	dev = &rte_eth_devices[port_id];
2415 
2416 	return dev->data->all_multicast;
2417 }
2418 
2419 int
2420 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2421 {
2422 	struct rte_eth_dev *dev;
2423 
2424 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2425 	dev = &rte_eth_devices[port_id];
2426 
2427 	if (eth_link == NULL) {
2428 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2429 			port_id);
2430 		return -EINVAL;
2431 	}
2432 
2433 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2434 		rte_eth_linkstatus_get(dev, eth_link);
2435 	else {
2436 		if (*dev->dev_ops->link_update == NULL)
2437 			return -ENOTSUP;
2438 		(*dev->dev_ops->link_update)(dev, 1);
2439 		*eth_link = dev->data->dev_link;
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 int
2446 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2447 {
2448 	struct rte_eth_dev *dev;
2449 
2450 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2451 	dev = &rte_eth_devices[port_id];
2452 
2453 	if (eth_link == NULL) {
2454 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2455 			port_id);
2456 		return -EINVAL;
2457 	}
2458 
2459 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2460 		rte_eth_linkstatus_get(dev, eth_link);
2461 	else {
2462 		if (*dev->dev_ops->link_update == NULL)
2463 			return -ENOTSUP;
2464 		(*dev->dev_ops->link_update)(dev, 0);
2465 		*eth_link = dev->data->dev_link;
2466 	}
2467 
2468 	return 0;
2469 }
2470 
2471 const char *
2472 rte_eth_link_speed_to_str(uint32_t link_speed)
2473 {
2474 	switch (link_speed) {
2475 	case RTE_ETH_SPEED_NUM_NONE: return "None";
2476 	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2477 	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2478 	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2479 	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2480 	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2481 	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2482 	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2483 	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2484 	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2485 	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2486 	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2487 	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2488 	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2489 	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2490 	default: return "Invalid";
2491 	}
2492 }
2493 
2494 int
2495 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2496 {
2497 	if (str == NULL) {
2498 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2499 		return -EINVAL;
2500 	}
2501 
2502 	if (len == 0) {
2503 		RTE_ETHDEV_LOG(ERR,
2504 			"Cannot convert link to string with zero size\n");
2505 		return -EINVAL;
2506 	}
2507 
2508 	if (eth_link == NULL) {
2509 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2510 		return -EINVAL;
2511 	}
2512 
2513 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2514 		return snprintf(str, len, "Link down");
2515 	else
2516 		return snprintf(str, len, "Link up at %s %s %s",
2517 			rte_eth_link_speed_to_str(eth_link->link_speed),
2518 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2519 			"FDX" : "HDX",
2520 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2521 			"Autoneg" : "Fixed");
2522 }
2523 
2524 int
2525 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2526 {
2527 	struct rte_eth_dev *dev;
2528 
2529 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2530 	dev = &rte_eth_devices[port_id];
2531 
2532 	if (stats == NULL) {
2533 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2534 			port_id);
2535 		return -EINVAL;
2536 	}
2537 
2538 	memset(stats, 0, sizeof(*stats));
2539 
2540 	if (*dev->dev_ops->stats_get == NULL)
2541 		return -ENOTSUP;
2542 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2543 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2544 }
2545 
2546 int
2547 rte_eth_stats_reset(uint16_t port_id)
2548 {
2549 	struct rte_eth_dev *dev;
2550 	int ret;
2551 
2552 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2553 	dev = &rte_eth_devices[port_id];
2554 
2555 	if (*dev->dev_ops->stats_reset == NULL)
2556 		return -ENOTSUP;
2557 	ret = (*dev->dev_ops->stats_reset)(dev);
2558 	if (ret != 0)
2559 		return eth_err(port_id, ret);
2560 
2561 	dev->data->rx_mbuf_alloc_failed = 0;
2562 
2563 	return 0;
2564 }
2565 
2566 static inline int
2567 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2568 {
2569 	uint16_t nb_rxqs, nb_txqs;
2570 	int count;
2571 
2572 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2573 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2574 
2575 	count = RTE_NB_STATS;
2576 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2577 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2578 		count += nb_txqs * RTE_NB_TXQ_STATS;
2579 	}
2580 
2581 	return count;
2582 }
2583 
2584 static int
2585 eth_dev_get_xstats_count(uint16_t port_id)
2586 {
2587 	struct rte_eth_dev *dev;
2588 	int count;
2589 
2590 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2591 	dev = &rte_eth_devices[port_id];
2592 	if (dev->dev_ops->xstats_get_names != NULL) {
2593 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2594 		if (count < 0)
2595 			return eth_err(port_id, count);
2596 	} else
2597 		count = 0;
2598 
2599 
2600 	count += eth_dev_get_xstats_basic_count(dev);
2601 
2602 	return count;
2603 }
2604 
2605 int
2606 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2607 		uint64_t *id)
2608 {
2609 	int cnt_xstats, idx_xstat;
2610 
2611 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2612 
2613 	if (xstat_name == NULL) {
2614 		RTE_ETHDEV_LOG(ERR,
2615 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2616 			port_id);
2617 		return -ENOMEM;
2618 	}
2619 
2620 	if (id == NULL) {
2621 		RTE_ETHDEV_LOG(ERR,
2622 			"Cannot get ethdev port %u xstats ID to NULL\n",
2623 			port_id);
2624 		return -ENOMEM;
2625 	}
2626 
2627 	/* Get count */
2628 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2629 	if (cnt_xstats  < 0) {
2630 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2631 		return -ENODEV;
2632 	}
2633 
2634 	/* Get id-name lookup table */
2635 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2636 
2637 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2638 			port_id, xstats_names, cnt_xstats, NULL)) {
2639 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2640 		return -1;
2641 	}
2642 
2643 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2644 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2645 			*id = idx_xstat;
2646 			return 0;
2647 		};
2648 	}
2649 
2650 	return -EINVAL;
2651 }
2652 
2653 /* retrieve basic stats names */
2654 static int
2655 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2656 	struct rte_eth_xstat_name *xstats_names)
2657 {
2658 	int cnt_used_entries = 0;
2659 	uint32_t idx, id_queue;
2660 	uint16_t num_q;
2661 
2662 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2663 		strlcpy(xstats_names[cnt_used_entries].name,
2664 			eth_dev_stats_strings[idx].name,
2665 			sizeof(xstats_names[0].name));
2666 		cnt_used_entries++;
2667 	}
2668 
2669 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2670 		return cnt_used_entries;
2671 
2672 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2673 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2674 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2675 			snprintf(xstats_names[cnt_used_entries].name,
2676 				sizeof(xstats_names[0].name),
2677 				"rx_q%u_%s",
2678 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2679 			cnt_used_entries++;
2680 		}
2681 
2682 	}
2683 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2684 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2685 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2686 			snprintf(xstats_names[cnt_used_entries].name,
2687 				sizeof(xstats_names[0].name),
2688 				"tx_q%u_%s",
2689 				id_queue, eth_dev_txq_stats_strings[idx].name);
2690 			cnt_used_entries++;
2691 		}
2692 	}
2693 	return cnt_used_entries;
2694 }
2695 
2696 /* retrieve ethdev extended statistics names */
2697 int
2698 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2699 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2700 	uint64_t *ids)
2701 {
2702 	struct rte_eth_xstat_name *xstats_names_copy;
2703 	unsigned int no_basic_stat_requested = 1;
2704 	unsigned int no_ext_stat_requested = 1;
2705 	unsigned int expected_entries;
2706 	unsigned int basic_count;
2707 	struct rte_eth_dev *dev;
2708 	unsigned int i;
2709 	int ret;
2710 
2711 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2712 	dev = &rte_eth_devices[port_id];
2713 
2714 	basic_count = eth_dev_get_xstats_basic_count(dev);
2715 	ret = eth_dev_get_xstats_count(port_id);
2716 	if (ret < 0)
2717 		return ret;
2718 	expected_entries = (unsigned int)ret;
2719 
2720 	/* Return max number of stats if no ids given */
2721 	if (!ids) {
2722 		if (!xstats_names)
2723 			return expected_entries;
2724 		else if (xstats_names && size < expected_entries)
2725 			return expected_entries;
2726 	}
2727 
2728 	if (ids && !xstats_names)
2729 		return -EINVAL;
2730 
2731 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2732 		uint64_t ids_copy[size];
2733 
2734 		for (i = 0; i < size; i++) {
2735 			if (ids[i] < basic_count) {
2736 				no_basic_stat_requested = 0;
2737 				break;
2738 			}
2739 
2740 			/*
2741 			 * Convert ids to xstats ids that PMD knows.
2742 			 * ids known by user are basic + extended stats.
2743 			 */
2744 			ids_copy[i] = ids[i] - basic_count;
2745 		}
2746 
2747 		if (no_basic_stat_requested)
2748 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2749 					ids_copy, xstats_names, size);
2750 	}
2751 
2752 	/* Retrieve all stats */
2753 	if (!ids) {
2754 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2755 				expected_entries);
2756 		if (num_stats < 0 || num_stats > (int)expected_entries)
2757 			return num_stats;
2758 		else
2759 			return expected_entries;
2760 	}
2761 
2762 	xstats_names_copy = calloc(expected_entries,
2763 		sizeof(struct rte_eth_xstat_name));
2764 
2765 	if (!xstats_names_copy) {
2766 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2767 		return -ENOMEM;
2768 	}
2769 
2770 	if (ids) {
2771 		for (i = 0; i < size; i++) {
2772 			if (ids[i] >= basic_count) {
2773 				no_ext_stat_requested = 0;
2774 				break;
2775 			}
2776 		}
2777 	}
2778 
2779 	/* Fill xstats_names_copy structure */
2780 	if (ids && no_ext_stat_requested) {
2781 		eth_basic_stats_get_names(dev, xstats_names_copy);
2782 	} else {
2783 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2784 			expected_entries);
2785 		if (ret < 0) {
2786 			free(xstats_names_copy);
2787 			return ret;
2788 		}
2789 	}
2790 
2791 	/* Filter stats */
2792 	for (i = 0; i < size; i++) {
2793 		if (ids[i] >= expected_entries) {
2794 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2795 			free(xstats_names_copy);
2796 			return -1;
2797 		}
2798 		xstats_names[i] = xstats_names_copy[ids[i]];
2799 	}
2800 
2801 	free(xstats_names_copy);
2802 	return size;
2803 }
2804 
2805 int
2806 rte_eth_xstats_get_names(uint16_t port_id,
2807 	struct rte_eth_xstat_name *xstats_names,
2808 	unsigned int size)
2809 {
2810 	struct rte_eth_dev *dev;
2811 	int cnt_used_entries;
2812 	int cnt_expected_entries;
2813 	int cnt_driver_entries;
2814 
2815 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2816 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
2817 			(int)size < cnt_expected_entries)
2818 		return cnt_expected_entries;
2819 
2820 	/* port_id checked in eth_dev_get_xstats_count() */
2821 	dev = &rte_eth_devices[port_id];
2822 
2823 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2824 
2825 	if (dev->dev_ops->xstats_get_names != NULL) {
2826 		/* If there are any driver-specific xstats, append them
2827 		 * to end of list.
2828 		 */
2829 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2830 			dev,
2831 			xstats_names + cnt_used_entries,
2832 			size - cnt_used_entries);
2833 		if (cnt_driver_entries < 0)
2834 			return eth_err(port_id, cnt_driver_entries);
2835 		cnt_used_entries += cnt_driver_entries;
2836 	}
2837 
2838 	return cnt_used_entries;
2839 }
2840 
2841 
2842 static int
2843 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2844 {
2845 	struct rte_eth_dev *dev;
2846 	struct rte_eth_stats eth_stats;
2847 	unsigned int count = 0, i, q;
2848 	uint64_t val, *stats_ptr;
2849 	uint16_t nb_rxqs, nb_txqs;
2850 	int ret;
2851 
2852 	ret = rte_eth_stats_get(port_id, &eth_stats);
2853 	if (ret < 0)
2854 		return ret;
2855 
2856 	dev = &rte_eth_devices[port_id];
2857 
2858 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2859 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2860 
2861 	/* global stats */
2862 	for (i = 0; i < RTE_NB_STATS; i++) {
2863 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2864 					eth_dev_stats_strings[i].offset);
2865 		val = *stats_ptr;
2866 		xstats[count++].value = val;
2867 	}
2868 
2869 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2870 		return count;
2871 
2872 	/* per-rxq stats */
2873 	for (q = 0; q < nb_rxqs; q++) {
2874 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2875 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2876 					eth_dev_rxq_stats_strings[i].offset +
2877 					q * sizeof(uint64_t));
2878 			val = *stats_ptr;
2879 			xstats[count++].value = val;
2880 		}
2881 	}
2882 
2883 	/* per-txq stats */
2884 	for (q = 0; q < nb_txqs; q++) {
2885 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2886 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2887 					eth_dev_txq_stats_strings[i].offset +
2888 					q * sizeof(uint64_t));
2889 			val = *stats_ptr;
2890 			xstats[count++].value = val;
2891 		}
2892 	}
2893 	return count;
2894 }
2895 
2896 /* retrieve ethdev extended statistics */
2897 int
2898 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2899 			 uint64_t *values, unsigned int size)
2900 {
2901 	unsigned int no_basic_stat_requested = 1;
2902 	unsigned int no_ext_stat_requested = 1;
2903 	unsigned int num_xstats_filled;
2904 	unsigned int basic_count;
2905 	uint16_t expected_entries;
2906 	struct rte_eth_dev *dev;
2907 	unsigned int i;
2908 	int ret;
2909 
2910 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2911 	dev = &rte_eth_devices[port_id];
2912 
2913 	ret = eth_dev_get_xstats_count(port_id);
2914 	if (ret < 0)
2915 		return ret;
2916 	expected_entries = (uint16_t)ret;
2917 	struct rte_eth_xstat xstats[expected_entries];
2918 	basic_count = eth_dev_get_xstats_basic_count(dev);
2919 
2920 	/* Return max number of stats if no ids given */
2921 	if (!ids) {
2922 		if (!values)
2923 			return expected_entries;
2924 		else if (values && size < expected_entries)
2925 			return expected_entries;
2926 	}
2927 
2928 	if (ids && !values)
2929 		return -EINVAL;
2930 
2931 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2932 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
2933 		uint64_t ids_copy[size];
2934 
2935 		for (i = 0; i < size; i++) {
2936 			if (ids[i] < basic_count) {
2937 				no_basic_stat_requested = 0;
2938 				break;
2939 			}
2940 
2941 			/*
2942 			 * Convert ids to xstats ids that PMD knows.
2943 			 * ids known by user are basic + extended stats.
2944 			 */
2945 			ids_copy[i] = ids[i] - basic_count;
2946 		}
2947 
2948 		if (no_basic_stat_requested)
2949 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2950 					values, size);
2951 	}
2952 
2953 	if (ids) {
2954 		for (i = 0; i < size; i++) {
2955 			if (ids[i] >= basic_count) {
2956 				no_ext_stat_requested = 0;
2957 				break;
2958 			}
2959 		}
2960 	}
2961 
2962 	/* Fill the xstats structure */
2963 	if (ids && no_ext_stat_requested)
2964 		ret = eth_basic_stats_get(port_id, xstats);
2965 	else
2966 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2967 
2968 	if (ret < 0)
2969 		return ret;
2970 	num_xstats_filled = (unsigned int)ret;
2971 
2972 	/* Return all stats */
2973 	if (!ids) {
2974 		for (i = 0; i < num_xstats_filled; i++)
2975 			values[i] = xstats[i].value;
2976 		return expected_entries;
2977 	}
2978 
2979 	/* Filter stats */
2980 	for (i = 0; i < size; i++) {
2981 		if (ids[i] >= expected_entries) {
2982 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2983 			return -1;
2984 		}
2985 		values[i] = xstats[ids[i]].value;
2986 	}
2987 	return size;
2988 }
2989 
2990 int
2991 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2992 	unsigned int n)
2993 {
2994 	struct rte_eth_dev *dev;
2995 	unsigned int count, i;
2996 	signed int xcount = 0;
2997 	int ret;
2998 
2999 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3000 	if (xstats == NULL && n > 0)
3001 		return -EINVAL;
3002 	dev = &rte_eth_devices[port_id];
3003 
3004 	count = eth_dev_get_xstats_basic_count(dev);
3005 
3006 	/* implemented by the driver */
3007 	if (dev->dev_ops->xstats_get != NULL) {
3008 		/* Retrieve the xstats from the driver at the end of the
3009 		 * xstats struct.
3010 		 */
3011 		xcount = (*dev->dev_ops->xstats_get)(dev,
3012 				     (n > count) ? xstats + count : NULL,
3013 				     (n > count) ? n - count : 0);
3014 
3015 		if (xcount < 0)
3016 			return eth_err(port_id, xcount);
3017 	}
3018 
3019 	if (n < count + xcount || xstats == NULL)
3020 		return count + xcount;
3021 
3022 	/* now fill the xstats structure */
3023 	ret = eth_basic_stats_get(port_id, xstats);
3024 	if (ret < 0)
3025 		return ret;
3026 	count = ret;
3027 
3028 	for (i = 0; i < count; i++)
3029 		xstats[i].id = i;
3030 	/* add an offset to driver-specific stats */
3031 	for ( ; i < count + xcount; i++)
3032 		xstats[i].id += count;
3033 
3034 	return count + xcount;
3035 }
3036 
3037 /* reset ethdev extended statistics */
3038 int
3039 rte_eth_xstats_reset(uint16_t port_id)
3040 {
3041 	struct rte_eth_dev *dev;
3042 
3043 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3044 	dev = &rte_eth_devices[port_id];
3045 
3046 	/* implemented by the driver */
3047 	if (dev->dev_ops->xstats_reset != NULL)
3048 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3049 
3050 	/* fallback to default */
3051 	return rte_eth_stats_reset(port_id);
3052 }
3053 
3054 static int
3055 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3056 		uint8_t stat_idx, uint8_t is_rx)
3057 {
3058 	struct rte_eth_dev *dev;
3059 
3060 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3061 	dev = &rte_eth_devices[port_id];
3062 
3063 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3064 		return -EINVAL;
3065 
3066 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3067 		return -EINVAL;
3068 
3069 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3070 		return -EINVAL;
3071 
3072 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3073 		return -ENOTSUP;
3074 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3075 }
3076 
3077 int
3078 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3079 		uint8_t stat_idx)
3080 {
3081 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3082 						tx_queue_id,
3083 						stat_idx, STAT_QMAP_TX));
3084 }
3085 
3086 int
3087 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3088 		uint8_t stat_idx)
3089 {
3090 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3091 						rx_queue_id,
3092 						stat_idx, STAT_QMAP_RX));
3093 }
3094 
3095 int
3096 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3097 {
3098 	struct rte_eth_dev *dev;
3099 
3100 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3101 	dev = &rte_eth_devices[port_id];
3102 
3103 	if (fw_version == NULL && fw_size > 0) {
3104 		RTE_ETHDEV_LOG(ERR,
3105 			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3106 			port_id);
3107 		return -EINVAL;
3108 	}
3109 
3110 	if (*dev->dev_ops->fw_version_get == NULL)
3111 		return -ENOTSUP;
3112 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3113 							fw_version, fw_size));
3114 }
3115 
3116 int
3117 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3118 {
3119 	struct rte_eth_dev *dev;
3120 	const struct rte_eth_desc_lim lim = {
3121 		.nb_max = UINT16_MAX,
3122 		.nb_min = 0,
3123 		.nb_align = 1,
3124 		.nb_seg_max = UINT16_MAX,
3125 		.nb_mtu_seg_max = UINT16_MAX,
3126 	};
3127 	int diag;
3128 
3129 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3130 	dev = &rte_eth_devices[port_id];
3131 
3132 	if (dev_info == NULL) {
3133 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3134 			port_id);
3135 		return -EINVAL;
3136 	}
3137 
3138 	/*
3139 	 * Init dev_info before port_id check since caller does not have
3140 	 * return status and does not know if get is successful or not.
3141 	 */
3142 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3143 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3144 
3145 	dev_info->rx_desc_lim = lim;
3146 	dev_info->tx_desc_lim = lim;
3147 	dev_info->device = dev->device;
3148 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3149 		RTE_ETHER_CRC_LEN;
3150 	dev_info->max_mtu = UINT16_MAX;
3151 
3152 	if (*dev->dev_ops->dev_infos_get == NULL)
3153 		return -ENOTSUP;
3154 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3155 	if (diag != 0) {
3156 		/* Cleanup already filled in device information */
3157 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3158 		return eth_err(port_id, diag);
3159 	}
3160 
3161 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3162 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3163 			RTE_MAX_QUEUES_PER_PORT);
3164 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3165 			RTE_MAX_QUEUES_PER_PORT);
3166 
3167 	dev_info->driver_name = dev->device->driver->name;
3168 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3169 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3170 
3171 	dev_info->dev_flags = &dev->data->dev_flags;
3172 
3173 	return 0;
3174 }
3175 
3176 int
3177 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3178 {
3179 	struct rte_eth_dev *dev;
3180 
3181 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3182 	dev = &rte_eth_devices[port_id];
3183 
3184 	if (dev_conf == NULL) {
3185 		RTE_ETHDEV_LOG(ERR,
3186 			"Cannot get ethdev port %u configuration to NULL\n",
3187 			port_id);
3188 		return -EINVAL;
3189 	}
3190 
3191 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3192 
3193 	return 0;
3194 }
3195 
3196 int
3197 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3198 				 uint32_t *ptypes, int num)
3199 {
3200 	int i, j;
3201 	struct rte_eth_dev *dev;
3202 	const uint32_t *all_ptypes;
3203 
3204 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3205 	dev = &rte_eth_devices[port_id];
3206 
3207 	if (ptypes == NULL && num > 0) {
3208 		RTE_ETHDEV_LOG(ERR,
3209 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3210 			port_id);
3211 		return -EINVAL;
3212 	}
3213 
3214 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3215 		return 0;
3216 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3217 
3218 	if (!all_ptypes)
3219 		return 0;
3220 
3221 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3222 		if (all_ptypes[i] & ptype_mask) {
3223 			if (j < num)
3224 				ptypes[j] = all_ptypes[i];
3225 			j++;
3226 		}
3227 
3228 	return j;
3229 }
3230 
3231 int
3232 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3233 				 uint32_t *set_ptypes, unsigned int num)
3234 {
3235 	const uint32_t valid_ptype_masks[] = {
3236 		RTE_PTYPE_L2_MASK,
3237 		RTE_PTYPE_L3_MASK,
3238 		RTE_PTYPE_L4_MASK,
3239 		RTE_PTYPE_TUNNEL_MASK,
3240 		RTE_PTYPE_INNER_L2_MASK,
3241 		RTE_PTYPE_INNER_L3_MASK,
3242 		RTE_PTYPE_INNER_L4_MASK,
3243 	};
3244 	const uint32_t *all_ptypes;
3245 	struct rte_eth_dev *dev;
3246 	uint32_t unused_mask;
3247 	unsigned int i, j;
3248 	int ret;
3249 
3250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3251 	dev = &rte_eth_devices[port_id];
3252 
3253 	if (num > 0 && set_ptypes == NULL) {
3254 		RTE_ETHDEV_LOG(ERR,
3255 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3256 			port_id);
3257 		return -EINVAL;
3258 	}
3259 
3260 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3261 			*dev->dev_ops->dev_ptypes_set == NULL) {
3262 		ret = 0;
3263 		goto ptype_unknown;
3264 	}
3265 
3266 	if (ptype_mask == 0) {
3267 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3268 				ptype_mask);
3269 		goto ptype_unknown;
3270 	}
3271 
3272 	unused_mask = ptype_mask;
3273 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3274 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3275 		if (mask && mask != valid_ptype_masks[i]) {
3276 			ret = -EINVAL;
3277 			goto ptype_unknown;
3278 		}
3279 		unused_mask &= ~valid_ptype_masks[i];
3280 	}
3281 
3282 	if (unused_mask) {
3283 		ret = -EINVAL;
3284 		goto ptype_unknown;
3285 	}
3286 
3287 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3288 	if (all_ptypes == NULL) {
3289 		ret = 0;
3290 		goto ptype_unknown;
3291 	}
3292 
3293 	/*
3294 	 * Accommodate as many set_ptypes as possible. If the supplied
3295 	 * set_ptypes array is insufficient fill it partially.
3296 	 */
3297 	for (i = 0, j = 0; set_ptypes != NULL &&
3298 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3299 		if (ptype_mask & all_ptypes[i]) {
3300 			if (j < num - 1) {
3301 				set_ptypes[j] = all_ptypes[i];
3302 				j++;
3303 				continue;
3304 			}
3305 			break;
3306 		}
3307 	}
3308 
3309 	if (set_ptypes != NULL && j < num)
3310 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3311 
3312 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3313 
3314 ptype_unknown:
3315 	if (num > 0)
3316 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3317 
3318 	return ret;
3319 }
3320 
3321 int
3322 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3323 	unsigned int num)
3324 {
3325 	int32_t ret;
3326 	struct rte_eth_dev *dev;
3327 	struct rte_eth_dev_info dev_info;
3328 
3329 	if (ma == NULL) {
3330 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3331 		return -EINVAL;
3332 	}
3333 
3334 	/* will check for us that port_id is a valid one */
3335 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3336 	if (ret != 0)
3337 		return ret;
3338 
3339 	dev = &rte_eth_devices[port_id];
3340 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3341 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3342 
3343 	return num;
3344 }
3345 
3346 int
3347 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3348 {
3349 	struct rte_eth_dev *dev;
3350 
3351 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3352 	dev = &rte_eth_devices[port_id];
3353 
3354 	if (mac_addr == NULL) {
3355 		RTE_ETHDEV_LOG(ERR,
3356 			"Cannot get ethdev port %u MAC address to NULL\n",
3357 			port_id);
3358 		return -EINVAL;
3359 	}
3360 
3361 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3362 
3363 	return 0;
3364 }
3365 
3366 int
3367 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3368 {
3369 	struct rte_eth_dev *dev;
3370 
3371 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3372 	dev = &rte_eth_devices[port_id];
3373 
3374 	if (mtu == NULL) {
3375 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3376 			port_id);
3377 		return -EINVAL;
3378 	}
3379 
3380 	*mtu = dev->data->mtu;
3381 	return 0;
3382 }
3383 
3384 int
3385 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3386 {
3387 	int ret;
3388 	struct rte_eth_dev_info dev_info;
3389 	struct rte_eth_dev *dev;
3390 
3391 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3392 	dev = &rte_eth_devices[port_id];
3393 	if (*dev->dev_ops->mtu_set == NULL)
3394 		return -ENOTSUP;
3395 
3396 	/*
3397 	 * Check if the device supports dev_infos_get, if it does not
3398 	 * skip min_mtu/max_mtu validation here as this requires values
3399 	 * that are populated within the call to rte_eth_dev_info_get()
3400 	 * which relies on dev->dev_ops->dev_infos_get.
3401 	 */
3402 	if (*dev->dev_ops->dev_infos_get != NULL) {
3403 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3404 		if (ret != 0)
3405 			return ret;
3406 
3407 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3408 		if (ret != 0)
3409 			return ret;
3410 	}
3411 
3412 	if (dev->data->dev_configured == 0) {
3413 		RTE_ETHDEV_LOG(ERR,
3414 			"Port %u must be configured before MTU set\n",
3415 			port_id);
3416 		return -EINVAL;
3417 	}
3418 
3419 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3420 	if (ret == 0)
3421 		dev->data->mtu = mtu;
3422 
3423 	return eth_err(port_id, ret);
3424 }
3425 
3426 int
3427 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3428 {
3429 	struct rte_eth_dev *dev;
3430 	int ret;
3431 
3432 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3433 	dev = &rte_eth_devices[port_id];
3434 
3435 	if (!(dev->data->dev_conf.rxmode.offloads &
3436 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3437 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3438 			port_id);
3439 		return -ENOSYS;
3440 	}
3441 
3442 	if (vlan_id > 4095) {
3443 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3444 			port_id, vlan_id);
3445 		return -EINVAL;
3446 	}
3447 	if (*dev->dev_ops->vlan_filter_set == NULL)
3448 		return -ENOTSUP;
3449 
3450 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3451 	if (ret == 0) {
3452 		struct rte_vlan_filter_conf *vfc;
3453 		int vidx;
3454 		int vbit;
3455 
3456 		vfc = &dev->data->vlan_filter_conf;
3457 		vidx = vlan_id / 64;
3458 		vbit = vlan_id % 64;
3459 
3460 		if (on)
3461 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3462 		else
3463 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3464 	}
3465 
3466 	return eth_err(port_id, ret);
3467 }
3468 
3469 int
3470 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3471 				    int on)
3472 {
3473 	struct rte_eth_dev *dev;
3474 
3475 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3476 	dev = &rte_eth_devices[port_id];
3477 
3478 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3479 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3480 		return -EINVAL;
3481 	}
3482 
3483 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
3484 		return -ENOTSUP;
3485 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3486 
3487 	return 0;
3488 }
3489 
3490 int
3491 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3492 				enum rte_vlan_type vlan_type,
3493 				uint16_t tpid)
3494 {
3495 	struct rte_eth_dev *dev;
3496 
3497 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3498 	dev = &rte_eth_devices[port_id];
3499 
3500 	if (*dev->dev_ops->vlan_tpid_set == NULL)
3501 		return -ENOTSUP;
3502 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3503 							       tpid));
3504 }
3505 
3506 int
3507 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3508 {
3509 	struct rte_eth_dev_info dev_info;
3510 	struct rte_eth_dev *dev;
3511 	int ret = 0;
3512 	int mask = 0;
3513 	int cur, org = 0;
3514 	uint64_t orig_offloads;
3515 	uint64_t dev_offloads;
3516 	uint64_t new_offloads;
3517 
3518 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3519 	dev = &rte_eth_devices[port_id];
3520 
3521 	/* save original values in case of failure */
3522 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3523 	dev_offloads = orig_offloads;
3524 
3525 	/* check which option changed by application */
3526 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3527 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3528 	if (cur != org) {
3529 		if (cur)
3530 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3531 		else
3532 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3533 		mask |= RTE_ETH_VLAN_STRIP_MASK;
3534 	}
3535 
3536 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3537 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3538 	if (cur != org) {
3539 		if (cur)
3540 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3541 		else
3542 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3543 		mask |= RTE_ETH_VLAN_FILTER_MASK;
3544 	}
3545 
3546 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3547 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3548 	if (cur != org) {
3549 		if (cur)
3550 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3551 		else
3552 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3553 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
3554 	}
3555 
3556 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3557 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3558 	if (cur != org) {
3559 		if (cur)
3560 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3561 		else
3562 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3563 		mask |= RTE_ETH_QINQ_STRIP_MASK;
3564 	}
3565 
3566 	/*no change*/
3567 	if (mask == 0)
3568 		return ret;
3569 
3570 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3571 	if (ret != 0)
3572 		return ret;
3573 
3574 	/* Rx VLAN offloading must be within its device capabilities */
3575 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3576 		new_offloads = dev_offloads & ~orig_offloads;
3577 		RTE_ETHDEV_LOG(ERR,
3578 			"Ethdev port_id=%u requested new added VLAN offloads "
3579 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3580 			"0x%" PRIx64 " in %s()\n",
3581 			port_id, new_offloads, dev_info.rx_offload_capa,
3582 			__func__);
3583 		return -EINVAL;
3584 	}
3585 
3586 	if (*dev->dev_ops->vlan_offload_set == NULL)
3587 		return -ENOTSUP;
3588 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3589 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3590 	if (ret) {
3591 		/* hit an error restore  original values */
3592 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
3593 	}
3594 
3595 	return eth_err(port_id, ret);
3596 }
3597 
3598 int
3599 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3600 {
3601 	struct rte_eth_dev *dev;
3602 	uint64_t *dev_offloads;
3603 	int ret = 0;
3604 
3605 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3606 	dev = &rte_eth_devices[port_id];
3607 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3608 
3609 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3610 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3611 
3612 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3613 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3614 
3615 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3616 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3617 
3618 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3619 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3620 
3621 	return ret;
3622 }
3623 
3624 int
3625 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3626 {
3627 	struct rte_eth_dev *dev;
3628 
3629 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3630 	dev = &rte_eth_devices[port_id];
3631 
3632 	if (*dev->dev_ops->vlan_pvid_set == NULL)
3633 		return -ENOTSUP;
3634 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3635 }
3636 
3637 int
3638 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3639 {
3640 	struct rte_eth_dev *dev;
3641 
3642 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3643 	dev = &rte_eth_devices[port_id];
3644 
3645 	if (fc_conf == NULL) {
3646 		RTE_ETHDEV_LOG(ERR,
3647 			"Cannot get ethdev port %u flow control config to NULL\n",
3648 			port_id);
3649 		return -EINVAL;
3650 	}
3651 
3652 	if (*dev->dev_ops->flow_ctrl_get == NULL)
3653 		return -ENOTSUP;
3654 	memset(fc_conf, 0, sizeof(*fc_conf));
3655 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3656 }
3657 
3658 int
3659 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3660 {
3661 	struct rte_eth_dev *dev;
3662 
3663 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3664 	dev = &rte_eth_devices[port_id];
3665 
3666 	if (fc_conf == NULL) {
3667 		RTE_ETHDEV_LOG(ERR,
3668 			"Cannot set ethdev port %u flow control from NULL config\n",
3669 			port_id);
3670 		return -EINVAL;
3671 	}
3672 
3673 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3674 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3675 		return -EINVAL;
3676 	}
3677 
3678 	if (*dev->dev_ops->flow_ctrl_set == NULL)
3679 		return -ENOTSUP;
3680 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3681 }
3682 
3683 int
3684 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3685 				   struct rte_eth_pfc_conf *pfc_conf)
3686 {
3687 	struct rte_eth_dev *dev;
3688 
3689 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3690 	dev = &rte_eth_devices[port_id];
3691 
3692 	if (pfc_conf == NULL) {
3693 		RTE_ETHDEV_LOG(ERR,
3694 			"Cannot set ethdev port %u priority flow control from NULL config\n",
3695 			port_id);
3696 		return -EINVAL;
3697 	}
3698 
3699 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3700 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3701 		return -EINVAL;
3702 	}
3703 
3704 	/* High water, low water validation are device specific */
3705 	if  (*dev->dev_ops->priority_flow_ctrl_set)
3706 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3707 					(dev, pfc_conf));
3708 	return -ENOTSUP;
3709 }
3710 
3711 static int
3712 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3713 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3714 {
3715 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3716 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3717 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3718 			RTE_ETHDEV_LOG(ERR,
3719 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3720 				pfc_queue_conf->rx_pause.tx_qid,
3721 				dev_info->nb_tx_queues);
3722 			return -EINVAL;
3723 		}
3724 
3725 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3726 			RTE_ETHDEV_LOG(ERR,
3727 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
3728 				pfc_queue_conf->rx_pause.tc, tc_max);
3729 			return -EINVAL;
3730 		}
3731 	}
3732 
3733 	return 0;
3734 }
3735 
3736 static int
3737 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3738 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3739 {
3740 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3741 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3742 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3743 			RTE_ETHDEV_LOG(ERR,
3744 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3745 				pfc_queue_conf->tx_pause.rx_qid,
3746 				dev_info->nb_rx_queues);
3747 			return -EINVAL;
3748 		}
3749 
3750 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3751 			RTE_ETHDEV_LOG(ERR,
3752 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
3753 				pfc_queue_conf->tx_pause.tc, tc_max);
3754 			return -EINVAL;
3755 		}
3756 	}
3757 
3758 	return 0;
3759 }
3760 
3761 int
3762 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3763 		struct rte_eth_pfc_queue_info *pfc_queue_info)
3764 {
3765 	struct rte_eth_dev *dev;
3766 
3767 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3768 	dev = &rte_eth_devices[port_id];
3769 
3770 	if (pfc_queue_info == NULL) {
3771 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3772 			port_id);
3773 		return -EINVAL;
3774 	}
3775 
3776 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3777 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3778 			(dev, pfc_queue_info));
3779 	return -ENOTSUP;
3780 }
3781 
3782 int
3783 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3784 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3785 {
3786 	struct rte_eth_pfc_queue_info pfc_info;
3787 	struct rte_eth_dev_info dev_info;
3788 	struct rte_eth_dev *dev;
3789 	int ret;
3790 
3791 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3792 	dev = &rte_eth_devices[port_id];
3793 
3794 	if (pfc_queue_conf == NULL) {
3795 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3796 			port_id);
3797 		return -EINVAL;
3798 	}
3799 
3800 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3801 	if (ret != 0)
3802 		return ret;
3803 
3804 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3805 	if (ret != 0)
3806 		return ret;
3807 
3808 	if (pfc_info.tc_max == 0) {
3809 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3810 			port_id);
3811 		return -ENOTSUP;
3812 	}
3813 
3814 	/* Check requested mode supported or not */
3815 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3816 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3817 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3818 			port_id);
3819 		return -EINVAL;
3820 	}
3821 
3822 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3823 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3824 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3825 			port_id);
3826 		return -EINVAL;
3827 	}
3828 
3829 	/* Validate Rx pause parameters */
3830 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3831 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3832 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3833 				pfc_queue_conf);
3834 		if (ret != 0)
3835 			return ret;
3836 	}
3837 
3838 	/* Validate Tx pause parameters */
3839 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3840 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3841 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3842 				pfc_queue_conf);
3843 		if (ret != 0)
3844 			return ret;
3845 	}
3846 
3847 	if (*dev->dev_ops->priority_flow_ctrl_queue_config)
3848 		return eth_err(port_id,
3849 			       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3850 				dev, pfc_queue_conf));
3851 	return -ENOTSUP;
3852 }
3853 
3854 static int
3855 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3856 			uint16_t reta_size)
3857 {
3858 	uint16_t i, num;
3859 
3860 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3861 	for (i = 0; i < num; i++) {
3862 		if (reta_conf[i].mask)
3863 			return 0;
3864 	}
3865 
3866 	return -EINVAL;
3867 }
3868 
3869 static int
3870 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3871 			 uint16_t reta_size,
3872 			 uint16_t max_rxq)
3873 {
3874 	uint16_t i, idx, shift;
3875 
3876 	if (max_rxq == 0) {
3877 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3878 		return -EINVAL;
3879 	}
3880 
3881 	for (i = 0; i < reta_size; i++) {
3882 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3883 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3884 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3885 			(reta_conf[idx].reta[shift] >= max_rxq)) {
3886 			RTE_ETHDEV_LOG(ERR,
3887 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3888 				idx, shift,
3889 				reta_conf[idx].reta[shift], max_rxq);
3890 			return -EINVAL;
3891 		}
3892 	}
3893 
3894 	return 0;
3895 }
3896 
3897 int
3898 rte_eth_dev_rss_reta_update(uint16_t port_id,
3899 			    struct rte_eth_rss_reta_entry64 *reta_conf,
3900 			    uint16_t reta_size)
3901 {
3902 	enum rte_eth_rx_mq_mode mq_mode;
3903 	struct rte_eth_dev *dev;
3904 	int ret;
3905 
3906 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3907 	dev = &rte_eth_devices[port_id];
3908 
3909 	if (reta_conf == NULL) {
3910 		RTE_ETHDEV_LOG(ERR,
3911 			"Cannot update ethdev port %u RSS RETA to NULL\n",
3912 			port_id);
3913 		return -EINVAL;
3914 	}
3915 
3916 	if (reta_size == 0) {
3917 		RTE_ETHDEV_LOG(ERR,
3918 			"Cannot update ethdev port %u RSS RETA with zero size\n",
3919 			port_id);
3920 		return -EINVAL;
3921 	}
3922 
3923 	/* Check mask bits */
3924 	ret = eth_check_reta_mask(reta_conf, reta_size);
3925 	if (ret < 0)
3926 		return ret;
3927 
3928 	/* Check entry value */
3929 	ret = eth_check_reta_entry(reta_conf, reta_size,
3930 				dev->data->nb_rx_queues);
3931 	if (ret < 0)
3932 		return ret;
3933 
3934 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3935 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3936 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3937 		return -ENOTSUP;
3938 	}
3939 
3940 	if (*dev->dev_ops->reta_update == NULL)
3941 		return -ENOTSUP;
3942 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3943 							     reta_size));
3944 }
3945 
3946 int
3947 rte_eth_dev_rss_reta_query(uint16_t port_id,
3948 			   struct rte_eth_rss_reta_entry64 *reta_conf,
3949 			   uint16_t reta_size)
3950 {
3951 	struct rte_eth_dev *dev;
3952 	int ret;
3953 
3954 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3955 	dev = &rte_eth_devices[port_id];
3956 
3957 	if (reta_conf == NULL) {
3958 		RTE_ETHDEV_LOG(ERR,
3959 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
3960 			port_id);
3961 		return -EINVAL;
3962 	}
3963 
3964 	/* Check mask bits */
3965 	ret = eth_check_reta_mask(reta_conf, reta_size);
3966 	if (ret < 0)
3967 		return ret;
3968 
3969 	if (*dev->dev_ops->reta_query == NULL)
3970 		return -ENOTSUP;
3971 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3972 							    reta_size));
3973 }
3974 
3975 int
3976 rte_eth_dev_rss_hash_update(uint16_t port_id,
3977 			    struct rte_eth_rss_conf *rss_conf)
3978 {
3979 	struct rte_eth_dev *dev;
3980 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3981 	enum rte_eth_rx_mq_mode mq_mode;
3982 	int ret;
3983 
3984 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3985 	dev = &rte_eth_devices[port_id];
3986 
3987 	if (rss_conf == NULL) {
3988 		RTE_ETHDEV_LOG(ERR,
3989 			"Cannot update ethdev port %u RSS hash from NULL config\n",
3990 			port_id);
3991 		return -EINVAL;
3992 	}
3993 
3994 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3995 	if (ret != 0)
3996 		return ret;
3997 
3998 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3999 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4000 	    dev_info.flow_type_rss_offloads) {
4001 		RTE_ETHDEV_LOG(ERR,
4002 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4003 			port_id, rss_conf->rss_hf,
4004 			dev_info.flow_type_rss_offloads);
4005 		return -EINVAL;
4006 	}
4007 
4008 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4009 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4010 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4011 		return -ENOTSUP;
4012 	}
4013 
4014 	if (*dev->dev_ops->rss_hash_update == NULL)
4015 		return -ENOTSUP;
4016 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4017 								 rss_conf));
4018 }
4019 
4020 int
4021 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4022 			      struct rte_eth_rss_conf *rss_conf)
4023 {
4024 	struct rte_eth_dev *dev;
4025 
4026 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4027 	dev = &rte_eth_devices[port_id];
4028 
4029 	if (rss_conf == NULL) {
4030 		RTE_ETHDEV_LOG(ERR,
4031 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4032 			port_id);
4033 		return -EINVAL;
4034 	}
4035 
4036 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4037 		return -ENOTSUP;
4038 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4039 								   rss_conf));
4040 }
4041 
4042 int
4043 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4044 				struct rte_eth_udp_tunnel *udp_tunnel)
4045 {
4046 	struct rte_eth_dev *dev;
4047 
4048 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4049 	dev = &rte_eth_devices[port_id];
4050 
4051 	if (udp_tunnel == NULL) {
4052 		RTE_ETHDEV_LOG(ERR,
4053 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4054 			port_id);
4055 		return -EINVAL;
4056 	}
4057 
4058 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4059 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4060 		return -EINVAL;
4061 	}
4062 
4063 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4064 		return -ENOTSUP;
4065 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4066 								udp_tunnel));
4067 }
4068 
4069 int
4070 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4071 				   struct rte_eth_udp_tunnel *udp_tunnel)
4072 {
4073 	struct rte_eth_dev *dev;
4074 
4075 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076 	dev = &rte_eth_devices[port_id];
4077 
4078 	if (udp_tunnel == NULL) {
4079 		RTE_ETHDEV_LOG(ERR,
4080 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4081 			port_id);
4082 		return -EINVAL;
4083 	}
4084 
4085 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4086 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4087 		return -EINVAL;
4088 	}
4089 
4090 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4091 		return -ENOTSUP;
4092 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4093 								udp_tunnel));
4094 }
4095 
4096 int
4097 rte_eth_led_on(uint16_t port_id)
4098 {
4099 	struct rte_eth_dev *dev;
4100 
4101 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4102 	dev = &rte_eth_devices[port_id];
4103 
4104 	if (*dev->dev_ops->dev_led_on == NULL)
4105 		return -ENOTSUP;
4106 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4107 }
4108 
4109 int
4110 rte_eth_led_off(uint16_t port_id)
4111 {
4112 	struct rte_eth_dev *dev;
4113 
4114 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115 	dev = &rte_eth_devices[port_id];
4116 
4117 	if (*dev->dev_ops->dev_led_off == NULL)
4118 		return -ENOTSUP;
4119 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4120 }
4121 
4122 int
4123 rte_eth_fec_get_capability(uint16_t port_id,
4124 			   struct rte_eth_fec_capa *speed_fec_capa,
4125 			   unsigned int num)
4126 {
4127 	struct rte_eth_dev *dev;
4128 	int ret;
4129 
4130 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4131 	dev = &rte_eth_devices[port_id];
4132 
4133 	if (speed_fec_capa == NULL && num > 0) {
4134 		RTE_ETHDEV_LOG(ERR,
4135 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4136 			port_id);
4137 		return -EINVAL;
4138 	}
4139 
4140 	if (*dev->dev_ops->fec_get_capability == NULL)
4141 		return -ENOTSUP;
4142 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4143 
4144 	return ret;
4145 }
4146 
4147 int
4148 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4149 {
4150 	struct rte_eth_dev *dev;
4151 
4152 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4153 	dev = &rte_eth_devices[port_id];
4154 
4155 	if (fec_capa == NULL) {
4156 		RTE_ETHDEV_LOG(ERR,
4157 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4158 			port_id);
4159 		return -EINVAL;
4160 	}
4161 
4162 	if (*dev->dev_ops->fec_get == NULL)
4163 		return -ENOTSUP;
4164 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4165 }
4166 
4167 int
4168 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4169 {
4170 	struct rte_eth_dev *dev;
4171 
4172 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4173 	dev = &rte_eth_devices[port_id];
4174 
4175 	if (*dev->dev_ops->fec_set == NULL)
4176 		return -ENOTSUP;
4177 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4178 }
4179 
4180 /*
4181  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4182  * an empty spot.
4183  */
4184 static int
4185 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4186 {
4187 	struct rte_eth_dev_info dev_info;
4188 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4189 	unsigned i;
4190 	int ret;
4191 
4192 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4193 	if (ret != 0)
4194 		return -1;
4195 
4196 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4197 		if (memcmp(addr, &dev->data->mac_addrs[i],
4198 				RTE_ETHER_ADDR_LEN) == 0)
4199 			return i;
4200 
4201 	return -1;
4202 }
4203 
4204 static const struct rte_ether_addr null_mac_addr;
4205 
4206 int
4207 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4208 			uint32_t pool)
4209 {
4210 	struct rte_eth_dev *dev;
4211 	int index;
4212 	uint64_t pool_mask;
4213 	int ret;
4214 
4215 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4216 	dev = &rte_eth_devices[port_id];
4217 
4218 	if (addr == NULL) {
4219 		RTE_ETHDEV_LOG(ERR,
4220 			"Cannot add ethdev port %u MAC address from NULL address\n",
4221 			port_id);
4222 		return -EINVAL;
4223 	}
4224 
4225 	if (*dev->dev_ops->mac_addr_add == NULL)
4226 		return -ENOTSUP;
4227 
4228 	if (rte_is_zero_ether_addr(addr)) {
4229 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4230 			port_id);
4231 		return -EINVAL;
4232 	}
4233 	if (pool >= RTE_ETH_64_POOLS) {
4234 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4235 		return -EINVAL;
4236 	}
4237 
4238 	index = eth_dev_get_mac_addr_index(port_id, addr);
4239 	if (index < 0) {
4240 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4241 		if (index < 0) {
4242 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4243 				port_id);
4244 			return -ENOSPC;
4245 		}
4246 	} else {
4247 		pool_mask = dev->data->mac_pool_sel[index];
4248 
4249 		/* Check if both MAC address and pool is already there, and do nothing */
4250 		if (pool_mask & RTE_BIT64(pool))
4251 			return 0;
4252 	}
4253 
4254 	/* Update NIC */
4255 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4256 
4257 	if (ret == 0) {
4258 		/* Update address in NIC data structure */
4259 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4260 
4261 		/* Update pool bitmap in NIC data structure */
4262 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4263 	}
4264 
4265 	return eth_err(port_id, ret);
4266 }
4267 
4268 int
4269 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4270 {
4271 	struct rte_eth_dev *dev;
4272 	int index;
4273 
4274 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4275 	dev = &rte_eth_devices[port_id];
4276 
4277 	if (addr == NULL) {
4278 		RTE_ETHDEV_LOG(ERR,
4279 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4280 			port_id);
4281 		return -EINVAL;
4282 	}
4283 
4284 	if (*dev->dev_ops->mac_addr_remove == NULL)
4285 		return -ENOTSUP;
4286 
4287 	index = eth_dev_get_mac_addr_index(port_id, addr);
4288 	if (index == 0) {
4289 		RTE_ETHDEV_LOG(ERR,
4290 			"Port %u: Cannot remove default MAC address\n",
4291 			port_id);
4292 		return -EADDRINUSE;
4293 	} else if (index < 0)
4294 		return 0;  /* Do nothing if address wasn't found */
4295 
4296 	/* Update NIC */
4297 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4298 
4299 	/* Update address in NIC data structure */
4300 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4301 
4302 	/* reset pool bitmap */
4303 	dev->data->mac_pool_sel[index] = 0;
4304 
4305 	return 0;
4306 }
4307 
4308 int
4309 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4310 {
4311 	struct rte_eth_dev *dev;
4312 	int ret;
4313 
4314 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4315 	dev = &rte_eth_devices[port_id];
4316 
4317 	if (addr == NULL) {
4318 		RTE_ETHDEV_LOG(ERR,
4319 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4320 			port_id);
4321 		return -EINVAL;
4322 	}
4323 
4324 	if (!rte_is_valid_assigned_ether_addr(addr))
4325 		return -EINVAL;
4326 
4327 	if (*dev->dev_ops->mac_addr_set == NULL)
4328 		return -ENOTSUP;
4329 
4330 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4331 	if (ret < 0)
4332 		return ret;
4333 
4334 	/* Update default address in NIC data structure */
4335 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4336 
4337 	return 0;
4338 }
4339 
4340 
4341 /*
4342  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4343  * an empty spot.
4344  */
4345 static int
4346 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4347 		const struct rte_ether_addr *addr)
4348 {
4349 	struct rte_eth_dev_info dev_info;
4350 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4351 	unsigned i;
4352 	int ret;
4353 
4354 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4355 	if (ret != 0)
4356 		return -1;
4357 
4358 	if (!dev->data->hash_mac_addrs)
4359 		return -1;
4360 
4361 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4362 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4363 			RTE_ETHER_ADDR_LEN) == 0)
4364 			return i;
4365 
4366 	return -1;
4367 }
4368 
4369 int
4370 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4371 				uint8_t on)
4372 {
4373 	int index;
4374 	int ret;
4375 	struct rte_eth_dev *dev;
4376 
4377 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4378 	dev = &rte_eth_devices[port_id];
4379 
4380 	if (addr == NULL) {
4381 		RTE_ETHDEV_LOG(ERR,
4382 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
4383 			port_id);
4384 		return -EINVAL;
4385 	}
4386 
4387 	if (rte_is_zero_ether_addr(addr)) {
4388 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4389 			port_id);
4390 		return -EINVAL;
4391 	}
4392 
4393 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4394 	/* Check if it's already there, and do nothing */
4395 	if ((index >= 0) && on)
4396 		return 0;
4397 
4398 	if (index < 0) {
4399 		if (!on) {
4400 			RTE_ETHDEV_LOG(ERR,
4401 				"Port %u: the MAC address was not set in UTA\n",
4402 				port_id);
4403 			return -EINVAL;
4404 		}
4405 
4406 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4407 		if (index < 0) {
4408 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4409 				port_id);
4410 			return -ENOSPC;
4411 		}
4412 	}
4413 
4414 	if (*dev->dev_ops->uc_hash_table_set == NULL)
4415 		return -ENOTSUP;
4416 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4417 	if (ret == 0) {
4418 		/* Update address in NIC data structure */
4419 		if (on)
4420 			rte_ether_addr_copy(addr,
4421 					&dev->data->hash_mac_addrs[index]);
4422 		else
4423 			rte_ether_addr_copy(&null_mac_addr,
4424 					&dev->data->hash_mac_addrs[index]);
4425 	}
4426 
4427 	return eth_err(port_id, ret);
4428 }
4429 
4430 int
4431 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4432 {
4433 	struct rte_eth_dev *dev;
4434 
4435 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4436 	dev = &rte_eth_devices[port_id];
4437 
4438 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
4439 		return -ENOTSUP;
4440 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4441 								       on));
4442 }
4443 
4444 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4445 					uint16_t tx_rate)
4446 {
4447 	struct rte_eth_dev *dev;
4448 	struct rte_eth_dev_info dev_info;
4449 	struct rte_eth_link link;
4450 	int ret;
4451 
4452 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4453 	dev = &rte_eth_devices[port_id];
4454 
4455 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4456 	if (ret != 0)
4457 		return ret;
4458 
4459 	link = dev->data->dev_link;
4460 
4461 	if (queue_idx > dev_info.max_tx_queues) {
4462 		RTE_ETHDEV_LOG(ERR,
4463 			"Set queue rate limit:port %u: invalid queue ID=%u\n",
4464 			port_id, queue_idx);
4465 		return -EINVAL;
4466 	}
4467 
4468 	if (tx_rate > link.link_speed) {
4469 		RTE_ETHDEV_LOG(ERR,
4470 			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4471 			tx_rate, link.link_speed);
4472 		return -EINVAL;
4473 	}
4474 
4475 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
4476 		return -ENOTSUP;
4477 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4478 							queue_idx, tx_rate));
4479 }
4480 
4481 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4482 			       uint8_t avail_thresh)
4483 {
4484 	struct rte_eth_dev *dev;
4485 
4486 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4487 	dev = &rte_eth_devices[port_id];
4488 
4489 	if (queue_id > dev->data->nb_rx_queues) {
4490 		RTE_ETHDEV_LOG(ERR,
4491 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
4492 			port_id, queue_id);
4493 		return -EINVAL;
4494 	}
4495 
4496 	if (avail_thresh > 99) {
4497 		RTE_ETHDEV_LOG(ERR,
4498 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
4499 			port_id);
4500 		return -EINVAL;
4501 	}
4502 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
4503 		return -ENOTSUP;
4504 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4505 							     queue_id, avail_thresh));
4506 }
4507 
4508 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4509 				 uint8_t *avail_thresh)
4510 {
4511 	struct rte_eth_dev *dev;
4512 
4513 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4514 	dev = &rte_eth_devices[port_id];
4515 
4516 	if (queue_id == NULL)
4517 		return -EINVAL;
4518 	if (*queue_id >= dev->data->nb_rx_queues)
4519 		*queue_id = 0;
4520 
4521 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
4522 		return -ENOTSUP;
4523 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4524 							     queue_id, avail_thresh));
4525 }
4526 
4527 RTE_INIT(eth_dev_init_fp_ops)
4528 {
4529 	uint32_t i;
4530 
4531 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4532 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4533 }
4534 
4535 RTE_INIT(eth_dev_init_cb_lists)
4536 {
4537 	uint16_t i;
4538 
4539 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4540 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4541 }
4542 
4543 int
4544 rte_eth_dev_callback_register(uint16_t port_id,
4545 			enum rte_eth_event_type event,
4546 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4547 {
4548 	struct rte_eth_dev *dev;
4549 	struct rte_eth_dev_callback *user_cb;
4550 	uint16_t next_port;
4551 	uint16_t last_port;
4552 
4553 	if (cb_fn == NULL) {
4554 		RTE_ETHDEV_LOG(ERR,
4555 			"Cannot register ethdev port %u callback from NULL\n",
4556 			port_id);
4557 		return -EINVAL;
4558 	}
4559 
4560 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4561 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4562 		return -EINVAL;
4563 	}
4564 
4565 	if (port_id == RTE_ETH_ALL) {
4566 		next_port = 0;
4567 		last_port = RTE_MAX_ETHPORTS - 1;
4568 	} else {
4569 		next_port = last_port = port_id;
4570 	}
4571 
4572 	rte_spinlock_lock(&eth_dev_cb_lock);
4573 
4574 	do {
4575 		dev = &rte_eth_devices[next_port];
4576 
4577 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4578 			if (user_cb->cb_fn == cb_fn &&
4579 				user_cb->cb_arg == cb_arg &&
4580 				user_cb->event == event) {
4581 				break;
4582 			}
4583 		}
4584 
4585 		/* create a new callback. */
4586 		if (user_cb == NULL) {
4587 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4588 				sizeof(struct rte_eth_dev_callback), 0);
4589 			if (user_cb != NULL) {
4590 				user_cb->cb_fn = cb_fn;
4591 				user_cb->cb_arg = cb_arg;
4592 				user_cb->event = event;
4593 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4594 						  user_cb, next);
4595 			} else {
4596 				rte_spinlock_unlock(&eth_dev_cb_lock);
4597 				rte_eth_dev_callback_unregister(port_id, event,
4598 								cb_fn, cb_arg);
4599 				return -ENOMEM;
4600 			}
4601 
4602 		}
4603 	} while (++next_port <= last_port);
4604 
4605 	rte_spinlock_unlock(&eth_dev_cb_lock);
4606 	return 0;
4607 }
4608 
4609 int
4610 rte_eth_dev_callback_unregister(uint16_t port_id,
4611 			enum rte_eth_event_type event,
4612 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4613 {
4614 	int ret;
4615 	struct rte_eth_dev *dev;
4616 	struct rte_eth_dev_callback *cb, *next;
4617 	uint16_t next_port;
4618 	uint16_t last_port;
4619 
4620 	if (cb_fn == NULL) {
4621 		RTE_ETHDEV_LOG(ERR,
4622 			"Cannot unregister ethdev port %u callback from NULL\n",
4623 			port_id);
4624 		return -EINVAL;
4625 	}
4626 
4627 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4628 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4629 		return -EINVAL;
4630 	}
4631 
4632 	if (port_id == RTE_ETH_ALL) {
4633 		next_port = 0;
4634 		last_port = RTE_MAX_ETHPORTS - 1;
4635 	} else {
4636 		next_port = last_port = port_id;
4637 	}
4638 
4639 	rte_spinlock_lock(&eth_dev_cb_lock);
4640 
4641 	do {
4642 		dev = &rte_eth_devices[next_port];
4643 		ret = 0;
4644 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4645 		     cb = next) {
4646 
4647 			next = TAILQ_NEXT(cb, next);
4648 
4649 			if (cb->cb_fn != cb_fn || cb->event != event ||
4650 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4651 				continue;
4652 
4653 			/*
4654 			 * if this callback is not executing right now,
4655 			 * then remove it.
4656 			 */
4657 			if (cb->active == 0) {
4658 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4659 				rte_free(cb);
4660 			} else {
4661 				ret = -EAGAIN;
4662 			}
4663 		}
4664 	} while (++next_port <= last_port);
4665 
4666 	rte_spinlock_unlock(&eth_dev_cb_lock);
4667 	return ret;
4668 }
4669 
4670 int
4671 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4672 {
4673 	uint32_t vec;
4674 	struct rte_eth_dev *dev;
4675 	struct rte_intr_handle *intr_handle;
4676 	uint16_t qid;
4677 	int rc;
4678 
4679 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4680 	dev = &rte_eth_devices[port_id];
4681 
4682 	if (!dev->intr_handle) {
4683 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4684 		return -ENOTSUP;
4685 	}
4686 
4687 	intr_handle = dev->intr_handle;
4688 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4689 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4690 		return -EPERM;
4691 	}
4692 
4693 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4694 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4695 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4696 		if (rc && rc != -EEXIST) {
4697 			RTE_ETHDEV_LOG(ERR,
4698 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4699 				port_id, qid, op, epfd, vec);
4700 		}
4701 	}
4702 
4703 	return 0;
4704 }
4705 
4706 int
4707 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4708 {
4709 	struct rte_intr_handle *intr_handle;
4710 	struct rte_eth_dev *dev;
4711 	unsigned int efd_idx;
4712 	uint32_t vec;
4713 	int fd;
4714 
4715 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4716 	dev = &rte_eth_devices[port_id];
4717 
4718 	if (queue_id >= dev->data->nb_rx_queues) {
4719 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4720 		return -1;
4721 	}
4722 
4723 	if (!dev->intr_handle) {
4724 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4725 		return -1;
4726 	}
4727 
4728 	intr_handle = dev->intr_handle;
4729 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4730 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4731 		return -1;
4732 	}
4733 
4734 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4735 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4736 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4737 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4738 
4739 	return fd;
4740 }
4741 
4742 int
4743 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4744 			  int epfd, int op, void *data)
4745 {
4746 	uint32_t vec;
4747 	struct rte_eth_dev *dev;
4748 	struct rte_intr_handle *intr_handle;
4749 	int rc;
4750 
4751 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4752 	dev = &rte_eth_devices[port_id];
4753 
4754 	if (queue_id >= dev->data->nb_rx_queues) {
4755 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4756 		return -EINVAL;
4757 	}
4758 
4759 	if (!dev->intr_handle) {
4760 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4761 		return -ENOTSUP;
4762 	}
4763 
4764 	intr_handle = dev->intr_handle;
4765 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4766 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4767 		return -EPERM;
4768 	}
4769 
4770 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4771 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4772 	if (rc && rc != -EEXIST) {
4773 		RTE_ETHDEV_LOG(ERR,
4774 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4775 			port_id, queue_id, op, epfd, vec);
4776 		return rc;
4777 	}
4778 
4779 	return 0;
4780 }
4781 
4782 int
4783 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4784 			   uint16_t queue_id)
4785 {
4786 	struct rte_eth_dev *dev;
4787 	int ret;
4788 
4789 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4790 	dev = &rte_eth_devices[port_id];
4791 
4792 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4793 	if (ret != 0)
4794 		return ret;
4795 
4796 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
4797 		return -ENOTSUP;
4798 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4799 }
4800 
4801 int
4802 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4803 			    uint16_t queue_id)
4804 {
4805 	struct rte_eth_dev *dev;
4806 	int ret;
4807 
4808 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4809 	dev = &rte_eth_devices[port_id];
4810 
4811 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4812 	if (ret != 0)
4813 		return ret;
4814 
4815 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
4816 		return -ENOTSUP;
4817 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4818 }
4819 
4820 
4821 const struct rte_eth_rxtx_callback *
4822 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4823 		rte_rx_callback_fn fn, void *user_param)
4824 {
4825 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4826 	rte_errno = ENOTSUP;
4827 	return NULL;
4828 #endif
4829 	struct rte_eth_dev *dev;
4830 
4831 	/* check input parameters */
4832 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4833 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4834 		rte_errno = EINVAL;
4835 		return NULL;
4836 	}
4837 	dev = &rte_eth_devices[port_id];
4838 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4839 		rte_errno = EINVAL;
4840 		return NULL;
4841 	}
4842 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4843 
4844 	if (cb == NULL) {
4845 		rte_errno = ENOMEM;
4846 		return NULL;
4847 	}
4848 
4849 	cb->fn.rx = fn;
4850 	cb->param = user_param;
4851 
4852 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4853 	/* Add the callbacks in fifo order. */
4854 	struct rte_eth_rxtx_callback *tail =
4855 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4856 
4857 	if (!tail) {
4858 		/* Stores to cb->fn and cb->param should complete before
4859 		 * cb is visible to data plane.
4860 		 */
4861 		__atomic_store_n(
4862 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4863 			cb, __ATOMIC_RELEASE);
4864 
4865 	} else {
4866 		while (tail->next)
4867 			tail = tail->next;
4868 		/* Stores to cb->fn and cb->param should complete before
4869 		 * cb is visible to data plane.
4870 		 */
4871 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4872 	}
4873 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4874 
4875 	return cb;
4876 }
4877 
4878 const struct rte_eth_rxtx_callback *
4879 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4880 		rte_rx_callback_fn fn, void *user_param)
4881 {
4882 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4883 	rte_errno = ENOTSUP;
4884 	return NULL;
4885 #endif
4886 	/* check input parameters */
4887 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4888 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4889 		rte_errno = EINVAL;
4890 		return NULL;
4891 	}
4892 
4893 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4894 
4895 	if (cb == NULL) {
4896 		rte_errno = ENOMEM;
4897 		return NULL;
4898 	}
4899 
4900 	cb->fn.rx = fn;
4901 	cb->param = user_param;
4902 
4903 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4904 	/* Add the callbacks at first position */
4905 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4906 	/* Stores to cb->fn, cb->param and cb->next should complete before
4907 	 * cb is visible to data plane threads.
4908 	 */
4909 	__atomic_store_n(
4910 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4911 		cb, __ATOMIC_RELEASE);
4912 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4913 
4914 	return cb;
4915 }
4916 
4917 const struct rte_eth_rxtx_callback *
4918 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4919 		rte_tx_callback_fn fn, void *user_param)
4920 {
4921 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4922 	rte_errno = ENOTSUP;
4923 	return NULL;
4924 #endif
4925 	struct rte_eth_dev *dev;
4926 
4927 	/* check input parameters */
4928 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4929 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4930 		rte_errno = EINVAL;
4931 		return NULL;
4932 	}
4933 
4934 	dev = &rte_eth_devices[port_id];
4935 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4936 		rte_errno = EINVAL;
4937 		return NULL;
4938 	}
4939 
4940 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4941 
4942 	if (cb == NULL) {
4943 		rte_errno = ENOMEM;
4944 		return NULL;
4945 	}
4946 
4947 	cb->fn.tx = fn;
4948 	cb->param = user_param;
4949 
4950 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4951 	/* Add the callbacks in fifo order. */
4952 	struct rte_eth_rxtx_callback *tail =
4953 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4954 
4955 	if (!tail) {
4956 		/* Stores to cb->fn and cb->param should complete before
4957 		 * cb is visible to data plane.
4958 		 */
4959 		__atomic_store_n(
4960 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4961 			cb, __ATOMIC_RELEASE);
4962 
4963 	} else {
4964 		while (tail->next)
4965 			tail = tail->next;
4966 		/* Stores to cb->fn and cb->param should complete before
4967 		 * cb is visible to data plane.
4968 		 */
4969 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4970 	}
4971 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4972 
4973 	return cb;
4974 }
4975 
4976 int
4977 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4978 		const struct rte_eth_rxtx_callback *user_cb)
4979 {
4980 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4981 	return -ENOTSUP;
4982 #endif
4983 	/* Check input parameters. */
4984 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4985 	if (user_cb == NULL ||
4986 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4987 		return -EINVAL;
4988 
4989 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4990 	struct rte_eth_rxtx_callback *cb;
4991 	struct rte_eth_rxtx_callback **prev_cb;
4992 	int ret = -EINVAL;
4993 
4994 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4995 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
4996 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4997 		cb = *prev_cb;
4998 		if (cb == user_cb) {
4999 			/* Remove the user cb from the callback list. */
5000 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5001 			ret = 0;
5002 			break;
5003 		}
5004 	}
5005 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5006 
5007 	return ret;
5008 }
5009 
5010 int
5011 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5012 		const struct rte_eth_rxtx_callback *user_cb)
5013 {
5014 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5015 	return -ENOTSUP;
5016 #endif
5017 	/* Check input parameters. */
5018 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5019 	if (user_cb == NULL ||
5020 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5021 		return -EINVAL;
5022 
5023 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5024 	int ret = -EINVAL;
5025 	struct rte_eth_rxtx_callback *cb;
5026 	struct rte_eth_rxtx_callback **prev_cb;
5027 
5028 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5029 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5030 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5031 		cb = *prev_cb;
5032 		if (cb == user_cb) {
5033 			/* Remove the user cb from the callback list. */
5034 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5035 			ret = 0;
5036 			break;
5037 		}
5038 	}
5039 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5040 
5041 	return ret;
5042 }
5043 
5044 int
5045 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5046 	struct rte_eth_rxq_info *qinfo)
5047 {
5048 	struct rte_eth_dev *dev;
5049 
5050 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5051 	dev = &rte_eth_devices[port_id];
5052 
5053 	if (queue_id >= dev->data->nb_rx_queues) {
5054 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5055 		return -EINVAL;
5056 	}
5057 
5058 	if (qinfo == NULL) {
5059 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5060 			port_id, queue_id);
5061 		return -EINVAL;
5062 	}
5063 
5064 	if (dev->data->rx_queues == NULL ||
5065 			dev->data->rx_queues[queue_id] == NULL) {
5066 		RTE_ETHDEV_LOG(ERR,
5067 			       "Rx queue %"PRIu16" of device with port_id=%"
5068 			       PRIu16" has not been setup\n",
5069 			       queue_id, port_id);
5070 		return -EINVAL;
5071 	}
5072 
5073 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5074 		RTE_ETHDEV_LOG(INFO,
5075 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5076 			queue_id, port_id);
5077 		return -EINVAL;
5078 	}
5079 
5080 	if (*dev->dev_ops->rxq_info_get == NULL)
5081 		return -ENOTSUP;
5082 
5083 	memset(qinfo, 0, sizeof(*qinfo));
5084 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5085 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5086 
5087 	return 0;
5088 }
5089 
5090 int
5091 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5092 	struct rte_eth_txq_info *qinfo)
5093 {
5094 	struct rte_eth_dev *dev;
5095 
5096 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5097 	dev = &rte_eth_devices[port_id];
5098 
5099 	if (queue_id >= dev->data->nb_tx_queues) {
5100 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5101 		return -EINVAL;
5102 	}
5103 
5104 	if (qinfo == NULL) {
5105 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5106 			port_id, queue_id);
5107 		return -EINVAL;
5108 	}
5109 
5110 	if (dev->data->tx_queues == NULL ||
5111 			dev->data->tx_queues[queue_id] == NULL) {
5112 		RTE_ETHDEV_LOG(ERR,
5113 			       "Tx queue %"PRIu16" of device with port_id=%"
5114 			       PRIu16" has not been setup\n",
5115 			       queue_id, port_id);
5116 		return -EINVAL;
5117 	}
5118 
5119 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5120 		RTE_ETHDEV_LOG(INFO,
5121 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5122 			queue_id, port_id);
5123 		return -EINVAL;
5124 	}
5125 
5126 	if (*dev->dev_ops->txq_info_get == NULL)
5127 		return -ENOTSUP;
5128 
5129 	memset(qinfo, 0, sizeof(*qinfo));
5130 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5131 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5132 
5133 	return 0;
5134 }
5135 
5136 int
5137 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5138 			  struct rte_eth_burst_mode *mode)
5139 {
5140 	struct rte_eth_dev *dev;
5141 
5142 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5143 	dev = &rte_eth_devices[port_id];
5144 
5145 	if (queue_id >= dev->data->nb_rx_queues) {
5146 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5147 		return -EINVAL;
5148 	}
5149 
5150 	if (mode == NULL) {
5151 		RTE_ETHDEV_LOG(ERR,
5152 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5153 			port_id, queue_id);
5154 		return -EINVAL;
5155 	}
5156 
5157 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5158 		return -ENOTSUP;
5159 	memset(mode, 0, sizeof(*mode));
5160 	return eth_err(port_id,
5161 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5162 }
5163 
5164 int
5165 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5166 			  struct rte_eth_burst_mode *mode)
5167 {
5168 	struct rte_eth_dev *dev;
5169 
5170 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5171 	dev = &rte_eth_devices[port_id];
5172 
5173 	if (queue_id >= dev->data->nb_tx_queues) {
5174 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5175 		return -EINVAL;
5176 	}
5177 
5178 	if (mode == NULL) {
5179 		RTE_ETHDEV_LOG(ERR,
5180 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5181 			port_id, queue_id);
5182 		return -EINVAL;
5183 	}
5184 
5185 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
5186 		return -ENOTSUP;
5187 	memset(mode, 0, sizeof(*mode));
5188 	return eth_err(port_id,
5189 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5190 }
5191 
5192 int
5193 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5194 		struct rte_power_monitor_cond *pmc)
5195 {
5196 	struct rte_eth_dev *dev;
5197 
5198 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5199 	dev = &rte_eth_devices[port_id];
5200 
5201 	if (queue_id >= dev->data->nb_rx_queues) {
5202 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5203 		return -EINVAL;
5204 	}
5205 
5206 	if (pmc == NULL) {
5207 		RTE_ETHDEV_LOG(ERR,
5208 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5209 			port_id, queue_id);
5210 		return -EINVAL;
5211 	}
5212 
5213 	if (*dev->dev_ops->get_monitor_addr == NULL)
5214 		return -ENOTSUP;
5215 	return eth_err(port_id,
5216 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5217 }
5218 
5219 int
5220 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5221 			     struct rte_ether_addr *mc_addr_set,
5222 			     uint32_t nb_mc_addr)
5223 {
5224 	struct rte_eth_dev *dev;
5225 
5226 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5227 	dev = &rte_eth_devices[port_id];
5228 
5229 	if (*dev->dev_ops->set_mc_addr_list == NULL)
5230 		return -ENOTSUP;
5231 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5232 						mc_addr_set, nb_mc_addr));
5233 }
5234 
5235 int
5236 rte_eth_timesync_enable(uint16_t port_id)
5237 {
5238 	struct rte_eth_dev *dev;
5239 
5240 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5241 	dev = &rte_eth_devices[port_id];
5242 
5243 	if (*dev->dev_ops->timesync_enable == NULL)
5244 		return -ENOTSUP;
5245 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5246 }
5247 
5248 int
5249 rte_eth_timesync_disable(uint16_t port_id)
5250 {
5251 	struct rte_eth_dev *dev;
5252 
5253 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5254 	dev = &rte_eth_devices[port_id];
5255 
5256 	if (*dev->dev_ops->timesync_disable == NULL)
5257 		return -ENOTSUP;
5258 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5259 }
5260 
5261 int
5262 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5263 				   uint32_t flags)
5264 {
5265 	struct rte_eth_dev *dev;
5266 
5267 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5268 	dev = &rte_eth_devices[port_id];
5269 
5270 	if (timestamp == NULL) {
5271 		RTE_ETHDEV_LOG(ERR,
5272 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5273 			port_id);
5274 		return -EINVAL;
5275 	}
5276 
5277 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
5278 		return -ENOTSUP;
5279 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5280 				(dev, timestamp, flags));
5281 }
5282 
5283 int
5284 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5285 				   struct timespec *timestamp)
5286 {
5287 	struct rte_eth_dev *dev;
5288 
5289 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5290 	dev = &rte_eth_devices[port_id];
5291 
5292 	if (timestamp == NULL) {
5293 		RTE_ETHDEV_LOG(ERR,
5294 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
5295 			port_id);
5296 		return -EINVAL;
5297 	}
5298 
5299 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
5300 		return -ENOTSUP;
5301 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5302 				(dev, timestamp));
5303 }
5304 
5305 int
5306 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5307 {
5308 	struct rte_eth_dev *dev;
5309 
5310 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5311 	dev = &rte_eth_devices[port_id];
5312 
5313 	if (*dev->dev_ops->timesync_adjust_time == NULL)
5314 		return -ENOTSUP;
5315 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5316 }
5317 
5318 int
5319 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5320 {
5321 	struct rte_eth_dev *dev;
5322 
5323 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5324 	dev = &rte_eth_devices[port_id];
5325 
5326 	if (timestamp == NULL) {
5327 		RTE_ETHDEV_LOG(ERR,
5328 			"Cannot read ethdev port %u timesync time to NULL\n",
5329 			port_id);
5330 		return -EINVAL;
5331 	}
5332 
5333 	if (*dev->dev_ops->timesync_read_time == NULL)
5334 		return -ENOTSUP;
5335 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5336 								timestamp));
5337 }
5338 
5339 int
5340 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5341 {
5342 	struct rte_eth_dev *dev;
5343 
5344 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5345 	dev = &rte_eth_devices[port_id];
5346 
5347 	if (timestamp == NULL) {
5348 		RTE_ETHDEV_LOG(ERR,
5349 			"Cannot write ethdev port %u timesync from NULL time\n",
5350 			port_id);
5351 		return -EINVAL;
5352 	}
5353 
5354 	if (*dev->dev_ops->timesync_write_time == NULL)
5355 		return -ENOTSUP;
5356 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5357 								timestamp));
5358 }
5359 
5360 int
5361 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5362 {
5363 	struct rte_eth_dev *dev;
5364 
5365 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5366 	dev = &rte_eth_devices[port_id];
5367 
5368 	if (clock == NULL) {
5369 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5370 			port_id);
5371 		return -EINVAL;
5372 	}
5373 
5374 	if (*dev->dev_ops->read_clock == NULL)
5375 		return -ENOTSUP;
5376 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5377 }
5378 
5379 int
5380 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5381 {
5382 	struct rte_eth_dev *dev;
5383 
5384 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5385 	dev = &rte_eth_devices[port_id];
5386 
5387 	if (info == NULL) {
5388 		RTE_ETHDEV_LOG(ERR,
5389 			"Cannot get ethdev port %u register info to NULL\n",
5390 			port_id);
5391 		return -EINVAL;
5392 	}
5393 
5394 	if (*dev->dev_ops->get_reg == NULL)
5395 		return -ENOTSUP;
5396 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5397 }
5398 
5399 int
5400 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5401 {
5402 	struct rte_eth_dev *dev;
5403 
5404 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5405 	dev = &rte_eth_devices[port_id];
5406 
5407 	if (*dev->dev_ops->get_eeprom_length == NULL)
5408 		return -ENOTSUP;
5409 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5410 }
5411 
5412 int
5413 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5414 {
5415 	struct rte_eth_dev *dev;
5416 
5417 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5418 	dev = &rte_eth_devices[port_id];
5419 
5420 	if (info == NULL) {
5421 		RTE_ETHDEV_LOG(ERR,
5422 			"Cannot get ethdev port %u EEPROM info to NULL\n",
5423 			port_id);
5424 		return -EINVAL;
5425 	}
5426 
5427 	if (*dev->dev_ops->get_eeprom == NULL)
5428 		return -ENOTSUP;
5429 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5430 }
5431 
5432 int
5433 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5434 {
5435 	struct rte_eth_dev *dev;
5436 
5437 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5438 	dev = &rte_eth_devices[port_id];
5439 
5440 	if (info == NULL) {
5441 		RTE_ETHDEV_LOG(ERR,
5442 			"Cannot set ethdev port %u EEPROM from NULL info\n",
5443 			port_id);
5444 		return -EINVAL;
5445 	}
5446 
5447 	if (*dev->dev_ops->set_eeprom == NULL)
5448 		return -ENOTSUP;
5449 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5450 }
5451 
5452 int
5453 rte_eth_dev_get_module_info(uint16_t port_id,
5454 			    struct rte_eth_dev_module_info *modinfo)
5455 {
5456 	struct rte_eth_dev *dev;
5457 
5458 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5459 	dev = &rte_eth_devices[port_id];
5460 
5461 	if (modinfo == NULL) {
5462 		RTE_ETHDEV_LOG(ERR,
5463 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
5464 			port_id);
5465 		return -EINVAL;
5466 	}
5467 
5468 	if (*dev->dev_ops->get_module_info == NULL)
5469 		return -ENOTSUP;
5470 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
5471 }
5472 
5473 int
5474 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5475 			      struct rte_dev_eeprom_info *info)
5476 {
5477 	struct rte_eth_dev *dev;
5478 
5479 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5480 	dev = &rte_eth_devices[port_id];
5481 
5482 	if (info == NULL) {
5483 		RTE_ETHDEV_LOG(ERR,
5484 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
5485 			port_id);
5486 		return -EINVAL;
5487 	}
5488 
5489 	if (info->data == NULL) {
5490 		RTE_ETHDEV_LOG(ERR,
5491 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
5492 			port_id);
5493 		return -EINVAL;
5494 	}
5495 
5496 	if (info->length == 0) {
5497 		RTE_ETHDEV_LOG(ERR,
5498 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
5499 			port_id);
5500 		return -EINVAL;
5501 	}
5502 
5503 	if (*dev->dev_ops->get_module_eeprom == NULL)
5504 		return -ENOTSUP;
5505 	return (*dev->dev_ops->get_module_eeprom)(dev, info);
5506 }
5507 
5508 int
5509 rte_eth_dev_get_dcb_info(uint16_t port_id,
5510 			     struct rte_eth_dcb_info *dcb_info)
5511 {
5512 	struct rte_eth_dev *dev;
5513 
5514 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5515 	dev = &rte_eth_devices[port_id];
5516 
5517 	if (dcb_info == NULL) {
5518 		RTE_ETHDEV_LOG(ERR,
5519 			"Cannot get ethdev port %u DCB info to NULL\n",
5520 			port_id);
5521 		return -EINVAL;
5522 	}
5523 
5524 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5525 
5526 	if (*dev->dev_ops->get_dcb_info == NULL)
5527 		return -ENOTSUP;
5528 	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5529 }
5530 
5531 static void
5532 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5533 		const struct rte_eth_desc_lim *desc_lim)
5534 {
5535 	if (desc_lim->nb_align != 0)
5536 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5537 
5538 	if (desc_lim->nb_max != 0)
5539 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5540 
5541 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5542 }
5543 
5544 int
5545 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5546 				 uint16_t *nb_rx_desc,
5547 				 uint16_t *nb_tx_desc)
5548 {
5549 	struct rte_eth_dev_info dev_info;
5550 	int ret;
5551 
5552 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5553 
5554 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5555 	if (ret != 0)
5556 		return ret;
5557 
5558 	if (nb_rx_desc != NULL)
5559 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5560 
5561 	if (nb_tx_desc != NULL)
5562 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5563 
5564 	return 0;
5565 }
5566 
5567 int
5568 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5569 				   struct rte_eth_hairpin_cap *cap)
5570 {
5571 	struct rte_eth_dev *dev;
5572 
5573 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5574 	dev = &rte_eth_devices[port_id];
5575 
5576 	if (cap == NULL) {
5577 		RTE_ETHDEV_LOG(ERR,
5578 			"Cannot get ethdev port %u hairpin capability to NULL\n",
5579 			port_id);
5580 		return -EINVAL;
5581 	}
5582 
5583 	if (*dev->dev_ops->hairpin_cap_get == NULL)
5584 		return -ENOTSUP;
5585 	memset(cap, 0, sizeof(*cap));
5586 	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5587 }
5588 
5589 int
5590 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5591 {
5592 	struct rte_eth_dev *dev;
5593 
5594 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5595 	dev = &rte_eth_devices[port_id];
5596 
5597 	if (pool == NULL) {
5598 		RTE_ETHDEV_LOG(ERR,
5599 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
5600 			port_id);
5601 		return -EINVAL;
5602 	}
5603 
5604 	if (*dev->dev_ops->pool_ops_supported == NULL)
5605 		return 1; /* all pools are supported */
5606 
5607 	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5608 }
5609 
5610 static int
5611 eth_dev_handle_port_list(const char *cmd __rte_unused,
5612 		const char *params __rte_unused,
5613 		struct rte_tel_data *d)
5614 {
5615 	int port_id;
5616 
5617 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5618 	RTE_ETH_FOREACH_DEV(port_id)
5619 		rte_tel_data_add_array_int(d, port_id);
5620 	return 0;
5621 }
5622 
5623 static void
5624 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5625 		const char *stat_name)
5626 {
5627 	int q;
5628 	struct rte_tel_data *q_data = rte_tel_data_alloc();
5629 	if (q_data == NULL)
5630 		return;
5631 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5632 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5633 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
5634 	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5635 }
5636 
5637 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5638 
5639 static int
5640 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5641 		const char *params,
5642 		struct rte_tel_data *d)
5643 {
5644 	struct rte_eth_stats stats;
5645 	int port_id, ret;
5646 
5647 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5648 		return -1;
5649 
5650 	port_id = atoi(params);
5651 	if (!rte_eth_dev_is_valid_port(port_id))
5652 		return -1;
5653 
5654 	ret = rte_eth_stats_get(port_id, &stats);
5655 	if (ret < 0)
5656 		return -1;
5657 
5658 	rte_tel_data_start_dict(d);
5659 	ADD_DICT_STAT(stats, ipackets);
5660 	ADD_DICT_STAT(stats, opackets);
5661 	ADD_DICT_STAT(stats, ibytes);
5662 	ADD_DICT_STAT(stats, obytes);
5663 	ADD_DICT_STAT(stats, imissed);
5664 	ADD_DICT_STAT(stats, ierrors);
5665 	ADD_DICT_STAT(stats, oerrors);
5666 	ADD_DICT_STAT(stats, rx_nombuf);
5667 	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5668 	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5669 	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5670 	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5671 	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5672 
5673 	return 0;
5674 }
5675 
5676 static int
5677 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5678 		const char *params,
5679 		struct rte_tel_data *d)
5680 {
5681 	struct rte_eth_xstat *eth_xstats;
5682 	struct rte_eth_xstat_name *xstat_names;
5683 	int port_id, num_xstats;
5684 	int i, ret;
5685 	char *end_param;
5686 
5687 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5688 		return -1;
5689 
5690 	port_id = strtoul(params, &end_param, 0);
5691 	if (*end_param != '\0')
5692 		RTE_ETHDEV_LOG(NOTICE,
5693 			"Extra parameters passed to ethdev telemetry command, ignoring");
5694 	if (!rte_eth_dev_is_valid_port(port_id))
5695 		return -1;
5696 
5697 	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5698 	if (num_xstats < 0)
5699 		return -1;
5700 
5701 	/* use one malloc for both names and stats */
5702 	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5703 			sizeof(struct rte_eth_xstat_name)) * num_xstats);
5704 	if (eth_xstats == NULL)
5705 		return -1;
5706 	xstat_names = (void *)&eth_xstats[num_xstats];
5707 
5708 	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5709 	if (ret < 0 || ret > num_xstats) {
5710 		free(eth_xstats);
5711 		return -1;
5712 	}
5713 
5714 	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5715 	if (ret < 0 || ret > num_xstats) {
5716 		free(eth_xstats);
5717 		return -1;
5718 	}
5719 
5720 	rte_tel_data_start_dict(d);
5721 	for (i = 0; i < num_xstats; i++)
5722 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5723 				eth_xstats[i].value);
5724 	free(eth_xstats);
5725 	return 0;
5726 }
5727 
5728 static int
5729 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5730 		const char *params,
5731 		struct rte_tel_data *d)
5732 {
5733 	static const char *status_str = "status";
5734 	int ret, port_id;
5735 	struct rte_eth_link link;
5736 	char *end_param;
5737 
5738 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5739 		return -1;
5740 
5741 	port_id = strtoul(params, &end_param, 0);
5742 	if (*end_param != '\0')
5743 		RTE_ETHDEV_LOG(NOTICE,
5744 			"Extra parameters passed to ethdev telemetry command, ignoring");
5745 	if (!rte_eth_dev_is_valid_port(port_id))
5746 		return -1;
5747 
5748 	ret = rte_eth_link_get_nowait(port_id, &link);
5749 	if (ret < 0)
5750 		return -1;
5751 
5752 	rte_tel_data_start_dict(d);
5753 	if (!link.link_status) {
5754 		rte_tel_data_add_dict_string(d, status_str, "DOWN");
5755 		return 0;
5756 	}
5757 	rte_tel_data_add_dict_string(d, status_str, "UP");
5758 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5759 	rte_tel_data_add_dict_string(d, "duplex",
5760 			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
5761 				"full-duplex" : "half-duplex");
5762 	return 0;
5763 }
5764 
5765 static int
5766 eth_dev_handle_port_info(const char *cmd __rte_unused,
5767 		const char *params,
5768 		struct rte_tel_data *d)
5769 {
5770 	struct rte_tel_data *rxq_state, *txq_state;
5771 	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
5772 	struct rte_eth_dev *eth_dev;
5773 	char *end_param;
5774 	int port_id, i;
5775 
5776 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5777 		return -1;
5778 
5779 	port_id = strtoul(params, &end_param, 0);
5780 	if (*end_param != '\0')
5781 		RTE_ETHDEV_LOG(NOTICE,
5782 			"Extra parameters passed to ethdev telemetry command, ignoring");
5783 
5784 	if (!rte_eth_dev_is_valid_port(port_id))
5785 		return -EINVAL;
5786 
5787 	eth_dev = &rte_eth_devices[port_id];
5788 
5789 	rxq_state = rte_tel_data_alloc();
5790 	if (!rxq_state)
5791 		return -ENOMEM;
5792 
5793 	txq_state = rte_tel_data_alloc();
5794 	if (!txq_state) {
5795 		rte_tel_data_free(rxq_state);
5796 		return -ENOMEM;
5797 	}
5798 
5799 	rte_tel_data_start_dict(d);
5800 	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
5801 	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
5802 	rte_tel_data_add_dict_int(d, "nb_rx_queues",
5803 			eth_dev->data->nb_rx_queues);
5804 	rte_tel_data_add_dict_int(d, "nb_tx_queues",
5805 			eth_dev->data->nb_tx_queues);
5806 	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
5807 	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
5808 	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
5809 			eth_dev->data->min_rx_buf_size);
5810 	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
5811 			eth_dev->data->rx_mbuf_alloc_failed);
5812 	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
5813 			eth_dev->data->mac_addrs);
5814 	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
5815 	rte_tel_data_add_dict_int(d, "promiscuous",
5816 			eth_dev->data->promiscuous);
5817 	rte_tel_data_add_dict_int(d, "scattered_rx",
5818 			eth_dev->data->scattered_rx);
5819 	rte_tel_data_add_dict_int(d, "all_multicast",
5820 			eth_dev->data->all_multicast);
5821 	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
5822 	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
5823 	rte_tel_data_add_dict_int(d, "dev_configured",
5824 			eth_dev->data->dev_configured);
5825 
5826 	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
5827 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
5828 		rte_tel_data_add_array_int(rxq_state,
5829 				eth_dev->data->rx_queue_state[i]);
5830 
5831 	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
5832 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
5833 		rte_tel_data_add_array_int(txq_state,
5834 				eth_dev->data->tx_queue_state[i]);
5835 
5836 	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
5837 	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
5838 	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
5839 	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
5840 	rte_tel_data_add_dict_int(d, "rx_offloads",
5841 			eth_dev->data->dev_conf.rxmode.offloads);
5842 	rte_tel_data_add_dict_int(d, "tx_offloads",
5843 			eth_dev->data->dev_conf.txmode.offloads);
5844 	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
5845 			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
5846 
5847 	return 0;
5848 }
5849 
5850 int
5851 rte_eth_representor_info_get(uint16_t port_id,
5852 			     struct rte_eth_representor_info *info)
5853 {
5854 	struct rte_eth_dev *dev;
5855 
5856 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5857 	dev = &rte_eth_devices[port_id];
5858 
5859 	if (*dev->dev_ops->representor_info_get == NULL)
5860 		return -ENOTSUP;
5861 	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
5862 }
5863 
5864 int
5865 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
5866 {
5867 	struct rte_eth_dev *dev;
5868 
5869 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5870 	dev = &rte_eth_devices[port_id];
5871 
5872 	if (dev->data->dev_configured != 0) {
5873 		RTE_ETHDEV_LOG(ERR,
5874 			"The port (ID=%"PRIu16") is already configured\n",
5875 			port_id);
5876 		return -EBUSY;
5877 	}
5878 
5879 	if (features == NULL) {
5880 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
5881 		return -EINVAL;
5882 	}
5883 
5884 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
5885 		return -ENOTSUP;
5886 	return eth_err(port_id,
5887 		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
5888 }
5889 
5890 int
5891 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5892 		struct rte_eth_ip_reassembly_params *reassembly_capa)
5893 {
5894 	struct rte_eth_dev *dev;
5895 
5896 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5897 	dev = &rte_eth_devices[port_id];
5898 
5899 	if (dev->data->dev_configured == 0) {
5900 		RTE_ETHDEV_LOG(ERR,
5901 			"Device with port_id=%u is not configured.\n"
5902 			"Cannot get IP reassembly capability\n",
5903 			port_id);
5904 		return -EINVAL;
5905 	}
5906 
5907 	if (reassembly_capa == NULL) {
5908 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL");
5909 		return -EINVAL;
5910 	}
5911 
5912 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
5913 		return -ENOTSUP;
5914 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
5915 
5916 	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
5917 					(dev, reassembly_capa));
5918 }
5919 
5920 int
5921 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5922 		struct rte_eth_ip_reassembly_params *conf)
5923 {
5924 	struct rte_eth_dev *dev;
5925 
5926 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5927 	dev = &rte_eth_devices[port_id];
5928 
5929 	if (dev->data->dev_configured == 0) {
5930 		RTE_ETHDEV_LOG(ERR,
5931 			"Device with port_id=%u is not configured.\n"
5932 			"Cannot get IP reassembly configuration\n",
5933 			port_id);
5934 		return -EINVAL;
5935 	}
5936 
5937 	if (conf == NULL) {
5938 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL");
5939 		return -EINVAL;
5940 	}
5941 
5942 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
5943 		return -ENOTSUP;
5944 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
5945 	return eth_err(port_id,
5946 		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
5947 }
5948 
5949 int
5950 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5951 		const struct rte_eth_ip_reassembly_params *conf)
5952 {
5953 	struct rte_eth_dev *dev;
5954 
5955 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5956 	dev = &rte_eth_devices[port_id];
5957 
5958 	if (dev->data->dev_configured == 0) {
5959 		RTE_ETHDEV_LOG(ERR,
5960 			"Device with port_id=%u is not configured.\n"
5961 			"Cannot set IP reassembly configuration",
5962 			port_id);
5963 		return -EINVAL;
5964 	}
5965 
5966 	if (dev->data->dev_started != 0) {
5967 		RTE_ETHDEV_LOG(ERR,
5968 			"Device with port_id=%u started,\n"
5969 			"cannot configure IP reassembly params.\n",
5970 			port_id);
5971 		return -EINVAL;
5972 	}
5973 
5974 	if (conf == NULL) {
5975 		RTE_ETHDEV_LOG(ERR,
5976 				"Invalid IP reassembly configuration (NULL)\n");
5977 		return -EINVAL;
5978 	}
5979 
5980 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
5981 		return -ENOTSUP;
5982 	return eth_err(port_id,
5983 		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
5984 }
5985 
5986 int
5987 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
5988 {
5989 	struct rte_eth_dev *dev;
5990 
5991 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5992 	dev = &rte_eth_devices[port_id];
5993 
5994 	if (file == NULL) {
5995 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
5996 		return -EINVAL;
5997 	}
5998 
5999 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6000 		return -ENOTSUP;
6001 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6002 }
6003 
6004 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6005 
6006 RTE_INIT(ethdev_init_telemetry)
6007 {
6008 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6009 			"Returns list of available ethdev ports. Takes no parameters");
6010 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6011 			"Returns the common stats for a port. Parameters: int port_id");
6012 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6013 			"Returns the extended stats for a port. Parameters: int port_id");
6014 	rte_telemetry_register_cmd("/ethdev/link_status",
6015 			eth_dev_handle_port_link_status,
6016 			"Returns the link status for a port. Parameters: int port_id");
6017 	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
6018 			"Returns the device info for a port. Parameters: int port_id");
6019 	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
6020 			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
6021 }
6022