/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter case (i.e. without any bus-level
	 * argument), which belongs to the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at the ethdev
	 * level. Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to the new syntax for use with the new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not a pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in the middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get the next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidates */
		}
		/* The device matches the bus part; now check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try the next rte_device */

	/* No more ethdev ports to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do cleanup from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
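
/*
 * Illustrative usage sketch (caller-side code, not part of this file):
 * iterate over all ethdev ports matching a device description string;
 * this is what the RTE_ETH_FOREACH_MATCHING_DEV() macro expands to.
 * rte_eth_iterator_next() releases the iterator internals when iteration
 * completes, so rte_eth_iterator_cleanup() is only needed when breaking
 * out of the loop early.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *	}
 */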

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner ID=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
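
/*
 * Illustrative ownership sketch (caller-side code, not part of this file):
 * take exclusive ownership of a port so that other entities (and
 * RTE_ETH_FOREACH_DEV iteration) skip it, then release it when done.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... port is now exclusively owned ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */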

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
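
/*
 * Illustrative lookup sketch (caller-side code): resolve a port both ways
 * between its name and its numeric identifier.
 *
 *	uint16_t pid;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:08:00.0", &pid) == 0 &&
 *	    rte_eth_dev_get_name_by_port(pid, name) == 0)
 *		... name now holds "0000:08:00.0" ...
 */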

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
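
/*
 * Illustrative deferred-start sketch (caller-side code, not part of this
 * file; "mb_pool" and "dev_info" are assumed to exist): a queue configured
 * with rx_deferred_start is skipped by rte_eth_dev_start() and is started
 * on demand with the helpers above.
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, SOCKET_ID_ANY, &rxconf, mb_pool);
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, 0);
 */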

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
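
/*
 * Illustrative sketch (caller-side code): build a fixed-speed link_speeds
 * mask from a numeric speed. The duplex argument is
 * RTE_ETH_LINK_HALF_DUPLEX (0) or RTE_ETH_LINK_FULL_DUPLEX (1).
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */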

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if an offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
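
/*
 * Worked example for the XOR walk above: with req_offloads = 0b0101 and
 * set_offloads = 0b0110, offloads_diff = 0b0011. Bit 0 is requested but
 * not set (error path), bit 1 is set but not requested (debug path);
 * each iteration clears the lowest set bit of the diff.
 */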

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
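
/*
 * Worked example: a device reporting max_rx_pktlen = 9618 and
 * max_mtu = 9600 yields overhead_len = 18 bytes
 * (RTE_ETHER_HDR_LEN 14 + RTE_ETHER_CRC_LEN 4), so an MTU of 1500
 * corresponds to a frame size of 1518 bytes.
 */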

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 only when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store the original config, as a rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info query.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use the driver's preferred values. This cannot be
	 * done individually as it is valid for either Tx or Rx (but not both)
	 * to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports the requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
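
/*
 * Illustrative bring-up sketch (caller-side code, not part of this file;
 * "mb_pool" is an assumed, previously created mbuf pool): the canonical
 * port initialization sequence that this function is part of.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	int socket = rte_eth_dev_socket_id(port_id);
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0 ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 1024, socket, NULL, mb_pool) != 0 ||
 *	    rte_eth_tx_queue_setup(port_id, 0, 1024, socket, NULL) != 0 ||
 *	    rte_eth_dev_start(port_id) != 0)
 *		rte_exit(EXIT_FAILURE, "port %u init failed\n", port_id);
 */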

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value set
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use the callbacks directly since we don't need the port_id check
	 * and want to bypass the same-value set
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore the MAC address now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_stop, -ENOTSUP);

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	dev->data->dev_started = 0;
	ret = (*dev->dev_ops->dev_stop)(dev);
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			       port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}
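
/*
 * Illustrative teardown sketch (caller-side code): a started port must be
 * stopped before it can be closed; close then releases the port resources.
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	if (ret == 0)
 *		ret = rte_eth_dev_close(port_id);
 */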

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}
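
/*
 * Illustrative buffer-split sketch (caller-side code; "hdr_pool" and
 * "pay_pool" are assumed, previously created mbuf pools): receive headers
 * and payloads into different pools, which is the configuration validated
 * above. A zero length means the rest of the packet. The mempool argument
 * of rte_eth_rx_queue_setup() must be NULL in this mode.
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool, .length = 128 } },
 *		{ .split = { .mp = pay_pool } },
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = RTE_DIM(segs);
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, SOCKET_ID_ANY, &rxconf, NULL);
 */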
1708 
1709 int
1710 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1711 		       uint16_t nb_rx_desc, unsigned int socket_id,
1712 		       const struct rte_eth_rxconf *rx_conf,
1713 		       struct rte_mempool *mp)
1714 {
1715 	int ret;
1716 	uint32_t mbp_buf_size;
1717 	struct rte_eth_dev *dev;
1718 	struct rte_eth_dev_info dev_info;
1719 	struct rte_eth_rxconf local_conf;
1720 
1721 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1722 	dev = &rte_eth_devices[port_id];
1723 
1724 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1725 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1726 		return -EINVAL;
1727 	}
1728 
1729 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1730 
1731 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1732 	if (ret != 0)
1733 		return ret;
1734 
1735 	if (mp != NULL) {
1736 		/* Single pool configuration check. */
1737 		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
1738 			RTE_ETHDEV_LOG(ERR,
1739 				       "Ambiguous segment configuration\n");
1740 			return -EINVAL;
1741 		}
1742 		/*
1743 		 * Check the size of the mbuf data buffer, this value
1744 		 * must be provided in the private data of the memory pool.
1745 		 * First check that the memory pool(s) has a valid private data.
1746 		 */
1747 		if (mp->private_data_size <
1748 				sizeof(struct rte_pktmbuf_pool_private)) {
1749 			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1750 				mp->name, mp->private_data_size,
1751 				(unsigned int)
1752 				sizeof(struct rte_pktmbuf_pool_private));
1753 			return -ENOSPC;
1754 		}
1755 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1756 		if (mbp_buf_size < dev_info.min_rx_bufsize +
1757 				   RTE_PKTMBUF_HEADROOM) {
1758 			RTE_ETHDEV_LOG(ERR,
1759 				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
1760 				       mp->name, mbp_buf_size,
1761 				       RTE_PKTMBUF_HEADROOM +
1762 				       dev_info.min_rx_bufsize,
1763 				       RTE_PKTMBUF_HEADROOM,
1764 				       dev_info.min_rx_bufsize);
1765 			return -EINVAL;
1766 		}
1767 	} else {
1768 		const struct rte_eth_rxseg_split *rx_seg;
1769 		uint16_t n_seg;
1770 
1771 		/* Extended multi-segment configuration check. */
1772 		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
1773 			RTE_ETHDEV_LOG(ERR,
1774 				       "Memory pool is null and no extended configuration provided\n");
1775 			return -EINVAL;
1776 		}
1777 
1778 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1779 		n_seg = rx_conf->rx_nseg;
1780 
1781 		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1782 			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
1783 							   &mbp_buf_size,
1784 							   &dev_info);
1785 			if (ret != 0)
1786 				return ret;
1787 		} else {
1788 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1789 			return -EINVAL;
1790 		}
1791 	}
1792 
1793 	/* Use default specified by driver, if nb_rx_desc is zero */
1794 	if (nb_rx_desc == 0) {
1795 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
1796 		/* If driver default is also zero, fall back on EAL default */
1797 		if (nb_rx_desc == 0)
1798 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1799 	}
1800 
1801 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1802 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1803 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1804 
1805 		RTE_ETHDEV_LOG(ERR,
1806 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1807 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1808 			dev_info.rx_desc_lim.nb_min,
1809 			dev_info.rx_desc_lim.nb_align);
1810 		return -EINVAL;
1811 	}
1812 
1813 	if (dev->data->dev_started &&
1814 		!(dev_info.dev_capa &
1815 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1816 		return -EBUSY;
1817 
1818 	if (dev->data->dev_started &&
1819 		(dev->data->rx_queue_state[rx_queue_id] !=
1820 			RTE_ETH_QUEUE_STATE_STOPPED))
1821 		return -EBUSY;
1822 
1823 	eth_dev_rxq_release(dev, rx_queue_id);
1824 
1825 	if (rx_conf == NULL)
1826 		rx_conf = &dev_info.default_rxconf;
1827 
1828 	local_conf = *rx_conf;
1829 
1830 	/*
1831 	 * If an offloading has already been enabled in
1832 	 * rte_eth_dev_configure(), it has been enabled on all queues,
1833 	 * so there is no need to enable it in this queue again.
1834 	 * The local_conf.offloads input to underlying PMD only carries
1835 	 * those offloadings which are only enabled on this queue and
1836 	 * not enabled on all queues.
1837 	 */
1838 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1839 
1840 	/*
1841 	 * Offloads newly added for this queue are those not enabled in
1842 	 * rte_eth_dev_configure() and they must be of per-queue type.
1843 	 * A pure per-port offload can't be enabled on a queue while
1844 	 * disabled on another queue, and a pure per-port offload can't
1845 	 * be newly enabled on any queue if it hasn't been enabled
1846 	 * in rte_eth_dev_configure().
1847 	 */
1848 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1849 	     local_conf.offloads) {
1850 		RTE_ETHDEV_LOG(ERR,
1851 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1852 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1853 			port_id, rx_queue_id, local_conf.offloads,
1854 			dev_info.rx_queue_offload_capa,
1855 			__func__);
1856 		return -EINVAL;
1857 	}
1858 
1859 	if (local_conf.share_group > 0 &&
1860 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1861 		RTE_ETHDEV_LOG(ERR,
1862 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1863 			port_id, rx_queue_id, local_conf.share_group);
1864 		return -EINVAL;
1865 	}
1866 
1867 	/*
1868 	 * If LRO is enabled, check that the maximum aggregated packet
1869 	 * size is supported by the configured device.
1870 	 */
1871 	/* Get the real Ethernet overhead length */
1872 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1873 		uint32_t overhead_len;
1874 		uint32_t max_rx_pktlen;
1875 		int ret;
1876 
1877 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1878 				dev_info.max_mtu);
1879 		max_rx_pktlen = dev->data->mtu + overhead_len;
1880 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1881 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1882 		ret = eth_dev_check_lro_pkt_size(port_id,
1883 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1884 				max_rx_pktlen,
1885 				dev_info.max_lro_pkt_size);
1886 		if (ret != 0)
1887 			return ret;
1888 	}
1889 
1890 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1891 					      socket_id, &local_conf, mp);
1892 	if (!ret) {
1893 		if (!dev->data->min_rx_buf_size ||
1894 		    dev->data->min_rx_buf_size > mbp_buf_size)
1895 			dev->data->min_rx_buf_size = mbp_buf_size;
1896 	}
1897 
1898 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1899 		rx_conf, ret);
1900 	return eth_err(port_id, ret);
1901 }
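
/*
 * Usage sketch (illustrative only; names such as "port_id" and the pool
 * sizing are assumptions of the example, not mandated by this API):
 * create an mbuf pool and set up Rx queue 0 with driver defaults.
 * Passing nb_rx_desc = 0 selects the driver/EAL default ring size and
 * rx_conf = NULL selects dev_info.default_rxconf, as handled above.
 *
 *	struct rte_mempool *mb_pool;
 *	int rc;
 *
 *	mb_pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *					  RTE_MBUF_DEFAULT_BUF_SIZE,
 *					  rte_socket_id());
 *	if (mb_pool == NULL)
 *		rte_panic("cannot create mbuf pool\n");
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *				    NULL, mb_pool);
 *	if (rc != 0)
 *		printf("rx_queue_setup failed: %s\n", rte_strerror(-rc));
 */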
1902 
1903 int
1904 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1905 			       uint16_t nb_rx_desc,
1906 			       const struct rte_eth_hairpin_conf *conf)
1907 {
1908 	int ret;
1909 	struct rte_eth_dev *dev;
1910 	struct rte_eth_hairpin_cap cap;
1911 	int i;
1912 	int count;
1913 
1914 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1915 	dev = &rte_eth_devices[port_id];
1916 
1917 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1918 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1919 		return -EINVAL;
1920 	}
1921 
1922 	if (conf == NULL) {
1923 		RTE_ETHDEV_LOG(ERR,
1924 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1925 			port_id);
1926 		return -EINVAL;
1927 	}
1928 
1929 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1930 	if (ret != 0)
1931 		return ret;
1932 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1933 				-ENOTSUP);
1934 	/* If nb_rx_desc is zero, use the max number of descriptors from the driver. */
1935 	if (nb_rx_desc == 0)
1936 		nb_rx_desc = cap.max_nb_desc;
1937 	if (nb_rx_desc > cap.max_nb_desc) {
1938 		RTE_ETHDEV_LOG(ERR,
1939 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1940 			nb_rx_desc, cap.max_nb_desc);
1941 		return -EINVAL;
1942 	}
1943 	if (conf->peer_count > cap.max_rx_2_tx) {
1944 		RTE_ETHDEV_LOG(ERR,
1945 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
1946 			conf->peer_count, cap.max_rx_2_tx);
1947 		return -EINVAL;
1948 	}
1949 	if (conf->peer_count == 0) {
1950 		RTE_ETHDEV_LOG(ERR,
1951 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
1952 			conf->peer_count);
1953 		return -EINVAL;
1954 	}
1955 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1956 	     cap.max_nb_queues != UINT16_MAX; i++) {
1957 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1958 			count++;
1959 	}
1960 	if (count > cap.max_nb_queues) {
1961 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1962 			       cap.max_nb_queues);
1963 		return -EINVAL;
1964 	}
1965 	if (dev->data->dev_started)
1966 		return -EBUSY;
1967 	eth_dev_rxq_release(dev, rx_queue_id);
1968 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1969 						      nb_rx_desc, conf);
1970 	if (ret == 0)
1971 		dev->data->rx_queue_state[rx_queue_id] =
1972 			RTE_ETH_QUEUE_STATE_HAIRPIN;
1973 	return eth_err(port_id, ret);
1974 }
1975 
1976 int
1977 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1978 		       uint16_t nb_tx_desc, unsigned int socket_id,
1979 		       const struct rte_eth_txconf *tx_conf)
1980 {
1981 	struct rte_eth_dev *dev;
1982 	struct rte_eth_dev_info dev_info;
1983 	struct rte_eth_txconf local_conf;
1984 	int ret;
1985 
1986 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1987 	dev = &rte_eth_devices[port_id];
1988 
1989 	if (tx_queue_id >= dev->data->nb_tx_queues) {
1990 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
1991 		return -EINVAL;
1992 	}
1993 
1994 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1995 
1996 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1997 	if (ret != 0)
1998 		return ret;
1999 
2000 	/* Use default specified by driver, if nb_tx_desc is zero */
2001 	if (nb_tx_desc == 0) {
2002 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2003 		/* If driver default is zero, fall back on EAL default */
2004 		if (nb_tx_desc == 0)
2005 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2006 	}
2007 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2008 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2009 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2010 		RTE_ETHDEV_LOG(ERR,
2011 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2012 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2013 			dev_info.tx_desc_lim.nb_min,
2014 			dev_info.tx_desc_lim.nb_align);
2015 		return -EINVAL;
2016 	}
2017 
2018 	if (dev->data->dev_started &&
2019 		!(dev_info.dev_capa &
2020 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2021 		return -EBUSY;
2022 
2023 	if (dev->data->dev_started &&
2024 		(dev->data->tx_queue_state[tx_queue_id] !=
2025 			RTE_ETH_QUEUE_STATE_STOPPED))
2026 		return -EBUSY;
2027 
2028 	eth_dev_txq_release(dev, tx_queue_id);
2029 
2030 	if (tx_conf == NULL)
2031 		tx_conf = &dev_info.default_txconf;
2032 
2033 	local_conf = *tx_conf;
2034 
2035 	/*
2036 	 * If an offload has already been enabled in
2037 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2038 	 * so there is no need to enable it on this queue again.
2039 	 * The local_conf.offloads input to the underlying PMD carries
2040 	 * only those offloads enabled on this queue but not enabled
2041 	 * on all queues.
2042 	 */
2043 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2044 
2045 	/*
2046 	 * Offloads newly added for this queue are those not enabled in
2047 	 * rte_eth_dev_configure() and they must be of per-queue type.
2048 	 * A pure per-port offload can't be enabled on a queue while
2049 	 * disabled on another queue, and a pure per-port offload can't
2050 	 * be newly enabled on any queue if it hasn't been enabled
2051 	 * in rte_eth_dev_configure().
2052 	 */
2053 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2054 	     local_conf.offloads) {
2055 		RTE_ETHDEV_LOG(ERR,
2056 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2057 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2058 			port_id, tx_queue_id, local_conf.offloads,
2059 			dev_info.tx_queue_offload_capa,
2060 			__func__);
2061 		return -EINVAL;
2062 	}
2063 
2064 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2065 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2066 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2067 }
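
/*
 * Usage sketch (illustrative; "port_id" is an assumption): set up Tx
 * queue 0. nb_tx_desc = 0 picks the driver default (or EAL fallback)
 * ring size and tx_conf = NULL picks dev_info.default_txconf, matching
 * the defaulting logic implemented above.
 *
 *	int rc = rte_eth_tx_queue_setup(port_id, 0, 0, rte_socket_id(),
 *					NULL);
 *	if (rc != 0)
 *		printf("tx_queue_setup failed: %s\n", rte_strerror(-rc));
 */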
2068 
2069 int
2070 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2071 			       uint16_t nb_tx_desc,
2072 			       const struct rte_eth_hairpin_conf *conf)
2073 {
2074 	struct rte_eth_dev *dev;
2075 	struct rte_eth_hairpin_cap cap;
2076 	int i;
2077 	int count;
2078 	int ret;
2079 
2080 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2081 	dev = &rte_eth_devices[port_id];
2082 
2083 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2084 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2085 		return -EINVAL;
2086 	}
2087 
2088 	if (conf == NULL) {
2089 		RTE_ETHDEV_LOG(ERR,
2090 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2091 			port_id);
2092 		return -EINVAL;
2093 	}
2094 
2095 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2096 	if (ret != 0)
2097 		return ret;
2098 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2099 				-ENOTSUP);
2100 	/* If nb_tx_desc is zero, use the max number of descriptors from the driver. */
2101 	if (nb_tx_desc == 0)
2102 		nb_tx_desc = cap.max_nb_desc;
2103 	if (nb_tx_desc > cap.max_nb_desc) {
2104 		RTE_ETHDEV_LOG(ERR,
2105 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2106 			nb_tx_desc, cap.max_nb_desc);
2107 		return -EINVAL;
2108 	}
2109 	if (conf->peer_count > cap.max_tx_2_rx) {
2110 		RTE_ETHDEV_LOG(ERR,
2111 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2112 			conf->peer_count, cap.max_tx_2_rx);
2113 		return -EINVAL;
2114 	}
2115 	if (conf->peer_count == 0) {
2116 		RTE_ETHDEV_LOG(ERR,
2117 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2118 			conf->peer_count);
2119 		return -EINVAL;
2120 	}
2121 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2122 	     cap.max_nb_queues != UINT16_MAX; i++) {
2123 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2124 			count++;
2125 	}
2126 	if (count > cap.max_nb_queues) {
2127 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2128 			       cap.max_nb_queues);
2129 		return -EINVAL;
2130 	}
2131 	if (dev->data->dev_started)
2132 		return -EBUSY;
2133 	eth_dev_txq_release(dev, tx_queue_id);
2134 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2135 		(dev, tx_queue_id, nb_tx_desc, conf);
2136 	if (ret == 0)
2137 		dev->data->tx_queue_state[tx_queue_id] =
2138 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2139 	return eth_err(port_id, ret);
2140 }
2141 
2142 int
2143 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2144 {
2145 	struct rte_eth_dev *dev;
2146 	int ret;
2147 
2148 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2149 	dev = &rte_eth_devices[tx_port];
2150 
2151 	if (dev->data->dev_started == 0) {
2152 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2153 		return -EBUSY;
2154 	}
2155 
2156 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_bind, -ENOTSUP);
2157 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2158 	if (ret != 0)
2159 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2160 			       " to Rx %d (%d - all ports)\n",
2161 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2162 
2163 	return ret;
2164 }
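
/*
 * Usage sketch for two-port hairpin (illustrative; the port and queue
 * identifiers are assumptions of the example): point an Rx hairpin
 * queue on "rx_port" at a Tx hairpin queue on "tx_port", request
 * manual binding, then bind the ports. Queue setup must happen before
 * the ports are started; the bind itself requires the Tx port to be
 * started, as checked above.
 *
 *	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *
 *	conf.manual_bind = 1;
 *	conf.peers[0].port = tx_port;
 *	conf.peers[0].queue = hairpin_queue_id;
 *	rte_eth_rx_hairpin_queue_setup(rx_port, hairpin_queue_id, 0, &conf);
 *	conf.peers[0].port = rx_port;
 *	rte_eth_tx_hairpin_queue_setup(tx_port, hairpin_queue_id, 0, &conf);
 *	... start both ports ...
 *	rte_eth_hairpin_bind(tx_port, rx_port);
 */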
2165 
2166 int
2167 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2168 {
2169 	struct rte_eth_dev *dev;
2170 	int ret;
2171 
2172 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2173 	dev = &rte_eth_devices[tx_port];
2174 
2175 	if (dev->data->dev_started == 0) {
2176 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2177 		return -EBUSY;
2178 	}
2179 
2180 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_unbind, -ENOTSUP);
2181 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2182 	if (ret != 0)
2183 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2184 			       " from Rx %d (%d - all ports)\n",
2185 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2186 
2187 	return ret;
2188 }
2189 
2190 int
2191 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2192 			       size_t len, uint32_t direction)
2193 {
2194 	struct rte_eth_dev *dev;
2195 	int ret;
2196 
2197 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2198 	dev = &rte_eth_devices[port_id];
2199 
2200 	if (peer_ports == NULL) {
2201 		RTE_ETHDEV_LOG(ERR,
2202 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2203 			port_id);
2204 		return -EINVAL;
2205 	}
2206 
2207 	if (len == 0) {
2208 		RTE_ETHDEV_LOG(ERR,
2209 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2210 			port_id);
2211 		return -EINVAL;
2212 	}
2213 
2214 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_get_peer_ports,
2215 				-ENOTSUP);
2216 
2217 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2218 						      len, direction);
2219 	if (ret < 0)
2220 		RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2221 			       port_id, direction ? "Rx" : "Tx");
2222 
2223 	return ret;
2224 }
2225 
2226 void
2227 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2228 		void *userdata __rte_unused)
2229 {
2230 	rte_pktmbuf_free_bulk(pkts, unsent);
2231 }
2232 
2233 void
2234 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2235 		void *userdata)
2236 {
2237 	uint64_t *count = userdata;
2238 
2239 	rte_pktmbuf_free_bulk(pkts, unsent);
2240 	*count += unsent;
2241 }
2242 
2243 int
2244 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2245 		buffer_tx_error_fn cbfn, void *userdata)
2246 {
2247 	if (buffer == NULL) {
2248 		RTE_ETHDEV_LOG(ERR,
2249 			"Cannot set Tx buffer error callback to NULL buffer\n");
2250 		return -EINVAL;
2251 	}
2252 
2253 	buffer->error_callback = cbfn;
2254 	buffer->error_userdata = userdata;
2255 	return 0;
2256 }
2257 
2258 int
2259 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2260 {
2261 	int ret = 0;
2262 
2263 	if (buffer == NULL) {
2264 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2265 		return -EINVAL;
2266 	}
2267 
2268 	buffer->size = size;
2269 	if (buffer->error_callback == NULL) {
2270 		ret = rte_eth_tx_buffer_set_err_callback(
2271 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2272 	}
2273 
2274 	return ret;
2275 }
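
/*
 * Usage sketch (illustrative): allocate a buffer for up to 32 packets,
 * initialize it (which installs rte_eth_tx_buffer_drop_callback by
 * default), then switch to the counting callback so drops are tallied
 * in "silently_dropped". All names here are assumptions of the example.
 *
 *	static uint64_t silently_dropped;
 *	struct rte_eth_dev_tx_buffer *txb;
 *
 *	txb = rte_zmalloc_socket("tx_buffer",
 *				 RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *				 rte_socket_id());
 *	rte_eth_tx_buffer_init(txb, 32);
 *	rte_eth_tx_buffer_set_err_callback(txb,
 *			rte_eth_tx_buffer_count_callback, &silently_dropped);
 */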
2276 
2277 int
2278 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2279 {
2280 	struct rte_eth_dev *dev;
2281 	int ret;
2282 
2283 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2284 	dev = &rte_eth_devices[port_id];
2285 
2286 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2287 
2288 	/* Call driver to free pending mbufs. */
2289 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2290 					       free_cnt);
2291 	return eth_err(port_id, ret);
2292 }
2293 
2294 int
2295 rte_eth_promiscuous_enable(uint16_t port_id)
2296 {
2297 	struct rte_eth_dev *dev;
2298 	int diag = 0;
2299 
2300 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301 	dev = &rte_eth_devices[port_id];
2302 
2303 	if (dev->data->promiscuous == 1)
2304 		return 0;
2305 
2306 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2307 
2308 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2309 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2310 
2311 	return eth_err(port_id, diag);
2312 }
2313 
2314 int
2315 rte_eth_promiscuous_disable(uint16_t port_id)
2316 {
2317 	struct rte_eth_dev *dev;
2318 	int diag = 0;
2319 
2320 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2321 	dev = &rte_eth_devices[port_id];
2322 
2323 	if (dev->data->promiscuous == 0)
2324 		return 0;
2325 
2326 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2327 
2328 	dev->data->promiscuous = 0;
2329 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2330 	if (diag != 0)
2331 		dev->data->promiscuous = 1;
2332 
2333 	return eth_err(port_id, diag);
2334 }
2335 
2336 int
2337 rte_eth_promiscuous_get(uint16_t port_id)
2338 {
2339 	struct rte_eth_dev *dev;
2340 
2341 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2342 	dev = &rte_eth_devices[port_id];
2343 
2344 	return dev->data->promiscuous;
2345 }
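
/*
 * Usage sketch (illustrative): enable promiscuous mode and confirm the
 * cached state maintained above actually flipped.
 *
 *	if (rte_eth_promiscuous_enable(port_id) != 0 ||
 *	    rte_eth_promiscuous_get(port_id) != 1)
 *		printf("promiscuous mode not enabled on port %u\n", port_id);
 */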
2346 
2347 int
2348 rte_eth_allmulticast_enable(uint16_t port_id)
2349 {
2350 	struct rte_eth_dev *dev;
2351 	int diag;
2352 
2353 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354 	dev = &rte_eth_devices[port_id];
2355 
2356 	if (dev->data->all_multicast == 1)
2357 		return 0;
2358 
2359 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2360 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2361 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2362 
2363 	return eth_err(port_id, diag);
2364 }
2365 
2366 int
2367 rte_eth_allmulticast_disable(uint16_t port_id)
2368 {
2369 	struct rte_eth_dev *dev;
2370 	int diag;
2371 
2372 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373 	dev = &rte_eth_devices[port_id];
2374 
2375 	if (dev->data->all_multicast == 0)
2376 		return 0;
2377 
2378 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2379 	dev->data->all_multicast = 0;
2380 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2381 	if (diag != 0)
2382 		dev->data->all_multicast = 1;
2383 
2384 	return eth_err(port_id, diag);
2385 }
2386 
2387 int
2388 rte_eth_allmulticast_get(uint16_t port_id)
2389 {
2390 	struct rte_eth_dev *dev;
2391 
2392 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2393 	dev = &rte_eth_devices[port_id];
2394 
2395 	return dev->data->all_multicast;
2396 }
2397 
2398 int
2399 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2400 {
2401 	struct rte_eth_dev *dev;
2402 
2403 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404 	dev = &rte_eth_devices[port_id];
2405 
2406 	if (eth_link == NULL) {
2407 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2408 			port_id);
2409 		return -EINVAL;
2410 	}
2411 
2412 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2413 		rte_eth_linkstatus_get(dev, eth_link);
2414 	else {
2415 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2416 		(*dev->dev_ops->link_update)(dev, 1);
2417 		*eth_link = dev->data->dev_link;
2418 	}
2419 
2420 	return 0;
2421 }
2422 
2423 int
2424 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2425 {
2426 	struct rte_eth_dev *dev;
2427 
2428 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2429 	dev = &rte_eth_devices[port_id];
2430 
2431 	if (eth_link == NULL) {
2432 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2433 			port_id);
2434 		return -EINVAL;
2435 	}
2436 
2437 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2438 		rte_eth_linkstatus_get(dev, eth_link);
2439 	else {
2440 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2441 		(*dev->dev_ops->link_update)(dev, 0);
2442 		*eth_link = dev->data->dev_link;
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 const char *
2449 rte_eth_link_speed_to_str(uint32_t link_speed)
2450 {
2451 	switch (link_speed) {
2452 	case RTE_ETH_SPEED_NUM_NONE: return "None";
2453 	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2454 	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2455 	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2456 	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2457 	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2458 	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2459 	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2460 	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2461 	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2462 	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2463 	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2464 	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2465 	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2466 	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2467 	default: return "Invalid";
2468 	}
2469 }
2470 
2471 int
2472 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2473 {
2474 	if (str == NULL) {
2475 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2476 		return -EINVAL;
2477 	}
2478 
2479 	if (len == 0) {
2480 		RTE_ETHDEV_LOG(ERR,
2481 			"Cannot convert link to string with zero size\n");
2482 		return -EINVAL;
2483 	}
2484 
2485 	if (eth_link == NULL) {
2486 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2487 		return -EINVAL;
2488 	}
2489 
2490 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2491 		return snprintf(str, len, "Link down");
2492 	else
2493 		return snprintf(str, len, "Link up at %s %s %s",
2494 			rte_eth_link_speed_to_str(eth_link->link_speed),
2495 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2496 			"FDX" : "HDX",
2497 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2498 			"Autoneg" : "Fixed");
2499 }
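
/*
 * Usage sketch (illustrative): query the link, waiting for
 * auto-negotiation to complete, and print it using the helpers above.
 *
 *	struct rte_eth_link link;
 *	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get(port_id, &link) == 0) {
 *		rte_eth_link_to_str(link_str, sizeof(link_str), &link);
 *		printf("port %u: %s\n", port_id, link_str);
 *	}
 */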
2500 
2501 int
2502 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2503 {
2504 	struct rte_eth_dev *dev;
2505 
2506 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2507 	dev = &rte_eth_devices[port_id];
2508 
2509 	if (stats == NULL) {
2510 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2511 			port_id);
2512 		return -EINVAL;
2513 	}
2514 
2515 	memset(stats, 0, sizeof(*stats));
2516 
2517 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2518 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2519 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2520 }
2521 
2522 int
2523 rte_eth_stats_reset(uint16_t port_id)
2524 {
2525 	struct rte_eth_dev *dev;
2526 	int ret;
2527 
2528 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2529 	dev = &rte_eth_devices[port_id];
2530 
2531 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2532 	ret = (*dev->dev_ops->stats_reset)(dev);
2533 	if (ret != 0)
2534 		return eth_err(port_id, ret);
2535 
2536 	dev->data->rx_mbuf_alloc_failed = 0;
2537 
2538 	return 0;
2539 }
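
/*
 * Usage sketch (illustrative): snapshot the basic counters, print the
 * Rx side, then zero them for the next measurement interval.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0) {
 *		printf("rx packets %"PRIu64", rx missed %"PRIu64"\n",
 *		       stats.ipackets, stats.imissed);
 *		rte_eth_stats_reset(port_id);
 *	}
 */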
2540 
2541 static inline int
2542 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2543 {
2544 	uint16_t nb_rxqs, nb_txqs;
2545 	int count;
2546 
2547 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2548 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2549 
2550 	count = RTE_NB_STATS;
2551 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2552 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2553 		count += nb_txqs * RTE_NB_TXQ_STATS;
2554 	}
2555 
2556 	return count;
2557 }
2558 
2559 static int
2560 eth_dev_get_xstats_count(uint16_t port_id)
2561 {
2562 	struct rte_eth_dev *dev;
2563 	int count;
2564 
2565 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2566 	dev = &rte_eth_devices[port_id];
2567 	if (dev->dev_ops->xstats_get_names != NULL) {
2568 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2569 		if (count < 0)
2570 			return eth_err(port_id, count);
2571 	} else
2572 		count = 0;
2573 
2574 
2575 	count += eth_dev_get_xstats_basic_count(dev);
2576 
2577 	return count;
2578 }
2579 
2580 int
2581 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2582 		uint64_t *id)
2583 {
2584 	int cnt_xstats, idx_xstat;
2585 
2586 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2587 
2588 	if (xstat_name == NULL) {
2589 		RTE_ETHDEV_LOG(ERR,
2590 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2591 			port_id);
2592 		return -ENOMEM;
2593 	}
2594 
2595 	if (id == NULL) {
2596 		RTE_ETHDEV_LOG(ERR,
2597 			"Cannot get ethdev port %u xstats ID to NULL\n",
2598 			port_id);
2599 		return -ENOMEM;
2600 	}
2601 
2602 	/* Get count */
2603 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2604 	if (cnt_xstats < 0) {
2605 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2606 		return -ENODEV;
2607 	}
2608 
2609 	/* Get id-name lookup table */
2610 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2611 
2612 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2613 			port_id, xstats_names, cnt_xstats, NULL)) {
2614 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2615 		return -1;
2616 	}
2617 
2618 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2619 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2620 			*id = idx_xstat;
2621 			return 0;
2622 		}
2623 	}
2624 
2625 	return -EINVAL;
2626 }
2627 
2628 /* retrieve basic stats names */
2629 static int
2630 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2631 	struct rte_eth_xstat_name *xstats_names)
2632 {
2633 	int cnt_used_entries = 0;
2634 	uint32_t idx, id_queue;
2635 	uint16_t num_q;
2636 
2637 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2638 		strlcpy(xstats_names[cnt_used_entries].name,
2639 			eth_dev_stats_strings[idx].name,
2640 			sizeof(xstats_names[0].name));
2641 		cnt_used_entries++;
2642 	}
2643 
2644 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2645 		return cnt_used_entries;
2646 
2647 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2648 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2649 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2650 			snprintf(xstats_names[cnt_used_entries].name,
2651 				sizeof(xstats_names[0].name),
2652 				"rx_q%u_%s",
2653 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2654 			cnt_used_entries++;
2655 		}
2656 
2657 	}
2658 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2659 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2660 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2661 			snprintf(xstats_names[cnt_used_entries].name,
2662 				sizeof(xstats_names[0].name),
2663 				"tx_q%u_%s",
2664 				id_queue, eth_dev_txq_stats_strings[idx].name);
2665 			cnt_used_entries++;
2666 		}
2667 	}
2668 	return cnt_used_entries;
2669 }
2670 
2671 /* retrieve ethdev extended statistics names */
2672 int
2673 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2674 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2675 	uint64_t *ids)
2676 {
2677 	struct rte_eth_xstat_name *xstats_names_copy;
2678 	unsigned int no_basic_stat_requested = 1;
2679 	unsigned int no_ext_stat_requested = 1;
2680 	unsigned int expected_entries;
2681 	unsigned int basic_count;
2682 	struct rte_eth_dev *dev;
2683 	unsigned int i;
2684 	int ret;
2685 
2686 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2687 	dev = &rte_eth_devices[port_id];
2688 
2689 	basic_count = eth_dev_get_xstats_basic_count(dev);
2690 	ret = eth_dev_get_xstats_count(port_id);
2691 	if (ret < 0)
2692 		return ret;
2693 	expected_entries = (unsigned int)ret;
2694 
2695 	/* Return max number of stats if no ids given */
2696 	if (!ids) {
2697 		if (!xstats_names)
2698 			return expected_entries;
2699 		else if (size < expected_entries)
2700 			return expected_entries;
2701 	}
2702 
2703 	if (ids && !xstats_names)
2704 		return -EINVAL;
2705 
2706 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2707 		uint64_t ids_copy[size];
2708 
2709 		for (i = 0; i < size; i++) {
2710 			if (ids[i] < basic_count) {
2711 				no_basic_stat_requested = 0;
2712 				break;
2713 			}
2714 
2715 			/*
2716 			 * Convert ids to the xstats ids that the PMD knows;
2717 			 * ids seen by the user cover basic + extended stats.
2718 			 */
2719 			ids_copy[i] = ids[i] - basic_count;
2720 		}
2721 
2722 		if (no_basic_stat_requested)
2723 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2724 					ids_copy, xstats_names, size);
2725 	}
2726 
2727 	/* Retrieve all stats */
2728 	if (!ids) {
2729 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2730 				expected_entries);
2731 		if (num_stats < 0 || num_stats > (int)expected_entries)
2732 			return num_stats;
2733 		else
2734 			return expected_entries;
2735 	}
2736 
2737 	xstats_names_copy = calloc(expected_entries,
2738 		sizeof(struct rte_eth_xstat_name));
2739 
2740 	if (!xstats_names_copy) {
2741 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2742 		return -ENOMEM;
2743 	}
2744 
2745 	if (ids) {
2746 		for (i = 0; i < size; i++) {
2747 			if (ids[i] >= basic_count) {
2748 				no_ext_stat_requested = 0;
2749 				break;
2750 			}
2751 		}
2752 	}
2753 
2754 	/* Fill xstats_names_copy structure */
2755 	if (ids && no_ext_stat_requested) {
2756 		eth_basic_stats_get_names(dev, xstats_names_copy);
2757 	} else {
2758 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2759 			expected_entries);
2760 		if (ret < 0) {
2761 			free(xstats_names_copy);
2762 			return ret;
2763 		}
2764 	}
2765 
2766 	/* Filter stats */
2767 	for (i = 0; i < size; i++) {
2768 		if (ids[i] >= expected_entries) {
2769 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2770 			free(xstats_names_copy);
2771 			return -1;
2772 		}
2773 		xstats_names[i] = xstats_names_copy[ids[i]];
2774 	}
2775 
2776 	free(xstats_names_copy);
2777 	return size;
2778 }
2779 
2780 int
2781 rte_eth_xstats_get_names(uint16_t port_id,
2782 	struct rte_eth_xstat_name *xstats_names,
2783 	unsigned int size)
2784 {
2785 	struct rte_eth_dev *dev;
2786 	int cnt_used_entries;
2787 	int cnt_expected_entries;
2788 	int cnt_driver_entries;
2789 
2790 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2791 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
2792 			(int)size < cnt_expected_entries)
2793 		return cnt_expected_entries;
2794 
2795 	/* port_id checked in eth_dev_get_xstats_count() */
2796 	dev = &rte_eth_devices[port_id];
2797 
2798 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2799 
2800 	if (dev->dev_ops->xstats_get_names != NULL) {
2801 		/* If there are any driver-specific xstats, append them
2802 		 * to end of list.
2803 		 */
2804 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2805 			dev,
2806 			xstats_names + cnt_used_entries,
2807 			size - cnt_used_entries);
2808 		if (cnt_driver_entries < 0)
2809 			return eth_err(port_id, cnt_driver_entries);
2810 		cnt_used_entries += cnt_driver_entries;
2811 	}
2812 
2813 	return cnt_used_entries;
2814 }
2815 
2816 
2817 static int
2818 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2819 {
2820 	struct rte_eth_dev *dev;
2821 	struct rte_eth_stats eth_stats;
2822 	unsigned int count = 0, i, q;
2823 	uint64_t val, *stats_ptr;
2824 	uint16_t nb_rxqs, nb_txqs;
2825 	int ret;
2826 
2827 	ret = rte_eth_stats_get(port_id, &eth_stats);
2828 	if (ret < 0)
2829 		return ret;
2830 
2831 	dev = &rte_eth_devices[port_id];
2832 
2833 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2834 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2835 
2836 	/* global stats */
2837 	for (i = 0; i < RTE_NB_STATS; i++) {
2838 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2839 					eth_dev_stats_strings[i].offset);
2840 		val = *stats_ptr;
2841 		xstats[count++].value = val;
2842 	}
2843 
2844 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2845 		return count;
2846 
2847 	/* per-rxq stats */
2848 	for (q = 0; q < nb_rxqs; q++) {
2849 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2850 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2851 					eth_dev_rxq_stats_strings[i].offset +
2852 					q * sizeof(uint64_t));
2853 			val = *stats_ptr;
2854 			xstats[count++].value = val;
2855 		}
2856 	}
2857 
2858 	/* per-txq stats */
2859 	for (q = 0; q < nb_txqs; q++) {
2860 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2861 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2862 					eth_dev_txq_stats_strings[i].offset +
2863 					q * sizeof(uint64_t));
2864 			val = *stats_ptr;
2865 			xstats[count++].value = val;
2866 		}
2867 	}
2868 	return count;
2869 }
2870 
2871 /* retrieve ethdev extended statistics */
2872 int
2873 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2874 			 uint64_t *values, unsigned int size)
2875 {
2876 	unsigned int no_basic_stat_requested = 1;
2877 	unsigned int no_ext_stat_requested = 1;
2878 	unsigned int num_xstats_filled;
2879 	unsigned int basic_count;
2880 	uint16_t expected_entries;
2881 	struct rte_eth_dev *dev;
2882 	unsigned int i;
2883 	int ret;
2884 
2885 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2886 	dev = &rte_eth_devices[port_id];
2887 
2888 	ret = eth_dev_get_xstats_count(port_id);
2889 	if (ret < 0)
2890 		return ret;
2891 	expected_entries = (uint16_t)ret;
2892 	struct rte_eth_xstat xstats[expected_entries];
2893 	basic_count = eth_dev_get_xstats_basic_count(dev);
2894 
2895 	/* Return max number of stats if no ids given */
2896 	if (!ids) {
2897 		if (!values)
2898 			return expected_entries;
2899 		else if (size < expected_entries)
2900 			return expected_entries;
2901 	}
2902 
2903 	if (ids && !values)
2904 		return -EINVAL;
2905 
2906 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2907 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
2908 		uint64_t ids_copy[size];
2909 
2910 		for (i = 0; i < size; i++) {
2911 			if (ids[i] < basic_count) {
2912 				no_basic_stat_requested = 0;
2913 				break;
2914 			}
2915 
2916 			 * Convert ids to the xstats ids that the PMD knows;
2917 			 * ids seen by the user cover basic + extended stats.
2918 			 * ids known by user are basic + extended stats.
2919 			 */
2920 			ids_copy[i] = ids[i] - basic_count;
2921 		}
2922 
2923 		if (no_basic_stat_requested)
2924 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2925 					values, size);
2926 	}
2927 
2928 	if (ids) {
2929 		for (i = 0; i < size; i++) {
2930 			if (ids[i] >= basic_count) {
2931 				no_ext_stat_requested = 0;
2932 				break;
2933 			}
2934 		}
2935 	}
2936 
2937 	/* Fill the xstats structure */
2938 	if (ids && no_ext_stat_requested)
2939 		ret = eth_basic_stats_get(port_id, xstats);
2940 	else
2941 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2942 
2943 	if (ret < 0)
2944 		return ret;
2945 	num_xstats_filled = (unsigned int)ret;
2946 
2947 	/* Return all stats */
2948 	if (!ids) {
2949 		for (i = 0; i < num_xstats_filled; i++)
2950 			values[i] = xstats[i].value;
2951 		return expected_entries;
2952 	}
2953 
2954 	/* Filter stats */
2955 	for (i = 0; i < size; i++) {
2956 		if (ids[i] >= expected_entries) {
2957 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2958 			return -1;
2959 		}
2960 		values[i] = xstats[ids[i]].value;
2961 	}
2962 	return size;
2963 }
2964 
2965 int
2966 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2967 	unsigned int n)
2968 {
2969 	struct rte_eth_dev *dev;
2970 	unsigned int count = 0, i;
2971 	signed int xcount = 0;
2972 	uint16_t nb_rxqs, nb_txqs;
2973 	int ret;
2974 
2975 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2976 	dev = &rte_eth_devices[port_id];
2977 
2978 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2979 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2980 
2981 	/* Return generic statistics */
2982 	count = RTE_NB_STATS;
2983 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
2984 		count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
2985 
2986 	/* implemented by the driver */
2987 	if (dev->dev_ops->xstats_get != NULL) {
2988 		/* Retrieve the driver-specific xstats, appending them at
2989 		 * the end of the xstats array.
2990 		 */
2991 		xcount = (*dev->dev_ops->xstats_get)(dev,
2992 				     xstats ? xstats + count : NULL,
2993 				     (n > count) ? n - count : 0);
2994 
2995 		if (xcount < 0)
2996 			return eth_err(port_id, xcount);
2997 	}
2998 
2999 	if (n < count + xcount || xstats == NULL)
3000 		return count + xcount;
3001 
3002 	/* now fill the xstats structure */
3003 	ret = eth_basic_stats_get(port_id, xstats);
3004 	if (ret < 0)
3005 		return ret;
3006 	count = ret;
3007 
3008 	for (i = 0; i < count; i++)
3009 		xstats[i].id = i;
3010 	/* add an offset to driver-specific stats */
3011 	for ( ; i < count + xcount; i++)
3012 		xstats[i].id += count;
3013 
3014 	return count + xcount;
3015 }
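
/*
 * Usage sketch of the usual two-call pattern (illustrative): call once
 * with NULL/0 to learn the count, size the arrays, then fetch names and
 * values, whose entries correspond via the id field.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
 *		struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *		if (vals != NULL && names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *		    rte_eth_xstats_get(port_id, vals, n) == n)
 *			for (i = 0; i < n; i++)
 *				printf("%s: %"PRIu64"\n",
 *				       names[vals[i].id].name, vals[i].value);
 *		free(vals);
 *		free(names);
 *	}
 */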
3016 
3017 /* reset ethdev extended statistics */
3018 int
3019 rte_eth_xstats_reset(uint16_t port_id)
3020 {
3021 	struct rte_eth_dev *dev;
3022 
3023 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3024 	dev = &rte_eth_devices[port_id];
3025 
3026 	/* implemented by the driver */
3027 	if (dev->dev_ops->xstats_reset != NULL)
3028 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3029 
3030 	/* fallback to default */
3031 	return rte_eth_stats_reset(port_id);
3032 }
3033 
3034 static int
3035 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3036 		uint8_t stat_idx, uint8_t is_rx)
3037 {
3038 	struct rte_eth_dev *dev;
3039 
3040 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3041 	dev = &rte_eth_devices[port_id];
3042 
3043 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3044 		return -EINVAL;
3045 
3046 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3047 		return -EINVAL;
3048 
3049 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3050 		return -EINVAL;
3051 
3052 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
3053 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3054 }
3055 
3056 int
3057 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3058 		uint8_t stat_idx)
3059 {
3060 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3061 						tx_queue_id,
3062 						stat_idx, STAT_QMAP_TX));
3063 }
3064 
3065 int
3066 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3067 		uint8_t stat_idx)
3068 {
3069 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3070 						rx_queue_id,
3071 						stat_idx, STAT_QMAP_RX));
3072 }
3073 
3074 int
3075 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3076 {
3077 	struct rte_eth_dev *dev;
3078 
3079 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3080 	dev = &rte_eth_devices[port_id];
3081 
3082 	if (fw_version == NULL && fw_size > 0) {
3083 		RTE_ETHDEV_LOG(ERR,
3084 			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3085 			port_id);
3086 		return -EINVAL;
3087 	}
3088 
3089 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
3090 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3091 							fw_version, fw_size));
3092 }
3093 
3094 int
3095 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3096 {
3097 	struct rte_eth_dev *dev;
3098 	const struct rte_eth_desc_lim lim = {
3099 		.nb_max = UINT16_MAX,
3100 		.nb_min = 0,
3101 		.nb_align = 1,
3102 		.nb_seg_max = UINT16_MAX,
3103 		.nb_mtu_seg_max = UINT16_MAX,
3104 	};
3105 	int diag;
3106 
3107 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3108 	dev = &rte_eth_devices[port_id];
3109 
3110 	if (dev_info == NULL) {
3111 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3112 			port_id);
3113 		return -EINVAL;
3114 	}
3115 
3116 	/*
3117 	 * Init dev_info before the port_id check since the caller may not
3118 	 * check the return status and would not know whether the get succeeded.
3119 	 */
3120 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3121 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3122 
3123 	dev_info->rx_desc_lim = lim;
3124 	dev_info->tx_desc_lim = lim;
3125 	dev_info->device = dev->device;
3126 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3127 		RTE_ETHER_CRC_LEN;
3128 	dev_info->max_mtu = UINT16_MAX;
3129 
3130 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3131 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3132 	if (diag != 0) {
3133 		/* Cleanup already filled in device information */
3134 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3135 		return eth_err(port_id, diag);
3136 	}
3137 
3138 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3139 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3140 			RTE_MAX_QUEUES_PER_PORT);
3141 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3142 			RTE_MAX_QUEUES_PER_PORT);
3143 
3144 	dev_info->driver_name = dev->device->driver->name;
3145 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3146 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3147 
3148 	dev_info->dev_flags = &dev->data->dev_flags;
3149 
3150 	return 0;
3151 }
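
/*
 * Usage sketch (illustrative): fetch device information and clamp an
 * application-chosen queue count (8 here, purely an assumption) to what
 * the port, capped above by RTE_MAX_QUEUES_PER_PORT, can provide.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rxq = 8;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0)
 *		nb_rxq = RTE_MIN(nb_rxq, dev_info.max_rx_queues);
 */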
3152 
3153 int
3154 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3155 {
3156 	struct rte_eth_dev *dev;
3157 
3158 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3159 	dev = &rte_eth_devices[port_id];
3160 
3161 	if (dev_conf == NULL) {
3162 		RTE_ETHDEV_LOG(ERR,
3163 			"Cannot get ethdev port %u configuration to NULL\n",
3164 			port_id);
3165 		return -EINVAL;
3166 	}
3167 
3168 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3169 
3170 	return 0;
3171 }
3172 
3173 int
3174 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3175 				 uint32_t *ptypes, int num)
3176 {
3177 	int i, j;
3178 	struct rte_eth_dev *dev;
3179 	const uint32_t *all_ptypes;
3180 
3181 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3182 	dev = &rte_eth_devices[port_id];
3183 
3184 	if (ptypes == NULL && num > 0) {
3185 		RTE_ETHDEV_LOG(ERR,
3186 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3187 			port_id);
3188 		return -EINVAL;
3189 	}
3190 
3191 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3192 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3193 
3194 	if (!all_ptypes)
3195 		return 0;
3196 
3197 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3198 		if (all_ptypes[i] & ptype_mask) {
3199 			if (j < num)
3200 				ptypes[j] = all_ptypes[i];
3201 			j++;
3202 		}
3203 
3204 	return j;
3205 }
3206 
3207 int
3208 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3209 				 uint32_t *set_ptypes, unsigned int num)
3210 {
3211 	const uint32_t valid_ptype_masks[] = {
3212 		RTE_PTYPE_L2_MASK,
3213 		RTE_PTYPE_L3_MASK,
3214 		RTE_PTYPE_L4_MASK,
3215 		RTE_PTYPE_TUNNEL_MASK,
3216 		RTE_PTYPE_INNER_L2_MASK,
3217 		RTE_PTYPE_INNER_L3_MASK,
3218 		RTE_PTYPE_INNER_L4_MASK,
3219 	};
3220 	const uint32_t *all_ptypes;
3221 	struct rte_eth_dev *dev;
3222 	uint32_t unused_mask;
3223 	unsigned int i, j;
3224 	int ret;
3225 
3226 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3227 	dev = &rte_eth_devices[port_id];
3228 
3229 	if (num > 0 && set_ptypes == NULL) {
3230 		RTE_ETHDEV_LOG(ERR,
3231 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3232 			port_id);
3233 		return -EINVAL;
3234 	}
3235 
3236 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3237 			*dev->dev_ops->dev_ptypes_set == NULL) {
3238 		ret = 0;
3239 		goto ptype_unknown;
3240 	}
3241 
3242 	if (ptype_mask == 0) {
3243 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3244 				ptype_mask);
3245 		goto ptype_unknown;
3246 	}
3247 
3248 	unused_mask = ptype_mask;
3249 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3250 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3251 		if (mask && mask != valid_ptype_masks[i]) {
3252 			ret = -EINVAL;
3253 			goto ptype_unknown;
3254 		}
3255 		unused_mask &= ~valid_ptype_masks[i];
3256 	}
3257 
3258 	if (unused_mask) {
3259 		ret = -EINVAL;
3260 		goto ptype_unknown;
3261 	}
3262 
3263 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3264 	if (all_ptypes == NULL) {
3265 		ret = 0;
3266 		goto ptype_unknown;
3267 	}
3268 
3269 	/*
3270 	 * Accommodate as many set_ptypes as possible. If the supplied
3271 	 * set_ptypes array is insufficient, fill it partially.
3272 	 */
3273 	for (i = 0, j = 0; set_ptypes != NULL &&
3274 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3275 		if (ptype_mask & all_ptypes[i]) {
3276 			if (j < num - 1) {
3277 				set_ptypes[j] = all_ptypes[i];
3278 				j++;
3279 				continue;
3280 			}
3281 			break;
3282 		}
3283 	}
3284 
3285 	if (set_ptypes != NULL && j < num)
3286 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3287 
3288 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3289 
3290 ptype_unknown:
3291 	if (num > 0)
3292 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3293 
3294 	return ret;
3295 }
3296 
3297 int
3298 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3299 	unsigned int num)
3300 {
3301 	int32_t ret;
3302 	struct rte_eth_dev *dev;
3303 	struct rte_eth_dev_info dev_info;
3304 
3305 	if (ma == NULL) {
3306 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3307 		return -EINVAL;
3308 	}
3309 
3310 	/* will check for us that port_id is a valid one */
3311 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3312 	if (ret != 0)
3313 		return ret;
3314 
3315 	dev = &rte_eth_devices[port_id];
3316 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3317 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3318 
3319 	return num;
3320 }
3321 
3322 int
3323 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3324 {
3325 	struct rte_eth_dev *dev;
3326 
3327 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3328 	dev = &rte_eth_devices[port_id];
3329 
3330 	if (mac_addr == NULL) {
3331 		RTE_ETHDEV_LOG(ERR,
3332 			"Cannot get ethdev port %u MAC address to NULL\n",
3333 			port_id);
3334 		return -EINVAL;
3335 	}
3336 
3337 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3338 
3339 	return 0;
3340 }
3341 
3342 int
3343 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3344 {
3345 	struct rte_eth_dev *dev;
3346 
3347 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3348 	dev = &rte_eth_devices[port_id];
3349 
3350 	if (mtu == NULL) {
3351 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3352 			port_id);
3353 		return -EINVAL;
3354 	}
3355 
3356 	*mtu = dev->data->mtu;
3357 	return 0;
3358 }
3359 
3360 int
3361 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3362 {
3363 	int ret;
3364 	struct rte_eth_dev_info dev_info;
3365 	struct rte_eth_dev *dev;
3366 
3367 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3368 	dev = &rte_eth_devices[port_id];
3369 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3370 
3371 	/*
3372 	 * Check if the device supports dev_infos_get, if it does not
3373 	 * skip min_mtu/max_mtu validation here as this requires values
3374 	 * that are populated within the call to rte_eth_dev_info_get()
3375 	 * which relies on dev->dev_ops->dev_infos_get.
3376 	 */
3377 	if (*dev->dev_ops->dev_infos_get != NULL) {
3378 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3379 		if (ret != 0)
3380 			return ret;
3381 
3382 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3383 		if (ret != 0)
3384 			return ret;
3385 	}
3386 
3387 	if (dev->data->dev_configured == 0) {
3388 		RTE_ETHDEV_LOG(ERR,
3389 			"Port %u must be configured before MTU set\n",
3390 			port_id);
3391 		return -EINVAL;
3392 	}
3393 
3394 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3395 	if (ret == 0)
3396 		dev->data->mtu = mtu;
3397 
3398 	return eth_err(port_id, ret);
3399 }
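
/*
 * Usage sketch (illustrative): raise the MTU for jumbo frames after the
 * port has been configured (a precondition enforced above) and read it
 * back through the cached value.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) == 0 &&
 *	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
 *		printf("port %u MTU now %u\n", port_id, mtu);
 */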
3400 
3401 int
3402 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3403 {
3404 	struct rte_eth_dev *dev;
3405 	int ret;
3406 
3407 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3408 	dev = &rte_eth_devices[port_id];
3409 
3410 	if (!(dev->data->dev_conf.rxmode.offloads &
3411 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3412 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3413 			port_id);
3414 		return -ENOSYS;
3415 	}
3416 
3417 	if (vlan_id > 4095) {
3418 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3419 			port_id, vlan_id);
3420 		return -EINVAL;
3421 	}
3422 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3423 
3424 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3425 	if (ret == 0) {
3426 		struct rte_vlan_filter_conf *vfc;
3427 		int vidx;
3428 		int vbit;
3429 
3430 		vfc = &dev->data->vlan_filter_conf;
3431 		vidx = vlan_id / 64;
3432 		vbit = vlan_id % 64;
3433 
3434 		if (on)
3435 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3436 		else
3437 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3438 	}
3439 
3440 	return eth_err(port_id, ret);
3441 }
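
/*
 * Usage sketch (illustrative): admit VLAN 100 through the hardware
 * filter. RTE_ETH_RX_OFFLOAD_VLAN_FILTER must already be set in
 * rxmode.offloads at configure time, or the call fails as shown above.
 *
 *	int rc = rte_eth_dev_vlan_filter(port_id, 100, 1);
 *
 *	if (rc != 0)
 *		printf("VLAN filter add failed: %s\n", rte_strerror(-rc));
 */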
3442 
3443 int
3444 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3445 				    int on)
3446 {
3447 	struct rte_eth_dev *dev;
3448 
3449 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3450 	dev = &rte_eth_devices[port_id];
3451 
3452 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3453 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3454 		return -EINVAL;
3455 	}
3456 
3457 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3458 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3459 
3460 	return 0;
3461 }
3462 
3463 int
3464 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3465 				enum rte_vlan_type vlan_type,
3466 				uint16_t tpid)
3467 {
3468 	struct rte_eth_dev *dev;
3469 
3470 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3471 	dev = &rte_eth_devices[port_id];
3472 
3473 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3474 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3475 							       tpid));
3476 }
3477 
3478 int
3479 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3480 {
3481 	struct rte_eth_dev_info dev_info;
3482 	struct rte_eth_dev *dev;
3483 	int ret = 0;
3484 	int mask = 0;
3485 	int cur, org = 0;
3486 	uint64_t orig_offloads;
3487 	uint64_t dev_offloads;
3488 	uint64_t new_offloads;
3489 
3490 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3491 	dev = &rte_eth_devices[port_id];
3492 
3493 	/* save original values in case of failure */
3494 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3495 	dev_offloads = orig_offloads;
3496 
3497 	/* check which option changed by application */
3498 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3499 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3500 	if (cur != org) {
3501 		if (cur)
3502 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3503 		else
3504 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3505 		mask |= RTE_ETH_VLAN_STRIP_MASK;
3506 	}
3507 
3508 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3509 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3510 	if (cur != org) {
3511 		if (cur)
3512 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3513 		else
3514 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3515 		mask |= RTE_ETH_VLAN_FILTER_MASK;
3516 	}
3517 
3518 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3519 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3520 	if (cur != org) {
3521 		if (cur)
3522 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3523 		else
3524 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3525 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
3526 	}
3527 
3528 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3529 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3530 	if (cur != org) {
3531 		if (cur)
3532 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3533 		else
3534 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3535 		mask |= RTE_ETH_QINQ_STRIP_MASK;
3536 	}
3537 
3538 	/* no change */
3539 	if (mask == 0)
3540 		return ret;
3541 
3542 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3543 	if (ret != 0)
3544 		return ret;
3545 
3546 	/* Rx VLAN offloading must be within its device capabilities */
3547 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3548 		new_offloads = dev_offloads & ~orig_offloads;
3549 		RTE_ETHDEV_LOG(ERR,
3550 			"Ethdev port_id=%u requested newly added VLAN offloads "
3551 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3552 			"0x%" PRIx64 " in %s()\n",
3553 			port_id, new_offloads, dev_info.rx_offload_capa,
3554 			__func__);
3555 		return -EINVAL;
3556 	}
3557 
3558 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3559 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3560 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3561 	if (ret) {
3562 		/* hit an error, restore original values */
3563 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
3564 	}
3565 
3566 	return eth_err(port_id, ret);
3567 }
3568 
3569 int
3570 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3571 {
3572 	struct rte_eth_dev *dev;
3573 	uint64_t *dev_offloads;
3574 	int ret = 0;
3575 
3576 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3577 	dev = &rte_eth_devices[port_id];
3578 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3579 
3580 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3581 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3582 
3583 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3584 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3585 
3586 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3587 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3588 
3589 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3590 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3591 
3592 	return ret;
3593 }
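
/*
 * Usage sketch (illustrative): turn on VLAN stripping at runtime while
 * preserving the other VLAN offload settings reported by the getter.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0)
 *		rte_eth_dev_set_vlan_offload(port_id,
 *				mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
 */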
3594 
3595 int
3596 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3597 {
3598 	struct rte_eth_dev *dev;
3599 
3600 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3601 	dev = &rte_eth_devices[port_id];
3602 
3603 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3604 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3605 }
3606 
3607 int
3608 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3609 {
3610 	struct rte_eth_dev *dev;
3611 
3612 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3613 	dev = &rte_eth_devices[port_id];
3614 
3615 	if (fc_conf == NULL) {
3616 		RTE_ETHDEV_LOG(ERR,
3617 			"Cannot get ethdev port %u flow control config to NULL\n",
3618 			port_id);
3619 		return -EINVAL;
3620 	}
3621 
3622 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3623 	memset(fc_conf, 0, sizeof(*fc_conf));
3624 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3625 }
3626 
3627 int
3628 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3629 {
3630 	struct rte_eth_dev *dev;
3631 
3632 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3633 	dev = &rte_eth_devices[port_id];
3634 
3635 	if (fc_conf == NULL) {
3636 		RTE_ETHDEV_LOG(ERR,
3637 			"Cannot set ethdev port %u flow control from NULL config\n",
3638 			port_id);
3639 		return -EINVAL;
3640 	}
3641 
3642 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3643 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3644 		return -EINVAL;
3645 	}
3646 
3647 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3648 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3649 }
3650 
3651 int
3652 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3653 				   struct rte_eth_pfc_conf *pfc_conf)
3654 {
3655 	struct rte_eth_dev *dev;
3656 
3657 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3658 	dev = &rte_eth_devices[port_id];
3659 
3660 	if (pfc_conf == NULL) {
3661 		RTE_ETHDEV_LOG(ERR,
3662 			"Cannot set ethdev port %u priority flow control from NULL config\n",
3663 			port_id);
3664 		return -EINVAL;
3665 	}
3666 
3667 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3668 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3669 		return -EINVAL;
3670 	}
3671 
3672 	/* High water, low water validation are device specific */
3673 	if (*dev->dev_ops->priority_flow_ctrl_set)
3674 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3675 					(dev, pfc_conf));
3676 	return -ENOTSUP;
3677 }
3678 
3679 static int
3680 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3681 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3682 {
3683 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3684 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3685 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3686 			RTE_ETHDEV_LOG(ERR,
3687 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3688 				pfc_queue_conf->rx_pause.tx_qid,
3689 				dev_info->nb_tx_queues);
3690 			return -EINVAL;
3691 		}
3692 
3693 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3694 			RTE_ETHDEV_LOG(ERR,
3695 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
3696 				pfc_queue_conf->rx_pause.tc, tc_max);
3697 			return -EINVAL;
3698 		}
3699 	}
3700 
3701 	return 0;
3702 }
3703 
3704 static int
3705 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3706 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3707 {
3708 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3709 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3710 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3711 			RTE_ETHDEV_LOG(ERR,
3712 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3713 				pfc_queue_conf->tx_pause.rx_qid,
3714 				dev_info->nb_rx_queues);
3715 			return -EINVAL;
3716 		}
3717 
3718 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3719 			RTE_ETHDEV_LOG(ERR,
3720 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
3721 				pfc_queue_conf->tx_pause.tc, tc_max);
3722 			return -EINVAL;
3723 		}
3724 	}
3725 
3726 	return 0;
3727 }
3728 
3729 int
3730 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3731 		struct rte_eth_pfc_queue_info *pfc_queue_info)
3732 {
3733 	struct rte_eth_dev *dev;
3734 
3735 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3736 	dev = &rte_eth_devices[port_id];
3737 
3738 	if (pfc_queue_info == NULL) {
3739 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3740 			port_id);
3741 		return -EINVAL;
3742 	}
3743 
3744 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3745 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3746 			(dev, pfc_queue_info));
3747 	return -ENOTSUP;
3748 }
3749 
3750 int
3751 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3752 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3753 {
3754 	struct rte_eth_pfc_queue_info pfc_info;
3755 	struct rte_eth_dev_info dev_info;
3756 	struct rte_eth_dev *dev;
3757 	int ret;
3758 
3759 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3760 	dev = &rte_eth_devices[port_id];
3761 
3762 	if (pfc_queue_conf == NULL) {
3763 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3764 			port_id);
3765 		return -EINVAL;
3766 	}
3767 
3768 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3769 	if (ret != 0)
3770 		return ret;
3771 
3772 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3773 	if (ret != 0)
3774 		return ret;
3775 
3776 	if (pfc_info.tc_max == 0) {
3777 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3778 			port_id);
3779 		return -ENOTSUP;
3780 	}
3781 
3782 	/* Check whether the requested mode is supported */
3783 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3784 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3785 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3786 			port_id);
3787 		return -EINVAL;
3788 	}
3789 
3790 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3791 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3792 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3793 			port_id);
3794 		return -EINVAL;
3795 	}
3796 
3797 	/* Validate Rx pause parameters */
3798 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3799 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3800 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3801 				pfc_queue_conf);
3802 		if (ret != 0)
3803 			return ret;
3804 	}
3805 
3806 	/* Validate Tx pause parameters */
3807 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3808 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3809 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3810 				pfc_queue_conf);
3811 		if (ret != 0)
3812 			return ret;
3813 	}
3814 
3815 	if (*dev->dev_ops->priority_flow_ctrl_queue_config)
3816 		return eth_err(port_id,
3817 			       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3818 				dev, pfc_queue_conf));
3819 	return -ENOTSUP;
3820 }
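
/*
 * Illustrative sketch (hypothetical helper, compiled out by the undefined
 * guard): queue-level PFC binds a traffic class to individual queues. The
 * rx_pause fields set here are the same ones checked by
 * validate_rx_pause_config() above: a received PAUSE for TC 0 stops Tx
 * queue 0.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_pfc_queue_rx_pause(uint16_t port_id)
{
	struct rte_eth_pfc_queue_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.mode = RTE_ETH_FC_RX_PAUSE;
	conf.rx_pause.tx_qid = 0;	/* Tx queue to stop on received PAUSE */
	conf.rx_pause.tc = 0;		/* traffic class carried in the frame */

	return rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &conf);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */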
3821 
3822 static int
3823 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3824 			uint16_t reta_size)
3825 {
3826 	uint16_t i, num;
3827 
3828 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3829 	for (i = 0; i < num; i++) {
3830 		if (reta_conf[i].mask)
3831 			return 0;
3832 	}
3833 
3834 	return -EINVAL;
3835 }
3836 
3837 static int
3838 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3839 			 uint16_t reta_size,
3840 			 uint16_t max_rxq)
3841 {
3842 	uint16_t i, idx, shift;
3843 
3844 	if (max_rxq == 0) {
3845 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3846 		return -EINVAL;
3847 	}
3848 
3849 	for (i = 0; i < reta_size; i++) {
3850 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3851 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3852 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3853 			(reta_conf[idx].reta[shift] >= max_rxq)) {
3854 			RTE_ETHDEV_LOG(ERR,
3855 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3856 				idx, shift,
3857 				reta_conf[idx].reta[shift], max_rxq);
3858 			return -EINVAL;
3859 		}
3860 	}
3861 
3862 	return 0;
3863 }
3864 
3865 int
3866 rte_eth_dev_rss_reta_update(uint16_t port_id,
3867 			    struct rte_eth_rss_reta_entry64 *reta_conf,
3868 			    uint16_t reta_size)
3869 {
3870 	struct rte_eth_dev *dev;
3871 	int ret;
3872 
3873 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3874 	dev = &rte_eth_devices[port_id];
3875 
3876 	if (reta_conf == NULL) {
3877 		RTE_ETHDEV_LOG(ERR,
3878 			"Cannot update ethdev port %u RSS RETA to NULL\n",
3879 			port_id);
3880 		return -EINVAL;
3881 	}
3882 
3883 	if (reta_size == 0) {
3884 		RTE_ETHDEV_LOG(ERR,
3885 			"Cannot update ethdev port %u RSS RETA with zero size\n",
3886 			port_id);
3887 		return -EINVAL;
3888 	}
3889 
3890 	/* Check mask bits */
3891 	ret = eth_check_reta_mask(reta_conf, reta_size);
3892 	if (ret < 0)
3893 		return ret;
3894 
3895 	/* Check entry value */
3896 	ret = eth_check_reta_entry(reta_conf, reta_size,
3897 				dev->data->nb_rx_queues);
3898 	if (ret < 0)
3899 		return ret;
3900 
3901 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3902 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3903 							     reta_size));
3904 }
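
/*
 * Illustrative sketch (hypothetical helper, never compiled): building a
 * RETA update uses the same idx/shift convention that eth_check_reta_entry()
 * validates above, one 64-entry group per rte_eth_rss_reta_entry64. This
 * spreads the table round-robin over the first nb_rx_queues queues.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_round_robin_reta(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_dev_info dev_info;
	uint16_t i, idx, shift;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (nb_rx_queues == 0 || dev_info.reta_size >
			RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		idx = i / RTE_ETH_RETA_GROUP_SIZE;	/* 64-entry group */
		shift = i % RTE_ETH_RETA_GROUP_SIZE;	/* bit in the mask */
		reta_conf[idx].mask |= RTE_BIT64(shift);
		reta_conf[idx].reta[shift] = i % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			dev_info.reta_size);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */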
3905 
3906 int
3907 rte_eth_dev_rss_reta_query(uint16_t port_id,
3908 			   struct rte_eth_rss_reta_entry64 *reta_conf,
3909 			   uint16_t reta_size)
3910 {
3911 	struct rte_eth_dev *dev;
3912 	int ret;
3913 
3914 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3915 	dev = &rte_eth_devices[port_id];
3916 
3917 	if (reta_conf == NULL) {
3918 		RTE_ETHDEV_LOG(ERR,
3919 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
3920 			port_id);
3921 		return -EINVAL;
3922 	}
3923 
3924 	/* Check mask bits */
3925 	ret = eth_check_reta_mask(reta_conf, reta_size);
3926 	if (ret < 0)
3927 		return ret;
3928 
3929 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3930 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3931 							    reta_size));
3932 }
3933 
3934 int
3935 rte_eth_dev_rss_hash_update(uint16_t port_id,
3936 			    struct rte_eth_rss_conf *rss_conf)
3937 {
3938 	struct rte_eth_dev *dev;
3939 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3940 	int ret;
3941 
3942 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3943 	dev = &rte_eth_devices[port_id];
3944 
3945 	if (rss_conf == NULL) {
3946 		RTE_ETHDEV_LOG(ERR,
3947 			"Cannot update ethdev port %u RSS hash from NULL config\n",
3948 			port_id);
3949 		return -EINVAL;
3950 	}
3951 
3952 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3953 	if (ret != 0)
3954 		return ret;
3955 
3956 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3957 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3958 	    dev_info.flow_type_rss_offloads) {
3959 		RTE_ETHDEV_LOG(ERR,
3960 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3961 			port_id, rss_conf->rss_hf,
3962 			dev_info.flow_type_rss_offloads);
3963 		return -EINVAL;
3964 	}
3965 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3966 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3967 								 rss_conf));
3968 }
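
/*
 * Illustrative sketch (hypothetical helper, never compiled): restricting the
 * RSS hash to IP headers only. A NULL rss_key keeps the currently programmed
 * key; the call fails with -EINVAL above if the device cannot hash on the
 * requested fields.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_rss_ip_only(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the currently programmed key */
		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */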
3969 
3970 int
3971 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3972 			      struct rte_eth_rss_conf *rss_conf)
3973 {
3974 	struct rte_eth_dev *dev;
3975 
3976 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3977 	dev = &rte_eth_devices[port_id];
3978 
3979 	if (rss_conf == NULL) {
3980 		RTE_ETHDEV_LOG(ERR,
3981 			"Cannot get ethdev port %u RSS hash config to NULL\n",
3982 			port_id);
3983 		return -EINVAL;
3984 	}
3985 
3986 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3987 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3988 								   rss_conf));
3989 }
3990 
3991 int
3992 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3993 				struct rte_eth_udp_tunnel *udp_tunnel)
3994 {
3995 	struct rte_eth_dev *dev;
3996 
3997 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3998 	dev = &rte_eth_devices[port_id];
3999 
4000 	if (udp_tunnel == NULL) {
4001 		RTE_ETHDEV_LOG(ERR,
4002 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4003 			port_id);
4004 		return -EINVAL;
4005 	}
4006 
4007 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4008 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4009 		return -EINVAL;
4010 	}
4011 
4012 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
4013 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4014 								udp_tunnel));
4015 }
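
/*
 * Illustrative sketch (hypothetical helper, never compiled): telling the
 * device to treat a UDP destination port as a tunnel so it can parse the
 * inner headers. 4789 is the IANA-assigned VXLAN port.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */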
4016 
4017 int
4018 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4019 				   struct rte_eth_udp_tunnel *udp_tunnel)
4020 {
4021 	struct rte_eth_dev *dev;
4022 
4023 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4024 	dev = &rte_eth_devices[port_id];
4025 
4026 	if (udp_tunnel == NULL) {
4027 		RTE_ETHDEV_LOG(ERR,
4028 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4029 			port_id);
4030 		return -EINVAL;
4031 	}
4032 
4033 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4034 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4035 		return -EINVAL;
4036 	}
4037 
4038 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
4039 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4040 								udp_tunnel));
4041 }
4042 
4043 int
4044 rte_eth_led_on(uint16_t port_id)
4045 {
4046 	struct rte_eth_dev *dev;
4047 
4048 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4049 	dev = &rte_eth_devices[port_id];
4050 
4051 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
4052 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4053 }
4054 
4055 int
4056 rte_eth_led_off(uint16_t port_id)
4057 {
4058 	struct rte_eth_dev *dev;
4059 
4060 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4061 	dev = &rte_eth_devices[port_id];
4062 
4063 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
4064 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4065 }
4066 
4067 int
4068 rte_eth_fec_get_capability(uint16_t port_id,
4069 			   struct rte_eth_fec_capa *speed_fec_capa,
4070 			   unsigned int num)
4071 {
4072 	struct rte_eth_dev *dev;
4073 	int ret;
4074 
4075 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4076 	dev = &rte_eth_devices[port_id];
4077 
4078 	if (speed_fec_capa == NULL && num > 0) {
4079 		RTE_ETHDEV_LOG(ERR,
4080 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4081 			port_id);
4082 		return -EINVAL;
4083 	}
4084 
4085 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get_capability, -ENOTSUP);
4086 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4087 
4088 	return ret;
4089 }
4090 
4091 int
4092 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4093 {
4094 	struct rte_eth_dev *dev;
4095 
4096 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4097 	dev = &rte_eth_devices[port_id];
4098 
4099 	if (fec_capa == NULL) {
4100 		RTE_ETHDEV_LOG(ERR,
4101 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4102 			port_id);
4103 		return -EINVAL;
4104 	}
4105 
4106 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_get, -ENOTSUP);
4107 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4108 }
4109 
4110 int
4111 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4112 {
4113 	struct rte_eth_dev *dev;
4114 
4115 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4116 	dev = &rte_eth_devices[port_id];
4117 
4118 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fec_set, -ENOTSUP);
4119 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4120 }
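
/*
 * Illustrative sketch (hypothetical helper, never compiled): FEC modes are
 * set as a capability bitmask, so a caller first checks whether any
 * advertised speed supports the desired mode. Per the API contract, a return
 * value larger than the array size means the array was too small.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_force_rs_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa capa[8];
	int num, i;

	num = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
	if (num < 0)
		return num;
	if (num > (int)RTE_DIM(capa))
		return -EINVAL;	/* capa[] too small for this device */

	for (i = 0; i < num; i++) {
		if (capa[i].capa & RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS))
			return rte_eth_fec_set(port_id,
				RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS));
	}

	return -ENOTSUP;	/* no advertised speed supports RS-FEC */
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */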
4121 
4122 /*
4123  * Returns the index of addr in the MAC address array, or -1 if it is not
4124  * found. Use 00:00:00:00:00:00 to find an empty slot.
4125  */
4126 static int
4127 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4128 {
4129 	struct rte_eth_dev_info dev_info;
4130 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4131 	unsigned int i;
4132 	int ret;
4133 
4134 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4135 	if (ret != 0)
4136 		return -1;
4137 
4138 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4139 		if (memcmp(addr, &dev->data->mac_addrs[i],
4140 				RTE_ETHER_ADDR_LEN) == 0)
4141 			return i;
4142 
4143 	return -1;
4144 }
4145 
4146 static const struct rte_ether_addr null_mac_addr;
4147 
4148 int
4149 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4150 			uint32_t pool)
4151 {
4152 	struct rte_eth_dev *dev;
4153 	int index;
4154 	uint64_t pool_mask;
4155 	int ret;
4156 
4157 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4158 	dev = &rte_eth_devices[port_id];
4159 
4160 	if (addr == NULL) {
4161 		RTE_ETHDEV_LOG(ERR,
4162 			"Cannot add ethdev port %u MAC address from NULL address\n",
4163 			port_id);
4164 		return -EINVAL;
4165 	}
4166 
4167 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
4168 
4169 	if (rte_is_zero_ether_addr(addr)) {
4170 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4171 			port_id);
4172 		return -EINVAL;
4173 	}
4174 	if (pool >= RTE_ETH_64_POOLS) {
4175 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4176 		return -EINVAL;
4177 	}
4178 
4179 	index = eth_dev_get_mac_addr_index(port_id, addr);
4180 	if (index < 0) {
4181 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4182 		if (index < 0) {
4183 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4184 				port_id);
4185 			return -ENOSPC;
4186 		}
4187 	} else {
4188 		pool_mask = dev->data->mac_pool_sel[index];
4189 
4190 		/* If both the MAC address and the pool are already there, do nothing */
4191 		if (pool_mask & RTE_BIT64(pool))
4192 			return 0;
4193 	}
4194 
4195 	/* Update NIC */
4196 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4197 
4198 	if (ret == 0) {
4199 		/* Update address in NIC data structure */
4200 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4201 
4202 		/* Update pool bitmap in NIC data structure */
4203 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4204 	}
4205 
4206 	return eth_err(port_id, ret);
4207 }
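
/*
 * Illustrative sketch (hypothetical helper, never compiled): adding a
 * secondary unicast address. The address value is made up (locally
 * administered); pool 0 is always valid and pools only matter with VMDq.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_add_secondary_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */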
4208 
4209 int
4210 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4211 {
4212 	struct rte_eth_dev *dev;
4213 	int index;
4214 
4215 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4216 	dev = &rte_eth_devices[port_id];
4217 
4218 	if (addr == NULL) {
4219 		RTE_ETHDEV_LOG(ERR,
4220 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4221 			port_id);
4222 		return -EINVAL;
4223 	}
4224 
4225 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
4226 
4227 	index = eth_dev_get_mac_addr_index(port_id, addr);
4228 	if (index == 0) {
4229 		RTE_ETHDEV_LOG(ERR,
4230 			"Port %u: Cannot remove default MAC address\n",
4231 			port_id);
4232 		return -EADDRINUSE;
4233 	} else if (index < 0)
4234 		return 0;  /* Do nothing if address wasn't found */
4235 
4236 	/* Update NIC */
4237 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4238 
4239 	/* Update address in NIC data structure */
4240 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4241 
4242 	/* reset pool bitmap */
4243 	dev->data->mac_pool_sel[index] = 0;
4244 
4245 	return 0;
4246 }
4247 
4248 int
4249 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4250 {
4251 	struct rte_eth_dev *dev;
4252 	int ret;
4253 
4254 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4255 	dev = &rte_eth_devices[port_id];
4256 
4257 	if (addr == NULL) {
4258 		RTE_ETHDEV_LOG(ERR,
4259 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4260 			port_id);
4261 		return -EINVAL;
4262 	}
4263 
4264 	if (!rte_is_valid_assigned_ether_addr(addr))
4265 		return -EINVAL;
4266 
4267 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
4268 
4269 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4270 	if (ret < 0)
4271 		return ret;
4272 
4273 	/* Update default address in NIC data structure */
4274 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4275 
4276 	return 0;
4277 }
4278 
4279 
4280 /*
4281  * Returns the index of addr in the hash MAC address array, or -1 if it is
4282  * not found. Use 00:00:00:00:00:00 to find an empty slot.
4283  */
4284 static int
4285 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4286 		const struct rte_ether_addr *addr)
4287 {
4288 	struct rte_eth_dev_info dev_info;
4289 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4290 	unsigned int i;
4291 	int ret;
4292 
4293 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4294 	if (ret != 0)
4295 		return -1;
4296 
4297 	if (!dev->data->hash_mac_addrs)
4298 		return -1;
4299 
4300 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4301 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4302 			RTE_ETHER_ADDR_LEN) == 0)
4303 			return i;
4304 
4305 	return -1;
4306 }
4307 
4308 int
4309 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4310 				uint8_t on)
4311 {
4312 	int index;
4313 	int ret;
4314 	struct rte_eth_dev *dev;
4315 
4316 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4317 	dev = &rte_eth_devices[port_id];
4318 
4319 	if (addr == NULL) {
4320 		RTE_ETHDEV_LOG(ERR,
4321 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
4322 			port_id);
4323 		return -EINVAL;
4324 	}
4325 
4326 	if (rte_is_zero_ether_addr(addr)) {
4327 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4328 			port_id);
4329 		return -EINVAL;
4330 	}
4331 
4332 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4333 	/* If the address is already there, do nothing */
4334 	if ((index >= 0) && on)
4335 		return 0;
4336 
4337 	if (index < 0) {
4338 		if (!on) {
4339 			RTE_ETHDEV_LOG(ERR,
4340 				"Port %u: the MAC address was not set in UTA\n",
4341 				port_id);
4342 			return -EINVAL;
4343 		}
4344 
4345 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4346 		if (index < 0) {
4347 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4348 				port_id);
4349 			return -ENOSPC;
4350 		}
4351 	}
4352 
4353 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
4354 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4355 	if (ret == 0) {
4356 		/* Update address in NIC data structure */
4357 		if (on)
4358 			rte_ether_addr_copy(addr,
4359 					&dev->data->hash_mac_addrs[index]);
4360 		else
4361 			rte_ether_addr_copy(&null_mac_addr,
4362 					&dev->data->hash_mac_addrs[index]);
4363 	}
4364 
4365 	return eth_err(port_id, ret);
4366 }
4367 
4368 int
4369 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4370 {
4371 	struct rte_eth_dev *dev;
4372 
4373 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4374 	dev = &rte_eth_devices[port_id];
4375 
4376 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
4377 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4378 								       on));
4379 }
4380 
4381 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4382 					uint16_t tx_rate)
4383 {
4384 	struct rte_eth_dev *dev;
4385 	struct rte_eth_dev_info dev_info;
4386 	struct rte_eth_link link;
4387 	int ret;
4388 
4389 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4390 	dev = &rte_eth_devices[port_id];
4391 
4392 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4393 	if (ret != 0)
4394 		return ret;
4395 
4396 	link = dev->data->dev_link;
4397 
4398 	if (queue_idx >= dev_info.max_tx_queues) {
4399 		RTE_ETHDEV_LOG(ERR,
4400 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
4401 			port_id, queue_idx);
4402 		return -EINVAL;
4403 	}
4404 
4405 	if (tx_rate > link.link_speed) {
4406 		RTE_ETHDEV_LOG(ERR,
4407 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4408 			tx_rate, link.link_speed);
4409 		return -EINVAL;
4410 	}
4411 
4412 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
4413 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4414 							queue_idx, tx_rate));
4415 }
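
/*
 * Illustrative sketch (hypothetical helper, never compiled): tx_rate uses
 * the same unit as link_speed in the check above, i.e. Mbps, so this caps a
 * queue at 1 Gbps.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_cap_tx_queue(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_set_queue_rate_limit(port_id, queue_id, 1000);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */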
4416 
4417 RTE_INIT(eth_dev_init_fp_ops)
4418 {
4419 	uint32_t i;
4420 
4421 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4422 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4423 }
4424 
4425 RTE_INIT(eth_dev_init_cb_lists)
4426 {
4427 	uint16_t i;
4428 
4429 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4430 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4431 }
4432 
4433 int
4434 rte_eth_dev_callback_register(uint16_t port_id,
4435 			enum rte_eth_event_type event,
4436 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4437 {
4438 	struct rte_eth_dev *dev;
4439 	struct rte_eth_dev_callback *user_cb;
4440 	uint16_t next_port;
4441 	uint16_t last_port;
4442 
4443 	if (cb_fn == NULL) {
4444 		RTE_ETHDEV_LOG(ERR,
4445 			"Cannot register ethdev port %u callback from NULL\n",
4446 			port_id);
4447 		return -EINVAL;
4448 	}
4449 
4450 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4451 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4452 		return -EINVAL;
4453 	}
4454 
4455 	if (port_id == RTE_ETH_ALL) {
4456 		next_port = 0;
4457 		last_port = RTE_MAX_ETHPORTS - 1;
4458 	} else {
4459 		next_port = last_port = port_id;
4460 	}
4461 
4462 	rte_spinlock_lock(&eth_dev_cb_lock);
4463 
4464 	do {
4465 		dev = &rte_eth_devices[next_port];
4466 
4467 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4468 			if (user_cb->cb_fn == cb_fn &&
4469 				user_cb->cb_arg == cb_arg &&
4470 				user_cb->event == event) {
4471 				break;
4472 			}
4473 		}
4474 
4475 		/* create a new callback. */
4476 		if (user_cb == NULL) {
4477 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4478 				sizeof(struct rte_eth_dev_callback), 0);
4479 			if (user_cb != NULL) {
4480 				user_cb->cb_fn = cb_fn;
4481 				user_cb->cb_arg = cb_arg;
4482 				user_cb->event = event;
4483 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4484 						  user_cb, next);
4485 			} else {
4486 				rte_spinlock_unlock(&eth_dev_cb_lock);
4487 				rte_eth_dev_callback_unregister(port_id, event,
4488 								cb_fn, cb_arg);
4489 				return -ENOMEM;
4490 			}
4491 
4492 		}
4493 	} while (++next_port <= last_port);
4494 
4495 	rte_spinlock_unlock(&eth_dev_cb_lock);
4496 	return 0;
4497 }
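
/*
 * Illustrative sketch (hypothetical helpers, never compiled): registering a
 * link-status-change callback on every port at once via RTE_ETH_ALL. The
 * callback matches the rte_eth_dev_cb_fn prototype.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_on_lsc(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	RTE_ETHDEV_LOG(INFO, "link state of port %u changed\n", port_id);
	return 0;
}

static int
example_register_lsc(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_on_lsc, NULL);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */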
4498 
4499 int
4500 rte_eth_dev_callback_unregister(uint16_t port_id,
4501 			enum rte_eth_event_type event,
4502 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4503 {
4504 	int ret;
4505 	struct rte_eth_dev *dev;
4506 	struct rte_eth_dev_callback *cb, *next;
4507 	uint16_t next_port;
4508 	uint16_t last_port;
4509 
4510 	if (cb_fn == NULL) {
4511 		RTE_ETHDEV_LOG(ERR,
4512 			"Cannot unregister ethdev port %u callback from NULL\n",
4513 			port_id);
4514 		return -EINVAL;
4515 	}
4516 
4517 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4518 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4519 		return -EINVAL;
4520 	}
4521 
4522 	if (port_id == RTE_ETH_ALL) {
4523 		next_port = 0;
4524 		last_port = RTE_MAX_ETHPORTS - 1;
4525 	} else {
4526 		next_port = last_port = port_id;
4527 	}
4528 
4529 	rte_spinlock_lock(&eth_dev_cb_lock);
4530 
4531 	do {
4532 		dev = &rte_eth_devices[next_port];
4533 		ret = 0;
4534 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4535 		     cb = next) {
4536 
4537 			next = TAILQ_NEXT(cb, next);
4538 
4539 			if (cb->cb_fn != cb_fn || cb->event != event ||
4540 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4541 				continue;
4542 
4543 			/*
4544 			 * if this callback is not executing right now,
4545 			 * then remove it.
4546 			 */
4547 			if (cb->active == 0) {
4548 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4549 				rte_free(cb);
4550 			} else {
4551 				ret = -EAGAIN;
4552 			}
4553 		}
4554 	} while (++next_port <= last_port);
4555 
4556 	rte_spinlock_unlock(&eth_dev_cb_lock);
4557 	return ret;
4558 }
4559 
4560 int
4561 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4562 {
4563 	uint32_t vec;
4564 	struct rte_eth_dev *dev;
4565 	struct rte_intr_handle *intr_handle;
4566 	uint16_t qid;
4567 	int rc;
4568 
4569 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4570 	dev = &rte_eth_devices[port_id];
4571 
4572 	if (!dev->intr_handle) {
4573 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4574 		return -ENOTSUP;
4575 	}
4576 
4577 	intr_handle = dev->intr_handle;
4578 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4579 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4580 		return -EPERM;
4581 	}
4582 
4583 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4584 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4585 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4586 		if (rc && rc != -EEXIST) {
4587 			RTE_ETHDEV_LOG(ERR,
4588 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4589 				port_id, qid, op, epfd, vec);
4590 		}
4591 	}
4592 
4593 	return 0;
4594 }
4595 
4596 int
4597 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4598 {
4599 	struct rte_intr_handle *intr_handle;
4600 	struct rte_eth_dev *dev;
4601 	unsigned int efd_idx;
4602 	uint32_t vec;
4603 	int fd;
4604 
4605 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4606 	dev = &rte_eth_devices[port_id];
4607 
4608 	if (queue_id >= dev->data->nb_rx_queues) {
4609 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4610 		return -1;
4611 	}
4612 
4613 	if (!dev->intr_handle) {
4614 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4615 		return -1;
4616 	}
4617 
4618 	intr_handle = dev->intr_handle;
4619 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4620 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4621 		return -1;
4622 	}
4623 
4624 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4625 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4626 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4627 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4628 
4629 	return fd;
4630 }
4631 
4632 int
4633 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4634 			  int epfd, int op, void *data)
4635 {
4636 	uint32_t vec;
4637 	struct rte_eth_dev *dev;
4638 	struct rte_intr_handle *intr_handle;
4639 	int rc;
4640 
4641 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4642 	dev = &rte_eth_devices[port_id];
4643 
4644 	if (queue_id >= dev->data->nb_rx_queues) {
4645 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4646 		return -EINVAL;
4647 	}
4648 
4649 	if (!dev->intr_handle) {
4650 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4651 		return -ENOTSUP;
4652 	}
4653 
4654 	intr_handle = dev->intr_handle;
4655 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4656 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4657 		return -EPERM;
4658 	}
4659 
4660 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4661 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4662 	if (rc && rc != -EEXIST) {
4663 		RTE_ETHDEV_LOG(ERR,
4664 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4665 			port_id, queue_id, op, epfd, vec);
4666 		return rc;
4667 	}
4668 
4669 	return 0;
4670 }
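
/*
 * Illustrative sketch (hypothetical helper, never compiled): a polling lcore
 * can sleep on a queue interrupt instead of busy-waiting by attaching the
 * queue's interrupt vector to its per-thread epoll fd and waiting on it.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_sleep_until_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	int ret;

	/* Attach the queue's interrupt vector to this lcore's epoll fd. */
	ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
			RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* Block for at most 100 ms waiting for traffic. */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, 100);

	return rte_eth_dev_rx_intr_disable(port_id, queue_id);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */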
4671 
4672 int
4673 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4674 			   uint16_t queue_id)
4675 {
4676 	struct rte_eth_dev *dev;
4677 	int ret;
4678 
4679 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4680 	dev = &rte_eth_devices[port_id];
4681 
4682 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4683 	if (ret != 0)
4684 		return ret;
4685 
4686 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4687 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4688 }
4689 
4690 int
4691 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4692 			    uint16_t queue_id)
4693 {
4694 	struct rte_eth_dev *dev;
4695 	int ret;
4696 
4697 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4698 	dev = &rte_eth_devices[port_id];
4699 
4700 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4701 	if (ret != 0)
4702 		return ret;
4703 
4704 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4705 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4706 }
4707 
4708 
4709 const struct rte_eth_rxtx_callback *
4710 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4711 		rte_rx_callback_fn fn, void *user_param)
4712 {
4713 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4714 	rte_errno = ENOTSUP;
4715 	return NULL;
4716 #endif
4717 	struct rte_eth_dev *dev;
4718 
4719 	/* check input parameters */
4720 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4721 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4722 		rte_errno = EINVAL;
4723 		return NULL;
4724 	}
4725 	dev = &rte_eth_devices[port_id];
4726 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4727 		rte_errno = EINVAL;
4728 		return NULL;
4729 	}
4730 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4731 
4732 	if (cb == NULL) {
4733 		rte_errno = ENOMEM;
4734 		return NULL;
4735 	}
4736 
4737 	cb->fn.rx = fn;
4738 	cb->param = user_param;
4739 
4740 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4741 	/* Add the callback in FIFO order. */
4742 	struct rte_eth_rxtx_callback *tail =
4743 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4744 
4745 	if (!tail) {
4746 		/* Stores to cb->fn and cb->param should complete before
4747 		 * cb is visible to data plane.
4748 		 */
4749 		__atomic_store_n(
4750 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4751 			cb, __ATOMIC_RELEASE);
4752 
4753 	} else {
4754 		while (tail->next)
4755 			tail = tail->next;
4756 		/* Stores to cb->fn and cb->param should complete before
4757 		 * cb is visible to data plane.
4758 		 */
4759 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4760 	}
4761 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4762 
4763 	return cb;
4764 }
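
/*
 * Illustrative sketch (hypothetical helpers, never compiled): a minimal Rx
 * callback that counts received packets. It matches the rte_rx_callback_fn
 * prototype and returns nb_pkts unchanged, so nothing is dropped.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static uint64_t example_rx_pkts;	/* updated from the data path */

static uint16_t
example_count_cb(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf *pkts[], uint16_t nb_pkts,
		uint16_t max_pkts, void *user_param)
{
	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);
	example_rx_pkts += nb_pkts;
	return nb_pkts;		/* keep every packet in the burst */
}

static const struct rte_eth_rxtx_callback *
example_install_counter(uint16_t port_id, uint16_t queue_id)
{
	return rte_eth_add_rx_callback(port_id, queue_id,
			example_count_cb, NULL);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */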
4765 
4766 const struct rte_eth_rxtx_callback *
4767 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4768 		rte_rx_callback_fn fn, void *user_param)
4769 {
4770 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4771 	rte_errno = ENOTSUP;
4772 	return NULL;
4773 #endif
4774 	/* check input parameters */
4775 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4776 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4777 		rte_errno = EINVAL;
4778 		return NULL;
4779 	}
4780 
4781 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4782 
4783 	if (cb == NULL) {
4784 		rte_errno = ENOMEM;
4785 		return NULL;
4786 	}
4787 
4788 	cb->fn.rx = fn;
4789 	cb->param = user_param;
4790 
4791 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4792 	/* Add the callback at the first position */
4793 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4794 	/* Stores to cb->fn, cb->param and cb->next should complete before
4795 	 * cb is visible to data plane threads.
4796 	 */
4797 	__atomic_store_n(
4798 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4799 		cb, __ATOMIC_RELEASE);
4800 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4801 
4802 	return cb;
4803 }
4804 
4805 const struct rte_eth_rxtx_callback *
4806 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4807 		rte_tx_callback_fn fn, void *user_param)
4808 {
4809 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4810 	rte_errno = ENOTSUP;
4811 	return NULL;
4812 #endif
4813 	struct rte_eth_dev *dev;
4814 
4815 	/* check input parameters */
4816 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4817 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4818 		rte_errno = EINVAL;
4819 		return NULL;
4820 	}
4821 
4822 	dev = &rte_eth_devices[port_id];
4823 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4824 		rte_errno = EINVAL;
4825 		return NULL;
4826 	}
4827 
4828 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4829 
4830 	if (cb == NULL) {
4831 		rte_errno = ENOMEM;
4832 		return NULL;
4833 	}
4834 
4835 	cb->fn.tx = fn;
4836 	cb->param = user_param;
4837 
4838 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4839 	/* Add the callback in FIFO order. */
4840 	struct rte_eth_rxtx_callback *tail =
4841 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4842 
4843 	if (!tail) {
4844 		/* Stores to cb->fn and cb->param should complete before
4845 		 * cb is visible to data plane.
4846 		 */
4847 		__atomic_store_n(
4848 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4849 			cb, __ATOMIC_RELEASE);
4850 
4851 	} else {
4852 		while (tail->next)
4853 			tail = tail->next;
4854 		/* Stores to cb->fn and cb->param should complete before
4855 		 * cb is visible to data plane.
4856 		 */
4857 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4858 	}
4859 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4860 
4861 	return cb;
4862 }
4863 
4864 int
4865 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4866 		const struct rte_eth_rxtx_callback *user_cb)
4867 {
4868 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4869 	return -ENOTSUP;
4870 #endif
4871 	/* Check input parameters. */
4872 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4873 	if (user_cb == NULL ||
4874 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4875 		return -EINVAL;
4876 
4877 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4878 	struct rte_eth_rxtx_callback *cb;
4879 	struct rte_eth_rxtx_callback **prev_cb;
4880 	int ret = -EINVAL;
4881 
4882 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4883 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
4884 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4885 		cb = *prev_cb;
4886 		if (cb == user_cb) {
4887 			/* Remove the user cb from the callback list. */
4888 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4889 			ret = 0;
4890 			break;
4891 		}
4892 	}
4893 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4894 
4895 	return ret;
4896 }
4897 
4898 int
4899 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4900 		const struct rte_eth_rxtx_callback *user_cb)
4901 {
4902 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4903 	return -ENOTSUP;
4904 #endif
4905 	/* Check input parameters. */
4906 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4907 	if (user_cb == NULL ||
4908 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4909 		return -EINVAL;
4910 
4911 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4912 	int ret = -EINVAL;
4913 	struct rte_eth_rxtx_callback *cb;
4914 	struct rte_eth_rxtx_callback **prev_cb;
4915 
4916 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4917 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4918 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4919 		cb = *prev_cb;
4920 		if (cb == user_cb) {
4921 			/* Remove the user cb from the callback list. */
4922 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
4923 			ret = 0;
4924 			break;
4925 		}
4926 	}
4927 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4928 
4929 	return ret;
4930 }
4931 
4932 int
4933 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4934 	struct rte_eth_rxq_info *qinfo)
4935 {
4936 	struct rte_eth_dev *dev;
4937 
4938 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4939 	dev = &rte_eth_devices[port_id];
4940 
4941 	if (queue_id >= dev->data->nb_rx_queues) {
4942 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4943 		return -EINVAL;
4944 	}
4945 
4946 	if (qinfo == NULL) {
4947 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
4948 			port_id, queue_id);
4949 		return -EINVAL;
4950 	}
4951 
4952 	if (dev->data->rx_queues == NULL ||
4953 			dev->data->rx_queues[queue_id] == NULL) {
4954 		RTE_ETHDEV_LOG(ERR,
4955 			       "Rx queue %"PRIu16" of device with port_id=%"
4956 			       PRIu16" has not been setup\n",
4957 			       queue_id, port_id);
4958 		return -EINVAL;
4959 	}
4960 
4961 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4962 		RTE_ETHDEV_LOG(INFO,
4963 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4964 			queue_id, port_id);
4965 		return -EINVAL;
4966 	}
4967 
4968 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4969 
4970 	memset(qinfo, 0, sizeof(*qinfo));
4971 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4972 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
4973 
4974 	return 0;
4975 }
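
/*
 * Illustrative sketch (hypothetical helper, never compiled): querying a
 * configured Rx queue and logging two of the rte_eth_rxq_info fields.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static void
example_log_rxq(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
		return;

	RTE_ETHDEV_LOG(INFO, "rxq %u: %u descriptors, scattered_rx=%u\n",
			queue_id, qinfo.nb_desc, qinfo.scattered_rx);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */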
4976 
4977 int
4978 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4979 	struct rte_eth_txq_info *qinfo)
4980 {
4981 	struct rte_eth_dev *dev;
4982 
4983 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4984 	dev = &rte_eth_devices[port_id];
4985 
4986 	if (queue_id >= dev->data->nb_tx_queues) {
4987 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
4988 		return -EINVAL;
4989 	}
4990 
4991 	if (qinfo == NULL) {
4992 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
4993 			port_id, queue_id);
4994 		return -EINVAL;
4995 	}
4996 
4997 	if (dev->data->tx_queues == NULL ||
4998 			dev->data->tx_queues[queue_id] == NULL) {
4999 		RTE_ETHDEV_LOG(ERR,
5000 			       "Tx queue %"PRIu16" of device with port_id=%"
5001 			       PRIu16" has not been setup\n",
5002 			       queue_id, port_id);
5003 		return -EINVAL;
5004 	}
5005 
5006 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5007 		RTE_ETHDEV_LOG(INFO,
5008 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5009 			queue_id, port_id);
5010 		return -EINVAL;
5011 	}
5012 
5013 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
5014 
5015 	memset(qinfo, 0, sizeof(*qinfo));
5016 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5017 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5018 
5019 	return 0;
5020 }
5021 
5022 int
5023 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5024 			  struct rte_eth_burst_mode *mode)
5025 {
5026 	struct rte_eth_dev *dev;
5027 
5028 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5029 	dev = &rte_eth_devices[port_id];
5030 
5031 	if (queue_id >= dev->data->nb_rx_queues) {
5032 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5033 		return -EINVAL;
5034 	}
5035 
5036 	if (mode == NULL) {
5037 		RTE_ETHDEV_LOG(ERR,
5038 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5039 			port_id, queue_id);
5040 		return -EINVAL;
5041 	}
5042 
5043 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
5044 	memset(mode, 0, sizeof(*mode));
5045 	return eth_err(port_id,
5046 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5047 }
5048 
5049 int
5050 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5051 			  struct rte_eth_burst_mode *mode)
5052 {
5053 	struct rte_eth_dev *dev;
5054 
5055 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5056 	dev = &rte_eth_devices[port_id];
5057 
5058 	if (queue_id >= dev->data->nb_tx_queues) {
5059 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5060 		return -EINVAL;
5061 	}
5062 
5063 	if (mode == NULL) {
5064 		RTE_ETHDEV_LOG(ERR,
5065 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5066 			port_id, queue_id);
5067 		return -EINVAL;
5068 	}
5069 
5070 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
5071 	memset(mode, 0, sizeof(*mode));
5072 	return eth_err(port_id,
5073 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5074 }
5075 
5076 int
5077 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5078 		struct rte_power_monitor_cond *pmc)
5079 {
5080 	struct rte_eth_dev *dev;
5081 
5082 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5083 	dev = &rte_eth_devices[port_id];
5084 
5085 	if (queue_id >= dev->data->nb_rx_queues) {
5086 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5087 		return -EINVAL;
5088 	}
5089 
5090 	if (pmc == NULL) {
5091 		RTE_ETHDEV_LOG(ERR,
5092 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5093 			port_id, queue_id);
5094 		return -EINVAL;
5095 	}
5096 
5097 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_monitor_addr, -ENOTSUP);
5098 	return eth_err(port_id,
5099 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5100 }
5101 
5102 int
5103 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5104 			     struct rte_ether_addr *mc_addr_set,
5105 			     uint32_t nb_mc_addr)
5106 {
5107 	struct rte_eth_dev *dev;
5108 
5109 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5110 	dev = &rte_eth_devices[port_id];
5111 
5112 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
5113 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5114 						mc_addr_set, nb_mc_addr));
5115 }
5116 
5117 int
5118 rte_eth_timesync_enable(uint16_t port_id)
5119 {
5120 	struct rte_eth_dev *dev;
5121 
5122 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5123 	dev = &rte_eth_devices[port_id];
5124 
5125 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
5126 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5127 }
5128 
5129 int
5130 rte_eth_timesync_disable(uint16_t port_id)
5131 {
5132 	struct rte_eth_dev *dev;
5133 
5134 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5135 	dev = &rte_eth_devices[port_id];
5136 
5137 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
5138 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5139 }
5140 
5141 int
5142 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5143 				   uint32_t flags)
5144 {
5145 	struct rte_eth_dev *dev;
5146 
5147 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5148 	dev = &rte_eth_devices[port_id];
5149 
5150 	if (timestamp == NULL) {
5151 		RTE_ETHDEV_LOG(ERR,
5152 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5153 			port_id);
5154 		return -EINVAL;
5155 	}
5156 
5157 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
5158 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5159 				(dev, timestamp, flags));
5160 }
5161 
5162 int
5163 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5164 				   struct timespec *timestamp)
5165 {
5166 	struct rte_eth_dev *dev;
5167 
5168 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5169 	dev = &rte_eth_devices[port_id];
5170 
5171 	if (timestamp == NULL) {
5172 		RTE_ETHDEV_LOG(ERR,
5173 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
5174 			port_id);
5175 		return -EINVAL;
5176 	}
5177 
5178 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
5179 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5180 				(dev, timestamp));
5181 }
5182 
5183 int
5184 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5185 {
5186 	struct rte_eth_dev *dev;
5187 
5188 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5189 	dev = &rte_eth_devices[port_id];
5190 
5191 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
5192 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5193 }
5194 
5195 int
5196 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5197 {
5198 	struct rte_eth_dev *dev;
5199 
5200 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5201 	dev = &rte_eth_devices[port_id];
5202 
5203 	if (timestamp == NULL) {
5204 		RTE_ETHDEV_LOG(ERR,
5205 			"Cannot read ethdev port %u timesync time to NULL\n",
5206 			port_id);
5207 		return -EINVAL;
5208 	}
5209 
5210 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
5211 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5212 								timestamp));
5213 }
5214 
5215 int
5216 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5217 {
5218 	struct rte_eth_dev *dev;
5219 
5220 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5221 	dev = &rte_eth_devices[port_id];
5222 
5223 	if (timestamp == NULL) {
5224 		RTE_ETHDEV_LOG(ERR,
5225 			"Cannot write ethdev port %u timesync from NULL time\n",
5226 			port_id);
5227 		return -EINVAL;
5228 	}
5229 
5230 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
5231 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5232 								timestamp));
5233 }
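
/*
 * Illustrative sketch (hypothetical helper, never compiled): a typical IEEE
 * 1588 sequence enables timesync, reads the device clock, and then slews it
 * by a signed delta. The delta passed to adjust_time is in nanoseconds.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_nudge_clock(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	ret = rte_eth_timesync_read_time(port_id, &ts);
	if (ret != 0)
		return ret;

	/* Slew the device clock 500 us forward. */
	return rte_eth_timesync_adjust_time(port_id, 500 * 1000);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */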
5234 
5235 int
5236 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5237 {
5238 	struct rte_eth_dev *dev;
5239 
5240 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5241 	dev = &rte_eth_devices[port_id];
5242 
5243 	if (clock == NULL) {
5244 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5245 			port_id);
5246 		return -EINVAL;
5247 	}
5248 
5249 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
5250 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5251 }
5252 
5253 int
5254 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5255 {
5256 	struct rte_eth_dev *dev;
5257 
5258 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5259 	dev = &rte_eth_devices[port_id];
5260 
5261 	if (info == NULL) {
5262 		RTE_ETHDEV_LOG(ERR,
5263 			"Cannot get ethdev port %u register info to NULL\n",
5264 			port_id);
5265 		return -EINVAL;
5266 	}
5267 
5268 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
5269 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5270 }
5271 
5272 int
5273 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5274 {
5275 	struct rte_eth_dev *dev;
5276 
5277 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5278 	dev = &rte_eth_devices[port_id];
5279 
5280 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
5281 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5282 }
5283 
5284 int
5285 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5286 {
5287 	struct rte_eth_dev *dev;
5288 
5289 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5290 	dev = &rte_eth_devices[port_id];
5291 
5292 	if (info == NULL) {
5293 		RTE_ETHDEV_LOG(ERR,
5294 			"Cannot get ethdev port %u EEPROM info to NULL\n",
5295 			port_id);
5296 		return -EINVAL;
5297 	}
5298 
5299 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
5300 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5301 }
5302 
5303 int
5304 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5305 {
5306 	struct rte_eth_dev *dev;
5307 
5308 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5309 	dev = &rte_eth_devices[port_id];
5310 
5311 	if (info == NULL) {
5312 		RTE_ETHDEV_LOG(ERR,
5313 			"Cannot set ethdev port %u EEPROM from NULL info\n",
5314 			port_id);
5315 		return -EINVAL;
5316 	}
5317 
5318 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
5319 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5320 }
5321 
5322 int
5323 rte_eth_dev_get_module_info(uint16_t port_id,
5324 			    struct rte_eth_dev_module_info *modinfo)
5325 {
5326 	struct rte_eth_dev *dev;
5327 
5328 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5329 	dev = &rte_eth_devices[port_id];
5330 
5331 	if (modinfo == NULL) {
5332 		RTE_ETHDEV_LOG(ERR,
5333 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
5334 			port_id);
5335 		return -EINVAL;
5336 	}
5337 
5338 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
5339 	return eth_err(port_id, (*dev->dev_ops->get_module_info)(dev, modinfo));
5340 }
5341 
5342 int
5343 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5344 			      struct rte_dev_eeprom_info *info)
5345 {
5346 	struct rte_eth_dev *dev;
5347 
5348 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5349 	dev = &rte_eth_devices[port_id];
5350 
5351 	if (info == NULL) {
5352 		RTE_ETHDEV_LOG(ERR,
5353 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
5354 			port_id);
5355 		return -EINVAL;
5356 	}
5357 
5358 	if (info->data == NULL) {
5359 		RTE_ETHDEV_LOG(ERR,
5360 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
5361 			port_id);
5362 		return -EINVAL;
5363 	}
5364 
5365 	if (info->length == 0) {
5366 		RTE_ETHDEV_LOG(ERR,
5367 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
5368 			port_id);
5369 		return -EINVAL;
5370 	}
5371 
5372 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
5373 	return eth_err(port_id, (*dev->dev_ops->get_module_eeprom)(dev, info));
5374 }
5375 
5376 int
5377 rte_eth_dev_get_dcb_info(uint16_t port_id,
5378 			     struct rte_eth_dcb_info *dcb_info)
5379 {
5380 	struct rte_eth_dev *dev;
5381 
5382 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5383 	dev = &rte_eth_devices[port_id];
5384 
5385 	if (dcb_info == NULL) {
5386 		RTE_ETHDEV_LOG(ERR,
5387 			"Cannot get ethdev port %u DCB info to NULL\n",
5388 			port_id);
5389 		return -EINVAL;
5390 	}
5391 
5392 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5393 
5394 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
5395 	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5396 }
5397 
5398 static void
5399 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5400 		const struct rte_eth_desc_lim *desc_lim)
5401 {
5402 	if (desc_lim->nb_align != 0)
5403 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5404 
5405 	if (desc_lim->nb_max != 0)
5406 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5407 
5408 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5409 }
5410 
5411 int
5412 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5413 				 uint16_t *nb_rx_desc,
5414 				 uint16_t *nb_tx_desc)
5415 {
5416 	struct rte_eth_dev_info dev_info;
5417 	int ret;
5418 
5419 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5420 
5421 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5422 	if (ret != 0)
5423 		return ret;
5424 
5425 	if (nb_rx_desc != NULL)
5426 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5427 
5428 	if (nb_tx_desc != NULL)
5429 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5430 
5431 	return 0;
5432 }
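
/*
 * Illustrative sketch (hypothetical helper, never compiled): adjust the
 * requested ring sizes to the device limits before setting up a queue pair,
 * since drivers may clamp or align the descriptor counts.
 */
#ifdef RTE_ETHDEV_DOC_EXAMPLES
static int
example_setup_queue_pair(uint16_t port_id, struct rte_mempool *mb_pool)
{
	uint16_t nb_rxd = 4096;	/* requested ring sizes, may be clamped */
	uint16_t nb_txd = 4096;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
			rte_eth_dev_socket_id(port_id), NULL);
}
#endif /* RTE_ETHDEV_DOC_EXAMPLES */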
5433 
5434 int
5435 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5436 				   struct rte_eth_hairpin_cap *cap)
5437 {
5438 	struct rte_eth_dev *dev;
5439 
5440 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5441 	dev = &rte_eth_devices[port_id];
5442 
5443 	if (cap == NULL) {
5444 		RTE_ETHDEV_LOG(ERR,
5445 			"Cannot get ethdev port %u hairpin capability to NULL\n",
5446 			port_id);
5447 		return -EINVAL;
5448 	}
5449 
5450 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5451 	memset(cap, 0, sizeof(*cap));
5452 	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5453 }
5454 
5455 int
5456 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5457 {
5458 	struct rte_eth_dev *dev;
5459 
5460 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5461 	dev = &rte_eth_devices[port_id];
5462 
5463 	if (pool == NULL) {
5464 		RTE_ETHDEV_LOG(ERR,
5465 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
5466 			port_id);
5467 		return -EINVAL;
5468 	}
5469 
5470 	if (*dev->dev_ops->pool_ops_supported == NULL)
5471 		return 1; /* all pools are supported */
5472 
5473 	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5474 }
5475 
5476 static int
5477 eth_dev_handle_port_list(const char *cmd __rte_unused,
5478 		const char *params __rte_unused,
5479 		struct rte_tel_data *d)
5480 {
5481 	int port_id;
5482 
5483 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5484 	RTE_ETH_FOREACH_DEV(port_id)
5485 		rte_tel_data_add_array_int(d, port_id);
5486 	return 0;
5487 }
5488 
5489 static void
5490 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5491 		const char *stat_name)
5492 {
5493 	int q;
5494 	struct rte_tel_data *q_data = rte_tel_data_alloc();

	/* Telemetry allocation can fail; skip the per-queue array then. */
	if (q_data == NULL)
		return;

5495 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5496 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5497 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
5498 	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5499 }
5500 
5501 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5502 
5503 static int
5504 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5505 		const char *params,
5506 		struct rte_tel_data *d)
5507 {
5508 	struct rte_eth_stats stats;
5509 	int port_id, ret;
5510 
5511 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5512 		return -1;
5513 
5514 	port_id = atoi(params);
5515 	if (!rte_eth_dev_is_valid_port(port_id))
5516 		return -1;
5517 
5518 	ret = rte_eth_stats_get(port_id, &stats);
5519 	if (ret < 0)
5520 		return -1;
5521 
5522 	rte_tel_data_start_dict(d);
5523 	ADD_DICT_STAT(stats, ipackets);
5524 	ADD_DICT_STAT(stats, opackets);
5525 	ADD_DICT_STAT(stats, ibytes);
5526 	ADD_DICT_STAT(stats, obytes);
5527 	ADD_DICT_STAT(stats, imissed);
5528 	ADD_DICT_STAT(stats, ierrors);
5529 	ADD_DICT_STAT(stats, oerrors);
5530 	ADD_DICT_STAT(stats, rx_nombuf);
5531 	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5532 	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5533 	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5534 	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5535 	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5536 
5537 	return 0;
5538 }
5539 
5540 static int
5541 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5542 		const char *params,
5543 		struct rte_tel_data *d)
5544 {
5545 	struct rte_eth_xstat *eth_xstats;
5546 	struct rte_eth_xstat_name *xstat_names;
5547 	int port_id, num_xstats;
5548 	int i, ret;
5549 	char *end_param;
5550 
5551 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5552 		return -1;
5553 
5554 	port_id = strtoul(params, &end_param, 0);
5555 	if (*end_param != '\0')
5556 		RTE_ETHDEV_LOG(NOTICE,
5557 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5558 	if (!rte_eth_dev_is_valid_port(port_id))
5559 		return -1;
5560 
5561 	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5562 	if (num_xstats < 0)
5563 		return -1;
5564 
5565 	/* use one malloc for both names and stats */
5566 	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5567 			sizeof(struct rte_eth_xstat_name)) * num_xstats);
5568 	if (eth_xstats == NULL)
5569 		return -1;
5570 	xstat_names = (void *)&eth_xstats[num_xstats];
5571 
5572 	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5573 	if (ret < 0 || ret > num_xstats) {
5574 		free(eth_xstats);
5575 		return -1;
5576 	}
5577 
5578 	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5579 	if (ret < 0 || ret > num_xstats) {
5580 		free(eth_xstats);
5581 		return -1;
5582 	}
5583 
5584 	rte_tel_data_start_dict(d);
5585 	for (i = 0; i < num_xstats; i++)
5586 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5587 				eth_xstats[i].value);
5588 	return 0;
5589 }
5590 
5591 static int
5592 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5593 		const char *params,
5594 		struct rte_tel_data *d)
5595 {
5596 	static const char *status_str = "status";
5597 	int ret, port_id;
5598 	struct rte_eth_link link;
5599 	char *end_param;
5600 
5601 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5602 		return -1;
5603 
5604 	port_id = strtoul(params, &end_param, 0);
5605 	if (*end_param != '\0')
5606 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5607 			"Extra parameters passed to ethdev telemetry command, ignoring");
5608 	if (!rte_eth_dev_is_valid_port(port_id))
5609 		return -1;
5610 
5611 	ret = rte_eth_link_get_nowait(port_id, &link);
5612 	if (ret < 0)
5613 		return -1;
5614 
5615 	rte_tel_data_start_dict(d);
5616 	if (!link.link_status) {
5617 		rte_tel_data_add_dict_string(d, status_str, "DOWN");
5618 		return 0;
5619 	}
5620 	rte_tel_data_add_dict_string(d, status_str, "UP");
5621 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5622 	rte_tel_data_add_dict_string(d, "duplex",
5623 			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
5624 				"full-duplex" : "half-duplex");
5625 	return 0;
5626 }
5627 
5628 static int
5629 eth_dev_handle_port_info(const char *cmd __rte_unused,
5630 		const char *params,
5631 		struct rte_tel_data *d)
5632 {
5633 	struct rte_tel_data *rxq_state, *txq_state;
5634 	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
5635 	struct rte_eth_dev *eth_dev;
5636 	char *end_param;
5637 	int port_id, i;
5638 
5639 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5640 		return -1;
5641 
5642 	port_id = strtoul(params, &end_param, 0);
5643 	if (*end_param != '\0')
5644 		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5646 
5647 	if (!rte_eth_dev_is_valid_port(port_id))
5648 		return -EINVAL;
5649 
5650 	eth_dev = &rte_eth_devices[port_id];
5651 
5652 	rxq_state = rte_tel_data_alloc();
5653 	if (!rxq_state)
5654 		return -ENOMEM;
5655 
5656 	txq_state = rte_tel_data_alloc();
5657 	if (!txq_state) {
5658 		rte_tel_data_free(rxq_state);
5659 		return -ENOMEM;
5660 	}
5661 
5662 	rte_tel_data_start_dict(d);
5663 	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
5664 	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
5665 	rte_tel_data_add_dict_int(d, "nb_rx_queues",
5666 			eth_dev->data->nb_rx_queues);
5667 	rte_tel_data_add_dict_int(d, "nb_tx_queues",
5668 			eth_dev->data->nb_tx_queues);
5669 	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
5670 	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
5671 	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
5672 			eth_dev->data->min_rx_buf_size);
5673 	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
5674 			eth_dev->data->rx_mbuf_alloc_failed);
	/* RTE_ETHER_ADDR_LEN (6) is too small for the formatted string;
	 * format into an RTE_ETHER_ADDR_FMT_SIZE buffer instead */
	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
			eth_dev->data->mac_addrs);
5682 	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
5683 	rte_tel_data_add_dict_int(d, "promiscuous",
5684 			eth_dev->data->promiscuous);
5685 	rte_tel_data_add_dict_int(d, "scattered_rx",
5686 			eth_dev->data->scattered_rx);
5687 	rte_tel_data_add_dict_int(d, "all_multicast",
5688 			eth_dev->data->all_multicast);
5689 	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
5690 	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
5691 	rte_tel_data_add_dict_int(d, "dev_configured",
5692 			eth_dev->data->dev_configured);
5693 
5694 	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
5695 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
5696 		rte_tel_data_add_array_int(rxq_state,
5697 				eth_dev->data->rx_queue_state[i]);
5698 
5699 	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
5700 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
5701 		rte_tel_data_add_array_int(txq_state,
5702 				eth_dev->data->tx_queue_state[i]);
5703 
5704 	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
5705 	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
5706 	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
5707 	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
5708 	rte_tel_data_add_dict_int(d, "rx_offloads",
5709 			eth_dev->data->dev_conf.rxmode.offloads);
5710 	rte_tel_data_add_dict_int(d, "tx_offloads",
5711 			eth_dev->data->dev_conf.txmode.offloads);
5712 	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
5713 			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
5714 
5715 	return 0;
5716 }
5717 
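/*
 * Thin wrapper: after the usual port-id validation this delegates
 * entirely to the PMD's representor_info_get callback, returning
 * -ENOTSUP when the driver does not implement it.
 */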
5718 int
5719 rte_eth_representor_info_get(uint16_t port_id,
5720 			     struct rte_eth_representor_info *info)
5721 {
5722 	struct rte_eth_dev *dev;
5723 
5724 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5725 	dev = &rte_eth_devices[port_id];
5726 
5727 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
5728 	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
5729 }
5730 
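/*
 * Rx metadata delivery has to be negotiated before the first
 * rte_eth_dev_configure() call, hence the dev_configured check below.
 * A minimal caller-side sketch (illustrative; the feature bits are the
 * RTE_ETH_RX_METADATA_* flags from rte_ethdev.h):
 *
 *   uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                       RTE_ETH_RX_METADATA_USER_MARK;
 *   ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *   // on success, 'features' holds the subset the PMD agreed to deliver
 */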
5731 int
5732 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
5733 {
5734 	struct rte_eth_dev *dev;
5735 
5736 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5737 	dev = &rte_eth_devices[port_id];
5738 
5739 	if (dev->data->dev_configured != 0) {
5740 		RTE_ETHDEV_LOG(ERR,
5741 			"The port (ID=%"PRIu16") is already configured\n",
5742 			port_id);
5743 		return -EBUSY;
5744 	}
5745 
5746 	if (features == NULL) {
5747 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
5748 		return -EINVAL;
5749 	}
5750 
5751 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
5752 	return eth_err(port_id,
5753 		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
5754 }
5755 
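/*
 * The capability can only be queried on a configured device, since the
 * limits a PMD reports may depend on how the port was configured. The
 * output struct is zeroed first so fields the driver leaves untouched
 * read as 0.
 */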
5756 int
5757 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5758 		struct rte_eth_ip_reassembly_params *reassembly_capa)
5759 {
5760 	struct rte_eth_dev *dev;
5761 
5762 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5763 	dev = &rte_eth_devices[port_id];
5764 
5765 	if (dev->data->dev_configured == 0) {
5766 		RTE_ETHDEV_LOG(ERR,
5767 			"Device with port_id=%u is not configured.\n"
5768 			"Cannot get IP reassembly capability\n",
5769 			port_id);
5770 		return -EINVAL;
5771 	}
5772 
5773 	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid reassembly capability parameter (NULL)\n");
5775 		return -EINVAL;
5776 	}
5777 
5778 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get,
5779 				-ENOTSUP);
5780 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
5781 
5782 	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
5783 					(dev, reassembly_capa));
5784 }
5785 
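/*
 * Returns the IP reassembly parameters currently programmed in the
 * driver. Like the capability query, this requires a configured device
 * and zero-fills the output before calling into the PMD.
 */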
5786 int
5787 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5788 		struct rte_eth_ip_reassembly_params *conf)
5789 {
5790 	struct rte_eth_dev *dev;
5791 
5792 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5793 	dev = &rte_eth_devices[port_id];
5794 
5795 	if (dev->data->dev_configured == 0) {
5796 		RTE_ETHDEV_LOG(ERR,
5797 			"Device with port_id=%u is not configured.\n"
5798 			"Cannot get IP reassembly configuration\n",
5799 			port_id);
5800 		return -EINVAL;
5801 	}
5802 
5803 	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid reassembly configuration parameter (NULL)\n");
5805 		return -EINVAL;
5806 	}
5807 
5808 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get,
5809 				-ENOTSUP);
5810 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
5811 	return eth_err(port_id,
5812 		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
5813 }
5814 
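/*
 * IP reassembly parameters may only be set on a device that is
 * configured but not yet started. A typical sequence (a sketch,
 * assuming the rte_eth_ip_reassembly_params layout from rte_ethdev.h):
 *
 *   struct rte_eth_ip_reassembly_params capa;
 *
 *   rte_eth_ip_reassembly_capability_get(port_id, &capa);
 *   capa.timeout_ms = RTE_MIN(capa.timeout_ms, 100);
 *   rte_eth_ip_reassembly_conf_set(port_id, &capa);
 *   rte_eth_dev_start(port_id);
 */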
5815 int
5816 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5817 		const struct rte_eth_ip_reassembly_params *conf)
5818 {
5819 	struct rte_eth_dev *dev;
5820 
5821 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5822 	dev = &rte_eth_devices[port_id];
5823 
5824 	if (dev->data->dev_configured == 0) {
5825 		RTE_ETHDEV_LOG(ERR,
5826 			"Device with port_id=%u is not configured.\n"
			"Cannot set IP reassembly configuration\n",
5828 			port_id);
5829 		return -EINVAL;
5830 	}
5831 
5832 	if (dev->data->dev_started != 0) {
5833 		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is already started;\n"
			"cannot configure IP reassembly params\n",
5836 			port_id);
5837 		return -EINVAL;
5838 	}
5839 
5840 	if (conf == NULL) {
5841 		RTE_ETHDEV_LOG(ERR,
5842 				"Invalid IP reassembly configuration (NULL)\n");
5843 		return -EINVAL;
5844 	}
5845 
5846 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set,
5847 				-ENOTSUP);
5848 	return eth_err(port_id,
5849 		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
5850 }
5851 
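/*
 * Dumps PMD-private state for debugging; a debug CLI might call, e.g.,
 * rte_eth_dev_priv_dump(port_id, stdout). What gets printed is
 * entirely up to the driver's eth_dev_priv_dump callback.
 */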
5852 int
5853 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
5854 {
5855 	struct rte_eth_dev *dev;
5856 
5857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5858 	dev = &rte_eth_devices[port_id];
5859 
5860 	if (file == NULL) {
5861 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
5862 		return -EINVAL;
5863 	}
5864 
5865 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP);
5866 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
5867 }
5868 
5869 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
5870 
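/*
 * Constructor: registers the ethdev telemetry endpoints when the
 * library is loaded. They can then be queried over the telemetry
 * socket, e.g. with usertools/dpdk-telemetry.py (illustrative):
 *
 *   --> /ethdev/list
 *   <-- {"/ethdev/list": [0, 1]}
 */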
5871 RTE_INIT(ethdev_init_telemetry)
5872 {
5873 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
5874 			"Returns list of available ethdev ports. Takes no parameters");
5875 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
5876 			"Returns the common stats for a port. Parameters: int port_id");
5877 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
5878 			"Returns the extended stats for a port. Parameters: int port_id");
5879 	rte_telemetry_register_cmd("/ethdev/link_status",
5880 			eth_dev_handle_port_link_status,
5881 			"Returns the link status for a port. Parameters: int port_id");
5882 	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
5883 			"Returns the device info for a port. Parameters: int port_id");
5884 }
5885