/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev.h"
#include "rte_ethdev_trace_fp.h"
#include "ethdev_driver.h"
#include "rte_flow_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "ethdev_trace.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

static const struct {
	enum rte_eth_hash_function algo;
	const char *name;
} rte_eth_dev_rss_algo_names[] = {
	{RTE_ETH_HASH_FUNCTION_DEFAULT, "default"},
	{RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, "simple_xor"},
	{RTE_ETH_HASH_FUNCTION_TOEPLITZ, "toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, "symmetric_toeplitz"},
	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, "symmetric_toeplitz_sort"},
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL iterator");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot initialize iterator from NULL device description string");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of old syntax can match only at ethdev level.
	 * Extra parameters will be ignored, thanks to "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);

	rte_eth_trace_iterator_init(devargs_str);

	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG_LINE(ERR, "Bus %s does not support iterating.",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get next device from NULL iterator");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device matched the bus part; now check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL) {
			uint16_t id = eth_dev_to_id(iter->class_device);

			rte_eth_trace_iterator_next(iter, id);

			return id; /* match */
		}
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot do clean up from NULL iterator");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */

	rte_eth_trace_iterator_cleanup(iter);

	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
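
/*
 * Usage sketch (illustrative only, not part of this file): walk all ethdev
 * ports matching a devargs string with the iterator trio above. The
 * RTE_ETH_FOREACH_MATCHING_DEV() helper in rte_ethdev.h wraps the same
 * pattern. Error handling is elided for brevity.
 *
 *	struct rte_dev_iterator it;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&it, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&it);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&it))
 *			printf("matched port %u\n", port_id);
 *		// rte_eth_iterator_next() cleans up after the last match;
 *		// an explicit cleanup is only needed when breaking early,
 *		// but calling it again here is harmless.
 *		rte_eth_iterator_cleanup(&it);
 *	}
 */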

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	rte_eth_trace_find_next(port_id);

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_of(port_id, parent);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	uint16_t ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	ret = rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);

	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);

	return ret;
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	int is_valid;

	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		is_valid = 0;
	else
		is_valid = 1;

	rte_ethdev_trace_is_valid_port(port_id, is_valid);

	return is_valid;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	rte_eth_trace_find_next_owned_by(port_id, owner_id);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	int ret;

	if (owner_id == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get new owner ID to NULL");
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		*owner_id = eth_dev_shared_data->next_owner_id++;
		eth_dev_shared_data->allocated_owners++;
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_new(*owner_id, ret);

	return ret;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot set ethdev port %u owner from NULL owner",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64,
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64,
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* can not truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG_LINE(DEBUG, "Port %u owner is %s_%016"PRIx64,
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_set(port_id, owner, ret);

	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL)
		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
	else
		ret = -ENOMEM;

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() == NULL) {
		ret = -ENOMEM;
	} else if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG_LINE(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed",
			owner_id);
		eth_dev_shared_data->allocated_owners--;
		eth_dev_shared_data_release();
	} else {
		RTE_ETHDEV_LOG_LINE(ERR,
			       "Invalid owner ID=%016"PRIx64,
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_delete(owner_id, ret);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u owner to NULL",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());

	if (eth_dev_shared_data_prepare() != NULL) {
		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
		ret = 0;
	} else {
		ret = -ENOMEM;
	}

	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	rte_ethdev_trace_owner_get(port_id, owner, ret);

	return ret;
}
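
/*
 * Usage sketch (illustrative only): the typical owner lifecycle built from
 * the functions above. An owner ID is allocated once, attached to a port to
 * keep other entities from using it, and released when done. 'port_id' is
 * assumed valid.
 *
 *	uint64_t owner_id;
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *		owner.id = owner_id;
 *		rte_eth_dev_owner_set(port_id, &owner);     // take ownership
 *		...
 *		rte_eth_dev_owner_unset(port_id, owner_id); // release one port
 *		rte_eth_dev_owner_delete(owner_id);         // release all, free ID
 *	}
 */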

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}

	rte_ethdev_trace_socket_id(port_id, socket_id);

	return socket_id;
}
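
/*
 * Caller note (illustrative sketch): SOCKET_ID_ANY is returned both for an
 * invalid port and for a valid port with unknown NUMA affinity; rte_errno
 * disambiguates the two cases as set above.
 *
 *	rte_errno = 0;
 *	int socket = rte_eth_dev_socket_id(port_id);
 *	if (socket == SOCKET_ID_ANY && rte_errno == EINVAL)
 *		; // invalid port
 *	else if (socket == SOCKET_ID_ANY)
 *		; // valid port, NUMA node unknown
 */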

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	void *ctx;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ctx = rte_eth_devices[port_id].security_ctx;

	rte_ethdev_trace_get_sec_ctx(port_id, ctx);

	return ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	rte_ethdev_trace_count_avail(count);

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	rte_ethdev_trace_count_total(count);

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u name to NULL",
			port_id);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	strcpy(name, tmp);

	rte_ethdev_trace_get_name_by_port(port_id, name);

	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	int ret = -ENODEV;
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get port ID from NULL name");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot get port ID to NULL for %s", name);
		return -EINVAL;
	}

	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
	RTE_ETH_FOREACH_VALID_DEV(pid) {
		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
			continue;

		*port_id = pid;
		rte_ethdev_trace_get_port_by_name(name, *port_id);
		ret = 0;
		break;
	}
	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());

	return ret;
}
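
/*
 * Usage sketch (illustrative only): the two lookups above are inverses of
 * each other, e.g. to resolve a port from a known device name and back:
 *
 *	uint16_t port_id;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0)
 *		rte_eth_dev_get_name_by_port(port_id, name);
 */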

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			       "Queue %u of device with port_id=%u has not been setup",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG_LINE(ERR,
			       "Queue %u of device with port_id=%u has not been setup",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_rx_queue(dev, queue_id);
}

int
rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	return eth_dev_validate_tx_queue(dev, queue_id);
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be started before starting any queue",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
			rx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));

	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be started before starting any queue",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);

	return ret;
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
			tx_queue_id, port_id);
		return 0;
	}

	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));

	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);

	return ret;
}
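
/*
 * Usage sketch (illustrative only): the per-queue start/stop calls above are
 * typically paired with a deferred-start queue configuration, so individual
 * queues are brought up after rte_eth_dev_start(). Port and queue 0 are
 * assumed to be configured already.
 *
 *	struct rte_eth_rxconf rxconf = { .rx_deferred_start = 1 };
 *	// ... pass &rxconf to rte_eth_rx_queue_setup() before starting ...
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_rx_queue_start(port_id, 0); // start the deferred queue
 *	...
 *	rte_eth_dev_rx_queue_stop(port_id, 0);
 */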

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	uint32_t ret;

	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
		break;
	case RTE_ETH_SPEED_NUM_100M:
		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
		break;
	case RTE_ETH_SPEED_NUM_1G:
		ret = RTE_ETH_LINK_SPEED_1G;
		break;
	case RTE_ETH_SPEED_NUM_2_5G:
		ret = RTE_ETH_LINK_SPEED_2_5G;
		break;
	case RTE_ETH_SPEED_NUM_5G:
		ret = RTE_ETH_LINK_SPEED_5G;
		break;
	case RTE_ETH_SPEED_NUM_10G:
		ret = RTE_ETH_LINK_SPEED_10G;
		break;
	case RTE_ETH_SPEED_NUM_20G:
		ret = RTE_ETH_LINK_SPEED_20G;
		break;
	case RTE_ETH_SPEED_NUM_25G:
		ret = RTE_ETH_LINK_SPEED_25G;
		break;
	case RTE_ETH_SPEED_NUM_40G:
		ret = RTE_ETH_LINK_SPEED_40G;
		break;
	case RTE_ETH_SPEED_NUM_50G:
		ret = RTE_ETH_LINK_SPEED_50G;
		break;
	case RTE_ETH_SPEED_NUM_56G:
		ret = RTE_ETH_LINK_SPEED_56G;
		break;
	case RTE_ETH_SPEED_NUM_100G:
		ret = RTE_ETH_LINK_SPEED_100G;
		break;
	case RTE_ETH_SPEED_NUM_200G:
		ret = RTE_ETH_LINK_SPEED_200G;
		break;
	case RTE_ETH_SPEED_NUM_400G:
		ret = RTE_ETH_LINK_SPEED_400G;
		break;
	default:
		ret = 0;
	}

	rte_eth_trace_speed_bitflag(speed, duplex, ret);

	return ret;
}
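
/*
 * Usage sketch (illustrative only): build a fixed-speed link_speeds mask for
 * rte_eth_dev_configure() from a numeric speed, e.g. forcing 10G full duplex:
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */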

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_rx_offload_name(offload, name);

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_tx_offload_name(offload, name);

	return name;
}

static char *
eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
	const char *(*offload_name)(uint64_t))
{
	unsigned int pos = 0;
	int ret;

	/* There should be at least enough space to handle those cases */
	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));

	if (bitmask == 0) {
		ret = snprintf(&buf[pos], size - pos, "none");
		if (ret < 0 || pos + ret >= size)
			ret = 0;
		pos += ret;
		goto out;
	}

	while (bitmask != 0) {
		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
		const char *name = offload_name(offload);

		ret = snprintf(&buf[pos], size - pos, "%s,", name);
		if (ret < 0 || pos + ret >= size) {
			if (pos + sizeof("...") >= size)
				pos = size - sizeof("...");
			ret = snprintf(&buf[pos], size - pos, "...");
			if (ret > 0 && pos + ret < size)
				pos += ret;
			goto out;
		}

		pos += ret;
		bitmask &= ~offload;
	}

	/* Eliminate trailing comma */
	pos--;
out:
	buf[pos] = '\0';
	return buf;
}
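
/*
 * For example (illustrative), a bitmask of VLAN_STRIP|IPV4_CKSUM rendered
 * through rte_eth_dev_rx_offload_name yields "VLAN_STRIP,IPV4_CKSUM" (bits
 * are walked from least significant upward), an empty mask yields "none",
 * and output that would overflow 'buf' is truncated with a trailing "...".
 */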

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	rte_ethdev_trace_capability_name(capability, name);

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(rte_ctz64(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Port %u failed to enable %s offload %s",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG_LINE(DEBUG,
				"Port %u %s offload %s is not requested but enabled",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
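
/*
 * Worked example (illustrative): a device reporting max_rx_pktlen = 1518 and
 * max_mtu = 1500 yields overhead_len = 18 (14-byte Ethernet header + 4-byte
 * CRC), so an MTU of 1500 corresponds to a frame size of 1518. Devices that
 * do not report a usable max_mtu fall back to the same
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN constant.
 */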

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	enum rte_eth_hash_function algorithm;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Cannot configure ethdev port %u from NULL config",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port %u must be stopped to allow configuration",
			port_id);
		return -EBUSY;
	}
	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	 /* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	/* fields must be zero to reserve them for future ABI changes */
	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
	    dev_conf->rxmode.reserved_64s[1] != 0 ||
	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Rxmode reserved fields not zero");
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->txmode.reserved_64s[0] != 0 ||
	    dev_conf->txmode.reserved_64s[1] != 0 ||
	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
		RTE_ETHDEV_LOG_LINE(ERR, "Txmode reserved fields not zero");
		ret = -EINVAL;
		goto rollback;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support lsc",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support rmv",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu =
			(dev_info.max_mtu == 0) ? RTE_ETHER_MTU :
			RTE_MIN(dev_info.max_mtu, RTE_ETHER_MTU);

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Rx offloads %s",
			port_id, eth_dev_offload_names(
			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s",
			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Rx offloads %s",
			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));

		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		char buffer[512];

		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Tx offloads %s",
			port_id, eth_dev_offload_names(
			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s",
			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Tx offloads %s",
			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
			dev_info.hash_key_size);
		ret = -EINVAL;
		goto rollback;
	}

	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Ethdev port_id=%u configured RSS hash algorithm (%u) "
			"is not in the algorithm capability (0x%" PRIx32 ")",
			port_id, algorithm, dev_info.rss_algo_capa);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port%u eth_dev_rx_queue_config = %d",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Port%u eth_dev_tx_queue_config = %d",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port%u dev_configure = %d",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG_LINE(ERR, "Port%u __rte_eth_dev_profile_init = %d",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
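
/*
 * Usage sketch (illustrative only): a minimal single-queue configuration as
 * an application would call it; 'port_id' is assumed valid and stopped.
 * Unset fields of the zeroed config are filled in by the fallbacks above
 * (e.g. rxmode.mtu == 0 becomes RTE_ETHER_MTU, capped by the device max).
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */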

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * want to bypass the same-value check
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * use callbacks directly since we don't need the port_id check and
	 * want to bypass the same-value check
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG_LINE(INFO,
			"Device with port_id=%"PRIu16" already started",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG_LINE(ERR,
			"Error during restoring configuration for device (port %u): %s",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG_LINE(ERR,
				"Failed to stop device (port %u): %s",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
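
/*
 * Usage sketch (illustrative only): the canonical port lifecycle around
 * rte_eth_dev_start(). 'conf' and 'mb_pool' are assumed to exist; queue
 * setup must happen between configure and start.
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) == 0 &&
 *	    rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY,
 *				   NULL, mb_pool) == 0 &&
 *	    rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL) == 0 &&
 *	    rte_eth_dev_start(port_id) == 0) {
 *		... rte_eth_rx_burst() / rte_eth_tx_burst() ...
 *		rte_eth_dev_stop(port_id);
 *		rte_eth_dev_close(port_id);
 *	}
 */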
1785 
1786 int
1787 rte_eth_dev_stop(uint16_t port_id)
1788 {
1789 	struct rte_eth_dev *dev;
1790 	int ret;
1791 
1792 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1793 	dev = &rte_eth_devices[port_id];
1794 
1795 	if (*dev->dev_ops->dev_stop == NULL)
1796 		return -ENOTSUP;
1797 
1798 	if (dev->data->dev_started == 0) {
1799 		RTE_ETHDEV_LOG_LINE(INFO,
1800 			"Device with port_id=%"PRIu16" already stopped",
1801 			port_id);
1802 		return 0;
1803 	}
1804 
1805 	/* point fast-path functions to dummy ones */
1806 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1807 
1808 	ret = (*dev->dev_ops->dev_stop)(dev);
1809 	if (ret == 0)
1810 		dev->data->dev_started = 0;
1811 	rte_ethdev_trace_stop(port_id, ret);
1812 
1813 	return ret;
1814 }
1815 
1816 int
1817 rte_eth_dev_set_link_up(uint16_t port_id)
1818 {
1819 	struct rte_eth_dev *dev;
1820 	int ret;
1821 
1822 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1823 	dev = &rte_eth_devices[port_id];
1824 
1825 	if (*dev->dev_ops->dev_set_link_up == NULL)
1826 		return -ENOTSUP;
1827 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1828 
1829 	rte_ethdev_trace_set_link_up(port_id, ret);
1830 
1831 	return ret;
1832 }
1833 
1834 int
1835 rte_eth_dev_set_link_down(uint16_t port_id)
1836 {
1837 	struct rte_eth_dev *dev;
1838 	int ret;
1839 
1840 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1841 	dev = &rte_eth_devices[port_id];
1842 
1843 	if (*dev->dev_ops->dev_set_link_down == NULL)
1844 		return -ENOTSUP;
1845 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1846 
1847 	rte_ethdev_trace_set_link_down(port_id, ret);
1848 
1849 	return ret;
1850 }
1851 
1852 int
1853 rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lane)
1854 {
1855 	struct rte_eth_dev *dev;
1856 
1857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1858 	dev = &rte_eth_devices[port_id];
1859 
1860 	if (*dev->dev_ops->speed_lanes_get == NULL)
1861 		return -ENOTSUP;
1862 	return eth_err(port_id, (*dev->dev_ops->speed_lanes_get)(dev, lane));
1863 }
1864 
1865 int
1866 rte_eth_speed_lanes_get_capability(uint16_t port_id,
1867 				   struct rte_eth_speed_lanes_capa *speed_lanes_capa,
1868 				   unsigned int num)
1869 {
1870 	struct rte_eth_dev *dev;
1871 	int ret;
1872 
1873 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1874 	dev = &rte_eth_devices[port_id];
1875 
1876 	if (*dev->dev_ops->speed_lanes_get_capa == NULL)
1877 		return -ENOTSUP;
1878 
1879 	if (speed_lanes_capa == NULL && num > 0) {
1880 		RTE_ETHDEV_LOG_LINE(ERR,
1881 				    "Cannot get ethdev port %u speed lanes capability to NULL when array size is non zero",
1882 				    port_id);
1883 		return -EINVAL;
1884 	}
1885 
1886 	ret = (*dev->dev_ops->speed_lanes_get_capa)(dev, speed_lanes_capa, num);
1887 
1888 	return ret;
1889 }
1890 
1891 int
1892 rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes_capa)
1893 {
1894 	struct rte_eth_dev *dev;
1895 
1896 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1897 	dev = &rte_eth_devices[port_id];
1898 
1899 	if (*dev->dev_ops->speed_lanes_set == NULL)
1900 		return -ENOTSUP;
1901 	return eth_err(port_id, (*dev->dev_ops->speed_lanes_set)(dev, speed_lanes_capa));
1902 }
1903 
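/*
 * Editor's note: hedged usage sketch (not part of this file) for the
 * speed-lane API above. The first call with a NULL array is assumed to
 * return the number of capability entries, mirroring the NULL/num check in
 * rte_eth_speed_lanes_get_capability(); "4" is just an example lane count:
 *
 *	int n = rte_eth_speed_lanes_get_capability(port_id, NULL, 0);
 *	if (n > 0) {
 *		struct rte_eth_speed_lanes_capa *capa =
 *			calloc(n, sizeof(*capa));
 *		if (capa != NULL &&
 *		    rte_eth_speed_lanes_get_capability(port_id, capa, n) == n)
 *			(void)rte_eth_speed_lanes_set(port_id, 4);
 *		free(capa);
 *	}
 */
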
1904 int
1905 rte_eth_dev_close(uint16_t port_id)
1906 {
1907 	struct rte_eth_dev *dev;
1908 	int firsterr, binerr;
1909 	int *lasterr = &firsterr;
1910 
1911 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1912 	dev = &rte_eth_devices[port_id];
1913 
1914 	/*
1915 	 * A secondary process needs to close the device to release its
1916 	 * process-private resources, but it should not be obliged to wait
1917 	 * for the device to stop before closing the ethdev.
1918 	 */
1919 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1920 			dev->data->dev_started) {
1921 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot close started device (port %u)",
1922 			       port_id);
1923 		return -EINVAL;
1924 	}
1925 
1926 	if (*dev->dev_ops->dev_close == NULL)
1927 		return -ENOTSUP;
1928 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1929 	if (*lasterr != 0)
1930 		lasterr = &binerr;
1931 
1932 	rte_ethdev_trace_close(port_id);
1933 	*lasterr = rte_eth_dev_release_port(dev);
1934 
1935 	return firsterr;
1936 }
1937 
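/*
 * Editor's note: minimal teardown sketch (not part of this file). A started
 * port must be stopped before it can be closed, as enforced by
 * rte_eth_dev_close() above for primary processes:
 *
 *	(void)rte_eth_dev_stop(port_id);
 *	ret = rte_eth_dev_close(port_id);
 *	if (ret != 0)
 *		printf("close failed: %s\n", rte_strerror(-ret));
 */
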
1938 int
1939 rte_eth_dev_reset(uint16_t port_id)
1940 {
1941 	struct rte_eth_dev *dev;
1942 	int ret;
1943 
1944 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1945 	dev = &rte_eth_devices[port_id];
1946 
1947 	if (*dev->dev_ops->dev_reset == NULL)
1948 		return -ENOTSUP;
1949 
1950 	ret = rte_eth_dev_stop(port_id);
1951 	if (ret != 0) {
1952 		RTE_ETHDEV_LOG_LINE(ERR,
1953 			"Failed to stop device (port %u) before reset: %s, ignoring",
1954 			port_id, rte_strerror(-ret));
1955 	}
1956 	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
1957 
1958 	rte_ethdev_trace_reset(port_id, ret);
1959 
1960 	return ret;
1961 }
1962 
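/*
 * Editor's note: sketch (not part of this file) of the recovery flow
 * rte_eth_dev_reset() is meant for, e.g. from an application handler for
 * the RTE_ETH_EVENT_INTR_RESET event. After a successful reset the port
 * must be reconfigured before restarting; the configure/setup arguments
 * shown ("port_conf", "mbuf_pool", queue sizes) are placeholders:
 *
 *	if (rte_eth_dev_reset(port_id) == 0) {
 *		(void)rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *		(void)rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *		(void)rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *		(void)rte_eth_dev_start(port_id);
 *	}
 */
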
1963 int
1964 rte_eth_dev_is_removed(uint16_t port_id)
1965 {
1966 	struct rte_eth_dev *dev;
1967 	int ret;
1968 
1969 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1970 	dev = &rte_eth_devices[port_id];
1971 
1972 	if (dev->state == RTE_ETH_DEV_REMOVED)
1973 		return 1;
1974 
1975 	if (*dev->dev_ops->is_removed == NULL)
1976 		return 0;
1977 
1978 	ret = dev->dev_ops->is_removed(dev);
1979 	if (ret != 0)
1980 		/* Device is physically removed. */
1981 		dev->state = RTE_ETH_DEV_REMOVED;
1982 
1983 	rte_ethdev_trace_is_removed(port_id, ret);
1984 
1985 	return ret;
1986 }
1987 
1988 static int
1989 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
1990 			 uint16_t min_length)
1991 {
1992 	uint16_t data_room_size;
1993 
1994 	/*
1995 	 * Check the size of the mbuf data buffer: this value
1996 	 * must be provided in the private data of the memory pool.
1997 	 * First check that the memory pool has valid private data.
1998 	 */
1999 	if (mp->private_data_size <
2000 			sizeof(struct rte_pktmbuf_pool_private)) {
2001 		RTE_ETHDEV_LOG_LINE(ERR, "%s private_data_size %u < %u",
2002 			mp->name, mp->private_data_size,
2003 			(unsigned int)
2004 			sizeof(struct rte_pktmbuf_pool_private));
2005 		return -ENOSPC;
2006 	}
2007 	data_room_size = rte_pktmbuf_data_room_size(mp);
2008 	if (data_room_size < offset + min_length) {
2009 		RTE_ETHDEV_LOG_LINE(ERR,
2010 			       "%s mbuf_data_room_size %u < %u (%u + %u)",
2011 			       mp->name, data_room_size,
2012 			       offset + min_length, offset, min_length);
2013 		return -EINVAL;
2014 	}
2015 	return 0;
2016 }
2017 
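/*
 * Editor's note: sketch (not part of this file) of creating a mempool that
 * passes rte_eth_check_rx_mempool() above: rte_pktmbuf_pool_create() fills
 * in the rte_pktmbuf_pool_private area, and RTE_MBUF_DEFAULT_BUF_SIZE
 * already includes RTE_PKTMBUF_HEADROOM on top of the default data room.
 * Pool name and sizing are illustrative:
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 */
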
2018 static int
2019 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
2020 {
2021 	int cnt;
2022 
2023 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
2024 	if (cnt <= 0)
2025 		return cnt;
2026 
2027 	*ptypes = malloc(sizeof(uint32_t) * cnt);
2028 	if (*ptypes == NULL)
2029 		return -ENOMEM;
2030 
2031 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
2032 	if (cnt <= 0) {
2033 		free(*ptypes);
2034 		*ptypes = NULL;
2035 	}
2036 	return cnt;
2037 }
2038 
2039 static int
2040 rte_eth_rx_queue_check_split(uint16_t port_id,
2041 			const struct rte_eth_rxseg_split *rx_seg,
2042 			uint16_t n_seg, uint32_t *mbp_buf_size,
2043 			const struct rte_eth_dev_info *dev_info)
2044 {
2045 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
2046 	struct rte_mempool *mp_first;
2047 	uint32_t offset_mask;
2048 	uint16_t seg_idx;
2049 	int ret = 0;
2050 	int ptype_cnt;
2051 	uint32_t *ptypes;
2052 	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
2053 	int i;
2054 
2055 	if (n_seg > seg_capa->max_nseg) {
2056 		RTE_ETHDEV_LOG_LINE(ERR,
2057 			       "Requested Rx segments %u exceed supported %u",
2058 			       n_seg, seg_capa->max_nseg);
2059 		return -EINVAL;
2060 	}
2061 	/*
2062 	 * Check the sizes and offsets against buffer sizes
2063 	 * for each segment specified in extended configuration.
2064 	 */
2065 	mp_first = rx_seg[0].mp;
2066 	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
2067 
2068 	ptypes = NULL;
2069 	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);
2070 
2071 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
2072 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
2073 		uint32_t length = rx_seg[seg_idx].length;
2074 		uint32_t offset = rx_seg[seg_idx].offset;
2075 		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;
2076 
2077 		if (mpl == NULL) {
2078 			RTE_ETHDEV_LOG_LINE(ERR, "null mempool pointer");
2079 			ret = -EINVAL;
2080 			goto out;
2081 		}
2082 		if (seg_idx != 0 && mp_first != mpl &&
2083 		    seg_capa->multi_pools == 0) {
2084 			RTE_ETHDEV_LOG_LINE(ERR, "Receiving to multiple pools is not supported");
2085 			ret = -ENOTSUP;
2086 			goto out;
2087 		}
2088 		if (offset != 0) {
2089 			if (seg_capa->offset_allowed == 0) {
2090 				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation with offset is not supported");
2091 				ret = -ENOTSUP;
2092 				goto out;
2093 			}
2094 			if (offset & offset_mask) {
2095 				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation invalid offset alignment %u, %u",
2096 					       offset,
2097 					       seg_capa->offset_align_log2);
2098 				ret = -EINVAL;
2099 				goto out;
2100 			}
2101 		}
2102 
2103 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
2104 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
2105 		if (proto_hdr != 0) {
2106 			/* Split based on protocol headers. */
2107 			if (length != 0) {
2108 				RTE_ETHDEV_LOG_LINE(ERR,
2109 					"Cannot set both length-based and protocol-header split within a segment"
2110 					);
2111 				ret = -EINVAL;
2112 				goto out;
2113 			}
2114 			if ((proto_hdr & prev_proto_hdrs) != 0) {
2115 				RTE_ETHDEV_LOG_LINE(ERR,
2116 					"Protocol headers repeat an earlier segment, or protocol-based split follows a length-based split"
2117 					);
2118 				ret = -EINVAL;
2119 				goto out;
2120 			}
2121 			if (ptype_cnt <= 0) {
2122 				RTE_ETHDEV_LOG_LINE(ERR,
2123 					"Port %u failed to get supported buffer split header protocols",
2124 					port_id);
2125 				ret = -ENOTSUP;
2126 				goto out;
2127 			}
2128 			for (i = 0; i < ptype_cnt; i++) {
2129 				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
2130 					break;
2131 			}
2132 			if (i == ptype_cnt) {
2133 				RTE_ETHDEV_LOG_LINE(ERR,
2134 					"Requested Rx split header protocols 0x%x are not supported.",
2135 					proto_hdr);
2136 				ret = -EINVAL;
2137 				goto out;
2138 			}
2139 			prev_proto_hdrs |= proto_hdr;
2140 		} else {
2141 			/* Split at fixed length. */
2142 			length = length != 0 ? length : *mbp_buf_size;
2143 			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
2144 		}
2145 
2146 		ret = rte_eth_check_rx_mempool(mpl, offset, length);
2147 		if (ret != 0)
2148 			goto out;
2149 	}
2150 out:
2151 	free(ptypes);
2152 	return ret;
2153 }
2154 
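/*
 * Editor's note: sketch (not part of this file) of a two-segment,
 * length-based buffer split configuration that the checks above would
 * accept; "hdr_pool"/"pay_pool" are assumed pre-created pktmbuf pools.
 * It also assumes the PMD reports BUFFER_SPLIT as a per-queue offload
 * capability; otherwise the offload must be enabled at configure time:
 *
 *	union rte_eth_rxseg seg[2];
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	memset(seg, 0, sizeof(seg));
 *	seg[0].split.mp = hdr_pool;
 *	seg[0].split.length = 128;	// first 128 bytes of each packet
 *	seg[1].split.mp = pay_pool;
 *	seg[1].split.length = 0;	// remainder of the packet
 *	rxconf.rx_seg = seg;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), &rxconf, NULL);
 */
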
2155 static int
2156 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
2157 			       uint16_t n_mempools, uint32_t *min_buf_size,
2158 			       const struct rte_eth_dev_info *dev_info)
2159 {
2160 	uint16_t pool_idx;
2161 	int ret;
2162 
2163 	if (n_mempools > dev_info->max_rx_mempools) {
2164 		RTE_ETHDEV_LOG_LINE(ERR,
2165 			       "Too many Rx mempools %u vs maximum %u",
2166 			       n_mempools, dev_info->max_rx_mempools);
2167 		return -EINVAL;
2168 	}
2169 
2170 	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
2171 		struct rte_mempool *mp = rx_mempools[pool_idx];
2172 
2173 		if (mp == NULL) {
2174 			RTE_ETHDEV_LOG_LINE(ERR, "null Rx mempool pointer");
2175 			return -EINVAL;
2176 		}
2177 
2178 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2179 					       dev_info->min_rx_bufsize);
2180 		if (ret != 0)
2181 			return ret;
2182 
2183 		*min_buf_size = RTE_MIN(*min_buf_size,
2184 					rte_pktmbuf_data_room_size(mp));
2185 	}
2186 
2187 	return 0;
2188 }
2189 
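/*
 * Editor's note: sketch (not part of this file) of the multiple-mempool
 * alternative checked above, where the PMD picks a pool per packet size;
 * "small_pool" and "large_pool" are assumed pre-created pktmbuf pools:
 *
 *	struct rte_mempool *pools[] = { small_pool, large_pool };
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_mempools = pools;
 *	rxconf.rx_nmempool = RTE_DIM(pools);
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), &rxconf, NULL);
 */
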
2190 int
2191 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2192 		       uint16_t nb_rx_desc, unsigned int socket_id,
2193 		       const struct rte_eth_rxconf *rx_conf,
2194 		       struct rte_mempool *mp)
2195 {
2196 	int ret;
2197 	uint64_t rx_offloads;
2198 	uint32_t mbp_buf_size = UINT32_MAX;
2199 	struct rte_eth_dev *dev;
2200 	struct rte_eth_dev_info dev_info;
2201 	struct rte_eth_rxconf local_conf;
2202 	uint32_t buf_data_size;
2203 
2204 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2205 	dev = &rte_eth_devices[port_id];
2206 
2207 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2208 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
2209 		return -EINVAL;
2210 	}
2211 
2212 	if (*dev->dev_ops->rx_queue_setup == NULL)
2213 		return -ENOTSUP;
2214 
2215 	if (rx_conf != NULL &&
2216 	   (rx_conf->reserved_64s[0] != 0 ||
2217 	    rx_conf->reserved_64s[1] != 0 ||
2218 	    rx_conf->reserved_ptrs[0] != NULL ||
2219 	    rx_conf->reserved_ptrs[1] != NULL)) {
2220 		RTE_ETHDEV_LOG_LINE(ERR, "Rx conf reserved fields not zero");
2221 		return -EINVAL;
2222 	}
2223 
2224 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2225 	if (ret != 0)
2226 		return ret;
2227 
2228 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
2229 	if (rx_conf != NULL)
2230 		rx_offloads |= rx_conf->offloads;
2231 
2232 	/* Ensure that we have one and only one source of Rx buffers */
2233 	if ((mp != NULL) +
2234 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
2235 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
2236 		RTE_ETHDEV_LOG_LINE(ERR,
2237 			       "Ambiguous Rx mempools configuration");
2238 		return -EINVAL;
2239 	}
2240 
2241 	if (mp != NULL) {
2242 		/* Single pool configuration check. */
2243 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2244 					       dev_info.min_rx_bufsize);
2245 		if (ret != 0)
2246 			return ret;
2247 
2248 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2249 		buf_data_size = mbp_buf_size - RTE_PKTMBUF_HEADROOM;
2250 		if (buf_data_size > dev_info.max_rx_bufsize)
2251 			RTE_ETHDEV_LOG_LINE(DEBUG,
2252 				"For port_id=%u, the mbuf data buffer size (%u) is bigger than "
2253 				"the max buffer size (%u) the device can utilize, so the mbuf size can be reduced.",
2254 				port_id, buf_data_size, dev_info.max_rx_bufsize);
2255 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
2256 		const struct rte_eth_rxseg_split *rx_seg;
2257 		uint16_t n_seg;
2258 
2259 		/* Extended multi-segment configuration check. */
2260 		if (rx_conf->rx_seg == NULL) {
2261 			RTE_ETHDEV_LOG_LINE(ERR,
2262 				       "Memory pool is null and no multi-segment configuration provided");
2263 			return -EINVAL;
2264 		}
2265 
2266 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2267 		n_seg = rx_conf->rx_nseg;
2268 
2269 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2270 			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
2271 							   &mbp_buf_size,
2272 							   &dev_info);
2273 			if (ret != 0)
2274 				return ret;
2275 		} else {
2276 			RTE_ETHDEV_LOG_LINE(ERR, "No Rx segmentation offload configured");
2277 			return -EINVAL;
2278 		}
2279 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
2280 		/* Extended multi-pool configuration check. */
2281 		if (rx_conf->rx_mempools == NULL) {
2282 			RTE_ETHDEV_LOG_LINE(ERR, "Memory pools array is null");
2283 			return -EINVAL;
2284 		}
2285 
2286 		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
2287 						     rx_conf->rx_nmempool,
2288 						     &mbp_buf_size,
2289 						     &dev_info);
2290 		if (ret != 0)
2291 			return ret;
2292 	} else {
2293 		RTE_ETHDEV_LOG_LINE(ERR, "Missing Rx mempool configuration");
2294 		return -EINVAL;
2295 	}
2296 
2297 	/* Use default specified by driver, if nb_rx_desc is zero */
2298 	if (nb_rx_desc == 0) {
2299 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
2300 		/* If driver default is also zero, fall back on EAL default */
2301 		if (nb_rx_desc == 0)
2302 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2303 	}
2304 
2305 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2306 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2307 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2308 
2309 		RTE_ETHDEV_LOG_LINE(ERR,
2310 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu",
2311 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2312 			dev_info.rx_desc_lim.nb_min,
2313 			dev_info.rx_desc_lim.nb_align);
2314 		return -EINVAL;
2315 	}
2316 
2317 	if (dev->data->dev_started &&
2318 		!(dev_info.dev_capa &
2319 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2320 		return -EBUSY;
2321 
2322 	if (dev->data->dev_started &&
2323 		(dev->data->rx_queue_state[rx_queue_id] !=
2324 			RTE_ETH_QUEUE_STATE_STOPPED))
2325 		return -EBUSY;
2326 
2327 	eth_dev_rxq_release(dev, rx_queue_id);
2328 
2329 	if (rx_conf == NULL)
2330 		rx_conf = &dev_info.default_rxconf;
2331 
2332 	local_conf = *rx_conf;
2333 
2334 	/*
2335 	 * If an offload has already been enabled in
2336 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2337 	 * so there is no need to enable it on this queue again.
2338 	 * The local_conf.offloads input to the underlying PMD only carries
2339 	 * those offloads which are enabled only on this queue and
2340 	 * not on all queues.
2341 	 */
2342 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2343 
2344 	/*
2345 	 * Newly added offloads for this queue are those not enabled in
2346 	 * rte_eth_dev_configure(), and they must be of a per-queue type.
2347 	 * A pure per-port offload can't be enabled on a queue while
2348 	 * disabled on another queue. A pure per-port offload also can't
2349 	 * be newly added on any queue if it hasn't been
2350 	 * enabled in rte_eth_dev_configure().
2351 	 */
2352 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2353 	     local_conf.offloads) {
2354 		RTE_ETHDEV_LOG_LINE(ERR,
2355 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2356 			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
2357 			port_id, rx_queue_id, local_conf.offloads,
2358 			dev_info.rx_queue_offload_capa,
2359 			__func__);
2360 		return -EINVAL;
2361 	}
2362 
2363 	if (local_conf.share_group > 0 &&
2364 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2365 		RTE_ETHDEV_LOG_LINE(ERR,
2366 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share",
2367 			port_id, rx_queue_id, local_conf.share_group);
2368 		return -EINVAL;
2369 	}
2370 
2371 	/*
2372 	 * If LRO is enabled, check that the maximum aggregated packet
2373 	 * size is supported by the configured device.
2374 	 */
2375 	/* Get the real Ethernet overhead length */
2376 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2377 		uint32_t overhead_len;
2378 		uint32_t max_rx_pktlen;
2379 		int ret;
2380 
2381 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2382 				dev_info.max_mtu);
2383 		max_rx_pktlen = dev->data->mtu + overhead_len;
2384 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2385 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2386 		ret = eth_dev_check_lro_pkt_size(port_id,
2387 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2388 				max_rx_pktlen,
2389 				dev_info.max_lro_pkt_size);
2390 		if (ret != 0)
2391 			return ret;
2392 	}
2393 
2394 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2395 					      socket_id, &local_conf, mp);
2396 	if (!ret) {
2397 		if (!dev->data->min_rx_buf_size ||
2398 		    dev->data->min_rx_buf_size > mbp_buf_size)
2399 			dev->data->min_rx_buf_size = mbp_buf_size;
2400 	}
2401 
2402 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2403 		rx_conf, ret);
2404 	return eth_err(port_id, ret);
2405 }
2406 
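/*
 * Editor's note: the common single-mempool call (not part of this file);
 * NULL rx_conf selects the driver's default_rxconf as handled above, and
 * "mbuf_pool" is an assumed pktmbuf pool:
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	if (ret != 0)
 *		printf("rx queue setup failed: %s\n", rte_strerror(-ret));
 */
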
2407 int
2408 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2409 			       uint16_t nb_rx_desc,
2410 			       const struct rte_eth_hairpin_conf *conf)
2411 {
2412 	int ret;
2413 	struct rte_eth_dev *dev;
2414 	struct rte_eth_hairpin_cap cap;
2415 	int i;
2416 	int count;
2417 
2418 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2419 	dev = &rte_eth_devices[port_id];
2420 
2421 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2422 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
2423 		return -EINVAL;
2424 	}
2425 
2426 	if (conf == NULL) {
2427 		RTE_ETHDEV_LOG_LINE(ERR,
2428 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config",
2429 			port_id);
2430 		return -EINVAL;
2431 	}
2432 
2433 	if (conf->reserved != 0) {
2434 		RTE_ETHDEV_LOG_LINE(ERR,
2435 			       "Rx hairpin reserved field not zero");
2436 		return -EINVAL;
2437 	}
2438 
2439 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2440 	if (ret != 0)
2441 		return ret;
2442 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2443 		return -ENOTSUP;
2444 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2445 	if (nb_rx_desc == 0)
2446 		nb_rx_desc = cap.max_nb_desc;
2447 	if (nb_rx_desc > cap.max_nb_desc) {
2448 		RTE_ETHDEV_LOG_LINE(ERR,
2449 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2450 			nb_rx_desc, cap.max_nb_desc);
2451 		return -EINVAL;
2452 	}
2453 	if (conf->peer_count > cap.max_rx_2_tx) {
2454 		RTE_ETHDEV_LOG_LINE(ERR,
2455 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2456 			conf->peer_count, cap.max_rx_2_tx);
2457 		return -EINVAL;
2458 	}
2459 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2460 		RTE_ETHDEV_LOG_LINE(ERR,
2461 			"Attempt to use locked device memory for Rx queue, which is not supported");
2462 		return -EINVAL;
2463 	}
2464 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2465 		RTE_ETHDEV_LOG_LINE(ERR,
2466 			"Attempt to use DPDK memory for Rx queue, which is not supported");
2467 		return -EINVAL;
2468 	}
2469 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2470 		RTE_ETHDEV_LOG_LINE(ERR,
2471 			"Attempt to use mutually exclusive memory settings for Rx queue");
2472 		return -EINVAL;
2473 	}
2474 	if (conf->force_memory &&
2475 	    !conf->use_locked_device_memory &&
2476 	    !conf->use_rte_memory) {
2477 		RTE_ETHDEV_LOG_LINE(ERR,
2478 			"Attempt to force Rx queue memory settings, but none is set");
2479 		return -EINVAL;
2480 	}
2481 	if (conf->peer_count == 0) {
2482 		RTE_ETHDEV_LOG_LINE(ERR,
2483 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2484 			conf->peer_count);
2485 		return -EINVAL;
2486 	}
2487 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2488 	     cap.max_nb_queues != UINT16_MAX; i++) {
2489 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2490 			count++;
2491 	}
2492 	if (count > cap.max_nb_queues) {
2493 		RTE_ETHDEV_LOG_LINE(ERR, "Too many Rx hairpin queues, max is %d",
2494 			cap.max_nb_queues);
2495 		return -EINVAL;
2496 	}
2497 	if (dev->data->dev_started)
2498 		return -EBUSY;
2499 	eth_dev_rxq_release(dev, rx_queue_id);
2500 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2501 						      nb_rx_desc, conf);
2502 	if (ret == 0)
2503 		dev->data->rx_queue_state[rx_queue_id] =
2504 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2505 	ret = eth_err(port_id, ret);
2506 
2507 	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2508 					     conf, ret);
2509 
2510 	return ret;
2511 }
2512 
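/*
 * Editor's note: sketch (not part of this file) of a single-peer Rx hairpin
 * queue bound to a Tx queue of the same port; nb_rx_desc of 0 selects the
 * driver maximum as implemented above. The "rx_queue"/"peer_tx_queue"
 * indices are illustrative:
 *
 *	struct rte_eth_hairpin_conf hconf;
 *
 *	memset(&hconf, 0, sizeof(hconf));
 *	hconf.peer_count = 1;
 *	hconf.peers[0].port = port_id;
 *	hconf.peers[0].queue = peer_tx_queue;
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, rx_queue, 0, &hconf);
 */
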
2513 int
2514 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2515 		       uint16_t nb_tx_desc, unsigned int socket_id,
2516 		       const struct rte_eth_txconf *tx_conf)
2517 {
2518 	struct rte_eth_dev *dev;
2519 	struct rte_eth_dev_info dev_info;
2520 	struct rte_eth_txconf local_conf;
2521 	int ret;
2522 
2523 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2524 	dev = &rte_eth_devices[port_id];
2525 
2526 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2527 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
2528 		return -EINVAL;
2529 	}
2530 
2531 	if (*dev->dev_ops->tx_queue_setup == NULL)
2532 		return -ENOTSUP;
2533 
2534 	if (tx_conf != NULL &&
2535 	   (tx_conf->reserved_64s[0] != 0 ||
2536 	    tx_conf->reserved_64s[1] != 0 ||
2537 	    tx_conf->reserved_ptrs[0] != NULL ||
2538 	    tx_conf->reserved_ptrs[1] != NULL)) {
2539 		RTE_ETHDEV_LOG_LINE(ERR, "Tx conf reserved fields not zero");
2540 		return -EINVAL;
2541 	}
2542 
2543 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2544 	if (ret != 0)
2545 		return ret;
2546 
2547 	/* Use default specified by driver, if nb_tx_desc is zero */
2548 	if (nb_tx_desc == 0) {
2549 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2550 		/* If driver default is zero, fall back on EAL default */
2551 		if (nb_tx_desc == 0)
2552 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2553 	}
2554 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2555 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2556 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2557 		RTE_ETHDEV_LOG_LINE(ERR,
2558 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu",
2559 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2560 			dev_info.tx_desc_lim.nb_min,
2561 			dev_info.tx_desc_lim.nb_align);
2562 		return -EINVAL;
2563 	}
2564 
2565 	if (dev->data->dev_started &&
2566 		!(dev_info.dev_capa &
2567 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2568 		return -EBUSY;
2569 
2570 	if (dev->data->dev_started &&
2571 		(dev->data->tx_queue_state[tx_queue_id] !=
2572 			RTE_ETH_QUEUE_STATE_STOPPED))
2573 		return -EBUSY;
2574 
2575 	eth_dev_txq_release(dev, tx_queue_id);
2576 
2577 	if (tx_conf == NULL)
2578 		tx_conf = &dev_info.default_txconf;
2579 
2580 	local_conf = *tx_conf;
2581 
2582 	/*
2583 	 * If an offload has already been enabled in
2584 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2585 	 * so there is no need to enable it on this queue again.
2586 	 * The local_conf.offloads input to the underlying PMD only carries
2587 	 * those offloads which are enabled only on this queue and
2588 	 * not on all queues.
2589 	 */
2590 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2591 
2592 	/*
2593 	 * Newly added offloads for this queue are those not enabled in
2594 	 * rte_eth_dev_configure(), and they must be of a per-queue type.
2595 	 * A pure per-port offload can't be enabled on a queue while
2596 	 * disabled on another queue. A pure per-port offload also can't
2597 	 * be newly added on any queue if it hasn't been
2598 	 * enabled in rte_eth_dev_configure().
2599 	 */
2600 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2601 	     local_conf.offloads) {
2602 		RTE_ETHDEV_LOG_LINE(ERR,
2603 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2604 			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
2605 			port_id, tx_queue_id, local_conf.offloads,
2606 			dev_info.tx_queue_offload_capa,
2607 			__func__);
2608 		return -EINVAL;
2609 	}
2610 
2611 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2612 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2613 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2614 }
2615 
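/*
 * Editor's note: the matching Tx queue call (not part of this file); as
 * with Rx, a NULL tx_conf falls back to the driver's default_txconf:
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *		rte_eth_dev_socket_id(port_id), NULL);
 */
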
2616 int
2617 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2618 			       uint16_t nb_tx_desc,
2619 			       const struct rte_eth_hairpin_conf *conf)
2620 {
2621 	struct rte_eth_dev *dev;
2622 	struct rte_eth_hairpin_cap cap;
2623 	int i;
2624 	int count;
2625 	int ret;
2626 
2627 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2628 	dev = &rte_eth_devices[port_id];
2629 
2630 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2631 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
2632 		return -EINVAL;
2633 	}
2634 
2635 	if (conf == NULL) {
2636 		RTE_ETHDEV_LOG_LINE(ERR,
2637 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config",
2638 			port_id);
2639 		return -EINVAL;
2640 	}
2641 
2642 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2643 	if (ret != 0)
2644 		return ret;
2645 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2646 		return -ENOTSUP;
2647 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2648 	if (nb_tx_desc == 0)
2649 		nb_tx_desc = cap.max_nb_desc;
2650 	if (nb_tx_desc > cap.max_nb_desc) {
2651 		RTE_ETHDEV_LOG_LINE(ERR,
2652 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2653 			nb_tx_desc, cap.max_nb_desc);
2654 		return -EINVAL;
2655 	}
2656 	if (conf->peer_count > cap.max_tx_2_rx) {
2657 		RTE_ETHDEV_LOG_LINE(ERR,
2658 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2659 			conf->peer_count, cap.max_tx_2_rx);
2660 		return -EINVAL;
2661 	}
2662 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2663 		RTE_ETHDEV_LOG_LINE(ERR,
2664 			"Attempt to use locked device memory for Tx queue, which is not supported");
2665 		return -EINVAL;
2666 	}
2667 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2668 		RTE_ETHDEV_LOG_LINE(ERR,
2669 			"Attempt to use DPDK memory for Tx queue, which is not supported");
2670 		return -EINVAL;
2671 	}
2672 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2673 		RTE_ETHDEV_LOG_LINE(ERR,
2674 			"Attempt to use mutually exclusive memory settings for Tx queue");
2675 		return -EINVAL;
2676 	}
2677 	if (conf->force_memory &&
2678 	    !conf->use_locked_device_memory &&
2679 	    !conf->use_rte_memory) {
2680 		RTE_ETHDEV_LOG_LINE(ERR,
2681 			"Attempt to force Tx queue memory settings, but none is set");
2682 		return -EINVAL;
2683 	}
2684 	if (conf->peer_count == 0) {
2685 		RTE_ETHDEV_LOG_LINE(ERR,
2686 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2687 			conf->peer_count);
2688 		return -EINVAL;
2689 	}
2690 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2691 	     cap.max_nb_queues != UINT16_MAX; i++) {
2692 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2693 			count++;
2694 	}
2695 	if (count > cap.max_nb_queues) {
2696 		RTE_ETHDEV_LOG_LINE(ERR, "Too many Tx hairpin queues, max is %d",
2697 			cap.max_nb_queues);
2698 		return -EINVAL;
2699 	}
2700 	if (dev->data->dev_started)
2701 		return -EBUSY;
2702 	eth_dev_txq_release(dev, tx_queue_id);
2703 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2704 		(dev, tx_queue_id, nb_tx_desc, conf);
2705 	if (ret == 0)
2706 		dev->data->tx_queue_state[tx_queue_id] =
2707 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2708 	ret = eth_err(port_id, ret);
2709 
2710 	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2711 					     conf, ret);
2712 
2713 	return ret;
2714 }
2715 
2716 int
2717 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2718 {
2719 	struct rte_eth_dev *dev;
2720 	int ret;
2721 
2722 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2723 	dev = &rte_eth_devices[tx_port];
2724 
2725 	if (dev->data->dev_started == 0) {
2726 		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is not started", tx_port);
2727 		return -EBUSY;
2728 	}
2729 
2730 	if (*dev->dev_ops->hairpin_bind == NULL)
2731 		return -ENOTSUP;
2732 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2733 	if (ret != 0)
2734 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to bind hairpin Tx %d"
2735 			       " to Rx %d (%d - all ports)",
2736 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2737 
2738 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2739 
2740 	return ret;
2741 }
2742 
2743 int
2744 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2745 {
2746 	struct rte_eth_dev *dev;
2747 	int ret;
2748 
2749 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2750 	dev = &rte_eth_devices[tx_port];
2751 
2752 	if (dev->data->dev_started == 0) {
2753 		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is already stopped", tx_port);
2754 		return -EBUSY;
2755 	}
2756 
2757 	if (*dev->dev_ops->hairpin_unbind == NULL)
2758 		return -ENOTSUP;
2759 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2760 	if (ret != 0)
2761 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to unbind hairpin Tx %d"
2762 			       " from Rx %d (%d - all ports)",
2763 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2764 
2765 	rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret);
2766 
2767 	return ret;
2768 }
2769 
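/*
 * Editor's note: sketch (not part of this file) of manually binding hairpin
 * queues between two started ports, as used when the hairpin queues were
 * set up with manual_bind = 1. Passing RTE_MAX_ETHPORTS as rx_port is
 * assumed to mean "all peer ports", matching the log messages above:
 *
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *	...
 *	ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 */
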
2770 int
2771 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2772 			       size_t len, uint32_t direction)
2773 {
2774 	struct rte_eth_dev *dev;
2775 	int ret;
2776 
2777 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2778 	dev = &rte_eth_devices[port_id];
2779 
2780 	if (peer_ports == NULL) {
2781 		RTE_ETHDEV_LOG_LINE(ERR,
2782 			"Cannot get ethdev port %u hairpin peer ports to NULL",
2783 			port_id);
2784 		return -EINVAL;
2785 	}
2786 
2787 	if (len == 0) {
2788 		RTE_ETHDEV_LOG_LINE(ERR,
2789 			"Cannot get ethdev port %u hairpin peer ports to array with zero size",
2790 			port_id);
2791 		return -EINVAL;
2792 	}
2793 
2794 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2795 		return -ENOTSUP;
2796 
2797 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2798 						      len, direction);
2799 	if (ret < 0)
2800 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to get port %d hairpin peer %s ports",
2801 			       port_id, direction ? "Rx" : "Tx");
2802 
2803 	rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len,
2804 					     direction, ret);
2805 
2806 	return ret;
2807 }
2808 
2809 void
2810 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2811 		void *userdata __rte_unused)
2812 {
2813 	rte_pktmbuf_free_bulk(pkts, unsent);
2814 
2815 	rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent);
2816 }
2817 
2818 void
2819 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2820 		void *userdata)
2821 {
2822 	uint64_t *count = userdata;
2823 
2824 	rte_pktmbuf_free_bulk(pkts, unsent);
2825 	*count += unsent;
2826 
2827 	rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count);
2828 }
2829 
2830 int
2831 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2832 		buffer_tx_error_fn cbfn, void *userdata)
2833 {
2834 	if (buffer == NULL) {
2835 		RTE_ETHDEV_LOG_LINE(ERR,
2836 			"Cannot set Tx buffer error callback to NULL buffer");
2837 		return -EINVAL;
2838 	}
2839 
2840 	buffer->error_callback = cbfn;
2841 	buffer->error_userdata = userdata;
2842 
2843 	rte_eth_trace_tx_buffer_set_err_callback(buffer);
2844 
2845 	return 0;
2846 }
2847 
2848 int
2849 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2850 {
2851 	int ret = 0;
2852 
2853 	if (buffer == NULL) {
2854 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL buffer");
2855 		return -EINVAL;
2856 	}
2857 
2858 	buffer->size = size;
2859 	if (buffer->error_callback == NULL) {
2860 		ret = rte_eth_tx_buffer_set_err_callback(
2861 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2862 	}
2863 
2864 	rte_eth_trace_tx_buffer_init(buffer, size, ret);
2865 
2866 	return ret;
2867 }
2868 
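/*
 * Editor's note: usage sketch (not part of this file) for the Tx buffering
 * helpers above; the buffer size of 32 and queue index 0 are illustrative:
 *
 *	uint64_t drops = 0;
 *	struct rte_eth_dev_tx_buffer *txb = rte_zmalloc_socket("tx_buf",
 *		RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *		rte_eth_dev_socket_id(port_id));
 *
 *	(void)rte_eth_tx_buffer_init(txb, 32);
 *	(void)rte_eth_tx_buffer_set_err_callback(txb,
 *		rte_eth_tx_buffer_count_callback, &drops);
 *	// per packet:
 *	rte_eth_tx_buffer(port_id, 0, txb, mbuf);
 *	// periodically, to push out any partial burst:
 *	rte_eth_tx_buffer_flush(port_id, 0, txb);
 */
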
2869 int
2870 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2871 {
2872 	struct rte_eth_dev *dev;
2873 	int ret;
2874 
2875 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2876 	dev = &rte_eth_devices[port_id];
2877 
2878 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2879 		return -ENOTSUP;
2880 
2881 	/* Call driver to free pending mbufs. */
2882 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2883 					       free_cnt);
2884 	ret = eth_err(port_id, ret);
2885 
2886 	rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret);
2887 
2888 	return ret;
2889 }
2890 
2891 int
2892 rte_eth_promiscuous_enable(uint16_t port_id)
2893 {
2894 	struct rte_eth_dev *dev;
2895 	int diag = 0;
2896 
2897 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2898 	dev = &rte_eth_devices[port_id];
2899 
2900 	if (dev->data->promiscuous == 1)
2901 		return 0;
2902 
2903 	if (*dev->dev_ops->promiscuous_enable == NULL)
2904 		return -ENOTSUP;
2905 
2906 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2907 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2908 
2909 	diag = eth_err(port_id, diag);
2910 
2911 	rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous,
2912 					 diag);
2913 
2914 	return diag;
2915 }
2916 
2917 int
2918 rte_eth_promiscuous_disable(uint16_t port_id)
2919 {
2920 	struct rte_eth_dev *dev;
2921 	int diag = 0;
2922 
2923 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2924 	dev = &rte_eth_devices[port_id];
2925 
2926 	if (dev->data->promiscuous == 0)
2927 		return 0;
2928 
2929 	if (*dev->dev_ops->promiscuous_disable == NULL)
2930 		return -ENOTSUP;
2931 
2932 	dev->data->promiscuous = 0;
2933 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2934 	if (diag != 0)
2935 		dev->data->promiscuous = 1;
2936 
2937 	diag = eth_err(port_id, diag);
2938 
2939 	rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous,
2940 					  diag);
2941 
2942 	return diag;
2943 }
2944 
2945 int
2946 rte_eth_promiscuous_get(uint16_t port_id)
2947 {
2948 	struct rte_eth_dev *dev;
2949 
2950 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2951 	dev = &rte_eth_devices[port_id];
2952 
2953 	rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous);
2954 
2955 	return dev->data->promiscuous;
2956 }
2957 
2958 int
2959 rte_eth_allmulticast_enable(uint16_t port_id)
2960 {
2961 	struct rte_eth_dev *dev;
2962 	int diag;
2963 
2964 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2965 	dev = &rte_eth_devices[port_id];
2966 
2967 	if (dev->data->all_multicast == 1)
2968 		return 0;
2969 
2970 	if (*dev->dev_ops->allmulticast_enable == NULL)
2971 		return -ENOTSUP;
2972 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2973 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2974 
2975 	diag = eth_err(port_id, diag);
2976 
2977 	rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast,
2978 					  diag);
2979 
2980 	return diag;
2981 }
2982 
2983 int
2984 rte_eth_allmulticast_disable(uint16_t port_id)
2985 {
2986 	struct rte_eth_dev *dev;
2987 	int diag;
2988 
2989 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2990 	dev = &rte_eth_devices[port_id];
2991 
2992 	if (dev->data->all_multicast == 0)
2993 		return 0;
2994 
2995 	if (*dev->dev_ops->allmulticast_disable == NULL)
2996 		return -ENOTSUP;
2997 	dev->data->all_multicast = 0;
2998 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2999 	if (diag != 0)
3000 		dev->data->all_multicast = 1;
3001 
3002 	diag = eth_err(port_id, diag);
3003 
3004 	rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast,
3005 					   diag);
3006 
3007 	return diag;
3008 }
3009 
3010 int
3011 rte_eth_allmulticast_get(uint16_t port_id)
3012 {
3013 	struct rte_eth_dev *dev;
3014 
3015 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3016 	dev = &rte_eth_devices[port_id];
3017 
3018 	rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast);
3019 
3020 	return dev->data->all_multicast;
3021 }
3022 
3023 int
3024 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
3025 {
3026 	struct rte_eth_dev *dev;
3027 
3028 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3029 	dev = &rte_eth_devices[port_id];
3030 
3031 	if (eth_link == NULL) {
3032 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
3033 			port_id);
3034 		return -EINVAL;
3035 	}
3036 
3037 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
3038 		rte_eth_linkstatus_get(dev, eth_link);
3039 	else {
3040 		if (*dev->dev_ops->link_update == NULL)
3041 			return -ENOTSUP;
3042 		(*dev->dev_ops->link_update)(dev, 1);
3043 		*eth_link = dev->data->dev_link;
3044 	}
3045 
3046 	rte_eth_trace_link_get(port_id, eth_link);
3047 
3048 	return 0;
3049 }
3050 
3051 int
3052 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
3053 {
3054 	struct rte_eth_dev *dev;
3055 
3056 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3057 	dev = &rte_eth_devices[port_id];
3058 
3059 	if (eth_link == NULL) {
3060 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
3061 			port_id);
3062 		return -EINVAL;
3063 	}
3064 
3065 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
3066 		rte_eth_linkstatus_get(dev, eth_link);
3067 	else {
3068 		if (*dev->dev_ops->link_update == NULL)
3069 			return -ENOTSUP;
3070 		(*dev->dev_ops->link_update)(dev, 0);
3071 		*eth_link = dev->data->dev_link;
3072 	}
3073 
3074 	rte_eth_trace_link_get_nowait(port_id, eth_link);
3075 
3076 	return 0;
3077 }
3078 
3079 const char *
3080 rte_eth_link_speed_to_str(uint32_t link_speed)
3081 {
3082 	const char *ret;
3083 
3084 	switch (link_speed) {
3085 	case RTE_ETH_SPEED_NUM_NONE:
3086 		ret = "None";
3087 		break;
3088 	case RTE_ETH_SPEED_NUM_10M:
3089 		ret = "10 Mbps";
3090 		break;
3091 	case RTE_ETH_SPEED_NUM_100M:
3092 		ret = "100 Mbps";
3093 		break;
3094 	case RTE_ETH_SPEED_NUM_1G:
3095 		ret = "1 Gbps";
3096 		break;
3097 	case RTE_ETH_SPEED_NUM_2_5G:
3098 		ret = "2.5 Gbps";
3099 		break;
3100 	case RTE_ETH_SPEED_NUM_5G:
3101 		ret = "5 Gbps";
3102 		break;
3103 	case RTE_ETH_SPEED_NUM_10G:
3104 		ret = "10 Gbps";
3105 		break;
3106 	case RTE_ETH_SPEED_NUM_20G:
3107 		ret = "20 Gbps";
3108 		break;
3109 	case RTE_ETH_SPEED_NUM_25G:
3110 		ret = "25 Gbps";
3111 		break;
3112 	case RTE_ETH_SPEED_NUM_40G:
3113 		ret = "40 Gbps";
3114 		break;
3115 	case RTE_ETH_SPEED_NUM_50G:
3116 		ret = "50 Gbps";
3117 		break;
3118 	case RTE_ETH_SPEED_NUM_56G:
3119 		ret = "56 Gbps";
3120 		break;
3121 	case RTE_ETH_SPEED_NUM_100G:
3122 		ret = "100 Gbps";
3123 		break;
3124 	case RTE_ETH_SPEED_NUM_200G:
3125 		ret = "200 Gbps";
3126 		break;
3127 	case RTE_ETH_SPEED_NUM_400G:
3128 		ret = "400 Gbps";
3129 		break;
3130 	case RTE_ETH_SPEED_NUM_UNKNOWN:
3131 		ret = "Unknown";
3132 		break;
3133 	default:
3134 		ret = "Invalid";
3135 	}
3136 
3137 	rte_eth_trace_link_speed_to_str(link_speed, ret);
3138 
3139 	return ret;
3140 }
3141 
3142 int
3143 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
3144 {
3145 	int ret;
3146 
3147 	if (str == NULL) {
3148 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert link to NULL string");
3149 		return -EINVAL;
3150 	}
3151 
3152 	if (len == 0) {
3153 		RTE_ETHDEV_LOG_LINE(ERR,
3154 			"Cannot convert link to string with zero size");
3155 		return -EINVAL;
3156 	}
3157 
3158 	if (eth_link == NULL) {
3159 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert to string from NULL link");
3160 		return -EINVAL;
3161 	}
3162 
3163 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
3164 		ret = snprintf(str, len, "Link down");
3165 	else
3166 		ret = snprintf(str, len, "Link up at %s %s %s",
3167 			rte_eth_link_speed_to_str(eth_link->link_speed),
3168 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
3169 			"FDX" : "HDX",
3170 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
3171 			"Autoneg" : "Fixed");
3172 
3173 	rte_eth_trace_link_to_str(len, eth_link, str, ret);
3174 
3175 	return ret;
3176 }
3177 
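/*
 * Editor's note: sketch (not part of this file) combining the link query
 * and formatting helpers above:
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *	    rte_eth_link_to_str(text, sizeof(text), &link) >= 0)
 *		printf("Port %u: %s\n", port_id, text);
 */
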
3178 int
3179 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
3180 {
3181 	struct rte_eth_dev *dev;
3182 	int ret;
3183 
3184 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3185 	dev = &rte_eth_devices[port_id];
3186 
3187 	if (stats == NULL) {
3188 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u stats to NULL",
3189 			port_id);
3190 		return -EINVAL;
3191 	}
3192 
3193 	memset(stats, 0, sizeof(*stats));
3194 
3195 	if (*dev->dev_ops->stats_get == NULL)
3196 		return -ENOTSUP;
3197 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
3198 	ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
3199 
3200 	rte_eth_trace_stats_get(port_id, stats, ret);
3201 
3202 	return ret;
3203 }
3204 
3205 int
3206 rte_eth_stats_reset(uint16_t port_id)
3207 {
3208 	struct rte_eth_dev *dev;
3209 	int ret;
3210 
3211 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3212 	dev = &rte_eth_devices[port_id];
3213 
3214 	if (*dev->dev_ops->stats_reset == NULL)
3215 		return -ENOTSUP;
3216 	ret = (*dev->dev_ops->stats_reset)(dev);
3217 	if (ret != 0)
3218 		return eth_err(port_id, ret);
3219 
3220 	dev->data->rx_mbuf_alloc_failed = 0;
3221 
3222 	rte_eth_trace_stats_reset(port_id);
3223 
3224 	return 0;
3225 }
3226 
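/*
 * Editor's note: basic stats sketch (not part of this file):
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %"PRIu64" tx %"PRIu64" missed %"PRIu64"\n",
 *			stats.ipackets, stats.opackets, stats.imissed);
 *	(void)rte_eth_stats_reset(port_id);
 */
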
3227 static inline int
3228 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
3229 {
3230 	uint16_t nb_rxqs, nb_txqs;
3231 	int count;
3232 
3233 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3234 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3235 
3236 	count = RTE_NB_STATS;
3237 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
3238 		count += nb_rxqs * RTE_NB_RXQ_STATS;
3239 		count += nb_txqs * RTE_NB_TXQ_STATS;
3240 	}
3241 
3242 	return count;
3243 }
3244 
3245 static int
3246 eth_dev_get_xstats_count(uint16_t port_id)
3247 {
3248 	struct rte_eth_dev *dev;
3249 	int count;
3250 
3251 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252 	dev = &rte_eth_devices[port_id];
3253 	if (dev->dev_ops->xstats_get_names != NULL) {
3254 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
3255 		if (count < 0)
3256 			return eth_err(port_id, count);
3257 	} else
3258 		count = 0;
3259 
3260 
3261 	count += eth_dev_get_xstats_basic_count(dev);
3262 
3263 	return count;
3264 }
3265 
3266 int
3267 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3268 		uint64_t *id)
3269 {
3270 	int cnt_xstats, idx_xstat;
3271 
3272 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3273 
3274 	if (xstat_name == NULL) {
3275 		RTE_ETHDEV_LOG_LINE(ERR,
3276 			"Cannot get ethdev port %u xstats ID from NULL xstat name",
3277 			port_id);
3278 		return -ENOMEM;
3279 	}
3280 
3281 	if (id == NULL) {
3282 		RTE_ETHDEV_LOG_LINE(ERR,
3283 			"Cannot get ethdev port %u xstats ID to NULL",
3284 			port_id);
3285 		return -ENOMEM;
3286 	}
3287 
3288 	/* Get count */
3289 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
3290 	if (cnt_xstats < 0) {
3291 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get count of xstats");
3292 		return -ENODEV;
3293 	}
3294 
3295 	/* Get id-name lookup table */
3296 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
3297 
3298 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
3299 			port_id, xstats_names, cnt_xstats, NULL)) {
3300 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get xstats lookup");
3301 		return -1;
3302 	}
3303 
3304 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
3305 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
3306 			*id = idx_xstat;
3307 
3308 			rte_eth_trace_xstats_get_id_by_name(port_id,
3309 							    xstat_name, *id);
3310 
3311 			return 0;
3312 		}
3313 	}
3314 
3315 	return -EINVAL;
3316 }
3317 
3318 /* retrieve basic stats names */
3319 static int
3320 eth_basic_stats_get_names(struct rte_eth_dev *dev,
3321 	struct rte_eth_xstat_name *xstats_names)
3322 {
3323 	int cnt_used_entries = 0;
3324 	uint32_t idx, id_queue;
3325 	uint16_t num_q;
3326 
3327 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
3328 		strlcpy(xstats_names[cnt_used_entries].name,
3329 			eth_dev_stats_strings[idx].name,
3330 			sizeof(xstats_names[0].name));
3331 		cnt_used_entries++;
3332 	}
3333 
3334 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3335 		return cnt_used_entries;
3336 
3337 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3338 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3339 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
3340 			snprintf(xstats_names[cnt_used_entries].name,
3341 				sizeof(xstats_names[0].name),
3342 				"rx_q%u_%s",
3343 				id_queue, eth_dev_rxq_stats_strings[idx].name);
3344 			cnt_used_entries++;
3345 		}
3346 
3347 	}
3348 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3349 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3350 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
3351 			snprintf(xstats_names[cnt_used_entries].name,
3352 				sizeof(xstats_names[0].name),
3353 				"tx_q%u_%s",
3354 				id_queue, eth_dev_txq_stats_strings[idx].name);
3355 			cnt_used_entries++;
3356 		}
3357 	}
3358 	return cnt_used_entries;
3359 }
3360 
3361 /* retrieve ethdev extended statistics names */
3362 int
3363 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3364 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3365 	uint64_t *ids)
3366 {
3367 	struct rte_eth_xstat_name *xstats_names_copy;
3368 	unsigned int no_basic_stat_requested = 1;
3369 	unsigned int no_ext_stat_requested = 1;
3370 	unsigned int expected_entries;
3371 	unsigned int basic_count;
3372 	struct rte_eth_dev *dev;
3373 	unsigned int i;
3374 	int ret;
3375 
3376 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3377 	dev = &rte_eth_devices[port_id];
3378 
3379 	basic_count = eth_dev_get_xstats_basic_count(dev);
3380 	ret = eth_dev_get_xstats_count(port_id);
3381 	if (ret < 0)
3382 		return ret;
3383 	expected_entries = (unsigned int)ret;
3384 
3385 	/* Return max number of stats if no ids given */
3386 	if (!ids) {
3387 		if (!xstats_names)
3388 			return expected_entries;
3389 		else if (size < expected_entries)
3390 			return expected_entries;
3391 	}
3392 
3393 	if (ids && !xstats_names)
3394 		return -EINVAL;
3395 
3396 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3397 		uint64_t ids_copy[size];
3398 
3399 		for (i = 0; i < size; i++) {
3400 			if (ids[i] < basic_count) {
3401 				no_basic_stat_requested = 0;
3402 				break;
3403 			}
3404 
3405 			/*
3406 			 * Convert ids to xstats ids that PMD knows.
3407 			 * ids known by user are basic + extended stats.
3408 			 */
3409 			ids_copy[i] = ids[i] - basic_count;
3410 		}
3411 
3412 		if (no_basic_stat_requested)
3413 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3414 					ids_copy, xstats_names, size);
3415 	}
3416 
3417 	/* Retrieve all stats */
3418 	if (!ids) {
3419 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3420 				expected_entries);
3421 		if (num_stats < 0 || num_stats > (int)expected_entries)
3422 			return num_stats;
3423 		else
3424 			return expected_entries;
3425 	}
3426 
3427 	xstats_names_copy = calloc(expected_entries,
3428 		sizeof(struct rte_eth_xstat_name));
3429 
3430 	if (!xstats_names_copy) {
3431 		RTE_ETHDEV_LOG_LINE(ERR, "Can't allocate memory");
3432 		return -ENOMEM;
3433 	}
3434 
3435 	if (ids) {
3436 		for (i = 0; i < size; i++) {
3437 			if (ids[i] >= basic_count) {
3438 				no_ext_stat_requested = 0;
3439 				break;
3440 			}
3441 		}
3442 	}
3443 
3444 	/* Fill xstats_names_copy structure */
3445 	if (ids && no_ext_stat_requested) {
3446 		eth_basic_stats_get_names(dev, xstats_names_copy);
3447 	} else {
3448 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3449 			expected_entries);
3450 		if (ret < 0) {
3451 			free(xstats_names_copy);
3452 			return ret;
3453 		}
3454 	}
3455 
3456 	/* Filter stats */
3457 	for (i = 0; i < size; i++) {
3458 		if (ids[i] >= expected_entries) {
3459 			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
3460 			free(xstats_names_copy);
3461 			return -1;
3462 		}
3463 		xstats_names[i] = xstats_names_copy[ids[i]];
3464 
3465 		rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i],
3466 						     ids[i]);
3467 	}
3468 
3469 	free(xstats_names_copy);
3470 	return size;
3471 }
3472 
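/*
 * Editor's note: sketch (not part of this file) of reading a single xstat
 * by name using the id lookup above; "rx_good_packets" is one of the basic
 * stats named in this file:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id,
 *			"rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %"PRIu64"\n", value);
 */
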
3473 int
3474 rte_eth_xstats_get_names(uint16_t port_id,
3475 	struct rte_eth_xstat_name *xstats_names,
3476 	unsigned int size)
3477 {
3478 	struct rte_eth_dev *dev;
3479 	int cnt_used_entries;
3480 	int cnt_expected_entries;
3481 	int cnt_driver_entries;
3482 	int i;
3483 
3484 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3485 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
3486 			(int)size < cnt_expected_entries)
3487 		return cnt_expected_entries;
3488 
3489 	/* port_id checked in eth_dev_get_xstats_count() */
3490 	dev = &rte_eth_devices[port_id];
3491 
3492 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3493 
3494 	if (dev->dev_ops->xstats_get_names != NULL) {
3495 		/* If there are any driver-specific xstats, append them
3496 		 * to end of list.
3497 		 * to the end of the list.
3498 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3499 			dev,
3500 			xstats_names + cnt_used_entries,
3501 			size - cnt_used_entries);
3502 		if (cnt_driver_entries < 0)
3503 			return eth_err(port_id, cnt_driver_entries);
3504 		cnt_used_entries += cnt_driver_entries;
3505 	}
3506 
3507 	for (i = 0; i < cnt_used_entries; i++)
3508 		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3509 					       size, cnt_used_entries);
3510 
3511 	return cnt_used_entries;
3512 }
3513 
3514 
3515 static int
3516 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3517 {
3518 	struct rte_eth_dev *dev;
3519 	struct rte_eth_stats eth_stats;
3520 	unsigned int count = 0, i, q;
3521 	uint64_t val, *stats_ptr;
3522 	uint16_t nb_rxqs, nb_txqs;
3523 	int ret;
3524 
3525 	ret = rte_eth_stats_get(port_id, &eth_stats);
3526 	if (ret < 0)
3527 		return ret;
3528 
3529 	dev = &rte_eth_devices[port_id];
3530 
3531 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3532 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3533 
3534 	/* global stats */
3535 	for (i = 0; i < RTE_NB_STATS; i++) {
3536 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3537 					eth_dev_stats_strings[i].offset);
3538 		val = *stats_ptr;
3539 		xstats[count++].value = val;
3540 	}
3541 
3542 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3543 		return count;
3544 
3545 	/* per-rxq stats */
3546 	for (q = 0; q < nb_rxqs; q++) {
3547 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3548 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3549 					eth_dev_rxq_stats_strings[i].offset +
3550 					q * sizeof(uint64_t));
3551 			val = *stats_ptr;
3552 			xstats[count++].value = val;
3553 		}
3554 	}
3555 
3556 	/* per-txq stats */
3557 	for (q = 0; q < nb_txqs; q++) {
3558 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3559 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3560 					eth_dev_txq_stats_strings[i].offset +
3561 					q * sizeof(uint64_t));
3562 			val = *stats_ptr;
3563 			xstats[count++].value = val;
3564 		}
3565 	}
3566 	return count;
3567 }
3568 
3569 /* retrieve ethdev extended statistics */
3570 int
3571 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3572 			 uint64_t *values, unsigned int size)
3573 {
3574 	unsigned int no_basic_stat_requested = 1;
3575 	unsigned int no_ext_stat_requested = 1;
3576 	unsigned int num_xstats_filled;
3577 	unsigned int basic_count;
3578 	uint16_t expected_entries;
3579 	struct rte_eth_dev *dev;
3580 	unsigned int i;
3581 	int ret;
3582 
3583 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3584 	dev = &rte_eth_devices[port_id];
3585 
3586 	ret = eth_dev_get_xstats_count(port_id);
3587 	if (ret < 0)
3588 		return ret;
3589 	expected_entries = (uint16_t)ret;
3590 	struct rte_eth_xstat xstats[expected_entries];
3591 	basic_count = eth_dev_get_xstats_basic_count(dev);
3592 
3593 	/* Return max number of stats if no ids given */
3594 	if (!ids) {
3595 		if (!values)
3596 			return expected_entries;
3597 		else if (size < expected_entries)
3598 			return expected_entries;
3599 	}
3600 
3601 	if (ids && !values)
3602 		return -EINVAL;
3603 
3604 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3606 		uint64_t ids_copy[size];
3607 
3608 		for (i = 0; i < size; i++) {
3609 			if (ids[i] < basic_count) {
3610 				no_basic_stat_requested = 0;
3611 				break;
3612 			}
3613 
3614 			/*
3615 			 * Convert ids to xstats ids that PMD knows.
3616 			 * ids known by user are basic + extended stats.
3617 			 */
3618 			ids_copy[i] = ids[i] - basic_count;
3619 		}
3620 
3621 		if (no_basic_stat_requested)
3622 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3623 					values, size);
3624 	}
3625 
3626 	if (ids) {
3627 		for (i = 0; i < size; i++) {
3628 			if (ids[i] >= basic_count) {
3629 				no_ext_stat_requested = 0;
3630 				break;
3631 			}
3632 		}
3633 	}
3634 
3635 	/* Fill the xstats structure */
3636 	if (ids && no_ext_stat_requested)
3637 		ret = eth_basic_stats_get(port_id, xstats);
3638 	else
3639 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3640 
3641 	if (ret < 0)
3642 		return ret;
3643 	num_xstats_filled = (unsigned int)ret;
3644 
3645 	/* Return all stats */
3646 	if (!ids) {
3647 		for (i = 0; i < num_xstats_filled; i++)
3648 			values[i] = xstats[i].value;
3649 		return expected_entries;
3650 	}
3651 
3652 	/* Filter stats */
3653 	for (i = 0; i < size; i++) {
3654 		if (ids[i] >= expected_entries) {
3655 			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
3656 			return -1;
3657 		}
3658 		values[i] = xstats[ids[i]].value;
3659 	}
3660 
3661 	rte_eth_trace_xstats_get_by_id(port_id, ids, values, size);
3662 
3663 	return size;
3664 }
3665 
3666 int
3667 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3668 	unsigned int n)
3669 {
3670 	struct rte_eth_dev *dev;
3671 	unsigned int count, i;
3672 	signed int xcount = 0;
3673 	int ret;
3674 
3675 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3676 	if (xstats == NULL && n > 0)
3677 		return -EINVAL;
3678 	dev = &rte_eth_devices[port_id];
3679 
3680 	count = eth_dev_get_xstats_basic_count(dev);
3681 
3682 	/* implemented by the driver */
3683 	if (dev->dev_ops->xstats_get != NULL) {
3684 		/* Retrieve the xstats from the driver at the end of the
3685 		 * xstats struct.
3686 		 */
3687 		xcount = (*dev->dev_ops->xstats_get)(dev,
3688 				     (n > count) ? xstats + count : NULL,
3689 				     (n > count) ? n - count : 0);
3690 
3691 		if (xcount < 0)
3692 			return eth_err(port_id, xcount);
3693 	}
3694 
3695 	if (n < count + xcount || xstats == NULL)
3696 		return count + xcount;
3697 
3698 	/* now fill the xstats structure */
3699 	ret = eth_basic_stats_get(port_id, xstats);
3700 	if (ret < 0)
3701 		return ret;
3702 	count = ret;
3703 
3704 	for (i = 0; i < count; i++)
3705 		xstats[i].id = i;
3706 	/* add an offset to driver-specific stats */
3707 	for ( ; i < count + xcount; i++)
3708 		xstats[i].id += count;
3709 
3710 	for (i = 0; i < n; i++)
3711 		rte_eth_trace_xstats_get(port_id, xstats[i]);
3712 
3713 	return count + xcount;
3714 }
3715 
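/*
 * Editor's note: sketch (not part of this file) of the usual two-pass
 * pattern for dumping all xstats: query the count with NULL buffers,
 * allocate, then fetch names and values:
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *		struct rte_eth_xstat_name *names =
 *			malloc(n * sizeof(*names));
 *
 *		if (xs != NULL && names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *		    rte_eth_xstats_get(port_id, xs, n) == n)
 *			for (i = 0; i < n; i++)
 *				printf("%s: %"PRIu64"\n",
 *					names[xs[i].id].name, xs[i].value);
 *		free(xs);
 *		free(names);
 *	}
 */
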
3716 /* reset ethdev extended statistics */
3717 int
3718 rte_eth_xstats_reset(uint16_t port_id)
3719 {
3720 	struct rte_eth_dev *dev;
3721 
3722 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3723 	dev = &rte_eth_devices[port_id];
3724 
3725 	/* implemented by the driver */
3726 	if (dev->dev_ops->xstats_reset != NULL) {
3727 		int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3728 
3729 		rte_eth_trace_xstats_reset(port_id, ret);
3730 
3731 		return ret;
3732 	}
3733 
3734 	/* fallback to default */
3735 	return rte_eth_stats_reset(port_id);
3736 }
3737 
3738 static int
3739 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3740 		uint8_t stat_idx, uint8_t is_rx)
3741 {
3742 	struct rte_eth_dev *dev;
3743 
3744 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3745 	dev = &rte_eth_devices[port_id];
3746 
3747 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3748 		return -EINVAL;
3749 
3750 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3751 		return -EINVAL;
3752 
3753 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3754 		return -EINVAL;
3755 
3756 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3757 		return -ENOTSUP;
3758 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3759 }
3760 
3761 int
3762 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3763 		uint8_t stat_idx)
3764 {
3765 	int ret;
3766 
3767 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3768 						tx_queue_id,
3769 						stat_idx, STAT_QMAP_TX));
3770 
3771 	rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id,
3772 						    stat_idx, ret);
3773 
3774 	return ret;
3775 }
3776 
3777 int
3778 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3779 		uint8_t stat_idx)
3780 {
3781 	int ret;
3782 
3783 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3784 						rx_queue_id,
3785 						stat_idx, STAT_QMAP_RX));
3786 
3787 	rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id,
3788 						    stat_idx, ret);
3789 
3790 	return ret;
3791 }
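
/*
 * Illustrative sketch (editor's addition): map Rx queue 0 to per-queue
 * stats counter 0 and read the counter back through the basic stats.
 * Only drivers implementing queue_stats_mapping_set support this; the
 * rest return -ENOTSUP. The function name is hypothetical.
 */
static __rte_unused int
ethdev_doc_example_map_rxq0(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);
	if (ret != 0)
		return ret;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;

	printf("rxq0: %" PRIu64 " packets\n", stats.q_ipackets[0]);
	return 0;
}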
3792 
3793 int
3794 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3795 {
3796 	struct rte_eth_dev *dev;
3797 	int ret;
3798 
3799 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3800 	dev = &rte_eth_devices[port_id];
3801 
3802 	if (fw_version == NULL && fw_size > 0) {
3803 		RTE_ETHDEV_LOG_LINE(ERR,
3804 			"Cannot get ethdev port %u FW version to NULL when string size is non zero",
3805 			port_id);
3806 		return -EINVAL;
3807 	}
3808 
3809 	if (*dev->dev_ops->fw_version_get == NULL)
3810 		return -ENOTSUP;
3811 	ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3812 							fw_version, fw_size));
3813 
3814 	rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret);
3815 
3816 	return ret;
3817 }
3818 
3819 int
3820 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3821 {
3822 	struct rte_eth_dev *dev;
3823 	const struct rte_eth_desc_lim lim = {
3824 		.nb_max = UINT16_MAX,
3825 		.nb_min = 0,
3826 		.nb_align = 1,
3827 		.nb_seg_max = UINT16_MAX,
3828 		.nb_mtu_seg_max = UINT16_MAX,
3829 	};
3830 	int diag;
3831 
3832 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3833 	dev = &rte_eth_devices[port_id];
3834 
3835 	if (dev_info == NULL) {
3836 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u info to NULL",
3837 			port_id);
3838 		return -EINVAL;
3839 	}
3840 
3841 	/*
3842 	 * Zero dev_info up front so the caller never sees stale fields,
3843 	 * even if a later step fails or the driver fills it partially.
3844 	 */
3845 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3846 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3847 
3848 	dev_info->rx_desc_lim = lim;
3849 	dev_info->tx_desc_lim = lim;
3850 	dev_info->device = dev->device;
3851 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3852 		RTE_ETHER_CRC_LEN;
3853 	dev_info->max_mtu = UINT16_MAX;
3854 	dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT);
3855 	dev_info->max_rx_bufsize = UINT32_MAX;
3856 
3857 	if (*dev->dev_ops->dev_infos_get == NULL)
3858 		return -ENOTSUP;
3859 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3860 	if (diag != 0) {
3861 		/* Cleanup already filled in device information */
3862 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3863 		return eth_err(port_id, diag);
3864 	}
3865 
3866 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3867 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3868 			RTE_MAX_QUEUES_PER_PORT);
3869 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3870 			RTE_MAX_QUEUES_PER_PORT);
3871 
3872 	dev_info->driver_name = dev->device->driver->name;
3873 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3874 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3875 
3876 	dev_info->dev_flags = &dev->data->dev_flags;
3877 
3878 	rte_ethdev_trace_info_get(port_id, dev_info);
3879 
3880 	return 0;
3881 }
3882 
3883 int
3884 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3885 {
3886 	struct rte_eth_dev *dev;
3887 
3888 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3889 	dev = &rte_eth_devices[port_id];
3890 
3891 	if (dev_conf == NULL) {
3892 		RTE_ETHDEV_LOG_LINE(ERR,
3893 			"Cannot get ethdev port %u configuration to NULL",
3894 			port_id);
3895 		return -EINVAL;
3896 	}
3897 
3898 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3899 
3900 	rte_ethdev_trace_conf_get(port_id, dev_conf);
3901 
3902 	return 0;
3903 }
3904 
3905 int
3906 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3907 				 uint32_t *ptypes, int num)
3908 {
3909 	size_t i;
3910 	int j;
3911 	struct rte_eth_dev *dev;
3912 	const uint32_t *all_ptypes;
3913 	size_t no_of_elements = 0;
3914 
3915 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3916 	dev = &rte_eth_devices[port_id];
3917 
3918 	if (ptypes == NULL && num > 0) {
3919 		RTE_ETHDEV_LOG_LINE(ERR,
3920 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero",
3921 			port_id);
3922 		return -EINVAL;
3923 	}
3924 
3925 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3926 		return 0;
3927 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
3928 							       &no_of_elements);
3929 
3930 	if (!all_ptypes)
3931 		return 0;
3932 
3933 	for (i = 0, j = 0; i < no_of_elements; ++i)
3934 		if (all_ptypes[i] & ptype_mask) {
3935 			if (j < num) {
3936 				ptypes[j] = all_ptypes[i];
3937 
3938 				rte_ethdev_trace_get_supported_ptypes(port_id,
3939 						j, num, ptypes[j]);
3940 			}
3941 			j++;
3942 		}
3943 
3944 	return j;
3945 }
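
/*
 * Illustrative sketch (editor's addition): query how many packet types
 * the driver can recognise (first call, num == 0), then fetch and print
 * them. The function name is hypothetical.
 */
static __rte_unused void
ethdev_doc_example_show_ptypes(uint16_t port_id)
{
	uint32_t *ptypes;
	char name[64];
	int cnt, i;

	cnt = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
			NULL, 0);
	if (cnt <= 0)
		return;

	ptypes = malloc(sizeof(*ptypes) * cnt);
	if (ptypes == NULL)
		return;

	cnt = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
			ptypes, cnt);
	for (i = 0; i < cnt; i++) {
		rte_get_ptype_name(ptypes[i], name, sizeof(name));
		printf("%s\n", name);
	}
	free(ptypes);
}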
3946 
3947 int
3948 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3949 				 uint32_t *set_ptypes, unsigned int num)
3950 {
3951 	const uint32_t valid_ptype_masks[] = {
3952 		RTE_PTYPE_L2_MASK,
3953 		RTE_PTYPE_L3_MASK,
3954 		RTE_PTYPE_L4_MASK,
3955 		RTE_PTYPE_TUNNEL_MASK,
3956 		RTE_PTYPE_INNER_L2_MASK,
3957 		RTE_PTYPE_INNER_L3_MASK,
3958 		RTE_PTYPE_INNER_L4_MASK,
3959 	};
3960 	const uint32_t *all_ptypes;
3961 	struct rte_eth_dev *dev;
3962 	uint32_t unused_mask;
3963 	size_t i;
3964 	unsigned int j;
3965 	int ret;
3966 	size_t no_of_elements = 0;
3967 
3968 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3969 	dev = &rte_eth_devices[port_id];
3970 
3971 	if (num > 0 && set_ptypes == NULL) {
3972 		RTE_ETHDEV_LOG_LINE(ERR,
3973 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero",
3974 			port_id);
3975 		return -EINVAL;
3976 	}
3977 
3978 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3979 			*dev->dev_ops->dev_ptypes_set == NULL) {
3980 		ret = 0;
3981 		goto ptype_unknown;
3982 	}
3983 
3984 	if (ptype_mask == 0) {
3985 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3986 				ptype_mask);
3987 		goto ptype_unknown;
3988 	}
3989 
3990 	unused_mask = ptype_mask;
3991 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3992 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3993 		if (mask && mask != valid_ptype_masks[i]) {
3994 			ret = -EINVAL;
3995 			goto ptype_unknown;
3996 		}
3997 		unused_mask &= ~valid_ptype_masks[i];
3998 	}
3999 
4000 	if (unused_mask) {
4001 		ret = -EINVAL;
4002 		goto ptype_unknown;
4003 	}
4004 
4005 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
4006 							       &no_of_elements);
4007 	if (all_ptypes == NULL) {
4008 		ret = 0;
4009 		goto ptype_unknown;
4010 	}
4011 
4012 	/*
4013 	 * Accommodate as many set_ptypes as possible. If the supplied
4014 	 * set_ptypes array is too small, fill it partially.
4015 	 */
4016 	for (i = 0, j = 0; set_ptypes != NULL &&
4017 				(i < no_of_elements); ++i) {
4018 		if (ptype_mask & all_ptypes[i]) {
4019 			if (j + 1 < num) { /* avoids unsigned wrap when num == 0 */
4020 				set_ptypes[j] = all_ptypes[i];
4021 
4022 				rte_ethdev_trace_set_ptypes(port_id, j, num,
4023 						set_ptypes[j]);
4024 
4025 				j++;
4026 				continue;
4027 			}
4028 			break;
4029 		}
4030 	}
4031 
4032 	if (set_ptypes != NULL && j < num)
4033 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
4034 
4035 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
4036 
4037 ptype_unknown:
4038 	if (num > 0)
4039 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
4040 
4041 	return ret;
4042 }
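
/*
 * Illustrative sketch (editor's addition): tell the driver that only L3
 * and L4 classification matters to the application, so it may skip the
 * other ptype layers. NULL/0 means the trimmed list is not needed back.
 */
static __rte_unused int
ethdev_doc_example_restrict_ptypes(uint16_t port_id)
{
	return rte_eth_dev_set_ptypes(port_id,
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK, NULL, 0);
}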
4043 
4044 int
4045 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
4046 	unsigned int num)
4047 {
4048 	int32_t ret;
4049 	struct rte_eth_dev *dev;
4050 	struct rte_eth_dev_info dev_info;
4051 
4052 	if (ma == NULL) {
4053 		RTE_ETHDEV_LOG_LINE(ERR, "%s: invalid parameters", __func__);
4054 		return -EINVAL;
4055 	}
4056 
4057 	/* rte_eth_dev_info_get() also validates port_id for us */
4058 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4059 	if (ret != 0)
4060 		return ret;
4061 
4062 	dev = &rte_eth_devices[port_id];
4063 	num = RTE_MIN(dev_info.max_mac_addrs, num);
4064 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
4065 
4066 	rte_eth_trace_macaddrs_get(port_id, num);
4067 
4068 	return num;
4069 }
4070 
4071 int
4072 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
4073 {
4074 	struct rte_eth_dev *dev;
4075 
4076 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4077 	dev = &rte_eth_devices[port_id];
4078 
4079 	if (mac_addr == NULL) {
4080 		RTE_ETHDEV_LOG_LINE(ERR,
4081 			"Cannot get ethdev port %u MAC address to NULL",
4082 			port_id);
4083 		return -EINVAL;
4084 	}
4085 
4086 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
4087 
4088 	rte_eth_trace_macaddr_get(port_id, mac_addr);
4089 
4090 	return 0;
4091 }
4092 
4093 int
4094 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
4095 {
4096 	struct rte_eth_dev *dev;
4097 
4098 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4099 	dev = &rte_eth_devices[port_id];
4100 
4101 	if (mtu == NULL) {
4102 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u MTU to NULL",
4103 			port_id);
4104 		return -EINVAL;
4105 	}
4106 
4107 	*mtu = dev->data->mtu;
4108 
4109 	rte_ethdev_trace_get_mtu(port_id, *mtu);
4110 
4111 	return 0;
4112 }
4113 
4114 int
4115 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
4116 {
4117 	int ret;
4118 	struct rte_eth_dev_info dev_info;
4119 	struct rte_eth_dev *dev;
4120 
4121 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4122 	dev = &rte_eth_devices[port_id];
4123 	if (*dev->dev_ops->mtu_set == NULL)
4124 		return -ENOTSUP;
4125 
4126 	/*
4127 	 * Check if the device supports dev_infos_get, if it does not
4128 	 * skip min_mtu/max_mtu validation here as this requires values
4129 	 * that are populated within the call to rte_eth_dev_info_get()
4130 	 * which relies on dev->dev_ops->dev_infos_get.
4131 	 */
4132 	if (*dev->dev_ops->dev_infos_get != NULL) {
4133 		ret = rte_eth_dev_info_get(port_id, &dev_info);
4134 		if (ret != 0)
4135 			return ret;
4136 
4137 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
4138 		if (ret != 0)
4139 			return ret;
4140 	}
4141 
4142 	if (dev->data->dev_configured == 0) {
4143 		RTE_ETHDEV_LOG_LINE(ERR,
4144 			"Port %u must be configured before MTU set",
4145 			port_id);
4146 		return -EINVAL;
4147 	}
4148 
4149 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
4150 	if (ret == 0)
4151 		dev->data->mtu = mtu;
4152 
4153 	ret = eth_err(port_id, ret);
4154 
4155 	rte_ethdev_trace_set_mtu(port_id, mtu, ret);
4156 
4157 	return ret;
4158 }
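
/*
 * Illustrative sketch (editor's addition): clamp a requested MTU to the
 * [min_mtu, max_mtu] range advertised by the driver before applying it,
 * mirroring the validation the setter performs internally. The function
 * name is hypothetical.
 */
static __rte_unused int
ethdev_doc_example_apply_mtu(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	if (mtu < info.min_mtu)
		mtu = info.min_mtu;
	else if (mtu > info.max_mtu)
		mtu = info.max_mtu;

	/* the port must already be configured, see the check above */
	return rte_eth_dev_set_mtu(port_id, mtu);
}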
4159 
4160 int
4161 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
4162 {
4163 	struct rte_eth_dev *dev;
4164 	int ret;
4165 
4166 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4167 	dev = &rte_eth_devices[port_id];
4168 
4169 	if (!(dev->data->dev_conf.rxmode.offloads &
4170 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
4171 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: VLAN-filtering disabled",
4172 			port_id);
4173 		return -ENOSYS;
4174 	}
4175 
4176 	if (vlan_id > 4095) {
4177 		RTE_ETHDEV_LOG_LINE(ERR, "Port_id=%u invalid vlan_id=%u > 4095",
4178 			port_id, vlan_id);
4179 		return -EINVAL;
4180 	}
4181 	if (*dev->dev_ops->vlan_filter_set == NULL)
4182 		return -ENOTSUP;
4183 
4184 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
4185 	if (ret == 0) {
4186 		struct rte_vlan_filter_conf *vfc;
4187 		int vidx;
4188 		int vbit;
4189 
4190 		vfc = &dev->data->vlan_filter_conf;
4191 		vidx = vlan_id / 64;
4192 		vbit = vlan_id % 64;
4193 
4194 		if (on)
4195 			vfc->ids[vidx] |= RTE_BIT64(vbit);
4196 		else
4197 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
4198 	}
4199 
4200 	ret = eth_err(port_id, ret);
4201 
4202 	rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret);
4203 
4204 	return ret;
4205 }
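
/*
 * Illustrative sketch (editor's addition): per-VLAN filtering only works
 * once RTE_ETH_RX_OFFLOAD_VLAN_FILTER has been enabled at configure time,
 * as the check above enforces. Assumes the caller has stopped the port
 * and re-creates its single Rx/Tx queue pair after reconfiguration.
 */
static __rte_unused int
ethdev_doc_example_allow_vlan(uint16_t port_id, uint16_t vlan_id)
{
	struct rte_eth_conf conf;
	int ret;

	ret = rte_eth_dev_conf_get(port_id, &conf);
	if (ret != 0)
		return ret;

	if (!(conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
		if (ret != 0)
			return ret;
	}

	return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}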
4206 
4207 int
4208 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
4209 				    int on)
4210 {
4211 	struct rte_eth_dev *dev;
4212 
4213 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4214 	dev = &rte_eth_devices[port_id];
4215 
4216 	if (rx_queue_id >= dev->data->nb_rx_queues) {
4217 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_queue_id=%u", rx_queue_id);
4218 		return -EINVAL;
4219 	}
4220 
4221 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
4222 		return -ENOTSUP;
4223 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
4224 
4225 	rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on);
4226 
4227 	return 0;
4228 }
4229 
4230 int
4231 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4232 				enum rte_vlan_type vlan_type,
4233 				uint16_t tpid)
4234 {
4235 	struct rte_eth_dev *dev;
4236 	int ret;
4237 
4238 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4239 	dev = &rte_eth_devices[port_id];
4240 
4241 	if (*dev->dev_ops->vlan_tpid_set == NULL)
4242 		return -ENOTSUP;
4243 	ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
4244 							      tpid));
4245 
4246 	rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret);
4247 
4248 	return ret;
4249 }
4250 
4251 int
4252 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
4253 {
4254 	struct rte_eth_dev_info dev_info;
4255 	struct rte_eth_dev *dev;
4256 	int ret = 0;
4257 	int mask = 0;
4258 	int cur, org = 0;
4259 	uint64_t orig_offloads;
4260 	uint64_t dev_offloads;
4261 	uint64_t new_offloads;
4262 
4263 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4264 	dev = &rte_eth_devices[port_id];
4265 
4266 	/* save original values in case of failure */
4267 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
4268 	dev_offloads = orig_offloads;
4269 
4270 	/* check which option changed by application */
4271 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
4272 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
4273 	if (cur != org) {
4274 		if (cur)
4275 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4276 		else
4277 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4278 		mask |= RTE_ETH_VLAN_STRIP_MASK;
4279 	}
4280 
4281 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
4282 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
4283 	if (cur != org) {
4284 		if (cur)
4285 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4286 		else
4287 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4288 		mask |= RTE_ETH_VLAN_FILTER_MASK;
4289 	}
4290 
4291 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
4292 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
4293 	if (cur != org) {
4294 		if (cur)
4295 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4296 		else
4297 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4298 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
4299 	}
4300 
4301 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
4302 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
4303 	if (cur != org) {
4304 		if (cur)
4305 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4306 		else
4307 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4308 		mask |= RTE_ETH_QINQ_STRIP_MASK;
4309 	}
4310 
4311 	/* no change */
4312 	if (mask == 0)
4313 		return ret;
4314 
4315 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4316 	if (ret != 0)
4317 		return ret;
4318 
4319 	/* Rx VLAN offloading must be within its device capabilities */
4320 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
4321 		new_offloads = dev_offloads & ~orig_offloads;
4322 		RTE_ETHDEV_LOG_LINE(ERR,
4323 			"Ethdev port_id=%u requested new added VLAN offloads "
4324 			"0x%" PRIx64 " must be within Rx offloads capabilities "
4325 			"0x%" PRIx64 " in %s()",
4326 			port_id, new_offloads, dev_info.rx_offload_capa,
4327 			__func__);
4328 		return -EINVAL;
4329 	}
4330 
4331 	if (*dev->dev_ops->vlan_offload_set == NULL)
4332 		return -ENOTSUP;
4333 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
4334 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
4335 	if (ret) {
4336 		/* hit an error, restore original values */
4337 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
4338 	}
4339 
4340 	ret = eth_err(port_id, ret);
4341 
4342 	rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret);
4343 
4344 	return ret;
4345 }
4346 
4347 int
4348 rte_eth_dev_get_vlan_offload(uint16_t port_id)
4349 {
4350 	struct rte_eth_dev *dev;
4351 	uint64_t *dev_offloads;
4352 	int ret = 0;
4353 
4354 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4355 	dev = &rte_eth_devices[port_id];
4356 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
4357 
4358 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4359 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
4360 
4361 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4362 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
4363 
4364 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
4365 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
4366 
4367 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4368 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
4369 
4370 	rte_ethdev_trace_get_vlan_offload(port_id, ret);
4371 
4372 	return ret;
4373 }
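
/*
 * Illustrative sketch (editor's addition): runtime read-modify-write of
 * the VLAN offload flags with the get/set pair above, enabling stripping
 * without disturbing the other three flags.
 */
static __rte_unused int
ethdev_doc_example_enable_vlan_strip(uint16_t port_id)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	return rte_eth_dev_set_vlan_offload(port_id,
			mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
}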
4374 
4375 int
4376 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
4377 {
4378 	struct rte_eth_dev *dev;
4379 	int ret;
4380 
4381 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4382 	dev = &rte_eth_devices[port_id];
4383 
4384 	if (*dev->dev_ops->vlan_pvid_set == NULL)
4385 		return -ENOTSUP;
4386 	ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
4387 
4388 	rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret);
4389 
4390 	return ret;
4391 }
4392 
4393 int
4394 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4395 {
4396 	struct rte_eth_dev *dev;
4397 	int ret;
4398 
4399 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4400 	dev = &rte_eth_devices[port_id];
4401 
4402 	if (fc_conf == NULL) {
4403 		RTE_ETHDEV_LOG_LINE(ERR,
4404 			"Cannot get ethdev port %u flow control config to NULL",
4405 			port_id);
4406 		return -EINVAL;
4407 	}
4408 
4409 	if (*dev->dev_ops->flow_ctrl_get == NULL)
4410 		return -ENOTSUP;
4411 	memset(fc_conf, 0, sizeof(*fc_conf));
4412 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
4413 
4414 	rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret);
4415 
4416 	return ret;
4417 }
4418 
4419 int
4420 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4421 {
4422 	struct rte_eth_dev *dev;
4423 	int ret;
4424 
4425 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4426 	dev = &rte_eth_devices[port_id];
4427 
4428 	if (fc_conf == NULL) {
4429 		RTE_ETHDEV_LOG_LINE(ERR,
4430 			"Cannot set ethdev port %u flow control from NULL config",
4431 			port_id);
4432 		return -EINVAL;
4433 	}
4434 
4435 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
4436 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid send_xon, only 0/1 allowed");
4437 		return -EINVAL;
4438 	}
4439 
4440 	if (*dev->dev_ops->flow_ctrl_set == NULL)
4441 		return -ENOTSUP;
4442 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
4443 
4444 	rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret);
4445 
4446 	return ret;
4447 }
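
/*
 * Illustrative sketch (editor's addition): the usual get-modify-set
 * pattern for link flow control; starting from the current settings
 * avoids clobbering driver defaults such as pause_time or high_water.
 */
static __rte_unused int
ethdev_doc_example_enable_pause(uint16_t port_id)
{
	struct rte_eth_fc_conf fc;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
	if (ret != 0)
		return ret;

	fc.mode = RTE_ETH_FC_FULL;	/* honour and emit PAUSE frames */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc);
}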
4448 
4449 int
4450 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4451 				   struct rte_eth_pfc_conf *pfc_conf)
4452 {
4453 	struct rte_eth_dev *dev;
4454 	int ret;
4455 
4456 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4457 	dev = &rte_eth_devices[port_id];
4458 
4459 	if (pfc_conf == NULL) {
4460 		RTE_ETHDEV_LOG_LINE(ERR,
4461 			"Cannot set ethdev port %u priority flow control from NULL config",
4462 			port_id);
4463 		return -EINVAL;
4464 	}
4465 
4466 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
4467 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid priority, only 0-7 allowed");
4468 		return -EINVAL;
4469 	}
4470 
4471 	/* High/low water validation is device-specific */
4472 	if (*dev->dev_ops->priority_flow_ctrl_set == NULL)
4473 		return -ENOTSUP;
4474 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4475 			       (dev, pfc_conf));
4476 
4477 	rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret);
4478 
4479 	return ret;
4480 }
4481 
4482 static int
4483 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4484 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4485 {
4486 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
4487 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4488 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
4489 			RTE_ETHDEV_LOG_LINE(ERR,
4490 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d",
4491 				pfc_queue_conf->rx_pause.tx_qid,
4492 				dev_info->nb_tx_queues);
4493 			return -EINVAL;
4494 		}
4495 
4496 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
4497 			RTE_ETHDEV_LOG_LINE(ERR,
4498 				"PFC TC not in range for Rx pause requested:%d max:%d",
4499 				pfc_queue_conf->rx_pause.tc, tc_max);
4500 			return -EINVAL;
4501 		}
4502 	}
4503 
4504 	return 0;
4505 }
4506 
4507 static int
4508 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4509 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4510 {
4511 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
4512 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4513 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
4514 			RTE_ETHDEV_LOG_LINE(ERR,
4515 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d",
4516 				pfc_queue_conf->tx_pause.rx_qid,
4517 				dev_info->nb_rx_queues);
4518 			return -EINVAL;
4519 		}
4520 
4521 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
4522 			RTE_ETHDEV_LOG_LINE(ERR,
4523 				"PFC TC not in range for Tx pause requested:%d max:%d",
4524 				pfc_queue_conf->tx_pause.tc, tc_max);
4525 			return -EINVAL;
4526 		}
4527 	}
4528 
4529 	return 0;
4530 }
4531 
4532 int
4533 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4534 		struct rte_eth_pfc_queue_info *pfc_queue_info)
4535 {
4536 	struct rte_eth_dev *dev;
4537 	int ret;
4538 
4539 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4540 	dev = &rte_eth_devices[port_id];
4541 
4542 	if (pfc_queue_info == NULL) {
4543 		RTE_ETHDEV_LOG_LINE(ERR, "PFC info param is NULL for port (%u)",
4544 			port_id);
4545 		return -EINVAL;
4546 	}
4547 
4548 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
4549 		return -ENOTSUP;
4550 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
4551 			(dev, pfc_queue_info));
4552 
4553 	rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id,
4554 						pfc_queue_info, ret);
4555 
4556 	return ret;
4557 }
4558 
4559 int
4560 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4561 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4562 {
4563 	struct rte_eth_pfc_queue_info pfc_info;
4564 	struct rte_eth_dev_info dev_info;
4565 	struct rte_eth_dev *dev;
4566 	int ret;
4567 
4568 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4569 	dev = &rte_eth_devices[port_id];
4570 
4571 	if (pfc_queue_conf == NULL) {
4572 		RTE_ETHDEV_LOG_LINE(ERR, "PFC parameters are NULL for port (%u)",
4573 			port_id);
4574 		return -EINVAL;
4575 	}
4576 
4577 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4578 	if (ret != 0)
4579 		return ret;
4580 
4581 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
4582 	if (ret != 0)
4583 		return ret;
4584 
4585 	if (pfc_info.tc_max == 0) {
4586 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port %u does not support PFC TC values",
4587 			port_id);
4588 		return -ENOTSUP;
4589 	}
4590 
4591 	/* Check whether the requested mode is supported */
4592 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
4593 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
4594 		RTE_ETHDEV_LOG_LINE(ERR, "PFC Tx pause unsupported for port (%d)",
4595 			port_id);
4596 		return -EINVAL;
4597 	}
4598 
4599 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
4600 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
4601 		RTE_ETHDEV_LOG_LINE(ERR, "PFC Rx pause unsupported for port (%d)",
4602 			port_id);
4603 		return -EINVAL;
4604 	}
4605 
4606 	/* Validate Rx pause parameters */
4607 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4608 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
4609 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
4610 				pfc_queue_conf);
4611 		if (ret != 0)
4612 			return ret;
4613 	}
4614 
4615 	/* Validate Tx pause parameters */
4616 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4617 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
4618 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
4619 				pfc_queue_conf);
4620 		if (ret != 0)
4621 			return ret;
4622 	}
4623 
4624 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4625 		return -ENOTSUP;
4626 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
4627 			(dev, pfc_queue_conf));
4628 
4629 	rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id,
4630 						pfc_queue_conf, ret);
4631 
4632 	return ret;
4633 }
4634 
4635 static int
4636 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4637 			uint16_t reta_size)
4638 {
4639 	uint16_t i, num;
4640 
4641 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4642 	for (i = 0; i < num; i++) {
4643 		if (reta_conf[i].mask)
4644 			return 0;
4645 	}
4646 
4647 	return -EINVAL;
4648 }
4649 
4650 static int
4651 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4652 			 uint16_t reta_size,
4653 			 uint16_t max_rxq)
4654 {
4655 	uint16_t i, idx, shift;
4656 
4657 	if (max_rxq == 0) {
4658 		RTE_ETHDEV_LOG_LINE(ERR, "No receive queue is available");
4659 		return -EINVAL;
4660 	}
4661 
4662 	for (i = 0; i < reta_size; i++) {
4663 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4664 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4665 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4666 			(reta_conf[idx].reta[shift] >= max_rxq)) {
4667 			RTE_ETHDEV_LOG_LINE(ERR,
4668 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u",
4669 				idx, shift,
4670 				reta_conf[idx].reta[shift], max_rxq);
4671 			return -EINVAL;
4672 		}
4673 	}
4674 
4675 	return 0;
4676 }
4677 
4678 int
4679 rte_eth_dev_rss_reta_update(uint16_t port_id,
4680 			    struct rte_eth_rss_reta_entry64 *reta_conf,
4681 			    uint16_t reta_size)
4682 {
4683 	enum rte_eth_rx_mq_mode mq_mode;
4684 	struct rte_eth_dev *dev;
4685 	int ret;
4686 
4687 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4688 	dev = &rte_eth_devices[port_id];
4689 
4690 	if (reta_conf == NULL) {
4691 		RTE_ETHDEV_LOG_LINE(ERR,
4692 			"Cannot update ethdev port %u RSS RETA to NULL",
4693 			port_id);
4694 		return -EINVAL;
4695 	}
4696 
4697 	if (reta_size == 0) {
4698 		RTE_ETHDEV_LOG_LINE(ERR,
4699 			"Cannot update ethdev port %u RSS RETA with zero size",
4700 			port_id);
4701 		return -EINVAL;
4702 	}
4703 
4704 	/* Check mask bits */
4705 	ret = eth_check_reta_mask(reta_conf, reta_size);
4706 	if (ret < 0)
4707 		return ret;
4708 
4709 	/* Check entry value */
4710 	ret = eth_check_reta_entry(reta_conf, reta_size,
4711 				dev->data->nb_rx_queues);
4712 	if (ret < 0)
4713 		return ret;
4714 
4715 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4716 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4717 		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
4718 		return -ENOTSUP;
4719 	}
4720 
4721 	if (*dev->dev_ops->reta_update == NULL)
4722 		return -ENOTSUP;
4723 	ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4724 							    reta_size));
4725 
4726 	rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret);
4727 
4728 	return ret;
4729 }
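
/*
 * Illustrative sketch (editor's addition): build a round-robin redirection
 * table spreading all RETA entries over nb_queues Rx queues. The 64-entry
 * group/mask layout matches what the checks above validate.
 */
static __rte_unused int
ethdev_doc_example_reta_round_robin(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 *conf;
	struct rte_eth_dev_info info;
	uint16_t i, groups;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	if (info.reta_size == 0 || nb_queues == 0)
		return -ENOTSUP;

	groups = (info.reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
			RTE_ETH_RETA_GROUP_SIZE;
	conf = calloc(groups, sizeof(*conf));	/* masks start zeroed */
	if (conf == NULL)
		return -ENOMEM;

	for (i = 0; i < info.reta_size; i++) {
		conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
				RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
		conf[i / RTE_ETH_RETA_GROUP_SIZE].reta
				[i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_queues;
	}

	ret = rte_eth_dev_rss_reta_update(port_id, conf, info.reta_size);
	free(conf);
	return ret;
}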
4730 
4731 int
4732 rte_eth_dev_rss_reta_query(uint16_t port_id,
4733 			   struct rte_eth_rss_reta_entry64 *reta_conf,
4734 			   uint16_t reta_size)
4735 {
4736 	struct rte_eth_dev *dev;
4737 	int ret;
4738 
4739 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4740 	dev = &rte_eth_devices[port_id];
4741 
4742 	if (reta_conf == NULL) {
4743 		RTE_ETHDEV_LOG_LINE(ERR,
4744 			"Cannot query ethdev port %u RSS RETA from NULL config",
4745 			port_id);
4746 		return -EINVAL;
4747 	}
4748 
4749 	/* Check mask bits */
4750 	ret = eth_check_reta_mask(reta_conf, reta_size);
4751 	if (ret < 0)
4752 		return ret;
4753 
4754 	if (*dev->dev_ops->reta_query == NULL)
4755 		return -ENOTSUP;
4756 	ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4757 							   reta_size));
4758 
4759 	rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret);
4760 
4761 	return ret;
4762 }
4763 
4764 int
4765 rte_eth_dev_rss_hash_update(uint16_t port_id,
4766 			    struct rte_eth_rss_conf *rss_conf)
4767 {
4768 	struct rte_eth_dev *dev;
4769 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4770 	enum rte_eth_rx_mq_mode mq_mode;
4771 	int ret;
4772 
4773 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4774 	dev = &rte_eth_devices[port_id];
4775 
4776 	if (rss_conf == NULL) {
4777 		RTE_ETHDEV_LOG_LINE(ERR,
4778 			"Cannot update ethdev port %u RSS hash from NULL config",
4779 			port_id);
4780 		return -EINVAL;
4781 	}
4782 
4783 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4784 	if (ret != 0)
4785 		return ret;
4786 
4787 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4788 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4789 	    dev_info.flow_type_rss_offloads) {
4790 		RTE_ETHDEV_LOG_LINE(ERR,
4791 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
4792 			port_id, rss_conf->rss_hf,
4793 			dev_info.flow_type_rss_offloads);
4794 		return -EINVAL;
4795 	}
4796 
4797 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4798 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4799 		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
4800 		return -ENOTSUP;
4801 	}
4802 
4803 	if (rss_conf->rss_key != NULL &&
4804 	    rss_conf->rss_key_len != dev_info.hash_key_size) {
4805 		RTE_ETHDEV_LOG_LINE(ERR,
4806 			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
4807 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
4808 		return -EINVAL;
4809 	}
4810 
4811 	if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
4812 	    (dev_info.rss_algo_capa &
4813 	     RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) {
4814 		RTE_ETHDEV_LOG_LINE(ERR,
4815 			"Ethdev port_id=%u configured RSS hash algorithm (%u)"
4816 			"is not in the algorithm capability (0x%" PRIx32 ")",
4817 			port_id, rss_conf->algorithm, dev_info.rss_algo_capa);
4818 		return -EINVAL;
4819 	}
4820 
4821 	if (*dev->dev_ops->rss_hash_update == NULL)
4822 		return -ENOTSUP;
4823 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4824 								rss_conf));
4825 
4826 	rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret);
4827 
4828 	return ret;
4829 }
4830 
4831 int
4832 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4833 			      struct rte_eth_rss_conf *rss_conf)
4834 {
4835 	struct rte_eth_dev_info dev_info = { 0 };
4836 	struct rte_eth_dev *dev;
4837 	int ret;
4838 
4839 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4840 	dev = &rte_eth_devices[port_id];
4841 
4842 	if (rss_conf == NULL) {
4843 		RTE_ETHDEV_LOG_LINE(ERR,
4844 			"Cannot get ethdev port %u RSS hash config to NULL",
4845 			port_id);
4846 		return -EINVAL;
4847 	}
4848 
4849 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4850 	if (ret != 0)
4851 		return ret;
4852 
4853 	if (rss_conf->rss_key != NULL &&
4854 	    rss_conf->rss_key_len < dev_info.hash_key_size) {
4855 		RTE_ETHDEV_LOG_LINE(ERR,
4856 			"Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u",
4857 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
4858 		return -EINVAL;
4859 	}
4860 
4861 	rss_conf->algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT;
4862 
4863 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4864 		return -ENOTSUP;
4865 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4866 								  rss_conf));
4867 
4868 	rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret);
4869 
4870 	return ret;
4871 }
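
/*
 * Illustrative sketch (editor's addition): read back the active RSS
 * settings, sizing the key buffer from hash_key_size as the length check
 * above requires. The local key bound and function name are hypothetical.
 */
static __rte_unused int
ethdev_doc_example_show_rss(uint16_t port_id)
{
	uint8_t key[64];	/* hypothetical upper bound */
	struct rte_eth_rss_conf rss = { .rss_key = key };
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	if (info.hash_key_size > sizeof(key))
		return -ENOSPC;

	rss.rss_key_len = info.hash_key_size;
	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss);
	if (ret != 0)
		return ret;

	printf("rss_hf=0x%" PRIx64 " algo=%s\n", rss.rss_hf,
		rte_eth_dev_rss_algo_name(rss.algorithm));
	return 0;
}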
4872 
4873 const char *
4874 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
4875 {
4876 	const char *name = "Unknown function";
4877 	unsigned int i;
4878 
4879 	for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) {
4880 		if (rss_algo == rte_eth_dev_rss_algo_names[i].algo)
4881 			return rte_eth_dev_rss_algo_names[i].name;
4882 	}
4883 
4884 	return name;
4885 }
4886 
4887 int
4888 rte_eth_find_rss_algo(const char *name, uint32_t *algo)
4889 {
4890 	unsigned int i;
4891 
4892 	for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) {
4893 		if (strcmp(name, rte_eth_dev_rss_algo_names[i].name) == 0) {
4894 			*algo = rte_eth_dev_rss_algo_names[i].algo;
4895 			return 0;
4896 		}
4897 	}
4898 
4899 	return -EINVAL;
4900 }
4901 
4902 int
4903 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4904 				struct rte_eth_udp_tunnel *udp_tunnel)
4905 {
4906 	struct rte_eth_dev *dev;
4907 	int ret;
4908 
4909 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4910 	dev = &rte_eth_devices[port_id];
4911 
4912 	if (udp_tunnel == NULL) {
4913 		RTE_ETHDEV_LOG_LINE(ERR,
4914 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel",
4915 			port_id);
4916 		return -EINVAL;
4917 	}
4918 
4919 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4920 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
4921 		return -EINVAL;
4922 	}
4923 
4924 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4925 		return -ENOTSUP;
4926 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4927 								udp_tunnel));
4928 
4929 	rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret);
4930 
4931 	return ret;
4932 }
4933 
4934 int
4935 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4936 				   struct rte_eth_udp_tunnel *udp_tunnel)
4937 {
4938 	struct rte_eth_dev *dev;
4939 	int ret;
4940 
4941 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4942 	dev = &rte_eth_devices[port_id];
4943 
4944 	if (udp_tunnel == NULL) {
4945 		RTE_ETHDEV_LOG_LINE(ERR,
4946 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel",
4947 			port_id);
4948 		return -EINVAL;
4949 	}
4950 
4951 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4952 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
4953 		return -EINVAL;
4954 	}
4955 
4956 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4957 		return -ENOTSUP;
4958 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4959 								udp_tunnel));
4960 
4961 	rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret);
4962 
4963 	return ret;
4964 }
4965 
4966 int
4967 rte_eth_led_on(uint16_t port_id)
4968 {
4969 	struct rte_eth_dev *dev;
4970 	int ret;
4971 
4972 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4973 	dev = &rte_eth_devices[port_id];
4974 
4975 	if (*dev->dev_ops->dev_led_on == NULL)
4976 		return -ENOTSUP;
4977 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4978 
4979 	rte_eth_trace_led_on(port_id, ret);
4980 
4981 	return ret;
4982 }
4983 
4984 int
4985 rte_eth_led_off(uint16_t port_id)
4986 {
4987 	struct rte_eth_dev *dev;
4988 	int ret;
4989 
4990 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4991 	dev = &rte_eth_devices[port_id];
4992 
4993 	if (*dev->dev_ops->dev_led_off == NULL)
4994 		return -ENOTSUP;
4995 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4996 
4997 	rte_eth_trace_led_off(port_id, ret);
4998 
4999 	return ret;
5000 }
5001 
5002 int
5003 rte_eth_fec_get_capability(uint16_t port_id,
5004 			   struct rte_eth_fec_capa *speed_fec_capa,
5005 			   unsigned int num)
5006 {
5007 	struct rte_eth_dev *dev;
5008 	int ret;
5009 
5010 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5011 	dev = &rte_eth_devices[port_id];
5012 
5013 	if (speed_fec_capa == NULL && num > 0) {
5014 		RTE_ETHDEV_LOG_LINE(ERR,
5015 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero",
5016 			port_id);
5017 		return -EINVAL;
5018 	}
5019 
5020 	if (*dev->dev_ops->fec_get_capability == NULL)
5021 		return -ENOTSUP;
5022 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
5023 
5024 	rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret);
5025 
5026 	return ret;
5027 }
5028 
5029 int
5030 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
5031 {
5032 	struct rte_eth_dev *dev;
5033 	int ret;
5034 
5035 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5036 	dev = &rte_eth_devices[port_id];
5037 
5038 	if (fec_capa == NULL) {
5039 		RTE_ETHDEV_LOG_LINE(ERR,
5040 			"Cannot get ethdev port %u current FEC mode to NULL",
5041 			port_id);
5042 		return -EINVAL;
5043 	}
5044 
5045 	if (*dev->dev_ops->fec_get == NULL)
5046 		return -ENOTSUP;
5047 	ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
5048 
5049 	rte_eth_trace_fec_get(port_id, fec_capa, ret);
5050 
5051 	return ret;
5052 }
5053 
5054 int
5055 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
5056 {
5057 	struct rte_eth_dev *dev;
5058 	int ret;
5059 
5060 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5061 	dev = &rte_eth_devices[port_id];
5062 
5063 	if (fec_capa == 0) {
5064 		RTE_ETHDEV_LOG_LINE(ERR, "At least one FEC mode should be specified");
5065 		return -EINVAL;
5066 	}
5067 
5068 	if (*dev->dev_ops->fec_set == NULL)
5069 		return -ENOTSUP;
5070 	ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
5071 
5072 	rte_eth_trace_fec_set(port_id, fec_capa, ret);
5073 
5074 	return ret;
5075 }
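
/*
 * Illustrative sketch (editor's addition): probe the per-speed FEC
 * capabilities with the two-call size/fill pattern, then request RS-FEC
 * when any speed supports it, falling back to automatic negotiation.
 */
static __rte_unused int
ethdev_doc_example_prefer_rs_fec(uint16_t port_id)
{
	struct rte_eth_fec_capa *capa;
	bool rs = false;
	int num, i;

	num = rte_eth_fec_get_capability(port_id, NULL, 0);
	if (num <= 0)
		return num < 0 ? num : -ENOTSUP;

	capa = malloc(sizeof(*capa) * num);
	if (capa == NULL)
		return -ENOMEM;

	num = rte_eth_fec_get_capability(port_id, capa, num);
	for (i = 0; i < num; i++)
		if (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
			rs = true;
	free(capa);

	return rte_eth_fec_set(port_id, rs ?
			RTE_ETH_FEC_MODE_CAPA_MASK(RS) :
			RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
}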
5076 
5077 /*
5078  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
5079  * an empty spot.
5080  */
5081 static int
5082 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
5083 {
5084 	struct rte_eth_dev_info dev_info;
5085 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5086 	unsigned i;
5087 	int ret;
5088 
5089 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5090 	if (ret != 0)
5091 		return -1;
5092 
5093 	for (i = 0; i < dev_info.max_mac_addrs; i++)
5094 		if (memcmp(addr, &dev->data->mac_addrs[i],
5095 				RTE_ETHER_ADDR_LEN) == 0)
5096 			return i;
5097 
5098 	return -1;
5099 }
5100 
5101 static const struct rte_ether_addr null_mac_addr;
5102 
5103 int
5104 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
5105 			uint32_t pool)
5106 {
5107 	struct rte_eth_dev *dev;
5108 	int index;
5109 	uint64_t pool_mask;
5110 	int ret;
5111 
5112 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5113 	dev = &rte_eth_devices[port_id];
5114 
5115 	if (addr == NULL) {
5116 		RTE_ETHDEV_LOG_LINE(ERR,
5117 			"Cannot add ethdev port %u MAC address from NULL address",
5118 			port_id);
5119 		return -EINVAL;
5120 	}
5121 
5122 	if (*dev->dev_ops->mac_addr_add == NULL)
5123 		return -ENOTSUP;
5124 
5125 	if (rte_is_zero_ether_addr(addr)) {
5126 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
5127 			port_id);
5128 		return -EINVAL;
5129 	}
5130 	if (pool >= RTE_ETH_64_POOLS) {
5131 		RTE_ETHDEV_LOG_LINE(ERR, "Pool ID must be 0-%d", RTE_ETH_64_POOLS - 1);
5132 		return -EINVAL;
5133 	}
5134 
5135 	index = eth_dev_get_mac_addr_index(port_id, addr);
5136 	if (index < 0) {
5137 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
5138 		if (index < 0) {
5139 			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
5140 				port_id);
5141 			return -ENOSPC;
5142 		}
5143 	} else {
5144 		pool_mask = dev->data->mac_pool_sel[index];
5145 
5146 		/* If both the MAC address and the pool are already there, do nothing */
5147 		if (pool_mask & RTE_BIT64(pool))
5148 			return 0;
5149 	}
5150 
5151 	/* Update NIC */
5152 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
5153 
5154 	if (ret == 0) {
5155 		/* Update address in NIC data structure */
5156 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
5157 
5158 		/* Update pool bitmap in NIC data structure */
5159 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
5160 	}
5161 
5162 	ret = eth_err(port_id, ret);
5163 
5164 	rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret);
5165 
5166 	return ret;
5167 }
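
/*
 * Illustrative sketch (editor's addition): add a secondary unicast
 * address, parsed from its string form, to VMDq pool 0. The address
 * literal is only an example value.
 */
static __rte_unused int
ethdev_doc_example_add_mac(uint16_t port_id)
{
	struct rte_ether_addr addr;

	if (rte_ether_unformat_addr("02:00:00:00:00:01", &addr) != 0)
		return -EINVAL;

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
}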
5168 
5169 int
5170 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
5171 {
5172 	struct rte_eth_dev *dev;
5173 	int index;
5174 
5175 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5176 	dev = &rte_eth_devices[port_id];
5177 
5178 	if (addr == NULL) {
5179 		RTE_ETHDEV_LOG_LINE(ERR,
5180 			"Cannot remove ethdev port %u MAC address from NULL address",
5181 			port_id);
5182 		return -EINVAL;
5183 	}
5184 
5185 	if (*dev->dev_ops->mac_addr_remove == NULL)
5186 		return -ENOTSUP;
5187 
5188 	index = eth_dev_get_mac_addr_index(port_id, addr);
5189 	if (index == 0) {
5190 		RTE_ETHDEV_LOG_LINE(ERR,
5191 			"Port %u: Cannot remove default MAC address",
5192 			port_id);
5193 		return -EADDRINUSE;
5194 	} else if (index < 0)
5195 		return 0;  /* Do nothing if address wasn't found */
5196 
5197 	/* Update NIC */
5198 	(*dev->dev_ops->mac_addr_remove)(dev, index);
5199 
5200 	/* Update address in NIC data structure */
5201 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
5202 
5203 	/* reset pool bitmap */
5204 	dev->data->mac_pool_sel[index] = 0;
5205 
5206 	rte_ethdev_trace_mac_addr_remove(port_id, addr);
5207 
5208 	return 0;
5209 }
5210 
5211 int
5212 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
5213 {
5214 	struct rte_eth_dev *dev;
5215 	int index;
5216 	int ret;
5217 
5218 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5219 	dev = &rte_eth_devices[port_id];
5220 
5221 	if (addr == NULL) {
5222 		RTE_ETHDEV_LOG_LINE(ERR,
5223 			"Cannot set ethdev port %u default MAC address from NULL address",
5224 			port_id);
5225 		return -EINVAL;
5226 	}
5227 
5228 	if (!rte_is_valid_assigned_ether_addr(addr))
5229 		return -EINVAL;
5230 
5231 	if (*dev->dev_ops->mac_addr_set == NULL)
5232 		return -ENOTSUP;
5233 
5234 	/* Keep address unique in dev->data->mac_addrs[]. */
5235 	index = eth_dev_get_mac_addr_index(port_id, addr);
5236 	if (index > 0) {
5237 		RTE_ETHDEV_LOG_LINE(ERR,
5238 			"New default address for port %u was already in the address list. Please remove it first.",
5239 			port_id);
5240 		return -EEXIST;
5241 	}
5242 
5243 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
5244 	if (ret < 0)
5245 		return ret;
5246 
5247 	/* Update default address in NIC data structure */
5248 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
5249 
5250 	rte_ethdev_trace_default_mac_addr_set(port_id, addr);
5251 
5252 	return 0;
5253 }
5254 
5255 
5256 /*
5257  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
5258  * an empty spot.
5259  */
5260 static int
5261 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
5262 		const struct rte_ether_addr *addr)
5263 {
5264 	struct rte_eth_dev_info dev_info;
5265 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5266 	unsigned i;
5267 	int ret;
5268 
5269 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5270 	if (ret != 0)
5271 		return -1;
5272 
5273 	if (!dev->data->hash_mac_addrs)
5274 		return -1;
5275 
5276 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
5277 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
5278 			RTE_ETHER_ADDR_LEN) == 0)
5279 			return i;
5280 
5281 	return -1;
5282 }
5283 
5284 int
5285 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
5286 				uint8_t on)
5287 {
5288 	int index;
5289 	int ret;
5290 	struct rte_eth_dev *dev;
5291 
5292 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5293 	dev = &rte_eth_devices[port_id];
5294 
5295 	if (addr == NULL) {
5296 		RTE_ETHDEV_LOG_LINE(ERR,
5297 			"Cannot set ethdev port %u unicast hash table from NULL address",
5298 			port_id);
5299 		return -EINVAL;
5300 	}
5301 
5302 	if (rte_is_zero_ether_addr(addr)) {
5303 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
5304 			port_id);
5305 		return -EINVAL;
5306 	}
5307 
5308 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
5309 	/* If it's already there and we're enabling, do nothing */
5310 	if ((index >= 0) && on)
5311 		return 0;
5312 
5313 	if (index < 0) {
5314 		if (!on) {
5315 			RTE_ETHDEV_LOG_LINE(ERR,
5316 				"Port %u: the MAC address was not set in UTA",
5317 				port_id);
5318 			return -EINVAL;
5319 		}
5320 
5321 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
5322 		if (index < 0) {
5323 			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
5324 				port_id);
5325 			return -ENOSPC;
5326 		}
5327 	}
5328 
5329 	if (*dev->dev_ops->uc_hash_table_set == NULL)
5330 		return -ENOTSUP;
5331 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
5332 	if (ret == 0) {
5333 		/* Update address in NIC data structure */
5334 		if (on)
5335 			rte_ether_addr_copy(addr,
5336 					&dev->data->hash_mac_addrs[index]);
5337 		else
5338 			rte_ether_addr_copy(&null_mac_addr,
5339 					&dev->data->hash_mac_addrs[index]);
5340 	}
5341 
5342 	ret = eth_err(port_id, ret);
5343 
5344 	rte_ethdev_trace_uc_hash_table_set(port_id, on, ret);
5345 
5346 	return ret;
5347 }
5348 
5349 int
5350 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
5351 {
5352 	struct rte_eth_dev *dev;
5353 	int ret;
5354 
5355 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5356 	dev = &rte_eth_devices[port_id];
5357 
5358 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
5359 		return -ENOTSUP;
5360 	ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));
5361 
5362 	rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret);
5363 
5364 	return ret;
5365 }
5366 
5367 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
5368 					uint32_t tx_rate)
5369 {
5370 	struct rte_eth_dev *dev;
5371 	struct rte_eth_dev_info dev_info;
5372 	struct rte_eth_link link;
5373 	int ret;
5374 
5375 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5376 	dev = &rte_eth_devices[port_id];
5377 
5378 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5379 	if (ret != 0)
5380 		return ret;
5381 
5382 	link = dev->data->dev_link;
5383 
5384 	if (queue_idx >= dev_info.max_tx_queues) {
5385 		RTE_ETHDEV_LOG_LINE(ERR,
5386 			"Set queue rate limit: port %u: invalid queue ID=%u",
5387 			port_id, queue_idx);
5388 		return -EINVAL;
5389 	}
5390 
5391 	if (tx_rate > link.link_speed) {
5392 		RTE_ETHDEV_LOG_LINE(ERR,
5393 			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d",
5394 			tx_rate, link.link_speed);
5395 		return -EINVAL;
5396 	}
5397 
5398 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
5399 		return -ENOTSUP;
5400 	ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
5401 							queue_idx, tx_rate));
5402 
5403 	rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret);
5404 
5405 	return ret;
5406 }
5407 
5408 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
5409 			       uint8_t avail_thresh)
5410 {
5411 	struct rte_eth_dev *dev;
5412 	int ret;
5413 
5414 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5415 	dev = &rte_eth_devices[port_id];
5416 
5417 	if (queue_id >= dev->data->nb_rx_queues) {
5418 		RTE_ETHDEV_LOG_LINE(ERR,
5419 			"Set queue avail thresh: port %u: invalid queue ID=%u.",
5420 			port_id, queue_id);
5421 		return -EINVAL;
5422 	}
5423 
5424 	if (avail_thresh > 99) {
5425 		RTE_ETHDEV_LOG_LINE(ERR,
5426 			"Set queue avail thresh: port %u: threshold should be <= 99.",
5427 			port_id);
5428 		return -EINVAL;
5429 	}
5430 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
5431 		return -ENOTSUP;
5432 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
5433 							     queue_id, avail_thresh));
5434 
5435 	rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret);
5436 
5437 	return ret;
5438 }
5439 
5440 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
5441 				 uint8_t *avail_thresh)
5442 {
5443 	struct rte_eth_dev *dev;
5444 	int ret;
5445 
5446 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5447 	dev = &rte_eth_devices[port_id];
5448 
5449 	if (queue_id == NULL)
5450 		return -EINVAL;
5451 	if (*queue_id >= dev->data->nb_rx_queues)
5452 		*queue_id = 0;
5453 
5454 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
5455 		return -ENOTSUP;
5456 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
5457 							     queue_id, avail_thresh));
5458 
5459 	rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret);
5460 
5461 	return ret;
5462 }
5463 
5464 RTE_INIT(eth_dev_init_fp_ops)
5465 {
5466 	uint32_t i;
5467 
5468 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
5469 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
5470 }
5471 
5472 RTE_INIT(eth_dev_init_cb_lists)
5473 {
5474 	uint16_t i;
5475 
5476 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
5477 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
5478 }
5479 
5480 int
5481 rte_eth_dev_callback_register(uint16_t port_id,
5482 			enum rte_eth_event_type event,
5483 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5484 {
5485 	struct rte_eth_dev *dev;
5486 	struct rte_eth_dev_callback *user_cb;
5487 	uint16_t next_port;
5488 	uint16_t last_port;
5489 
5490 	if (cb_fn == NULL) {
5491 		RTE_ETHDEV_LOG_LINE(ERR,
5492 			"Cannot register ethdev port %u callback from NULL",
5493 			port_id);
5494 		return -EINVAL;
5495 	}
5496 
5497 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5498 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
5499 		return -EINVAL;
5500 	}
5501 
5502 	if (port_id == RTE_ETH_ALL) {
5503 		next_port = 0;
5504 		last_port = RTE_MAX_ETHPORTS - 1;
5505 	} else {
5506 		next_port = last_port = port_id;
5507 	}
5508 
5509 	rte_spinlock_lock(&eth_dev_cb_lock);
5510 
5511 	do {
5512 		dev = &rte_eth_devices[next_port];
5513 
5514 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
5515 			if (user_cb->cb_fn == cb_fn &&
5516 				user_cb->cb_arg == cb_arg &&
5517 				user_cb->event == event) {
5518 				break;
5519 			}
5520 		}
5521 
5522 		/* create a new callback. */
5523 		if (user_cb == NULL) {
5524 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
5525 				sizeof(struct rte_eth_dev_callback), 0);
5526 			if (user_cb != NULL) {
5527 				user_cb->cb_fn = cb_fn;
5528 				user_cb->cb_arg = cb_arg;
5529 				user_cb->event = event;
5530 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
5531 						  user_cb, next);
5532 			} else {
5533 				rte_spinlock_unlock(&eth_dev_cb_lock);
5534 				rte_eth_dev_callback_unregister(port_id, event,
5535 								cb_fn, cb_arg);
5536 				return -ENOMEM;
5537 			}
5538 
5539 		}
5540 	} while (++next_port <= last_port);
5541 
5542 	rte_spinlock_unlock(&eth_dev_cb_lock);
5543 
5544 	rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg);
5545 
5546 	return 0;
5547 }
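
/*
 * Illustrative sketch (editor's addition): a link-status-change handler
 * matching rte_eth_dev_cb_fn, registered on every current and future
 * port via RTE_ETH_ALL. Names are hypothetical.
 */
static int
ethdev_doc_example_lsc_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link state changed\n", port_id);
	return 0;
}

static __rte_unused int
ethdev_doc_example_watch_links(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC,
			ethdev_doc_example_lsc_cb, NULL);
}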
5548 
5549 int
5550 rte_eth_dev_callback_unregister(uint16_t port_id,
5551 			enum rte_eth_event_type event,
5552 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5553 {
5554 	int ret;
5555 	struct rte_eth_dev *dev;
5556 	struct rte_eth_dev_callback *cb, *next;
5557 	uint16_t next_port;
5558 	uint16_t last_port;
5559 
5560 	if (cb_fn == NULL) {
5561 		RTE_ETHDEV_LOG_LINE(ERR,
5562 			"Cannot unregister ethdev port %u callback from NULL",
5563 			port_id);
5564 		return -EINVAL;
5565 	}
5566 
5567 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5568 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
5569 		return -EINVAL;
5570 	}
5571 
5572 	if (port_id == RTE_ETH_ALL) {
5573 		next_port = 0;
5574 		last_port = RTE_MAX_ETHPORTS - 1;
5575 	} else {
5576 		next_port = last_port = port_id;
5577 	}
5578 
5579 	rte_spinlock_lock(&eth_dev_cb_lock);
5580 
5581 	do {
5582 		dev = &rte_eth_devices[next_port];
5583 		ret = 0;
5584 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
5585 		     cb = next) {
5586 
5587 			next = TAILQ_NEXT(cb, next);
5588 
5589 			if (cb->cb_fn != cb_fn || cb->event != event ||
5590 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
5591 				continue;
5592 
5593 			/*
5594 			 * if this callback is not executing right now,
5595 			 * then remove it.
5596 			 */
5597 			if (cb->active == 0) {
5598 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
5599 				rte_free(cb);
5600 			} else {
5601 				ret = -EAGAIN;
5602 			}
5603 		}
5604 	} while (++next_port <= last_port);
5605 
5606 	rte_spinlock_unlock(&eth_dev_cb_lock);
5607 
5608 	rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg,
5609 					     ret);
5610 
5611 	return ret;
5612 }
5613 
5614 int
5615 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
5616 {
5617 	uint32_t vec;
5618 	struct rte_eth_dev *dev;
5619 	struct rte_intr_handle *intr_handle;
5620 	uint16_t qid;
5621 	int rc;
5622 
5623 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5624 	dev = &rte_eth_devices[port_id];
5625 
5626 	if (!dev->intr_handle) {
5627 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5628 		return -ENOTSUP;
5629 	}
5630 
5631 	intr_handle = dev->intr_handle;
5632 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5633 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5634 		return -EPERM;
5635 	}
5636 
5637 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
5638 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
5639 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5640 
5641 		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);
5642 
5643 		if (rc && rc != -EEXIST) {
5644 			RTE_ETHDEV_LOG_LINE(ERR,
5645 				"p %u q %u Rx ctl error op %d epfd %d vec %u",
5646 				port_id, qid, op, epfd, vec);
5647 		}
5648 	}
5649 
5650 	return 0;
5651 }
5652 
5653 int
5654 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
5655 {
5656 	struct rte_intr_handle *intr_handle;
5657 	struct rte_eth_dev *dev;
5658 	unsigned int efd_idx;
5659 	uint32_t vec;
5660 	int fd;
5661 
5662 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
5663 	dev = &rte_eth_devices[port_id];
5664 
5665 	if (queue_id >= dev->data->nb_rx_queues) {
5666 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
5667 		return -1;
5668 	}
5669 
5670 	if (!dev->intr_handle) {
5671 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5672 		return -1;
5673 	}
5674 
5675 	intr_handle = dev->intr_handle;
5676 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5677 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5678 		return -1;
5679 	}
5680 
5681 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5682 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5683 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
5684 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
5685 
5686 	rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd);
5687 
5688 	return fd;
5689 }
5690 
5691 int
5692 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
5693 			  int epfd, int op, void *data)
5694 {
5695 	uint32_t vec;
5696 	struct rte_eth_dev *dev;
5697 	struct rte_intr_handle *intr_handle;
5698 	int rc;
5699 
5700 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5701 	dev = &rte_eth_devices[port_id];
5702 
5703 	if (queue_id >= dev->data->nb_rx_queues) {
5704 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
5705 		return -EINVAL;
5706 	}
5707 
5708 	if (!dev->intr_handle) {
5709 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5710 		return -ENOTSUP;
5711 	}
5712 
5713 	intr_handle = dev->intr_handle;
5714 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5715 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5716 		return -EPERM;
5717 	}
5718 
5719 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5720 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5721 
5722 	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);
5723 
5724 	if (rc && rc != -EEXIST) {
5725 		RTE_ETHDEV_LOG_LINE(ERR,
5726 			"p %u q %u Rx ctl error op %d epfd %d vec %u",
5727 			port_id, queue_id, op, epfd, vec);
5728 		return rc;
5729 	}
5730 
5731 	return 0;
5732 }
5733 
5734 int
5735 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5736 			   uint16_t queue_id)
5737 {
5738 	struct rte_eth_dev *dev;
5739 	int ret;
5740 
5741 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5742 	dev = &rte_eth_devices[port_id];
5743 
5744 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5745 	if (ret != 0)
5746 		return ret;
5747 
5748 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
5749 		return -ENOTSUP;
5750 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5751 
5752 	rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret);
5753 
5754 	return ret;
5755 }
5756 
5757 int
5758 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5759 			    uint16_t queue_id)
5760 {
5761 	struct rte_eth_dev *dev;
5762 	int ret;
5763 
5764 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5765 	dev = &rte_eth_devices[port_id];
5766 
5767 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5768 	if (ret != 0)
5769 		return ret;
5770 
5771 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5772 		return -ENOTSUP;
5773 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5774 
5775 	rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret);
5776 
5777 	return ret;
5778 }
5779 
5780 
5781 const struct rte_eth_rxtx_callback *
5782 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5783 		rte_rx_callback_fn fn, void *user_param)
5784 {
5785 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5786 	rte_errno = ENOTSUP;
5787 	return NULL;
5788 #endif
5789 	struct rte_eth_dev *dev;
5790 
5791 	/* Check input parameters. */
5792 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5793 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5794 		rte_errno = EINVAL;
5795 		return NULL;
5796 	}
5797 	dev = &rte_eth_devices[port_id];
5798 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5799 		rte_errno = EINVAL;
5800 		return NULL;
5801 	}
5802 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5803 
5804 	if (cb == NULL) {
5805 		rte_errno = ENOMEM;
5806 		return NULL;
5807 	}
5808 
5809 	cb->fn.rx = fn;
5810 	cb->param = user_param;
5811 
5812 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5813 	/* Append the callback at the tail to preserve FIFO order. */
5814 	struct rte_eth_rxtx_callback *tail =
5815 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5816 
5817 	if (!tail) {
5818 		/* Stores to cb->fn and cb->param should complete before
5819 		 * cb is visible to the data plane.
5820 		 */
5821 		rte_atomic_store_explicit(
5822 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5823 			cb, rte_memory_order_release);
5824 
5825 	} else {
5826 		while (tail->next)
5827 			tail = tail->next;
5828 		/* Stores to cb->fn and cb->param should complete before
5829 		 * cb is visible to the data plane.
5830 		 */
5831 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5832 	}
5833 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5834 
5835 	rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb);
5836 
5837 	return cb;
5838 }
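
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted;
 * requires the RTE_ETHDEV_RXTX_CALLBACKS build option, as the guard above
 * shows): count packets delivered on one Rx queue via a post-Rx callback.
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		*(uint64_t *)user_param += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t counter;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &counter);
 */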
5839 
5840 const struct rte_eth_rxtx_callback *
5841 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5842 		rte_rx_callback_fn fn, void *user_param)
5843 {
5844 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5845 	rte_errno = ENOTSUP;
5846 	return NULL;
5847 #endif
5848 	/* Check input parameters. */
5849 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5850 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5851 		rte_errno = EINVAL;
5852 		return NULL;
5853 	}
5854 
5855 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5856 
5857 	if (cb == NULL) {
5858 		rte_errno = ENOMEM;
5859 		return NULL;
5860 	}
5861 
5862 	cb->fn.rx = fn;
5863 	cb->param = user_param;
5864 
5865 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5866 	/* Add the callback at the first position. */
5867 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5868 	/* Stores to cb->fn, cb->param and cb->next should complete before
5869 	 * cb is visible to data-plane threads.
5870 	 */
5871 	rte_atomic_store_explicit(
5872 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5873 		cb, rte_memory_order_release);
5874 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5875 
5876 	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
5877 					    cb);
5878 
5879 	return cb;
5880 }
5881 
5882 const struct rte_eth_rxtx_callback *
5883 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5884 		rte_tx_callback_fn fn, void *user_param)
5885 {
5886 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5887 	rte_errno = ENOTSUP;
5888 	return NULL;
5889 #endif
5890 	struct rte_eth_dev *dev;
5891 
5892 	/* Check input parameters. */
5893 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5894 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5895 		rte_errno = EINVAL;
5896 		return NULL;
5897 	}
5898 
5899 	dev = &rte_eth_devices[port_id];
5900 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5901 		rte_errno = EINVAL;
5902 		return NULL;
5903 	}
5904 
5905 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5906 
5907 	if (cb == NULL) {
5908 		rte_errno = ENOMEM;
5909 		return NULL;
5910 	}
5911 
5912 	cb->fn.tx = fn;
5913 	cb->param = user_param;
5914 
5915 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5916 	/* Append the callback at the tail to preserve FIFO order. */
5917 	struct rte_eth_rxtx_callback *tail =
5918 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5919 
5920 	if (!tail) {
5921 		/* Stores to cb->fn and cb->param should complete before
5922 		 * cb is visible to the data plane.
5923 		 */
5924 		rte_atomic_store_explicit(
5925 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5926 			cb, rte_memory_order_release);
5927 
5928 	} else {
5929 		while (tail->next)
5930 			tail = tail->next;
5931 		/* Stores to cb->fn and cb->param should complete before
5932 		 * cb is visible to the data plane.
5933 		 */
5934 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5935 	}
5936 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5937 
5938 	rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb);
5939 
5940 	return cb;
5941 }
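
/*
 * Note: Rx callbacks are linked on post_rx_burst_cbs and so run after the
 * PMD has received a burst, while Tx callbacks are linked on
 * pre_tx_burst_cbs and run before the PMD transmits, so a Tx callback may
 * still modify or drop packets ahead of the actual send.
 */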
5942 
5943 int
5944 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5945 		const struct rte_eth_rxtx_callback *user_cb)
5946 {
5947 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5948 	return -ENOTSUP;
5949 #endif
5950 	/* Check input parameters. */
5951 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5952 	if (user_cb == NULL ||
5953 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5954 		return -EINVAL;
5955 
5956 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5957 	struct rte_eth_rxtx_callback *cb;
5958 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5959 	int ret = -EINVAL;
5960 
5961 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5962 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5963 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5964 		cb = *prev_cb;
5965 		if (cb == user_cb) {
5966 			/* Remove the user cb from the callback list. */
5967 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
5968 			ret = 0;
5969 			break;
5970 		}
5971 	}
5972 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5973 
5974 	rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret);
5975 
5976 	return ret;
5977 }
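
/*
 * Note: removal only unlinks the callback (the relaxed store above); the
 * memory is never freed here because a data-plane thread may still be
 * executing it. Per the documented ethdev contract, the caller must make
 * sure no lcore is inside rte_eth_rx_burst() on this queue before freeing
 * the callback storage.
 */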
5978 
5979 int
5980 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5981 		const struct rte_eth_rxtx_callback *user_cb)
5982 {
5983 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5984 	return -ENOTSUP;
5985 #endif
5986 	/* Check input parameters. */
5987 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5988 	if (user_cb == NULL ||
5989 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5990 		return -EINVAL;
5991 
5992 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5993 	int ret = -EINVAL;
5994 	struct rte_eth_rxtx_callback *cb;
5995 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5996 
5997 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5998 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5999 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
6000 		cb = *prev_cb;
6001 		if (cb == user_cb) {
6002 			/* Remove the user cb from the callback list. */
6003 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
6004 			ret = 0;
6005 			break;
6006 		}
6007 	}
6008 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
6009 
6010 	rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret);
6011 
6012 	return ret;
6013 }
6014 
6015 int
6016 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
6017 	struct rte_eth_rxq_info *qinfo)
6018 {
6019 	struct rte_eth_dev *dev;
6020 
6021 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6022 	dev = &rte_eth_devices[port_id];
6023 
6024 	if (queue_id >= dev->data->nb_rx_queues) {
6025 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6026 		return -EINVAL;
6027 	}
6028 
6029 	if (qinfo == NULL) {
6030 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL",
6031 			port_id, queue_id);
6032 		return -EINVAL;
6033 	}
6034 
6035 	if (dev->data->rx_queues == NULL ||
6036 			dev->data->rx_queues[queue_id] == NULL) {
6037 		RTE_ETHDEV_LOG_LINE(ERR,
6038 			       "Rx queue %"PRIu16" of device with port_id=%"
6039 			       PRIu16" has not been setup",
6040 			       queue_id, port_id);
6041 		return -EINVAL;
6042 	}
6043 
6044 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
6045 		RTE_ETHDEV_LOG_LINE(INFO,
6046 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16,
6047 			queue_id, port_id);
6048 		return -EINVAL;
6049 	}
6050 
6051 	if (*dev->dev_ops->rxq_info_get == NULL)
6052 		return -ENOTSUP;
6053 
6054 	memset(qinfo, 0, sizeof(*qinfo));
6055 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
6056 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
6057 
6058 	rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo);
6059 
6060 	return 0;
6061 }
6062 
6063 int
6064 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
6065 	struct rte_eth_txq_info *qinfo)
6066 {
6067 	struct rte_eth_dev *dev;
6068 
6069 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6070 	dev = &rte_eth_devices[port_id];
6071 
6072 	if (queue_id >= dev->data->nb_tx_queues) {
6073 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6074 		return -EINVAL;
6075 	}
6076 
6077 	if (qinfo == NULL) {
6078 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL",
6079 			port_id, queue_id);
6080 		return -EINVAL;
6081 	}
6082 
6083 	if (dev->data->tx_queues == NULL ||
6084 			dev->data->tx_queues[queue_id] == NULL) {
6085 		RTE_ETHDEV_LOG_LINE(ERR,
6086 			       "Tx queue %"PRIu16" of device with port_id=%"
6087 			       PRIu16" has not been setup",
6088 			       queue_id, port_id);
6089 		return -EINVAL;
6090 	}
6091 
6092 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
6093 		RTE_ETHDEV_LOG_LINE(INFO,
6094 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16,
6095 			queue_id, port_id);
6096 		return -EINVAL;
6097 	}
6098 
6099 	if (*dev->dev_ops->txq_info_get == NULL)
6100 		return -ENOTSUP;
6101 
6102 	memset(qinfo, 0, sizeof(*qinfo));
6103 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
6104 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
6105 
6106 	rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo);
6107 
6108 	return 0;
6109 }
6110 
6111 int
6112 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
6113 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6114 {
6115 	struct rte_eth_dev *dev;
6116 	int ret;
6117 
6118 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6119 	dev = &rte_eth_devices[port_id];
6120 
6121 	ret = eth_dev_validate_rx_queue(dev, queue_id);
6122 	if (unlikely(ret != 0))
6123 		return ret;
6124 
6125 	if (*dev->dev_ops->recycle_rxq_info_get == NULL)
6126 		return -ENOTSUP;
6127 
6128 	dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info);
6129 
6130 	return 0;
6131 }
6132 
6133 int
6134 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
6135 			  struct rte_eth_burst_mode *mode)
6136 {
6137 	struct rte_eth_dev *dev;
6138 	int ret;
6139 
6140 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6141 	dev = &rte_eth_devices[port_id];
6142 
6143 	if (queue_id >= dev->data->nb_rx_queues) {
6144 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6145 		return -EINVAL;
6146 	}
6147 
6148 	if (mode == NULL) {
6149 		RTE_ETHDEV_LOG_LINE(ERR,
6150 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL",
6151 			port_id, queue_id);
6152 		return -EINVAL;
6153 	}
6154 
6155 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
6156 		return -ENOTSUP;
6157 	memset(mode, 0, sizeof(*mode));
6158 	ret = eth_err(port_id,
6159 		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
6160 
6161 	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);
6162 
6163 	return ret;
6164 }
6165 
6166 int
6167 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
6168 			  struct rte_eth_burst_mode *mode)
6169 {
6170 	struct rte_eth_dev *dev;
6171 	int ret;
6172 
6173 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6174 	dev = &rte_eth_devices[port_id];
6175 
6176 	if (queue_id >= dev->data->nb_tx_queues) {
6177 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6178 		return -EINVAL;
6179 	}
6180 
6181 	if (mode == NULL) {
6182 		RTE_ETHDEV_LOG_LINE(ERR,
6183 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL",
6184 			port_id, queue_id);
6185 		return -EINVAL;
6186 	}
6187 
6188 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
6189 		return -ENOTSUP;
6190 	memset(mode, 0, sizeof(*mode));
6191 	ret = eth_err(port_id,
6192 		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
6193 
6194 	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);
6195 
6196 	return ret;
6197 }
6198 
6199 int
6200 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
6201 		struct rte_power_monitor_cond *pmc)
6202 {
6203 	struct rte_eth_dev *dev;
6204 	int ret;
6205 
6206 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6207 	dev = &rte_eth_devices[port_id];
6208 
6209 	if (queue_id >= dev->data->nb_rx_queues) {
6210 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6211 		return -EINVAL;
6212 	}
6213 
6214 	if (pmc == NULL) {
6215 		RTE_ETHDEV_LOG_LINE(ERR,
6216 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL",
6217 			port_id, queue_id);
6218 		return -EINVAL;
6219 	}
6220 
6221 	if (*dev->dev_ops->get_monitor_addr == NULL)
6222 		return -ENOTSUP;
6223 	ret = eth_err(port_id,
6224 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
6225 
6226 	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);
6227 
6228 	return ret;
6229 }
6230 
6231 int
6232 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
6233 			     struct rte_ether_addr *mc_addr_set,
6234 			     uint32_t nb_mc_addr)
6235 {
6236 	struct rte_eth_dev *dev;
6237 	int ret;
6238 
6239 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6240 	dev = &rte_eth_devices[port_id];
6241 
6242 	if (*dev->dev_ops->set_mc_addr_list == NULL)
6243 		return -ENOTSUP;
6244 	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
6245 						mc_addr_set, nb_mc_addr));
6246 
6247 	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
6248 					  ret);
6249 
6250 	return ret;
6251 }
6252 
6253 int
6254 rte_eth_timesync_enable(uint16_t port_id)
6255 {
6256 	struct rte_eth_dev *dev;
6257 	int ret;
6258 
6259 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6260 	dev = &rte_eth_devices[port_id];
6261 
6262 	if (*dev->dev_ops->timesync_enable == NULL)
6263 		return -ENOTSUP;
6264 	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
6265 
6266 	rte_eth_trace_timesync_enable(port_id, ret);
6267 
6268 	return ret;
6269 }
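
/*
 * Illustrative PTP usage sketch (hypothetical names, error handling
 * omitted): enable timesync, then pull the device clock and the timestamp
 * of a received PTP frame. The flags argument of the Rx timestamp read is
 * device specific; 0 is used here only as a placeholder.
 *
 *	struct timespec dev_time, rx_ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &dev_time);
 *	rte_eth_timesync_read_rx_timestamp(port_id, &rx_ts, 0);
 */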
6270 
6271 int
6272 rte_eth_timesync_disable(uint16_t port_id)
6273 {
6274 	struct rte_eth_dev *dev;
6275 	int ret;
6276 
6277 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6278 	dev = &rte_eth_devices[port_id];
6279 
6280 	if (*dev->dev_ops->timesync_disable == NULL)
6281 		return -ENOTSUP;
6282 	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
6283 
6284 	rte_eth_trace_timesync_disable(port_id, ret);
6285 
6286 	return ret;
6287 }
6288 
6289 int
6290 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
6291 				   uint32_t flags)
6292 {
6293 	struct rte_eth_dev *dev;
6294 	int ret;
6295 
6296 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6297 	dev = &rte_eth_devices[port_id];
6298 
6299 	if (timestamp == NULL) {
6300 		RTE_ETHDEV_LOG_LINE(ERR,
6301 			"Cannot read ethdev port %u Rx timestamp to NULL",
6302 			port_id);
6303 		return -EINVAL;
6304 	}
6305 
6306 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
6307 		return -ENOTSUP;
6308 
6309 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
6310 			       (dev, timestamp, flags));
6311 
6312 	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
6313 						 ret);
6314 
6315 	return ret;
6316 }
6317 
6318 int
6319 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
6320 				   struct timespec *timestamp)
6321 {
6322 	struct rte_eth_dev *dev;
6323 	int ret;
6324 
6325 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6326 	dev = &rte_eth_devices[port_id];
6327 
6328 	if (timestamp == NULL) {
6329 		RTE_ETHDEV_LOG_LINE(ERR,
6330 			"Cannot read ethdev port %u Tx timestamp to NULL",
6331 			port_id);
6332 		return -EINVAL;
6333 	}
6334 
6335 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
6336 		return -ENOTSUP;
6337 
6338 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
6339 			       (dev, timestamp));
6340 
6341 	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);
6342 
6343 	return ret;
6345 }
6346 
6347 int
6348 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
6349 {
6350 	struct rte_eth_dev *dev;
6351 	int ret;
6352 
6353 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6354 	dev = &rte_eth_devices[port_id];
6355 
6356 	if (*dev->dev_ops->timesync_adjust_time == NULL)
6357 		return -ENOTSUP;
6358 	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
6359 
6360 	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);
6361 
6362 	return ret;
6363 }
6364 
6365 int
6366 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
6367 {
6368 	struct rte_eth_dev *dev;
6369 	int ret;
6370 
6371 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6372 	dev = &rte_eth_devices[port_id];
6373 
6374 	if (timestamp == NULL) {
6375 		RTE_ETHDEV_LOG_LINE(ERR,
6376 			"Cannot read ethdev port %u timesync time to NULL",
6377 			port_id);
6378 		return -EINVAL;
6379 	}
6380 
6381 	if (*dev->dev_ops->timesync_read_time == NULL)
6382 		return -ENOTSUP;
6383 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
6384 								timestamp));
6385 
6386 	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);
6387 
6388 	return ret;
6389 }
6390 
6391 int
6392 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
6393 {
6394 	struct rte_eth_dev *dev;
6395 	int ret;
6396 
6397 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6398 	dev = &rte_eth_devices[port_id];
6399 
6400 	if (timestamp == NULL) {
6401 		RTE_ETHDEV_LOG_LINE(ERR,
6402 			"Cannot write ethdev port %u timesync from NULL time",
6403 			port_id);
6404 		return -EINVAL;
6405 	}
6406 
6407 	if (*dev->dev_ops->timesync_write_time == NULL)
6408 		return -ENOTSUP;
6409 	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
6410 								timestamp));
6411 
6412 	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);
6413 
6414 	return ret;
6415 }
6416 
6417 int
6418 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
6419 {
6420 	struct rte_eth_dev *dev;
6421 	int ret;
6422 
6423 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6424 	dev = &rte_eth_devices[port_id];
6425 
6426 	if (clock == NULL) {
6427 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot read ethdev port %u clock to NULL",
6428 			port_id);
6429 		return -EINVAL;
6430 	}
6431 
6432 	if (*dev->dev_ops->read_clock == NULL)
6433 		return -ENOTSUP;
6434 	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
6435 
6436 	rte_eth_trace_read_clock(port_id, clock, ret);
6437 
6438 	return ret;
6439 }
6440 
6441 int
6442 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
6443 {
6444 	struct rte_dev_reg_info reg_info = { 0 };
6445 	int ret;
6446 
6447 	if (info == NULL) {
6448 		RTE_ETHDEV_LOG_LINE(ERR,
6449 			"Cannot get ethdev port %u register info to NULL",
6450 			port_id);
6451 		return -EINVAL;
6452 	}
6453 
6454 	reg_info.length = info->length;
6455 	reg_info.data = info->data;
6456 
6457 	ret = rte_eth_dev_get_reg_info_ext(port_id, &reg_info);
6458 	if (ret != 0)
6459 		return ret;
6460 
6461 	info->length = reg_info.length;
6462 	info->width = reg_info.width;
6463 	info->version = reg_info.version;
6464 	info->offset = reg_info.offset;
6465 
6466 	return 0;
6467 }
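
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted),
 * assuming the documented two-call convention where a NULL data pointer
 * asks the driver to report only the register count and width:
 *
 *	struct rte_dev_reg_info info = { 0 };
 *
 *	rte_eth_dev_get_reg_info(port_id, &info);
 *	info.data = calloc(info.length, info.width);
 *	rte_eth_dev_get_reg_info(port_id, &info);
 */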
6468 
6469 int
6470 rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info)
6471 {
6472 	struct rte_eth_dev *dev;
6473 	uint32_t i;
6474 	int ret;
6475 
6476 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6477 	dev = &rte_eth_devices[port_id];
6478 
6479 	if (info == NULL) {
6480 		RTE_ETHDEV_LOG_LINE(ERR,
6481 			"Cannot get ethdev port %u register info to NULL",
6482 			port_id);
6483 		return -EINVAL;
6484 	}
6485 
6486 	if (info->names != NULL && info->length != 0)
6487 		memset(info->names, 0, sizeof(struct rte_eth_reg_name) * info->length);
6488 
6489 	if (*dev->dev_ops->get_reg == NULL)
6490 		return -ENOTSUP;
6491 	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
6492 
6493 	rte_ethdev_trace_get_reg_info(port_id, info, ret);
6494 
6495 	/* Report default names if the driver does not provide them. */
6496 	if (ret == 0 && info->names != NULL && strlen(info->names[0].name) == 0) {
6497 		for (i = 0; i < info->length; i++)
6498 			snprintf(info->names[i].name, RTE_ETH_REG_NAME_SIZE,
6499 				"index_%u", info->offset + i);
6500 	}
6501 	return ret;
6502 }
6503 
6504 int
6505 rte_eth_dev_get_eeprom_length(uint16_t port_id)
6506 {
6507 	struct rte_eth_dev *dev;
6508 	int ret;
6509 
6510 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6511 	dev = &rte_eth_devices[port_id];
6512 
6513 	if (*dev->dev_ops->get_eeprom_length == NULL)
6514 		return -ENOTSUP;
6515 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
6516 
6517 	rte_ethdev_trace_get_eeprom_length(port_id, ret);
6518 
6519 	return ret;
6520 }
6521 
6522 int
6523 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6524 {
6525 	struct rte_eth_dev *dev;
6526 	int ret;
6527 
6528 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6529 	dev = &rte_eth_devices[port_id];
6530 
6531 	if (info == NULL) {
6532 		RTE_ETHDEV_LOG_LINE(ERR,
6533 			"Cannot get ethdev port %u EEPROM info to NULL",
6534 			port_id);
6535 		return -EINVAL;
6536 	}
6537 
6538 	if (*dev->dev_ops->get_eeprom == NULL)
6539 		return -ENOTSUP;
6540 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
6541 
6542 	rte_ethdev_trace_get_eeprom(port_id, info, ret);
6543 
6544 	return ret;
6545 }
6546 
6547 int
6548 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6549 {
6550 	struct rte_eth_dev *dev;
6551 	int ret;
6552 
6553 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6554 	dev = &rte_eth_devices[port_id];
6555 
6556 	if (info == NULL) {
6557 		RTE_ETHDEV_LOG_LINE(ERR,
6558 			"Cannot set ethdev port %u EEPROM from NULL info",
6559 			port_id);
6560 		return -EINVAL;
6561 	}
6562 
6563 	if (*dev->dev_ops->set_eeprom == NULL)
6564 		return -ENOTSUP;
6565 	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
6566 
6567 	rte_ethdev_trace_set_eeprom(port_id, info, ret);
6568 
6569 	return ret;
6570 }
6571 
6572 int
6573 rte_eth_dev_get_module_info(uint16_t port_id,
6574 			    struct rte_eth_dev_module_info *modinfo)
6575 {
6576 	struct rte_eth_dev *dev;
6577 	int ret;
6578 
6579 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6580 	dev = &rte_eth_devices[port_id];
6581 
6582 	if (modinfo == NULL) {
6583 		RTE_ETHDEV_LOG_LINE(ERR,
6584 			"Cannot get ethdev port %u EEPROM module info to NULL",
6585 			port_id);
6586 		return -EINVAL;
6587 	}
6588 
6589 	if (*dev->dev_ops->get_module_info == NULL)
6590 		return -ENOTSUP;
6591 	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);
6592 
6593 	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);
6594 
6595 	return ret;
6596 }
6597 
6598 int
6599 rte_eth_dev_get_module_eeprom(uint16_t port_id,
6600 			      struct rte_dev_eeprom_info *info)
6601 {
6602 	struct rte_eth_dev *dev;
6603 	int ret;
6604 
6605 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6606 	dev = &rte_eth_devices[port_id];
6607 
6608 	if (info == NULL) {
6609 		RTE_ETHDEV_LOG_LINE(ERR,
6610 			"Cannot get ethdev port %u module EEPROM info to NULL",
6611 			port_id);
6612 		return -EINVAL;
6613 	}
6614 
6615 	if (info->data == NULL) {
6616 		RTE_ETHDEV_LOG_LINE(ERR,
6617 			"Cannot get ethdev port %u module EEPROM data to NULL",
6618 			port_id);
6619 		return -EINVAL;
6620 	}
6621 
6622 	if (info->length == 0) {
6623 		RTE_ETHDEV_LOG_LINE(ERR,
6624 			"Cannot get ethdev port %u module EEPROM to data with zero size",
6625 			port_id);
6626 		return -EINVAL;
6627 	}
6628 
6629 	if (*dev->dev_ops->get_module_eeprom == NULL)
6630 		return -ENOTSUP;
6631 	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);
6632 
6633 	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);
6634 
6635 	return ret;
6636 }
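
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted):
 * query the module type and EEPROM size first, then read the contents.
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info eeprom = { 0 };
 *
 *	rte_eth_dev_get_module_info(port_id, &modinfo);
 *	eeprom.length = modinfo.eeprom_len;
 *	eeprom.data = malloc(modinfo.eeprom_len);
 *	rte_eth_dev_get_module_eeprom(port_id, &eeprom);
 */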
6637 
6638 int
6639 rte_eth_dev_get_dcb_info(uint16_t port_id,
6640 			     struct rte_eth_dcb_info *dcb_info)
6641 {
6642 	struct rte_eth_dev *dev;
6643 	int ret;
6644 
6645 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6646 	dev = &rte_eth_devices[port_id];
6647 
6648 	if (dcb_info == NULL) {
6649 		RTE_ETHDEV_LOG_LINE(ERR,
6650 			"Cannot get ethdev port %u DCB info to NULL",
6651 			port_id);
6652 		return -EINVAL;
6653 	}
6654 
6655 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
6656 
6657 	if (*dev->dev_ops->get_dcb_info == NULL)
6658 		return -ENOTSUP;
6659 	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
6660 
6661 	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);
6662 
6663 	return ret;
6664 }
6665 
6666 static void
6667 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
6668 		const struct rte_eth_desc_lim *desc_lim)
6669 {
6670 	/* Upcast to uint32 to avoid potential overflow with RTE_ALIGN_CEIL(). */
6671 	uint32_t nb_desc_32 = (uint32_t)*nb_desc;
6672 
6673 	if (desc_lim->nb_align != 0)
6674 		nb_desc_32 = RTE_ALIGN_CEIL(nb_desc_32, desc_lim->nb_align);
6675 
6676 	if (desc_lim->nb_max != 0)
6677 		nb_desc_32 = RTE_MIN(nb_desc_32, desc_lim->nb_max);
6678 
6679 	nb_desc_32 = RTE_MAX(nb_desc_32, desc_lim->nb_min);
6680 
6681 	/* Assign clipped u32 back to u16. */
6682 	*nb_desc = (uint16_t)nb_desc_32;
6683 }
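
/*
 * Worked example of the clamping above, assuming hypothetical limits
 * nb_min = 64, nb_max = 4096, nb_align = 32: a request for 1000 descriptors
 * is first aligned up to 1024, which already sits within [64, 4096]; a
 * request for 0 survives the alignment and nb_max steps unchanged and is
 * then raised to the 64-descriptor minimum.
 */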
6684 
6685 int
6686 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
6687 				 uint16_t *nb_rx_desc,
6688 				 uint16_t *nb_tx_desc)
6689 {
6690 	struct rte_eth_dev_info dev_info;
6691 	int ret;
6692 
6693 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6694 
6695 	ret = rte_eth_dev_info_get(port_id, &dev_info);
6696 	if (ret != 0)
6697 		return ret;
6698 
6699 	if (nb_rx_desc != NULL)
6700 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
6701 
6702 	if (nb_tx_desc != NULL)
6703 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
6704 
6705 	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);
6706 
6707 	return 0;
6708 }
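
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted;
 * mb_pool is assumed to exist): clamp the requested ring sizes to the
 * device limits before setting up the queues.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *			       NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */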
6709 
6710 int
6711 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
6712 				   struct rte_eth_hairpin_cap *cap)
6713 {
6714 	struct rte_eth_dev *dev;
6715 	int ret;
6716 
6717 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6718 	dev = &rte_eth_devices[port_id];
6719 
6720 	if (cap == NULL) {
6721 		RTE_ETHDEV_LOG_LINE(ERR,
6722 			"Cannot get ethdev port %u hairpin capability to NULL",
6723 			port_id);
6724 		return -EINVAL;
6725 	}
6726 
6727 	if (*dev->dev_ops->hairpin_cap_get == NULL)
6728 		return -ENOTSUP;
6729 	memset(cap, 0, sizeof(*cap));
6730 	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
6731 
6732 	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);
6733 
6734 	return ret;
6735 }
6736 
6737 int
6738 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
6739 {
6740 	struct rte_eth_dev *dev;
6741 	int ret;
6742 
6743 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6744 	dev = &rte_eth_devices[port_id];
6745 
6746 	if (pool == NULL) {
6747 		RTE_ETHDEV_LOG_LINE(ERR,
6748 			"Cannot test ethdev port %u mempool operation from NULL pool",
6749 			port_id);
6750 		return -EINVAL;
6751 	}
6752 
6753 	if (*dev->dev_ops->pool_ops_supported == NULL)
6754 		return 1; /* all pools are supported */
6755 
6756 	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);
6757 
6758 	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);
6759 
6760 	return ret;
6761 }
6762 
6763 int
6764 rte_eth_representor_info_get(uint16_t port_id,
6765 			     struct rte_eth_representor_info *info)
6766 {
6767 	struct rte_eth_dev *dev;
6768 	int ret;
6769 
6770 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6771 	dev = &rte_eth_devices[port_id];
6772 
6773 	if (*dev->dev_ops->representor_info_get == NULL)
6774 		return -ENOTSUP;
6775 	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6776 
6777 	rte_eth_trace_representor_info_get(port_id, info, ret);
6778 
6779 	return ret;
6780 }
6781 
6782 int
6783 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
6784 {
6785 	struct rte_eth_dev *dev;
6786 	int ret;
6787 
6788 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6789 	dev = &rte_eth_devices[port_id];
6790 
6791 	if (dev->data->dev_configured != 0) {
6792 		RTE_ETHDEV_LOG_LINE(ERR,
6793 			"The port (ID=%"PRIu16") is already configured",
6794 			port_id);
6795 		return -EBUSY;
6796 	}
6797 
6798 	if (features == NULL) {
6799 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid features (NULL)");
6800 		return -EINVAL;
6801 	}
6802 
6803 	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
6804 			rte_flow_restore_info_dynflag_register() < 0)
6805 		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;
6806 
6807 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
6808 		return -ENOTSUP;
6809 	ret = eth_err(port_id,
6810 		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
6811 
6812 	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);
6813 
6814 	return ret;
6815 }
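
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted):
 * negotiation must happen before rte_eth_dev_configure(), as enforced
 * above; on return, *features holds the subset the driver agreed to.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	rte_eth_rx_metadata_negotiate(port_id, &features);
 *	if (features & RTE_ETH_RX_METADATA_USER_MARK)
 *		install_mark_flow_rules(port_id);
 */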
6816 
6817 int
6818 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
6819 		struct rte_eth_ip_reassembly_params *reassembly_capa)
6820 {
6821 	struct rte_eth_dev *dev;
6822 	int ret;
6823 
6824 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6825 	dev = &rte_eth_devices[port_id];
6826 
6827 	if (dev->data->dev_configured == 0) {
6828 		RTE_ETHDEV_LOG_LINE(ERR,
6829 			"port_id=%u is not configured, cannot get IP reassembly capability",
6830 			port_id);
6831 		return -EINVAL;
6832 	}
6833 
6834 	if (reassembly_capa == NULL) {
6835 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly capability to NULL");
6836 		return -EINVAL;
6837 	}
6838 
6839 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
6840 		return -ENOTSUP;
6841 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
6842 
6843 	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
6844 					(dev, reassembly_capa));
6845 
6846 	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
6847 						   ret);
6848 
6849 	return ret;
6850 }
6851 
6852 int
6853 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
6854 		struct rte_eth_ip_reassembly_params *conf)
6855 {
6856 	struct rte_eth_dev *dev;
6857 	int ret;
6858 
6859 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6860 	dev = &rte_eth_devices[port_id];
6861 
6862 	if (dev->data->dev_configured == 0) {
6863 		RTE_ETHDEV_LOG_LINE(ERR,
6864 			"port_id=%u is not configured, cannot get IP reassembly configuration",
6865 			port_id);
6866 		return -EINVAL;
6867 	}
6868 
6869 	if (conf == NULL) {
6870 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly info to NULL");
6871 		return -EINVAL;
6872 	}
6873 
6874 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
6875 		return -ENOTSUP;
6876 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
6877 	ret = eth_err(port_id,
6878 		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
6879 
6880 	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);
6881 
6882 	return ret;
6883 }
6884 
6885 int
6886 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
6887 		const struct rte_eth_ip_reassembly_params *conf)
6888 {
6889 	struct rte_eth_dev *dev;
6890 	int ret;
6891 
6892 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6893 	dev = &rte_eth_devices[port_id];
6894 
6895 	if (dev->data->dev_configured == 0) {
6896 		RTE_ETHDEV_LOG_LINE(ERR,
6897 			"port_id=%u is not configured, cannot set IP reassembly configuration",
6898 			port_id);
6899 		return -EINVAL;
6900 	}
6901 
6902 	if (dev->data->dev_started != 0) {
6903 		RTE_ETHDEV_LOG_LINE(ERR,
6904 			"port_id=%u is started, cannot configure IP reassembly params.",
6905 			port_id);
6906 		return -EINVAL;
6907 	}
6908 
6909 	if (conf == NULL) {
6910 		RTE_ETHDEV_LOG_LINE(ERR,
6911 				"Invalid IP reassembly configuration (NULL)");
6912 		return -EINVAL;
6913 	}
6914 
6915 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6916 		return -ENOTSUP;
6917 	ret = eth_err(port_id,
6918 		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6919 
6920 	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);
6921 
6922 	return ret;
6923 }
6924 
6925 int
6926 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
6927 {
6928 	struct rte_eth_dev *dev;
6929 
6930 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6931 	dev = &rte_eth_devices[port_id];
6932 
6933 	if (file == NULL) {
6934 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6935 		return -EINVAL;
6936 	}
6937 
6938 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6939 		return -ENOTSUP;
6940 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6941 }
6942 
6943 int
6944 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6945 			   uint16_t offset, uint16_t num, FILE *file)
6946 {
6947 	struct rte_eth_dev *dev;
6948 
6949 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6950 	dev = &rte_eth_devices[port_id];
6951 
6952 	if (queue_id >= dev->data->nb_rx_queues) {
6953 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6954 		return -EINVAL;
6955 	}
6956 
6957 	if (file == NULL) {
6958 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6959 		return -EINVAL;
6960 	}
6961 
6962 	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
6963 		return -ENOTSUP;
6964 
6965 	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
6966 						queue_id, offset, num, file));
6967 }
6968 
6969 int
6970 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6971 			   uint16_t offset, uint16_t num, FILE *file)
6972 {
6973 	struct rte_eth_dev *dev;
6974 
6975 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6976 	dev = &rte_eth_devices[port_id];
6977 
6978 	if (queue_id >= dev->data->nb_tx_queues) {
6979 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6980 		return -EINVAL;
6981 	}
6982 
6983 	if (file == NULL) {
6984 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6985 		return -EINVAL;
6986 	}
6987 
6988 	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
6989 		return -ENOTSUP;
6990 
6991 	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
6992 						queue_id, offset, num, file));
6993 }
6994 
6995 int
6996 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
6997 {
6998 	size_t i;
6999 	int j;
7000 	struct rte_eth_dev *dev;
7001 	const uint32_t *all_types;
7002 	size_t no_of_elements = 0;
7003 
7004 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
7005 	dev = &rte_eth_devices[port_id];
7006 
7007 	if (ptypes == NULL && num > 0) {
7008 		RTE_ETHDEV_LOG_LINE(ERR,
7009 			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero",
7010 			port_id);
7011 		return -EINVAL;
7012 	}
7013 
7014 	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
7015 		return -ENOTSUP;
7016 	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev,
7017 							      &no_of_elements);
7018 
7019 	if (all_types == NULL)
7020 		return 0;
7021 
7022 	for (i = 0, j = 0; i < no_of_elements; ++i) {
7023 		if (j < num) {
7024 			ptypes[j] = all_types[i];
7025 
7026 			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
7027 							port_id, j, ptypes[j]);
7028 		}
7029 		j++;
7030 	}
7031 
7032 	return j;
7033 }
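
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted):
 * as the loop above shows, the return value is the full element count even
 * when it exceeds the supplied array size, so the count can be queried
 * first with a NULL, zero-sized array.
 *
 *	int n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
 *							      NULL, 0);
 *	uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *	rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, ptypes, n);
 */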
7034 
7035 int rte_eth_dev_count_aggr_ports(uint16_t port_id)
7036 {
7037 	struct rte_eth_dev *dev;
7038 	int ret;
7039 
7040 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
7041 	dev = &rte_eth_devices[port_id];
7042 
7043 	if (*dev->dev_ops->count_aggr_ports == NULL)
7044 		return 0;
7045 	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));
7046 
7047 	rte_eth_trace_count_aggr_ports(port_id, ret);
7048 
7049 	return ret;
7050 }
7051 
7052 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
7053 				     uint8_t affinity)
7054 {
7055 	struct rte_eth_dev *dev;
7056 	int aggr_ports;
7057 	int ret;
7058 
7059 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
7060 	dev = &rte_eth_devices[port_id];
7061 
7062 	if (tx_queue_id >= dev->data->nb_tx_queues) {
7063 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
7064 		return -EINVAL;
7065 	}
7066 
7067 	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
7068 		return -ENOTSUP;
7069 
7070 	if (dev->data->dev_configured == 0) {
7071 		RTE_ETHDEV_LOG_LINE(ERR,
7072 			"Port %u must be configured before Tx affinity mapping",
7073 			port_id);
7074 		return -EINVAL;
7075 	}
7076 
7077 	if (dev->data->dev_started) {
7078 		RTE_ETHDEV_LOG_LINE(ERR,
7079 			"Port %u must be stopped to allow configuration",
7080 			port_id);
7081 		return -EBUSY;
7082 	}
7083 
7084 	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
7085 	if (aggr_ports == 0) {
7086 		RTE_ETHDEV_LOG_LINE(ERR,
7087 			"Port %u has no aggregated port",
7088 			port_id);
7089 		return -ENOTSUP;
7090 	}
7091 
7092 	if (affinity > aggr_ports) {
7093 		RTE_ETHDEV_LOG_LINE(ERR,
7094 			"Port %u map invalid affinity %u exceeds the maximum number %u",
7095 			port_id, affinity, aggr_ports);
7096 		return -EINVAL;
7097 	}
7098 
7099 	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
7100 				tx_queue_id, affinity));
7101 
7102 	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);
7103 
7104 	return ret;
7105 }
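
/*
 * Illustrative usage sketch (hypothetical names, error handling omitted):
 * on a configured but stopped port, pin each Tx queue to one of the
 * aggregated member ports. Judging by the range check above, valid
 * affinities run from 1 to the aggregated port count, with 0 left to the
 * driver default.
 *
 *	int aggr = rte_eth_dev_count_aggr_ports(port_id);
 *	uint16_t q;
 *
 *	for (q = 0; q < nb_txq && aggr > 0; q++)
 *		rte_eth_dev_map_aggr_tx_affinity(port_id, q,
 *						 q % aggr + 1);
 */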
7106 
7107 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
7108