xref: /dpdk/lib/ethdev/rte_ethdev.c (revision ef2a3f3b4c9e6dff1690ba9c63b8a87d61264859)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13 
14 #include <bus_driver.h>
15 #include <rte_log.h>
16 #include <rte_interrupts.h>
17 #include <rte_kvargs.h>
18 #include <rte_memcpy.h>
19 #include <rte_common.h>
20 #include <rte_mempool.h>
21 #include <rte_malloc.h>
22 #include <rte_mbuf.h>
23 #include <rte_errno.h>
24 #include <rte_spinlock.h>
25 #include <rte_string_fns.h>
26 #include <rte_class.h>
27 #include <rte_ether.h>
28 #include <rte_telemetry.h>
29 
30 #include "rte_ethdev.h"
31 #include "rte_ethdev_trace_fp.h"
32 #include "ethdev_driver.h"
33 #include "rte_flow_driver.h"
34 #include "ethdev_profile.h"
35 #include "ethdev_private.h"
36 #include "ethdev_trace.h"
37 #include "sff_telemetry.h"
38 
39 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
40 
41 /* public fast-path API */
42 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
43 
44 /* spinlock for add/remove Rx callbacks */
45 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
46 
47 /* spinlock for add/remove Tx callbacks */
48 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
49 
50 /* store statistics names and their offsets in the stats structure */
51 struct rte_eth_xstats_name_off {
52 	char name[RTE_ETH_XSTATS_NAME_SIZE];
53 	unsigned offset;
54 };
55 
56 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
57 	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
58 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
59 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
60 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
61 	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
62 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
63 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
64 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
65 		rx_nombuf)},
66 };
67 
68 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
69 
70 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
71 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
72 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
73 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
74 };
75 
76 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
77 
78 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
79 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
80 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
81 };
82 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
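
/*
 * Illustrative sketch (not part of the driver API): each table entry above
 * pairs a display name with the byte offset of the matching counter inside
 * struct rte_eth_stats, so generic xstats code can read every counter
 * without naming the fields. Assuming 'stats' was filled by
 * rte_eth_stats_get(), a reader looks roughly like:
 *
 *	struct rte_eth_stats stats;
 *	unsigned int i;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	for (i = 0; i < RTE_NB_STATS; i++) {
 *		uint64_t value = *(const uint64_t *)
 *			((const char *)&stats + eth_dev_stats_strings[i].offset);
 *		printf("%s: %" PRIu64 "\n", eth_dev_stats_strings[i].name, value);
 *	}
 */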
83 
84 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
85 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
86 
87 static const struct {
88 	uint64_t offload;
89 	const char *name;
90 } eth_dev_rx_offload_names[] = {
91 	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
92 	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
93 	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
94 	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
95 	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
96 	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
97 	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
98 	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
99 	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
100 	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
101 	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
102 	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
103 	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
104 	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
105 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
106 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
107 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
108 	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
109 };
110 
111 #undef RTE_RX_OFFLOAD_BIT2STR
112 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
113 
114 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
115 	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
116 
117 static const struct {
118 	uint64_t offload;
119 	const char *name;
120 } eth_dev_tx_offload_names[] = {
121 	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
122 	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
123 	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
124 	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
125 	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
126 	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
127 	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
128 	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
129 	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
130 	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
131 	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
132 	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
133 	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
134 	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
135 	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
136 	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
137 	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
138 	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
139 	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
140 	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
141 	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
142 	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
143 };
144 
145 #undef RTE_TX_OFFLOAD_BIT2STR
146 
147 static const struct {
148 	uint64_t offload;
149 	const char *name;
150 } rte_eth_dev_capa_names[] = {
151 	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
152 	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
153 	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
154 	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
155 	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
156 };
157 
158 enum {
159 	STAT_QMAP_TX = 0,
160 	STAT_QMAP_RX
161 };
162 
163 int
164 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
165 {
166 	int ret;
167 	struct rte_devargs devargs;
168 	const char *bus_param_key;
169 	char *bus_str = NULL;
170 	char *cls_str = NULL;
171 	int str_size;
172 
173 	if (iter == NULL) {
174 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
175 		return -EINVAL;
176 	}
177 
178 	if (devargs_str == NULL) {
179 		RTE_ETHDEV_LOG(ERR,
180 			"Cannot initialize iterator from NULL device description string\n");
181 		return -EINVAL;
182 	}
183 
184 	memset(iter, 0, sizeof(*iter));
185 	memset(&devargs, 0, sizeof(devargs));
186 
187 	/*
188 	 * The devargs string may use various syntaxes:
189 	 *   - 0000:08:00.0,representor=[1-3]
190 	 *   - pci:0000:06:00.0,representor=[0,5]
191 	 *   - class=eth,mac=00:11:22:33:44:55
192 	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
193 	 */
194 
195 	/*
196 	 * Handle pure class filter (i.e. without any bus-level argument),
197 	 * from future new syntax.
198 	 * rte_devargs_parse() is not yet supporting the new syntax,
199 	 * that's why this simple case is temporarily parsed here.
200 	 */
201 #define iter_anybus_str "class=eth,"
202 	if (strncmp(devargs_str, iter_anybus_str,
203 			strlen(iter_anybus_str)) == 0) {
204 		iter->cls_str = devargs_str + strlen(iter_anybus_str);
205 		goto end;
206 	}
207 
208 	/* Split bus, device and parameters. */
209 	ret = rte_devargs_parse(&devargs, devargs_str);
210 	if (ret != 0)
211 		goto error;
212 
213 	/*
214 	 * Assume parameters of old syntax can match only at ethdev level.
215 	 * Extra parameters will be ignored, thanks to "+" prefix.
216 	 */
217 	str_size = strlen(devargs.args) + 2;
218 	cls_str = malloc(str_size);
219 	if (cls_str == NULL) {
220 		ret = -ENOMEM;
221 		goto error;
222 	}
223 	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
224 	if (ret != str_size - 1) {
225 		ret = -EINVAL;
226 		goto error;
227 	}
228 	iter->cls_str = cls_str;
229 
230 	iter->bus = devargs.bus;
231 	if (iter->bus->dev_iterate == NULL) {
232 		ret = -ENOTSUP;
233 		goto error;
234 	}
235 
236 	/* Convert bus args to new syntax for use with new API dev_iterate. */
237 	if ((strcmp(iter->bus->name, "vdev") == 0) ||
238 		(strcmp(iter->bus->name, "fslmc") == 0) ||
239 		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
240 		bus_param_key = "name";
241 	} else if (strcmp(iter->bus->name, "pci") == 0) {
242 		bus_param_key = "addr";
243 	} else {
244 		ret = -ENOTSUP;
245 		goto error;
246 	}
247 	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
248 	bus_str = malloc(str_size);
249 	if (bus_str == NULL) {
250 		ret = -ENOMEM;
251 		goto error;
252 	}
253 	ret = snprintf(bus_str, str_size, "%s=%s",
254 			bus_param_key, devargs.name);
255 	if (ret != str_size - 1) {
256 		ret = -EINVAL;
257 		goto error;
258 	}
259 	iter->bus_str = bus_str;
260 
261 end:
262 	iter->cls = rte_class_find_by_name("eth");
263 	rte_devargs_reset(&devargs);
264 
265 	rte_eth_trace_iterator_init(devargs_str);
266 
267 	return 0;
268 
269 error:
270 	if (ret == -ENOTSUP)
271 		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
272 				iter->bus->name);
273 	rte_devargs_reset(&devargs);
274 	free(bus_str);
275 	free(cls_str);
276 	return ret;
277 }
278 
279 uint16_t
280 rte_eth_iterator_next(struct rte_dev_iterator *iter)
281 {
282 	if (iter == NULL) {
283 		RTE_ETHDEV_LOG(ERR,
284 			"Cannot get next device from NULL iterator\n");
285 		return RTE_MAX_ETHPORTS;
286 	}
287 
288 	if (iter->cls == NULL) /* invalid ethdev iterator */
289 		return RTE_MAX_ETHPORTS;
290 
291 	do { /* loop to try all matching rte_device */
292 		/* If not pure ethdev filter and */
293 		if (iter->bus != NULL &&
294 				/* not in middle of rte_eth_dev iteration, */
295 				iter->class_device == NULL) {
296 			/* get next rte_device to try. */
297 			iter->device = iter->bus->dev_iterate(
298 					iter->device, iter->bus_str, iter);
299 			if (iter->device == NULL)
300 				break; /* no more rte_device candidate */
301 		}
302 		/* A device is matching bus part, need to check ethdev part. */
303 		iter->class_device = iter->cls->dev_iterate(
304 				iter->class_device, iter->cls_str, iter);
305 		if (iter->class_device != NULL) {
306 			uint16_t id = eth_dev_to_id(iter->class_device);
307 
308 			rte_eth_trace_iterator_next(iter, id);
309 
310 			return id; /* match */
311 		}
312 	} while (iter->bus != NULL); /* need to try next rte_device */
313 
314 	/* No more ethdev port to iterate. */
315 	rte_eth_iterator_cleanup(iter);
316 	return RTE_MAX_ETHPORTS;
317 }
318 
319 void
320 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
321 {
322 	if (iter == NULL) {
323 		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
324 		return;
325 	}
326 
327 	if (iter->bus_str == NULL)
328 		return; /* nothing to free in pure class filter */
329 
330 	rte_eth_trace_iterator_cleanup(iter);
331 
332 	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
333 	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
334 	memset(iter, 0, sizeof(*iter));
335 }
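
/*
 * Usage sketch (application side, not part of this file): the iterator trio
 * above is normally driven through RTE_ETH_FOREACH_MATCHING_DEV() from
 * rte_ethdev.h, which calls rte_eth_iterator_init() once and then
 * rte_eth_iterator_next() until RTE_MAX_ETHPORTS is returned; cleanup happens
 * automatically when the iteration completes, and only an early break
 * requires an explicit rte_eth_iterator_cleanup(). use_port() below is a
 * placeholder:
 *
 *	struct rte_dev_iterator iterator;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id,
 *			"class=eth,mac=00:11:22:33:44:55", &iterator)
 *		use_port(port_id);
 */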
336 
337 uint16_t
338 rte_eth_find_next(uint16_t port_id)
339 {
340 	while (port_id < RTE_MAX_ETHPORTS &&
341 			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
342 		port_id++;
343 
344 	if (port_id >= RTE_MAX_ETHPORTS)
345 		return RTE_MAX_ETHPORTS;
346 
347 	rte_eth_trace_find_next(port_id);
348 
349 	return port_id;
350 }
351 
352 /*
353  * Macro to iterate over all valid ports for internal usage.
354  * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
355  */
356 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
357 	for (port_id = rte_eth_find_next(0); \
358 	     port_id < RTE_MAX_ETHPORTS; \
359 	     port_id = rte_eth_find_next(port_id + 1))
360 
361 uint16_t
362 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
363 {
364 	port_id = rte_eth_find_next(port_id);
365 	while (port_id < RTE_MAX_ETHPORTS &&
366 			rte_eth_devices[port_id].device != parent)
367 		port_id = rte_eth_find_next(port_id + 1);
368 
369 	rte_eth_trace_find_next_of(port_id, parent);
370 
371 	return port_id;
372 }
373 
374 uint16_t
375 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
376 {
377 	uint16_t ret;
378 
379 	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
380 	ret = rte_eth_find_next_of(port_id,
381 			rte_eth_devices[ref_port_id].device);
382 
383 	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);
384 
385 	return ret;
386 }
387 
388 static bool
389 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
390 {
391 	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
392 }
393 
394 int
395 rte_eth_dev_is_valid_port(uint16_t port_id)
396 {
397 	int is_valid;
398 
399 	if (port_id >= RTE_MAX_ETHPORTS ||
400 	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
401 		is_valid = 0;
402 	else
403 		is_valid = 1;
404 
405 	rte_ethdev_trace_is_valid_port(port_id, is_valid);
406 
407 	return is_valid;
408 }
409 
410 static int
411 eth_is_valid_owner_id(uint64_t owner_id)
412 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
413 {
414 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
415 	    eth_dev_shared_data->next_owner_id <= owner_id)
416 		return 0;
417 	return 1;
418 }
419 
420 uint64_t
421 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
422 {
423 	port_id = rte_eth_find_next(port_id);
424 	while (port_id < RTE_MAX_ETHPORTS &&
425 			rte_eth_devices[port_id].data->owner.id != owner_id)
426 		port_id = rte_eth_find_next(port_id + 1);
427 
428 	rte_eth_trace_find_next_owned_by(port_id, owner_id);
429 
430 	return port_id;
431 }
432 
433 int
434 rte_eth_dev_owner_new(uint64_t *owner_id)
435 {
436 	int ret;
437 
438 	if (owner_id == NULL) {
439 		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
440 		return -EINVAL;
441 	}
442 
443 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
444 
445 	if (eth_dev_shared_data_prepare() != NULL) {
446 		*owner_id = eth_dev_shared_data->next_owner_id++;
447 		eth_dev_shared_data->allocated_owners++;
448 		ret = 0;
449 	} else {
450 		ret = -ENOMEM;
451 	}
452 
453 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
454 
455 	rte_ethdev_trace_owner_new(*owner_id, ret);
456 
457 	return ret;
458 }
459 
460 static int
461 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
462 		       const struct rte_eth_dev_owner *new_owner)
463 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
464 {
465 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
466 	struct rte_eth_dev_owner *port_owner;
467 
468 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
469 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
470 			port_id);
471 		return -ENODEV;
472 	}
473 
474 	if (new_owner == NULL) {
475 		RTE_ETHDEV_LOG(ERR,
476 			"Cannot set ethdev port %u owner from NULL owner\n",
477 			port_id);
478 		return -EINVAL;
479 	}
480 
481 	if (!eth_is_valid_owner_id(new_owner->id) &&
482 	    !eth_is_valid_owner_id(old_owner_id)) {
483 		RTE_ETHDEV_LOG(ERR,
484 			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
485 		       old_owner_id, new_owner->id);
486 		return -EINVAL;
487 	}
488 
489 	port_owner = &rte_eth_devices[port_id].data->owner;
490 	if (port_owner->id != old_owner_id) {
491 		RTE_ETHDEV_LOG(ERR,
492 			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
493 			port_id, port_owner->name, port_owner->id);
494 		return -EPERM;
495 	}
496 
497 	/* can not truncate (same structure) */
498 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
499 
500 	port_owner->id = new_owner->id;
501 
502 	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
503 		port_id, new_owner->name, new_owner->id);
504 
505 	return 0;
506 }
507 
508 int
509 rte_eth_dev_owner_set(const uint16_t port_id,
510 		      const struct rte_eth_dev_owner *owner)
511 {
512 	int ret;
513 
514 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
515 
516 	if (eth_dev_shared_data_prepare() != NULL)
517 		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
518 	else
519 		ret = -ENOMEM;
520 
521 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
522 
523 	rte_ethdev_trace_owner_set(port_id, owner, ret);
524 
525 	return ret;
526 }
527 
528 int
529 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
530 {
531 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
532 			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
533 	int ret;
534 
535 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
536 
537 	if (eth_dev_shared_data_prepare() != NULL)
538 		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
539 	else
540 		ret = -ENOMEM;
541 
542 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
543 
544 	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);
545 
546 	return ret;
547 }
548 
549 int
550 rte_eth_dev_owner_delete(const uint64_t owner_id)
551 {
552 	uint16_t port_id;
553 	int ret = 0;
554 
555 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
556 
557 	if (eth_dev_shared_data_prepare() == NULL) {
558 		ret = -ENOMEM;
559 	} else if (eth_is_valid_owner_id(owner_id)) {
560 		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
561 			struct rte_eth_dev_data *data =
562 				rte_eth_devices[port_id].data;
563 			if (data != NULL && data->owner.id == owner_id)
564 				memset(&data->owner, 0,
565 				       sizeof(struct rte_eth_dev_owner));
566 		}
567 		RTE_ETHDEV_LOG(NOTICE,
568 			"All ports owned by %016"PRIx64" have had their owner removed\n",
569 			owner_id);
570 		eth_dev_shared_data->allocated_owners--;
571 		eth_dev_shared_data_release();
572 	} else {
573 		RTE_ETHDEV_LOG(ERR,
574 			       "Invalid owner ID=%016"PRIx64"\n",
575 			       owner_id);
576 		ret = -EINVAL;
577 	}
578 
579 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
580 
581 	rte_ethdev_trace_owner_delete(owner_id, ret);
582 
583 	return ret;
584 }
585 
586 int
587 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
588 {
589 	struct rte_eth_dev *ethdev;
590 	int ret;
591 
592 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
593 	ethdev = &rte_eth_devices[port_id];
594 
595 	if (!eth_dev_is_allocated(ethdev)) {
596 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
597 			port_id);
598 		return -ENODEV;
599 	}
600 
601 	if (owner == NULL) {
602 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
603 			port_id);
604 		return -EINVAL;
605 	}
606 
607 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
608 
609 	if (eth_dev_shared_data_prepare() != NULL) {
610 		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
611 		ret = 0;
612 	} else {
613 		ret = -ENOMEM;
614 	}
615 
616 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
617 
618 	rte_ethdev_trace_owner_get(port_id, owner, ret);
619 
620 	return ret;
621 }
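
/*
 * Usage sketch (application side, assumptions noted): a component wanting
 * exclusive control of a port typically allocates an owner ID once with
 * rte_eth_dev_owner_new() and then claims ports with rte_eth_dev_owner_set();
 * ports claimed this way are skipped by RTE_ETH_FOREACH_DEV() in other
 * components. "my_component" is only an example name:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_component" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... the port is now reserved for this component ...
 *	}
 *	...
 *	rte_eth_dev_owner_delete(owner.id);  releases every port owned by this ID
 */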
622 
623 int
624 rte_eth_dev_socket_id(uint16_t port_id)
625 {
626 	int socket_id = SOCKET_ID_ANY;
627 
628 	if (!rte_eth_dev_is_valid_port(port_id)) {
629 		rte_errno = EINVAL;
630 	} else {
631 		socket_id = rte_eth_devices[port_id].data->numa_node;
632 		if (socket_id == SOCKET_ID_ANY)
633 			rte_errno = 0;
634 	}
635 
636 	rte_ethdev_trace_socket_id(port_id, socket_id);
637 
638 	return socket_id;
639 }
640 
641 void *
642 rte_eth_dev_get_sec_ctx(uint16_t port_id)
643 {
644 	void *ctx;
645 
646 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
647 	ctx = rte_eth_devices[port_id].security_ctx;
648 
649 	rte_ethdev_trace_get_sec_ctx(port_id, ctx);
650 
651 	return ctx;
652 }
653 
654 uint16_t
655 rte_eth_dev_count_avail(void)
656 {
657 	uint16_t p;
658 	uint16_t count;
659 
660 	count = 0;
661 
662 	RTE_ETH_FOREACH_DEV(p)
663 		count++;
664 
665 	rte_ethdev_trace_count_avail(count);
666 
667 	return count;
668 }
669 
670 uint16_t
671 rte_eth_dev_count_total(void)
672 {
673 	uint16_t port, count = 0;
674 
675 	RTE_ETH_FOREACH_VALID_DEV(port)
676 		count++;
677 
678 	rte_ethdev_trace_count_total(count);
679 
680 	return count;
681 }
682 
683 int
684 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
685 {
686 	char *tmp;
687 
688 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
689 
690 	if (name == NULL) {
691 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
692 			port_id);
693 		return -EINVAL;
694 	}
695 
696 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
697 	/* shouldn't check 'rte_eth_devices[port_id].data',
698 	 * because it might be overwritten by a VDEV PMD */
699 	tmp = eth_dev_shared_data->data[port_id].name;
700 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
701 
702 	strcpy(name, tmp);
703 
704 	rte_ethdev_trace_get_name_by_port(port_id, name);
705 
706 	return 0;
707 }
708 
709 int
710 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
711 {
712 	int ret = -ENODEV;
713 	uint16_t pid;
714 
715 	if (name == NULL) {
716 		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
717 		return -EINVAL;
718 	}
719 
720 	if (port_id == NULL) {
721 		RTE_ETHDEV_LOG(ERR,
722 			"Cannot get port ID to NULL for %s\n", name);
723 		return -EINVAL;
724 	}
725 
726 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
727 	RTE_ETH_FOREACH_VALID_DEV(pid) {
728 		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
729 			continue;
730 
731 		*port_id = pid;
732 		rte_ethdev_trace_get_port_by_name(name, *port_id);
733 		ret = 0;
734 		break;
735 	}
736 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
737 
738 	return ret;
739 }
740 
741 int
742 eth_err(uint16_t port_id, int ret)
743 {
744 	if (ret == 0)
745 		return 0;
746 	if (rte_eth_dev_is_removed(port_id))
747 		return -EIO;
748 	return ret;
749 }
750 
751 static int
752 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
753 {
754 	uint16_t port_id;
755 
756 	if (rx_queue_id >= dev->data->nb_rx_queues) {
757 		port_id = dev->data->port_id;
758 		RTE_ETHDEV_LOG(ERR,
759 			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
760 			       rx_queue_id, port_id);
761 		return -EINVAL;
762 	}
763 
764 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
765 		port_id = dev->data->port_id;
766 		RTE_ETHDEV_LOG(ERR,
767 			       "Queue %u of device with port_id=%u has not been setup\n",
768 			       rx_queue_id, port_id);
769 		return -EINVAL;
770 	}
771 
772 	return 0;
773 }
774 
775 static int
776 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
777 {
778 	uint16_t port_id;
779 
780 	if (tx_queue_id >= dev->data->nb_tx_queues) {
781 		port_id = dev->data->port_id;
782 		RTE_ETHDEV_LOG(ERR,
783 			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
784 			       tx_queue_id, port_id);
785 		return -EINVAL;
786 	}
787 
788 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
789 		port_id = dev->data->port_id;
790 		RTE_ETHDEV_LOG(ERR,
791 			       "Queue %u of device with port_id=%u has not been setup\n",
792 			       tx_queue_id, port_id);
793 		return -EINVAL;
794 	}
795 
796 	return 0;
797 }
798 
799 int
800 rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
801 {
802 	struct rte_eth_dev *dev;
803 
804 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
805 	dev = &rte_eth_devices[port_id];
806 
807 	return eth_dev_validate_rx_queue(dev, queue_id);
808 }
809 
810 int
811 rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
812 {
813 	struct rte_eth_dev *dev;
814 
815 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
816 	dev = &rte_eth_devices[port_id];
817 
818 	return eth_dev_validate_tx_queue(dev, queue_id);
819 }
820 
821 int
822 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
823 {
824 	struct rte_eth_dev *dev;
825 	int ret;
826 
827 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
828 	dev = &rte_eth_devices[port_id];
829 
830 	if (!dev->data->dev_started) {
831 		RTE_ETHDEV_LOG(ERR,
832 			"Port %u must be started before starting any queue\n",
833 			port_id);
834 		return -EINVAL;
835 	}
836 
837 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
838 	if (ret != 0)
839 		return ret;
840 
841 	if (*dev->dev_ops->rx_queue_start == NULL)
842 		return -ENOTSUP;
843 
844 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
845 		RTE_ETHDEV_LOG(INFO,
846 			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
847 			rx_queue_id, port_id);
848 		return -EINVAL;
849 	}
850 
851 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
852 		RTE_ETHDEV_LOG(INFO,
853 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
854 			rx_queue_id, port_id);
855 		return 0;
856 	}
857 
858 	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
859 
860 	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);
861 
862 	return ret;
863 }
864 
865 int
866 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
867 {
868 	struct rte_eth_dev *dev;
869 	int ret;
870 
871 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
872 	dev = &rte_eth_devices[port_id];
873 
874 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
875 	if (ret != 0)
876 		return ret;
877 
878 	if (*dev->dev_ops->rx_queue_stop == NULL)
879 		return -ENOTSUP;
880 
881 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
882 		RTE_ETHDEV_LOG(INFO,
883 			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
884 			rx_queue_id, port_id);
885 		return -EINVAL;
886 	}
887 
888 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
889 		RTE_ETHDEV_LOG(INFO,
890 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
891 			rx_queue_id, port_id);
892 		return 0;
893 	}
894 
895 	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
896 
897 	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);
898 
899 	return ret;
900 }
901 
902 int
903 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
904 {
905 	struct rte_eth_dev *dev;
906 	int ret;
907 
908 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
909 	dev = &rte_eth_devices[port_id];
910 
911 	if (!dev->data->dev_started) {
912 		RTE_ETHDEV_LOG(ERR,
913 			"Port %u must be started before starting any queue\n",
914 			port_id);
915 		return -EINVAL;
916 	}
917 
918 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
919 	if (ret != 0)
920 		return ret;
921 
922 	if (*dev->dev_ops->tx_queue_start == NULL)
923 		return -ENOTSUP;
924 
925 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
926 		RTE_ETHDEV_LOG(INFO,
927 			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
928 			tx_queue_id, port_id);
929 		return -EINVAL;
930 	}
931 
932 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
933 		RTE_ETHDEV_LOG(INFO,
934 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
935 			tx_queue_id, port_id);
936 		return 0;
937 	}
938 
939 	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
940 
941 	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);
942 
943 	return ret;
944 }
945 
946 int
947 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
948 {
949 	struct rte_eth_dev *dev;
950 	int ret;
951 
952 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
953 	dev = &rte_eth_devices[port_id];
954 
955 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
956 	if (ret != 0)
957 		return ret;
958 
959 	if (*dev->dev_ops->tx_queue_stop == NULL)
960 		return -ENOTSUP;
961 
962 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
963 		RTE_ETHDEV_LOG(INFO,
964 			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
965 			tx_queue_id, port_id);
966 		return -EINVAL;
967 	}
968 
969 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
970 		RTE_ETHDEV_LOG(INFO,
971 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
972 			tx_queue_id, port_id);
973 		return 0;
974 	}
975 
976 	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
977 
978 	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);
979 
980 	return ret;
981 }
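
/*
 * Usage sketch (application side): the per-queue start/stop functions above
 * are mainly useful for queues configured as deferred-start, which are not
 * started automatically by rte_eth_dev_start(). Assuming dev_info and
 * mbuf_pool were obtained earlier:
 *
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc, socket_id,
 *			       &rxconf, mbuf_pool);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_rx_queue_start(port_id, queue_id);
 */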
982 
983 uint32_t
984 rte_eth_speed_bitflag(uint32_t speed, int duplex)
985 {
986 	uint32_t ret;
987 
988 	switch (speed) {
989 	case RTE_ETH_SPEED_NUM_10M:
990 		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
991 		break;
992 	case RTE_ETH_SPEED_NUM_100M:
993 		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
994 		break;
995 	case RTE_ETH_SPEED_NUM_1G:
996 		ret = RTE_ETH_LINK_SPEED_1G;
997 		break;
998 	case RTE_ETH_SPEED_NUM_2_5G:
999 		ret = RTE_ETH_LINK_SPEED_2_5G;
1000 		break;
1001 	case RTE_ETH_SPEED_NUM_5G:
1002 		ret = RTE_ETH_LINK_SPEED_5G;
1003 		break;
1004 	case RTE_ETH_SPEED_NUM_10G:
1005 		ret = RTE_ETH_LINK_SPEED_10G;
1006 		break;
1007 	case RTE_ETH_SPEED_NUM_20G:
1008 		ret = RTE_ETH_LINK_SPEED_20G;
1009 		break;
1010 	case RTE_ETH_SPEED_NUM_25G:
1011 		ret = RTE_ETH_LINK_SPEED_25G;
1012 		break;
1013 	case RTE_ETH_SPEED_NUM_40G:
1014 		ret = RTE_ETH_LINK_SPEED_40G;
1015 		break;
1016 	case RTE_ETH_SPEED_NUM_50G:
1017 		ret = RTE_ETH_LINK_SPEED_50G;
1018 		break;
1019 	case RTE_ETH_SPEED_NUM_56G:
1020 		ret = RTE_ETH_LINK_SPEED_56G;
1021 		break;
1022 	case RTE_ETH_SPEED_NUM_100G:
1023 		ret = RTE_ETH_LINK_SPEED_100G;
1024 		break;
1025 	case RTE_ETH_SPEED_NUM_200G:
1026 		ret = RTE_ETH_LINK_SPEED_200G;
1027 		break;
1028 	case RTE_ETH_SPEED_NUM_400G:
1029 		ret = RTE_ETH_LINK_SPEED_400G;
1030 		break;
1031 	default:
1032 		ret = 0;
1033 	}
1034 
1035 	rte_eth_trace_speed_bitflag(speed, duplex, ret);
1036 
1037 	return ret;
1038 }
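
/*
 * Usage sketch (application side): the returned bit can be OR-ed into
 * rte_eth_conf.link_speeds before rte_eth_dev_configure(), optionally with
 * RTE_ETH_LINK_SPEED_FIXED to disable auto-negotiation:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */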
1039 
1040 const char *
1041 rte_eth_dev_rx_offload_name(uint64_t offload)
1042 {
1043 	const char *name = "UNKNOWN";
1044 	unsigned int i;
1045 
1046 	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1047 		if (offload == eth_dev_rx_offload_names[i].offload) {
1048 			name = eth_dev_rx_offload_names[i].name;
1049 			break;
1050 		}
1051 	}
1052 
1053 	rte_ethdev_trace_rx_offload_name(offload, name);
1054 
1055 	return name;
1056 }
1057 
1058 const char *
1059 rte_eth_dev_tx_offload_name(uint64_t offload)
1060 {
1061 	const char *name = "UNKNOWN";
1062 	unsigned int i;
1063 
1064 	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1065 		if (offload == eth_dev_tx_offload_names[i].offload) {
1066 			name = eth_dev_tx_offload_names[i].name;
1067 			break;
1068 		}
1069 	}
1070 
1071 	rte_ethdev_trace_tx_offload_name(offload, name);
1072 
1073 	return name;
1074 }
1075 
1076 static char *
1077 eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
1078 	const char *(*offload_name)(uint64_t))
1079 {
1080 	unsigned int pos = 0;
1081 	int ret;
1082 
1083 	/* There should be at least enough space to handle those cases */
1084 	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));
1085 
1086 	if (bitmask == 0) {
1087 		ret = snprintf(&buf[pos], size - pos, "none");
1088 		if (ret < 0 || pos + ret >= size)
1089 			ret = 0;
1090 		pos += ret;
1091 		goto out;
1092 	}
1093 
1094 	while (bitmask != 0) {
1095 		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
1096 		const char *name = offload_name(offload);
1097 
1098 		ret = snprintf(&buf[pos], size - pos, "%s,", name);
1099 		if (ret < 0 || pos + ret >= size) {
1100 			if (pos + sizeof("...") >= size)
1101 				pos = size - sizeof("...");
1102 			ret = snprintf(&buf[pos], size - pos, "...");
1103 			if (ret > 0 && pos + ret < size)
1104 				pos += ret;
1105 			goto out;
1106 		}
1107 
1108 		pos += ret;
1109 		bitmask &= ~offload;
1110 	}
1111 
1112 	/* Eliminate trailing comma */
1113 	pos--;
1114 out:
1115 	buf[pos] = '\0';
1116 	return buf;
1117 }
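
/*
 * Example (illustrative): for a bitmask of RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
 * RTE_ETH_RX_OFFLOAD_TCP_LRO with offload_name == rte_eth_dev_rx_offload_name,
 * the helper above writes "VLAN_STRIP,TCP_LRO" into buf; an empty bitmask
 * yields "none", and a buffer that is too small ends with "...".
 */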
1118 
1119 const char *
1120 rte_eth_dev_capability_name(uint64_t capability)
1121 {
1122 	const char *name = "UNKNOWN";
1123 	unsigned int i;
1124 
1125 	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
1126 		if (capability == rte_eth_dev_capa_names[i].offload) {
1127 			name = rte_eth_dev_capa_names[i].name;
1128 			break;
1129 		}
1130 	}
1131 
1132 	rte_ethdev_trace_capability_name(capability, name);
1133 
1134 	return name;
1135 }
1136 
1137 static inline int
1138 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1139 		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1140 {
1141 	int ret = 0;
1142 
1143 	if (dev_info_size == 0) {
1144 		if (config_size != max_rx_pkt_len) {
1145 			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1146 				       " %u != %u is not allowed\n",
1147 				       port_id, config_size, max_rx_pkt_len);
1148 			ret = -EINVAL;
1149 		}
1150 	} else if (config_size > dev_info_size) {
1151 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1152 			       "> max allowed value %u\n", port_id, config_size,
1153 			       dev_info_size);
1154 		ret = -EINVAL;
1155 	} else if (config_size < RTE_ETHER_MIN_LEN) {
1156 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1157 			       "< min allowed value %u\n", port_id, config_size,
1158 			       (unsigned int)RTE_ETHER_MIN_LEN);
1159 		ret = -EINVAL;
1160 	}
1161 	return ret;
1162 }
1163 
1164 /*
1165  * Validate offloads that are requested through rte_eth_dev_configure against
1166  * the offloads successfully set by the Ethernet device.
1167  *
1168  * @param port_id
1169  *   The port identifier of the Ethernet device.
1170  * @param req_offloads
1171  *   The offloads that have been requested through `rte_eth_dev_configure`.
1172  * @param set_offloads
1173  *   The offloads successfully set by the Ethernet device.
1174  * @param offload_type
1175  *   The offload type i.e. Rx/Tx string.
1176  * @param offload_name
1177  *   The function that prints the offload name.
1178  * @return
1179  *   - (0) if validation successful.
1180  *   - (-EINVAL) if requested offload has been silently disabled.
1181  */
1182 static int
1183 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1184 		  uint64_t set_offloads, const char *offload_type,
1185 		  const char *(*offload_name)(uint64_t))
1186 {
1187 	uint64_t offloads_diff = req_offloads ^ set_offloads;
1188 	uint64_t offload;
1189 	int ret = 0;
1190 
1191 	while (offloads_diff != 0) {
1192 		/* Check if any offload is requested but not enabled. */
1193 		offload = RTE_BIT64(rte_ctz64(offloads_diff));
1194 		if (offload & req_offloads) {
1195 			RTE_ETHDEV_LOG(ERR,
1196 				"Port %u failed to enable %s offload %s\n",
1197 				port_id, offload_type, offload_name(offload));
1198 			ret = -EINVAL;
1199 		}
1200 
1201 		/* Check if offload couldn't be disabled. */
1202 		if (offload & set_offloads) {
1203 			RTE_ETHDEV_LOG(DEBUG,
1204 				"Port %u %s offload %s is not requested but enabled\n",
1205 				port_id, offload_type, offload_name(offload));
1206 		}
1207 
1208 		offloads_diff &= ~offload;
1209 	}
1210 
1211 	return ret;
1212 }
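
/*
 * Worked example (illustrative): if the application requested
 * VLAN_STRIP | RSS_HASH but the PMD left only RSS_HASH set, the XOR diff
 * contains VLAN_STRIP; since that bit is present in req_offloads the function
 * logs an error and returns -EINVAL. A bit present only in set_offloads
 * (enabled although not requested) is merely reported at DEBUG level.
 */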
1213 
1214 static uint32_t
1215 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1216 {
1217 	uint32_t overhead_len;
1218 
1219 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1220 		overhead_len = max_rx_pktlen - max_mtu;
1221 	else
1222 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1223 
1224 	return overhead_len;
1225 }
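
/*
 * Worked example (illustrative): a PMD reporting max_rx_pktlen = 9600 and
 * max_mtu = 9582 yields an overhead of 18 bytes (L2 header + CRC); if the PMD
 * does not report a usable max_mtu (UINT16_MAX), the generic
 * RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN (14 + 4) fallback is used instead.
 */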
1226 
1227 /* rte_eth_dev_info_get() should be called prior to this function */
1228 static int
1229 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
1230 		uint16_t mtu)
1231 {
1232 	uint32_t overhead_len;
1233 	uint32_t frame_size;
1234 
1235 	if (mtu < dev_info->min_mtu) {
1236 		RTE_ETHDEV_LOG(ERR,
1237 			"MTU (%u) < device min MTU (%u) for port_id %u\n",
1238 			mtu, dev_info->min_mtu, port_id);
1239 		return -EINVAL;
1240 	}
1241 	if (mtu > dev_info->max_mtu) {
1242 		RTE_ETHDEV_LOG(ERR,
1243 			"MTU (%u) > device max MTU (%u) for port_id %u\n",
1244 			mtu, dev_info->max_mtu, port_id);
1245 		return -EINVAL;
1246 	}
1247 
1248 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1249 			dev_info->max_mtu);
1250 	frame_size = mtu + overhead_len;
1251 	if (frame_size < RTE_ETHER_MIN_LEN) {
1252 		RTE_ETHDEV_LOG(ERR,
1253 			"Frame size (%u) < min frame size (%u) for port_id %u\n",
1254 			frame_size, RTE_ETHER_MIN_LEN, port_id);
1255 		return -EINVAL;
1256 	}
1257 
1258 	if (frame_size > dev_info->max_rx_pktlen) {
1259 		RTE_ETHDEV_LOG(ERR,
1260 			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
1261 			frame_size, dev_info->max_rx_pktlen, port_id);
1262 		return -EINVAL;
1263 	}
1264 
1265 	return 0;
1266 }
1267 
1268 int
1269 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1270 		      const struct rte_eth_conf *dev_conf)
1271 {
1272 	struct rte_eth_dev *dev;
1273 	struct rte_eth_dev_info dev_info;
1274 	struct rte_eth_conf orig_conf;
1275 	int diag;
1276 	int ret;
1277 	uint16_t old_mtu;
1278 
1279 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1280 	dev = &rte_eth_devices[port_id];
1281 
1282 	if (dev_conf == NULL) {
1283 		RTE_ETHDEV_LOG(ERR,
1284 			"Cannot configure ethdev port %u from NULL config\n",
1285 			port_id);
1286 		return -EINVAL;
1287 	}
1288 
1289 	if (*dev->dev_ops->dev_configure == NULL)
1290 		return -ENOTSUP;
1291 
1292 	if (dev->data->dev_started) {
1293 		RTE_ETHDEV_LOG(ERR,
1294 			"Port %u must be stopped to allow configuration\n",
1295 			port_id);
1296 		return -EBUSY;
1297 	}
1298 
1299 	/*
1300 	 * Ensure that "dev_configured" is always 0 each time we prepare to call
1301 	 * dev_configure(), to avoid any unexpected behaviour.
1302 	 * It is set to 1 only when dev_configure() completes successfully.
1303 	 */
1304 	dev->data->dev_configured = 0;
1305 
1306 	 /* Store original config, as rollback required on failure */
1307 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1308 
1309 	/*
1310 	 * Copy the dev_conf parameter into the dev structure.
1311 	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1312 	 */
1313 	if (dev_conf != &dev->data->dev_conf)
1314 		memcpy(&dev->data->dev_conf, dev_conf,
1315 		       sizeof(dev->data->dev_conf));
1316 
1317 	/* Backup mtu for rollback */
1318 	old_mtu = dev->data->mtu;
1319 
1320 	/* fields must be zero to reserve them for future ABI changes */
1321 	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
1322 	    dev_conf->rxmode.reserved_64s[1] != 0 ||
1323 	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
1324 	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
1325 		RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n");
1326 		ret = -EINVAL;
1327 		goto rollback;
1328 	}
1329 
1330 	if (dev_conf->txmode.reserved_64s[0] != 0 ||
1331 	    dev_conf->txmode.reserved_64s[1] != 0 ||
1332 	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
1333 	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
1334 		RTE_ETHDEV_LOG(ERR, "Txmode reserved fields not zero\n");
1335 		ret = -EINVAL;
1336 		goto rollback;
1337 	}
1338 
1339 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1340 	if (ret != 0)
1341 		goto rollback;
1342 
1343 	/* If number of queues specified by application for both Rx and Tx is
1344 	 * zero, use driver preferred values. This cannot be done individually
1345 	 * as it is valid for either Tx or Rx (but not both) to be zero.
1346 	 * If the driver does not provide any preferred values, fall back on
1347 	 * EAL defaults.
1348 	 */
1349 	if (nb_rx_q == 0 && nb_tx_q == 0) {
1350 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
1351 		if (nb_rx_q == 0)
1352 			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1353 		nb_tx_q = dev_info.default_txportconf.nb_queues;
1354 		if (nb_tx_q == 0)
1355 			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1356 	}
1357 
1358 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1359 		RTE_ETHDEV_LOG(ERR,
1360 			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
1361 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1362 		ret = -EINVAL;
1363 		goto rollback;
1364 	}
1365 
1366 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1367 		RTE_ETHDEV_LOG(ERR,
1368 			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
1369 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1370 		ret = -EINVAL;
1371 		goto rollback;
1372 	}
1373 
1374 	/*
1375 	 * Check that the numbers of Rx and Tx queues are not greater
1376 	 * than the maximum number of Rx and Tx queues supported by the
1377 	 * configured device.
1378 	 */
1379 	if (nb_rx_q > dev_info.max_rx_queues) {
1380 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1381 			port_id, nb_rx_q, dev_info.max_rx_queues);
1382 		ret = -EINVAL;
1383 		goto rollback;
1384 	}
1385 
1386 	if (nb_tx_q > dev_info.max_tx_queues) {
1387 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1388 			port_id, nb_tx_q, dev_info.max_tx_queues);
1389 		ret = -EINVAL;
1390 		goto rollback;
1391 	}
1392 
1393 	/* Check that the device supports requested interrupts */
1394 	if ((dev_conf->intr_conf.lsc == 1) &&
1395 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1396 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1397 			dev->device->driver->name);
1398 		ret = -EINVAL;
1399 		goto rollback;
1400 	}
1401 	if ((dev_conf->intr_conf.rmv == 1) &&
1402 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1403 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1404 			dev->device->driver->name);
1405 		ret = -EINVAL;
1406 		goto rollback;
1407 	}
1408 
1409 	if (dev_conf->rxmode.mtu == 0)
1410 		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
1411 
1412 	ret = eth_dev_validate_mtu(port_id, &dev_info,
1413 			dev->data->dev_conf.rxmode.mtu);
1414 	if (ret != 0)
1415 		goto rollback;
1416 
1417 	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1418 
1419 	/*
1420 	 * If LRO is enabled, check that the maximum aggregated packet
1421 	 * size is supported by the configured device.
1422 	 */
1423 	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1424 		uint32_t max_rx_pktlen;
1425 		uint32_t overhead_len;
1426 
1427 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1428 				dev_info.max_mtu);
1429 		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1430 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
1431 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1432 		ret = eth_dev_check_lro_pkt_size(port_id,
1433 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1434 				max_rx_pktlen,
1435 				dev_info.max_lro_pkt_size);
1436 		if (ret != 0)
1437 			goto rollback;
1438 	}
1439 
1440 	/* Any requested offloading must be within its device capabilities */
1441 	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1442 	     dev_conf->rxmode.offloads) {
1443 		char buffer[512];
1444 
1445 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n",
1446 			port_id, eth_dev_offload_names(
1447 			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
1448 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1449 		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n",
1450 			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
1451 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1452 		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n",
1453 			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
1454 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1455 
1456 		ret = -EINVAL;
1457 		goto rollback;
1458 	}
1459 	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1460 	     dev_conf->txmode.offloads) {
1461 		char buffer[512];
1462 
1463 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n",
1464 			port_id, eth_dev_offload_names(
1465 			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
1466 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1467 		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n",
1468 			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
1469 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1470 		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n",
1471 			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
1472 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1473 		ret = -EINVAL;
1474 		goto rollback;
1475 	}
1476 
1477 	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1478 		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1479 
1480 	/* Check that device supports requested rss hash functions. */
1481 	if ((dev_info.flow_type_rss_offloads |
1482 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1483 	    dev_info.flow_type_rss_offloads) {
1484 		RTE_ETHDEV_LOG(ERR,
1485 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1486 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1487 			dev_info.flow_type_rss_offloads);
1488 		ret = -EINVAL;
1489 		goto rollback;
1490 	}
1491 
1492 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1493 	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
1494 	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
1495 		RTE_ETHDEV_LOG(ERR,
1496 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1497 			port_id,
1498 			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
1499 		ret = -EINVAL;
1500 		goto rollback;
1501 	}
1502 
1503 	/*
1504 	 * Setup new number of Rx/Tx queues and reconfigure device.
1505 	 */
1506 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1507 	if (diag != 0) {
1508 		RTE_ETHDEV_LOG(ERR,
1509 			"Port%u eth_dev_rx_queue_config = %d\n",
1510 			port_id, diag);
1511 		ret = diag;
1512 		goto rollback;
1513 	}
1514 
1515 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1516 	if (diag != 0) {
1517 		RTE_ETHDEV_LOG(ERR,
1518 			"Port%u eth_dev_tx_queue_config = %d\n",
1519 			port_id, diag);
1520 		eth_dev_rx_queue_config(dev, 0);
1521 		ret = diag;
1522 		goto rollback;
1523 	}
1524 
1525 	diag = (*dev->dev_ops->dev_configure)(dev);
1526 	if (diag != 0) {
1527 		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1528 			port_id, diag);
1529 		ret = eth_err(port_id, diag);
1530 		goto reset_queues;
1531 	}
1532 
1533 	/* Initialize Rx profiling if enabled at compilation time. */
1534 	diag = __rte_eth_dev_profile_init(port_id, dev);
1535 	if (diag != 0) {
1536 		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1537 			port_id, diag);
1538 		ret = eth_err(port_id, diag);
1539 		goto reset_queues;
1540 	}
1541 
1542 	/* Validate Rx offloads. */
1543 	diag = eth_dev_validate_offloads(port_id,
1544 			dev_conf->rxmode.offloads,
1545 			dev->data->dev_conf.rxmode.offloads, "Rx",
1546 			rte_eth_dev_rx_offload_name);
1547 	if (diag != 0) {
1548 		ret = diag;
1549 		goto reset_queues;
1550 	}
1551 
1552 	/* Validate Tx offloads. */
1553 	diag = eth_dev_validate_offloads(port_id,
1554 			dev_conf->txmode.offloads,
1555 			dev->data->dev_conf.txmode.offloads, "Tx",
1556 			rte_eth_dev_tx_offload_name);
1557 	if (diag != 0) {
1558 		ret = diag;
1559 		goto reset_queues;
1560 	}
1561 
1562 	dev->data->dev_configured = 1;
1563 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1564 	return 0;
1565 reset_queues:
1566 	eth_dev_rx_queue_config(dev, 0);
1567 	eth_dev_tx_queue_config(dev, 0);
1568 rollback:
1569 	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1570 	if (old_mtu != dev->data->mtu)
1571 		dev->data->mtu = old_mtu;
1572 
1573 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1574 	return ret;
1575 }
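
/*
 * Usage sketch (application side, simplified, without error handling):
 * rte_eth_dev_configure() is the first step of the usual port bring-up
 * sequence; queue setup and start follow. 'pool' stands for an
 * application-created mbuf mempool:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL, pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */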
1576 
1577 static void
1578 eth_dev_mac_restore(struct rte_eth_dev *dev,
1579 			struct rte_eth_dev_info *dev_info)
1580 {
1581 	struct rte_ether_addr *addr;
1582 	uint16_t i;
1583 	uint32_t pool = 0;
1584 	uint64_t pool_mask;
1585 
1586 	/* replay MAC address configuration including default MAC */
1587 	addr = &dev->data->mac_addrs[0];
1588 	if (*dev->dev_ops->mac_addr_set != NULL)
1589 		(*dev->dev_ops->mac_addr_set)(dev, addr);
1590 	else if (*dev->dev_ops->mac_addr_add != NULL)
1591 		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1592 
1593 	if (*dev->dev_ops->mac_addr_add != NULL) {
1594 		for (i = 1; i < dev_info->max_mac_addrs; i++) {
1595 			addr = &dev->data->mac_addrs[i];
1596 
1597 			/* skip zero address */
1598 			if (rte_is_zero_ether_addr(addr))
1599 				continue;
1600 
1601 			pool = 0;
1602 			pool_mask = dev->data->mac_pool_sel[i];
1603 
1604 			do {
1605 				if (pool_mask & UINT64_C(1))
1606 					(*dev->dev_ops->mac_addr_add)(dev,
1607 						addr, i, pool);
1608 				pool_mask >>= 1;
1609 				pool++;
1610 			} while (pool_mask);
1611 		}
1612 	}
1613 }
1614 
1615 static int
1616 eth_dev_config_restore(struct rte_eth_dev *dev,
1617 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
1618 {
1619 	int ret;
1620 
1621 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1622 		eth_dev_mac_restore(dev, dev_info);
1623 
1624 	/* replay promiscuous configuration */
1625 	/*
1626 	 * use callbacks directly since we don't need port_id check and
1627 	 * use the dev_ops callbacks directly since we don't need the port_id
1628 	 * check and want to re-apply the value even if it is already set
1629 	if (rte_eth_promiscuous_get(port_id) == 1 &&
1630 	    *dev->dev_ops->promiscuous_enable != NULL) {
1631 		ret = eth_err(port_id,
1632 			      (*dev->dev_ops->promiscuous_enable)(dev));
1633 		if (ret != 0 && ret != -ENOTSUP) {
1634 			RTE_ETHDEV_LOG(ERR,
1635 				"Failed to enable promiscuous mode for device (port %u): %s\n",
1636 				port_id, rte_strerror(-ret));
1637 			return ret;
1638 		}
1639 	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
1640 		   *dev->dev_ops->promiscuous_disable != NULL) {
1641 		ret = eth_err(port_id,
1642 			      (*dev->dev_ops->promiscuous_disable)(dev));
1643 		if (ret != 0 && ret != -ENOTSUP) {
1644 			RTE_ETHDEV_LOG(ERR,
1645 				"Failed to disable promiscuous mode for device (port %u): %s\n",
1646 				port_id, rte_strerror(-ret));
1647 			return ret;
1648 		}
1649 	}
1650 
1651 	/* replay all multicast configuration */
1652 	/*
1653 	 * use the dev_ops callbacks directly since we don't need the port_id
1654 	 * check and want to re-apply the value even if it is already set
1655 	 */
1656 	if (rte_eth_allmulticast_get(port_id) == 1 &&
1657 	    *dev->dev_ops->allmulticast_enable != NULL) {
1658 		ret = eth_err(port_id,
1659 			      (*dev->dev_ops->allmulticast_enable)(dev));
1660 		if (ret != 0 && ret != -ENOTSUP) {
1661 			RTE_ETHDEV_LOG(ERR,
1662 				"Failed to enable allmulticast mode for device (port %u): %s\n",
1663 				port_id, rte_strerror(-ret));
1664 			return ret;
1665 		}
1666 	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
1667 		   *dev->dev_ops->allmulticast_disable != NULL) {
1668 		ret = eth_err(port_id,
1669 			      (*dev->dev_ops->allmulticast_disable)(dev));
1670 		if (ret != 0 && ret != -ENOTSUP) {
1671 			RTE_ETHDEV_LOG(ERR,
1672 				"Failed to disable allmulticast mode for device (port %u): %s\n",
1673 				port_id, rte_strerror(-ret));
1674 			return ret;
1675 		}
1676 	}
1677 
1678 	return 0;
1679 }
1680 
1681 int
1682 rte_eth_dev_start(uint16_t port_id)
1683 {
1684 	struct rte_eth_dev *dev;
1685 	struct rte_eth_dev_info dev_info;
1686 	int diag;
1687 	int ret, ret_stop;
1688 
1689 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1690 	dev = &rte_eth_devices[port_id];
1691 
1692 	if (*dev->dev_ops->dev_start == NULL)
1693 		return -ENOTSUP;
1694 
1695 	if (dev->data->dev_configured == 0) {
1696 		RTE_ETHDEV_LOG(INFO,
1697 			"Device with port_id=%"PRIu16" is not configured.\n",
1698 			port_id);
1699 		return -EINVAL;
1700 	}
1701 
1702 	if (dev->data->dev_started != 0) {
1703 		RTE_ETHDEV_LOG(INFO,
1704 			"Device with port_id=%"PRIu16" already started\n",
1705 			port_id);
1706 		return 0;
1707 	}
1708 
1709 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1710 	if (ret != 0)
1711 		return ret;
1712 
1713 	/* Let's restore the MAC address now if the device does not support live change */
1714 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1715 		eth_dev_mac_restore(dev, &dev_info);
1716 
1717 	diag = (*dev->dev_ops->dev_start)(dev);
1718 	if (diag == 0)
1719 		dev->data->dev_started = 1;
1720 	else
1721 		return eth_err(port_id, diag);
1722 
1723 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
1724 	if (ret != 0) {
1725 		RTE_ETHDEV_LOG(ERR,
1726 			"Error during restoring configuration for device (port %u): %s\n",
1727 			port_id, rte_strerror(-ret));
1728 		ret_stop = rte_eth_dev_stop(port_id);
1729 		if (ret_stop != 0) {
1730 			RTE_ETHDEV_LOG(ERR,
1731 				"Failed to stop device (port %u): %s\n",
1732 				port_id, rte_strerror(-ret_stop));
1733 		}
1734 
1735 		return ret;
1736 	}
1737 
1738 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
1739 		if (*dev->dev_ops->link_update == NULL)
1740 			return -ENOTSUP;
1741 		(*dev->dev_ops->link_update)(dev, 0);
1742 	}
1743 
1744 	/* expose selection of PMD fast-path functions */
1745 	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1746 
1747 	rte_ethdev_trace_start(port_id);
1748 	return 0;
1749 }
1750 
1751 int
1752 rte_eth_dev_stop(uint16_t port_id)
1753 {
1754 	struct rte_eth_dev *dev;
1755 	int ret;
1756 
1757 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1758 	dev = &rte_eth_devices[port_id];
1759 
1760 	if (*dev->dev_ops->dev_stop == NULL)
1761 		return -ENOTSUP;
1762 
1763 	if (dev->data->dev_started == 0) {
1764 		RTE_ETHDEV_LOG(INFO,
1765 			"Device with port_id=%"PRIu16" already stopped\n",
1766 			port_id);
1767 		return 0;
1768 	}
1769 
1770 	/* point fast-path functions to dummy ones */
1771 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1772 
1773 	ret = (*dev->dev_ops->dev_stop)(dev);
1774 	if (ret == 0)
1775 		dev->data->dev_started = 0;
1776 	rte_ethdev_trace_stop(port_id, ret);
1777 
1778 	return ret;
1779 }
1780 
1781 int
1782 rte_eth_dev_set_link_up(uint16_t port_id)
1783 {
1784 	struct rte_eth_dev *dev;
1785 	int ret;
1786 
1787 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1788 	dev = &rte_eth_devices[port_id];
1789 
1790 	if (*dev->dev_ops->dev_set_link_up == NULL)
1791 		return -ENOTSUP;
1792 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1793 
1794 	rte_ethdev_trace_set_link_up(port_id, ret);
1795 
1796 	return ret;
1797 }
1798 
1799 int
1800 rte_eth_dev_set_link_down(uint16_t port_id)
1801 {
1802 	struct rte_eth_dev *dev;
1803 	int ret;
1804 
1805 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1806 	dev = &rte_eth_devices[port_id];
1807 
1808 	if (*dev->dev_ops->dev_set_link_down == NULL)
1809 		return -ENOTSUP;
1810 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1811 
1812 	rte_ethdev_trace_set_link_down(port_id, ret);
1813 
1814 	return ret;
1815 }
1816 
1817 int
1818 rte_eth_dev_close(uint16_t port_id)
1819 {
1820 	struct rte_eth_dev *dev;
1821 	int firsterr, binerr;
1822 	int *lasterr = &firsterr;
1823 
1824 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1825 	dev = &rte_eth_devices[port_id];
1826 
1827 	/*
1828 	 * Secondary process needs to close device to release process private
1829 	 * resources. But secondary process should not be obliged to wait
1830 	 * for device stop before closing ethdev.
1831 	 */
1832 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1833 			dev->data->dev_started) {
1834 		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
1835 			       port_id);
1836 		return -EINVAL;
1837 	}
1838 
1839 	if (*dev->dev_ops->dev_close == NULL)
1840 		return -ENOTSUP;
1841 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1842 	if (*lasterr != 0)
1843 		lasterr = &binerr;
1844 
1845 	rte_ethdev_trace_close(port_id);
1846 	*lasterr = rte_eth_dev_release_port(dev);
1847 
1848 	return firsterr;
1849 }
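
/*
 * Note (illustrative): on the primary process the expected teardown order is
 * rte_eth_dev_stop() followed by rte_eth_dev_close(); once the port is
 * released by the close, its port ID becomes available again for a later
 * probe or hotplug.
 */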
1850 
1851 int
1852 rte_eth_dev_reset(uint16_t port_id)
1853 {
1854 	struct rte_eth_dev *dev;
1855 	int ret;
1856 
1857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1858 	dev = &rte_eth_devices[port_id];
1859 
1860 	if (*dev->dev_ops->dev_reset == NULL)
1861 		return -ENOTSUP;
1862 
1863 	ret = rte_eth_dev_stop(port_id);
1864 	if (ret != 0) {
1865 		RTE_ETHDEV_LOG(ERR,
1866 			"Failed to stop device (port %u) before reset: %s - ignore\n",
1867 			port_id, rte_strerror(-ret));
1868 	}
1869 	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
1870 
1871 	rte_ethdev_trace_reset(port_id, ret);
1872 
1873 	return ret;
1874 }
1875 
1876 int
1877 rte_eth_dev_is_removed(uint16_t port_id)
1878 {
1879 	struct rte_eth_dev *dev;
1880 	int ret;
1881 
1882 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1883 	dev = &rte_eth_devices[port_id];
1884 
1885 	if (dev->state == RTE_ETH_DEV_REMOVED)
1886 		return 1;
1887 
1888 	if (*dev->dev_ops->is_removed == NULL)
1889 		return 0;
1890 
1891 	ret = dev->dev_ops->is_removed(dev);
1892 	if (ret != 0)
1893 		/* Device is physically removed. */
1894 		dev->state = RTE_ETH_DEV_REMOVED;
1895 
1896 	rte_ethdev_trace_is_removed(port_id, ret);
1897 
1898 	return ret;
1899 }
1900 
1901 static int
1902 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
1903 			 uint16_t min_length)
1904 {
1905 	uint16_t data_room_size;
1906 
1907 	/*
1908 	 * Check the size of the mbuf data buffer: this value
1909 	 * must be provided in the private data of the memory pool.
1910 	 * First check that the memory pool has valid private data.
1911 	 */
1912 	if (mp->private_data_size <
1913 			sizeof(struct rte_pktmbuf_pool_private)) {
1914 		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1915 			mp->name, mp->private_data_size,
1916 			(unsigned int)
1917 			sizeof(struct rte_pktmbuf_pool_private));
1918 		return -ENOSPC;
1919 	}
1920 	data_room_size = rte_pktmbuf_data_room_size(mp);
1921 	if (data_room_size < offset + min_length) {
1922 		RTE_ETHDEV_LOG(ERR,
1923 			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
1924 			       mp->name, data_room_size,
1925 			       offset + min_length, offset, min_length);
1926 		return -EINVAL;
1927 	}
1928 	return 0;
1929 }
1930 
1931 static int
1932 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
1933 {
1934 	int cnt;
1935 
1936 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
1937 	if (cnt <= 0)
1938 		return cnt;
1939 
1940 	*ptypes = malloc(sizeof(uint32_t) * cnt);
1941 	if (*ptypes == NULL)
1942 		return -ENOMEM;
1943 
1944 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
1945 	if (cnt <= 0) {
1946 		free(*ptypes);
1947 		*ptypes = NULL;
1948 	}
1949 	return cnt;
1950 }
1951 
1952 static int
1953 rte_eth_rx_queue_check_split(uint16_t port_id,
1954 			const struct rte_eth_rxseg_split *rx_seg,
1955 			uint16_t n_seg, uint32_t *mbp_buf_size,
1956 			const struct rte_eth_dev_info *dev_info)
1957 {
1958 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1959 	struct rte_mempool *mp_first;
1960 	uint32_t offset_mask;
1961 	uint16_t seg_idx;
1962 	int ret = 0;
1963 	int ptype_cnt;
1964 	uint32_t *ptypes;
1965 	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
1966 	int i;
1967 
1968 	if (n_seg > seg_capa->max_nseg) {
1969 		RTE_ETHDEV_LOG(ERR,
1970 			       "Requested Rx segments %u exceed supported %u\n",
1971 			       n_seg, seg_capa->max_nseg);
1972 		return -EINVAL;
1973 	}
1974 	/*
1975 	 * Check the sizes and offsets against buffer sizes
1976 	 * for each segment specified in extended configuration.
1977 	 */
1978 	mp_first = rx_seg[0].mp;
1979 	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
1980 
1981 	ptypes = NULL;
1982 	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);
1983 
1984 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1985 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1986 		uint32_t length = rx_seg[seg_idx].length;
1987 		uint32_t offset = rx_seg[seg_idx].offset;
1988 		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;
1989 
1990 		if (mpl == NULL) {
1991 			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1992 			ret = -EINVAL;
1993 			goto out;
1994 		}
1995 		if (seg_idx != 0 && mp_first != mpl &&
1996 		    seg_capa->multi_pools == 0) {
1997 			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1998 			ret = -ENOTSUP;
1999 			goto out;
2000 		}
2001 		if (offset != 0) {
2002 			if (seg_capa->offset_allowed == 0) {
2003 				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
2004 				ret = -ENOTSUP;
2005 				goto out;
2006 			}
2007 			if (offset & offset_mask) {
2008 				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
2009 					       offset,
2010 					       seg_capa->offset_align_log2);
2011 				ret = -EINVAL;
2012 				goto out;
2013 			}
2014 		}
2015 
2016 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
2017 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
2018 		if (proto_hdr != 0) {
2019 			/* Split based on protocol headers. */
2020 			if (length != 0) {
2021 				RTE_ETHDEV_LOG(ERR,
2022 					"Do not set length split and protocol split within a segment\n"
2023 					);
2024 				ret = -EINVAL;
2025 				goto out;
2026 			}
2027 			if ((proto_hdr & prev_proto_hdrs) != 0) {
2028 				RTE_ETHDEV_LOG(ERR,
2029 					"Repeat with previous protocol headers or proto-split after length-based split\n"
2030 					);
2031 				ret = -EINVAL;
2032 				goto out;
2033 			}
2034 			if (ptype_cnt <= 0) {
2035 				RTE_ETHDEV_LOG(ERR,
2036 					"Port %u failed to get supported buffer split header protocols\n",
2037 					port_id);
2038 				ret = -ENOTSUP;
2039 				goto out;
2040 			}
2041 			for (i = 0; i < ptype_cnt; i++) {
2042 				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
2043 					break;
2044 			}
2045 			if (i == ptype_cnt) {
2046 				RTE_ETHDEV_LOG(ERR,
2047 					"Requested Rx split header protocols 0x%x is not supported.\n",
2048 					proto_hdr);
2049 				ret = -EINVAL;
2050 				goto out;
2051 			}
2052 			prev_proto_hdrs |= proto_hdr;
2053 		} else {
2054 			/* Split at fixed length. */
2055 			length = length != 0 ? length : *mbp_buf_size;
2056 			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
2057 		}
2058 
2059 		ret = rte_eth_check_rx_mempool(mpl, offset, length);
2060 		if (ret != 0)
2061 			goto out;
2062 	}
2063 out:
2064 	free(ptypes);
2065 	return ret;
2066 }
2067 
2068 static int
2069 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
2070 			       uint16_t n_mempools, uint32_t *min_buf_size,
2071 			       const struct rte_eth_dev_info *dev_info)
2072 {
2073 	uint16_t pool_idx;
2074 	int ret;
2075 
2076 	if (n_mempools > dev_info->max_rx_mempools) {
2077 		RTE_ETHDEV_LOG(ERR,
2078 			       "Too many Rx mempools %u vs maximum %u\n",
2079 			       n_mempools, dev_info->max_rx_mempools);
2080 		return -EINVAL;
2081 	}
2082 
2083 	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
2084 		struct rte_mempool *mp = rx_mempools[pool_idx];
2085 
2086 		if (mp == NULL) {
2087 			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
2088 			return -EINVAL;
2089 		}
2090 
2091 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2092 					       dev_info->min_rx_bufsize);
2093 		if (ret != 0)
2094 			return ret;
2095 
2096 		*min_buf_size = RTE_MIN(*min_buf_size,
2097 					rte_pktmbuf_data_room_size(mp));
2098 	}
2099 
2100 	return 0;
2101 }
2102 
2103 int
2104 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2105 		       uint16_t nb_rx_desc, unsigned int socket_id,
2106 		       const struct rte_eth_rxconf *rx_conf,
2107 		       struct rte_mempool *mp)
2108 {
2109 	int ret;
2110 	uint64_t rx_offloads;
2111 	uint32_t mbp_buf_size = UINT32_MAX;
2112 	struct rte_eth_dev *dev;
2113 	struct rte_eth_dev_info dev_info;
2114 	struct rte_eth_rxconf local_conf;
2115 
2116 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2117 	dev = &rte_eth_devices[port_id];
2118 
2119 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2120 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2121 		return -EINVAL;
2122 	}
2123 
2124 	if (*dev->dev_ops->rx_queue_setup == NULL)
2125 		return -ENOTSUP;
2126 
2127 	if (rx_conf != NULL &&
2128 	   (rx_conf->reserved_64s[0] != 0 ||
2129 	    rx_conf->reserved_64s[1] != 0 ||
2130 	    rx_conf->reserved_ptrs[0] != NULL ||
2131 	    rx_conf->reserved_ptrs[1] != NULL)) {
2132 		RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n");
2133 		return -EINVAL;
2134 	}
2135 
2136 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2137 	if (ret != 0)
2138 		return ret;
2139 
2140 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
2141 	if (rx_conf != NULL)
2142 		rx_offloads |= rx_conf->offloads;
2143 
2144 	/* Ensure that we have one and only one source of Rx buffers */
2145 	if ((mp != NULL) +
2146 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
2147 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
2148 		RTE_ETHDEV_LOG(ERR,
2149 			       "Ambiguous Rx mempools configuration\n");
2150 		return -EINVAL;
2151 	}
2152 
2153 	if (mp != NULL) {
2154 		/* Single pool configuration check. */
2155 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2156 					       dev_info.min_rx_bufsize);
2157 		if (ret != 0)
2158 			return ret;
2159 
2160 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2161 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
2162 		const struct rte_eth_rxseg_split *rx_seg;
2163 		uint16_t n_seg;
2164 
2165 		/* Extended multi-segment configuration check. */
2166 		if (rx_conf->rx_seg == NULL) {
2167 			RTE_ETHDEV_LOG(ERR,
2168 				       "Memory pool is null and no multi-segment configuration provided\n");
2169 			return -EINVAL;
2170 		}
2171 
2172 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2173 		n_seg = rx_conf->rx_nseg;
2174 
2175 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2176 			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
2177 							   &mbp_buf_size,
2178 							   &dev_info);
2179 			if (ret != 0)
2180 				return ret;
2181 		} else {
2182 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2183 			return -EINVAL;
2184 		}
2185 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
2186 		/* Extended multi-pool configuration check. */
2187 		if (rx_conf->rx_mempools == NULL) {
2188 			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
2189 			return -EINVAL;
2190 		}
2191 
2192 		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
2193 						     rx_conf->rx_nmempool,
2194 						     &mbp_buf_size,
2195 						     &dev_info);
2196 		if (ret != 0)
2197 			return ret;
2198 	} else {
2199 		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
2200 		return -EINVAL;
2201 	}
2202 
2203 	/* Use default specified by driver, if nb_rx_desc is zero */
2204 	if (nb_rx_desc == 0) {
2205 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
2206 		/* If driver default is also zero, fall back on EAL default */
2207 		if (nb_rx_desc == 0)
2208 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2209 	}
2210 
2211 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2212 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2213 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2214 
2215 		RTE_ETHDEV_LOG(ERR,
2216 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2217 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2218 			dev_info.rx_desc_lim.nb_min,
2219 			dev_info.rx_desc_lim.nb_align);
2220 		return -EINVAL;
2221 	}
2222 
2223 	if (dev->data->dev_started &&
2224 		!(dev_info.dev_capa &
2225 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2226 		return -EBUSY;
2227 
2228 	if (dev->data->dev_started &&
2229 		(dev->data->rx_queue_state[rx_queue_id] !=
2230 			RTE_ETH_QUEUE_STATE_STOPPED))
2231 		return -EBUSY;
2232 
2233 	eth_dev_rxq_release(dev, rx_queue_id);
2234 
2235 	if (rx_conf == NULL)
2236 		rx_conf = &dev_info.default_rxconf;
2237 
2238 	local_conf = *rx_conf;
2239 
2240 	/*
2241 	 * If an offload has already been enabled in
2242 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2243 	 * so there is no need to enable it on this queue again.
2244 	 * The local_conf.offloads passed to the underlying PMD only carries
2245 	 * those offloads which are enabled on this queue only and
2246 	 * not enabled on all queues.
2247 	 */
2248 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2249 
2250 	/*
2251 	 * Newly added offloads for this queue are those not enabled in
2252 	 * rte_eth_dev_configure() and they must be per-queue offloads.
2253 	 * A pure per-port offload can't be enabled on one queue while
2254 	 * disabled on another queue. A pure per-port offload can't be
2255 	 * newly enabled on any single queue if it hasn't been
2256 	 * enabled in rte_eth_dev_configure().
2257 	 */
2258 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2259 	     local_conf.offloads) {
2260 		RTE_ETHDEV_LOG(ERR,
2261 			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2262 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2263 			port_id, rx_queue_id, local_conf.offloads,
2264 			dev_info.rx_queue_offload_capa,
2265 			__func__);
2266 		return -EINVAL;
2267 	}
2268 
2269 	if (local_conf.share_group > 0 &&
2270 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2271 		RTE_ETHDEV_LOG(ERR,
2272 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
2273 			port_id, rx_queue_id, local_conf.share_group);
2274 		return -EINVAL;
2275 	}
2276 
2277 	/*
2278 	 * If LRO is enabled, check that the maximum aggregated packet
2279 	 * size is supported by the configured device.
2280 	 */
2281 	/* Get the real Ethernet overhead length */
2282 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2283 		uint32_t overhead_len;
2284 		uint32_t max_rx_pktlen;
2285 		int ret;
2286 
2287 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2288 				dev_info.max_mtu);
2289 		max_rx_pktlen = dev->data->mtu + overhead_len;
2290 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2291 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2292 		ret = eth_dev_check_lro_pkt_size(port_id,
2293 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2294 				max_rx_pktlen,
2295 				dev_info.max_lro_pkt_size);
2296 		if (ret != 0)
2297 			return ret;
2298 	}
2299 
2300 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2301 					      socket_id, &local_conf, mp);
2302 	if (!ret) {
2303 		if (!dev->data->min_rx_buf_size ||
2304 		    dev->data->min_rx_buf_size > mbp_buf_size)
2305 			dev->data->min_rx_buf_size = mbp_buf_size;
2306 	}
2307 
2308 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2309 		rx_conf, ret);
2310 	return eth_err(port_id, ret);
2311 }
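
/*
 * Editor's note: minimal caller-side sketch, not part of the original source.
 * It exercises the single-mempool path checked above; "mb_pool" is assumed to
 * be a pktmbuf pool created by the application, and the port to have been
 * configured with at least one Rx queue. Passing a NULL rx_conf selects the
 * driver defaults.
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				     NULL, mb_pool);
 *	if (ret != 0)
 *		printf("Rx queue setup failed: %s\n", rte_strerror(-ret));
 */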
2312 
2313 int
2314 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2315 			       uint16_t nb_rx_desc,
2316 			       const struct rte_eth_hairpin_conf *conf)
2317 {
2318 	int ret;
2319 	struct rte_eth_dev *dev;
2320 	struct rte_eth_hairpin_cap cap;
2321 	int i;
2322 	int count;
2323 
2324 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2325 	dev = &rte_eth_devices[port_id];
2326 
2327 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2328 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2329 		return -EINVAL;
2330 	}
2331 
2332 	if (conf == NULL) {
2333 		RTE_ETHDEV_LOG(ERR,
2334 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2335 			port_id);
2336 		return -EINVAL;
2337 	}
2338 
2339 	if (conf->reserved != 0) {
2340 		RTE_ETHDEV_LOG(ERR,
2341 			       "Rx hairpin reserved field not zero\n");
2342 		return -EINVAL;
2343 	}
2344 
2345 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2346 	if (ret != 0)
2347 		return ret;
2348 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2349 		return -ENOTSUP;
2350 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2351 	if (nb_rx_desc == 0)
2352 		nb_rx_desc = cap.max_nb_desc;
2353 	if (nb_rx_desc > cap.max_nb_desc) {
2354 		RTE_ETHDEV_LOG(ERR,
2355 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2356 			nb_rx_desc, cap.max_nb_desc);
2357 		return -EINVAL;
2358 	}
2359 	if (conf->peer_count > cap.max_rx_2_tx) {
2360 		RTE_ETHDEV_LOG(ERR,
2361 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2362 			conf->peer_count, cap.max_rx_2_tx);
2363 		return -EINVAL;
2364 	}
2365 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2366 		RTE_ETHDEV_LOG(ERR,
2367 			"Attempt to use locked device memory for Rx queue, which is not supported");
2368 		return -EINVAL;
2369 	}
2370 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2371 		RTE_ETHDEV_LOG(ERR,
2372 			"Attempt to use DPDK memory for Rx queue, which is not supported");
2373 		return -EINVAL;
2374 	}
2375 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2376 		RTE_ETHDEV_LOG(ERR,
2377 			"Attempt to use mutually exclusive memory settings for Rx queue");
2378 		return -EINVAL;
2379 	}
2380 	if (conf->force_memory &&
2381 	    !conf->use_locked_device_memory &&
2382 	    !conf->use_rte_memory) {
2383 		RTE_ETHDEV_LOG(ERR,
2384 			"Attempt to force Rx queue memory settings, but none is set");
2385 		return -EINVAL;
2386 	}
2387 	if (conf->peer_count == 0) {
2388 		RTE_ETHDEV_LOG(ERR,
2389 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2390 			conf->peer_count);
2391 		return -EINVAL;
2392 	}
2393 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2394 	     cap.max_nb_queues != UINT16_MAX; i++) {
2395 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2396 			count++;
2397 	}
2398 	if (count > cap.max_nb_queues) {
2399 		RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
2400 		cap.max_nb_queues);
2401 		return -EINVAL;
2402 	}
2403 	if (dev->data->dev_started)
2404 		return -EBUSY;
2405 	eth_dev_rxq_release(dev, rx_queue_id);
2406 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2407 						      nb_rx_desc, conf);
2408 	if (ret == 0)
2409 		dev->data->rx_queue_state[rx_queue_id] =
2410 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2411 	ret = eth_err(port_id, ret);
2412 
2413 	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2414 					     conf, ret);
2415 
2416 	return ret;
2417 }
2418 
2419 int
2420 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2421 		       uint16_t nb_tx_desc, unsigned int socket_id,
2422 		       const struct rte_eth_txconf *tx_conf)
2423 {
2424 	struct rte_eth_dev *dev;
2425 	struct rte_eth_dev_info dev_info;
2426 	struct rte_eth_txconf local_conf;
2427 	int ret;
2428 
2429 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2430 	dev = &rte_eth_devices[port_id];
2431 
2432 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2433 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2434 		return -EINVAL;
2435 	}
2436 
2437 	if (*dev->dev_ops->tx_queue_setup == NULL)
2438 		return -ENOTSUP;
2439 
2440 	if (tx_conf != NULL &&
2441 	   (tx_conf->reserved_64s[0] != 0 ||
2442 	    tx_conf->reserved_64s[1] != 0 ||
2443 	    tx_conf->reserved_ptrs[0] != NULL ||
2444 	    tx_conf->reserved_ptrs[1] != NULL)) {
2445 		RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n");
2446 		return -EINVAL;
2447 	}
2448 
2449 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2450 	if (ret != 0)
2451 		return ret;
2452 
2453 	/* Use default specified by driver, if nb_tx_desc is zero */
2454 	if (nb_tx_desc == 0) {
2455 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2456 		/* If driver default is zero, fall back on EAL default */
2457 		if (nb_tx_desc == 0)
2458 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2459 	}
2460 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2461 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2462 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2463 		RTE_ETHDEV_LOG(ERR,
2464 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2465 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2466 			dev_info.tx_desc_lim.nb_min,
2467 			dev_info.tx_desc_lim.nb_align);
2468 		return -EINVAL;
2469 	}
2470 
2471 	if (dev->data->dev_started &&
2472 		!(dev_info.dev_capa &
2473 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2474 		return -EBUSY;
2475 
2476 	if (dev->data->dev_started &&
2477 		(dev->data->tx_queue_state[tx_queue_id] !=
2478 			RTE_ETH_QUEUE_STATE_STOPPED))
2479 		return -EBUSY;
2480 
2481 	eth_dev_txq_release(dev, tx_queue_id);
2482 
2483 	if (tx_conf == NULL)
2484 		tx_conf = &dev_info.default_txconf;
2485 
2486 	local_conf = *tx_conf;
2487 
2488 	/*
2489 	 * If an offload has already been enabled in
2490 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2491 	 * so there is no need to enable it on this queue again.
2492 	 * The local_conf.offloads passed to the underlying PMD only carries
2493 	 * those offloads which are enabled on this queue only and
2494 	 * not enabled on all queues.
2495 	 */
2496 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2497 
2498 	/*
2499 	 * Newly added offloads for this queue are those not enabled in
2500 	 * rte_eth_dev_configure() and they must be per-queue offloads.
2501 	 * A pure per-port offload can't be enabled on one queue while
2502 	 * disabled on another queue. A pure per-port offload can't be
2503 	 * newly enabled on any single queue if it hasn't been
2504 	 * enabled in rte_eth_dev_configure().
2505 	 */
2506 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2507 	     local_conf.offloads) {
2508 		RTE_ETHDEV_LOG(ERR,
2509 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2510 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2511 			port_id, tx_queue_id, local_conf.offloads,
2512 			dev_info.tx_queue_offload_capa,
2513 			__func__);
2514 		return -EINVAL;
2515 	}
2516 
2517 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2518 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2519 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2520 }
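
/*
 * Editor's note: minimal caller-side sketch, not part of the original source.
 * Passing a NULL tx_conf selects the driver defaults reported by
 * rte_eth_dev_info_get(); "port_id" is assumed to be configured with at
 * least one Tx queue.
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(), NULL);
 *	if (ret != 0)
 *		printf("Tx queue setup failed: %s\n", rte_strerror(-ret));
 */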
2521 
2522 int
2523 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2524 			       uint16_t nb_tx_desc,
2525 			       const struct rte_eth_hairpin_conf *conf)
2526 {
2527 	struct rte_eth_dev *dev;
2528 	struct rte_eth_hairpin_cap cap;
2529 	int i;
2530 	int count;
2531 	int ret;
2532 
2533 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2534 	dev = &rte_eth_devices[port_id];
2535 
2536 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2537 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2538 		return -EINVAL;
2539 	}
2540 
2541 	if (conf == NULL) {
2542 		RTE_ETHDEV_LOG(ERR,
2543 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2544 			port_id);
2545 		return -EINVAL;
2546 	}
2547 
2548 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2549 	if (ret != 0)
2550 		return ret;
2551 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2552 		return -ENOTSUP;
2553 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2554 	if (nb_tx_desc == 0)
2555 		nb_tx_desc = cap.max_nb_desc;
2556 	if (nb_tx_desc > cap.max_nb_desc) {
2557 		RTE_ETHDEV_LOG(ERR,
2558 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2559 			nb_tx_desc, cap.max_nb_desc);
2560 		return -EINVAL;
2561 	}
2562 	if (conf->peer_count > cap.max_tx_2_rx) {
2563 		RTE_ETHDEV_LOG(ERR,
2564 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2565 			conf->peer_count, cap.max_tx_2_rx);
2566 		return -EINVAL;
2567 	}
2568 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2569 		RTE_ETHDEV_LOG(ERR,
2570 			"Attempt to use locked device memory for Tx queue, which is not supported");
2571 		return -EINVAL;
2572 	}
2573 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2574 		RTE_ETHDEV_LOG(ERR,
2575 			"Attempt to use DPDK memory for Tx queue, which is not supported");
2576 		return -EINVAL;
2577 	}
2578 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2579 		RTE_ETHDEV_LOG(ERR,
2580 			"Attempt to use mutually exclusive memory settings for Tx queue");
2581 		return -EINVAL;
2582 	}
2583 	if (conf->force_memory &&
2584 	    !conf->use_locked_device_memory &&
2585 	    !conf->use_rte_memory) {
2586 		RTE_ETHDEV_LOG(ERR,
2587 			"Attempt to force Tx queue memory settings, but none is set");
2588 		return -EINVAL;
2589 	}
2590 	if (conf->peer_count == 0) {
2591 		RTE_ETHDEV_LOG(ERR,
2592 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2593 			conf->peer_count);
2594 		return -EINVAL;
2595 	}
2596 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2597 	     cap.max_nb_queues != UINT16_MAX; i++) {
2598 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2599 			count++;
2600 	}
2601 	if (count > cap.max_nb_queues) {
2602 		RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2603 		cap.max_nb_queues);
2604 		return -EINVAL;
2605 	}
2606 	if (dev->data->dev_started)
2607 		return -EBUSY;
2608 	eth_dev_txq_release(dev, tx_queue_id);
2609 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2610 		(dev, tx_queue_id, nb_tx_desc, conf);
2611 	if (ret == 0)
2612 		dev->data->tx_queue_state[tx_queue_id] =
2613 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2614 	ret = eth_err(port_id, ret);
2615 
2616 	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2617 					     conf, ret);
2618 
2619 	return ret;
2620 }
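
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It pairs one Rx and one Tx hairpin queue on the same (single) port, the
 * simplest hairpin topology; "hp_rxq" and "hp_txq" are application-chosen
 * queue indices, set up before rte_eth_dev_start(). A descriptor count of 0
 * requests the driver maximum, as handled above.
 *
 *	struct rte_eth_hairpin_conf hp_conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = hp_txq },
 *	};
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, hp_rxq, 0, &hp_conf);
 *	if (ret == 0) {
 *		hp_conf.peers[0].queue = hp_rxq;
 *		ret = rte_eth_tx_hairpin_queue_setup(port_id, hp_txq, 0, &hp_conf);
 *	}
 */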
2621 
2622 int
2623 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2624 {
2625 	struct rte_eth_dev *dev;
2626 	int ret;
2627 
2628 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2629 	dev = &rte_eth_devices[tx_port];
2630 
2631 	if (dev->data->dev_started == 0) {
2632 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2633 		return -EBUSY;
2634 	}
2635 
2636 	if (*dev->dev_ops->hairpin_bind == NULL)
2637 		return -ENOTSUP;
2638 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2639 	if (ret != 0)
2640 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2641 			       " to Rx %d (%d - all ports)\n",
2642 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2643 
2644 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2645 
2646 	return ret;
2647 }
2648 
2649 int
2650 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2651 {
2652 	struct rte_eth_dev *dev;
2653 	int ret;
2654 
2655 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2656 	dev = &rte_eth_devices[tx_port];
2657 
2658 	if (dev->data->dev_started == 0) {
2659 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2660 		return -EBUSY;
2661 	}
2662 
2663 	if (*dev->dev_ops->hairpin_unbind == NULL)
2664 		return -ENOTSUP;
2665 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2666 	if (ret != 0)
2667 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2668 			       " from Rx %d (%d - all ports)\n",
2669 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2670 
2671 	rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret);
2672 
2673 	return ret;
2674 }
2675 
2676 int
2677 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2678 			       size_t len, uint32_t direction)
2679 {
2680 	struct rte_eth_dev *dev;
2681 	int ret;
2682 
2683 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2684 	dev = &rte_eth_devices[port_id];
2685 
2686 	if (peer_ports == NULL) {
2687 		RTE_ETHDEV_LOG(ERR,
2688 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2689 			port_id);
2690 		return -EINVAL;
2691 	}
2692 
2693 	if (len == 0) {
2694 		RTE_ETHDEV_LOG(ERR,
2695 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2696 			port_id);
2697 		return -EINVAL;
2698 	}
2699 
2700 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2701 		return -ENOTSUP;
2702 
2703 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2704 						      len, direction);
2705 	if (ret < 0)
2706 		RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2707 			       port_id, direction ? "Rx" : "Tx");
2708 
2709 	rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len,
2710 					     direction, ret);
2711 
2712 	return ret;
2713 }
2714 
2715 void
2716 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2717 		void *userdata __rte_unused)
2718 {
2719 	rte_pktmbuf_free_bulk(pkts, unsent);
2720 
2721 	rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent);
2722 }
2723 
2724 void
2725 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2726 		void *userdata)
2727 {
2728 	uint64_t *count = userdata;
2729 
2730 	rte_pktmbuf_free_bulk(pkts, unsent);
2731 	*count += unsent;
2732 
2733 	rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count);
2734 }
2735 
2736 int
2737 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2738 		buffer_tx_error_fn cbfn, void *userdata)
2739 {
2740 	if (buffer == NULL) {
2741 		RTE_ETHDEV_LOG(ERR,
2742 			"Cannot set Tx buffer error callback to NULL buffer\n");
2743 		return -EINVAL;
2744 	}
2745 
2746 	buffer->error_callback = cbfn;
2747 	buffer->error_userdata = userdata;
2748 
2749 	rte_eth_trace_tx_buffer_set_err_callback(buffer);
2750 
2751 	return 0;
2752 }
2753 
2754 int
2755 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2756 {
2757 	int ret = 0;
2758 
2759 	if (buffer == NULL) {
2760 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2761 		return -EINVAL;
2762 	}
2763 
2764 	buffer->size = size;
2765 	if (buffer->error_callback == NULL) {
2766 		ret = rte_eth_tx_buffer_set_err_callback(
2767 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2768 	}
2769 
2770 	rte_eth_trace_tx_buffer_init(buffer, size, ret);
2771 
2772 	return ret;
2773 }
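
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It shows the usual pattern around the Tx buffer API: allocate, optionally
 * install the counting callback defined above, buffer packets and flush.
 * "dropped" is an application-owned counter and "pkt" a previously built mbuf.
 *
 *	struct rte_eth_dev_tx_buffer *buf;
 *	static uint64_t dropped;
 *
 *	buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &dropped);
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);	// queue one mbuf
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);	// send whatever is pending
 */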
2774 
2775 int
2776 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2777 {
2778 	struct rte_eth_dev *dev;
2779 	int ret;
2780 
2781 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782 	dev = &rte_eth_devices[port_id];
2783 
2784 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2785 		return -ENOTSUP;
2786 
2787 	/* Call driver to free pending mbufs. */
2788 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2789 					       free_cnt);
2790 	ret = eth_err(port_id, ret);
2791 
2792 	rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret);
2793 
2794 	return ret;
2795 }
2796 
2797 int
2798 rte_eth_promiscuous_enable(uint16_t port_id)
2799 {
2800 	struct rte_eth_dev *dev;
2801 	int diag = 0;
2802 
2803 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2804 	dev = &rte_eth_devices[port_id];
2805 
2806 	if (dev->data->promiscuous == 1)
2807 		return 0;
2808 
2809 	if (*dev->dev_ops->promiscuous_enable == NULL)
2810 		return -ENOTSUP;
2811 
2812 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2813 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2814 
2815 	diag = eth_err(port_id, diag);
2816 
2817 	rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous,
2818 					 diag);
2819 
2820 	return diag;
2821 }
2822 
2823 int
2824 rte_eth_promiscuous_disable(uint16_t port_id)
2825 {
2826 	struct rte_eth_dev *dev;
2827 	int diag = 0;
2828 
2829 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2830 	dev = &rte_eth_devices[port_id];
2831 
2832 	if (dev->data->promiscuous == 0)
2833 		return 0;
2834 
2835 	if (*dev->dev_ops->promiscuous_disable == NULL)
2836 		return -ENOTSUP;
2837 
2838 	dev->data->promiscuous = 0;
2839 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2840 	if (diag != 0)
2841 		dev->data->promiscuous = 1;
2842 
2843 	diag = eth_err(port_id, diag);
2844 
2845 	rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous,
2846 					  diag);
2847 
2848 	return diag;
2849 }
2850 
2851 int
2852 rte_eth_promiscuous_get(uint16_t port_id)
2853 {
2854 	struct rte_eth_dev *dev;
2855 
2856 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2857 	dev = &rte_eth_devices[port_id];
2858 
2859 	rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous);
2860 
2861 	return dev->data->promiscuous;
2862 }
2863 
2864 int
2865 rte_eth_allmulticast_enable(uint16_t port_id)
2866 {
2867 	struct rte_eth_dev *dev;
2868 	int diag;
2869 
2870 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2871 	dev = &rte_eth_devices[port_id];
2872 
2873 	if (dev->data->all_multicast == 1)
2874 		return 0;
2875 
2876 	if (*dev->dev_ops->allmulticast_enable == NULL)
2877 		return -ENOTSUP;
2878 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2879 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2880 
2881 	diag = eth_err(port_id, diag);
2882 
2883 	rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast,
2884 					  diag);
2885 
2886 	return diag;
2887 }
2888 
2889 int
2890 rte_eth_allmulticast_disable(uint16_t port_id)
2891 {
2892 	struct rte_eth_dev *dev;
2893 	int diag;
2894 
2895 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2896 	dev = &rte_eth_devices[port_id];
2897 
2898 	if (dev->data->all_multicast == 0)
2899 		return 0;
2900 
2901 	if (*dev->dev_ops->allmulticast_disable == NULL)
2902 		return -ENOTSUP;
2903 	dev->data->all_multicast = 0;
2904 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2905 	if (diag != 0)
2906 		dev->data->all_multicast = 1;
2907 
2908 	diag = eth_err(port_id, diag);
2909 
2910 	rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast,
2911 					   diag);
2912 
2913 	return diag;
2914 }
2915 
2916 int
2917 rte_eth_allmulticast_get(uint16_t port_id)
2918 {
2919 	struct rte_eth_dev *dev;
2920 
2921 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2922 	dev = &rte_eth_devices[port_id];
2923 
2924 	rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast);
2925 
2926 	return dev->data->all_multicast;
2927 }
2928 
2929 int
2930 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2931 {
2932 	struct rte_eth_dev *dev;
2933 
2934 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2935 	dev = &rte_eth_devices[port_id];
2936 
2937 	if (eth_link == NULL) {
2938 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2939 			port_id);
2940 		return -EINVAL;
2941 	}
2942 
2943 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2944 		rte_eth_linkstatus_get(dev, eth_link);
2945 	else {
2946 		if (*dev->dev_ops->link_update == NULL)
2947 			return -ENOTSUP;
2948 		(*dev->dev_ops->link_update)(dev, 1);
2949 		*eth_link = dev->data->dev_link;
2950 	}
2951 
2952 	rte_eth_trace_link_get(port_id, eth_link);
2953 
2954 	return 0;
2955 }
2956 
2957 int
2958 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2959 {
2960 	struct rte_eth_dev *dev;
2961 
2962 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2963 	dev = &rte_eth_devices[port_id];
2964 
2965 	if (eth_link == NULL) {
2966 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2967 			port_id);
2968 		return -EINVAL;
2969 	}
2970 
2971 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2972 		rte_eth_linkstatus_get(dev, eth_link);
2973 	else {
2974 		if (*dev->dev_ops->link_update == NULL)
2975 			return -ENOTSUP;
2976 		(*dev->dev_ops->link_update)(dev, 0);
2977 		*eth_link = dev->data->dev_link;
2978 	}
2979 
2980 	rte_eth_trace_link_get_nowait(port_id, eth_link);
2981 
2982 	return 0;
2983 }
2984 
2985 const char *
2986 rte_eth_link_speed_to_str(uint32_t link_speed)
2987 {
2988 	const char *ret;
2989 
2990 	switch (link_speed) {
2991 	case RTE_ETH_SPEED_NUM_NONE:
2992 		ret = "None";
2993 		break;
2994 	case RTE_ETH_SPEED_NUM_10M:
2995 		ret = "10 Mbps";
2996 		break;
2997 	case RTE_ETH_SPEED_NUM_100M:
2998 		ret = "100 Mbps";
2999 		break;
3000 	case RTE_ETH_SPEED_NUM_1G:
3001 		ret = "1 Gbps";
3002 		break;
3003 	case RTE_ETH_SPEED_NUM_2_5G:
3004 		ret = "2.5 Gbps";
3005 		break;
3006 	case RTE_ETH_SPEED_NUM_5G:
3007 		ret = "5 Gbps";
3008 		break;
3009 	case RTE_ETH_SPEED_NUM_10G:
3010 		ret = "10 Gbps";
3011 		break;
3012 	case RTE_ETH_SPEED_NUM_20G:
3013 		ret = "20 Gbps";
3014 		break;
3015 	case RTE_ETH_SPEED_NUM_25G:
3016 		ret = "25 Gbps";
3017 		break;
3018 	case RTE_ETH_SPEED_NUM_40G:
3019 		ret = "40 Gbps";
3020 		break;
3021 	case RTE_ETH_SPEED_NUM_50G:
3022 		ret = "50 Gbps";
3023 		break;
3024 	case RTE_ETH_SPEED_NUM_56G:
3025 		ret = "56 Gbps";
3026 		break;
3027 	case RTE_ETH_SPEED_NUM_100G:
3028 		ret = "100 Gbps";
3029 		break;
3030 	case RTE_ETH_SPEED_NUM_200G:
3031 		ret = "200 Gbps";
3032 		break;
3033 	case RTE_ETH_SPEED_NUM_400G:
3034 		ret = "400 Gbps";
3035 		break;
3036 	case RTE_ETH_SPEED_NUM_UNKNOWN:
3037 		ret = "Unknown";
3038 		break;
3039 	default:
3040 		ret = "Invalid";
3041 	}
3042 
3043 	rte_eth_trace_link_speed_to_str(link_speed, ret);
3044 
3045 	return ret;
3046 }
3047 
3048 int
3049 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
3050 {
3051 	int ret;
3052 
3053 	if (str == NULL) {
3054 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
3055 		return -EINVAL;
3056 	}
3057 
3058 	if (len == 0) {
3059 		RTE_ETHDEV_LOG(ERR,
3060 			"Cannot convert link to string with zero size\n");
3061 		return -EINVAL;
3062 	}
3063 
3064 	if (eth_link == NULL) {
3065 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
3066 		return -EINVAL;
3067 	}
3068 
3069 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
3070 		ret = snprintf(str, len, "Link down");
3071 	else
3072 		ret = snprintf(str, len, "Link up at %s %s %s",
3073 			rte_eth_link_speed_to_str(eth_link->link_speed),
3074 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
3075 			"FDX" : "HDX",
3076 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
3077 			"Autoneg" : "Fixed");
3078 
3079 	rte_eth_trace_link_to_str(len, eth_link, str, ret);
3080 
3081 	return ret;
3082 }
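
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * It combines the two helpers above: a non-blocking link query formatted into
 * a human-readable string, using the RTE_ETH_LINK_MAX_STR_LEN buffer size
 * recommended by the API.
 *
 *	struct rte_eth_link link;
 *	char link_str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(link_str, sizeof(link_str), &link);
 *		printf("Port %u: %s\n", port_id, link_str);
 *	}
 */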
3083 
3084 int
3085 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
3086 {
3087 	struct rte_eth_dev *dev;
3088 	int ret;
3089 
3090 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3091 	dev = &rte_eth_devices[port_id];
3092 
3093 	if (stats == NULL) {
3094 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
3095 			port_id);
3096 		return -EINVAL;
3097 	}
3098 
3099 	memset(stats, 0, sizeof(*stats));
3100 
3101 	if (*dev->dev_ops->stats_get == NULL)
3102 		return -ENOTSUP;
3103 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
3104 	ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
3105 
3106 	rte_eth_trace_stats_get(port_id, stats, ret);
3107 
3108 	return ret;
3109 }
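
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * Basic counters are returned in a caller-provided rte_eth_stats structure;
 * ipackets, imissed and ierrors are fields of that structure.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %"PRIu64" pkts, %"PRIu64" missed, %"PRIu64" errors\n",
 *		       stats.ipackets, stats.imissed, stats.ierrors);
 */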
3110 
3111 int
3112 rte_eth_stats_reset(uint16_t port_id)
3113 {
3114 	struct rte_eth_dev *dev;
3115 	int ret;
3116 
3117 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3118 	dev = &rte_eth_devices[port_id];
3119 
3120 	if (*dev->dev_ops->stats_reset == NULL)
3121 		return -ENOTSUP;
3122 	ret = (*dev->dev_ops->stats_reset)(dev);
3123 	if (ret != 0)
3124 		return eth_err(port_id, ret);
3125 
3126 	dev->data->rx_mbuf_alloc_failed = 0;
3127 
3128 	rte_eth_trace_stats_reset(port_id);
3129 
3130 	return 0;
3131 }
3132 
3133 static inline int
3134 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
3135 {
3136 	uint16_t nb_rxqs, nb_txqs;
3137 	int count;
3138 
3139 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3140 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3141 
3142 	count = RTE_NB_STATS;
3143 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
3144 		count += nb_rxqs * RTE_NB_RXQ_STATS;
3145 		count += nb_txqs * RTE_NB_TXQ_STATS;
3146 	}
3147 
3148 	return count;
3149 }
3150 
3151 static int
3152 eth_dev_get_xstats_count(uint16_t port_id)
3153 {
3154 	struct rte_eth_dev *dev;
3155 	int count;
3156 
3157 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3158 	dev = &rte_eth_devices[port_id];
3159 	if (dev->dev_ops->xstats_get_names != NULL) {
3160 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
3161 		if (count < 0)
3162 			return eth_err(port_id, count);
3163 	} else
3164 		count = 0;
3165 
3166 
3167 	count += eth_dev_get_xstats_basic_count(dev);
3168 
3169 	return count;
3170 }
3171 
3172 int
3173 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3174 		uint64_t *id)
3175 {
3176 	int cnt_xstats, idx_xstat;
3177 
3178 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179 
3180 	if (xstat_name == NULL) {
3181 		RTE_ETHDEV_LOG(ERR,
3182 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
3183 			port_id);
3184 		return -ENOMEM;
3185 	}
3186 
3187 	if (id == NULL) {
3188 		RTE_ETHDEV_LOG(ERR,
3189 			"Cannot get ethdev port %u xstats ID to NULL\n",
3190 			port_id);
3191 		return -ENOMEM;
3192 	}
3193 
3194 	/* Get count */
3195 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
3196 	if (cnt_xstats  < 0) {
3197 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
3198 		return -ENODEV;
3199 	}
3200 
3201 	/* Get id-name lookup table */
3202 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
3203 
3204 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
3205 			port_id, xstats_names, cnt_xstats, NULL)) {
3206 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
3207 		return -1;
3208 	}
3209 
3210 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
3211 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
3212 			*id = idx_xstat;
3213 
3214 			rte_eth_trace_xstats_get_id_by_name(port_id,
3215 							    xstat_name, *id);
3216 
3217 			return 0;
3218 		}
3219 	}
3220 
3221 	return -EINVAL;
3222 }
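
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * Resolving an xstat name to an ID once and then polling it by ID avoids the
 * full name lookup on every read; "rx_good_packets" is one of the basic
 * statistics exposed for every port.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */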
3223 
3224 /* retrieve basic stats names */
3225 static int
3226 eth_basic_stats_get_names(struct rte_eth_dev *dev,
3227 	struct rte_eth_xstat_name *xstats_names)
3228 {
3229 	int cnt_used_entries = 0;
3230 	uint32_t idx, id_queue;
3231 	uint16_t num_q;
3232 
3233 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
3234 		strlcpy(xstats_names[cnt_used_entries].name,
3235 			eth_dev_stats_strings[idx].name,
3236 			sizeof(xstats_names[0].name));
3237 		cnt_used_entries++;
3238 	}
3239 
3240 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3241 		return cnt_used_entries;
3242 
3243 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3244 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3245 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
3246 			snprintf(xstats_names[cnt_used_entries].name,
3247 				sizeof(xstats_names[0].name),
3248 				"rx_q%u_%s",
3249 				id_queue, eth_dev_rxq_stats_strings[idx].name);
3250 			cnt_used_entries++;
3251 		}
3252 
3253 	}
3254 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3255 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3256 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
3257 			snprintf(xstats_names[cnt_used_entries].name,
3258 				sizeof(xstats_names[0].name),
3259 				"tx_q%u_%s",
3260 				id_queue, eth_dev_txq_stats_strings[idx].name);
3261 			cnt_used_entries++;
3262 		}
3263 	}
3264 	return cnt_used_entries;
3265 }
3266 
3267 /* retrieve ethdev extended statistics names */
3268 int
3269 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3270 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3271 	uint64_t *ids)
3272 {
3273 	struct rte_eth_xstat_name *xstats_names_copy;
3274 	unsigned int no_basic_stat_requested = 1;
3275 	unsigned int no_ext_stat_requested = 1;
3276 	unsigned int expected_entries;
3277 	unsigned int basic_count;
3278 	struct rte_eth_dev *dev;
3279 	unsigned int i;
3280 	int ret;
3281 
3282 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3283 	dev = &rte_eth_devices[port_id];
3284 
3285 	basic_count = eth_dev_get_xstats_basic_count(dev);
3286 	ret = eth_dev_get_xstats_count(port_id);
3287 	if (ret < 0)
3288 		return ret;
3289 	expected_entries = (unsigned int)ret;
3290 
3291 	/* Return max number of stats if no ids given */
3292 	if (!ids) {
3293 		if (!xstats_names)
3294 			return expected_entries;
3295 		else if (xstats_names && size < expected_entries)
3296 			return expected_entries;
3297 	}
3298 
3299 	if (ids && !xstats_names)
3300 		return -EINVAL;
3301 
3302 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3303 		uint64_t ids_copy[size];
3304 
3305 		for (i = 0; i < size; i++) {
3306 			if (ids[i] < basic_count) {
3307 				no_basic_stat_requested = 0;
3308 				break;
3309 			}
3310 
3311 			/*
3312 			 * Convert ids to xstats ids that PMD knows.
3313 			 * ids known by user are basic + extended stats.
3314 			 */
3315 			ids_copy[i] = ids[i] - basic_count;
3316 		}
3317 
3318 		if (no_basic_stat_requested)
3319 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3320 					ids_copy, xstats_names, size);
3321 	}
3322 
3323 	/* Retrieve all stats */
3324 	if (!ids) {
3325 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3326 				expected_entries);
3327 		if (num_stats < 0 || num_stats > (int)expected_entries)
3328 			return num_stats;
3329 		else
3330 			return expected_entries;
3331 	}
3332 
3333 	xstats_names_copy = calloc(expected_entries,
3334 		sizeof(struct rte_eth_xstat_name));
3335 
3336 	if (!xstats_names_copy) {
3337 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3338 		return -ENOMEM;
3339 	}
3340 
3341 	if (ids) {
3342 		for (i = 0; i < size; i++) {
3343 			if (ids[i] >= basic_count) {
3344 				no_ext_stat_requested = 0;
3345 				break;
3346 			}
3347 		}
3348 	}
3349 
3350 	/* Fill xstats_names_copy structure */
3351 	if (ids && no_ext_stat_requested) {
3352 		eth_basic_stats_get_names(dev, xstats_names_copy);
3353 	} else {
3354 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3355 			expected_entries);
3356 		if (ret < 0) {
3357 			free(xstats_names_copy);
3358 			return ret;
3359 		}
3360 	}
3361 
3362 	/* Filter stats */
3363 	for (i = 0; i < size; i++) {
3364 		if (ids[i] >= expected_entries) {
3365 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3366 			free(xstats_names_copy);
3367 			return -1;
3368 		}
3369 		xstats_names[i] = xstats_names_copy[ids[i]];
3370 
3371 		rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i],
3372 						     ids[i]);
3373 	}
3374 
3375 	free(xstats_names_copy);
3376 	return size;
3377 }
3378 
3379 int
3380 rte_eth_xstats_get_names(uint16_t port_id,
3381 	struct rte_eth_xstat_name *xstats_names,
3382 	unsigned int size)
3383 {
3384 	struct rte_eth_dev *dev;
3385 	int cnt_used_entries;
3386 	int cnt_expected_entries;
3387 	int cnt_driver_entries;
3388 	int i;
3389 
3390 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3391 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
3392 			(int)size < cnt_expected_entries)
3393 		return cnt_expected_entries;
3394 
3395 	/* port_id checked in eth_dev_get_xstats_count() */
3396 	dev = &rte_eth_devices[port_id];
3397 
3398 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3399 
3400 	if (dev->dev_ops->xstats_get_names != NULL) {
3401 		/* If there are any driver-specific xstats, append them
3402 		 * to end of list.
3403 		 */
3404 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3405 			dev,
3406 			xstats_names + cnt_used_entries,
3407 			size - cnt_used_entries);
3408 		if (cnt_driver_entries < 0)
3409 			return eth_err(port_id, cnt_driver_entries);
3410 		cnt_used_entries += cnt_driver_entries;
3411 	}
3412 
3413 	for (i = 0; i < cnt_used_entries; i++)
3414 		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3415 					       size, cnt_used_entries);
3416 
3417 	return cnt_used_entries;
3418 }
3419 
3420 
3421 static int
3422 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3423 {
3424 	struct rte_eth_dev *dev;
3425 	struct rte_eth_stats eth_stats;
3426 	unsigned int count = 0, i, q;
3427 	uint64_t val, *stats_ptr;
3428 	uint16_t nb_rxqs, nb_txqs;
3429 	int ret;
3430 
3431 	ret = rte_eth_stats_get(port_id, &eth_stats);
3432 	if (ret < 0)
3433 		return ret;
3434 
3435 	dev = &rte_eth_devices[port_id];
3436 
3437 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3438 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3439 
3440 	/* global stats */
3441 	for (i = 0; i < RTE_NB_STATS; i++) {
3442 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3443 					eth_dev_stats_strings[i].offset);
3444 		val = *stats_ptr;
3445 		xstats[count++].value = val;
3446 	}
3447 
3448 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3449 		return count;
3450 
3451 	/* per-rxq stats */
3452 	for (q = 0; q < nb_rxqs; q++) {
3453 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3454 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3455 					eth_dev_rxq_stats_strings[i].offset +
3456 					q * sizeof(uint64_t));
3457 			val = *stats_ptr;
3458 			xstats[count++].value = val;
3459 		}
3460 	}
3461 
3462 	/* per-txq stats */
3463 	for (q = 0; q < nb_txqs; q++) {
3464 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3465 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3466 					eth_dev_txq_stats_strings[i].offset +
3467 					q * sizeof(uint64_t));
3468 			val = *stats_ptr;
3469 			xstats[count++].value = val;
3470 		}
3471 	}
3472 	return count;
3473 }
3474 
3475 /* retrieve ethdev extended statistics */
3476 int
3477 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3478 			 uint64_t *values, unsigned int size)
3479 {
3480 	unsigned int no_basic_stat_requested = 1;
3481 	unsigned int no_ext_stat_requested = 1;
3482 	unsigned int num_xstats_filled;
3483 	unsigned int basic_count;
3484 	uint16_t expected_entries;
3485 	struct rte_eth_dev *dev;
3486 	unsigned int i;
3487 	int ret;
3488 
3489 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3490 	dev = &rte_eth_devices[port_id];
3491 
3492 	ret = eth_dev_get_xstats_count(port_id);
3493 	if (ret < 0)
3494 		return ret;
3495 	expected_entries = (uint16_t)ret;
3496 	struct rte_eth_xstat xstats[expected_entries];
3497 	basic_count = eth_dev_get_xstats_basic_count(dev);
3498 
3499 	/* Return max number of stats if no ids given */
3500 	if (!ids) {
3501 		if (!values)
3502 			return expected_entries;
3503 		else if (values && size < expected_entries)
3504 			return expected_entries;
3505 	}
3506 
3507 	if (ids && !values)
3508 		return -EINVAL;
3509 
3510 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3511 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3512 		uint64_t ids_copy[size];
3513 
3514 		for (i = 0; i < size; i++) {
3515 			if (ids[i] < basic_count) {
3516 				no_basic_stat_requested = 0;
3517 				break;
3518 			}
3519 
3520 			/*
3521 			 * Convert ids to xstats ids that PMD knows.
3522 			 * ids known by user are basic + extended stats.
3523 			 */
3524 			ids_copy[i] = ids[i] - basic_count;
3525 		}
3526 
3527 		if (no_basic_stat_requested)
3528 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3529 					values, size);
3530 	}
3531 
3532 	if (ids) {
3533 		for (i = 0; i < size; i++) {
3534 			if (ids[i] >= basic_count) {
3535 				no_ext_stat_requested = 0;
3536 				break;
3537 			}
3538 		}
3539 	}
3540 
3541 	/* Fill the xstats structure */
3542 	if (ids && no_ext_stat_requested)
3543 		ret = eth_basic_stats_get(port_id, xstats);
3544 	else
3545 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3546 
3547 	if (ret < 0)
3548 		return ret;
3549 	num_xstats_filled = (unsigned int)ret;
3550 
3551 	/* Return all stats */
3552 	if (!ids) {
3553 		for (i = 0; i < num_xstats_filled; i++)
3554 			values[i] = xstats[i].value;
3555 		return expected_entries;
3556 	}
3557 
3558 	/* Filter stats */
3559 	for (i = 0; i < size; i++) {
3560 		if (ids[i] >= expected_entries) {
3561 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3562 			return -1;
3563 		}
3564 		values[i] = xstats[ids[i]].value;
3565 	}
3566 
3567 	rte_eth_trace_xstats_get_by_id(port_id, ids, values, size);
3568 
3569 	return size;
3570 }
3571 
3572 int
3573 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3574 	unsigned int n)
3575 {
3576 	struct rte_eth_dev *dev;
3577 	unsigned int count, i;
3578 	signed int xcount = 0;
3579 	int ret;
3580 
3581 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3582 	if (xstats == NULL && n > 0)
3583 		return -EINVAL;
3584 	dev = &rte_eth_devices[port_id];
3585 
3586 	count = eth_dev_get_xstats_basic_count(dev);
3587 
3588 	/* implemented by the driver */
3589 	if (dev->dev_ops->xstats_get != NULL) {
3590 		/* Retrieve the xstats from the driver at the end of the
3591 		 * xstats struct.
3592 		 */
3593 		xcount = (*dev->dev_ops->xstats_get)(dev,
3594 				     (n > count) ? xstats + count : NULL,
3595 				     (n > count) ? n - count : 0);
3596 
3597 		if (xcount < 0)
3598 			return eth_err(port_id, xcount);
3599 	}
3600 
3601 	if (n < count + xcount || xstats == NULL)
3602 		return count + xcount;
3603 
3604 	/* now fill the xstats structure */
3605 	ret = eth_basic_stats_get(port_id, xstats);
3606 	if (ret < 0)
3607 		return ret;
3608 	count = ret;
3609 
3610 	for (i = 0; i < count; i++)
3611 		xstats[i].id = i;
3612 	/* add an offset to driver-specific stats */
3613 	for ( ; i < count + xcount; i++)
3614 		xstats[i].id += count;
3615 
3616 	for (i = 0; i < n; i++)
3617 		rte_eth_trace_xstats_get(port_id, xstats[i]);
3618 
3619 	return count + xcount;
3620 }
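
/*
 * Editor's note: illustrative sketch, not part of the original source.
 * The xstats API follows the usual two-call pattern: query the count with a
 * NULL buffer, allocate, then fetch names and values with that length.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xstats = malloc(n * sizeof(*xstats));
 *		struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *		if (xstats != NULL && names != NULL &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *		    rte_eth_xstats_get(port_id, xstats, n) == n)
 *			for (i = 0; i < n; i++)
 *				printf("%s: %"PRIu64"\n",
 *				       names[i].name, xstats[i].value);
 *		free(xstats);
 *		free(names);
 *	}
 */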
3621 
3622 /* reset ethdev extended statistics */
3623 int
3624 rte_eth_xstats_reset(uint16_t port_id)
3625 {
3626 	struct rte_eth_dev *dev;
3627 
3628 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3629 	dev = &rte_eth_devices[port_id];
3630 
3631 	/* implemented by the driver */
3632 	if (dev->dev_ops->xstats_reset != NULL) {
3633 		int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3634 
3635 		rte_eth_trace_xstats_reset(port_id, ret);
3636 
3637 		return ret;
3638 	}
3639 
3640 	/* fallback to default */
3641 	return rte_eth_stats_reset(port_id);
3642 }
3643 
3644 static int
3645 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3646 		uint8_t stat_idx, uint8_t is_rx)
3647 {
3648 	struct rte_eth_dev *dev;
3649 
3650 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3651 	dev = &rte_eth_devices[port_id];
3652 
3653 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3654 		return -EINVAL;
3655 
3656 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3657 		return -EINVAL;
3658 
3659 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3660 		return -EINVAL;
3661 
3662 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3663 		return -ENOTSUP;
3664 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3665 }
3666 
3667 int
3668 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3669 		uint8_t stat_idx)
3670 {
3671 	int ret;
3672 
3673 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3674 						tx_queue_id,
3675 						stat_idx, STAT_QMAP_TX));
3676 
3677 	rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id,
3678 						    stat_idx, ret);
3679 
3680 	return ret;
3681 }
3682 
3683 int
3684 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3685 		uint8_t stat_idx)
3686 {
3687 	int ret;
3688 
3689 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3690 						rx_queue_id,
3691 						stat_idx, STAT_QMAP_RX));
3692 
3693 	rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id,
3694 						    stat_idx, ret);
3695 
3696 	return ret;
3697 }
3698 
3699 int
3700 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3701 {
3702 	struct rte_eth_dev *dev;
3703 	int ret;
3704 
3705 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3706 	dev = &rte_eth_devices[port_id];
3707 
3708 	if (fw_version == NULL && fw_size > 0) {
3709 		RTE_ETHDEV_LOG(ERR,
3710 			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3711 			port_id);
3712 		return -EINVAL;
3713 	}
3714 
3715 	if (*dev->dev_ops->fw_version_get == NULL)
3716 		return -ENOTSUP;
3717 	ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3718 							fw_version, fw_size));
3719 
3720 	rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret);
3721 
3722 	return ret;
3723 }
3724 
3725 int
3726 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3727 {
3728 	struct rte_eth_dev *dev;
3729 	const struct rte_eth_desc_lim lim = {
3730 		.nb_max = UINT16_MAX,
3731 		.nb_min = 0,
3732 		.nb_align = 1,
3733 		.nb_seg_max = UINT16_MAX,
3734 		.nb_mtu_seg_max = UINT16_MAX,
3735 	};
3736 	int diag;
3737 
3738 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3739 	dev = &rte_eth_devices[port_id];
3740 
3741 	if (dev_info == NULL) {
3742 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3743 			port_id);
3744 		return -EINVAL;
3745 	}
3746 
3747 	/*
3748 	 * Init dev_info before port_id check since caller does not have
3749 	 * return status and does not know if get is successful or not.
3750 	 */
3751 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3752 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3753 
3754 	dev_info->rx_desc_lim = lim;
3755 	dev_info->tx_desc_lim = lim;
3756 	dev_info->device = dev->device;
3757 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3758 		RTE_ETHER_CRC_LEN;
3759 	dev_info->max_mtu = UINT16_MAX;
3760 
3761 	if (*dev->dev_ops->dev_infos_get == NULL)
3762 		return -ENOTSUP;
3763 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3764 	if (diag != 0) {
3765 		/* Cleanup already filled in device information */
3766 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3767 		return eth_err(port_id, diag);
3768 	}
3769 
3770 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3771 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3772 			RTE_MAX_QUEUES_PER_PORT);
3773 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3774 			RTE_MAX_QUEUES_PER_PORT);
3775 
3776 	dev_info->driver_name = dev->device->driver->name;
3777 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3778 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3779 
3780 	dev_info->dev_flags = &dev->data->dev_flags;
3781 
3782 	rte_ethdev_trace_info_get(port_id, dev_info);
3783 
3784 	return 0;
3785 }
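
/*
 * Usage sketch (hypothetical application code, not compiled here): read the
 * device limits and clamp the application's queue request to them. The
 * desired queue count of 8 is an illustrative assumption.
 *
 *	#include <rte_common.h>
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	pick_nb_queues(uint16_t port_id, uint16_t *nb_rxq, uint16_t *nb_txq)
 *	{
 *		struct rte_eth_dev_info info;
 *		int ret = rte_eth_dev_info_get(port_id, &info);
 *
 *		if (ret != 0)
 *			return ret;
 *		*nb_rxq = RTE_MIN((uint16_t)8, info.max_rx_queues);
 *		*nb_txq = RTE_MIN((uint16_t)8, info.max_tx_queues);
 *		return 0;
 *	}
 */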
3786 
3787 int
3788 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3789 {
3790 	struct rte_eth_dev *dev;
3791 
3792 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3793 	dev = &rte_eth_devices[port_id];
3794 
3795 	if (dev_conf == NULL) {
3796 		RTE_ETHDEV_LOG(ERR,
3797 			"Cannot get ethdev port %u configuration to NULL\n",
3798 			port_id);
3799 		return -EINVAL;
3800 	}
3801 
3802 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3803 
3804 	rte_ethdev_trace_conf_get(port_id, dev_conf);
3805 
3806 	return 0;
3807 }
3808 
3809 int
3810 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3811 				 uint32_t *ptypes, int num)
3812 {
3813 	int i, j;
3814 	struct rte_eth_dev *dev;
3815 	const uint32_t *all_ptypes;
3816 
3817 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3818 	dev = &rte_eth_devices[port_id];
3819 
3820 	if (ptypes == NULL && num > 0) {
3821 		RTE_ETHDEV_LOG(ERR,
3822 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3823 			port_id);
3824 		return -EINVAL;
3825 	}
3826 
3827 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3828 		return 0;
3829 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3830 
3831 	if (!all_ptypes)
3832 		return 0;
3833 
3834 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3835 		if (all_ptypes[i] & ptype_mask) {
3836 			if (j < num) {
3837 				ptypes[j] = all_ptypes[i];
3838 
3839 				rte_ethdev_trace_get_supported_ptypes(port_id,
3840 						j, num, ptypes[j]);
3841 			}
3842 			j++;
3843 		}
3844 
3845 	return j;
3846 }
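
/*
 * Usage sketch (hypothetical application code): ask how many L3 packet types
 * the driver can recognize, then fetch them. Calling with a NULL array and
 * num == 0 first returns the required array size.
 *
 *	#include <errno.h>
 *	#include <stdlib.h>
 *	#include <rte_ethdev.h>
 *	#include <rte_mbuf_ptype.h>
 *
 *	static int
 *	dump_l3_ptypes(uint16_t port_id)
 *	{
 *		int n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L3_MASK, NULL, 0);
 *		uint32_t *ptypes;
 *
 *		if (n <= 0)
 *			return n;
 *		ptypes = malloc(sizeof(*ptypes) * n);
 *		if (ptypes == NULL)
 *			return -ENOMEM;
 *		n = rte_eth_dev_get_supported_ptypes(port_id,
 *				RTE_PTYPE_L3_MASK, ptypes, n);
 *		free(ptypes);
 *		return n;
 *	}
 */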
3847 
3848 int
3849 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3850 				 uint32_t *set_ptypes, unsigned int num)
3851 {
3852 	const uint32_t valid_ptype_masks[] = {
3853 		RTE_PTYPE_L2_MASK,
3854 		RTE_PTYPE_L3_MASK,
3855 		RTE_PTYPE_L4_MASK,
3856 		RTE_PTYPE_TUNNEL_MASK,
3857 		RTE_PTYPE_INNER_L2_MASK,
3858 		RTE_PTYPE_INNER_L3_MASK,
3859 		RTE_PTYPE_INNER_L4_MASK,
3860 	};
3861 	const uint32_t *all_ptypes;
3862 	struct rte_eth_dev *dev;
3863 	uint32_t unused_mask;
3864 	unsigned int i, j;
3865 	int ret;
3866 
3867 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3868 	dev = &rte_eth_devices[port_id];
3869 
3870 	if (num > 0 && set_ptypes == NULL) {
3871 		RTE_ETHDEV_LOG(ERR,
3872 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3873 			port_id);
3874 		return -EINVAL;
3875 	}
3876 
3877 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3878 			*dev->dev_ops->dev_ptypes_set == NULL) {
3879 		ret = 0;
3880 		goto ptype_unknown;
3881 	}
3882 
3883 	if (ptype_mask == 0) {
3884 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3885 				ptype_mask);
3886 		goto ptype_unknown;
3887 	}
3888 
3889 	unused_mask = ptype_mask;
3890 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3891 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3892 		if (mask && mask != valid_ptype_masks[i]) {
3893 			ret = -EINVAL;
3894 			goto ptype_unknown;
3895 		}
3896 		unused_mask &= ~valid_ptype_masks[i];
3897 	}
3898 
3899 	if (unused_mask) {
3900 		ret = -EINVAL;
3901 		goto ptype_unknown;
3902 	}
3903 
3904 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3905 	if (all_ptypes == NULL) {
3906 		ret = 0;
3907 		goto ptype_unknown;
3908 	}
3909 
3910 	/*
3911 	 * Accommodate as many set_ptypes as possible. If the supplied
3912 	 * set_ptypes array is too small, fill it partially.
3913 	 */
3914 	for (i = 0, j = 0; set_ptypes != NULL &&
3915 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3916 		if (ptype_mask & all_ptypes[i]) {
3917 			if (j < num - 1) {
3918 				set_ptypes[j] = all_ptypes[i];
3919 
3920 				rte_ethdev_trace_set_ptypes(port_id, j, num,
3921 						set_ptypes[j]);
3922 
3923 				j++;
3924 				continue;
3925 			}
3926 			break;
3927 		}
3928 	}
3929 
3930 	if (set_ptypes != NULL && j < num)
3931 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3932 
3933 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3934 
3935 ptype_unknown:
3936 	if (num > 0)
3937 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3938 
3939 	return ret;
3940 }
3941 
3942 int
3943 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3944 	unsigned int num)
3945 {
3946 	int32_t ret;
3947 	struct rte_eth_dev *dev;
3948 	struct rte_eth_dev_info dev_info;
3949 
3950 	if (ma == NULL) {
3951 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3952 		return -EINVAL;
3953 	}
3954 
3955 	/* will check for us that port_id is a valid one */
3956 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3957 	if (ret != 0)
3958 		return ret;
3959 
3960 	dev = &rte_eth_devices[port_id];
3961 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3962 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3963 
3964 	rte_eth_trace_macaddrs_get(port_id, num);
3965 
3966 	return num;
3967 }
3968 
3969 int
3970 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3971 {
3972 	struct rte_eth_dev *dev;
3973 
3974 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3975 	dev = &rte_eth_devices[port_id];
3976 
3977 	if (mac_addr == NULL) {
3978 		RTE_ETHDEV_LOG(ERR,
3979 			"Cannot get ethdev port %u MAC address to NULL\n",
3980 			port_id);
3981 		return -EINVAL;
3982 	}
3983 
3984 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3985 
3986 	rte_eth_trace_macaddr_get(port_id, mac_addr);
3987 
3988 	return 0;
3989 }
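
/*
 * Usage sketch (hypothetical application code): read the default MAC address
 * of a port and print it in the canonical colon-separated form.
 *
 *	#include <stdio.h>
 *	#include <rte_ethdev.h>
 *	#include <rte_ether.h>
 *
 *	static void
 *	print_port_mac(uint16_t port_id)
 *	{
 *		struct rte_ether_addr addr;
 *		char buf[RTE_ETHER_ADDR_FMT_SIZE];
 *
 *		if (rte_eth_macaddr_get(port_id, &addr) != 0)
 *			return;
 *		rte_ether_format_addr(buf, sizeof(buf), &addr);
 *		printf("port %u MAC %s\n", port_id, buf);
 *	}
 */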
3990 
3991 int
3992 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3993 {
3994 	struct rte_eth_dev *dev;
3995 
3996 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3997 	dev = &rte_eth_devices[port_id];
3998 
3999 	if (mtu == NULL) {
4000 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
4001 			port_id);
4002 		return -EINVAL;
4003 	}
4004 
4005 	*mtu = dev->data->mtu;
4006 
4007 	rte_ethdev_trace_get_mtu(port_id, *mtu);
4008 
4009 	return 0;
4010 }
4011 
4012 int
4013 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
4014 {
4015 	int ret;
4016 	struct rte_eth_dev_info dev_info;
4017 	struct rte_eth_dev *dev;
4018 
4019 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4020 	dev = &rte_eth_devices[port_id];
4021 	if (*dev->dev_ops->mtu_set == NULL)
4022 		return -ENOTSUP;
4023 
4024 	/*
4025 	 * Check if the device supports dev_infos_get, if it does not
4026 	 * skip min_mtu/max_mtu validation here as this requires values
4027 	 * that are populated within the call to rte_eth_dev_info_get()
4028 	 * which relies on dev->dev_ops->dev_infos_get.
4029 	 */
4030 	if (*dev->dev_ops->dev_infos_get != NULL) {
4031 		ret = rte_eth_dev_info_get(port_id, &dev_info);
4032 		if (ret != 0)
4033 			return ret;
4034 
4035 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
4036 		if (ret != 0)
4037 			return ret;
4038 	}
4039 
4040 	if (dev->data->dev_configured == 0) {
4041 		RTE_ETHDEV_LOG(ERR,
4042 			"Port %u must be configured before MTU set\n",
4043 			port_id);
4044 		return -EINVAL;
4045 	}
4046 
4047 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
4048 	if (ret == 0)
4049 		dev->data->mtu = mtu;
4050 
4051 	ret = eth_err(port_id, ret);
4052 
4053 	rte_ethdev_trace_set_mtu(port_id, mtu, ret);
4054 
4055 	return ret;
4056 }
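
/*
 * Usage sketch (hypothetical application code): raise the MTU to a
 * jumbo-frame value after the port has been configured. The 9000-byte value
 * is an illustrative assumption; the request is rejected if it falls outside
 * the min_mtu/max_mtu range the driver reports via rte_eth_dev_info_get().
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	enable_jumbo(uint16_t port_id)
 *	{
 *		int ret = rte_eth_dev_set_mtu(port_id, 9000);
 *
 *		if (ret == 0) {
 *			uint16_t mtu;
 *
 *			ret = rte_eth_dev_get_mtu(port_id, &mtu);
 *		}
 *		return ret;
 *	}
 */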
4057 
4058 int
4059 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
4060 {
4061 	struct rte_eth_dev *dev;
4062 	int ret;
4063 
4064 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4065 	dev = &rte_eth_devices[port_id];
4066 
4067 	if (!(dev->data->dev_conf.rxmode.offloads &
4068 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
4069 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
4070 			port_id);
4071 		return -ENOSYS;
4072 	}
4073 
4074 	if (vlan_id > 4095) {
4075 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
4076 			port_id, vlan_id);
4077 		return -EINVAL;
4078 	}
4079 	if (*dev->dev_ops->vlan_filter_set == NULL)
4080 		return -ENOTSUP;
4081 
4082 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
4083 	if (ret == 0) {
4084 		struct rte_vlan_filter_conf *vfc;
4085 		int vidx;
4086 		int vbit;
4087 
4088 		vfc = &dev->data->vlan_filter_conf;
4089 		vidx = vlan_id / 64;
4090 		vbit = vlan_id % 64;
4091 
4092 		if (on)
4093 			vfc->ids[vidx] |= RTE_BIT64(vbit);
4094 		else
4095 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
4096 	}
4097 
4098 	ret = eth_err(port_id, ret);
4099 
4100 	rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret);
4101 
4102 	return ret;
4103 }
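
/*
 * Usage sketch (hypothetical application code): accept only VLAN 100 on a
 * port. RTE_ETH_RX_OFFLOAD_VLAN_FILTER must have been requested in the port
 * configuration, otherwise the function above returns -ENOSYS. The third
 * argument enables (1) or removes (0) the filter entry.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	allow_vlan_100(uint16_t port_id)
 *	{
 *		return rte_eth_dev_vlan_filter(port_id, 100, 1);
 *	}
 */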
4104 
4105 int
4106 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
4107 				    int on)
4108 {
4109 	struct rte_eth_dev *dev;
4110 
4111 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4112 	dev = &rte_eth_devices[port_id];
4113 
4114 	if (rx_queue_id >= dev->data->nb_rx_queues) {
4115 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
4116 		return -EINVAL;
4117 	}
4118 
4119 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
4120 		return -ENOTSUP;
4121 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
4122 
4123 	rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on);
4124 
4125 	return 0;
4126 }
4127 
4128 int
4129 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4130 				enum rte_vlan_type vlan_type,
4131 				uint16_t tpid)
4132 {
4133 	struct rte_eth_dev *dev;
4134 	int ret;
4135 
4136 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4137 	dev = &rte_eth_devices[port_id];
4138 
4139 	if (*dev->dev_ops->vlan_tpid_set == NULL)
4140 		return -ENOTSUP;
4141 	ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
4142 							      tpid));
4143 
4144 	rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret);
4145 
4146 	return ret;
4147 }
4148 
4149 int
4150 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
4151 {
4152 	struct rte_eth_dev_info dev_info;
4153 	struct rte_eth_dev *dev;
4154 	int ret = 0;
4155 	int mask = 0;
4156 	int cur, org = 0;
4157 	uint64_t orig_offloads;
4158 	uint64_t dev_offloads;
4159 	uint64_t new_offloads;
4160 
4161 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4162 	dev = &rte_eth_devices[port_id];
4163 
4164 	/* save original values in case of failure */
4165 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
4166 	dev_offloads = orig_offloads;
4167 
4168 	/* check which option changed by application */
4169 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
4170 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
4171 	if (cur != org) {
4172 		if (cur)
4173 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4174 		else
4175 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4176 		mask |= RTE_ETH_VLAN_STRIP_MASK;
4177 	}
4178 
4179 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
4180 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
4181 	if (cur != org) {
4182 		if (cur)
4183 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4184 		else
4185 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4186 		mask |= RTE_ETH_VLAN_FILTER_MASK;
4187 	}
4188 
4189 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
4190 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
4191 	if (cur != org) {
4192 		if (cur)
4193 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4194 		else
4195 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4196 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
4197 	}
4198 
4199 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
4200 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
4201 	if (cur != org) {
4202 		if (cur)
4203 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4204 		else
4205 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4206 		mask |= RTE_ETH_QINQ_STRIP_MASK;
4207 	}
4208 
4209 	/* no change */
4210 	if (mask == 0)
4211 		return ret;
4212 
4213 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4214 	if (ret != 0)
4215 		return ret;
4216 
4217 	/* Rx VLAN offloading must be within its device capabilities */
4218 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
4219 		new_offloads = dev_offloads & ~orig_offloads;
4220 		RTE_ETHDEV_LOG(ERR,
4221 			"Ethdev port_id=%u requested newly added VLAN offloads "
4222 			"0x%" PRIx64 " must be within Rx offloads capabilities "
4223 			"0x%" PRIx64 " in %s()\n",
4224 			port_id, new_offloads, dev_info.rx_offload_capa,
4225 			__func__);
4226 		return -EINVAL;
4227 	}
4228 
4229 	if (*dev->dev_ops->vlan_offload_set == NULL)
4230 		return -ENOTSUP;
4231 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
4232 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
4233 	if (ret) {
4234 		/* hit an error, restore the original values */
4235 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
4236 	}
4237 
4238 	ret = eth_err(port_id, ret);
4239 
4240 	rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret);
4241 
4242 	return ret;
4243 }
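
/*
 * Usage sketch (hypothetical application code): turn on VLAN stripping and
 * filtering at runtime while leaving the other VLAN-related offloads
 * untouched. A read-modify-write of the current mask keeps unrelated bits as
 * they are.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	enable_vlan_strip_and_filter(uint16_t port_id)
 *	{
 *		int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *		if (mask < 0)
 *			return mask;
 *		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD;
 *		return rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */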
4244 
4245 int
4246 rte_eth_dev_get_vlan_offload(uint16_t port_id)
4247 {
4248 	struct rte_eth_dev *dev;
4249 	uint64_t *dev_offloads;
4250 	int ret = 0;
4251 
4252 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4253 	dev = &rte_eth_devices[port_id];
4254 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
4255 
4256 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4257 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
4258 
4259 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4260 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
4261 
4262 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
4263 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
4264 
4265 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4266 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
4267 
4268 	rte_ethdev_trace_get_vlan_offload(port_id, ret);
4269 
4270 	return ret;
4271 }
4272 
4273 int
4274 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
4275 {
4276 	struct rte_eth_dev *dev;
4277 	int ret;
4278 
4279 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4280 	dev = &rte_eth_devices[port_id];
4281 
4282 	if (*dev->dev_ops->vlan_pvid_set == NULL)
4283 		return -ENOTSUP;
4284 	ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
4285 
4286 	rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret);
4287 
4288 	return ret;
4289 }
4290 
4291 int
4292 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4293 {
4294 	struct rte_eth_dev *dev;
4295 	int ret;
4296 
4297 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4298 	dev = &rte_eth_devices[port_id];
4299 
4300 	if (fc_conf == NULL) {
4301 		RTE_ETHDEV_LOG(ERR,
4302 			"Cannot get ethdev port %u flow control config to NULL\n",
4303 			port_id);
4304 		return -EINVAL;
4305 	}
4306 
4307 	if (*dev->dev_ops->flow_ctrl_get == NULL)
4308 		return -ENOTSUP;
4309 	memset(fc_conf, 0, sizeof(*fc_conf));
4310 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
4311 
4312 	rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret);
4313 
4314 	return ret;
4315 }
4316 
4317 int
4318 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4319 {
4320 	struct rte_eth_dev *dev;
4321 	int ret;
4322 
4323 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4324 	dev = &rte_eth_devices[port_id];
4325 
4326 	if (fc_conf == NULL) {
4327 		RTE_ETHDEV_LOG(ERR,
4328 			"Cannot set ethdev port %u flow control from NULL config\n",
4329 			port_id);
4330 		return -EINVAL;
4331 	}
4332 
4333 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
4334 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
4335 		return -EINVAL;
4336 	}
4337 
4338 	if (*dev->dev_ops->flow_ctrl_set == NULL)
4339 		return -ENOTSUP;
4340 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
4341 
4342 	rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret);
4343 
4344 	return ret;
4345 }
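
/*
 * Usage sketch (hypothetical application code): enable full (Rx + Tx) 802.3x
 * flow control. The pause-time value is an illustrative assumption; valid
 * ranges and watermark semantics are device specific.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	enable_flow_control(uint16_t port_id)
 *	{
 *		struct rte_eth_fc_conf fc;
 *		int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
 *
 *		if (ret != 0)
 *			return ret;
 *		fc.mode = RTE_ETH_FC_FULL;
 *		fc.pause_time = 0xffff;
 *		return rte_eth_dev_flow_ctrl_set(port_id, &fc);
 *	}
 */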
4346 
4347 int
4348 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4349 				   struct rte_eth_pfc_conf *pfc_conf)
4350 {
4351 	struct rte_eth_dev *dev;
4352 	int ret;
4353 
4354 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4355 	dev = &rte_eth_devices[port_id];
4356 
4357 	if (pfc_conf == NULL) {
4358 		RTE_ETHDEV_LOG(ERR,
4359 			"Cannot set ethdev port %u priority flow control from NULL config\n",
4360 			port_id);
4361 		return -EINVAL;
4362 	}
4363 
4364 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
4365 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
4366 		return -EINVAL;
4367 	}
4368 
4369 	/* High-water and low-water validation are device-specific */
4370 	if  (*dev->dev_ops->priority_flow_ctrl_set == NULL)
4371 		return -ENOTSUP;
4372 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4373 			       (dev, pfc_conf));
4374 
4375 	rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret);
4376 
4377 	return ret;
4378 }
4379 
4380 static int
4381 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4382 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4383 {
4384 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
4385 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4386 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
4387 			RTE_ETHDEV_LOG(ERR,
4388 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
4389 				pfc_queue_conf->rx_pause.tx_qid,
4390 				dev_info->nb_tx_queues);
4391 			return -EINVAL;
4392 		}
4393 
4394 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
4395 			RTE_ETHDEV_LOG(ERR,
4396 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
4397 				pfc_queue_conf->rx_pause.tc, tc_max);
4398 			return -EINVAL;
4399 		}
4400 	}
4401 
4402 	return 0;
4403 }
4404 
4405 static int
4406 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4407 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4408 {
4409 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
4410 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4411 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
4412 			RTE_ETHDEV_LOG(ERR,
4413 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
4414 				pfc_queue_conf->tx_pause.rx_qid,
4415 				dev_info->nb_rx_queues);
4416 			return -EINVAL;
4417 		}
4418 
4419 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
4420 			RTE_ETHDEV_LOG(ERR,
4421 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
4422 				pfc_queue_conf->tx_pause.tc, tc_max);
4423 			return -EINVAL;
4424 		}
4425 	}
4426 
4427 	return 0;
4428 }
4429 
4430 int
4431 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4432 		struct rte_eth_pfc_queue_info *pfc_queue_info)
4433 {
4434 	struct rte_eth_dev *dev;
4435 	int ret;
4436 
4437 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4438 	dev = &rte_eth_devices[port_id];
4439 
4440 	if (pfc_queue_info == NULL) {
4441 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
4442 			port_id);
4443 		return -EINVAL;
4444 	}
4445 
4446 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
4447 		return -ENOTSUP;
4448 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
4449 			(dev, pfc_queue_info));
4450 
4451 	rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id,
4452 						pfc_queue_info, ret);
4453 
4454 	return ret;
4455 }
4456 
4457 int
4458 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4459 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4460 {
4461 	struct rte_eth_pfc_queue_info pfc_info;
4462 	struct rte_eth_dev_info dev_info;
4463 	struct rte_eth_dev *dev;
4464 	int ret;
4465 
4466 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4467 	dev = &rte_eth_devices[port_id];
4468 
4469 	if (pfc_queue_conf == NULL) {
4470 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
4471 			port_id);
4472 		return -EINVAL;
4473 	}
4474 
4475 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4476 	if (ret != 0)
4477 		return ret;
4478 
4479 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
4480 	if (ret != 0)
4481 		return ret;
4482 
4483 	if (pfc_info.tc_max == 0) {
4484 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
4485 			port_id);
4486 		return -ENOTSUP;
4487 	}
4488 
4489 	/* Check whether the requested mode is supported */
4490 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
4491 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
4492 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
4493 			port_id);
4494 		return -EINVAL;
4495 	}
4496 
4497 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
4498 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
4499 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
4500 			port_id);
4501 		return -EINVAL;
4502 	}
4503 
4504 	/* Validate Rx pause parameters */
4505 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4506 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
4507 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
4508 				pfc_queue_conf);
4509 		if (ret != 0)
4510 			return ret;
4511 	}
4512 
4513 	/* Validate Tx pause parameters */
4514 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4515 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
4516 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
4517 				pfc_queue_conf);
4518 		if (ret != 0)
4519 			return ret;
4520 	}
4521 
4522 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4523 		return -ENOTSUP;
4524 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
4525 			(dev, pfc_queue_conf));
4526 
4527 	rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id,
4528 						pfc_queue_conf, ret);
4529 
4530 	return ret;
4531 }
4532 
4533 static int
4534 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4535 			uint16_t reta_size)
4536 {
4537 	uint16_t i, num;
4538 
4539 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4540 	for (i = 0; i < num; i++) {
4541 		if (reta_conf[i].mask)
4542 			return 0;
4543 	}
4544 
4545 	return -EINVAL;
4546 }
4547 
4548 static int
4549 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4550 			 uint16_t reta_size,
4551 			 uint16_t max_rxq)
4552 {
4553 	uint16_t i, idx, shift;
4554 
4555 	if (max_rxq == 0) {
4556 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
4557 		return -EINVAL;
4558 	}
4559 
4560 	for (i = 0; i < reta_size; i++) {
4561 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4562 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4563 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4564 			(reta_conf[idx].reta[shift] >= max_rxq)) {
4565 			RTE_ETHDEV_LOG(ERR,
4566 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
4567 				idx, shift,
4568 				reta_conf[idx].reta[shift], max_rxq);
4569 			return -EINVAL;
4570 		}
4571 	}
4572 
4573 	return 0;
4574 }
4575 
4576 int
4577 rte_eth_dev_rss_reta_update(uint16_t port_id,
4578 			    struct rte_eth_rss_reta_entry64 *reta_conf,
4579 			    uint16_t reta_size)
4580 {
4581 	enum rte_eth_rx_mq_mode mq_mode;
4582 	struct rte_eth_dev *dev;
4583 	int ret;
4584 
4585 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4586 	dev = &rte_eth_devices[port_id];
4587 
4588 	if (reta_conf == NULL) {
4589 		RTE_ETHDEV_LOG(ERR,
4590 			"Cannot update ethdev port %u RSS RETA to NULL\n",
4591 			port_id);
4592 		return -EINVAL;
4593 	}
4594 
4595 	if (reta_size == 0) {
4596 		RTE_ETHDEV_LOG(ERR,
4597 			"Cannot update ethdev port %u RSS RETA with zero size\n",
4598 			port_id);
4599 		return -EINVAL;
4600 	}
4601 
4602 	/* Check mask bits */
4603 	ret = eth_check_reta_mask(reta_conf, reta_size);
4604 	if (ret < 0)
4605 		return ret;
4606 
4607 	/* Check entry value */
4608 	ret = eth_check_reta_entry(reta_conf, reta_size,
4609 				dev->data->nb_rx_queues);
4610 	if (ret < 0)
4611 		return ret;
4612 
4613 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4614 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4615 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4616 		return -ENOTSUP;
4617 	}
4618 
4619 	if (*dev->dev_ops->reta_update == NULL)
4620 		return -ENOTSUP;
4621 	ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4622 							    reta_size));
4623 
4624 	rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret);
4625 
4626 	return ret;
4627 }
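
/*
 * Usage sketch (hypothetical application code): spread traffic round-robin
 * over nb_queues Rx queues by rewriting the whole redirection table. The
 * fixed local array of 8 groups (512 entries) is an illustrative assumption
 * about the largest RETA this snippet handles.
 *
 *	#include <errno.h>
 *	#include <string.h>
 *	#include <rte_bitops.h>
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	spread_reta(uint16_t port_id, uint16_t nb_queues)
 *	{
 *		struct rte_eth_dev_info info;
 *		struct rte_eth_rss_reta_entry64 reta[8];
 *		uint16_t i, idx, shift;
 *		int ret = rte_eth_dev_info_get(port_id, &info);
 *
 *		if (ret != 0)
 *			return ret;
 *		if (info.reta_size > 8 * RTE_ETH_RETA_GROUP_SIZE)
 *			return -E2BIG;
 *		memset(reta, 0, sizeof(reta));
 *		for (i = 0; i < info.reta_size; i++) {
 *			idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *			shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *			reta[idx].mask |= RTE_BIT64(shift);
 *			reta[idx].reta[shift] = i % nb_queues;
 *		}
 *		return rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
 *	}
 */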
4628 
4629 int
4630 rte_eth_dev_rss_reta_query(uint16_t port_id,
4631 			   struct rte_eth_rss_reta_entry64 *reta_conf,
4632 			   uint16_t reta_size)
4633 {
4634 	struct rte_eth_dev *dev;
4635 	int ret;
4636 
4637 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4638 	dev = &rte_eth_devices[port_id];
4639 
4640 	if (reta_conf == NULL) {
4641 		RTE_ETHDEV_LOG(ERR,
4642 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
4643 			port_id);
4644 		return -EINVAL;
4645 	}
4646 
4647 	/* Check mask bits */
4648 	ret = eth_check_reta_mask(reta_conf, reta_size);
4649 	if (ret < 0)
4650 		return ret;
4651 
4652 	if (*dev->dev_ops->reta_query == NULL)
4653 		return -ENOTSUP;
4654 	ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4655 							   reta_size));
4656 
4657 	rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret);
4658 
4659 	return ret;
4660 }
4661 
4662 int
4663 rte_eth_dev_rss_hash_update(uint16_t port_id,
4664 			    struct rte_eth_rss_conf *rss_conf)
4665 {
4666 	struct rte_eth_dev *dev;
4667 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4668 	enum rte_eth_rx_mq_mode mq_mode;
4669 	int ret;
4670 
4671 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4672 	dev = &rte_eth_devices[port_id];
4673 
4674 	if (rss_conf == NULL) {
4675 		RTE_ETHDEV_LOG(ERR,
4676 			"Cannot update ethdev port %u RSS hash from NULL config\n",
4677 			port_id);
4678 		return -EINVAL;
4679 	}
4680 
4681 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4682 	if (ret != 0)
4683 		return ret;
4684 
4685 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4686 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4687 	    dev_info.flow_type_rss_offloads) {
4688 		RTE_ETHDEV_LOG(ERR,
4689 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4690 			port_id, rss_conf->rss_hf,
4691 			dev_info.flow_type_rss_offloads);
4692 		return -EINVAL;
4693 	}
4694 
4695 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4696 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4697 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4698 		return -ENOTSUP;
4699 	}
4700 
4701 	if (*dev->dev_ops->rss_hash_update == NULL)
4702 		return -ENOTSUP;
4703 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4704 								rss_conf));
4705 
4706 	rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret);
4707 
4708 	return ret;
4709 }
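
/*
 * Usage sketch (hypothetical application code): restrict the RSS hash to IP
 * and TCP fields. A NULL rss_key is commonly treated by drivers as "keep the
 * currently programmed key"; that behaviour is an assumption here.
 *
 *	#include <stddef.h>
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	hash_on_ip_tcp(uint16_t port_id)
 *	{
 *		struct rte_eth_rss_conf conf = {
 *			.rss_key = NULL,
 *			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *		};
 *
 *		return rte_eth_dev_rss_hash_update(port_id, &conf);
 *	}
 */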
4710 
4711 int
4712 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4713 			      struct rte_eth_rss_conf *rss_conf)
4714 {
4715 	struct rte_eth_dev *dev;
4716 	int ret;
4717 
4718 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4719 	dev = &rte_eth_devices[port_id];
4720 
4721 	if (rss_conf == NULL) {
4722 		RTE_ETHDEV_LOG(ERR,
4723 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4724 			port_id);
4725 		return -EINVAL;
4726 	}
4727 
4728 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4729 		return -ENOTSUP;
4730 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4731 								  rss_conf));
4732 
4733 	rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret);
4734 
4735 	return ret;
4736 }
4737 
4738 int
4739 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4740 				struct rte_eth_udp_tunnel *udp_tunnel)
4741 {
4742 	struct rte_eth_dev *dev;
4743 	int ret;
4744 
4745 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4746 	dev = &rte_eth_devices[port_id];
4747 
4748 	if (udp_tunnel == NULL) {
4749 		RTE_ETHDEV_LOG(ERR,
4750 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4751 			port_id);
4752 		return -EINVAL;
4753 	}
4754 
4755 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4756 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4757 		return -EINVAL;
4758 	}
4759 
4760 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4761 		return -ENOTSUP;
4762 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4763 								udp_tunnel));
4764 
4765 	rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret);
4766 
4767 	return ret;
4768 }
4769 
4770 int
4771 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4772 				   struct rte_eth_udp_tunnel *udp_tunnel)
4773 {
4774 	struct rte_eth_dev *dev;
4775 	int ret;
4776 
4777 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4778 	dev = &rte_eth_devices[port_id];
4779 
4780 	if (udp_tunnel == NULL) {
4781 		RTE_ETHDEV_LOG(ERR,
4782 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4783 			port_id);
4784 		return -EINVAL;
4785 	}
4786 
4787 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4788 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4789 		return -EINVAL;
4790 	}
4791 
4792 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4793 		return -ENOTSUP;
4794 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4795 								udp_tunnel));
4796 
4797 	rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret);
4798 
4799 	return ret;
4800 }
4801 
4802 int
4803 rte_eth_led_on(uint16_t port_id)
4804 {
4805 	struct rte_eth_dev *dev;
4806 	int ret;
4807 
4808 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4809 	dev = &rte_eth_devices[port_id];
4810 
4811 	if (*dev->dev_ops->dev_led_on == NULL)
4812 		return -ENOTSUP;
4813 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4814 
4815 	rte_eth_trace_led_on(port_id, ret);
4816 
4817 	return ret;
4818 }
4819 
4820 int
4821 rte_eth_led_off(uint16_t port_id)
4822 {
4823 	struct rte_eth_dev *dev;
4824 	int ret;
4825 
4826 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4827 	dev = &rte_eth_devices[port_id];
4828 
4829 	if (*dev->dev_ops->dev_led_off == NULL)
4830 		return -ENOTSUP;
4831 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4832 
4833 	rte_eth_trace_led_off(port_id, ret);
4834 
4835 	return ret;
4836 }
4837 
4838 int
4839 rte_eth_fec_get_capability(uint16_t port_id,
4840 			   struct rte_eth_fec_capa *speed_fec_capa,
4841 			   unsigned int num)
4842 {
4843 	struct rte_eth_dev *dev;
4844 	int ret;
4845 
4846 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4847 	dev = &rte_eth_devices[port_id];
4848 
4849 	if (speed_fec_capa == NULL && num > 0) {
4850 		RTE_ETHDEV_LOG(ERR,
4851 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4852 			port_id);
4853 		return -EINVAL;
4854 	}
4855 
4856 	if (*dev->dev_ops->fec_get_capability == NULL)
4857 		return -ENOTSUP;
4858 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4859 
4860 	rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret);
4861 
4862 	return ret;
4863 }
4864 
4865 int
4866 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4867 {
4868 	struct rte_eth_dev *dev;
4869 	int ret;
4870 
4871 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4872 	dev = &rte_eth_devices[port_id];
4873 
4874 	if (fec_capa == NULL) {
4875 		RTE_ETHDEV_LOG(ERR,
4876 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4877 			port_id);
4878 		return -EINVAL;
4879 	}
4880 
4881 	if (*dev->dev_ops->fec_get == NULL)
4882 		return -ENOTSUP;
4883 	ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4884 
4885 	rte_eth_trace_fec_get(port_id, fec_capa, ret);
4886 
4887 	return ret;
4888 }
4889 
4890 int
4891 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4892 {
4893 	struct rte_eth_dev *dev;
4894 	int ret;
4895 
4896 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4897 	dev = &rte_eth_devices[port_id];
4898 
4899 	if (fec_capa == 0) {
4900 		RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n");
4901 		return -EINVAL;
4902 	}
4903 
4904 	if (*dev->dev_ops->fec_set == NULL)
4905 		return -ENOTSUP;
4906 	ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4907 
4908 	rte_eth_trace_fec_set(port_id, fec_capa, ret);
4909 
4910 	return ret;
4911 }
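
/*
 * Usage sketch (hypothetical application code): request Reed-Solomon FEC if
 * the device advertises it for any speed, otherwise leave the current mode
 * alone. The capability array size of 8 is an illustrative assumption.
 *
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	prefer_rs_fec(uint16_t port_id)
 *	{
 *		struct rte_eth_fec_capa capa[8];
 *		int i, n = rte_eth_fec_get_capability(port_id, capa, 8);
 *
 *		for (i = 0; i < n && i < 8; i++)
 *			if (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
 *				return rte_eth_fec_set(port_id,
 *					RTE_ETH_FEC_MODE_TO_CAPA(RTE_ETH_FEC_RS));
 *		return 0;
 *	}
 */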
4912 
4913 /*
4914  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4915  * an empty spot.
4916  */
4917 static int
4918 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4919 {
4920 	struct rte_eth_dev_info dev_info;
4921 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4922 	unsigned i;
4923 	int ret;
4924 
4925 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4926 	if (ret != 0)
4927 		return -1;
4928 
4929 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4930 		if (memcmp(addr, &dev->data->mac_addrs[i],
4931 				RTE_ETHER_ADDR_LEN) == 0)
4932 			return i;
4933 
4934 	return -1;
4935 }
4936 
4937 static const struct rte_ether_addr null_mac_addr;
4938 
4939 int
4940 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4941 			uint32_t pool)
4942 {
4943 	struct rte_eth_dev *dev;
4944 	int index;
4945 	uint64_t pool_mask;
4946 	int ret;
4947 
4948 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4949 	dev = &rte_eth_devices[port_id];
4950 
4951 	if (addr == NULL) {
4952 		RTE_ETHDEV_LOG(ERR,
4953 			"Cannot add ethdev port %u MAC address from NULL address\n",
4954 			port_id);
4955 		return -EINVAL;
4956 	}
4957 
4958 	if (*dev->dev_ops->mac_addr_add == NULL)
4959 		return -ENOTSUP;
4960 
4961 	if (rte_is_zero_ether_addr(addr)) {
4962 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4963 			port_id);
4964 		return -EINVAL;
4965 	}
4966 	if (pool >= RTE_ETH_64_POOLS) {
4967 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4968 		return -EINVAL;
4969 	}
4970 
4971 	index = eth_dev_get_mac_addr_index(port_id, addr);
4972 	if (index < 0) {
4973 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4974 		if (index < 0) {
4975 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4976 				port_id);
4977 			return -ENOSPC;
4978 		}
4979 	} else {
4980 		pool_mask = dev->data->mac_pool_sel[index];
4981 
4982 		/* If both the MAC address and the pool are already present, do nothing */
4983 		if (pool_mask & RTE_BIT64(pool))
4984 			return 0;
4985 	}
4986 
4987 	/* Update NIC */
4988 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4989 
4990 	if (ret == 0) {
4991 		/* Update address in NIC data structure */
4992 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4993 
4994 		/* Update pool bitmap in NIC data structure */
4995 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4996 	}
4997 
4998 	ret = eth_err(port_id, ret);
4999 
5000 	rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret);
5001 
5002 	return ret;
5003 }
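
/*
 * Usage sketch (hypothetical application code): add a secondary unicast MAC
 * address to pool 0. The address bytes below are an arbitrary, locally
 * administered example.
 *
 *	#include <rte_ethdev.h>
 *	#include <rte_ether.h>
 *
 *	static int
 *	add_secondary_mac(uint16_t port_id)
 *	{
 *		struct rte_ether_addr addr = {
 *			.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *		};
 *
 *		return rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 *	}
 */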
5004 
5005 int
5006 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
5007 {
5008 	struct rte_eth_dev *dev;
5009 	int index;
5010 
5011 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5012 	dev = &rte_eth_devices[port_id];
5013 
5014 	if (addr == NULL) {
5015 		RTE_ETHDEV_LOG(ERR,
5016 			"Cannot remove ethdev port %u MAC address from NULL address\n",
5017 			port_id);
5018 		return -EINVAL;
5019 	}
5020 
5021 	if (*dev->dev_ops->mac_addr_remove == NULL)
5022 		return -ENOTSUP;
5023 
5024 	index = eth_dev_get_mac_addr_index(port_id, addr);
5025 	if (index == 0) {
5026 		RTE_ETHDEV_LOG(ERR,
5027 			"Port %u: Cannot remove default MAC address\n",
5028 			port_id);
5029 		return -EADDRINUSE;
5030 	} else if (index < 0)
5031 		return 0;  /* Do nothing if address wasn't found */
5032 
5033 	/* Update NIC */
5034 	(*dev->dev_ops->mac_addr_remove)(dev, index);
5035 
5036 	/* Update address in NIC data structure */
5037 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
5038 
5039 	/* reset pool bitmap */
5040 	dev->data->mac_pool_sel[index] = 0;
5041 
5042 	rte_ethdev_trace_mac_addr_remove(port_id, addr);
5043 
5044 	return 0;
5045 }
5046 
5047 int
5048 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
5049 {
5050 	struct rte_eth_dev *dev;
5051 	int index;
5052 	int ret;
5053 
5054 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5055 	dev = &rte_eth_devices[port_id];
5056 
5057 	if (addr == NULL) {
5058 		RTE_ETHDEV_LOG(ERR,
5059 			"Cannot set ethdev port %u default MAC address from NULL address\n",
5060 			port_id);
5061 		return -EINVAL;
5062 	}
5063 
5064 	if (!rte_is_valid_assigned_ether_addr(addr))
5065 		return -EINVAL;
5066 
5067 	if (*dev->dev_ops->mac_addr_set == NULL)
5068 		return -ENOTSUP;
5069 
5070 	/* Keep address unique in dev->data->mac_addrs[]. */
5071 	index = eth_dev_get_mac_addr_index(port_id, addr);
5072 	if (index > 0) {
5073 		RTE_ETHDEV_LOG(ERR,
5074 			"New default address for port %u was already in the address list. Please remove it first.\n",
5075 			port_id);
5076 		return -EEXIST;
5077 	}
5078 
5079 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
5080 	if (ret < 0)
5081 		return ret;
5082 
5083 	/* Update default address in NIC data structure */
5084 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
5085 
5086 	rte_ethdev_trace_default_mac_addr_set(port_id, addr);
5087 
5088 	return 0;
5089 }
5090 
5091 
5092 /*
5093  * Returns index into the unicast hash MAC address array of addr.
5094  * Use 00:00:00:00:00:00 to find an empty spot.
5095  */
5096 static int
5097 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
5098 		const struct rte_ether_addr *addr)
5099 {
5100 	struct rte_eth_dev_info dev_info;
5101 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5102 	unsigned i;
5103 	int ret;
5104 
5105 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5106 	if (ret != 0)
5107 		return -1;
5108 
5109 	if (!dev->data->hash_mac_addrs)
5110 		return -1;
5111 
5112 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
5113 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
5114 			RTE_ETHER_ADDR_LEN) == 0)
5115 			return i;
5116 
5117 	return -1;
5118 }
5119 
5120 int
5121 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
5122 				uint8_t on)
5123 {
5124 	int index;
5125 	int ret;
5126 	struct rte_eth_dev *dev;
5127 
5128 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5129 	dev = &rte_eth_devices[port_id];
5130 
5131 	if (addr == NULL) {
5132 		RTE_ETHDEV_LOG(ERR,
5133 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
5134 			port_id);
5135 		return -EINVAL;
5136 	}
5137 
5138 	if (rte_is_zero_ether_addr(addr)) {
5139 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
5140 			port_id);
5141 		return -EINVAL;
5142 	}
5143 
5144 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
5145 	/* If the address is already present and being enabled, do nothing */
5146 	if ((index >= 0) && on)
5147 		return 0;
5148 
5149 	if (index < 0) {
5150 		if (!on) {
5151 			RTE_ETHDEV_LOG(ERR,
5152 				"Port %u: the MAC address was not set in UTA\n",
5153 				port_id);
5154 			return -EINVAL;
5155 		}
5156 
5157 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
5158 		if (index < 0) {
5159 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
5160 				port_id);
5161 			return -ENOSPC;
5162 		}
5163 	}
5164 
5165 	if (*dev->dev_ops->uc_hash_table_set == NULL)
5166 		return -ENOTSUP;
5167 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
5168 	if (ret == 0) {
5169 		/* Update address in NIC data structure */
5170 		if (on)
5171 			rte_ether_addr_copy(addr,
5172 					&dev->data->hash_mac_addrs[index]);
5173 		else
5174 			rte_ether_addr_copy(&null_mac_addr,
5175 					&dev->data->hash_mac_addrs[index]);
5176 	}
5177 
5178 	ret = eth_err(port_id, ret);
5179 
5180 	rte_ethdev_trace_uc_hash_table_set(port_id, on, ret);
5181 
5182 	return ret;
5183 }
5184 
5185 int
5186 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
5187 {
5188 	struct rte_eth_dev *dev;
5189 	int ret;
5190 
5191 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5192 	dev = &rte_eth_devices[port_id];
5193 
5194 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
5195 		return -ENOTSUP;
5196 	ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));
5197 
5198 	rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret);
5199 
5200 	return ret;
5201 }
5202 
5203 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
5204 					uint32_t tx_rate)
5205 {
5206 	struct rte_eth_dev *dev;
5207 	struct rte_eth_dev_info dev_info;
5208 	struct rte_eth_link link;
5209 	int ret;
5210 
5211 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5212 	dev = &rte_eth_devices[port_id];
5213 
5214 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5215 	if (ret != 0)
5216 		return ret;
5217 
5218 	link = dev->data->dev_link;
5219 
5220 	if (queue_idx > dev_info.max_tx_queues) {
5221 		RTE_ETHDEV_LOG(ERR,
5222 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
5223 			port_id, queue_idx);
5224 		return -EINVAL;
5225 	}
5226 
5227 	if (tx_rate > link.link_speed) {
5228 		RTE_ETHDEV_LOG(ERR,
5229 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
5230 			tx_rate, link.link_speed);
5231 		return -EINVAL;
5232 	}
5233 
5234 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
5235 		return -ENOTSUP;
5236 	ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
5237 							queue_idx, tx_rate));
5238 
5239 	rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret);
5240 
5241 	return ret;
5242 }
5243 
5244 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
5245 			       uint8_t avail_thresh)
5246 {
5247 	struct rte_eth_dev *dev;
5248 	int ret;
5249 
5250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5251 	dev = &rte_eth_devices[port_id];
5252 
5253 	if (queue_id > dev->data->nb_rx_queues) {
5254 		RTE_ETHDEV_LOG(ERR,
5255 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
5256 			port_id, queue_id);
5257 		return -EINVAL;
5258 	}
5259 
5260 	if (avail_thresh > 99) {
5261 		RTE_ETHDEV_LOG(ERR,
5262 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
5263 			port_id);
5264 		return -EINVAL;
5265 	}
5266 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
5267 		return -ENOTSUP;
5268 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
5269 							     queue_id, avail_thresh));
5270 
5271 	rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret);
5272 
5273 	return ret;
5274 }
5275 
5276 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
5277 				 uint8_t *avail_thresh)
5278 {
5279 	struct rte_eth_dev *dev;
5280 	int ret;
5281 
5282 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5283 	dev = &rte_eth_devices[port_id];
5284 
5285 	if (queue_id == NULL)
5286 		return -EINVAL;
5287 	if (*queue_id >= dev->data->nb_rx_queues)
5288 		*queue_id = 0;
5289 
5290 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
5291 		return -ENOTSUP;
5292 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
5293 							     queue_id, avail_thresh));
5294 
5295 	rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret);
5296 
5297 	return ret;
5298 }
5299 
5300 RTE_INIT(eth_dev_init_fp_ops)
5301 {
5302 	uint32_t i;
5303 
5304 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
5305 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
5306 }
5307 
5308 RTE_INIT(eth_dev_init_cb_lists)
5309 {
5310 	uint16_t i;
5311 
5312 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
5313 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
5314 }
5315 
5316 int
5317 rte_eth_dev_callback_register(uint16_t port_id,
5318 			enum rte_eth_event_type event,
5319 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5320 {
5321 	struct rte_eth_dev *dev;
5322 	struct rte_eth_dev_callback *user_cb;
5323 	uint16_t next_port;
5324 	uint16_t last_port;
5325 
5326 	if (cb_fn == NULL) {
5327 		RTE_ETHDEV_LOG(ERR,
5328 			"Cannot register ethdev port %u callback from NULL\n",
5329 			port_id);
5330 		return -EINVAL;
5331 	}
5332 
5333 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5334 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
5335 		return -EINVAL;
5336 	}
5337 
5338 	if (port_id == RTE_ETH_ALL) {
5339 		next_port = 0;
5340 		last_port = RTE_MAX_ETHPORTS - 1;
5341 	} else {
5342 		next_port = last_port = port_id;
5343 	}
5344 
5345 	rte_spinlock_lock(&eth_dev_cb_lock);
5346 
5347 	do {
5348 		dev = &rte_eth_devices[next_port];
5349 
5350 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
5351 			if (user_cb->cb_fn == cb_fn &&
5352 				user_cb->cb_arg == cb_arg &&
5353 				user_cb->event == event) {
5354 				break;
5355 			}
5356 		}
5357 
5358 		/* create a new callback. */
5359 		if (user_cb == NULL) {
5360 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
5361 				sizeof(struct rte_eth_dev_callback), 0);
5362 			if (user_cb != NULL) {
5363 				user_cb->cb_fn = cb_fn;
5364 				user_cb->cb_arg = cb_arg;
5365 				user_cb->event = event;
5366 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
5367 						  user_cb, next);
5368 			} else {
5369 				rte_spinlock_unlock(&eth_dev_cb_lock);
5370 				rte_eth_dev_callback_unregister(port_id, event,
5371 								cb_fn, cb_arg);
5372 				return -ENOMEM;
5373 			}
5374 
5375 		}
5376 	} while (++next_port <= last_port);
5377 
5378 	rte_spinlock_unlock(&eth_dev_cb_lock);
5379 
5380 	rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg);
5381 
5382 	return 0;
5383 }
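
/*
 * Usage sketch (hypothetical application code): get notified on link status
 * changes. The callback typically executes in the interrupt/event thread, so
 * it should only do lightweight work.
 *
 *	#include <stdio.h>
 *	#include <rte_common.h>
 *	#include <rte_ethdev.h>
 *
 *	static int
 *	on_link_change(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		struct rte_eth_link link;
 *
 *		RTE_SET_USED(event);
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		if (rte_eth_link_get_nowait(port_id, &link) == 0)
 *			printf("port %u link is %s\n", port_id,
 *				link.link_status ? "up" : "down");
 *		return 0;
 *	}
 *
 * Registration, for example during initialization:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *			on_link_change, NULL);
 */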
5384 
5385 int
5386 rte_eth_dev_callback_unregister(uint16_t port_id,
5387 			enum rte_eth_event_type event,
5388 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5389 {
5390 	int ret;
5391 	struct rte_eth_dev *dev;
5392 	struct rte_eth_dev_callback *cb, *next;
5393 	uint16_t next_port;
5394 	uint16_t last_port;
5395 
5396 	if (cb_fn == NULL) {
5397 		RTE_ETHDEV_LOG(ERR,
5398 			"Cannot unregister ethdev port %u callback from NULL\n",
5399 			port_id);
5400 		return -EINVAL;
5401 	}
5402 
5403 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5404 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
5405 		return -EINVAL;
5406 	}
5407 
5408 	if (port_id == RTE_ETH_ALL) {
5409 		next_port = 0;
5410 		last_port = RTE_MAX_ETHPORTS - 1;
5411 	} else {
5412 		next_port = last_port = port_id;
5413 	}
5414 
5415 	rte_spinlock_lock(&eth_dev_cb_lock);
5416 
5417 	do {
5418 		dev = &rte_eth_devices[next_port];
5419 		ret = 0;
5420 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
5421 		     cb = next) {
5422 
5423 			next = TAILQ_NEXT(cb, next);
5424 
5425 			if (cb->cb_fn != cb_fn || cb->event != event ||
5426 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
5427 				continue;
5428 
5429 			/*
5430 			 * if this callback is not executing right now,
5431 			 * then remove it.
5432 			 */
5433 			if (cb->active == 0) {
5434 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
5435 				rte_free(cb);
5436 			} else {
5437 				ret = -EAGAIN;
5438 			}
5439 		}
5440 	} while (++next_port <= last_port);
5441 
5442 	rte_spinlock_unlock(&eth_dev_cb_lock);
5443 
5444 	rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg,
5445 					     ret);
5446 
5447 	return ret;
5448 }
5449 
5450 int
5451 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
5452 {
5453 	uint32_t vec;
5454 	struct rte_eth_dev *dev;
5455 	struct rte_intr_handle *intr_handle;
5456 	uint16_t qid;
5457 	int rc;
5458 
5459 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5460 	dev = &rte_eth_devices[port_id];
5461 
5462 	if (!dev->intr_handle) {
5463 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5464 		return -ENOTSUP;
5465 	}
5466 
5467 	intr_handle = dev->intr_handle;
5468 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5469 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5470 		return -EPERM;
5471 	}
5472 
5473 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
5474 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
5475 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5476 
5477 		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);
5478 
5479 		if (rc && rc != -EEXIST) {
5480 			RTE_ETHDEV_LOG(ERR,
5481 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
5482 				port_id, qid, op, epfd, vec);
5483 		}
5484 	}
5485 
5486 	return 0;
5487 }
5488 
5489 int
5490 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
5491 {
5492 	struct rte_intr_handle *intr_handle;
5493 	struct rte_eth_dev *dev;
5494 	unsigned int efd_idx;
5495 	uint32_t vec;
5496 	int fd;
5497 
5498 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
5499 	dev = &rte_eth_devices[port_id];
5500 
5501 	if (queue_id >= dev->data->nb_rx_queues) {
5502 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5503 		return -1;
5504 	}
5505 
5506 	if (!dev->intr_handle) {
5507 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5508 		return -1;
5509 	}
5510 
5511 	intr_handle = dev->intr_handle;
5512 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5513 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5514 		return -1;
5515 	}
5516 
5517 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5518 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5519 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
5520 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
5521 
5522 	rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd);
5523 
5524 	return fd;
5525 }
5526 
5527 int
5528 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
5529 			  int epfd, int op, void *data)
5530 {
5531 	uint32_t vec;
5532 	struct rte_eth_dev *dev;
5533 	struct rte_intr_handle *intr_handle;
5534 	int rc;
5535 
5536 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5537 	dev = &rte_eth_devices[port_id];
5538 
5539 	if (queue_id >= dev->data->nb_rx_queues) {
5540 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5541 		return -EINVAL;
5542 	}
5543 
5544 	if (!dev->intr_handle) {
5545 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5546 		return -ENOTSUP;
5547 	}
5548 
5549 	intr_handle = dev->intr_handle;
5550 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5551 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5552 		return -EPERM;
5553 	}
5554 
5555 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5556 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5557 
5558 	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);
5559 
5560 	if (rc && rc != -EEXIST) {
5561 		RTE_ETHDEV_LOG(ERR,
5562 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
5563 			port_id, queue_id, op, epfd, vec);
5564 		return rc;
5565 	}
5566 
5567 	return 0;
5568 }
5569 
5570 int
5571 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5572 			   uint16_t queue_id)
5573 {
5574 	struct rte_eth_dev *dev;
5575 	int ret;
5576 
5577 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5578 	dev = &rte_eth_devices[port_id];
5579 
5580 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5581 	if (ret != 0)
5582 		return ret;
5583 
5584 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
5585 		return -ENOTSUP;
5586 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5587 
5588 	rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret);
5589 
5590 	return ret;
5591 }
5592 
5593 int
5594 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5595 			    uint16_t queue_id)
5596 {
5597 	struct rte_eth_dev *dev;
5598 	int ret;
5599 
5600 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5601 	dev = &rte_eth_devices[port_id];
5602 
5603 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5604 	if (ret != 0)
5605 		return ret;
5606 
5607 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5608 		return -ENOTSUP;
5609 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5610 
5611 	rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret);
5612 
5613 	return ret;
5614 }
5615 
5616 
5617 const struct rte_eth_rxtx_callback *
5618 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5619 		rte_rx_callback_fn fn, void *user_param)
5620 {
5621 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5622 	rte_errno = ENOTSUP;
5623 	return NULL;
5624 #endif
5625 	struct rte_eth_dev *dev;
5626 
5627 	/* check input parameters */
5628 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5629 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5630 		rte_errno = EINVAL;
5631 		return NULL;
5632 	}
5633 	dev = &rte_eth_devices[port_id];
5634 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5635 		rte_errno = EINVAL;
5636 		return NULL;
5637 	}
5638 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5639 
5640 	if (cb == NULL) {
5641 		rte_errno = ENOMEM;
5642 		return NULL;
5643 	}
5644 
5645 	cb->fn.rx = fn;
5646 	cb->param = user_param;
5647 
5648 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5649 	/* Add the callback in FIFO order. */
5650 	struct rte_eth_rxtx_callback *tail =
5651 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5652 
5653 	if (!tail) {
5654 		/* Stores to cb->fn and cb->param should complete before
5655 		 * cb is visible to data plane.
5656 		 */
5657 		rte_atomic_store_explicit(
5658 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5659 			cb, rte_memory_order_release);
5660 
5661 	} else {
5662 		while (tail->next)
5663 			tail = tail->next;
5664 		/* Stores to cb->fn and cb->param should complete before
5665 		 * cb is visible to data plane.
5666 		 */
5667 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5668 	}
5669 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5670 
5671 	rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb);
5672 
5673 	return cb;
5674 }
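
/*
 * Usage sketch (hypothetical application code): count packets received on a
 * single Rx queue with a post-Rx callback. Requires the library to be built
 * with RTE_ETHDEV_RXTX_CALLBACKS; the callback executes inside
 * rte_eth_rx_burst() on the data-path lcore polling that queue, so the plain
 * counter below assumes one queue serviced by one lcore.
 *
 *	#include <rte_common.h>
 *	#include <rte_ethdev.h>
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *		uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 * Installation on queue 0, for example after queue setup:
 *	rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */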
5675 
5676 const struct rte_eth_rxtx_callback *
5677 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5678 		rte_rx_callback_fn fn, void *user_param)
5679 {
5680 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5681 	rte_errno = ENOTSUP;
5682 	return NULL;
5683 #endif
5684 	/* check input parameters */
5685 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5686 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5687 		rte_errno = EINVAL;
5688 		return NULL;
5689 	}
5690 
5691 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5692 
5693 	if (cb == NULL) {
5694 		rte_errno = ENOMEM;
5695 		return NULL;
5696 	}
5697 
5698 	cb->fn.rx = fn;
5699 	cb->param = user_param;
5700 
5701 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5702 	/* Add the callback at the first position */
5703 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5704 	/* Stores to cb->fn, cb->param and cb->next should complete before
5705 	 * cb is visible to data plane threads.
5706 	 */
5707 	rte_atomic_store_explicit(
5708 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5709 		cb, rte_memory_order_release);
5710 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5711 
5712 	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
5713 					    cb);
5714 
5715 	return cb;
5716 }
5717 
5718 const struct rte_eth_rxtx_callback *
5719 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5720 		rte_tx_callback_fn fn, void *user_param)
5721 {
5722 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5723 	rte_errno = ENOTSUP;
5724 	return NULL;
5725 #endif
5726 	struct rte_eth_dev *dev;
5727 
5728 	/* check input parameters */
5729 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5730 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5731 		rte_errno = EINVAL;
5732 		return NULL;
5733 	}
5734 
5735 	dev = &rte_eth_devices[port_id];
5736 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5737 		rte_errno = EINVAL;
5738 		return NULL;
5739 	}
5740 
5741 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5742 
5743 	if (cb == NULL) {
5744 		rte_errno = ENOMEM;
5745 		return NULL;
5746 	}
5747 
5748 	cb->fn.tx = fn;
5749 	cb->param = user_param;
5750 
5751 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5752 	/* Add the callback in FIFO order. */
5753 	struct rte_eth_rxtx_callback *tail =
5754 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5755 
5756 	if (!tail) {
5757 		/* Stores to cb->fn and cb->param should complete before
5758 		 * cb is visible to data plane.
5759 		 */
5760 		rte_atomic_store_explicit(
5761 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5762 			cb, rte_memory_order_release);
5763 
5764 	} else {
5765 		while (tail->next)
5766 			tail = tail->next;
5767 		/* Stores to cb->fn and cb->param should complete before
5768 		 * cb is visible to data plane.
5769 		 */
5770 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5771 	}
5772 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5773 
5774 	rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb);
5775 
5776 	return cb;
5777 }
5778 
5779 int
5780 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5781 		const struct rte_eth_rxtx_callback *user_cb)
5782 {
5783 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5784 	return -ENOTSUP;
5785 #endif
5786 	/* Check input parameters. */
5787 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5788 	if (user_cb == NULL ||
5789 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5790 		return -EINVAL;
5791 
5792 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5793 	struct rte_eth_rxtx_callback *cb;
5794 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5795 	int ret = -EINVAL;
5796 
5797 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5798 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5799 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5800 		cb = *prev_cb;
5801 		if (cb == user_cb) {
5802 			/* Remove the user cb from the callback list. */
5803 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
5804 			ret = 0;
5805 			break;
5806 		}
5807 	}
5808 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5809 
5810 	rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret);
5811 
5812 	return ret;
5813 }
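
/*
 * Note on freeing removed callbacks: this function only unlinks the callback
 * from the list; it does not free it, because a data-plane thread may still
 * be executing it from within rte_eth_rx_burst(). A hedged sketch of the
 * application-side sequence, assuming the application provides its own
 * quiescence guarantee (e.g. stopping or synchronizing the lcores polling
 * this queue) before releasing the memory:
 *
 *	ret = rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *	if (ret == 0) {
 *		wait_until_rx_burst_is_quiesced();   (application-defined)
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */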
5814 
5815 int
5816 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5817 		const struct rte_eth_rxtx_callback *user_cb)
5818 {
5819 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5820 	return -ENOTSUP;
5821 #endif
5822 	/* Check input parameters. */
5823 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5824 	if (user_cb == NULL ||
5825 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5826 		return -EINVAL;
5827 
5828 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5829 	int ret = -EINVAL;
5830 	struct rte_eth_rxtx_callback *cb;
5831 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5832 
5833 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5834 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5835 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5836 		cb = *prev_cb;
5837 		if (cb == user_cb) {
5838 			/* Remove the user cb from the callback list. */
5839 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
5840 			ret = 0;
5841 			break;
5842 		}
5843 	}
5844 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5845 
5846 	rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret);
5847 
5848 	return ret;
5849 }
5850 
5851 int
5852 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5853 	struct rte_eth_rxq_info *qinfo)
5854 {
5855 	struct rte_eth_dev *dev;
5856 
5857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5858 	dev = &rte_eth_devices[port_id];
5859 
5860 	if (queue_id >= dev->data->nb_rx_queues) {
5861 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5862 		return -EINVAL;
5863 	}
5864 
5865 	if (qinfo == NULL) {
5866 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5867 			port_id, queue_id);
5868 		return -EINVAL;
5869 	}
5870 
5871 	if (dev->data->rx_queues == NULL ||
5872 			dev->data->rx_queues[queue_id] == NULL) {
5873 		RTE_ETHDEV_LOG(ERR,
5874 			       "Rx queue %"PRIu16" of device with port_id=%"
5875 			       PRIu16" has not been setup\n",
5876 			       queue_id, port_id);
5877 		return -EINVAL;
5878 	}
5879 
5880 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5881 		RTE_ETHDEV_LOG(INFO,
5882 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5883 			queue_id, port_id);
5884 		return -EINVAL;
5885 	}
5886 
5887 	if (*dev->dev_ops->rxq_info_get == NULL)
5888 		return -ENOTSUP;
5889 
5890 	memset(qinfo, 0, sizeof(*qinfo));
5891 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5892 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5893 
5894 	rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo);
5895 
5896 	return 0;
5897 }
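
/*
 * Illustrative usage sketch (comment only): querying an Rx queue after it has
 * been set up. "port_id" and "queue_id" are assumed to come from the
 * application.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0) {
 *		printf("queue %u: %u descriptors, state %s\n", queue_id,
 *		       qinfo.nb_desc,
 *		       qinfo.queue_state == RTE_ETH_QUEUE_STATE_STARTED ?
 *		       "started" : "stopped");
 *	}
 */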
5898 
5899 int
5900 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5901 	struct rte_eth_txq_info *qinfo)
5902 {
5903 	struct rte_eth_dev *dev;
5904 
5905 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5906 	dev = &rte_eth_devices[port_id];
5907 
5908 	if (queue_id >= dev->data->nb_tx_queues) {
5909 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5910 		return -EINVAL;
5911 	}
5912 
5913 	if (qinfo == NULL) {
5914 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5915 			port_id, queue_id);
5916 		return -EINVAL;
5917 	}
5918 
5919 	if (dev->data->tx_queues == NULL ||
5920 			dev->data->tx_queues[queue_id] == NULL) {
5921 		RTE_ETHDEV_LOG(ERR,
5922 			       "Tx queue %"PRIu16" of device with port_id=%"
5923 			       PRIu16" has not been setup\n",
5924 			       queue_id, port_id);
5925 		return -EINVAL;
5926 	}
5927 
5928 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5929 		RTE_ETHDEV_LOG(INFO,
5930 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5931 			queue_id, port_id);
5932 		return -EINVAL;
5933 	}
5934 
5935 	if (*dev->dev_ops->txq_info_get == NULL)
5936 		return -ENOTSUP;
5937 
5938 	memset(qinfo, 0, sizeof(*qinfo));
5939 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5940 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5941 
5942 	rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo);
5943 
5944 	return 0;
5945 }
5946 
5947 int
5948 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5949 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
5950 {
5951 	struct rte_eth_dev *dev;
5952 	int ret;
5953 
5954 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5955 	dev = &rte_eth_devices[port_id];
5956 
5957 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5958 	if (unlikely(ret != 0))
5959 		return ret;
5960 
5961 	if (*dev->dev_ops->recycle_rxq_info_get == NULL)
5962 		return -ENOTSUP;
5963 
5964 	dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info);
5965 
5966 	return 0;
5967 }
5968 
5969 int
5970 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5971 			  struct rte_eth_burst_mode *mode)
5972 {
5973 	struct rte_eth_dev *dev;
5974 	int ret;
5975 
5976 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5977 	dev = &rte_eth_devices[port_id];
5978 
5979 	if (queue_id >= dev->data->nb_rx_queues) {
5980 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5981 		return -EINVAL;
5982 	}
5983 
5984 	if (mode == NULL) {
5985 		RTE_ETHDEV_LOG(ERR,
5986 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5987 			port_id, queue_id);
5988 		return -EINVAL;
5989 	}
5990 
5991 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5992 		return -ENOTSUP;
5993 	memset(mode, 0, sizeof(*mode));
5994 	ret = eth_err(port_id,
5995 		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5996 
5997 	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);
5998 
5999 	return ret;
6000 }
6001 
6002 int
6003 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
6004 			  struct rte_eth_burst_mode *mode)
6005 {
6006 	struct rte_eth_dev *dev;
6007 	int ret;
6008 
6009 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6010 	dev = &rte_eth_devices[port_id];
6011 
6012 	if (queue_id >= dev->data->nb_tx_queues) {
6013 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
6014 		return -EINVAL;
6015 	}
6016 
6017 	if (mode == NULL) {
6018 		RTE_ETHDEV_LOG(ERR,
6019 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
6020 			port_id, queue_id);
6021 		return -EINVAL;
6022 	}
6023 
6024 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
6025 		return -ENOTSUP;
6026 	memset(mode, 0, sizeof(*mode));
6027 	ret = eth_err(port_id,
6028 		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
6029 
6030 	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);
6031 
6032 	return ret;
6033 }
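
/*
 * Illustrative usage sketch (comment only): reporting the selected burst
 * functions for a queue, as testpmd-style tooling typically does. "port_id"
 * and "queue_id" are assumed application variables.
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 *	if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
 *		printf("Tx burst mode: %s\n", mode.info);
 *
 * A return of -ENOTSUP simply means the driver does not report burst modes.
 */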
6034 
6035 int
6036 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
6037 		struct rte_power_monitor_cond *pmc)
6038 {
6039 	struct rte_eth_dev *dev;
6040 	int ret;
6041 
6042 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6043 	dev = &rte_eth_devices[port_id];
6044 
6045 	if (queue_id >= dev->data->nb_rx_queues) {
6046 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
6047 		return -EINVAL;
6048 	}
6049 
6050 	if (pmc == NULL) {
6051 		RTE_ETHDEV_LOG(ERR,
6052 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
6053 			port_id, queue_id);
6054 		return -EINVAL;
6055 	}
6056 
6057 	if (*dev->dev_ops->get_monitor_addr == NULL)
6058 		return -ENOTSUP;
6059 	ret = eth_err(port_id,
6060 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
6061 
6062 	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);
6063 
6064 	return ret;
6065 }
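
/*
 * The condition filled in here is meant to be consumed by the power
 * management intrinsics, typically rte_power_monitor() from
 * <rte_power_intrinsics.h> (or indirectly via the PMD power management
 * library). A hedged sketch, with "timeout_cycles" assumed to be computed
 * by the application:
 *
 *	struct rte_power_monitor_cond pmc;
 *
 *	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) == 0)
 *		rte_power_monitor(&pmc, rte_get_tsc_cycles() + timeout_cycles);
 */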
6066 
6067 int
6068 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
6069 			     struct rte_ether_addr *mc_addr_set,
6070 			     uint32_t nb_mc_addr)
6071 {
6072 	struct rte_eth_dev *dev;
6073 	int ret;
6074 
6075 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6076 	dev = &rte_eth_devices[port_id];
6077 
6078 	if (*dev->dev_ops->set_mc_addr_list == NULL)
6079 		return -ENOTSUP;
6080 	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
6081 						mc_addr_set, nb_mc_addr));
6082 
6083 	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
6084 					  ret);
6085 
6086 	return ret;
6087 }
6088 
6089 int
6090 rte_eth_timesync_enable(uint16_t port_id)
6091 {
6092 	struct rte_eth_dev *dev;
6093 	int ret;
6094 
6095 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6096 	dev = &rte_eth_devices[port_id];
6097 
6098 	if (*dev->dev_ops->timesync_enable == NULL)
6099 		return -ENOTSUP;
6100 	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
6101 
6102 	rte_eth_trace_timesync_enable(port_id, ret);
6103 
6104 	return ret;
6105 }
6106 
6107 int
6108 rte_eth_timesync_disable(uint16_t port_id)
6109 {
6110 	struct rte_eth_dev *dev;
6111 	int ret;
6112 
6113 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6114 	dev = &rte_eth_devices[port_id];
6115 
6116 	if (*dev->dev_ops->timesync_disable == NULL)
6117 		return -ENOTSUP;
6118 	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
6119 
6120 	rte_eth_trace_timesync_disable(port_id, ret);
6121 
6122 	return ret;
6123 }
6124 
6125 int
6126 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
6127 				   uint32_t flags)
6128 {
6129 	struct rte_eth_dev *dev;
6130 	int ret;
6131 
6132 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6133 	dev = &rte_eth_devices[port_id];
6134 
6135 	if (timestamp == NULL) {
6136 		RTE_ETHDEV_LOG(ERR,
6137 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
6138 			port_id);
6139 		return -EINVAL;
6140 	}
6141 
6142 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
6143 		return -ENOTSUP;
6144 
6145 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
6146 			       (dev, timestamp, flags));
6147 
6148 	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
6149 						 ret);
6150 
6151 	return ret;
6152 }
6153 
6154 int
6155 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
6156 				   struct timespec *timestamp)
6157 {
6158 	struct rte_eth_dev *dev;
6159 	int ret;
6160 
6161 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6162 	dev = &rte_eth_devices[port_id];
6163 
6164 	if (timestamp == NULL) {
6165 		RTE_ETHDEV_LOG(ERR,
6166 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
6167 			port_id);
6168 		return -EINVAL;
6169 	}
6170 
6171 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
6172 		return -ENOTSUP;
6173 
6174 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
6175 			       (dev, timestamp));
6176 
6177 	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);
6178 
6179 	return ret;
6181 }
6182 
6183 int
6184 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
6185 {
6186 	struct rte_eth_dev *dev;
6187 	int ret;
6188 
6189 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6190 	dev = &rte_eth_devices[port_id];
6191 
6192 	if (*dev->dev_ops->timesync_adjust_time == NULL)
6193 		return -ENOTSUP;
6194 	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
6195 
6196 	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);
6197 
6198 	return ret;
6199 }
6200 
6201 int
6202 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
6203 {
6204 	struct rte_eth_dev *dev;
6205 	int ret;
6206 
6207 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6208 	dev = &rte_eth_devices[port_id];
6209 
6210 	if (timestamp == NULL) {
6211 		RTE_ETHDEV_LOG(ERR,
6212 			"Cannot read ethdev port %u timesync time to NULL\n",
6213 			port_id);
6214 		return -EINVAL;
6215 	}
6216 
6217 	if (*dev->dev_ops->timesync_read_time == NULL)
6218 		return -ENOTSUP;
6219 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
6220 								timestamp));
6221 
6222 	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);
6223 
6224 	return ret;
6225 }
6226 
6227 int
6228 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
6229 {
6230 	struct rte_eth_dev *dev;
6231 	int ret;
6232 
6233 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6234 	dev = &rte_eth_devices[port_id];
6235 
6236 	if (timestamp == NULL) {
6237 		RTE_ETHDEV_LOG(ERR,
6238 			"Cannot write ethdev port %u timesync from NULL time\n",
6239 			port_id);
6240 		return -EINVAL;
6241 	}
6242 
6243 	if (*dev->dev_ops->timesync_write_time == NULL)
6244 		return -ENOTSUP;
6245 	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
6246 								timestamp));
6247 
6248 	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);
6249 
6250 	return ret;
6251 }
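
/*
 * Hedged sketch of a typical device-clock adjustment loop using the timesync
 * API above (comment only, error handling omitted). "delta_ns" is assumed to
 * be computed by the application's clock-servo logic.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);
 *	rte_eth_timesync_adjust_time(port_id, delta_ns);
 *	...
 *	rte_eth_timesync_disable(port_id);
 */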
6252 
6253 int
6254 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
6255 {
6256 	struct rte_eth_dev *dev;
6257 	int ret;
6258 
6259 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6260 	dev = &rte_eth_devices[port_id];
6261 
6262 	if (clock == NULL) {
6263 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
6264 			port_id);
6265 		return -EINVAL;
6266 	}
6267 
6268 	if (*dev->dev_ops->read_clock == NULL)
6269 		return -ENOTSUP;
6270 	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
6271 
6272 	rte_eth_trace_read_clock(port_id, clock, ret);
6273 
6274 	return ret;
6275 }
6276 
6277 int
6278 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
6279 {
6280 	struct rte_eth_dev *dev;
6281 	int ret;
6282 
6283 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6284 	dev = &rte_eth_devices[port_id];
6285 
6286 	if (info == NULL) {
6287 		RTE_ETHDEV_LOG(ERR,
6288 			"Cannot get ethdev port %u register info to NULL\n",
6289 			port_id);
6290 		return -EINVAL;
6291 	}
6292 
6293 	if (*dev->dev_ops->get_reg == NULL)
6294 		return -ENOTSUP;
6295 	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
6296 
6297 	rte_ethdev_trace_get_reg_info(port_id, info, ret);
6298 
6299 	return ret;
6300 }
6301 
6302 int
6303 rte_eth_dev_get_eeprom_length(uint16_t port_id)
6304 {
6305 	struct rte_eth_dev *dev;
6306 	int ret;
6307 
6308 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6309 	dev = &rte_eth_devices[port_id];
6310 
6311 	if (*dev->dev_ops->get_eeprom_length == NULL)
6312 		return -ENOTSUP;
6313 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
6314 
6315 	rte_ethdev_trace_get_eeprom_length(port_id, ret);
6316 
6317 	return ret;
6318 }
6319 
6320 int
6321 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6322 {
6323 	struct rte_eth_dev *dev;
6324 	int ret;
6325 
6326 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6327 	dev = &rte_eth_devices[port_id];
6328 
6329 	if (info == NULL) {
6330 		RTE_ETHDEV_LOG(ERR,
6331 			"Cannot get ethdev port %u EEPROM info to NULL\n",
6332 			port_id);
6333 		return -EINVAL;
6334 	}
6335 
6336 	if (*dev->dev_ops->get_eeprom == NULL)
6337 		return -ENOTSUP;
6338 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
6339 
6340 	rte_ethdev_trace_get_eeprom(port_id, info, ret);
6341 
6342 	return ret;
6343 }
6344 
6345 int
6346 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6347 {
6348 	struct rte_eth_dev *dev;
6349 	int ret;
6350 
6351 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6352 	dev = &rte_eth_devices[port_id];
6353 
6354 	if (info == NULL) {
6355 		RTE_ETHDEV_LOG(ERR,
6356 			"Cannot set ethdev port %u EEPROM from NULL info\n",
6357 			port_id);
6358 		return -EINVAL;
6359 	}
6360 
6361 	if (*dev->dev_ops->set_eeprom == NULL)
6362 		return -ENOTSUP;
6363 	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
6364 
6365 	rte_ethdev_trace_set_eeprom(port_id, info, ret);
6366 
6367 	return ret;
6368 }
6369 
6370 int
6371 rte_eth_dev_get_module_info(uint16_t port_id,
6372 			    struct rte_eth_dev_module_info *modinfo)
6373 {
6374 	struct rte_eth_dev *dev;
6375 	int ret;
6376 
6377 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6378 	dev = &rte_eth_devices[port_id];
6379 
6380 	if (modinfo == NULL) {
6381 		RTE_ETHDEV_LOG(ERR,
6382 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
6383 			port_id);
6384 		return -EINVAL;
6385 	}
6386 
6387 	if (*dev->dev_ops->get_module_info == NULL)
6388 		return -ENOTSUP;
6389 	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);
6390 
6391 	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);
6392 
6393 	return ret;
6394 }
6395 
6396 int
6397 rte_eth_dev_get_module_eeprom(uint16_t port_id,
6398 			      struct rte_dev_eeprom_info *info)
6399 {
6400 	struct rte_eth_dev *dev;
6401 	int ret;
6402 
6403 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6404 	dev = &rte_eth_devices[port_id];
6405 
6406 	if (info == NULL) {
6407 		RTE_ETHDEV_LOG(ERR,
6408 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
6409 			port_id);
6410 		return -EINVAL;
6411 	}
6412 
6413 	if (info->data == NULL) {
6414 		RTE_ETHDEV_LOG(ERR,
6415 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
6416 			port_id);
6417 		return -EINVAL;
6418 	}
6419 
6420 	if (info->length == 0) {
6421 		RTE_ETHDEV_LOG(ERR,
6422 			"Cannot get ethdev port %u module EEPROM into a zero-length data buffer\n",
6423 			port_id);
6424 		return -EINVAL;
6425 	}
6426 
6427 	if (*dev->dev_ops->get_module_eeprom == NULL)
6428 		return -ENOTSUP;
6429 	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);
6430 
6431 	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);
6432 
6433 	return ret;
6434 }
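
/*
 * Illustrative two-step read of a plugged module's EEPROM (comment only):
 * first query its type and size, then fetch the contents. Error handling is
 * omitted and "buf" is an application-provided buffer.
 *
 *	struct rte_eth_dev_module_info modinfo;
 *	struct rte_dev_eeprom_info eeinfo;
 *	uint8_t *buf;
 *
 *	rte_eth_dev_get_module_info(port_id, &modinfo);
 *	buf = malloc(modinfo.eeprom_len);
 *	eeinfo.offset = 0;
 *	eeinfo.length = modinfo.eeprom_len;
 *	eeinfo.data = buf;
 *	rte_eth_dev_get_module_eeprom(port_id, &eeinfo);
 */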
6435 
6436 int
6437 rte_eth_dev_get_dcb_info(uint16_t port_id,
6438 			     struct rte_eth_dcb_info *dcb_info)
6439 {
6440 	struct rte_eth_dev *dev;
6441 	int ret;
6442 
6443 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6444 	dev = &rte_eth_devices[port_id];
6445 
6446 	if (dcb_info == NULL) {
6447 		RTE_ETHDEV_LOG(ERR,
6448 			"Cannot get ethdev port %u DCB info to NULL\n",
6449 			port_id);
6450 		return -EINVAL;
6451 	}
6452 
6453 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
6454 
6455 	if (*dev->dev_ops->get_dcb_info == NULL)
6456 		return -ENOTSUP;
6457 	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
6458 
6459 	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);
6460 
6461 	return ret;
6462 }
6463 
6464 static void
6465 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
6466 		const struct rte_eth_desc_lim *desc_lim)
6467 {
6468 	if (desc_lim->nb_align != 0)
6469 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
6470 
6471 	if (desc_lim->nb_max != 0)
6472 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
6473 
6474 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
6475 }
6476 
6477 int
6478 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
6479 				 uint16_t *nb_rx_desc,
6480 				 uint16_t *nb_tx_desc)
6481 {
6482 	struct rte_eth_dev_info dev_info;
6483 	int ret;
6484 
6485 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6486 
6487 	ret = rte_eth_dev_info_get(port_id, &dev_info);
6488 	if (ret != 0)
6489 		return ret;
6490 
6491 	if (nb_rx_desc != NULL)
6492 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
6493 
6494 	if (nb_tx_desc != NULL)
6495 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
6496 
6497 	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);
6498 
6499 	return 0;
6500 }
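
/*
 * Illustrative usage sketch (comment only): clamp the application's requested
 * ring sizes to the device limits before setting up the queues. "mb_pool" is
 * an assumed, application-created mbuf pool.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *			       rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *			       rte_eth_dev_socket_id(port_id), NULL);
 *
 * The helper above aligns each value up to nb_align, caps it at nb_max and
 * finally raises it to at least nb_min.
 */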
6501 
6502 int
6503 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
6504 				   struct rte_eth_hairpin_cap *cap)
6505 {
6506 	struct rte_eth_dev *dev;
6507 	int ret;
6508 
6509 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6510 	dev = &rte_eth_devices[port_id];
6511 
6512 	if (cap == NULL) {
6513 		RTE_ETHDEV_LOG(ERR,
6514 			"Cannot get ethdev port %u hairpin capability to NULL\n",
6515 			port_id);
6516 		return -EINVAL;
6517 	}
6518 
6519 	if (*dev->dev_ops->hairpin_cap_get == NULL)
6520 		return -ENOTSUP;
6521 	memset(cap, 0, sizeof(*cap));
6522 	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
6523 
6524 	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);
6525 
6526 	return ret;
6527 }
6528 
6529 int
6530 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
6531 {
6532 	struct rte_eth_dev *dev;
6533 	int ret;
6534 
6535 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6536 	dev = &rte_eth_devices[port_id];
6537 
6538 	if (pool == NULL) {
6539 		RTE_ETHDEV_LOG(ERR,
6540 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
6541 			port_id);
6542 		return -EINVAL;
6543 	}
6544 
6545 	if (*dev->dev_ops->pool_ops_supported == NULL)
6546 		return 1; /* all pools are supported */
6547 
6548 	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);
6549 
6550 	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);
6551 
6552 	return ret;
6553 }
6554 
6555 int
6556 rte_eth_representor_info_get(uint16_t port_id,
6557 			     struct rte_eth_representor_info *info)
6558 {
6559 	struct rte_eth_dev *dev;
6560 	int ret;
6561 
6562 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6563 	dev = &rte_eth_devices[port_id];
6564 
6565 	if (*dev->dev_ops->representor_info_get == NULL)
6566 		return -ENOTSUP;
6567 	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6568 
6569 	rte_eth_trace_representor_info_get(port_id, info, ret);
6570 
6571 	return ret;
6572 }
6573 
6574 int
6575 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
6576 {
6577 	struct rte_eth_dev *dev;
6578 	int ret;
6579 
6580 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6581 	dev = &rte_eth_devices[port_id];
6582 
6583 	if (dev->data->dev_configured != 0) {
6584 		RTE_ETHDEV_LOG(ERR,
6585 			"The port (ID=%"PRIu16") is already configured\n",
6586 			port_id);
6587 		return -EBUSY;
6588 	}
6589 
6590 	if (features == NULL) {
6591 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
6592 		return -EINVAL;
6593 	}
6594 
6595 	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
6596 			rte_flow_restore_info_dynflag_register() < 0)
6597 		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;
6598 
6599 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
6600 		return -ENOTSUP;
6601 	ret = eth_err(port_id,
6602 		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
6603 
6604 	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);
6605 
6606 	return ret;
6607 }
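
/*
 * Illustrative usage sketch (comment only): negotiating Rx metadata delivery
 * before the port is configured, as required by the dev_configured check
 * above. Feature bits the driver cannot honour are cleared from "features"
 * on return.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK |
 *			    RTE_ETH_RX_METADATA_TUNNEL_ID;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *		... flow rules with MARK actions may deliver mbuf marks ...
 */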
6608 
6609 int
6610 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
6611 		struct rte_eth_ip_reassembly_params *reassembly_capa)
6612 {
6613 	struct rte_eth_dev *dev;
6614 	int ret;
6615 
6616 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6617 	dev = &rte_eth_devices[port_id];
6618 
6619 	if (dev->data->dev_configured == 0) {
6620 		RTE_ETHDEV_LOG(ERR,
6621 			"Device with port_id=%u is not configured.\n"
6622 			"Cannot get IP reassembly capability\n",
6623 			port_id);
6624 		return -EINVAL;
6625 	}
6626 
6627 	if (reassembly_capa == NULL) {
6628 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
6629 		return -EINVAL;
6630 	}
6631 
6632 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
6633 		return -ENOTSUP;
6634 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
6635 
6636 	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
6637 					(dev, reassembly_capa));
6638 
6639 	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
6640 						   ret);
6641 
6642 	return ret;
6643 }
6644 
6645 int
6646 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
6647 		struct rte_eth_ip_reassembly_params *conf)
6648 {
6649 	struct rte_eth_dev *dev;
6650 	int ret;
6651 
6652 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6653 	dev = &rte_eth_devices[port_id];
6654 
6655 	if (dev->data->dev_configured == 0) {
6656 		RTE_ETHDEV_LOG(ERR,
6657 			"Device with port_id=%u is not configured.\n"
6658 			"Cannot get IP reassembly configuration\n",
6659 			port_id);
6660 		return -EINVAL;
6661 	}
6662 
6663 	if (conf == NULL) {
6664 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
6665 		return -EINVAL;
6666 	}
6667 
6668 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
6669 		return -ENOTSUP;
6670 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
6671 	ret = eth_err(port_id,
6672 		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
6673 
6674 	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);
6675 
6676 	return ret;
6677 }
6678 
6679 int
6680 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
6681 		const struct rte_eth_ip_reassembly_params *conf)
6682 {
6683 	struct rte_eth_dev *dev;
6684 	int ret;
6685 
6686 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6687 	dev = &rte_eth_devices[port_id];
6688 
6689 	if (dev->data->dev_configured == 0) {
6690 		RTE_ETHDEV_LOG(ERR,
6691 			"Device with port_id=%u is not configured.\n"
6692 			"Cannot set IP reassembly configuration\n",
6693 			port_id);
6694 		return -EINVAL;
6695 	}
6696 
6697 	if (dev->data->dev_started != 0) {
6698 		RTE_ETHDEV_LOG(ERR,
6699 			"Device with port_id=%u is started; cannot configure IP reassembly params\n",
6701 			port_id);
6702 		return -EINVAL;
6703 	}
6704 
6705 	if (conf == NULL) {
6706 		RTE_ETHDEV_LOG(ERR,
6707 				"Invalid IP reassembly configuration (NULL)\n");
6708 		return -EINVAL;
6709 	}
6710 
6711 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6712 		return -ENOTSUP;
6713 	ret = eth_err(port_id,
6714 		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6715 
6716 	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);
6717 
6718 	return ret;
6719 }
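
/*
 * Hedged sketch of the intended call order (comment only): query the
 * capability after rte_eth_dev_configure(), then apply a configuration
 * within those limits before rte_eth_dev_start(). The timeout_ms, max_frags
 * and flags fields of struct rte_eth_ip_reassembly_params are assumed here;
 * error handling is omitted.
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	rte_eth_ip_reassembly_capability_get(port_id, &capa);
 *	conf.timeout_ms = RTE_MIN(capa.timeout_ms, 100);
 *	conf.max_frags = capa.max_frags;
 *	conf.flags = capa.flags;
 *	rte_eth_ip_reassembly_conf_set(port_id, &conf);
 */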
6720 
6721 int
6722 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
6723 {
6724 	struct rte_eth_dev *dev;
6725 
6726 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6727 	dev = &rte_eth_devices[port_id];
6728 
6729 	if (file == NULL) {
6730 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6731 		return -EINVAL;
6732 	}
6733 
6734 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6735 		return -ENOTSUP;
6736 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6737 }
6738 
6739 int
6740 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6741 			   uint16_t offset, uint16_t num, FILE *file)
6742 {
6743 	struct rte_eth_dev *dev;
6744 
6745 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6746 	dev = &rte_eth_devices[port_id];
6747 
6748 	if (queue_id >= dev->data->nb_rx_queues) {
6749 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
6750 		return -EINVAL;
6751 	}
6752 
6753 	if (file == NULL) {
6754 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6755 		return -EINVAL;
6756 	}
6757 
6758 	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
6759 		return -ENOTSUP;
6760 
6761 	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
6762 						queue_id, offset, num, file));
6763 }
6764 
6765 int
6766 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6767 			   uint16_t offset, uint16_t num, FILE *file)
6768 {
6769 	struct rte_eth_dev *dev;
6770 
6771 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6772 	dev = &rte_eth_devices[port_id];
6773 
6774 	if (queue_id >= dev->data->nb_tx_queues) {
6775 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
6776 		return -EINVAL;
6777 	}
6778 
6779 	if (file == NULL) {
6780 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6781 		return -EINVAL;
6782 	}
6783 
6784 	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
6785 		return -ENOTSUP;
6786 
6787 	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
6788 						queue_id, offset, num, file));
6789 }
6790 
6791 int
6792 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
6793 {
6794 	int i, j;
6795 	struct rte_eth_dev *dev;
6796 	const uint32_t *all_types;
6797 
6798 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6799 	dev = &rte_eth_devices[port_id];
6800 
6801 	if (ptypes == NULL && num > 0) {
6802 		RTE_ETHDEV_LOG(ERR,
6803 			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non-zero\n",
6804 			port_id);
6805 		return -EINVAL;
6806 	}
6807 
6808 	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
6809 		return -ENOTSUP;
6810 	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);
6811 
6812 	if (all_types == NULL)
6813 		return 0;
6814 
6815 	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
6816 		if (j < num) {
6817 			ptypes[j] = all_types[i];
6818 
6819 			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
6820 							port_id, j, ptypes[j]);
6821 		}
6822 		j++;
6823 	}
6824 
6825 	return j;
6826 }
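
/*
 * Like other "get list" ethdev helpers, this one can be used in two calls
 * (comment-only sketch): first with num == 0 to learn how many header ptypes
 * the driver supports for buffer split, then with an array large enough to
 * hold them.
 *
 *	int n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, ptypes, n);
 *		...
 *		free(ptypes);
 *	}
 */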
6827 
6828 int rte_eth_dev_count_aggr_ports(uint16_t port_id)
6829 {
6830 	struct rte_eth_dev *dev;
6831 	int ret;
6832 
6833 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6834 	dev = &rte_eth_devices[port_id];
6835 
6836 	if (*dev->dev_ops->count_aggr_ports == NULL)
6837 		return 0;
6838 	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));
6839 
6840 	rte_eth_trace_count_aggr_ports(port_id, ret);
6841 
6842 	return ret;
6843 }
6844 
6845 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
6846 				     uint8_t affinity)
6847 {
6848 	struct rte_eth_dev *dev;
6849 	int aggr_ports;
6850 	int ret;
6851 
6852 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6853 	dev = &rte_eth_devices[port_id];
6854 
6855 	if (tx_queue_id >= dev->data->nb_tx_queues) {
6856 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
6857 		return -EINVAL;
6858 	}
6859 
6860 	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
6861 		return -ENOTSUP;
6862 
6863 	if (dev->data->dev_configured == 0) {
6864 		RTE_ETHDEV_LOG(ERR,
6865 			"Port %u must be configured before Tx affinity mapping\n",
6866 			port_id);
6867 		return -EINVAL;
6868 	}
6869 
6870 	if (dev->data->dev_started) {
6871 		RTE_ETHDEV_LOG(ERR,
6872 			"Port %u must be stopped to allow configuration\n",
6873 			port_id);
6874 		return -EBUSY;
6875 	}
6876 
6877 	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
6878 	if (aggr_ports == 0) {
6879 		RTE_ETHDEV_LOG(ERR,
6880 			"Port %u has no aggregated port\n",
6881 			port_id);
6882 		return -ENOTSUP;
6883 	}
6884 
6885 	if (affinity > aggr_ports) {
6886 		RTE_ETHDEV_LOG(ERR,
6887 			"Port %u: invalid Tx affinity %u, exceeds the number of aggregated ports %u\n",
6888 			port_id, affinity, aggr_ports);
6889 		return -EINVAL;
6890 	}
6891 
6892 	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
6893 				tx_queue_id, affinity));
6894 
6895 	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);
6896 
6897 	return ret;
6898 }
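
/*
 * Illustrative usage sketch (comment only): spreading Tx queues over the
 * ports aggregated under a bonding-style port. Affinity 0 is assumed to mean
 * "no specific affinity"; values 1..aggr_ports pick a member port, which is
 * consistent with the range check above. "nb_txq" is the application's
 * configured Tx queue count, and the calls must land after
 * rte_eth_dev_configure() and before rte_eth_dev_start().
 *
 *	int aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
 *	uint16_t q;
 *
 *	for (q = 0; aggr_ports > 0 && q < nb_txq; q++)
 *		rte_eth_dev_map_aggr_tx_affinity(port_id, q,
 *						 q % aggr_ports + 1);
 */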
6899 
6900 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6901