xref: /dpdk/lib/ethdev/rte_ethdev.c (revision 85256fea3859b57451657919486e4559b0f2677c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13 
14 #include <bus_driver.h>
15 #include <rte_log.h>
16 #include <rte_interrupts.h>
17 #include <rte_kvargs.h>
18 #include <rte_memcpy.h>
19 #include <rte_common.h>
20 #include <rte_mempool.h>
21 #include <rte_malloc.h>
22 #include <rte_mbuf.h>
23 #include <rte_errno.h>
24 #include <rte_spinlock.h>
25 #include <rte_string_fns.h>
26 #include <rte_class.h>
27 #include <rte_ether.h>
28 #include <rte_telemetry.h>
29 
30 #include "rte_ethdev.h"
31 #include "rte_ethdev_trace_fp.h"
32 #include "ethdev_driver.h"
33 #include "rte_flow_driver.h"
34 #include "ethdev_profile.h"
35 #include "ethdev_private.h"
36 #include "ethdev_trace.h"
37 #include "sff_telemetry.h"
38 
39 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
40 
41 /* public fast-path API */
42 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
43 
44 /* spinlock for add/remove Rx callbacks */
45 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
46 
47 /* spinlock for add/remove Tx callbacks */
48 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
49 
50 /* store statistics names and their offsets in the stats structure */
51 struct rte_eth_xstats_name_off {
52 	char name[RTE_ETH_XSTATS_NAME_SIZE];
53 	unsigned offset;
54 };
55 
56 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
57 	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
58 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
59 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
60 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
61 	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
62 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
63 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
64 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
65 		rx_nombuf)},
66 };
67 
68 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
69 
70 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
71 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
72 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
73 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
74 };
75 
76 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
77 
78 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
79 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
80 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
81 };
82 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
83 
84 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
85 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
86 
87 static const struct {
88 	uint64_t offload;
89 	const char *name;
90 } eth_dev_rx_offload_names[] = {
91 	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
92 	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
93 	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
94 	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
95 	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
96 	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
97 	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
98 	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
99 	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
100 	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
101 	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
102 	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
103 	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
104 	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
105 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
106 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
107 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
108 	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
109 };
110 
111 #undef RTE_RX_OFFLOAD_BIT2STR
112 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
113 
114 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
115 	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
116 
117 static const struct {
118 	uint64_t offload;
119 	const char *name;
120 } eth_dev_tx_offload_names[] = {
121 	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
122 	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
123 	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
124 	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
125 	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
126 	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
127 	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
128 	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
129 	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
130 	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
131 	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
132 	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
133 	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
134 	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
135 	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
136 	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
137 	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
138 	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
139 	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
140 	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
141 	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
142 	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
143 };
144 
145 #undef RTE_TX_OFFLOAD_BIT2STR
146 
147 static const struct {
148 	uint64_t offload;
149 	const char *name;
150 } rte_eth_dev_capa_names[] = {
151 	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
152 	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
153 	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
154 	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
155 	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
156 };
157 
158 enum {
159 	STAT_QMAP_TX = 0,
160 	STAT_QMAP_RX
161 };
162 
163 static const struct {
164 	enum rte_eth_hash_function algo;
165 	const char *name;
166 } rte_eth_dev_rss_algo_names[] = {
167 	{RTE_ETH_HASH_FUNCTION_DEFAULT, "default"},
168 	{RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, "simple_xor"},
169 	{RTE_ETH_HASH_FUNCTION_TOEPLITZ, "toeplitz"},
170 	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, "symmetric_toeplitz"},
171 	{RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT, "symmetric_toeplitz_sort"},
172 };
173 
174 int
175 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
176 {
177 	int ret;
178 	struct rte_devargs devargs;
179 	const char *bus_param_key;
180 	char *bus_str = NULL;
181 	char *cls_str = NULL;
182 	int str_size;
183 
184 	if (iter == NULL) {
185 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL iterator");
186 		return -EINVAL;
187 	}
188 
189 	if (devargs_str == NULL) {
190 		RTE_ETHDEV_LOG_LINE(ERR,
191 			"Cannot initialize iterator from NULL device description string");
192 		return -EINVAL;
193 	}
194 
195 	memset(iter, 0, sizeof(*iter));
196 	memset(&devargs, 0, sizeof(devargs));
197 
198 	/*
199 	 * The devargs string may use various syntaxes:
200 	 *   - 0000:08:00.0,representor=[1-3]
201 	 *   - pci:0000:06:00.0,representor=[0,5]
202 	 *   - class=eth,mac=00:11:22:33:44:55
203 	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
204 	 */
205 
206 	/*
207 	 * Handle a pure class filter (i.e. without any bus-level argument)
208 	 * coming from the future new syntax.
209 	 * rte_devargs_parse() does not yet support the new syntax,
210 	 * which is why this simple case is temporarily parsed here.
211 	 */
212 #define iter_anybus_str "class=eth,"
213 	if (strncmp(devargs_str, iter_anybus_str,
214 			strlen(iter_anybus_str)) == 0) {
215 		iter->cls_str = devargs_str + strlen(iter_anybus_str);
216 		goto end;
217 	}
218 
219 	/* Split bus, device and parameters. */
220 	ret = rte_devargs_parse(&devargs, devargs_str);
221 	if (ret != 0)
222 		goto error;
223 
224 	/*
225 	 * Assume parameters of the old syntax can match only at the ethdev level.
226 	 * Extra parameters will be ignored, thanks to the "+" prefix.
227 	 */
228 	str_size = strlen(devargs.args) + 2;
229 	cls_str = malloc(str_size);
230 	if (cls_str == NULL) {
231 		ret = -ENOMEM;
232 		goto error;
233 	}
234 	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
235 	if (ret != str_size - 1) {
236 		ret = -EINVAL;
237 		goto error;
238 	}
239 	iter->cls_str = cls_str;
240 
241 	iter->bus = devargs.bus;
242 	if (iter->bus->dev_iterate == NULL) {
243 		ret = -ENOTSUP;
244 		goto error;
245 	}
246 
247 	/* Convert bus args to new syntax for use with new API dev_iterate. */
248 	if ((strcmp(iter->bus->name, "vdev") == 0) ||
249 		(strcmp(iter->bus->name, "fslmc") == 0) ||
250 		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
251 		bus_param_key = "name";
252 	} else if (strcmp(iter->bus->name, "pci") == 0) {
253 		bus_param_key = "addr";
254 	} else {
255 		ret = -ENOTSUP;
256 		goto error;
257 	}
258 	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
259 	bus_str = malloc(str_size);
260 	if (bus_str == NULL) {
261 		ret = -ENOMEM;
262 		goto error;
263 	}
264 	ret = snprintf(bus_str, str_size, "%s=%s",
265 			bus_param_key, devargs.name);
266 	if (ret != str_size - 1) {
267 		ret = -EINVAL;
268 		goto error;
269 	}
270 	iter->bus_str = bus_str;
271 
272 end:
273 	iter->cls = rte_class_find_by_name("eth");
274 	rte_devargs_reset(&devargs);
275 
276 	rte_eth_trace_iterator_init(devargs_str);
277 
278 	return 0;
279 
280 error:
281 	if (ret == -ENOTSUP)
282 		RTE_ETHDEV_LOG_LINE(ERR, "Bus %s does not support iterating.",
283 				iter->bus->name);
284 	rte_devargs_reset(&devargs);
285 	free(bus_str);
286 	free(cls_str);
287 	return ret;
288 }
289 
290 uint16_t
291 rte_eth_iterator_next(struct rte_dev_iterator *iter)
292 {
293 	if (iter == NULL) {
294 		RTE_ETHDEV_LOG_LINE(ERR,
295 			"Cannot get next device from NULL iterator");
296 		return RTE_MAX_ETHPORTS;
297 	}
298 
299 	if (iter->cls == NULL) /* invalid ethdev iterator */
300 		return RTE_MAX_ETHPORTS;
301 
302 	do { /* loop to try all matching rte_device */
303 		/* If not pure ethdev filter and */
304 		if (iter->bus != NULL &&
305 				/* not in the middle of rte_eth_dev iteration, */
306 				iter->class_device == NULL) {
307 			/* get next rte_device to try. */
308 			iter->device = iter->bus->dev_iterate(
309 					iter->device, iter->bus_str, iter);
310 			if (iter->device == NULL)
311 				break; /* no more rte_device candidate */
312 		}
313 		/* A device is matching bus part, need to check ethdev part. */
314 		iter->class_device = iter->cls->dev_iterate(
315 				iter->class_device, iter->cls_str, iter);
316 		if (iter->class_device != NULL) {
317 			uint16_t id = eth_dev_to_id(iter->class_device);
318 
319 			rte_eth_trace_iterator_next(iter, id);
320 
321 			return id; /* match */
322 		}
323 	} while (iter->bus != NULL); /* need to try next rte_device */
324 
325 	/* No more ethdev ports to iterate. */
326 	rte_eth_iterator_cleanup(iter);
327 	return RTE_MAX_ETHPORTS;
328 }
329 
330 void
331 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
332 {
333 	if (iter == NULL) {
334 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot do clean up from NULL iterator");
335 		return;
336 	}
337 
338 	if (iter->bus_str == NULL)
339 		return; /* nothing to free in pure class filter */
340 
341 	rte_eth_trace_iterator_cleanup(iter);
342 
343 	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
344 	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
345 	memset(iter, 0, sizeof(*iter));
346 }
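
/*
 * Illustrative usage sketch of the iterator trio above (editor's addition,
 * not part of the original file): walk every ethdev port matching a devargs
 * string. rte_eth_iterator_next() performs the cleanup itself once the
 * iteration is exhausted; the explicit cleanup call is only needed when
 * breaking out early.
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0) {
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *		rte_eth_iterator_cleanup(&iter);
 *	}
 */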
347 
348 uint16_t
349 rte_eth_find_next(uint16_t port_id)
350 {
351 	while (port_id < RTE_MAX_ETHPORTS &&
352 			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
353 		port_id++;
354 
355 	if (port_id >= RTE_MAX_ETHPORTS)
356 		return RTE_MAX_ETHPORTS;
357 
358 	rte_eth_trace_find_next(port_id);
359 
360 	return port_id;
361 }
362 
363 /*
364  * Macro to iterate over all valid ports for internal usage.
365  * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
366  */
367 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
368 	for (port_id = rte_eth_find_next(0); \
369 	     port_id < RTE_MAX_ETHPORTS; \
370 	     port_id = rte_eth_find_next(port_id + 1))
371 
372 uint16_t
373 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
374 {
375 	port_id = rte_eth_find_next(port_id);
376 	while (port_id < RTE_MAX_ETHPORTS &&
377 			rte_eth_devices[port_id].device != parent)
378 		port_id = rte_eth_find_next(port_id + 1);
379 
380 	rte_eth_trace_find_next_of(port_id, parent);
381 
382 	return port_id;
383 }
384 
385 uint16_t
386 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
387 {
388 	uint16_t ret;
389 
390 	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
391 	ret = rte_eth_find_next_of(port_id,
392 			rte_eth_devices[ref_port_id].device);
393 
394 	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);
395 
396 	return ret;
397 }
398 
399 static bool
400 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
401 {
402 	return ethdev->data != NULL && ethdev->data->name[0] != '\0';
403 }
404 
405 int
406 rte_eth_dev_is_valid_port(uint16_t port_id)
407 {
408 	int is_valid;
409 
410 	if (port_id >= RTE_MAX_ETHPORTS ||
411 	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
412 		is_valid = 0;
413 	else
414 		is_valid = 1;
415 
416 	rte_ethdev_trace_is_valid_port(port_id, is_valid);
417 
418 	return is_valid;
419 }
420 
421 static int
422 eth_is_valid_owner_id(uint64_t owner_id)
423 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
424 {
425 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
426 	    eth_dev_shared_data->next_owner_id <= owner_id)
427 		return 0;
428 	return 1;
429 }
430 
431 uint64_t
432 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
433 {
434 	port_id = rte_eth_find_next(port_id);
435 	while (port_id < RTE_MAX_ETHPORTS &&
436 			rte_eth_devices[port_id].data->owner.id != owner_id)
437 		port_id = rte_eth_find_next(port_id + 1);
438 
439 	rte_eth_trace_find_next_owned_by(port_id, owner_id);
440 
441 	return port_id;
442 }
443 
444 int
445 rte_eth_dev_owner_new(uint64_t *owner_id)
446 {
447 	int ret;
448 
449 	if (owner_id == NULL) {
450 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get new owner ID to NULL");
451 		return -EINVAL;
452 	}
453 
454 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
455 
456 	if (eth_dev_shared_data_prepare() != NULL) {
457 		*owner_id = eth_dev_shared_data->next_owner_id++;
458 		eth_dev_shared_data->allocated_owners++;
459 		ret = 0;
460 	} else {
461 		ret = -ENOMEM;
462 	}
463 
464 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
465 
466 	rte_ethdev_trace_owner_new(*owner_id, ret);
467 
468 	return ret;
469 }
470 
471 static int
472 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
473 		       const struct rte_eth_dev_owner *new_owner)
474 	__rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
475 {
476 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
477 	struct rte_eth_dev_owner *port_owner;
478 
479 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
480 		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
481 			port_id);
482 		return -ENODEV;
483 	}
484 
485 	if (new_owner == NULL) {
486 		RTE_ETHDEV_LOG_LINE(ERR,
487 			"Cannot set ethdev port %u owner from NULL owner",
488 			port_id);
489 		return -EINVAL;
490 	}
491 
492 	if (!eth_is_valid_owner_id(new_owner->id) &&
493 	    !eth_is_valid_owner_id(old_owner_id)) {
494 		RTE_ETHDEV_LOG_LINE(ERR,
495 			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64,
496 		       old_owner_id, new_owner->id);
497 		return -EINVAL;
498 	}
499 
500 	port_owner = &rte_eth_devices[port_id].data->owner;
501 	if (port_owner->id != old_owner_id) {
502 		RTE_ETHDEV_LOG_LINE(ERR,
503 			"Cannot set owner to port %u already owned by %s_%016"PRIX64,
504 			port_id, port_owner->name, port_owner->id);
505 		return -EPERM;
506 	}
507 
508 	/* cannot truncate (same structure) */
509 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
510 
511 	port_owner->id = new_owner->id;
512 
513 	RTE_ETHDEV_LOG_LINE(DEBUG, "Port %u owner is %s_%016"PRIx64,
514 		port_id, new_owner->name, new_owner->id);
515 
516 	return 0;
517 }
518 
519 int
520 rte_eth_dev_owner_set(const uint16_t port_id,
521 		      const struct rte_eth_dev_owner *owner)
522 {
523 	int ret;
524 
525 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
526 
527 	if (eth_dev_shared_data_prepare() != NULL)
528 		ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
529 	else
530 		ret = -ENOMEM;
531 
532 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
533 
534 	rte_ethdev_trace_owner_set(port_id, owner, ret);
535 
536 	return ret;
537 }
538 
539 int
540 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
541 {
542 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
543 			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
544 	int ret;
545 
546 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
547 
548 	if (eth_dev_shared_data_prepare() != NULL)
549 		ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
550 	else
551 		ret = -ENOMEM;
552 
553 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
554 
555 	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);
556 
557 	return ret;
558 }
559 
560 int
561 rte_eth_dev_owner_delete(const uint64_t owner_id)
562 {
563 	uint16_t port_id;
564 	int ret = 0;
565 
566 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
567 
568 	if (eth_dev_shared_data_prepare() == NULL) {
569 		ret = -ENOMEM;
570 	} else if (eth_is_valid_owner_id(owner_id)) {
571 		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
572 			struct rte_eth_dev_data *data =
573 				rte_eth_devices[port_id].data;
574 			if (data != NULL && data->owner.id == owner_id)
575 				memset(&data->owner, 0,
576 				       sizeof(struct rte_eth_dev_owner));
577 		}
578 		RTE_ETHDEV_LOG_LINE(NOTICE,
579 			"All port owners owned by %016"PRIx64" identifier have removed",
580 			owner_id);
581 		eth_dev_shared_data->allocated_owners--;
582 		eth_dev_shared_data_release();
583 	} else {
584 		RTE_ETHDEV_LOG_LINE(ERR,
585 			       "Invalid owner ID=%016"PRIx64,
586 			       owner_id);
587 		ret = -EINVAL;
588 	}
589 
590 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
591 
592 	rte_ethdev_trace_owner_delete(owner_id, ret);
593 
594 	return ret;
595 }
596 
597 int
598 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
599 {
600 	struct rte_eth_dev *ethdev;
601 	int ret;
602 
603 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
604 	ethdev = &rte_eth_devices[port_id];
605 
606 	if (!eth_dev_is_allocated(ethdev)) {
607 		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
608 			port_id);
609 		return -ENODEV;
610 	}
611 
612 	if (owner == NULL) {
613 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u owner to NULL",
614 			port_id);
615 		return -EINVAL;
616 	}
617 
618 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
619 
620 	if (eth_dev_shared_data_prepare() != NULL) {
621 		rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
622 		ret = 0;
623 	} else {
624 		ret = -ENOMEM;
625 	}
626 
627 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
628 
629 	rte_ethdev_trace_owner_get(port_id, owner, ret);
630 
631 	return ret;
632 }
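
/*
 * Illustrative sketch of the ownership API above (editor's addition, not
 * part of the original file): claim a port so that other entities skip it
 * in RTE_ETH_FOREACH_DEV, then release it again.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *	uint64_t owner_id;
 *
 *	if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *		owner.id = owner_id;
 *		if (rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *			... use the port exclusively ...
 *			rte_eth_dev_owner_unset(port_id, owner_id);
 *		}
 *		rte_eth_dev_owner_delete(owner_id);
 *	}
 */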
633 
634 int
635 rte_eth_dev_socket_id(uint16_t port_id)
636 {
637 	int socket_id = SOCKET_ID_ANY;
638 
639 	if (!rte_eth_dev_is_valid_port(port_id)) {
640 		rte_errno = EINVAL;
641 	} else {
642 		socket_id = rte_eth_devices[port_id].data->numa_node;
643 		if (socket_id == SOCKET_ID_ANY)
644 			rte_errno = 0;
645 	}
646 
647 	rte_ethdev_trace_socket_id(port_id, socket_id);
648 
649 	return socket_id;
650 }
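
/*
 * Usage note (editor's addition): SOCKET_ID_ANY is returned both for an
 * invalid port and for a valid port with unknown NUMA affinity; rte_errno
 * disambiguates the two cases. A minimal sketch:
 *
 *	int socket = rte_eth_dev_socket_id(port_id);
 *
 *	if (socket == SOCKET_ID_ANY && rte_errno == EINVAL)
 *		... invalid port_id ...
 *	else if (socket == SOCKET_ID_ANY)
 *		... valid port, NUMA node unknown ...
 */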
651 
652 void *
653 rte_eth_dev_get_sec_ctx(uint16_t port_id)
654 {
655 	void *ctx;
656 
657 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
658 	ctx = rte_eth_devices[port_id].security_ctx;
659 
660 	rte_ethdev_trace_get_sec_ctx(port_id, ctx);
661 
662 	return ctx;
663 }
664 
665 uint16_t
666 rte_eth_dev_count_avail(void)
667 {
668 	uint16_t p;
669 	uint16_t count;
670 
671 	count = 0;
672 
673 	RTE_ETH_FOREACH_DEV(p)
674 		count++;
675 
676 	rte_ethdev_trace_count_avail(count);
677 
678 	return count;
679 }
680 
681 uint16_t
682 rte_eth_dev_count_total(void)
683 {
684 	uint16_t port, count = 0;
685 
686 	RTE_ETH_FOREACH_VALID_DEV(port)
687 		count++;
688 
689 	rte_ethdev_trace_count_total(count);
690 
691 	return count;
692 }
693 
694 int
695 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
696 {
697 	char *tmp;
698 
699 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
700 
701 	if (name == NULL) {
702 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u name to NULL",
703 			port_id);
704 		return -EINVAL;
705 	}
706 
707 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
708 	/* Do not check 'rte_eth_devices[i].data' here,
709 	 * because it might be overwritten by a vdev PMD. */
710 	tmp = eth_dev_shared_data->data[port_id].name;
711 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
712 
713 	strcpy(name, tmp);
714 
715 	rte_ethdev_trace_get_name_by_port(port_id, name);
716 
717 	return 0;
718 }
719 
720 int
721 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
722 {
723 	int ret = -ENODEV;
724 	uint16_t pid;
725 
726 	if (name == NULL) {
727 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get port ID from NULL name");
728 		return -EINVAL;
729 	}
730 
731 	if (port_id == NULL) {
732 		RTE_ETHDEV_LOG_LINE(ERR,
733 			"Cannot get port ID to NULL for %s", name);
734 		return -EINVAL;
735 	}
736 
737 	rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
738 	RTE_ETH_FOREACH_VALID_DEV(pid) {
739 		if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
740 			continue;
741 
742 		*port_id = pid;
743 		rte_ethdev_trace_get_port_by_name(name, *port_id);
744 		ret = 0;
745 		break;
746 	}
747 	rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
748 
749 	return ret;
750 }
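
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two lookups above round-trip between port ID and device name.
 *
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *	uint16_t pid;
 *
 *	if (rte_eth_dev_get_name_by_port(port_id, name) == 0 &&
 *	    rte_eth_dev_get_port_by_name(name, &pid) == 0)
 *		RTE_ASSERT(pid == port_id);
 */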
751 
752 int
753 eth_err(uint16_t port_id, int ret)
754 {
755 	if (ret == 0)
756 		return 0;
757 	if (rte_eth_dev_is_removed(port_id))
758 		return -EIO;
759 	return ret;
760 }
761 
762 static int
763 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
764 {
765 	uint16_t port_id;
766 
767 	if (rx_queue_id >= dev->data->nb_rx_queues) {
768 		port_id = dev->data->port_id;
769 		RTE_ETHDEV_LOG_LINE(ERR,
770 			       "Invalid Rx queue_id=%u of device with port_id=%u",
771 			       rx_queue_id, port_id);
772 		return -EINVAL;
773 	}
774 
775 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
776 		port_id = dev->data->port_id;
777 		RTE_ETHDEV_LOG_LINE(ERR,
778 			       "Queue %u of device with port_id=%u has not been setup",
779 			       rx_queue_id, port_id);
780 		return -EINVAL;
781 	}
782 
783 	return 0;
784 }
785 
786 static int
787 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
788 {
789 	uint16_t port_id;
790 
791 	if (tx_queue_id >= dev->data->nb_tx_queues) {
792 		port_id = dev->data->port_id;
793 		RTE_ETHDEV_LOG_LINE(ERR,
794 			       "Invalid Tx queue_id=%u of device with port_id=%u",
795 			       tx_queue_id, port_id);
796 		return -EINVAL;
797 	}
798 
799 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
800 		port_id = dev->data->port_id;
801 		RTE_ETHDEV_LOG_LINE(ERR,
802 			       "Queue %u of device with port_id=%u has not been setup",
803 			       tx_queue_id, port_id);
804 		return -EINVAL;
805 	}
806 
807 	return 0;
808 }
809 
810 int
811 rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
812 {
813 	struct rte_eth_dev *dev;
814 
815 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
816 	dev = &rte_eth_devices[port_id];
817 
818 	return eth_dev_validate_rx_queue(dev, queue_id);
819 }
820 
821 int
822 rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id)
823 {
824 	struct rte_eth_dev *dev;
825 
826 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
827 	dev = &rte_eth_devices[port_id];
828 
829 	return eth_dev_validate_tx_queue(dev, queue_id);
830 }
831 
832 int
833 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
834 {
835 	struct rte_eth_dev *dev;
836 	int ret;
837 
838 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
839 	dev = &rte_eth_devices[port_id];
840 
841 	if (!dev->data->dev_started) {
842 		RTE_ETHDEV_LOG_LINE(ERR,
843 			"Port %u must be started before start any queue",
844 			port_id);
845 		return -EINVAL;
846 	}
847 
848 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
849 	if (ret != 0)
850 		return ret;
851 
852 	if (*dev->dev_ops->rx_queue_start == NULL)
853 		return -ENOTSUP;
854 
855 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
856 		RTE_ETHDEV_LOG_LINE(INFO,
857 			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
858 			rx_queue_id, port_id);
859 		return -EINVAL;
860 	}
861 
862 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
863 		RTE_ETHDEV_LOG_LINE(INFO,
864 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
865 			rx_queue_id, port_id);
866 		return 0;
867 	}
868 
869 	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
870 
871 	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);
872 
873 	return ret;
874 }
875 
876 int
877 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
878 {
879 	struct rte_eth_dev *dev;
880 	int ret;
881 
882 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
883 	dev = &rte_eth_devices[port_id];
884 
885 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
886 	if (ret != 0)
887 		return ret;
888 
889 	if (*dev->dev_ops->rx_queue_stop == NULL)
890 		return -ENOTSUP;
891 
892 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
893 		RTE_ETHDEV_LOG_LINE(INFO,
894 			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
895 			rx_queue_id, port_id);
896 		return -EINVAL;
897 	}
898 
899 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
900 		RTE_ETHDEV_LOG_LINE(INFO,
901 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
902 			rx_queue_id, port_id);
903 		return 0;
904 	}
905 
906 	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
907 
908 	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);
909 
910 	return ret;
911 }
912 
913 int
914 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
915 {
916 	struct rte_eth_dev *dev;
917 	int ret;
918 
919 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
920 	dev = &rte_eth_devices[port_id];
921 
922 	if (!dev->data->dev_started) {
923 		RTE_ETHDEV_LOG_LINE(ERR,
924 			"Port %u must be started before start any queue",
925 			port_id);
926 		return -EINVAL;
927 	}
928 
929 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
930 	if (ret != 0)
931 		return ret;
932 
933 	if (*dev->dev_ops->tx_queue_start == NULL)
934 		return -ENOTSUP;
935 
936 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
937 		RTE_ETHDEV_LOG_LINE(INFO,
938 			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
939 			tx_queue_id, port_id);
940 		return -EINVAL;
941 	}
942 
943 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
944 		RTE_ETHDEV_LOG_LINE(INFO,
945 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
946 			tx_queue_id, port_id);
947 		return 0;
948 	}
949 
950 	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
951 
952 	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);
953 
954 	return ret;
955 }
956 
957 int
958 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
959 {
960 	struct rte_eth_dev *dev;
961 	int ret;
962 
963 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
964 	dev = &rte_eth_devices[port_id];
965 
966 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
967 	if (ret != 0)
968 		return ret;
969 
970 	if (*dev->dev_ops->tx_queue_stop == NULL)
971 		return -ENOTSUP;
972 
973 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
974 		RTE_ETHDEV_LOG_LINE(INFO,
975 			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
976 			tx_queue_id, port_id);
977 		return -EINVAL;
978 	}
979 
980 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
981 		RTE_ETHDEV_LOG_LINE(INFO,
982 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
983 			tx_queue_id, port_id);
984 		return 0;
985 	}
986 
987 	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
988 
989 	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);
990 
991 	return ret;
992 }
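
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the per-queue start/stop calls above are typically paired with the
 * deferred_start flag, so a queue set up before rte_eth_dev_start() is
 * only activated on demand. Assuming rxq_conf was initialized from
 * dev_info.default_rxconf and mp is a valid mbuf pool:
 *
 *	rxq_conf.rx_deferred_start = 1;
 *	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
 *			rte_eth_dev_socket_id(port_id), &rxq_conf, mp);
 *	rte_eth_dev_start(port_id);
 *	...
 *	rte_eth_dev_rx_queue_start(port_id, queue_id);
 *	...
 *	rte_eth_dev_rx_queue_stop(port_id, queue_id);
 */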
993 
994 uint32_t
995 rte_eth_speed_bitflag(uint32_t speed, int duplex)
996 {
997 	uint32_t ret;
998 
999 	switch (speed) {
1000 	case RTE_ETH_SPEED_NUM_10M:
1001 		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
1002 		break;
1003 	case RTE_ETH_SPEED_NUM_100M:
1004 		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
1005 		break;
1006 	case RTE_ETH_SPEED_NUM_1G:
1007 		ret = RTE_ETH_LINK_SPEED_1G;
1008 		break;
1009 	case RTE_ETH_SPEED_NUM_2_5G:
1010 		ret = RTE_ETH_LINK_SPEED_2_5G;
1011 		break;
1012 	case RTE_ETH_SPEED_NUM_5G:
1013 		ret = RTE_ETH_LINK_SPEED_5G;
1014 		break;
1015 	case RTE_ETH_SPEED_NUM_10G:
1016 		ret = RTE_ETH_LINK_SPEED_10G;
1017 		break;
1018 	case RTE_ETH_SPEED_NUM_20G:
1019 		ret = RTE_ETH_LINK_SPEED_20G;
1020 		break;
1021 	case RTE_ETH_SPEED_NUM_25G:
1022 		ret = RTE_ETH_LINK_SPEED_25G;
1023 		break;
1024 	case RTE_ETH_SPEED_NUM_40G:
1025 		ret = RTE_ETH_LINK_SPEED_40G;
1026 		break;
1027 	case RTE_ETH_SPEED_NUM_50G:
1028 		ret = RTE_ETH_LINK_SPEED_50G;
1029 		break;
1030 	case RTE_ETH_SPEED_NUM_56G:
1031 		ret = RTE_ETH_LINK_SPEED_56G;
1032 		break;
1033 	case RTE_ETH_SPEED_NUM_100G:
1034 		ret = RTE_ETH_LINK_SPEED_100G;
1035 		break;
1036 	case RTE_ETH_SPEED_NUM_200G:
1037 		ret = RTE_ETH_LINK_SPEED_200G;
1038 		break;
1039 	case RTE_ETH_SPEED_NUM_400G:
1040 		ret = RTE_ETH_LINK_SPEED_400G;
1041 		break;
1042 	default:
1043 		ret = 0;
1044 	}
1045 
1046 	rte_eth_trace_speed_bitflag(speed, duplex, ret);
1047 
1048 	return ret;
1049 }
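
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * building a fixed-speed link configuration from a numeric speed, here
 * forcing 10G full duplex. A return value of 0 means the speed/duplex
 * combination is unknown and should be treated as an error by the caller.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				RTE_ETH_LINK_FULL_DUPLEX);
 */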
1050 
1051 const char *
1052 rte_eth_dev_rx_offload_name(uint64_t offload)
1053 {
1054 	const char *name = "UNKNOWN";
1055 	unsigned int i;
1056 
1057 	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1058 		if (offload == eth_dev_rx_offload_names[i].offload) {
1059 			name = eth_dev_rx_offload_names[i].name;
1060 			break;
1061 		}
1062 	}
1063 
1064 	rte_ethdev_trace_rx_offload_name(offload, name);
1065 
1066 	return name;
1067 }
1068 
1069 const char *
1070 rte_eth_dev_tx_offload_name(uint64_t offload)
1071 {
1072 	const char *name = "UNKNOWN";
1073 	unsigned int i;
1074 
1075 	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1076 		if (offload == eth_dev_tx_offload_names[i].offload) {
1077 			name = eth_dev_tx_offload_names[i].name;
1078 			break;
1079 		}
1080 	}
1081 
1082 	rte_ethdev_trace_tx_offload_name(offload, name);
1083 
1084 	return name;
1085 }
1086 
1087 static char *
1088 eth_dev_offload_names(uint64_t bitmask, char *buf, size_t size,
1089 	const char *(*offload_name)(uint64_t))
1090 {
1091 	unsigned int pos = 0;
1092 	int ret;
1093 
1094 	/* There should be at least enough space to handle those cases */
1095 	RTE_ASSERT(size >= sizeof("none") && size >= sizeof("..."));
1096 
1097 	if (bitmask == 0) {
1098 		ret = snprintf(&buf[pos], size - pos, "none");
1099 		if (ret < 0 || pos + ret >= size)
1100 			ret = 0;
1101 		pos += ret;
1102 		goto out;
1103 	}
1104 
1105 	while (bitmask != 0) {
1106 		uint64_t offload = RTE_BIT64(rte_ctz64(bitmask));
1107 		const char *name = offload_name(offload);
1108 
1109 		ret = snprintf(&buf[pos], size - pos, "%s,", name);
1110 		if (ret < 0 || pos + ret >= size) {
1111 			if (pos + sizeof("...") >= size)
1112 				pos = size - sizeof("...");
1113 			ret = snprintf(&buf[pos], size - pos, "...");
1114 			if (ret > 0 && pos + ret < size)
1115 				pos += ret;
1116 			goto out;
1117 		}
1118 
1119 		pos += ret;
1120 		bitmask &= ~offload;
1121 	}
1122 
1123 	/* Eliminate trailing comma */
1124 	pos--;
1125 out:
1126 	buf[pos] = '\0';
1127 	return buf;
1128 }
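
/*
 * Example output (editor's addition): for a bitmask of
 * RTE_ETH_RX_OFFLOAD_VLAN_STRIP | RTE_ETH_RX_OFFLOAD_IPV4_CKSUM the helper
 * above fills buf with "VLAN_STRIP,IPV4_CKSUM"; a zero bitmask yields
 * "none", and "..." marks names dropped for lack of space.
 */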
1129 
1130 const char *
1131 rte_eth_dev_capability_name(uint64_t capability)
1132 {
1133 	const char *name = "UNKNOWN";
1134 	unsigned int i;
1135 
1136 	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
1137 		if (capability == rte_eth_dev_capa_names[i].offload) {
1138 			name = rte_eth_dev_capa_names[i].name;
1139 			break;
1140 		}
1141 	}
1142 
1143 	rte_ethdev_trace_capability_name(capability, name);
1144 
1145 	return name;
1146 }
1147 
1148 static inline int
1149 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1150 		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1151 {
1152 	int ret = 0;
1153 
1154 	if (dev_info_size == 0) {
1155 		if (config_size != max_rx_pkt_len) {
1156 			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1157 				       " %u != %u is not allowed",
1158 				       port_id, config_size, max_rx_pkt_len);
1159 			ret = -EINVAL;
1160 		}
1161 	} else if (config_size > dev_info_size) {
1162 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1163 			       "> max allowed value %u", port_id, config_size,
1164 			       dev_info_size);
1165 		ret = -EINVAL;
1166 	} else if (config_size < RTE_ETHER_MIN_LEN) {
1167 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1168 			       "< min allowed value %u", port_id, config_size,
1169 			       (unsigned int)RTE_ETHER_MIN_LEN);
1170 		ret = -EINVAL;
1171 	}
1172 	return ret;
1173 }
1174 
1175 /*
1176  * Validate offloads that are requested through rte_eth_dev_configure against
1177  * the offloads successfully set by the Ethernet device.
1178  *
1179  * @param port_id
1180  *   The port identifier of the Ethernet device.
1181  * @param req_offloads
1182  *   The offloads that have been requested through `rte_eth_dev_configure`.
1183  * @param set_offloads
1184  *   The offloads successfully set by the Ethernet device.
1185  * @param offload_type
1186  *   The offload type i.e. Rx/Tx string.
1187  * @param offload_name
1188  *   The function that prints the offload name.
1189  * @return
1190  *   - (0) if validation successful.
1191  *   - (-EINVAL) if requested offload has been silently disabled.
1192  */
1193 static int
1194 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1195 		  uint64_t set_offloads, const char *offload_type,
1196 		  const char *(*offload_name)(uint64_t))
1197 {
1198 	uint64_t offloads_diff = req_offloads ^ set_offloads;
1199 	uint64_t offload;
1200 	int ret = 0;
1201 
1202 	while (offloads_diff != 0) {
1203 		/* Check if any offload is requested but not enabled. */
1204 		offload = RTE_BIT64(rte_ctz64(offloads_diff));
1205 		if (offload & req_offloads) {
1206 			RTE_ETHDEV_LOG_LINE(ERR,
1207 				"Port %u failed to enable %s offload %s",
1208 				port_id, offload_type, offload_name(offload));
1209 			ret = -EINVAL;
1210 		}
1211 
1212 		/* Check if an offload is enabled without having been requested. */
1213 		if (offload & set_offloads) {
1214 			RTE_ETHDEV_LOG_LINE(DEBUG,
1215 				"Port %u %s offload %s is not requested but enabled",
1216 				port_id, offload_type, offload_name(offload));
1217 		}
1218 
1219 		offloads_diff &= ~offload;
1220 	}
1221 
1222 	return ret;
1223 }
1224 
1225 static uint32_t
1226 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1227 {
1228 	uint32_t overhead_len;
1229 
1230 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1231 		overhead_len = max_rx_pktlen - max_mtu;
1232 	else
1233 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1234 
1235 	return overhead_len;
1236 }
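
/*
 * Worked example (editor's addition): with the usual Ethernet values,
 * max_rx_pktlen = 1518 and max_mtu = 1500 give
 * overhead_len = 1518 - 1500 = 18 bytes, i.e. exactly
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4), which is also the
 * fallback used when the driver does not report a usable max_mtu.
 */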
1237 
1238 /* rte_eth_dev_info_get() should be called prior to this function */
1239 static int
1240 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
1241 		uint16_t mtu)
1242 {
1243 	uint32_t overhead_len;
1244 	uint32_t frame_size;
1245 
1246 	if (mtu < dev_info->min_mtu) {
1247 		RTE_ETHDEV_LOG_LINE(ERR,
1248 			"MTU (%u) < device min MTU (%u) for port_id %u",
1249 			mtu, dev_info->min_mtu, port_id);
1250 		return -EINVAL;
1251 	}
1252 	if (mtu > dev_info->max_mtu) {
1253 		RTE_ETHDEV_LOG_LINE(ERR,
1254 			"MTU (%u) > device max MTU (%u) for port_id %u",
1255 			mtu, dev_info->max_mtu, port_id);
1256 		return -EINVAL;
1257 	}
1258 
1259 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1260 			dev_info->max_mtu);
1261 	frame_size = mtu + overhead_len;
1262 	if (frame_size < RTE_ETHER_MIN_LEN) {
1263 		RTE_ETHDEV_LOG_LINE(ERR,
1264 			"Frame size (%u) < min frame size (%u) for port_id %u",
1265 			frame_size, RTE_ETHER_MIN_LEN, port_id);
1266 		return -EINVAL;
1267 	}
1268 
1269 	if (frame_size > dev_info->max_rx_pktlen) {
1270 		RTE_ETHDEV_LOG_LINE(ERR,
1271 			"Frame size (%u) > device max frame size (%u) for port_id %u",
1272 			frame_size, dev_info->max_rx_pktlen, port_id);
1273 		return -EINVAL;
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 int
1280 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1281 		      const struct rte_eth_conf *dev_conf)
1282 {
1283 	enum rte_eth_hash_function algorithm;
1284 	struct rte_eth_dev *dev;
1285 	struct rte_eth_dev_info dev_info;
1286 	struct rte_eth_conf orig_conf;
1287 	int diag;
1288 	int ret;
1289 	uint16_t old_mtu;
1290 
1291 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1292 	dev = &rte_eth_devices[port_id];
1293 
1294 	if (dev_conf == NULL) {
1295 		RTE_ETHDEV_LOG_LINE(ERR,
1296 			"Cannot configure ethdev port %u from NULL config",
1297 			port_id);
1298 		return -EINVAL;
1299 	}
1300 
1301 	if (*dev->dev_ops->dev_configure == NULL)
1302 		return -ENOTSUP;
1303 
1304 	if (dev->data->dev_started) {
1305 		RTE_ETHDEV_LOG_LINE(ERR,
1306 			"Port %u must be stopped to allow configuration",
1307 			port_id);
1308 		return -EBUSY;
1309 	}
1310 
1311 	/*
1312 	 * Ensure that "dev_configured" is always 0 each time we prepare to
1313 	 * call dev_configure(), to avoid any unanticipated behaviour.
1314 	 * It is set to 1 once dev_configure() has executed successfully.
1315 	 */
1316 	dev->data->dev_configured = 0;
1317 
1318 	/* Store original config, as a rollback is required on failure */
1319 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1320 
1321 	/*
1322 	 * Copy the dev_conf parameter into the dev structure.
1323 	 * rte_eth_dev_info_get() requires dev_conf, so copy it before getting dev_info.
1324 	 */
1325 	if (dev_conf != &dev->data->dev_conf)
1326 		memcpy(&dev->data->dev_conf, dev_conf,
1327 		       sizeof(dev->data->dev_conf));
1328 
1329 	/* Backup mtu for rollback */
1330 	old_mtu = dev->data->mtu;
1331 
1332 	/* fields must be zero to reserve them for future ABI changes */
1333 	if (dev_conf->rxmode.reserved_64s[0] != 0 ||
1334 	    dev_conf->rxmode.reserved_64s[1] != 0 ||
1335 	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
1336 	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
1337 		RTE_ETHDEV_LOG_LINE(ERR, "Rxmode reserved fields not zero");
1338 		ret = -EINVAL;
1339 		goto rollback;
1340 	}
1341 
1342 	if (dev_conf->txmode.reserved_64s[0] != 0 ||
1343 	    dev_conf->txmode.reserved_64s[1] != 0 ||
1344 	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
1345 	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
1346 		RTE_ETHDEV_LOG_LINE(ERR, "txmode reserved fields not zero");
1347 		ret = -EINVAL;
1348 		goto rollback;
1349 	}
1350 
1351 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1352 	if (ret != 0)
1353 		goto rollback;
1354 
1355 	/* If number of queues specified by application for both Rx and Tx is
1356 	 * zero, use driver preferred values. This cannot be done individually
1357 	 * as it is valid for either Tx or Rx (but not both) to be zero.
1358 	 * If the driver does not provide any preferred values, fall back on
1359 	 * EAL defaults.
1360 	 */
1361 	if (nb_rx_q == 0 && nb_tx_q == 0) {
1362 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
1363 		if (nb_rx_q == 0)
1364 			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1365 		nb_tx_q = dev_info.default_txportconf.nb_queues;
1366 		if (nb_tx_q == 0)
1367 			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1368 	}
1369 
1370 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1371 		RTE_ETHDEV_LOG_LINE(ERR,
1372 			"Number of Rx queues requested (%u) is greater than max supported(%d)",
1373 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1374 		ret = -EINVAL;
1375 		goto rollback;
1376 	}
1377 
1378 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1379 		RTE_ETHDEV_LOG_LINE(ERR,
1380 			"Number of Tx queues requested (%u) is greater than max supported(%d)",
1381 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1382 		ret = -EINVAL;
1383 		goto rollback;
1384 	}
1385 
1386 	/*
1387 	 * Check that the numbers of Rx and Tx queues are not greater
1388 	 * than the maximum number of Rx and Tx queues supported by the
1389 	 * configured device.
1390 	 */
1391 	if (nb_rx_q > dev_info.max_rx_queues) {
1392 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u",
1393 			port_id, nb_rx_q, dev_info.max_rx_queues);
1394 		ret = -EINVAL;
1395 		goto rollback;
1396 	}
1397 
1398 	if (nb_tx_q > dev_info.max_tx_queues) {
1399 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u",
1400 			port_id, nb_tx_q, dev_info.max_tx_queues);
1401 		ret = -EINVAL;
1402 		goto rollback;
1403 	}
1404 
1405 	/* Check that the device supports requested interrupts */
1406 	if ((dev_conf->intr_conf.lsc == 1) &&
1407 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1408 		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support lsc",
1409 			dev->device->driver->name);
1410 		ret = -EINVAL;
1411 		goto rollback;
1412 	}
1413 	if ((dev_conf->intr_conf.rmv == 1) &&
1414 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1415 		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support rmv",
1416 			dev->device->driver->name);
1417 		ret = -EINVAL;
1418 		goto rollback;
1419 	}
1420 
1421 	if (dev_conf->rxmode.mtu == 0)
1422 		dev->data->dev_conf.rxmode.mtu =
1423 			(dev_info.max_mtu == 0) ? RTE_ETHER_MTU :
1424 			RTE_MIN(dev_info.max_mtu, RTE_ETHER_MTU);
1425 
1426 	ret = eth_dev_validate_mtu(port_id, &dev_info,
1427 			dev->data->dev_conf.rxmode.mtu);
1428 	if (ret != 0)
1429 		goto rollback;
1430 
1431 	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1432 
1433 	/*
1434 	 * If LRO is enabled, check that the maximum aggregated packet
1435 	 * size is supported by the configured device.
1436 	 */
1437 	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1438 		uint32_t max_rx_pktlen;
1439 		uint32_t overhead_len;
1440 
1441 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1442 				dev_info.max_mtu);
1443 		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1444 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
1445 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1446 		ret = eth_dev_check_lro_pkt_size(port_id,
1447 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1448 				max_rx_pktlen,
1449 				dev_info.max_lro_pkt_size);
1450 		if (ret != 0)
1451 			goto rollback;
1452 	}
1453 
1454 	/* Any requested offload must be within the device's capabilities */
1455 	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1456 	     dev_conf->rxmode.offloads) {
1457 		char buffer[512];
1458 
1459 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Rx offloads %s",
1460 			port_id, eth_dev_offload_names(
1461 			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
1462 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1463 		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s",
1464 			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
1465 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1466 		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Rx offloads %s",
1467 			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
1468 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
1469 
1470 		ret = -EINVAL;
1471 		goto rollback;
1472 	}
1473 	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1474 	     dev_conf->txmode.offloads) {
1475 		char buffer[512];
1476 
1477 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Tx offloads %s",
1478 			port_id, eth_dev_offload_names(
1479 			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
1480 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1481 		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s",
1482 			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
1483 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1484 		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Tx offloads %s",
1485 			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
1486 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
1487 		ret = -EINVAL;
1488 		goto rollback;
1489 	}
1490 
1491 	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1492 		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1493 
1494 	/* Check that the device supports the requested RSS hash functions. */
1495 	if ((dev_info.flow_type_rss_offloads |
1496 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1497 	    dev_info.flow_type_rss_offloads) {
1498 		RTE_ETHDEV_LOG_LINE(ERR,
1499 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
1500 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1501 			dev_info.flow_type_rss_offloads);
1502 		ret = -EINVAL;
1503 		goto rollback;
1504 	}
1505 
1506 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1507 	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
1508 	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
1509 		RTE_ETHDEV_LOG_LINE(ERR,
1510 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
1511 			port_id,
1512 			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
1513 		ret = -EINVAL;
1514 		goto rollback;
1515 	}
1516 
1517 	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
1518 	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
1519 		RTE_ETHDEV_LOG_LINE(ERR,
1520 			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
1521 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
1522 			dev_info.hash_key_size);
1523 		ret = -EINVAL;
1524 		goto rollback;
1525 	}
1526 
1527 	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
1528 	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
1529 	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
1530 		RTE_ETHDEV_LOG_LINE(ERR,
1531 			"Ethdev port_id=%u configured RSS hash algorithm (%u)"
1532 			"is not in the algorithm capability (0x%" PRIx32 ")",
1533 			port_id, algorithm, dev_info.rss_algo_capa);
1534 		ret = -EINVAL;
1535 		goto rollback;
1536 	}
1537 
1538 	/*
1539 	 * Set up the new number of Rx/Tx queues and reconfigure the device.
1540 	 */
1541 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1542 	if (diag != 0) {
1543 		RTE_ETHDEV_LOG_LINE(ERR,
1544 			"Port%u eth_dev_rx_queue_config = %d",
1545 			port_id, diag);
1546 		ret = diag;
1547 		goto rollback;
1548 	}
1549 
1550 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1551 	if (diag != 0) {
1552 		RTE_ETHDEV_LOG_LINE(ERR,
1553 			"Port%u eth_dev_tx_queue_config = %d",
1554 			port_id, diag);
1555 		eth_dev_rx_queue_config(dev, 0);
1556 		ret = diag;
1557 		goto rollback;
1558 	}
1559 
1560 	diag = (*dev->dev_ops->dev_configure)(dev);
1561 	if (diag != 0) {
1562 		RTE_ETHDEV_LOG_LINE(ERR, "Port%u dev_configure = %d",
1563 			port_id, diag);
1564 		ret = eth_err(port_id, diag);
1565 		goto reset_queues;
1566 	}
1567 
1568 	/* Initialize Rx profiling if enabled at compilation time. */
1569 	diag = __rte_eth_dev_profile_init(port_id, dev);
1570 	if (diag != 0) {
1571 		RTE_ETHDEV_LOG_LINE(ERR, "Port%u __rte_eth_dev_profile_init = %d",
1572 			port_id, diag);
1573 		ret = eth_err(port_id, diag);
1574 		goto reset_queues;
1575 	}
1576 
1577 	/* Validate Rx offloads. */
1578 	diag = eth_dev_validate_offloads(port_id,
1579 			dev_conf->rxmode.offloads,
1580 			dev->data->dev_conf.rxmode.offloads, "Rx",
1581 			rte_eth_dev_rx_offload_name);
1582 	if (diag != 0) {
1583 		ret = diag;
1584 		goto reset_queues;
1585 	}
1586 
1587 	/* Validate Tx offloads. */
1588 	diag = eth_dev_validate_offloads(port_id,
1589 			dev_conf->txmode.offloads,
1590 			dev->data->dev_conf.txmode.offloads, "Tx",
1591 			rte_eth_dev_tx_offload_name);
1592 	if (diag != 0) {
1593 		ret = diag;
1594 		goto reset_queues;
1595 	}
1596 
1597 	dev->data->dev_configured = 1;
1598 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1599 	return 0;
1600 reset_queues:
1601 	eth_dev_rx_queue_config(dev, 0);
1602 	eth_dev_tx_queue_config(dev, 0);
1603 rollback:
1604 	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1605 	if (old_mtu != dev->data->mtu)
1606 		dev->data->mtu = old_mtu;
1607 
1608 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1609 	return ret;
1610 }
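
/*
 * Illustrative sketch (editor's addition, not part of the original file)
 * of the usual configuration sequence built on the function above; the
 * offloads are taken from dev_info so the capability checks in
 * rte_eth_dev_configure() pass.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
 *		conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		... handle the error; the port is left unconfigured ...
 */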
1611 
1612 static void
1613 eth_dev_mac_restore(struct rte_eth_dev *dev,
1614 			struct rte_eth_dev_info *dev_info)
1615 {
1616 	struct rte_ether_addr *addr;
1617 	uint16_t i;
1618 	uint32_t pool = 0;
1619 	uint64_t pool_mask;
1620 
1621 	/* replay MAC address configuration including default MAC */
1622 	addr = &dev->data->mac_addrs[0];
1623 	if (*dev->dev_ops->mac_addr_set != NULL)
1624 		(*dev->dev_ops->mac_addr_set)(dev, addr);
1625 	else if (*dev->dev_ops->mac_addr_add != NULL)
1626 		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1627 
1628 	if (*dev->dev_ops->mac_addr_add != NULL) {
1629 		for (i = 1; i < dev_info->max_mac_addrs; i++) {
1630 			addr = &dev->data->mac_addrs[i];
1631 
1632 			/* skip zero address */
1633 			if (rte_is_zero_ether_addr(addr))
1634 				continue;
1635 
1636 			pool = 0;
1637 			pool_mask = dev->data->mac_pool_sel[i];
1638 
1639 			do {
1640 				if (pool_mask & UINT64_C(1))
1641 					(*dev->dev_ops->mac_addr_add)(dev,
1642 						addr, i, pool);
1643 				pool_mask >>= 1;
1644 				pool++;
1645 			} while (pool_mask);
1646 		}
1647 	}
1648 }
1649 
1650 static int
1651 eth_dev_config_restore(struct rte_eth_dev *dev,
1652 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
1653 {
1654 	int ret;
1655 
1656 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1657 		eth_dev_mac_restore(dev, dev_info);
1658 
1659 	/* replay promiscuous configuration */
1660 	/*
1661 	 * Use the callbacks directly since we don't need the port_id check and
1662 	 * want to avoid setting the same value again.
1663 	 */
1664 	if (rte_eth_promiscuous_get(port_id) == 1 &&
1665 	    *dev->dev_ops->promiscuous_enable != NULL) {
1666 		ret = eth_err(port_id,
1667 			      (*dev->dev_ops->promiscuous_enable)(dev));
1668 		if (ret != 0 && ret != -ENOTSUP) {
1669 			RTE_ETHDEV_LOG_LINE(ERR,
1670 				"Failed to enable promiscuous mode for device (port %u): %s",
1671 				port_id, rte_strerror(-ret));
1672 			return ret;
1673 		}
1674 	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
1675 		   *dev->dev_ops->promiscuous_disable != NULL) {
1676 		ret = eth_err(port_id,
1677 			      (*dev->dev_ops->promiscuous_disable)(dev));
1678 		if (ret != 0 && ret != -ENOTSUP) {
1679 			RTE_ETHDEV_LOG_LINE(ERR,
1680 				"Failed to disable promiscuous mode for device (port %u): %s",
1681 				port_id, rte_strerror(-ret));
1682 			return ret;
1683 		}
1684 	}
1685 
1686 	/* replay all multicast configuration */
1687 	/*
1688 	 * Use the callbacks directly since we don't need the port_id check and
1689 	 * want to avoid setting the same value again.
1690 	 */
1691 	if (rte_eth_allmulticast_get(port_id) == 1 &&
1692 	    *dev->dev_ops->allmulticast_enable != NULL) {
1693 		ret = eth_err(port_id,
1694 			      (*dev->dev_ops->allmulticast_enable)(dev));
1695 		if (ret != 0 && ret != -ENOTSUP) {
1696 			RTE_ETHDEV_LOG_LINE(ERR,
1697 				"Failed to enable allmulticast mode for device (port %u): %s",
1698 				port_id, rte_strerror(-ret));
1699 			return ret;
1700 		}
1701 	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
1702 		   *dev->dev_ops->allmulticast_disable != NULL) {
1703 		ret = eth_err(port_id,
1704 			      (*dev->dev_ops->allmulticast_disable)(dev));
1705 		if (ret != 0 && ret != -ENOTSUP) {
1706 			RTE_ETHDEV_LOG_LINE(ERR,
1707 				"Failed to disable allmulticast mode for device (port %u): %s",
1708 				port_id, rte_strerror(-ret));
1709 			return ret;
1710 		}
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 int
1717 rte_eth_dev_start(uint16_t port_id)
1718 {
1719 	struct rte_eth_dev *dev;
1720 	struct rte_eth_dev_info dev_info;
1721 	int diag;
1722 	int ret, ret_stop;
1723 
1724 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1725 	dev = &rte_eth_devices[port_id];
1726 
1727 	if (*dev->dev_ops->dev_start == NULL)
1728 		return -ENOTSUP;
1729 
1730 	if (dev->data->dev_configured == 0) {
1731 		RTE_ETHDEV_LOG_LINE(INFO,
1732 			"Device with port_id=%"PRIu16" is not configured.",
1733 			port_id);
1734 		return -EINVAL;
1735 	}
1736 
1737 	if (dev->data->dev_started != 0) {
1738 		RTE_ETHDEV_LOG_LINE(INFO,
1739 			"Device with port_id=%"PRIu16" already started",
1740 			port_id);
1741 		return 0;
1742 	}
1743 
1744 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1745 	if (ret != 0)
1746 		return ret;
1747 
1748 	/* Restore the MAC address now if the device does not support live change */
1749 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1750 		eth_dev_mac_restore(dev, &dev_info);
1751 
1752 	diag = (*dev->dev_ops->dev_start)(dev);
1753 	if (diag == 0)
1754 		dev->data->dev_started = 1;
1755 	else
1756 		return eth_err(port_id, diag);
1757 
1758 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
1759 	if (ret != 0) {
1760 		RTE_ETHDEV_LOG_LINE(ERR,
1761 			"Error during restoring configuration for device (port %u): %s",
1762 			port_id, rte_strerror(-ret));
1763 		ret_stop = rte_eth_dev_stop(port_id);
1764 		if (ret_stop != 0) {
1765 			RTE_ETHDEV_LOG_LINE(ERR,
1766 				"Failed to stop device (port %u): %s",
1767 				port_id, rte_strerror(-ret_stop));
1768 		}
1769 
1770 		return ret;
1771 	}
1772 
1773 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
1774 		if (*dev->dev_ops->link_update == NULL)
1775 			return -ENOTSUP;
1776 		(*dev->dev_ops->link_update)(dev, 0);
1777 	}
1778 
1779 	/* expose selection of PMD fast-path functions */
1780 	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1781 
1782 	rte_ethdev_trace_start(port_id);
1783 	return 0;
1784 }
1785 
1786 int
1787 rte_eth_dev_stop(uint16_t port_id)
1788 {
1789 	struct rte_eth_dev *dev;
1790 	int ret;
1791 
1792 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1793 	dev = &rte_eth_devices[port_id];
1794 
1795 	if (*dev->dev_ops->dev_stop == NULL)
1796 		return -ENOTSUP;
1797 
1798 	if (dev->data->dev_started == 0) {
1799 		RTE_ETHDEV_LOG_LINE(INFO,
1800 			"Device with port_id=%"PRIu16" already stopped",
1801 			port_id);
1802 		return 0;
1803 	}
1804 
1805 	/* point fast-path functions to dummy ones */
1806 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1807 
1808 	ret = (*dev->dev_ops->dev_stop)(dev);
1809 	if (ret == 0)
1810 		dev->data->dev_started = 0;
1811 	rte_ethdev_trace_stop(port_id, ret);
1812 
1813 	return ret;
1814 }
1815 
1816 int
1817 rte_eth_dev_set_link_up(uint16_t port_id)
1818 {
1819 	struct rte_eth_dev *dev;
1820 	int ret;
1821 
1822 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1823 	dev = &rte_eth_devices[port_id];
1824 
1825 	if (*dev->dev_ops->dev_set_link_up == NULL)
1826 		return -ENOTSUP;
1827 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1828 
1829 	rte_ethdev_trace_set_link_up(port_id, ret);
1830 
1831 	return ret;
1832 }
1833 
1834 int
1835 rte_eth_dev_set_link_down(uint16_t port_id)
1836 {
1837 	struct rte_eth_dev *dev;
1838 	int ret;
1839 
1840 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1841 	dev = &rte_eth_devices[port_id];
1842 
1843 	if (*dev->dev_ops->dev_set_link_down == NULL)
1844 		return -ENOTSUP;
1845 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1846 
1847 	rte_ethdev_trace_set_link_down(port_id, ret);
1848 
1849 	return ret;
1850 }
1851 
1852 int
1853 rte_eth_dev_close(uint16_t port_id)
1854 {
1855 	struct rte_eth_dev *dev;
1856 	int firsterr, binerr;
1857 	int *lasterr = &firsterr;
1858 
1859 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1860 	dev = &rte_eth_devices[port_id];
1861 
1862 	/*
1863 	 * A secondary process needs to close the device to release its
1864 	 * process-private resources, but it should not be obliged to wait
1865 	 * for the device to be stopped before closing the ethdev.
1866 	 */
1867 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1868 			dev->data->dev_started) {
1869 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot close started device (port %u)",
1870 			       port_id);
1871 		return -EINVAL;
1872 	}
1873 
1874 	if (*dev->dev_ops->dev_close == NULL)
1875 		return -ENOTSUP;
1876 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1877 	if (*lasterr != 0)
1878 		lasterr = &binerr;
1879 
1880 	rte_ethdev_trace_close(port_id);
1881 	*lasterr = rte_eth_dev_release_port(dev);
1882 
1883 	return firsterr;
1884 }
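
/*
 * Usage sketch (illustrative only, not part of the ethdev API): the port
 * lifecycle enforced by the checks above. A started port must be stopped
 * before it can be closed; closing then releases the port and its
 * resources. rte_eth_dev_stop() is a no-op on an already stopped port,
 * so this sequence is safe to call unconditionally.
 */
static __rte_unused int
example_port_teardown(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0)
		return ret;
	/* Would return -EINVAL here if the port were still started. */
	return rte_eth_dev_close(port_id);
}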
1885 
1886 int
1887 rte_eth_dev_reset(uint16_t port_id)
1888 {
1889 	struct rte_eth_dev *dev;
1890 	int ret;
1891 
1892 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1893 	dev = &rte_eth_devices[port_id];
1894 
1895 	if (*dev->dev_ops->dev_reset == NULL)
1896 		return -ENOTSUP;
1897 
1898 	ret = rte_eth_dev_stop(port_id);
1899 	if (ret != 0) {
1900 		RTE_ETHDEV_LOG_LINE(ERR,
1901 			"Failed to stop device (port %u) before reset: %s - ignore",
1902 			port_id, rte_strerror(-ret));
1903 	}
1904 	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
1905 
1906 	rte_ethdev_trace_reset(port_id, ret);
1907 
1908 	return ret;
1909 }
1910 
1911 int
1912 rte_eth_dev_is_removed(uint16_t port_id)
1913 {
1914 	struct rte_eth_dev *dev;
1915 	int ret;
1916 
1917 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1918 	dev = &rte_eth_devices[port_id];
1919 
1920 	if (dev->state == RTE_ETH_DEV_REMOVED)
1921 		return 1;
1922 
1923 	if (*dev->dev_ops->is_removed == NULL)
1924 		return 0;
1925 
1926 	ret = dev->dev_ops->is_removed(dev);
1927 	if (ret != 0)
1928 		/* Device is physically removed. */
1929 		dev->state = RTE_ETH_DEV_REMOVED;
1930 
1931 	rte_ethdev_trace_is_removed(port_id, ret);
1932 
1933 	return ret;
1934 }
1935 
1936 static int
1937 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
1938 			 uint16_t min_length)
1939 {
1940 	uint16_t data_room_size;
1941 
1942 	/*
1943 	 * Check the size of the mbuf data buffer: this value
1944 	 * must be provided in the private data of the memory pool.
1945 	 * First check that the memory pool has valid private data.
1946 	 */
1947 	if (mp->private_data_size <
1948 			sizeof(struct rte_pktmbuf_pool_private)) {
1949 		RTE_ETHDEV_LOG_LINE(ERR, "%s private_data_size %u < %u",
1950 			mp->name, mp->private_data_size,
1951 			(unsigned int)
1952 			sizeof(struct rte_pktmbuf_pool_private));
1953 		return -ENOSPC;
1954 	}
1955 	data_room_size = rte_pktmbuf_data_room_size(mp);
1956 	if (data_room_size < offset + min_length) {
1957 		RTE_ETHDEV_LOG_LINE(ERR,
1958 			       "%s mbuf_data_room_size %u < %u (%u + %u)",
1959 			       mp->name, data_room_size,
1960 			       offset + min_length, offset, min_length);
1961 		return -EINVAL;
1962 	}
1963 	return 0;
1964 }
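
/*
 * Worked example (illustrative only; "example_chk_pool" and its sizing
 * are arbitrary): with the default data room of RTE_MBUF_DEFAULT_BUF_SIZE
 * (headroom + 2048 bytes), the check above passes exactly at the boundary
 * offset = RTE_PKTMBUF_HEADROOM, min_length = 2048.
 */
static __rte_unused int
example_check_rx_mempool(void)
{
	struct rte_mempool *mp;
	int ret;

	mp = rte_pktmbuf_pool_create("example_chk_pool", 512, 0, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
	if (mp == NULL)
		return -rte_errno;

	/* offset + min_length == data room size, so this returns 0. */
	ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM, 2048);
	rte_mempool_free(mp);
	return ret;
}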
1965 
1966 static int
1967 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
1968 {
1969 	int cnt;
1970 
1971 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
1972 	if (cnt <= 0)
1973 		return cnt;
1974 
1975 	*ptypes = malloc(sizeof(uint32_t) * cnt);
1976 	if (*ptypes == NULL)
1977 		return -ENOMEM;
1978 
1979 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
1980 	if (cnt <= 0) {
1981 		free(*ptypes);
1982 		*ptypes = NULL;
1983 	}
1984 	return cnt;
1985 }
1986 
1987 static int
1988 rte_eth_rx_queue_check_split(uint16_t port_id,
1989 			const struct rte_eth_rxseg_split *rx_seg,
1990 			uint16_t n_seg, uint32_t *mbp_buf_size,
1991 			const struct rte_eth_dev_info *dev_info)
1992 {
1993 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1994 	struct rte_mempool *mp_first;
1995 	uint32_t offset_mask;
1996 	uint16_t seg_idx;
1997 	int ret = 0;
1998 	int ptype_cnt;
1999 	uint32_t *ptypes;
2000 	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
2001 	int i;
2002 
2003 	if (n_seg > seg_capa->max_nseg) {
2004 		RTE_ETHDEV_LOG_LINE(ERR,
2005 			       "Requested Rx segments %u exceed supported %u",
2006 			       n_seg, seg_capa->max_nseg);
2007 		return -EINVAL;
2008 	}
2009 	/*
2010 	 * Check the sizes and offsets against buffer sizes
2011 	 * for each segment specified in extended configuration.
2012 	 */
2013 	mp_first = rx_seg[0].mp;
2014 	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
2015 
2016 	ptypes = NULL;
2017 	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);
2018 
2019 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
2020 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
2021 		uint32_t length = rx_seg[seg_idx].length;
2022 		uint32_t offset = rx_seg[seg_idx].offset;
2023 		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;
2024 
2025 		if (mpl == NULL) {
2026 			RTE_ETHDEV_LOG_LINE(ERR, "null mempool pointer");
2027 			ret = -EINVAL;
2028 			goto out;
2029 		}
2030 		if (seg_idx != 0 && mp_first != mpl &&
2031 		    seg_capa->multi_pools == 0) {
2032 			RTE_ETHDEV_LOG_LINE(ERR, "Receiving to multiple pools is not supported");
2033 			ret = -ENOTSUP;
2034 			goto out;
2035 		}
2036 		if (offset != 0) {
2037 			if (seg_capa->offset_allowed == 0) {
2038 				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation with offset is not supported");
2039 				ret = -ENOTSUP;
2040 				goto out;
2041 			}
2042 			if (offset & offset_mask) {
2043 				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation invalid offset alignment %u, %u",
2044 					       offset,
2045 					       seg_capa->offset_align_log2);
2046 				ret = -EINVAL;
2047 				goto out;
2048 			}
2049 		}
2050 
2051 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
2052 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
2053 		if (proto_hdr != 0) {
2054 			/* Split based on protocol headers. */
2055 			if (length != 0) {
2056 				RTE_ETHDEV_LOG_LINE(ERR,
2057 					"Do not set length split and protocol split within a segment"
2058 					);
2059 				ret = -EINVAL;
2060 				goto out;
2061 			}
2062 			if ((proto_hdr & prev_proto_hdrs) != 0) {
2063 				RTE_ETHDEV_LOG_LINE(ERR,
2064 					"Repeat with previous protocol headers or proto-split after length-based split"
2065 					);
2066 				ret = -EINVAL;
2067 				goto out;
2068 			}
2069 			if (ptype_cnt <= 0) {
2070 				RTE_ETHDEV_LOG_LINE(ERR,
2071 					"Port %u failed to get supported buffer split header protocols",
2072 					port_id);
2073 				ret = -ENOTSUP;
2074 				goto out;
2075 			}
2076 			for (i = 0; i < ptype_cnt; i++) {
2077 				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
2078 					break;
2079 			}
2080 			if (i == ptype_cnt) {
2081 				RTE_ETHDEV_LOG_LINE(ERR,
2082 					"Requested Rx split header protocols 0x%x is not supported.",
2083 					proto_hdr);
2084 				ret = -EINVAL;
2085 				goto out;
2086 			}
2087 			prev_proto_hdrs |= proto_hdr;
2088 		} else {
2089 			/* Split at fixed length. */
2090 			length = length != 0 ? length : *mbp_buf_size;
2091 			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
2092 		}
2093 
2094 		ret = rte_eth_check_rx_mempool(mpl, offset, length);
2095 		if (ret != 0)
2096 			goto out;
2097 	}
2098 out:
2099 	free(ptypes);
2100 	return ret;
2101 }
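
/*
 * Usage sketch (illustrative only): a two-segment length-based Rx split,
 * headers into hdr_mp and the rest of each packet into pay_mp. Assumes
 * the PMD advertises RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT and that both pools
 * have enough data room; the 128-byte header length is an arbitrary
 * choice.
 */
static __rte_unused int
example_rx_buffer_split(uint16_t port_id, uint16_t queue_id,
			uint16_t nb_desc, unsigned int socket_id,
			struct rte_mempool *hdr_mp, struct rte_mempool *pay_mp)
{
	union rte_eth_rxseg seg[2];
	struct rte_eth_rxconf rxconf;

	memset(seg, 0, sizeof(seg));
	seg[0].split.mp = hdr_mp;
	seg[0].split.length = 128;	/* first 128 bytes of each packet */
	seg[1].split.mp = pay_mp;
	seg[1].split.length = 0;	/* remainder, up to the pool buffer size */

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	rxconf.rx_seg = seg;
	rxconf.rx_nseg = 2;

	/* mp must be NULL: the segments are the sole Rx buffer source. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, &rxconf, NULL);
}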
2102 
2103 static int
2104 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
2105 			       uint16_t n_mempools, uint32_t *min_buf_size,
2106 			       const struct rte_eth_dev_info *dev_info)
2107 {
2108 	uint16_t pool_idx;
2109 	int ret;
2110 
2111 	if (n_mempools > dev_info->max_rx_mempools) {
2112 		RTE_ETHDEV_LOG_LINE(ERR,
2113 			       "Too many Rx mempools %u vs maximum %u",
2114 			       n_mempools, dev_info->max_rx_mempools);
2115 		return -EINVAL;
2116 	}
2117 
2118 	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
2119 		struct rte_mempool *mp = rx_mempools[pool_idx];
2120 
2121 		if (mp == NULL) {
2122 			RTE_ETHDEV_LOG_LINE(ERR, "null Rx mempool pointer");
2123 			return -EINVAL;
2124 		}
2125 
2126 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2127 					       dev_info->min_rx_bufsize);
2128 		if (ret != 0)
2129 			return ret;
2130 
2131 		*min_buf_size = RTE_MIN(*min_buf_size,
2132 					rte_pktmbuf_data_room_size(mp));
2133 	}
2134 
2135 	return 0;
2136 }
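
/*
 * Usage sketch (illustrative only): multiple Rx mempools with different
 * buffer sizes, letting the PMD pick the best-fitting pool per packet.
 * Assumes the port reports max_rx_mempools >= 2.
 */
static __rte_unused int
example_rx_multi_mempool(uint16_t port_id, uint16_t queue_id,
			 uint16_t nb_desc, unsigned int socket_id,
			 struct rte_mempool *small_mp, struct rte_mempool *big_mp)
{
	struct rte_mempool *pools[2] = { small_mp, big_mp };
	struct rte_eth_rxconf rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.rx_mempools = pools;
	rxconf.rx_nmempool = 2;

	/* mp must be NULL: the pools array is the sole Rx buffer source. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, &rxconf, NULL);
}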
2137 
2138 int
2139 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2140 		       uint16_t nb_rx_desc, unsigned int socket_id,
2141 		       const struct rte_eth_rxconf *rx_conf,
2142 		       struct rte_mempool *mp)
2143 {
2144 	int ret;
2145 	uint64_t rx_offloads;
2146 	uint32_t mbp_buf_size = UINT32_MAX;
2147 	struct rte_eth_dev *dev;
2148 	struct rte_eth_dev_info dev_info;
2149 	struct rte_eth_rxconf local_conf;
2150 	uint32_t buf_data_size;
2151 
2152 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2153 	dev = &rte_eth_devices[port_id];
2154 
2155 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2156 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
2157 		return -EINVAL;
2158 	}
2159 
2160 	if (*dev->dev_ops->rx_queue_setup == NULL)
2161 		return -ENOTSUP;
2162 
2163 	if (rx_conf != NULL &&
2164 	   (rx_conf->reserved_64s[0] != 0 ||
2165 	    rx_conf->reserved_64s[1] != 0 ||
2166 	    rx_conf->reserved_ptrs[0] != NULL ||
2167 	    rx_conf->reserved_ptrs[1] != NULL)) {
2168 		RTE_ETHDEV_LOG_LINE(ERR, "Rx conf reserved fields not zero");
2169 		return -EINVAL;
2170 	}
2171 
2172 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2173 	if (ret != 0)
2174 		return ret;
2175 
2176 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
2177 	if (rx_conf != NULL)
2178 		rx_offloads |= rx_conf->offloads;
2179 
2180 	/* Ensure that we have one and only one source of Rx buffers */
2181 	if ((mp != NULL) +
2182 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
2183 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
2184 		RTE_ETHDEV_LOG_LINE(ERR,
2185 			       "Ambiguous Rx mempools configuration");
2186 		return -EINVAL;
2187 	}
2188 
2189 	if (mp != NULL) {
2190 		/* Single pool configuration check. */
2191 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2192 					       dev_info.min_rx_bufsize);
2193 		if (ret != 0)
2194 			return ret;
2195 
2196 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2197 		buf_data_size = mbp_buf_size - RTE_PKTMBUF_HEADROOM;
2198 		if (buf_data_size > dev_info.max_rx_bufsize)
2199 			RTE_ETHDEV_LOG_LINE(DEBUG,
2200 				"For port_id=%u, the mbuf data buffer size (%u) is bigger than "
2201 				"max buffer size (%u) device can utilize, so mbuf size can be reduced.",
2202 				port_id, buf_data_size, dev_info.max_rx_bufsize);
2203 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
2204 		const struct rte_eth_rxseg_split *rx_seg;
2205 		uint16_t n_seg;
2206 
2207 		/* Extended multi-segment configuration check. */
2208 		if (rx_conf->rx_seg == NULL) {
2209 			RTE_ETHDEV_LOG_LINE(ERR,
2210 				       "Memory pool is null and no multi-segment configuration provided");
2211 			return -EINVAL;
2212 		}
2213 
2214 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2215 		n_seg = rx_conf->rx_nseg;
2216 
2217 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2218 			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
2219 							   &mbp_buf_size,
2220 							   &dev_info);
2221 			if (ret != 0)
2222 				return ret;
2223 		} else {
2224 			RTE_ETHDEV_LOG_LINE(ERR, "No Rx segmentation offload configured");
2225 			return -EINVAL;
2226 		}
2227 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
2228 		/* Extended multi-pool configuration check. */
2229 		if (rx_conf->rx_mempools == NULL) {
2230 			RTE_ETHDEV_LOG_LINE(ERR, "Memory pools array is null");
2231 			return -EINVAL;
2232 		}
2233 
2234 		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
2235 						     rx_conf->rx_nmempool,
2236 						     &mbp_buf_size,
2237 						     &dev_info);
2238 		if (ret != 0)
2239 			return ret;
2240 	} else {
2241 		RTE_ETHDEV_LOG_LINE(ERR, "Missing Rx mempool configuration");
2242 		return -EINVAL;
2243 	}
2244 
2245 	/* Use default specified by driver, if nb_rx_desc is zero */
2246 	if (nb_rx_desc == 0) {
2247 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
2248 		/* If driver default is also zero, fall back on EAL default */
2249 		if (nb_rx_desc == 0)
2250 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2251 	}
2252 
2253 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2254 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2255 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2256 
2257 		RTE_ETHDEV_LOG_LINE(ERR,
2258 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu",
2259 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2260 			dev_info.rx_desc_lim.nb_min,
2261 			dev_info.rx_desc_lim.nb_align);
2262 		return -EINVAL;
2263 	}
2264 
2265 	if (dev->data->dev_started &&
2266 		!(dev_info.dev_capa &
2267 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2268 		return -EBUSY;
2269 
2270 	if (dev->data->dev_started &&
2271 		(dev->data->rx_queue_state[rx_queue_id] !=
2272 			RTE_ETH_QUEUE_STATE_STOPPED))
2273 		return -EBUSY;
2274 
2275 	eth_dev_rxq_release(dev, rx_queue_id);
2276 
2277 	if (rx_conf == NULL)
2278 		rx_conf = &dev_info.default_rxconf;
2279 
2280 	local_conf = *rx_conf;
2281 
2282 	/*
2283 	 * If an offload has already been enabled in
2284 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2285 	 * so there is no need to enable it on this queue again.
2286 	 * The local_conf.offloads input to the underlying PMD only carries
2287 	 * those offloads which are enabled on this queue alone and
2288 	 * not on all queues.
2289 	 */
2290 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2291 
2292 	/*
2293 	 * Newly added offloads for this queue are those not enabled in
2294 	 * rte_eth_dev_configure() and they must be of the per-queue type.
2295 	 * A pure per-port offload can't be enabled on a queue while
2296 	 * disabled on another queue, and it can't be newly added on a
2297 	 * queue if it hasn't been enabled in rte_eth_dev_configure().
2298 	 */
2300 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2301 	     local_conf.offloads) {
2302 		RTE_ETHDEV_LOG_LINE(ERR,
2303 			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2304 			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
2305 			port_id, rx_queue_id, local_conf.offloads,
2306 			dev_info.rx_queue_offload_capa,
2307 			__func__);
2308 		return -EINVAL;
2309 	}
2310 
2311 	if (local_conf.share_group > 0 &&
2312 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2313 		RTE_ETHDEV_LOG_LINE(ERR,
2314 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share",
2315 			port_id, rx_queue_id, local_conf.share_group);
2316 		return -EINVAL;
2317 	}
2318 
2319 	/*
2320 	 * If LRO is enabled, check that the maximum aggregated packet
2321 	 * size is supported by the configured device. The real Ethernet
2322 	 * overhead length is needed to derive the maximum frame size.
2323 	 */
2324 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2325 		uint32_t overhead_len;
2326 		uint32_t max_rx_pktlen;
2327 		int ret;
2328 
2329 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2330 				dev_info.max_mtu);
2331 		max_rx_pktlen = dev->data->mtu + overhead_len;
2332 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2333 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2334 		ret = eth_dev_check_lro_pkt_size(port_id,
2335 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2336 				max_rx_pktlen,
2337 				dev_info.max_lro_pkt_size);
2338 		if (ret != 0)
2339 			return ret;
2340 	}
2341 
2342 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2343 					      socket_id, &local_conf, mp);
2344 	if (!ret) {
2345 		if (!dev->data->min_rx_buf_size ||
2346 		    dev->data->min_rx_buf_size > mbp_buf_size)
2347 			dev->data->min_rx_buf_size = mbp_buf_size;
2348 	}
2349 
2350 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2351 		rx_conf, ret);
2352 	return eth_err(port_id, ret);
2353 }
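
/*
 * Usage sketch (illustrative only; "example_rx_pool" and its sizing are
 * arbitrary): the common single-mempool case. Passing nb_rx_desc == 0
 * and a NULL rxconf selects the driver (or EAL fallback) defaults, as
 * implemented above.
 */
static __rte_unused int
example_rx_queue_setup(uint16_t port_id, uint16_t queue_id, int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -rte_errno;

	return rte_eth_rx_queue_setup(port_id, queue_id, 0,
				      (unsigned int)socket_id, NULL, mp);
}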
2354 
2355 int
2356 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2357 			       uint16_t nb_rx_desc,
2358 			       const struct rte_eth_hairpin_conf *conf)
2359 {
2360 	int ret;
2361 	struct rte_eth_dev *dev;
2362 	struct rte_eth_hairpin_cap cap;
2363 	int i;
2364 	int count;
2365 
2366 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2367 	dev = &rte_eth_devices[port_id];
2368 
2369 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2370 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
2371 		return -EINVAL;
2372 	}
2373 
2374 	if (conf == NULL) {
2375 		RTE_ETHDEV_LOG_LINE(ERR,
2376 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config",
2377 			port_id);
2378 		return -EINVAL;
2379 	}
2380 
2381 	if (conf->reserved != 0) {
2382 		RTE_ETHDEV_LOG_LINE(ERR,
2383 			       "Rx hairpin reserved field not zero");
2384 		return -EINVAL;
2385 	}
2386 
2387 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2388 	if (ret != 0)
2389 		return ret;
2390 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2391 		return -ENOTSUP;
2392 	/* if nb_rx_desc is zero, use the max number of descriptors from the driver. */
2393 	if (nb_rx_desc == 0)
2394 		nb_rx_desc = cap.max_nb_desc;
2395 	if (nb_rx_desc > cap.max_nb_desc) {
2396 		RTE_ETHDEV_LOG_LINE(ERR,
2397 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2398 			nb_rx_desc, cap.max_nb_desc);
2399 		return -EINVAL;
2400 	}
2401 	if (conf->peer_count > cap.max_rx_2_tx) {
2402 		RTE_ETHDEV_LOG_LINE(ERR,
2403 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2404 			conf->peer_count, cap.max_rx_2_tx);
2405 		return -EINVAL;
2406 	}
2407 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2408 		RTE_ETHDEV_LOG_LINE(ERR,
2409 			"Attempt to use locked device memory for Rx queue, which is not supported");
2410 		return -EINVAL;
2411 	}
2412 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2413 		RTE_ETHDEV_LOG_LINE(ERR,
2414 			"Attempt to use DPDK memory for Rx queue, which is not supported");
2415 		return -EINVAL;
2416 	}
2417 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2418 		RTE_ETHDEV_LOG_LINE(ERR,
2419 			"Attempt to use mutually exclusive memory settings for Rx queue");
2420 		return -EINVAL;
2421 	}
2422 	if (conf->force_memory &&
2423 	    !conf->use_locked_device_memory &&
2424 	    !conf->use_rte_memory) {
2425 		RTE_ETHDEV_LOG_LINE(ERR,
2426 			"Attempt to force Rx queue memory settings, but none is set");
2427 		return -EINVAL;
2428 	}
2429 	if (conf->peer_count == 0) {
2430 		RTE_ETHDEV_LOG_LINE(ERR,
2431 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2432 			conf->peer_count);
2433 		return -EINVAL;
2434 	}
2435 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2436 	     cap.max_nb_queues != UINT16_MAX; i++) {
2437 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2438 			count++;
2439 	}
2440 	if (count > cap.max_nb_queues) {
2441 		RTE_ETHDEV_LOG_LINE(ERR, "To many Rx hairpin queues max is %d",
2442 		cap.max_nb_queues);
2443 		return -EINVAL;
2444 	}
2445 	if (dev->data->dev_started)
2446 		return -EBUSY;
2447 	eth_dev_rxq_release(dev, rx_queue_id);
2448 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2449 						      nb_rx_desc, conf);
2450 	if (ret == 0)
2451 		dev->data->rx_queue_state[rx_queue_id] =
2452 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2453 	ret = eth_err(port_id, ret);
2454 
2455 	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2456 					     conf, ret);
2457 
2458 	return ret;
2459 }
2460 
2461 int
2462 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2463 		       uint16_t nb_tx_desc, unsigned int socket_id,
2464 		       const struct rte_eth_txconf *tx_conf)
2465 {
2466 	struct rte_eth_dev *dev;
2467 	struct rte_eth_dev_info dev_info;
2468 	struct rte_eth_txconf local_conf;
2469 	int ret;
2470 
2471 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472 	dev = &rte_eth_devices[port_id];
2473 
2474 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2475 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
2476 		return -EINVAL;
2477 	}
2478 
2479 	if (*dev->dev_ops->tx_queue_setup == NULL)
2480 		return -ENOTSUP;
2481 
2482 	if (tx_conf != NULL &&
2483 	   (tx_conf->reserved_64s[0] != 0 ||
2484 	    tx_conf->reserved_64s[1] != 0 ||
2485 	    tx_conf->reserved_ptrs[0] != NULL ||
2486 	    tx_conf->reserved_ptrs[1] != NULL)) {
2487 		RTE_ETHDEV_LOG_LINE(ERR, "Tx conf reserved fields not zero");
2488 		return -EINVAL;
2489 	}
2490 
2491 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2492 	if (ret != 0)
2493 		return ret;
2494 
2495 	/* Use default specified by driver, if nb_tx_desc is zero */
2496 	if (nb_tx_desc == 0) {
2497 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2498 		/* If driver default is zero, fall back on EAL default */
2499 		if (nb_tx_desc == 0)
2500 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2501 	}
2502 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2503 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2504 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2505 		RTE_ETHDEV_LOG_LINE(ERR,
2506 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu",
2507 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2508 			dev_info.tx_desc_lim.nb_min,
2509 			dev_info.tx_desc_lim.nb_align);
2510 		return -EINVAL;
2511 	}
2512 
2513 	if (dev->data->dev_started &&
2514 		!(dev_info.dev_capa &
2515 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2516 		return -EBUSY;
2517 
2518 	if (dev->data->dev_started &&
2519 		(dev->data->tx_queue_state[tx_queue_id] !=
2520 			RTE_ETH_QUEUE_STATE_STOPPED))
2521 		return -EBUSY;
2522 
2523 	eth_dev_txq_release(dev, tx_queue_id);
2524 
2525 	if (tx_conf == NULL)
2526 		tx_conf = &dev_info.default_txconf;
2527 
2528 	local_conf = *tx_conf;
2529 
2530 	/*
2531 	 * If an offload has already been enabled in
2532 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2533 	 * so there is no need to enable it on this queue again.
2534 	 * The local_conf.offloads input to the underlying PMD only carries
2535 	 * those offloads which are enabled on this queue alone and
2536 	 * not on all queues.
2537 	 */
2538 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2539 
2540 	/*
2541 	 * Newly added offloads for this queue are those not enabled in
2542 	 * rte_eth_dev_configure() and they must be of the per-queue type.
2543 	 * A pure per-port offload can't be enabled on a queue while
2544 	 * disabled on another queue, and it can't be newly added on a
2545 	 * queue if it hasn't been enabled in rte_eth_dev_configure().
2546 	 */
2548 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2549 	     local_conf.offloads) {
2550 		RTE_ETHDEV_LOG_LINE(ERR,
2551 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2552 			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
2553 			port_id, tx_queue_id, local_conf.offloads,
2554 			dev_info.tx_queue_offload_capa,
2555 			__func__);
2556 		return -EINVAL;
2557 	}
2558 
2559 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2560 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2561 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2562 }
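
/*
 * Usage sketch (illustrative only): Tx queues need no mempool; as on the
 * Rx side, nb_tx_desc == 0 and a NULL txconf fall back to the driver or
 * EAL defaults.
 */
static __rte_unused int
example_tx_queue_setup(uint16_t port_id, uint16_t queue_id,
		       unsigned int socket_id)
{
	return rte_eth_tx_queue_setup(port_id, queue_id, 0, socket_id, NULL);
}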
2563 
2564 int
2565 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2566 			       uint16_t nb_tx_desc,
2567 			       const struct rte_eth_hairpin_conf *conf)
2568 {
2569 	struct rte_eth_dev *dev;
2570 	struct rte_eth_hairpin_cap cap;
2571 	int i;
2572 	int count;
2573 	int ret;
2574 
2575 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2576 	dev = &rte_eth_devices[port_id];
2577 
2578 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2579 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
2580 		return -EINVAL;
2581 	}
2582 
2583 	if (conf == NULL) {
2584 		RTE_ETHDEV_LOG_LINE(ERR,
2585 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config",
2586 			port_id);
2587 		return -EINVAL;
2588 	}
2589 
2590 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2591 	if (ret != 0)
2592 		return ret;
2593 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2594 		return -ENOTSUP;
2595 	/* if nb_tx_desc is zero, use the max number of descriptors from the driver. */
2596 	if (nb_tx_desc == 0)
2597 		nb_tx_desc = cap.max_nb_desc;
2598 	if (nb_tx_desc > cap.max_nb_desc) {
2599 		RTE_ETHDEV_LOG_LINE(ERR,
2600 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2601 			nb_tx_desc, cap.max_nb_desc);
2602 		return -EINVAL;
2603 	}
2604 	if (conf->peer_count > cap.max_tx_2_rx) {
2605 		RTE_ETHDEV_LOG_LINE(ERR,
2606 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2607 			conf->peer_count, cap.max_tx_2_rx);
2608 		return -EINVAL;
2609 	}
2610 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2611 		RTE_ETHDEV_LOG_LINE(ERR,
2612 			"Attempt to use locked device memory for Tx queue, which is not supported");
2613 		return -EINVAL;
2614 	}
2615 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2616 		RTE_ETHDEV_LOG_LINE(ERR,
2617 			"Attempt to use DPDK memory for Tx queue, which is not supported");
2618 		return -EINVAL;
2619 	}
2620 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2621 		RTE_ETHDEV_LOG_LINE(ERR,
2622 			"Attempt to use mutually exclusive memory settings for Tx queue");
2623 		return -EINVAL;
2624 	}
2625 	if (conf->force_memory &&
2626 	    !conf->use_locked_device_memory &&
2627 	    !conf->use_rte_memory) {
2628 		RTE_ETHDEV_LOG_LINE(ERR,
2629 			"Attempt to force Tx queue memory settings, but none is set");
2630 		return -EINVAL;
2631 	}
2632 	if (conf->peer_count == 0) {
2633 		RTE_ETHDEV_LOG_LINE(ERR,
2634 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2635 			conf->peer_count);
2636 		return -EINVAL;
2637 	}
2638 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2639 	     cap.max_nb_queues != UINT16_MAX; i++) {
2640 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2641 			count++;
2642 	}
2643 	if (count > cap.max_nb_queues) {
2644 		RTE_ETHDEV_LOG_LINE(ERR, "To many Tx hairpin queues max is %d",
2645 		cap.max_nb_queues);
2646 		return -EINVAL;
2647 	}
2648 	if (dev->data->dev_started)
2649 		return -EBUSY;
2650 	eth_dev_txq_release(dev, tx_queue_id);
2651 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2652 		(dev, tx_queue_id, nb_tx_desc, conf);
2653 	if (ret == 0)
2654 		dev->data->tx_queue_state[tx_queue_id] =
2655 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2656 	ret = eth_err(port_id, ret);
2657 
2658 	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2659 					     conf, ret);
2660 
2661 	return ret;
2662 }
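
/*
 * Usage sketch (illustrative only, single-port loopback; the field usage
 * reflects the current rte_eth_hairpin_conf layout): peer Rx queue rxq
 * with Tx queue txq of the same port. Must run after the port is
 * configured but before it is started; nb_desc == 0 picks the driver
 * maximum, per the checks above.
 */
static __rte_unused int
example_hairpin_loopback(uint16_t port_id, uint16_t rxq, uint16_t txq)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.peers[0].port = port_id;
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, 0, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = rxq;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq, 0, &conf);
}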
2663 
2664 int
2665 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2666 {
2667 	struct rte_eth_dev *dev;
2668 	int ret;
2669 
2670 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2671 	dev = &rte_eth_devices[tx_port];
2672 
2673 	if (dev->data->dev_started == 0) {
2674 		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is not started", tx_port);
2675 		return -EBUSY;
2676 	}
2677 
2678 	if (*dev->dev_ops->hairpin_bind == NULL)
2679 		return -ENOTSUP;
2680 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2681 	if (ret != 0)
2682 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to bind hairpin Tx %d"
2683 			       " to Rx %d (%d - all ports)",
2684 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2685 
2686 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2687 
2688 	return ret;
2689 }
2690 
2691 int
2692 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2693 {
2694 	struct rte_eth_dev *dev;
2695 	int ret;
2696 
2697 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2698 	dev = &rte_eth_devices[tx_port];
2699 
2700 	if (dev->data->dev_started == 0) {
2701 		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is already stopped", tx_port);
2702 		return -EBUSY;
2703 	}
2704 
2705 	if (*dev->dev_ops->hairpin_unbind == NULL)
2706 		return -ENOTSUP;
2707 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2708 	if (ret != 0)
2709 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to unbind hairpin Tx %d"
2710 			       " from Rx %d (%d - all ports)",
2711 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2712 
2713 	rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret);
2714 
2715 	return ret;
2716 }
2717 
2718 int
2719 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2720 			       size_t len, uint32_t direction)
2721 {
2722 	struct rte_eth_dev *dev;
2723 	int ret;
2724 
2725 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2726 	dev = &rte_eth_devices[port_id];
2727 
2728 	if (peer_ports == NULL) {
2729 		RTE_ETHDEV_LOG_LINE(ERR,
2730 			"Cannot get ethdev port %u hairpin peer ports to NULL",
2731 			port_id);
2732 		return -EINVAL;
2733 	}
2734 
2735 	if (len == 0) {
2736 		RTE_ETHDEV_LOG_LINE(ERR,
2737 			"Cannot get ethdev port %u hairpin peer ports to array with zero size",
2738 			port_id);
2739 		return -EINVAL;
2740 	}
2741 
2742 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2743 		return -ENOTSUP;
2744 
2745 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2746 						      len, direction);
2747 	if (ret < 0)
2748 		RTE_ETHDEV_LOG_LINE(ERR, "Failed to get %d hairpin peer %s ports",
2749 			       port_id, direction ? "Rx" : "Tx");
2750 
2751 	rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len,
2752 					     direction, ret);
2753 
2754 	return ret;
2755 }
2756 
2757 void
2758 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2759 		void *userdata __rte_unused)
2760 {
2761 	rte_pktmbuf_free_bulk(pkts, unsent);
2762 
2763 	rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent);
2764 }
2765 
2766 void
2767 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2768 		void *userdata)
2769 {
2770 	uint64_t *count = userdata;
2771 
2772 	rte_pktmbuf_free_bulk(pkts, unsent);
2773 	*count += unsent;
2774 
2775 	rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count);
2776 }
2777 
2778 int
2779 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2780 		buffer_tx_error_fn cbfn, void *userdata)
2781 {
2782 	if (buffer == NULL) {
2783 		RTE_ETHDEV_LOG_LINE(ERR,
2784 			"Cannot set Tx buffer error callback to NULL buffer");
2785 		return -EINVAL;
2786 	}
2787 
2788 	buffer->error_callback = cbfn;
2789 	buffer->error_userdata = userdata;
2790 
2791 	rte_eth_trace_tx_buffer_set_err_callback(buffer);
2792 
2793 	return 0;
2794 }
2795 
2796 int
2797 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2798 {
2799 	int ret = 0;
2800 
2801 	if (buffer == NULL) {
2802 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL buffer");
2803 		return -EINVAL;
2804 	}
2805 
2806 	buffer->size = size;
2807 	if (buffer->error_callback == NULL) {
2808 		ret = rte_eth_tx_buffer_set_err_callback(
2809 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2810 	}
2811 
2812 	rte_eth_trace_tx_buffer_init(buffer, size, ret);
2813 
2814 	return ret;
2815 }
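
/*
 * Usage sketch (illustrative only; the 32-packet size is arbitrary):
 * buffer Tx packets and count drops on flush via the callback above.
 * Assumes the port and Tx queue are already started.
 */
static __rte_unused int
example_tx_buffering(uint16_t port_id, uint16_t queue_id,
		     struct rte_mbuf *m, uint64_t *drop_count)
{
	struct rte_eth_dev_tx_buffer *buffer;
	int ret;

	buffer = rte_zmalloc("example_tx_buffer",
			     RTE_ETH_TX_BUFFER_SIZE(32), 0);
	if (buffer == NULL)
		return -ENOMEM;

	ret = rte_eth_tx_buffer_init(buffer, 32);
	if (ret == 0)
		ret = rte_eth_tx_buffer_set_err_callback(buffer,
				rte_eth_tx_buffer_count_callback, drop_count);
	if (ret != 0) {
		rte_free(buffer);
		return ret;
	}

	/* Queued until 32 packets accumulate or the buffer is flushed. */
	rte_eth_tx_buffer(port_id, queue_id, buffer, m);
	rte_eth_tx_buffer_flush(port_id, queue_id, buffer);

	rte_free(buffer);
	return 0;
}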
2816 
2817 int
2818 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2819 {
2820 	struct rte_eth_dev *dev;
2821 	int ret;
2822 
2823 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2824 	dev = &rte_eth_devices[port_id];
2825 
2826 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2827 		return -ENOTSUP;
2828 
2829 	/* Call driver to free pending mbufs. */
2830 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2831 					       free_cnt);
2832 	ret = eth_err(port_id, ret);
2833 
2834 	rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret);
2835 
2836 	return ret;
2837 }
2838 
2839 int
2840 rte_eth_promiscuous_enable(uint16_t port_id)
2841 {
2842 	struct rte_eth_dev *dev;
2843 	int diag = 0;
2844 
2845 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2846 	dev = &rte_eth_devices[port_id];
2847 
2848 	if (dev->data->promiscuous == 1)
2849 		return 0;
2850 
2851 	if (*dev->dev_ops->promiscuous_enable == NULL)
2852 		return -ENOTSUP;
2853 
2854 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2855 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2856 
2857 	diag = eth_err(port_id, diag);
2858 
2859 	rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous,
2860 					 diag);
2861 
2862 	return diag;
2863 }
2864 
2865 int
2866 rte_eth_promiscuous_disable(uint16_t port_id)
2867 {
2868 	struct rte_eth_dev *dev;
2869 	int diag = 0;
2870 
2871 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2872 	dev = &rte_eth_devices[port_id];
2873 
2874 	if (dev->data->promiscuous == 0)
2875 		return 0;
2876 
2877 	if (*dev->dev_ops->promiscuous_disable == NULL)
2878 		return -ENOTSUP;
2879 
2880 	dev->data->promiscuous = 0;
2881 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2882 	if (diag != 0)
2883 		dev->data->promiscuous = 1;
2884 
2885 	diag = eth_err(port_id, diag);
2886 
2887 	rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous,
2888 					  diag);
2889 
2890 	return diag;
2891 }
2892 
2893 int
2894 rte_eth_promiscuous_get(uint16_t port_id)
2895 {
2896 	struct rte_eth_dev *dev;
2897 
2898 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2899 	dev = &rte_eth_devices[port_id];
2900 
2901 	rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous);
2902 
2903 	return dev->data->promiscuous;
2904 }
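
/*
 * Usage sketch (illustrative only): enable promiscuous mode and verify
 * the result via the cached flag read back by the getter above.
 */
static __rte_unused int
example_enable_promiscuous(uint16_t port_id)
{
	int ret = rte_eth_promiscuous_enable(port_id);

	if (ret != 0)
		return ret;
	return rte_eth_promiscuous_get(port_id) == 1 ? 0 : -EIO;
}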
2905 
2906 int
2907 rte_eth_allmulticast_enable(uint16_t port_id)
2908 {
2909 	struct rte_eth_dev *dev;
2910 	int diag;
2911 
2912 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2913 	dev = &rte_eth_devices[port_id];
2914 
2915 	if (dev->data->all_multicast == 1)
2916 		return 0;
2917 
2918 	if (*dev->dev_ops->allmulticast_enable == NULL)
2919 		return -ENOTSUP;
2920 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2921 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2922 
2923 	diag = eth_err(port_id, diag);
2924 
2925 	rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast,
2926 					  diag);
2927 
2928 	return diag;
2929 }
2930 
2931 int
2932 rte_eth_allmulticast_disable(uint16_t port_id)
2933 {
2934 	struct rte_eth_dev *dev;
2935 	int diag;
2936 
2937 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2938 	dev = &rte_eth_devices[port_id];
2939 
2940 	if (dev->data->all_multicast == 0)
2941 		return 0;
2942 
2943 	if (*dev->dev_ops->allmulticast_disable == NULL)
2944 		return -ENOTSUP;
2945 	dev->data->all_multicast = 0;
2946 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2947 	if (diag != 0)
2948 		dev->data->all_multicast = 1;
2949 
2950 	diag = eth_err(port_id, diag);
2951 
2952 	rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast,
2953 					   diag);
2954 
2955 	return diag;
2956 }
2957 
2958 int
2959 rte_eth_allmulticast_get(uint16_t port_id)
2960 {
2961 	struct rte_eth_dev *dev;
2962 
2963 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2964 	dev = &rte_eth_devices[port_id];
2965 
2966 	rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast);
2967 
2968 	return dev->data->all_multicast;
2969 }
2970 
2971 int
2972 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2973 {
2974 	struct rte_eth_dev *dev;
2975 
2976 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2977 	dev = &rte_eth_devices[port_id];
2978 
2979 	if (eth_link == NULL) {
2980 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
2981 			port_id);
2982 		return -EINVAL;
2983 	}
2984 
2985 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2986 		rte_eth_linkstatus_get(dev, eth_link);
2987 	else {
2988 		if (*dev->dev_ops->link_update == NULL)
2989 			return -ENOTSUP;
2990 		(*dev->dev_ops->link_update)(dev, 1);
2991 		*eth_link = dev->data->dev_link;
2992 	}
2993 
2994 	rte_eth_trace_link_get(port_id, eth_link);
2995 
2996 	return 0;
2997 }
2998 
2999 int
3000 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
3001 {
3002 	struct rte_eth_dev *dev;
3003 
3004 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3005 	dev = &rte_eth_devices[port_id];
3006 
3007 	if (eth_link == NULL) {
3008 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
3009 			port_id);
3010 		return -EINVAL;
3011 	}
3012 
3013 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
3014 		rte_eth_linkstatus_get(dev, eth_link);
3015 	else {
3016 		if (*dev->dev_ops->link_update == NULL)
3017 			return -ENOTSUP;
3018 		(*dev->dev_ops->link_update)(dev, 0);
3019 		*eth_link = dev->data->dev_link;
3020 	}
3021 
3022 	rte_eth_trace_link_get_nowait(port_id, eth_link);
3023 
3024 	return 0;
3025 }
3026 
3027 const char *
3028 rte_eth_link_speed_to_str(uint32_t link_speed)
3029 {
3030 	const char *ret;
3031 
3032 	switch (link_speed) {
3033 	case RTE_ETH_SPEED_NUM_NONE:
3034 		ret = "None";
3035 		break;
3036 	case RTE_ETH_SPEED_NUM_10M:
3037 		ret = "10 Mbps";
3038 		break;
3039 	case RTE_ETH_SPEED_NUM_100M:
3040 		ret = "100 Mbps";
3041 		break;
3042 	case RTE_ETH_SPEED_NUM_1G:
3043 		ret = "1 Gbps";
3044 		break;
3045 	case RTE_ETH_SPEED_NUM_2_5G:
3046 		ret = "2.5 Gbps";
3047 		break;
3048 	case RTE_ETH_SPEED_NUM_5G:
3049 		ret = "5 Gbps";
3050 		break;
3051 	case RTE_ETH_SPEED_NUM_10G:
3052 		ret = "10 Gbps";
3053 		break;
3054 	case RTE_ETH_SPEED_NUM_20G:
3055 		ret = "20 Gbps";
3056 		break;
3057 	case RTE_ETH_SPEED_NUM_25G:
3058 		ret = "25 Gbps";
3059 		break;
3060 	case RTE_ETH_SPEED_NUM_40G:
3061 		ret = "40 Gbps";
3062 		break;
3063 	case RTE_ETH_SPEED_NUM_50G:
3064 		ret = "50 Gbps";
3065 		break;
3066 	case RTE_ETH_SPEED_NUM_56G:
3067 		ret = "56 Gbps";
3068 		break;
3069 	case RTE_ETH_SPEED_NUM_100G:
3070 		ret = "100 Gbps";
3071 		break;
3072 	case RTE_ETH_SPEED_NUM_200G:
3073 		ret = "200 Gbps";
3074 		break;
3075 	case RTE_ETH_SPEED_NUM_400G:
3076 		ret = "400 Gbps";
3077 		break;
3078 	case RTE_ETH_SPEED_NUM_UNKNOWN:
3079 		ret = "Unknown";
3080 		break;
3081 	default:
3082 		ret = "Invalid";
3083 	}
3084 
3085 	rte_eth_trace_link_speed_to_str(link_speed, ret);
3086 
3087 	return ret;
3088 }
3089 
3090 int
3091 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
3092 {
3093 	int ret;
3094 
3095 	if (str == NULL) {
3096 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert link to NULL string");
3097 		return -EINVAL;
3098 	}
3099 
3100 	if (len == 0) {
3101 		RTE_ETHDEV_LOG_LINE(ERR,
3102 			"Cannot convert link to string with zero size");
3103 		return -EINVAL;
3104 	}
3105 
3106 	if (eth_link == NULL) {
3107 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert to string from NULL link");
3108 		return -EINVAL;
3109 	}
3110 
3111 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
3112 		ret = snprintf(str, len, "Link down");
3113 	else
3114 		ret = snprintf(str, len, "Link up at %s %s %s",
3115 			rte_eth_link_speed_to_str(eth_link->link_speed),
3116 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
3117 			"FDX" : "HDX",
3118 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
3119 			"Autoneg" : "Fixed");
3120 
3121 	rte_eth_trace_link_to_str(len, eth_link, str, ret);
3122 
3123 	return ret;
3124 }
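
/*
 * Usage sketch (illustrative only): query the link without waiting for
 * completion and log it with the string helpers above.
 */
static __rte_unused void
example_log_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;
	rte_eth_link_to_str(text, sizeof(text), &link);
	RTE_ETHDEV_LOG_LINE(INFO, "Port %u: %s", port_id, text);
}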
3125 
3126 int
3127 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
3128 {
3129 	struct rte_eth_dev *dev;
3130 	int ret;
3131 
3132 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3133 	dev = &rte_eth_devices[port_id];
3134 
3135 	if (stats == NULL) {
3136 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u stats to NULL",
3137 			port_id);
3138 		return -EINVAL;
3139 	}
3140 
3141 	memset(stats, 0, sizeof(*stats));
3142 
3143 	if (*dev->dev_ops->stats_get == NULL)
3144 		return -ENOTSUP;
3145 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
3146 	ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
3147 
3148 	rte_eth_trace_stats_get(port_id, stats, ret);
3149 
3150 	return ret;
3151 }
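
/*
 * Usage sketch (illustrative only): read the basic counters and log the
 * packet totals.
 */
static __rte_unused int
example_log_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;
	int ret;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret != 0)
		return ret;

	RTE_ETHDEV_LOG_LINE(INFO,
		"Port %u: rx %"PRIu64" pkts, tx %"PRIu64" pkts, %"PRIu64" missed",
		port_id, stats.ipackets, stats.opackets, stats.imissed);
	return 0;
}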
3152 
3153 int
3154 rte_eth_stats_reset(uint16_t port_id)
3155 {
3156 	struct rte_eth_dev *dev;
3157 	int ret;
3158 
3159 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3160 	dev = &rte_eth_devices[port_id];
3161 
3162 	if (*dev->dev_ops->stats_reset == NULL)
3163 		return -ENOTSUP;
3164 	ret = (*dev->dev_ops->stats_reset)(dev);
3165 	if (ret != 0)
3166 		return eth_err(port_id, ret);
3167 
3168 	dev->data->rx_mbuf_alloc_failed = 0;
3169 
3170 	rte_eth_trace_stats_reset(port_id);
3171 
3172 	return 0;
3173 }
3174 
3175 static inline int
3176 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
3177 {
3178 	uint16_t nb_rxqs, nb_txqs;
3179 	int count;
3180 
3181 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3182 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3183 
3184 	count = RTE_NB_STATS;
3185 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
3186 		count += nb_rxqs * RTE_NB_RXQ_STATS;
3187 		count += nb_txqs * RTE_NB_TXQ_STATS;
3188 	}
3189 
3190 	return count;
3191 }
3192 
3193 static int
3194 eth_dev_get_xstats_count(uint16_t port_id)
3195 {
3196 	struct rte_eth_dev *dev;
3197 	int count;
3198 
3199 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3200 	dev = &rte_eth_devices[port_id];
3201 	if (dev->dev_ops->xstats_get_names != NULL) {
3202 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
3203 		if (count < 0)
3204 			return eth_err(port_id, count);
3205 	} else
3206 		count = 0;
3207 
3209 	count += eth_dev_get_xstats_basic_count(dev);
3210 
3211 	return count;
3212 }
3213 
3214 int
3215 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3216 		uint64_t *id)
3217 {
3218 	int cnt_xstats, idx_xstat;
3219 
3220 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3221 
3222 	if (xstat_name == NULL) {
3223 		RTE_ETHDEV_LOG_LINE(ERR,
3224 			"Cannot get ethdev port %u xstats ID from NULL xstat name",
3225 			port_id);
3226 		return -ENOMEM;
3227 	}
3228 
3229 	if (id == NULL) {
3230 		RTE_ETHDEV_LOG_LINE(ERR,
3231 			"Cannot get ethdev port %u xstats ID to NULL",
3232 			port_id);
3233 		return -ENOMEM;
3234 	}
3235 
3236 	/* Get count */
3237 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
3238 	if (cnt_xstats  < 0) {
3239 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get count of xstats");
3240 		return -ENODEV;
3241 	}
3242 
3243 	/* Get id-name lookup table */
3244 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
3245 
3246 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
3247 			port_id, xstats_names, cnt_xstats, NULL)) {
3248 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get xstats lookup");
3249 		return -1;
3250 	}
3251 
3252 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
3253 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
3254 			*id = idx_xstat;
3255 
3256 			rte_eth_trace_xstats_get_id_by_name(port_id,
3257 							    xstat_name, *id);
3258 
3259 			return 0;
3260 		}
3261 	}
3262 
3263 	return -EINVAL;
3264 }
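
/*
 * Usage sketch (illustrative only): resolve a single xstat by name, then
 * read its current value through the by-id path.
 */
static __rte_unused int
example_read_xstat(uint16_t port_id, const char *name, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;

	/* One id in, one value out. */
	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	return ret < 0 ? ret : 0;
}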
3265 
3266 /* retrieve basic stats names */
3267 static int
3268 eth_basic_stats_get_names(struct rte_eth_dev *dev,
3269 	struct rte_eth_xstat_name *xstats_names)
3270 {
3271 	int cnt_used_entries = 0;
3272 	uint32_t idx, id_queue;
3273 	uint16_t num_q;
3274 
3275 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
3276 		strlcpy(xstats_names[cnt_used_entries].name,
3277 			eth_dev_stats_strings[idx].name,
3278 			sizeof(xstats_names[0].name));
3279 		cnt_used_entries++;
3280 	}
3281 
3282 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3283 		return cnt_used_entries;
3284 
3285 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3286 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3287 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
3288 			snprintf(xstats_names[cnt_used_entries].name,
3289 				sizeof(xstats_names[0].name),
3290 				"rx_q%u_%s",
3291 				id_queue, eth_dev_rxq_stats_strings[idx].name);
3292 			cnt_used_entries++;
3293 		}
3294 
3295 	}
3296 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3297 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3298 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
3299 			snprintf(xstats_names[cnt_used_entries].name,
3300 				sizeof(xstats_names[0].name),
3301 				"tx_q%u_%s",
3302 				id_queue, eth_dev_txq_stats_strings[idx].name);
3303 			cnt_used_entries++;
3304 		}
3305 	}
3306 	return cnt_used_entries;
3307 }
3308 
3309 /* retrieve ethdev extended statistics names */
3310 int
3311 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3312 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3313 	uint64_t *ids)
3314 {
3315 	struct rte_eth_xstat_name *xstats_names_copy;
3316 	unsigned int no_basic_stat_requested = 1;
3317 	unsigned int no_ext_stat_requested = 1;
3318 	unsigned int expected_entries;
3319 	unsigned int basic_count;
3320 	struct rte_eth_dev *dev;
3321 	unsigned int i;
3322 	int ret;
3323 
3324 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3325 	dev = &rte_eth_devices[port_id];
3326 
3327 	basic_count = eth_dev_get_xstats_basic_count(dev);
3328 	ret = eth_dev_get_xstats_count(port_id);
3329 	if (ret < 0)
3330 		return ret;
3331 	expected_entries = (unsigned int)ret;
3332 
3333 	/* Return max number of stats if no ids given */
3334 	if (!ids) {
3335 		if (!xstats_names)
3336 			return expected_entries;
3337 		else if (xstats_names && size < expected_entries)
3338 			return expected_entries;
3339 	}
3340 
3341 	if (ids && !xstats_names)
3342 		return -EINVAL;
3343 
3344 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3345 		uint64_t ids_copy[size];
3346 
3347 		for (i = 0; i < size; i++) {
3348 			if (ids[i] < basic_count) {
3349 				no_basic_stat_requested = 0;
3350 				break;
3351 			}
3352 
3353 			/*
3354 			 * Convert ids to xstats ids that PMD knows.
3355 			 * ids known by user are basic + extended stats.
3356 			 */
3357 			ids_copy[i] = ids[i] - basic_count;
3358 		}
3359 
3360 		if (no_basic_stat_requested)
3361 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3362 					ids_copy, xstats_names, size);
3363 	}
3364 
3365 	/* Retrieve all stats */
3366 	if (!ids) {
3367 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3368 				expected_entries);
3369 		if (num_stats < 0 || num_stats > (int)expected_entries)
3370 			return num_stats;
3371 		else
3372 			return expected_entries;
3373 	}
3374 
3375 	xstats_names_copy = calloc(expected_entries,
3376 		sizeof(struct rte_eth_xstat_name));
3377 
3378 	if (!xstats_names_copy) {
3379 		RTE_ETHDEV_LOG_LINE(ERR, "Can't allocate memory");
3380 		return -ENOMEM;
3381 	}
3382 
3383 	if (ids) {
3384 		for (i = 0; i < size; i++) {
3385 			if (ids[i] >= basic_count) {
3386 				no_ext_stat_requested = 0;
3387 				break;
3388 			}
3389 		}
3390 	}
3391 
3392 	/* Fill xstats_names_copy structure */
3393 	if (ids && no_ext_stat_requested) {
3394 		eth_basic_stats_get_names(dev, xstats_names_copy);
3395 	} else {
3396 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3397 			expected_entries);
3398 		if (ret < 0) {
3399 			free(xstats_names_copy);
3400 			return ret;
3401 		}
3402 	}
3403 
3404 	/* Filter stats */
3405 	for (i = 0; i < size; i++) {
3406 		if (ids[i] >= expected_entries) {
3407 			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
3408 			free(xstats_names_copy);
3409 			return -1;
3410 		}
3411 		xstats_names[i] = xstats_names_copy[ids[i]];
3412 
3413 		rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i],
3414 						     ids[i]);
3415 	}
3416 
3417 	free(xstats_names_copy);
3418 	return size;
3419 }
3420 
3421 int
3422 rte_eth_xstats_get_names(uint16_t port_id,
3423 	struct rte_eth_xstat_name *xstats_names,
3424 	unsigned int size)
3425 {
3426 	struct rte_eth_dev *dev;
3427 	int cnt_used_entries;
3428 	int cnt_expected_entries;
3429 	int cnt_driver_entries;
3430 	int i;
3431 
3432 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3433 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
3434 			(int)size < cnt_expected_entries)
3435 		return cnt_expected_entries;
3436 
3437 	/* port_id checked in eth_dev_get_xstats_count() */
3438 	dev = &rte_eth_devices[port_id];
3439 
3440 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3441 
3442 	if (dev->dev_ops->xstats_get_names != NULL) {
3443 		/* If there are any driver-specific xstats, append them
3444 		 * to the end of the list.
3445 		 */
3446 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3447 			dev,
3448 			xstats_names + cnt_used_entries,
3449 			size - cnt_used_entries);
3450 		if (cnt_driver_entries < 0)
3451 			return eth_err(port_id, cnt_driver_entries);
3452 		cnt_used_entries += cnt_driver_entries;
3453 	}
3454 
3455 	for (i = 0; i < cnt_used_entries; i++)
3456 		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3457 					       size, cnt_used_entries);
3458 
3459 	return cnt_used_entries;
3460 }
3461 
3462 
3463 static int
3464 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3465 {
3466 	struct rte_eth_dev *dev;
3467 	struct rte_eth_stats eth_stats;
3468 	unsigned int count = 0, i, q;
3469 	uint64_t val, *stats_ptr;
3470 	uint16_t nb_rxqs, nb_txqs;
3471 	int ret;
3472 
3473 	ret = rte_eth_stats_get(port_id, &eth_stats);
3474 	if (ret < 0)
3475 		return ret;
3476 
3477 	dev = &rte_eth_devices[port_id];
3478 
3479 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3480 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3481 
3482 	/* global stats */
3483 	for (i = 0; i < RTE_NB_STATS; i++) {
3484 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3485 					eth_dev_stats_strings[i].offset);
3486 		val = *stats_ptr;
3487 		xstats[count++].value = val;
3488 	}
3489 
3490 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3491 		return count;
3492 
3493 	/* per-rxq stats */
3494 	for (q = 0; q < nb_rxqs; q++) {
3495 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3496 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3497 					eth_dev_rxq_stats_strings[i].offset +
3498 					q * sizeof(uint64_t));
3499 			val = *stats_ptr;
3500 			xstats[count++].value = val;
3501 		}
3502 	}
3503 
3504 	/* per-txq stats */
3505 	for (q = 0; q < nb_txqs; q++) {
3506 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3507 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3508 					eth_dev_txq_stats_strings[i].offset +
3509 					q * sizeof(uint64_t));
3510 			val = *stats_ptr;
3511 			xstats[count++].value = val;
3512 		}
3513 	}
3514 	return count;
3515 }
3516 
3517 /* retrieve ethdev extended statistics */
3518 int
3519 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3520 			 uint64_t *values, unsigned int size)
3521 {
3522 	unsigned int no_basic_stat_requested = 1;
3523 	unsigned int no_ext_stat_requested = 1;
3524 	unsigned int num_xstats_filled;
3525 	unsigned int basic_count;
3526 	uint16_t expected_entries;
3527 	struct rte_eth_dev *dev;
3528 	unsigned int i;
3529 	int ret;
3530 
3531 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3532 	dev = &rte_eth_devices[port_id];
3533 
3534 	ret = eth_dev_get_xstats_count(port_id);
3535 	if (ret < 0)
3536 		return ret;
3537 	expected_entries = (uint16_t)ret;
3538 	struct rte_eth_xstat xstats[expected_entries];
3539 	basic_count = eth_dev_get_xstats_basic_count(dev);
3540 
3541 	/* Return max number of stats if no ids given */
3542 	if (!ids) {
3543 		if (!values)
3544 			return expected_entries;
3545 		else if (values && size < expected_entries)
3546 			return expected_entries;
3547 	}
3548 
3549 	if (ids && !values)
3550 		return -EINVAL;
3551 
3552 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3554 		uint64_t ids_copy[size];
3555 
3556 		for (i = 0; i < size; i++) {
3557 			if (ids[i] < basic_count) {
3558 				no_basic_stat_requested = 0;
3559 				break;
3560 			}
3561 
3562 			/*
3563 			 * Convert ids to xstats ids that PMD knows.
3564 			 * ids known by user are basic + extended stats.
3565 			 */
3566 			ids_copy[i] = ids[i] - basic_count;
3567 		}
3568 
3569 		if (no_basic_stat_requested)
3570 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3571 					values, size);
3572 	}
3573 
3574 	if (ids) {
3575 		for (i = 0; i < size; i++) {
3576 			if (ids[i] >= basic_count) {
3577 				no_ext_stat_requested = 0;
3578 				break;
3579 			}
3580 		}
3581 	}
3582 
3583 	/* Fill the xstats structure */
3584 	if (ids && no_ext_stat_requested)
3585 		ret = eth_basic_stats_get(port_id, xstats);
3586 	else
3587 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3588 
3589 	if (ret < 0)
3590 		return ret;
3591 	num_xstats_filled = (unsigned int)ret;
3592 
3593 	/* Return all stats */
3594 	if (!ids) {
3595 		for (i = 0; i < num_xstats_filled; i++)
3596 			values[i] = xstats[i].value;
3597 		return expected_entries;
3598 	}
3599 
3600 	/* Filter stats */
3601 	for (i = 0; i < size; i++) {
3602 		if (ids[i] >= expected_entries) {
3603 			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
3604 			return -1;
3605 		}
3606 		values[i] = xstats[ids[i]].value;
3607 	}
3608 
3609 	rte_eth_trace_xstats_get_by_id(port_id, ids, values, size);
3610 
3611 	return size;
3612 }
3613 
3614 int
3615 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3616 	unsigned int n)
3617 {
3618 	struct rte_eth_dev *dev;
3619 	unsigned int count, i;
3620 	signed int xcount = 0;
3621 	int ret;
3622 
3623 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3624 	if (xstats == NULL && n > 0)
3625 		return -EINVAL;
3626 	dev = &rte_eth_devices[port_id];
3627 
3628 	count = eth_dev_get_xstats_basic_count(dev);
3629 
3630 	/* implemented by the driver */
3631 	if (dev->dev_ops->xstats_get != NULL) {
3632 		/* Retrieve the xstats from the driver at the end of the
3633 		 * xstats struct.
3634 		 */
3635 		xcount = (*dev->dev_ops->xstats_get)(dev,
3636 				     (n > count) ? xstats + count : NULL,
3637 				     (n > count) ? n - count : 0);
3638 
3639 		if (xcount < 0)
3640 			return eth_err(port_id, xcount);
3641 	}
3642 
3643 	if (n < count + xcount || xstats == NULL)
3644 		return count + xcount;
3645 
3646 	/* now fill the xstats structure */
3647 	ret = eth_basic_stats_get(port_id, xstats);
3648 	if (ret < 0)
3649 		return ret;
3650 	count = ret;
3651 
3652 	for (i = 0; i < count; i++)
3653 		xstats[i].id = i;
3654 	/* add an offset to driver-specific stats */
3655 	for ( ; i < count + xcount; i++)
3656 		xstats[i].id += count;
3657 
3658 	for (i = 0; i < n; i++)
3659 		rte_eth_trace_xstats_get(port_id, xstats[i]);
3660 
3661 	return count + xcount;
3662 }
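
/*
 * Usage sketch (illustrative only): the canonical two-call pattern:
 * query the count with a NULL array, then allocate and fetch. The names
 * can be retrieved the same way via rte_eth_xstats_get_names().
 */
static __rte_unused int
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *xstats;
	int n, i;

	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return n;

	xstats = malloc(sizeof(*xstats) * n);
	if (xstats == NULL)
		return -ENOMEM;

	n = rte_eth_xstats_get(port_id, xstats, n);
	for (i = 0; i < n; i++)
		RTE_ETHDEV_LOG_LINE(INFO, "xstat %" PRIu64 " = %" PRIu64,
			xstats[i].id, xstats[i].value);

	free(xstats);
	return n < 0 ? n : 0;
}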
3663 
3664 /* reset ethdev extended statistics */
3665 int
3666 rte_eth_xstats_reset(uint16_t port_id)
3667 {
3668 	struct rte_eth_dev *dev;
3669 
3670 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3671 	dev = &rte_eth_devices[port_id];
3672 
3673 	/* implemented by the driver */
3674 	if (dev->dev_ops->xstats_reset != NULL) {
3675 		int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3676 
3677 		rte_eth_trace_xstats_reset(port_id, ret);
3678 
3679 		return ret;
3680 	}
3681 
3682 	/* fallback to default */
3683 	return rte_eth_stats_reset(port_id);
3684 }
3685 
3686 static int
3687 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3688 		uint8_t stat_idx, uint8_t is_rx)
3689 {
3690 	struct rte_eth_dev *dev;
3691 
3692 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3693 	dev = &rte_eth_devices[port_id];
3694 
3695 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3696 		return -EINVAL;
3697 
3698 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3699 		return -EINVAL;
3700 
3701 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3702 		return -EINVAL;
3703 
3704 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3705 		return -ENOTSUP;
3706 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3707 }
3708 
3709 int
3710 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3711 		uint8_t stat_idx)
3712 {
3713 	int ret;
3714 
3715 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3716 						tx_queue_id,
3717 						stat_idx, STAT_QMAP_TX));
3718 
3719 	rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id,
3720 						    stat_idx, ret);
3721 
3722 	return ret;
3723 }
3724 
3725 int
3726 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3727 		uint8_t stat_idx)
3728 {
3729 	int ret;
3730 
3731 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3732 						rx_queue_id,
3733 						stat_idx, STAT_QMAP_RX));
3734 
3735 	rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id,
3736 						    stat_idx, ret);
3737 
3738 	return ret;
3739 }
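
/*
 * Usage sketch (illustrative only; queue and counter ids are arbitrary):
 * map Rx queue 3 onto per-queue stats counter 0 so its counters appear
 * in rte_eth_stats.q_ipackets[0]. Only meaningful on PMDs implementing
 * queue_stats_mapping_set.
 */
static __rte_unused int
example_map_rxq_stats(uint16_t port_id)
{
	return rte_eth_dev_set_rx_queue_stats_mapping(port_id, 3, 0);
}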
3740 
3741 int
3742 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3743 {
3744 	struct rte_eth_dev *dev;
3745 	int ret;
3746 
3747 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3748 	dev = &rte_eth_devices[port_id];
3749 
3750 	if (fw_version == NULL && fw_size > 0) {
3751 		RTE_ETHDEV_LOG_LINE(ERR,
3752 			"Cannot get ethdev port %u FW version to NULL when string size is non zero",
3753 			port_id);
3754 		return -EINVAL;
3755 	}
3756 
3757 	if (*dev->dev_ops->fw_version_get == NULL)
3758 		return -ENOTSUP;
3759 	ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3760 							fw_version, fw_size));
3761 
3762 	rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret);
3763 
3764 	return ret;
3765 }
3766 
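/*
 * Illustrative sketch (editor's addition): fetch the firmware version into
 * a fixed buffer. By convention a positive return value is the buffer size
 * the driver would have needed, so the call can be retried with a larger
 * buffer; this sketch just reports truncation. Helper name is hypothetical.
 */
static __rte_unused void
example_print_fw_version(uint16_t port_id)
{
	char fw_version[128];
	int ret;

	ret = rte_eth_dev_fw_version_get(port_id, fw_version,
					 sizeof(fw_version));
	if (ret == 0)
		printf("port %u firmware: %s\n", port_id, fw_version);
	else if (ret > 0)
		printf("port %u firmware string truncated (need %d bytes)\n",
		       port_id, ret);
}
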
3767 int
3768 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3769 {
3770 	struct rte_eth_dev *dev;
3771 	const struct rte_eth_desc_lim lim = {
3772 		.nb_max = UINT16_MAX,
3773 		.nb_min = 0,
3774 		.nb_align = 1,
3775 		.nb_seg_max = UINT16_MAX,
3776 		.nb_mtu_seg_max = UINT16_MAX,
3777 	};
3778 	int diag;
3779 
3780 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3781 	dev = &rte_eth_devices[port_id];
3782 
3783 	if (dev_info == NULL) {
3784 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u info to NULL",
3785 			port_id);
3786 		return -EINVAL;
3787 	}
3788 
3789 	/*
3790 	 * Zero dev_info first: a caller that ignores the return status
3791 	 * must never see stale fields, even if the driver callback fails.
3792 	 */
3793 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3794 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3795 
3796 	dev_info->rx_desc_lim = lim;
3797 	dev_info->tx_desc_lim = lim;
3798 	dev_info->device = dev->device;
3799 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3800 		RTE_ETHER_CRC_LEN;
3801 	dev_info->max_mtu = UINT16_MAX;
3802 	dev_info->rss_algo_capa = RTE_ETH_HASH_ALGO_CAPA_MASK(DEFAULT);
3803 	dev_info->max_rx_bufsize = UINT32_MAX;
3804 
3805 	if (*dev->dev_ops->dev_infos_get == NULL)
3806 		return -ENOTSUP;
3807 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3808 	if (diag != 0) {
3809 		/* Cleanup already filled in device information */
3810 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3811 		return eth_err(port_id, diag);
3812 	}
3813 
3814 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3815 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3816 			RTE_MAX_QUEUES_PER_PORT);
3817 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3818 			RTE_MAX_QUEUES_PER_PORT);
3819 
3820 	dev_info->driver_name = dev->device->driver->name;
3821 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3822 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3823 
3824 	dev_info->dev_flags = &dev->data->dev_flags;
3825 
3826 	rte_ethdev_trace_info_get(port_id, dev_info);
3827 
3828 	return 0;
3829 }
3830 
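/*
 * Illustrative sketch (editor's addition): typical consumption of the
 * defaults filled in above. Because the structure is zeroed and the queue
 * maxima are capped against RTE_MAX_QUEUES_PER_PORT before returning,
 * callers can clamp their own queue counts directly against it. The helper
 * name is hypothetical.
 */
static __rte_unused int
example_clamp_queues(uint16_t port_id, uint16_t *nb_rxq, uint16_t *nb_txq)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	*nb_rxq = RTE_MIN(*nb_rxq, dev_info.max_rx_queues);
	*nb_txq = RTE_MIN(*nb_txq, dev_info.max_tx_queues);
	return 0;
}
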
3831 int
3832 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3833 {
3834 	struct rte_eth_dev *dev;
3835 
3836 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3837 	dev = &rte_eth_devices[port_id];
3838 
3839 	if (dev_conf == NULL) {
3840 		RTE_ETHDEV_LOG_LINE(ERR,
3841 			"Cannot get ethdev port %u configuration to NULL",
3842 			port_id);
3843 		return -EINVAL;
3844 	}
3845 
3846 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3847 
3848 	rte_ethdev_trace_conf_get(port_id, dev_conf);
3849 
3850 	return 0;
3851 }
3852 
3853 int
3854 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3855 				 uint32_t *ptypes, int num)
3856 {
3857 	size_t i;
3858 	int j;
3859 	struct rte_eth_dev *dev;
3860 	const uint32_t *all_ptypes;
3861 	size_t no_of_elements = 0;
3862 
3863 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3864 	dev = &rte_eth_devices[port_id];
3865 
3866 	if (ptypes == NULL && num > 0) {
3867 		RTE_ETHDEV_LOG_LINE(ERR,
3868 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero",
3869 			port_id);
3870 		return -EINVAL;
3871 	}
3872 
3873 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3874 		return 0;
3875 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
3876 							       &no_of_elements);
3877 
3878 	if (!all_ptypes)
3879 		return 0;
3880 
3881 	for (i = 0, j = 0; i < no_of_elements; ++i)
3882 		if (all_ptypes[i] & ptype_mask) {
3883 			if (j < num) {
3884 				ptypes[j] = all_ptypes[i];
3885 
3886 				rte_ethdev_trace_get_supported_ptypes(port_id,
3887 						j, num, ptypes[j]);
3888 			}
3889 			j++;
3890 		}
3891 
3892 	return j;
3893 }
3894 
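/*
 * Illustrative sketch (editor's addition): since the function above returns
 * the total number of matching ptypes even when the array is too small, the
 * count can be queried first with num == 0. The helper name is
 * hypothetical.
 */
static __rte_unused void
example_list_l4_ptypes(uint16_t port_id)
{
	uint32_t *ptypes;
	int count, i;

	count = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						 NULL, 0);
	if (count <= 0)
		return;

	ptypes = malloc(sizeof(*ptypes) * count);
	if (ptypes == NULL)
		return;

	count = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
						 ptypes, count);
	for (i = 0; i < count; i++)
		printf("supported L4 ptype: 0x%08" PRIx32 "\n", ptypes[i]);
	free(ptypes);
}
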
3895 int
3896 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3897 				 uint32_t *set_ptypes, unsigned int num)
3898 {
3899 	const uint32_t valid_ptype_masks[] = {
3900 		RTE_PTYPE_L2_MASK,
3901 		RTE_PTYPE_L3_MASK,
3902 		RTE_PTYPE_L4_MASK,
3903 		RTE_PTYPE_TUNNEL_MASK,
3904 		RTE_PTYPE_INNER_L2_MASK,
3905 		RTE_PTYPE_INNER_L3_MASK,
3906 		RTE_PTYPE_INNER_L4_MASK,
3907 	};
3908 	const uint32_t *all_ptypes;
3909 	struct rte_eth_dev *dev;
3910 	uint32_t unused_mask;
3911 	size_t i;
3912 	unsigned int j;
3913 	int ret;
3914 	size_t no_of_elements = 0;
3915 
3916 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3917 	dev = &rte_eth_devices[port_id];
3918 
3919 	if (num > 0 && set_ptypes == NULL) {
3920 		RTE_ETHDEV_LOG_LINE(ERR,
3921 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero",
3922 			port_id);
3923 		return -EINVAL;
3924 	}
3925 
3926 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3927 			*dev->dev_ops->dev_ptypes_set == NULL) {
3928 		ret = 0;
3929 		goto ptype_unknown;
3930 	}
3931 
3932 	if (ptype_mask == 0) {
3933 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3934 				ptype_mask);
3935 		goto ptype_unknown;
3936 	}
3937 
3938 	unused_mask = ptype_mask;
3939 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3940 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3941 		if (mask && mask != valid_ptype_masks[i]) {
3942 			ret = -EINVAL;
3943 			goto ptype_unknown;
3944 		}
3945 		unused_mask &= ~valid_ptype_masks[i];
3946 	}
3947 
3948 	if (unused_mask) {
3949 		ret = -EINVAL;
3950 		goto ptype_unknown;
3951 	}
3952 
3953 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev,
3954 							       &no_of_elements);
3955 	if (all_ptypes == NULL) {
3956 		ret = 0;
3957 		goto ptype_unknown;
3958 	}
3959 
3960 	/*
3961 	 * Accommodate as many set_ptypes as possible. If the supplied
3962 	 * set_ptypes array is too small, fill it partially.
3963 	 */
3964 	for (i = 0, j = 0; set_ptypes != NULL &&
3965 				(i < no_of_elements); ++i) {
3966 		if (ptype_mask & all_ptypes[i]) {
3967 			if (j < num - 1) {
3968 				set_ptypes[j] = all_ptypes[i];
3969 
3970 				rte_ethdev_trace_set_ptypes(port_id, j, num,
3971 						set_ptypes[j]);
3972 
3973 				j++;
3974 				continue;
3975 			}
3976 			break;
3977 		}
3978 	}
3979 
3980 	if (set_ptypes != NULL && j < num)
3981 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3982 
3983 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3984 
3985 ptype_unknown:
3986 	if (num > 0)
3987 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3988 
3989 	return ret;
3990 }
3991 
3992 int
3993 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3994 	unsigned int num)
3995 {
3996 	int32_t ret;
3997 	struct rte_eth_dev *dev;
3998 	struct rte_eth_dev_info dev_info;
3999 
4000 	if (ma == NULL) {
4001 		RTE_ETHDEV_LOG_LINE(ERR, "%s: invalid parameters", __func__);
4002 		return -EINVAL;
4003 	}
4004 
4005 	/* will check for us that port_id is a valid one */
4006 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4007 	if (ret != 0)
4008 		return ret;
4009 
4010 	dev = &rte_eth_devices[port_id];
4011 	num = RTE_MIN(dev_info.max_mac_addrs, num);
4012 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
4013 
4014 	rte_eth_trace_macaddrs_get(port_id, num);
4015 
4016 	return num;
4017 }
4018 
4019 int
4020 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
4021 {
4022 	struct rte_eth_dev *dev;
4023 
4024 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4025 	dev = &rte_eth_devices[port_id];
4026 
4027 	if (mac_addr == NULL) {
4028 		RTE_ETHDEV_LOG_LINE(ERR,
4029 			"Cannot get ethdev port %u MAC address to NULL",
4030 			port_id);
4031 		return -EINVAL;
4032 	}
4033 
4034 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
4035 
4036 	rte_eth_trace_macaddr_get(port_id, mac_addr);
4037 
4038 	return 0;
4039 }
4040 
4041 int
4042 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
4043 {
4044 	struct rte_eth_dev *dev;
4045 
4046 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4047 	dev = &rte_eth_devices[port_id];
4048 
4049 	if (mtu == NULL) {
4050 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u MTU to NULL",
4051 			port_id);
4052 		return -EINVAL;
4053 	}
4054 
4055 	*mtu = dev->data->mtu;
4056 
4057 	rte_ethdev_trace_get_mtu(port_id, *mtu);
4058 
4059 	return 0;
4060 }
4061 
4062 int
4063 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
4064 {
4065 	int ret;
4066 	struct rte_eth_dev_info dev_info;
4067 	struct rte_eth_dev *dev;
4068 
4069 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4070 	dev = &rte_eth_devices[port_id];
4071 	if (*dev->dev_ops->mtu_set == NULL)
4072 		return -ENOTSUP;
4073 
4074 	/*
4075 	 * Check if the device supports dev_infos_get, if it does not
4076 	 * skip min_mtu/max_mtu validation here as this requires values
4077 	 * that are populated within the call to rte_eth_dev_info_get()
4078 	 * which relies on dev->dev_ops->dev_infos_get.
4079 	 */
4080 	if (*dev->dev_ops->dev_infos_get != NULL) {
4081 		ret = rte_eth_dev_info_get(port_id, &dev_info);
4082 		if (ret != 0)
4083 			return ret;
4084 
4085 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
4086 		if (ret != 0)
4087 			return ret;
4088 	}
4089 
4090 	if (dev->data->dev_configured == 0) {
4091 		RTE_ETHDEV_LOG_LINE(ERR,
4092 			"Port %u must be configured before MTU set",
4093 			port_id);
4094 		return -EINVAL;
4095 	}
4096 
4097 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
4098 	if (ret == 0)
4099 		dev->data->mtu = mtu;
4100 
4101 	ret = eth_err(port_id, ret);
4102 
4103 	rte_ethdev_trace_set_mtu(port_id, mtu, ret);
4104 
4105 	return ret;
4106 }
4107 
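/*
 * Illustrative sketch (editor's addition): the validation above means a
 * plain set would suffice, but checking the reported range first gives the
 * user a clearer error message. The helper name is hypothetical.
 */
static __rte_unused int
example_set_mtu_checked(uint16_t port_id, uint16_t mtu)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) {
		printf("MTU %u out of range [%u, %u]\n",
		       mtu, dev_info.min_mtu, dev_info.max_mtu);
		return -EINVAL;
	}

	/* the port must already be configured, see the check above */
	return rte_eth_dev_set_mtu(port_id, mtu);
}
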
4108 int
4109 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
4110 {
4111 	struct rte_eth_dev *dev;
4112 	int ret;
4113 
4114 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4115 	dev = &rte_eth_devices[port_id];
4116 
4117 	if (!(dev->data->dev_conf.rxmode.offloads &
4118 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
4119 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: VLAN-filtering disabled",
4120 			port_id);
4121 		return -ENOSYS;
4122 	}
4123 
4124 	if (vlan_id > 4095) {
4125 		RTE_ETHDEV_LOG_LINE(ERR, "Port_id=%u invalid vlan_id=%u > 4095",
4126 			port_id, vlan_id);
4127 		return -EINVAL;
4128 	}
4129 	if (*dev->dev_ops->vlan_filter_set == NULL)
4130 		return -ENOTSUP;
4131 
4132 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
4133 	if (ret == 0) {
4134 		struct rte_vlan_filter_conf *vfc;
4135 		int vidx;
4136 		int vbit;
4137 
4138 		vfc = &dev->data->vlan_filter_conf;
4139 		vidx = vlan_id / 64;
4140 		vbit = vlan_id % 64;
4141 
4142 		if (on)
4143 			vfc->ids[vidx] |= RTE_BIT64(vbit);
4144 		else
4145 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
4146 	}
4147 
4148 	ret = eth_err(port_id, ret);
4149 
4150 	rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret);
4151 
4152 	return ret;
4153 }
4154 
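/*
 * Illustrative sketch (editor's addition): the function above refuses to
 * run unless RTE_ETH_RX_OFFLOAD_VLAN_FILTER is enabled in the port's Rx
 * offload configuration, so a typical caller whitelists IDs only after
 * configuring the port with that offload. The helper name is hypothetical.
 */
static __rte_unused int
example_allow_vlans(uint16_t port_id, const uint16_t *vlan_ids, int nb)
{
	int i, ret;

	for (i = 0; i < nb; i++) {
		/* on != 0 adds the ID to the filter, 0 removes it */
		ret = rte_eth_dev_vlan_filter(port_id, vlan_ids[i], 1);
		if (ret != 0)
			return ret;
	}
	return 0;
}
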
4155 int
4156 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
4157 				    int on)
4158 {
4159 	struct rte_eth_dev *dev;
4160 
4161 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4162 	dev = &rte_eth_devices[port_id];
4163 
4164 	if (rx_queue_id >= dev->data->nb_rx_queues) {
4165 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_queue_id=%u", rx_queue_id);
4166 		return -EINVAL;
4167 	}
4168 
4169 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
4170 		return -ENOTSUP;
4171 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
4172 
4173 	rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on);
4174 
4175 	return 0;
4176 }
4177 
4178 int
4179 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4180 				enum rte_vlan_type vlan_type,
4181 				uint16_t tpid)
4182 {
4183 	struct rte_eth_dev *dev;
4184 	int ret;
4185 
4186 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4187 	dev = &rte_eth_devices[port_id];
4188 
4189 	if (*dev->dev_ops->vlan_tpid_set == NULL)
4190 		return -ENOTSUP;
4191 	ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
4192 							      tpid));
4193 
4194 	rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret);
4195 
4196 	return ret;
4197 }
4198 
4199 int
4200 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
4201 {
4202 	struct rte_eth_dev_info dev_info;
4203 	struct rte_eth_dev *dev;
4204 	int ret = 0;
4205 	int mask = 0;
4206 	int cur, org = 0;
4207 	uint64_t orig_offloads;
4208 	uint64_t dev_offloads;
4209 	uint64_t new_offloads;
4210 
4211 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4212 	dev = &rte_eth_devices[port_id];
4213 
4214 	/* save original values in case of failure */
4215 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
4216 	dev_offloads = orig_offloads;
4217 
4218 	/* check which option changed by application */
4219 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
4220 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
4221 	if (cur != org) {
4222 		if (cur)
4223 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4224 		else
4225 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4226 		mask |= RTE_ETH_VLAN_STRIP_MASK;
4227 	}
4228 
4229 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
4230 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
4231 	if (cur != org) {
4232 		if (cur)
4233 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4234 		else
4235 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4236 		mask |= RTE_ETH_VLAN_FILTER_MASK;
4237 	}
4238 
4239 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
4240 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
4241 	if (cur != org) {
4242 		if (cur)
4243 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4244 		else
4245 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4246 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
4247 	}
4248 
4249 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
4250 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
4251 	if (cur != org) {
4252 		if (cur)
4253 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4254 		else
4255 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4256 		mask |= RTE_ETH_QINQ_STRIP_MASK;
4257 	}
4258 
4259 	/* no change */
4260 	if (mask == 0)
4261 		return ret;
4262 
4263 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4264 	if (ret != 0)
4265 		return ret;
4266 
4267 	/* Rx VLAN offloading must be within its device capabilities */
4268 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
4269 		new_offloads = dev_offloads & ~orig_offloads;
4270 		RTE_ETHDEV_LOG_LINE(ERR,
4271 			"Ethdev port_id=%u newly requested VLAN offloads "
4272 			"0x%" PRIx64 " must be within Rx offloads capabilities "
4273 			"0x%" PRIx64 " in %s()",
4274 			port_id, new_offloads, dev_info.rx_offload_capa,
4275 			__func__);
4276 		return -EINVAL;
4277 	}
4278 
4279 	if (*dev->dev_ops->vlan_offload_set == NULL)
4280 		return -ENOTSUP;
4281 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
4282 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
4283 	if (ret) {
4284 		/* hit an error, restore original values */
4285 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
4286 	}
4287 
4288 	ret = eth_err(port_id, ret);
4289 
4290 	rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret);
4291 
4292 	return ret;
4293 }
4294 
4295 int
4296 rte_eth_dev_get_vlan_offload(uint16_t port_id)
4297 {
4298 	struct rte_eth_dev *dev;
4299 	uint64_t *dev_offloads;
4300 	int ret = 0;
4301 
4302 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4303 	dev = &rte_eth_devices[port_id];
4304 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
4305 
4306 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4307 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
4308 
4309 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4310 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
4311 
4312 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
4313 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
4314 
4315 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4316 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
4317 
4318 	rte_ethdev_trace_get_vlan_offload(port_id, ret);
4319 
4320 	return ret;
4321 }
4322 
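/*
 * Illustrative sketch (editor's addition): the getter above returns the
 * RTE_ETH_*_OFFLOAD mask consumed by rte_eth_dev_set_vlan_offload(), which
 * makes toggling a single offload a get-modify-set. The helper name is
 * hypothetical.
 */
static __rte_unused int
example_toggle_vlan_strip(uint16_t port_id, int enable)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;

	if (enable)
		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	else
		mask &= ~RTE_ETH_VLAN_STRIP_OFFLOAD;

	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
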
4323 int
4324 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
4325 {
4326 	struct rte_eth_dev *dev;
4327 	int ret;
4328 
4329 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4330 	dev = &rte_eth_devices[port_id];
4331 
4332 	if (*dev->dev_ops->vlan_pvid_set == NULL)
4333 		return -ENOTSUP;
4334 	ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
4335 
4336 	rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret);
4337 
4338 	return ret;
4339 }
4340 
4341 int
4342 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4343 {
4344 	struct rte_eth_dev *dev;
4345 	int ret;
4346 
4347 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4348 	dev = &rte_eth_devices[port_id];
4349 
4350 	if (fc_conf == NULL) {
4351 		RTE_ETHDEV_LOG_LINE(ERR,
4352 			"Cannot get ethdev port %u flow control config to NULL",
4353 			port_id);
4354 		return -EINVAL;
4355 	}
4356 
4357 	if (*dev->dev_ops->flow_ctrl_get == NULL)
4358 		return -ENOTSUP;
4359 	memset(fc_conf, 0, sizeof(*fc_conf));
4360 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
4361 
4362 	rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret);
4363 
4364 	return ret;
4365 }
4366 
4367 int
4368 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4369 {
4370 	struct rte_eth_dev *dev;
4371 	int ret;
4372 
4373 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4374 	dev = &rte_eth_devices[port_id];
4375 
4376 	if (fc_conf == NULL) {
4377 		RTE_ETHDEV_LOG_LINE(ERR,
4378 			"Cannot set ethdev port %u flow control from NULL config",
4379 			port_id);
4380 		return -EINVAL;
4381 	}
4382 
4383 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
4384 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid send_xon, only 0/1 allowed");
4385 		return -EINVAL;
4386 	}
4387 
4388 	if (*dev->dev_ops->flow_ctrl_set == NULL)
4389 		return -ENOTSUP;
4390 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
4391 
4392 	rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret);
4393 
4394 	return ret;
4395 }
4396 
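/*
 * Illustrative sketch (editor's addition): because the setter takes a full
 * rte_eth_fc_conf, the usual way to change one field is a read-modify-write
 * through the getter above. The helper name is hypothetical.
 */
static __rte_unused int
example_enable_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret;

	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	if (ret != 0)
		return ret;

	/* enable pause frames in both directions, keep other fields */
	fc_conf.mode = RTE_ETH_FC_FULL;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
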
4397 int
4398 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4399 				   struct rte_eth_pfc_conf *pfc_conf)
4400 {
4401 	struct rte_eth_dev *dev;
4402 	int ret;
4403 
4404 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4405 	dev = &rte_eth_devices[port_id];
4406 
4407 	if (pfc_conf == NULL) {
4408 		RTE_ETHDEV_LOG_LINE(ERR,
4409 			"Cannot set ethdev port %u priority flow control from NULL config",
4410 			port_id);
4411 		return -EINVAL;
4412 	}
4413 
4414 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
4415 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid priority, only 0-7 allowed");
4416 		return -EINVAL;
4417 	}
4418 
4419 	/* High water, low water validation are device specific */
4420 	if (*dev->dev_ops->priority_flow_ctrl_set == NULL)
4421 		return -ENOTSUP;
4422 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4423 			       (dev, pfc_conf));
4424 
4425 	rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret);
4426 
4427 	return ret;
4428 }
4429 
4430 static int
4431 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4432 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4433 {
4434 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
4435 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4436 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
4437 			RTE_ETHDEV_LOG_LINE(ERR,
4438 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d",
4439 				pfc_queue_conf->rx_pause.tx_qid,
4440 				dev_info->nb_tx_queues);
4441 			return -EINVAL;
4442 		}
4443 
4444 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
4445 			RTE_ETHDEV_LOG_LINE(ERR,
4446 				"PFC TC not in range for Rx pause requested:%d max:%d",
4447 				pfc_queue_conf->rx_pause.tc, tc_max);
4448 			return -EINVAL;
4449 		}
4450 	}
4451 
4452 	return 0;
4453 }
4454 
4455 static int
4456 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4457 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4458 {
4459 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
4460 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4461 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
4462 			RTE_ETHDEV_LOG_LINE(ERR,
4463 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d",
4464 				pfc_queue_conf->tx_pause.rx_qid,
4465 				dev_info->nb_rx_queues);
4466 			return -EINVAL;
4467 		}
4468 
4469 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
4470 			RTE_ETHDEV_LOG_LINE(ERR,
4471 				"PFC TC not in range for Tx pause requested:%d max:%d",
4472 				pfc_queue_conf->tx_pause.tc, tc_max);
4473 			return -EINVAL;
4474 		}
4475 	}
4476 
4477 	return 0;
4478 }
4479 
4480 int
4481 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4482 		struct rte_eth_pfc_queue_info *pfc_queue_info)
4483 {
4484 	struct rte_eth_dev *dev;
4485 	int ret;
4486 
4487 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4488 	dev = &rte_eth_devices[port_id];
4489 
4490 	if (pfc_queue_info == NULL) {
4491 		RTE_ETHDEV_LOG_LINE(ERR, "PFC info param is NULL for port (%u)",
4492 			port_id);
4493 		return -EINVAL;
4494 	}
4495 
4496 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
4497 		return -ENOTSUP;
4498 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
4499 			(dev, pfc_queue_info));
4500 
4501 	rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id,
4502 						pfc_queue_info, ret);
4503 
4504 	return ret;
4505 }
4506 
4507 int
4508 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4509 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4510 {
4511 	struct rte_eth_pfc_queue_info pfc_info;
4512 	struct rte_eth_dev_info dev_info;
4513 	struct rte_eth_dev *dev;
4514 	int ret;
4515 
4516 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4517 	dev = &rte_eth_devices[port_id];
4518 
4519 	if (pfc_queue_conf == NULL) {
4520 		RTE_ETHDEV_LOG_LINE(ERR, "PFC parameters are NULL for port (%u)",
4521 			port_id);
4522 		return -EINVAL;
4523 	}
4524 
4525 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4526 	if (ret != 0)
4527 		return ret;
4528 
4529 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
4530 	if (ret != 0)
4531 		return ret;
4532 
4533 	if (pfc_info.tc_max == 0) {
4534 		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port %u does not support PFC TC values",
4535 			port_id);
4536 		return -ENOTSUP;
4537 	}
4538 
4539 	/* Check whether the requested mode is supported */
4540 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
4541 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
4542 		RTE_ETHDEV_LOG_LINE(ERR, "PFC Tx pause unsupported for port (%d)",
4543 			port_id);
4544 		return -EINVAL;
4545 	}
4546 
4547 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
4548 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
4549 		RTE_ETHDEV_LOG_LINE(ERR, "PFC Rx pause unsupported for port (%d)",
4550 			port_id);
4551 		return -EINVAL;
4552 	}
4553 
4554 	/* Validate Rx pause parameters */
4555 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4556 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
4557 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
4558 				pfc_queue_conf);
4559 		if (ret != 0)
4560 			return ret;
4561 	}
4562 
4563 	/* Validate Tx pause parameters */
4564 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4565 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
4566 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
4567 				pfc_queue_conf);
4568 		if (ret != 0)
4569 			return ret;
4570 	}
4571 
4572 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4573 		return -ENOTSUP;
4574 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
4575 			(dev, pfc_queue_conf));
4576 
4577 	rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id,
4578 						pfc_queue_conf, ret);
4579 
4580 	return ret;
4581 }
4582 
4583 static int
4584 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4585 			uint16_t reta_size)
4586 {
4587 	uint16_t i, num;
4588 
4589 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4590 	for (i = 0; i < num; i++) {
4591 		if (reta_conf[i].mask)
4592 			return 0;
4593 	}
4594 
4595 	return -EINVAL;
4596 }
4597 
4598 static int
4599 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4600 			 uint16_t reta_size,
4601 			 uint16_t max_rxq)
4602 {
4603 	uint16_t i, idx, shift;
4604 
4605 	if (max_rxq == 0) {
4606 		RTE_ETHDEV_LOG_LINE(ERR, "No receive queue is available");
4607 		return -EINVAL;
4608 	}
4609 
4610 	for (i = 0; i < reta_size; i++) {
4611 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4612 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4613 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4614 			(reta_conf[idx].reta[shift] >= max_rxq)) {
4615 			RTE_ETHDEV_LOG_LINE(ERR,
4616 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u",
4617 				idx, shift,
4618 				reta_conf[idx].reta[shift], max_rxq);
4619 			return -EINVAL;
4620 		}
4621 	}
4622 
4623 	return 0;
4624 }
4625 
4626 int
4627 rte_eth_dev_rss_reta_update(uint16_t port_id,
4628 			    struct rte_eth_rss_reta_entry64 *reta_conf,
4629 			    uint16_t reta_size)
4630 {
4631 	enum rte_eth_rx_mq_mode mq_mode;
4632 	struct rte_eth_dev *dev;
4633 	int ret;
4634 
4635 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4636 	dev = &rte_eth_devices[port_id];
4637 
4638 	if (reta_conf == NULL) {
4639 		RTE_ETHDEV_LOG_LINE(ERR,
4640 			"Cannot update ethdev port %u RSS RETA to NULL",
4641 			port_id);
4642 		return -EINVAL;
4643 	}
4644 
4645 	if (reta_size == 0) {
4646 		RTE_ETHDEV_LOG_LINE(ERR,
4647 			"Cannot update ethdev port %u RSS RETA with zero size",
4648 			port_id);
4649 		return -EINVAL;
4650 	}
4651 
4652 	/* Check mask bits */
4653 	ret = eth_check_reta_mask(reta_conf, reta_size);
4654 	if (ret < 0)
4655 		return ret;
4656 
4657 	/* Check entry value */
4658 	ret = eth_check_reta_entry(reta_conf, reta_size,
4659 				dev->data->nb_rx_queues);
4660 	if (ret < 0)
4661 		return ret;
4662 
4663 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4664 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4665 		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
4666 		return -ENOTSUP;
4667 	}
4668 
4669 	if (*dev->dev_ops->reta_update == NULL)
4670 		return -ENOTSUP;
4671 	ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4672 							    reta_size));
4673 
4674 	rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret);
4675 
4676 	return ret;
4677 }
4678 
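/*
 * Illustrative sketch (editor's addition): build a redirection table that
 * round-robins all reta_size slots over nb_queues Rx queues, using the
 * 64-entry group layout validated above. Assumes reta_size comes from
 * dev_info.reta_size; the helper name is hypothetical.
 */
static __rte_unused int
example_spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64
		reta_conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	if (reta_size > RTE_ETH_RSS_RETA_SIZE_512 || nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= RTE_BIT64(shift);
		reta_conf[idx].reta[shift] = i % nb_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
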
4679 int
4680 rte_eth_dev_rss_reta_query(uint16_t port_id,
4681 			   struct rte_eth_rss_reta_entry64 *reta_conf,
4682 			   uint16_t reta_size)
4683 {
4684 	struct rte_eth_dev *dev;
4685 	int ret;
4686 
4687 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4688 	dev = &rte_eth_devices[port_id];
4689 
4690 	if (reta_conf == NULL) {
4691 		RTE_ETHDEV_LOG_LINE(ERR,
4692 			"Cannot query ethdev port %u RSS RETA from NULL config",
4693 			port_id);
4694 		return -EINVAL;
4695 	}
4696 
4697 	/* Check mask bits */
4698 	ret = eth_check_reta_mask(reta_conf, reta_size);
4699 	if (ret < 0)
4700 		return ret;
4701 
4702 	if (*dev->dev_ops->reta_query == NULL)
4703 		return -ENOTSUP;
4704 	ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4705 							   reta_size));
4706 
4707 	rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret);
4708 
4709 	return ret;
4710 }
4711 
4712 int
4713 rte_eth_dev_rss_hash_update(uint16_t port_id,
4714 			    struct rte_eth_rss_conf *rss_conf)
4715 {
4716 	struct rte_eth_dev *dev;
4717 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4718 	enum rte_eth_rx_mq_mode mq_mode;
4719 	int ret;
4720 
4721 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4722 	dev = &rte_eth_devices[port_id];
4723 
4724 	if (rss_conf == NULL) {
4725 		RTE_ETHDEV_LOG_LINE(ERR,
4726 			"Cannot update ethdev port %u RSS hash from NULL config",
4727 			port_id);
4728 		return -EINVAL;
4729 	}
4730 
4731 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4732 	if (ret != 0)
4733 		return ret;
4734 
4735 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4736 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4737 	    dev_info.flow_type_rss_offloads) {
4738 		RTE_ETHDEV_LOG_LINE(ERR,
4739 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
4740 			port_id, rss_conf->rss_hf,
4741 			dev_info.flow_type_rss_offloads);
4742 		return -EINVAL;
4743 	}
4744 
4745 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4746 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4747 		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
4748 		return -ENOTSUP;
4749 	}
4750 
4751 	if (rss_conf->rss_key != NULL &&
4752 	    rss_conf->rss_key_len != dev_info.hash_key_size) {
4753 		RTE_ETHDEV_LOG_LINE(ERR,
4754 			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
4755 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
4756 		return -EINVAL;
4757 	}
4758 
4759 	if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
4760 	    (dev_info.rss_algo_capa &
4761 	     RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) {
4762 		RTE_ETHDEV_LOG_LINE(ERR,
4763 			"Ethdev port_id=%u configured RSS hash algorithm (%u) "
4764 			"is not in the algorithm capability (0x%" PRIx32 ")",
4765 			port_id, rss_conf->algorithm, dev_info.rss_algo_capa);
4766 		return -EINVAL;
4767 	}
4768 
4769 	if (*dev->dev_ops->rss_hash_update == NULL)
4770 		return -ENOTSUP;
4771 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4772 								rss_conf));
4773 
4774 	rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret);
4775 
4776 	return ret;
4777 }
4778 
4779 int
4780 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4781 			      struct rte_eth_rss_conf *rss_conf)
4782 {
4783 	struct rte_eth_dev_info dev_info = { 0 };
4784 	struct rte_eth_dev *dev;
4785 	int ret;
4786 
4787 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4788 	dev = &rte_eth_devices[port_id];
4789 
4790 	if (rss_conf == NULL) {
4791 		RTE_ETHDEV_LOG_LINE(ERR,
4792 			"Cannot get ethdev port %u RSS hash config to NULL",
4793 			port_id);
4794 		return -EINVAL;
4795 	}
4796 
4797 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4798 	if (ret != 0)
4799 		return ret;
4800 
4801 	if (rss_conf->rss_key != NULL &&
4802 	    rss_conf->rss_key_len < dev_info.hash_key_size) {
4803 		RTE_ETHDEV_LOG_LINE(ERR,
4804 			"Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u",
4805 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
4806 		return -EINVAL;
4807 	}
4808 
4809 	rss_conf->algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT;
4810 
4811 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4812 		return -ENOTSUP;
4813 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4814 								  rss_conf));
4815 
4816 	rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret);
4817 
4818 	return ret;
4819 }
4820 
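/*
 * Illustrative sketch (editor's addition): retrieve the active RSS key by
 * supplying a buffer at least dev_info.hash_key_size bytes long, per the
 * length check above. The helper name is hypothetical.
 */
static __rte_unused int
example_get_rss_key(uint16_t port_id, uint8_t *key, uint8_t key_len)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = key_len,
	};

	return rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
}
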
4821 const char *
4822 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo)
4823 {
4824 	const char *name = "Unknown function";
4825 	unsigned int i;
4826 
4827 	for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) {
4828 		if (rss_algo == rte_eth_dev_rss_algo_names[i].algo)
4829 			return rte_eth_dev_rss_algo_names[i].name;
4830 	}
4831 
4832 	return name;
4833 }
4834 
4835 int
4836 rte_eth_find_rss_algo(const char *name, uint32_t *algo)
4837 {
4838 	unsigned int i;
4839 
4840 	for (i = 0; i < RTE_DIM(rte_eth_dev_rss_algo_names); i++) {
4841 		if (strcmp(name, rte_eth_dev_rss_algo_names[i].name) == 0) {
4842 			*algo = rte_eth_dev_rss_algo_names[i].algo;
4843 			return 0;
4844 		}
4845 	}
4846 
4847 	return -EINVAL;
4848 }
4849 
4850 int
4851 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4852 				struct rte_eth_udp_tunnel *udp_tunnel)
4853 {
4854 	struct rte_eth_dev *dev;
4855 	int ret;
4856 
4857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4858 	dev = &rte_eth_devices[port_id];
4859 
4860 	if (udp_tunnel == NULL) {
4861 		RTE_ETHDEV_LOG_LINE(ERR,
4862 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel",
4863 			port_id);
4864 		return -EINVAL;
4865 	}
4866 
4867 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4868 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
4869 		return -EINVAL;
4870 	}
4871 
4872 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4873 		return -ENOTSUP;
4874 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4875 								udp_tunnel));
4876 
4877 	rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret);
4878 
4879 	return ret;
4880 }
4881 
4882 int
4883 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4884 				   struct rte_eth_udp_tunnel *udp_tunnel)
4885 {
4886 	struct rte_eth_dev *dev;
4887 	int ret;
4888 
4889 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4890 	dev = &rte_eth_devices[port_id];
4891 
4892 	if (udp_tunnel == NULL) {
4893 		RTE_ETHDEV_LOG_LINE(ERR,
4894 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel",
4895 			port_id);
4896 		return -EINVAL;
4897 	}
4898 
4899 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4900 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
4901 		return -EINVAL;
4902 	}
4903 
4904 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4905 		return -ENOTSUP;
4906 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4907 								udp_tunnel));
4908 
4909 	rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret);
4910 
4911 	return ret;
4912 }
4913 
4914 int
4915 rte_eth_led_on(uint16_t port_id)
4916 {
4917 	struct rte_eth_dev *dev;
4918 	int ret;
4919 
4920 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4921 	dev = &rte_eth_devices[port_id];
4922 
4923 	if (*dev->dev_ops->dev_led_on == NULL)
4924 		return -ENOTSUP;
4925 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4926 
4927 	rte_eth_trace_led_on(port_id, ret);
4928 
4929 	return ret;
4930 }
4931 
4932 int
4933 rte_eth_led_off(uint16_t port_id)
4934 {
4935 	struct rte_eth_dev *dev;
4936 	int ret;
4937 
4938 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4939 	dev = &rte_eth_devices[port_id];
4940 
4941 	if (*dev->dev_ops->dev_led_off == NULL)
4942 		return -ENOTSUP;
4943 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4944 
4945 	rte_eth_trace_led_off(port_id, ret);
4946 
4947 	return ret;
4948 }
4949 
4950 int
4951 rte_eth_fec_get_capability(uint16_t port_id,
4952 			   struct rte_eth_fec_capa *speed_fec_capa,
4953 			   unsigned int num)
4954 {
4955 	struct rte_eth_dev *dev;
4956 	int ret;
4957 
4958 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4959 	dev = &rte_eth_devices[port_id];
4960 
4961 	if (speed_fec_capa == NULL && num > 0) {
4962 		RTE_ETHDEV_LOG_LINE(ERR,
4963 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero",
4964 			port_id);
4965 		return -EINVAL;
4966 	}
4967 
4968 	if (*dev->dev_ops->fec_get_capability == NULL)
4969 		return -ENOTSUP;
4970 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4971 
4972 	rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret);
4973 
4974 	return ret;
4975 }
4976 
4977 int
4978 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4979 {
4980 	struct rte_eth_dev *dev;
4981 	int ret;
4982 
4983 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4984 	dev = &rte_eth_devices[port_id];
4985 
4986 	if (fec_capa == NULL) {
4987 		RTE_ETHDEV_LOG_LINE(ERR,
4988 			"Cannot get ethdev port %u current FEC mode to NULL",
4989 			port_id);
4990 		return -EINVAL;
4991 	}
4992 
4993 	if (*dev->dev_ops->fec_get == NULL)
4994 		return -ENOTSUP;
4995 	ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4996 
4997 	rte_eth_trace_fec_get(port_id, fec_capa, ret);
4998 
4999 	return ret;
5000 }
5001 
5002 int
5003 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
5004 {
5005 	struct rte_eth_dev *dev;
5006 	int ret;
5007 
5008 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5009 	dev = &rte_eth_devices[port_id];
5010 
5011 	if (fec_capa == 0) {
5012 		RTE_ETHDEV_LOG_LINE(ERR, "At least one FEC mode should be specified");
5013 		return -EINVAL;
5014 	}
5015 
5016 	if (*dev->dev_ops->fec_set == NULL)
5017 		return -ENOTSUP;
5018 	ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
5019 
5020 	rte_eth_trace_fec_set(port_id, fec_capa, ret);
5021 
5022 	return ret;
5023 }
5024 
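/*
 * Illustrative sketch (editor's addition): request a FEC mode, e.g.
 * RTE_ETH_FEC_MODE_CAPA_MASK(RS), only after checking it against the
 * per-speed capabilities reported above. Assumes a small fixed capability
 * array; the helper name is hypothetical.
 */
static __rte_unused int
example_request_fec(uint16_t port_id, uint32_t wanted_capa)
{
	struct rte_eth_fec_capa speed_capa[8];
	int num, i;

	num = rte_eth_fec_get_capability(port_id, speed_capa,
					 RTE_DIM(speed_capa));
	if (num < 0)
		return num;
	if (num > (int)RTE_DIM(speed_capa))
		num = RTE_DIM(speed_capa);

	for (i = 0; i < num; i++)
		if (speed_capa[i].capa & wanted_capa)
			return rte_eth_fec_set(port_id, wanted_capa);

	return -ENOTSUP;
}
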
5025 /*
5026  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
5027  * an empty spot.
5028  */
5029 static int
5030 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
5031 {
5032 	struct rte_eth_dev_info dev_info;
5033 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5034 	unsigned i;
5035 	int ret;
5036 
5037 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5038 	if (ret != 0)
5039 		return -1;
5040 
5041 	for (i = 0; i < dev_info.max_mac_addrs; i++)
5042 		if (memcmp(addr, &dev->data->mac_addrs[i],
5043 				RTE_ETHER_ADDR_LEN) == 0)
5044 			return i;
5045 
5046 	return -1;
5047 }
5048 
5049 static const struct rte_ether_addr null_mac_addr;
5050 
5051 int
5052 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
5053 			uint32_t pool)
5054 {
5055 	struct rte_eth_dev *dev;
5056 	int index;
5057 	uint64_t pool_mask;
5058 	int ret;
5059 
5060 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5061 	dev = &rte_eth_devices[port_id];
5062 
5063 	if (addr == NULL) {
5064 		RTE_ETHDEV_LOG_LINE(ERR,
5065 			"Cannot add ethdev port %u MAC address from NULL address",
5066 			port_id);
5067 		return -EINVAL;
5068 	}
5069 
5070 	if (*dev->dev_ops->mac_addr_add == NULL)
5071 		return -ENOTSUP;
5072 
5073 	if (rte_is_zero_ether_addr(addr)) {
5074 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
5075 			port_id);
5076 		return -EINVAL;
5077 	}
5078 	if (pool >= RTE_ETH_64_POOLS) {
5079 		RTE_ETHDEV_LOG_LINE(ERR, "Pool ID must be 0-%d", RTE_ETH_64_POOLS - 1);
5080 		return -EINVAL;
5081 	}
5082 
5083 	index = eth_dev_get_mac_addr_index(port_id, addr);
5084 	if (index < 0) {
5085 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
5086 		if (index < 0) {
5087 			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
5088 				port_id);
5089 			return -ENOSPC;
5090 		}
5091 	} else {
5092 		pool_mask = dev->data->mac_pool_sel[index];
5093 
5094 		/* If both the MAC address and pool are already there, do nothing */
5095 		if (pool_mask & RTE_BIT64(pool))
5096 			return 0;
5097 	}
5098 
5099 	/* Update NIC */
5100 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
5101 
5102 	if (ret == 0) {
5103 		/* Update address in NIC data structure */
5104 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
5105 
5106 		/* Update pool bitmap in NIC data structure */
5107 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
5108 	}
5109 
5110 	ret = eth_err(port_id, ret);
5111 
5112 	rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret);
5113 
5114 	return ret;
5115 }
5116 
5117 int
5118 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
5119 {
5120 	struct rte_eth_dev *dev;
5121 	int index;
5122 
5123 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5124 	dev = &rte_eth_devices[port_id];
5125 
5126 	if (addr == NULL) {
5127 		RTE_ETHDEV_LOG_LINE(ERR,
5128 			"Cannot remove ethdev port %u MAC address from NULL address",
5129 			port_id);
5130 		return -EINVAL;
5131 	}
5132 
5133 	if (*dev->dev_ops->mac_addr_remove == NULL)
5134 		return -ENOTSUP;
5135 
5136 	index = eth_dev_get_mac_addr_index(port_id, addr);
5137 	if (index == 0) {
5138 		RTE_ETHDEV_LOG_LINE(ERR,
5139 			"Port %u: Cannot remove default MAC address",
5140 			port_id);
5141 		return -EADDRINUSE;
5142 	} else if (index < 0)
5143 		return 0;  /* Do nothing if address wasn't found */
5144 
5145 	/* Update NIC */
5146 	(*dev->dev_ops->mac_addr_remove)(dev, index);
5147 
5148 	/* Update address in NIC data structure */
5149 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
5150 
5151 	/* reset pool bitmap */
5152 	dev->data->mac_pool_sel[index] = 0;
5153 
5154 	rte_ethdev_trace_mac_addr_remove(port_id, addr);
5155 
5156 	return 0;
5157 }
5158 
5159 int
5160 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
5161 {
5162 	struct rte_eth_dev *dev;
5163 	int index;
5164 	int ret;
5165 
5166 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5167 	dev = &rte_eth_devices[port_id];
5168 
5169 	if (addr == NULL) {
5170 		RTE_ETHDEV_LOG_LINE(ERR,
5171 			"Cannot set ethdev port %u default MAC address from NULL address",
5172 			port_id);
5173 		return -EINVAL;
5174 	}
5175 
5176 	if (!rte_is_valid_assigned_ether_addr(addr))
5177 		return -EINVAL;
5178 
5179 	if (*dev->dev_ops->mac_addr_set == NULL)
5180 		return -ENOTSUP;
5181 
5182 	/* Keep address unique in dev->data->mac_addrs[]. */
5183 	index = eth_dev_get_mac_addr_index(port_id, addr);
5184 	if (index > 0) {
5185 		RTE_ETHDEV_LOG_LINE(ERR,
5186 			"New default address for port %u was already in the address list. Please remove it first.",
5187 			port_id);
5188 		return -EEXIST;
5189 	}
5190 
5191 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
5192 	if (ret < 0)
5193 		return ret;
5194 
5195 	/* Update default address in NIC data structure */
5196 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
5197 
5198 	rte_ethdev_trace_default_mac_addr_set(port_id, addr);
5199 
5200 	return 0;
5201 }
5202 
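/*
 * Illustrative sketch (editor's addition): add a secondary unicast address
 * to the default pool and drop it again; slot 0 stays reserved for the
 * default address set above. The locally administered address bytes and the
 * helper name are made up for illustration.
 */
static __rte_unused int
example_secondary_mac(uint16_t port_id)
{
	struct rte_ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	int ret;

	ret = rte_eth_dev_mac_addr_add(port_id, &addr, 0);
	if (ret != 0)
		return ret;

	return rte_eth_dev_mac_addr_remove(port_id, &addr);
}
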
5203 
5204 /*
5205  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
5206  * an empty spot.
5207  */
5208 static int
5209 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
5210 		const struct rte_ether_addr *addr)
5211 {
5212 	struct rte_eth_dev_info dev_info;
5213 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5214 	unsigned i;
5215 	int ret;
5216 
5217 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5218 	if (ret != 0)
5219 		return -1;
5220 
5221 	if (!dev->data->hash_mac_addrs)
5222 		return -1;
5223 
5224 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
5225 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
5226 			RTE_ETHER_ADDR_LEN) == 0)
5227 			return i;
5228 
5229 	return -1;
5230 }
5231 
5232 int
5233 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
5234 				uint8_t on)
5235 {
5236 	int index;
5237 	int ret;
5238 	struct rte_eth_dev *dev;
5239 
5240 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5241 	dev = &rte_eth_devices[port_id];
5242 
5243 	if (addr == NULL) {
5244 		RTE_ETHDEV_LOG_LINE(ERR,
5245 			"Cannot set ethdev port %u unicast hash table from NULL address",
5246 			port_id);
5247 		return -EINVAL;
5248 	}
5249 
5250 	if (rte_is_zero_ether_addr(addr)) {
5251 		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
5252 			port_id);
5253 		return -EINVAL;
5254 	}
5255 
5256 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
5257 	/* Check if it's already there, and do nothing */
5258 	if ((index >= 0) && on)
5259 		return 0;
5260 
5261 	if (index < 0) {
5262 		if (!on) {
5263 			RTE_ETHDEV_LOG_LINE(ERR,
5264 				"Port %u: the MAC address was not set in UTA",
5265 				port_id);
5266 			return -EINVAL;
5267 		}
5268 
5269 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
5270 		if (index < 0) {
5271 			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
5272 				port_id);
5273 			return -ENOSPC;
5274 		}
5275 	}
5276 
5277 	if (*dev->dev_ops->uc_hash_table_set == NULL)
5278 		return -ENOTSUP;
5279 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
5280 	if (ret == 0) {
5281 		/* Update address in NIC data structure */
5282 		if (on)
5283 			rte_ether_addr_copy(addr,
5284 					&dev->data->hash_mac_addrs[index]);
5285 		else
5286 			rte_ether_addr_copy(&null_mac_addr,
5287 					&dev->data->hash_mac_addrs[index]);
5288 	}
5289 
5290 	ret = eth_err(port_id, ret);
5291 
5292 	rte_ethdev_trace_uc_hash_table_set(port_id, on, ret);
5293 
5294 	return ret;
5295 }
5296 
5297 int
5298 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
5299 {
5300 	struct rte_eth_dev *dev;
5301 	int ret;
5302 
5303 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5304 	dev = &rte_eth_devices[port_id];
5305 
5306 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
5307 		return -ENOTSUP;
5308 	ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));
5309 
5310 	rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret);
5311 
5312 	return ret;
5313 }
5314 
5315 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
5316 					uint32_t tx_rate)
5317 {
5318 	struct rte_eth_dev *dev;
5319 	struct rte_eth_dev_info dev_info;
5320 	struct rte_eth_link link;
5321 	int ret;
5322 
5323 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5324 	dev = &rte_eth_devices[port_id];
5325 
5326 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5327 	if (ret != 0)
5328 		return ret;
5329 
5330 	link = dev->data->dev_link;
5331 
5332 	if (queue_idx >= dev_info.max_tx_queues) {
5333 		RTE_ETHDEV_LOG_LINE(ERR,
5334 			"Set queue rate limit: port %u: invalid queue ID=%u",
5335 			port_id, queue_idx);
5336 		return -EINVAL;
5337 	}
5338 
5339 	if (tx_rate > link.link_speed) {
5340 		RTE_ETHDEV_LOG_LINE(ERR,
5341 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u",
5342 			tx_rate, link.link_speed);
5343 		return -EINVAL;
5344 	}
5345 
5346 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
5347 		return -ENOTSUP;
5348 	ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
5349 							queue_idx, tx_rate));
5350 
5351 	rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret);
5352 
5353 	return ret;
5354 }
5355 
5356 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
5357 			       uint8_t avail_thresh)
5358 {
5359 	struct rte_eth_dev *dev;
5360 	int ret;
5361 
5362 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5363 	dev = &rte_eth_devices[port_id];
5364 
5365 	if (queue_id >= dev->data->nb_rx_queues) {
5366 		RTE_ETHDEV_LOG_LINE(ERR,
5367 			"Set queue avail thresh: port %u: invalid queue ID=%u.",
5368 			port_id, queue_id);
5369 		return -EINVAL;
5370 	}
5371 
5372 	if (avail_thresh > 99) {
5373 		RTE_ETHDEV_LOG_LINE(ERR,
5374 			"Set queue avail thresh: port %u: threshold should be <= 99.",
5375 			port_id);
5376 		return -EINVAL;
5377 	}
5378 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
5379 		return -ENOTSUP;
5380 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
5381 							     queue_id, avail_thresh));
5382 
5383 	rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret);
5384 
5385 	return ret;
5386 }
5387 
5388 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
5389 				 uint8_t *avail_thresh)
5390 {
5391 	struct rte_eth_dev *dev;
5392 	int ret;
5393 
5394 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5395 	dev = &rte_eth_devices[port_id];
5396 
5397 	if (queue_id == NULL)
5398 		return -EINVAL;
5399 	if (*queue_id >= dev->data->nb_rx_queues)
5400 		*queue_id = 0;
5401 
5402 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
5403 		return -ENOTSUP;
5404 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
5405 							     queue_id, avail_thresh));
5406 
5407 	rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret);
5408 
5409 	return ret;
5410 }
5411 
5412 RTE_INIT(eth_dev_init_fp_ops)
5413 {
5414 	uint32_t i;
5415 
5416 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
5417 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
5418 }
5419 
5420 RTE_INIT(eth_dev_init_cb_lists)
5421 {
5422 	uint16_t i;
5423 
5424 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
5425 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
5426 }
5427 
5428 int
5429 rte_eth_dev_callback_register(uint16_t port_id,
5430 			enum rte_eth_event_type event,
5431 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5432 {
5433 	struct rte_eth_dev *dev;
5434 	struct rte_eth_dev_callback *user_cb;
5435 	uint16_t next_port;
5436 	uint16_t last_port;
5437 
5438 	if (cb_fn == NULL) {
5439 		RTE_ETHDEV_LOG_LINE(ERR,
5440 			"Cannot register ethdev port %u callback from NULL",
5441 			port_id);
5442 		return -EINVAL;
5443 	}
5444 
5445 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5446 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
5447 		return -EINVAL;
5448 	}
5449 
5450 	if (port_id == RTE_ETH_ALL) {
5451 		next_port = 0;
5452 		last_port = RTE_MAX_ETHPORTS - 1;
5453 	} else {
5454 		next_port = last_port = port_id;
5455 	}
5456 
5457 	rte_spinlock_lock(&eth_dev_cb_lock);
5458 
5459 	do {
5460 		dev = &rte_eth_devices[next_port];
5461 
5462 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
5463 			if (user_cb->cb_fn == cb_fn &&
5464 				user_cb->cb_arg == cb_arg &&
5465 				user_cb->event == event) {
5466 				break;
5467 			}
5468 		}
5469 
5470 		/* create a new callback. */
5471 		if (user_cb == NULL) {
5472 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
5473 				sizeof(struct rte_eth_dev_callback), 0);
5474 			if (user_cb != NULL) {
5475 				user_cb->cb_fn = cb_fn;
5476 				user_cb->cb_arg = cb_arg;
5477 				user_cb->event = event;
5478 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
5479 						  user_cb, next);
5480 			} else {
5481 				rte_spinlock_unlock(&eth_dev_cb_lock);
5482 				rte_eth_dev_callback_unregister(port_id, event,
5483 								cb_fn, cb_arg);
5484 				return -ENOMEM;
5485 			}
5486 
5487 		}
5488 	} while (++next_port <= last_port);
5489 
5490 	rte_spinlock_unlock(&eth_dev_cb_lock);
5491 
5492 	rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg);
5493 
5494 	return 0;
5495 }
5496 
5497 int
5498 rte_eth_dev_callback_unregister(uint16_t port_id,
5499 			enum rte_eth_event_type event,
5500 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5501 {
5502 	int ret;
5503 	struct rte_eth_dev *dev;
5504 	struct rte_eth_dev_callback *cb, *next;
5505 	uint16_t next_port;
5506 	uint16_t last_port;
5507 
5508 	if (cb_fn == NULL) {
5509 		RTE_ETHDEV_LOG_LINE(ERR,
5510 			"Cannot unregister ethdev port %u callback from NULL",
5511 			port_id);
5512 		return -EINVAL;
5513 	}
5514 
5515 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5516 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
5517 		return -EINVAL;
5518 	}
5519 
5520 	if (port_id == RTE_ETH_ALL) {
5521 		next_port = 0;
5522 		last_port = RTE_MAX_ETHPORTS - 1;
5523 	} else {
5524 		next_port = last_port = port_id;
5525 	}
5526 
5527 	rte_spinlock_lock(&eth_dev_cb_lock);
5528 
5529 	do {
5530 		dev = &rte_eth_devices[next_port];
5531 		ret = 0;
5532 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
5533 		     cb = next) {
5534 
5535 			next = TAILQ_NEXT(cb, next);
5536 
5537 			if (cb->cb_fn != cb_fn || cb->event != event ||
5538 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
5539 				continue;
5540 
5541 			/*
5542 			 * if this callback is not executing right now,
5543 			 * then remove it.
5544 			 */
5545 			if (cb->active == 0) {
5546 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
5547 				rte_free(cb);
5548 			} else {
5549 				ret = -EAGAIN;
5550 			}
5551 		}
5552 	} while (++next_port <= last_port);
5553 
5554 	rte_spinlock_unlock(&eth_dev_cb_lock);
5555 
5556 	rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg,
5557 					     ret);
5558 
5559 	return ret;
5560 }
5561 
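/*
 * Illustrative sketch (editor's addition): a minimal link-status callback
 * registered on all ports via RTE_ETH_ALL, matching the iteration logic
 * above. The callback and helper names are hypothetical.
 */
static __rte_unused int
example_lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		     void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u: event %d (link status change)\n", port_id, event);
	return 0;
}

static __rte_unused int
example_watch_link(void)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_event_cb, NULL);
}
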
5562 int
5563 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
5564 {
5565 	uint32_t vec;
5566 	struct rte_eth_dev *dev;
5567 	struct rte_intr_handle *intr_handle;
5568 	uint16_t qid;
5569 	int rc;
5570 
5571 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5572 	dev = &rte_eth_devices[port_id];
5573 
5574 	if (!dev->intr_handle) {
5575 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5576 		return -ENOTSUP;
5577 	}
5578 
5579 	intr_handle = dev->intr_handle;
5580 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5581 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5582 		return -EPERM;
5583 	}
5584 
5585 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
5586 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
5587 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5588 
5589 		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);
5590 
5591 		if (rc && rc != -EEXIST) {
5592 			RTE_ETHDEV_LOG_LINE(ERR,
5593 				"p %u q %u Rx ctl error op %d epfd %d vec %u",
5594 				port_id, qid, op, epfd, vec);
5595 		}
5596 	}
5597 
5598 	return 0;
5599 }
5600 
5601 int
5602 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
5603 {
5604 	struct rte_intr_handle *intr_handle;
5605 	struct rte_eth_dev *dev;
5606 	unsigned int efd_idx;
5607 	uint32_t vec;
5608 	int fd;
5609 
5610 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
5611 	dev = &rte_eth_devices[port_id];
5612 
5613 	if (queue_id >= dev->data->nb_rx_queues) {
5614 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
5615 		return -1;
5616 	}
5617 
5618 	if (!dev->intr_handle) {
5619 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5620 		return -1;
5621 	}
5622 
5623 	intr_handle = dev->intr_handle;
5624 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5625 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5626 		return -1;
5627 	}
5628 
5629 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5630 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5631 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
5632 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
5633 
5634 	rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd);
5635 
5636 	return fd;
5637 }
5638 
5639 int
5640 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
5641 			  int epfd, int op, void *data)
5642 {
5643 	uint32_t vec;
5644 	struct rte_eth_dev *dev;
5645 	struct rte_intr_handle *intr_handle;
5646 	int rc;
5647 
5648 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5649 	dev = &rte_eth_devices[port_id];
5650 
5651 	if (queue_id >= dev->data->nb_rx_queues) {
5652 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
5653 		return -EINVAL;
5654 	}
5655 
5656 	if (!dev->intr_handle) {
5657 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
5658 		return -ENOTSUP;
5659 	}
5660 
5661 	intr_handle = dev->intr_handle;
5662 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5663 		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
5664 		return -EPERM;
5665 	}
5666 
5667 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5668 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5669 
5670 	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);
5671 
5672 	if (rc && rc != -EEXIST) {
5673 		RTE_ETHDEV_LOG_LINE(ERR,
5674 			"p %u q %u Rx ctl error op %d epfd %d vec %u",
5675 			port_id, queue_id, op, epfd, vec);
5676 		return rc;
5677 	}
5678 
5679 	return 0;
5680 }
5681 
5682 int
5683 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5684 			   uint16_t queue_id)
5685 {
5686 	struct rte_eth_dev *dev;
5687 	int ret;
5688 
5689 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5690 	dev = &rte_eth_devices[port_id];
5691 
5692 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5693 	if (ret != 0)
5694 		return ret;
5695 
5696 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
5697 		return -ENOTSUP;
5698 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5699 
5700 	rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret);
5701 
5702 	return ret;
5703 }
5704 
5705 int
5706 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5707 			    uint16_t queue_id)
5708 {
5709 	struct rte_eth_dev *dev;
5710 	int ret;
5711 
5712 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5713 	dev = &rte_eth_devices[port_id];
5714 
5715 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5716 	if (ret != 0)
5717 		return ret;
5718 
5719 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5720 		return -ENOTSUP;
5721 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5722 
5723 	rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret);
5724 
5725 	return ret;
5726 }
5727 
5728 
5729 const struct rte_eth_rxtx_callback *
5730 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5731 		rte_rx_callback_fn fn, void *user_param)
5732 {
5733 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5734 	rte_errno = ENOTSUP;
5735 	return NULL;
5736 #endif
5737 	struct rte_eth_dev *dev;
5738 
5739 	/* check input parameters */
5740 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5741 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5742 		rte_errno = EINVAL;
5743 		return NULL;
5744 	}
5745 	dev = &rte_eth_devices[port_id];
5746 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5747 		rte_errno = EINVAL;
5748 		return NULL;
5749 	}
5750 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5751 
5752 	if (cb == NULL) {
5753 		rte_errno = ENOMEM;
5754 		return NULL;
5755 	}
5756 
5757 	cb->fn.rx = fn;
5758 	cb->param = user_param;
5759 
5760 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5761 	/* Add the callback in FIFO order. */
5762 	struct rte_eth_rxtx_callback *tail =
5763 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5764 
5765 	if (!tail) {
5766 		/* Stores to cb->fn and cb->param should complete before
5767 		 * cb is visible to data plane.
5768 		 */
5769 		rte_atomic_store_explicit(
5770 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5771 			cb, rte_memory_order_release);
5772 
5773 	} else {
5774 		while (tail->next)
5775 			tail = tail->next;
5776 		/* Stores to cb->fn and cb->param should complete before
5777 		 * cb is visible to data plane.
5778 		 */
5779 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5780 	}
5781 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5782 
5783 	rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb);
5784 
5785 	return cb;
5786 }
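
/*
 * Illustrative Rx callback sketch (hypothetical names): rte_rx_callback_fn
 * runs inside rte_eth_rx_burst() after packets arrive and returns how many
 * packets it hands back to the application.
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;	// caller-owned state
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;			// keep every packet
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 *	// on failure cb == NULL and rte_errno is set (see above)
 */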
5787 
5788 const struct rte_eth_rxtx_callback *
5789 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5790 		rte_rx_callback_fn fn, void *user_param)
5791 {
5792 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5793 	rte_errno = ENOTSUP;
5794 	return NULL;
5795 #endif
5796 	/* check input parameters */
5797 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5798 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5799 		rte_errno = EINVAL;
5800 		return NULL;
5801 	}
5802 
5803 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5804 
5805 	if (cb == NULL) {
5806 		rte_errno = ENOMEM;
5807 		return NULL;
5808 	}
5809 
5810 	cb->fn.rx = fn;
5811 	cb->param = user_param;
5812 
5813 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5814 	/* Add the callback at the first position. */
5815 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5816 	/* Stores to cb->fn, cb->param and cb->next should complete before
5817 	 * cb is visible to data plane threads.
5818 	 */
5819 	rte_atomic_store_explicit(
5820 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5821 		cb, rte_memory_order_release);
5822 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5823 
5824 	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
5825 					    cb);
5826 
5827 	return cb;
5828 }
5829 
5830 const struct rte_eth_rxtx_callback *
5831 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5832 		rte_tx_callback_fn fn, void *user_param)
5833 {
5834 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5835 	rte_errno = ENOTSUP;
5836 	return NULL;
5837 #endif
5838 	struct rte_eth_dev *dev;
5839 
5840 	/* check input parameters */
5841 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5842 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5843 		rte_errno = EINVAL;
5844 		return NULL;
5845 	}
5846 
5847 	dev = &rte_eth_devices[port_id];
5848 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5849 		rte_errno = EINVAL;
5850 		return NULL;
5851 	}
5852 
5853 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5854 
5855 	if (cb == NULL) {
5856 		rte_errno = ENOMEM;
5857 		return NULL;
5858 	}
5859 
5860 	cb->fn.tx = fn;
5861 	cb->param = user_param;
5862 
5863 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5864 	/* Add the callback in FIFO order. */
5865 	struct rte_eth_rxtx_callback *tail =
5866 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5867 
5868 	if (!tail) {
5869 		/* Stores to cb->fn and cb->param should complete before
5870 		 * cb is visible to data plane.
5871 		 */
5872 		rte_atomic_store_explicit(
5873 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5874 			cb, rte_memory_order_release);
5875 
5876 	} else {
5877 		while (tail->next)
5878 			tail = tail->next;
5879 		/* Stores to cb->fn and cb->param should complete before
5880 		 * cb is visible to data plane.
5881 		 */
5882 		rte_atomic_store_explicit(&tail->next, cb, rte_memory_order_release);
5883 	}
5884 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5885 
5886 	rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb);
5887 
5888 	return cb;
5889 }
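
/*
 * Illustrative Tx callback sketch (hypothetical names): the Tx hook runs
 * before the driver's transmit function and, unlike the Rx variant, has no
 * max_pkts argument; the return value is the count actually passed on.
 *
 *	static uint16_t
 *	pre_tx_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		  uint16_t nb_pkts, void *user_param)
 *	{
 *		// e.g. stamp, meter or filter pkts[0..nb_pkts-1] here
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(user_param);
 *		return nb_pkts;
 *	}
 *
 *	rte_eth_add_tx_callback(port_id, 0, pre_tx_cb, NULL);
 */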
5890 
5891 int
5892 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5893 		const struct rte_eth_rxtx_callback *user_cb)
5894 {
5895 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5896 	return -ENOTSUP;
5897 #endif
5898 	/* Check input parameters. */
5899 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5900 	if (user_cb == NULL ||
5901 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5902 		return -EINVAL;
5903 
5904 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5905 	struct rte_eth_rxtx_callback *cb;
5906 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5907 	int ret = -EINVAL;
5908 
5909 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5910 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5911 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5912 		cb = *prev_cb;
5913 		if (cb == user_cb) {
5914 			/* Remove the user cb from the callback list. */
5915 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
5916 			ret = 0;
5917 			break;
5918 		}
5919 	}
5920 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5921 
5922 	rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret);
5923 
5924 	return ret;
5925 }
5926 
5927 int
5928 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5929 		const struct rte_eth_rxtx_callback *user_cb)
5930 {
5931 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5932 	return -ENOTSUP;
5933 #endif
5934 	/* Check input parameters. */
5935 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5936 	if (user_cb == NULL ||
5937 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5938 		return -EINVAL;
5939 
5940 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5941 	int ret = -EINVAL;
5942 	struct rte_eth_rxtx_callback *cb;
5943 	RTE_ATOMIC(struct rte_eth_rxtx_callback *) *prev_cb;
5944 
5945 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5946 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5947 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5948 		cb = *prev_cb;
5949 		if (cb == user_cb) {
5950 			/* Remove the user cb from the callback list. */
5951 			rte_atomic_store_explicit(prev_cb, cb->next, rte_memory_order_relaxed);
5952 			ret = 0;
5953 			break;
5954 		}
5955 	}
5956 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5957 
5958 	rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret);
5959 
5960 	return ret;
5961 }
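
/*
 * Note on lifetime: neither remove function frees the callback object; it
 * is only unlinked under the add/remove lock, and a data-plane thread may
 * still be executing it when remove returns. Illustrative teardown sketch
 * (the fixed delay is a crude stand-in for a proper RCU/QSBR scheme):
 *
 *	if (rte_eth_remove_rx_callback(port_id, queue_id, cb) == 0) {
 *		rte_delay_ms(10);	// wait for in-flight bursts to drain
 *		rte_free((void *)(uintptr_t)cb);
 *	}
 */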
5962 
5963 int
5964 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5965 	struct rte_eth_rxq_info *qinfo)
5966 {
5967 	struct rte_eth_dev *dev;
5968 
5969 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5970 	dev = &rte_eth_devices[port_id];
5971 
5972 	if (queue_id >= dev->data->nb_rx_queues) {
5973 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
5974 		return -EINVAL;
5975 	}
5976 
5977 	if (qinfo == NULL) {
5978 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL",
5979 			port_id, queue_id);
5980 		return -EINVAL;
5981 	}
5982 
5983 	if (dev->data->rx_queues == NULL ||
5984 			dev->data->rx_queues[queue_id] == NULL) {
5985 		RTE_ETHDEV_LOG_LINE(ERR,
5986 			       "Rx queue %"PRIu16" of device with port_id=%"
5987 			       PRIu16" has not been set up",
5988 			       queue_id, port_id);
5989 		return -EINVAL;
5990 	}
5991 
5992 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5993 		RTE_ETHDEV_LOG_LINE(INFO,
5994 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16,
5995 			queue_id, port_id);
5996 		return -EINVAL;
5997 	}
5998 
5999 	if (*dev->dev_ops->rxq_info_get == NULL)
6000 		return -ENOTSUP;
6001 
6002 	memset(qinfo, 0, sizeof(*qinfo));
6003 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
6004 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
6005 
6006 	rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo);
6007 
6008 	return 0;
6009 }
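
/*
 * Illustrative query sketch: rte_eth_rxq_info exposes, among other fields,
 * the ring size and the backing mempool of a configured queue.
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq 0: %u descriptors, mempool %s\n",
 *		       qinfo.nb_desc, qinfo.mp->name);
 */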
6010 
6011 int
6012 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
6013 	struct rte_eth_txq_info *qinfo)
6014 {
6015 	struct rte_eth_dev *dev;
6016 
6017 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6018 	dev = &rte_eth_devices[port_id];
6019 
6020 	if (queue_id >= dev->data->nb_tx_queues) {
6021 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6022 		return -EINVAL;
6023 	}
6024 
6025 	if (qinfo == NULL) {
6026 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL",
6027 			port_id, queue_id);
6028 		return -EINVAL;
6029 	}
6030 
6031 	if (dev->data->tx_queues == NULL ||
6032 			dev->data->tx_queues[queue_id] == NULL) {
6033 		RTE_ETHDEV_LOG_LINE(ERR,
6034 			       "Tx queue %"PRIu16" of device with port_id=%"
6035 			       PRIu16" has not been set up",
6036 			       queue_id, port_id);
6037 		return -EINVAL;
6038 	}
6039 
6040 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
6041 		RTE_ETHDEV_LOG_LINE(INFO,
6042 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16,
6043 			queue_id, port_id);
6044 		return -EINVAL;
6045 	}
6046 
6047 	if (*dev->dev_ops->txq_info_get == NULL)
6048 		return -ENOTSUP;
6049 
6050 	memset(qinfo, 0, sizeof(*qinfo));
6051 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
6052 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
6053 
6054 	rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo);
6055 
6056 	return 0;
6057 }
6058 
6059 int
6060 rte_eth_recycle_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
6061 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6062 {
6063 	struct rte_eth_dev *dev;
6064 	int ret;
6065 
6066 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6067 	dev = &rte_eth_devices[port_id];
6068 
6069 	ret = eth_dev_validate_rx_queue(dev, queue_id);
6070 	if (unlikely(ret != 0))
6071 		return ret;
6072 
6073 	if (*dev->dev_ops->recycle_rxq_info_get == NULL)
6074 		return -ENOTSUP;
6075 
6076 	dev->dev_ops->recycle_rxq_info_get(dev, queue_id, recycle_rxq_info);
6077 
6078 	return 0;
6079 }
6080 
6081 int
6082 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
6083 			  struct rte_eth_burst_mode *mode)
6084 {
6085 	struct rte_eth_dev *dev;
6086 	int ret;
6087 
6088 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6089 	dev = &rte_eth_devices[port_id];
6090 
6091 	if (queue_id >= dev->data->nb_rx_queues) {
6092 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6093 		return -EINVAL;
6094 	}
6095 
6096 	if (mode == NULL) {
6097 		RTE_ETHDEV_LOG_LINE(ERR,
6098 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL",
6099 			port_id, queue_id);
6100 		return -EINVAL;
6101 	}
6102 
6103 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
6104 		return -ENOTSUP;
6105 	memset(mode, 0, sizeof(*mode));
6106 	ret = eth_err(port_id,
6107 		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
6108 
6109 	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);
6110 
6111 	return ret;
6112 }
6113 
6114 int
6115 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
6116 			  struct rte_eth_burst_mode *mode)
6117 {
6118 	struct rte_eth_dev *dev;
6119 	int ret;
6120 
6121 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6122 	dev = &rte_eth_devices[port_id];
6123 
6124 	if (queue_id >= dev->data->nb_tx_queues) {
6125 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6126 		return -EINVAL;
6127 	}
6128 
6129 	if (mode == NULL) {
6130 		RTE_ETHDEV_LOG_LINE(ERR,
6131 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL",
6132 			port_id, queue_id);
6133 		return -EINVAL;
6134 	}
6135 
6136 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
6137 		return -ENOTSUP;
6138 	memset(mode, 0, sizeof(*mode));
6139 	ret = eth_err(port_id,
6140 		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
6141 
6142 	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);
6143 
6144 	return ret;
6145 }
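
/*
 * Illustrative sketch: burst mode is purely informational; the driver fills
 * in a human-readable description of the Rx/Tx burst function in use.
 *
 *	struct rte_eth_burst_mode mode;
 *
 *	if (rte_eth_rx_burst_mode_get(port_id, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */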
6146 
6147 int
6148 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
6149 		struct rte_power_monitor_cond *pmc)
6150 {
6151 	struct rte_eth_dev *dev;
6152 	int ret;
6153 
6154 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6155 	dev = &rte_eth_devices[port_id];
6156 
6157 	if (queue_id >= dev->data->nb_rx_queues) {
6158 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6159 		return -EINVAL;
6160 	}
6161 
6162 	if (pmc == NULL) {
6163 		RTE_ETHDEV_LOG_LINE(ERR,
6164 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL",
6165 			port_id, queue_id);
6166 		return -EINVAL;
6167 	}
6168 
6169 	if (*dev->dev_ops->get_monitor_addr == NULL)
6170 		return -ENOTSUP;
6171 	ret = eth_err(port_id,
6172 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
6173 
6174 	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);
6175 
6176 	return ret;
6177 }
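
/*
 * Note: applications rarely call rte_eth_get_monitor_addr() directly; it is
 * consumed by the rte_power PMD power-management layer. Illustrative entry
 * point (queue 0 on the current lcore):
 *
 *	rte_power_ethdev_pmgmt_queue_enable(rte_lcore_id(), port_id, 0,
 *					    RTE_POWER_MGMT_TYPE_MONITOR);
 */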
6178 
6179 int
6180 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
6181 			     struct rte_ether_addr *mc_addr_set,
6182 			     uint32_t nb_mc_addr)
6183 {
6184 	struct rte_eth_dev *dev;
6185 	int ret;
6186 
6187 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6188 	dev = &rte_eth_devices[port_id];
6189 
6190 	if (*dev->dev_ops->set_mc_addr_list == NULL)
6191 		return -ENOTSUP;
6192 	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
6193 						mc_addr_set, nb_mc_addr));
6194 
6195 	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
6196 					  ret);
6197 
6198 	return ret;
6199 }
6200 
6201 int
6202 rte_eth_timesync_enable(uint16_t port_id)
6203 {
6204 	struct rte_eth_dev *dev;
6205 	int ret;
6206 
6207 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6208 	dev = &rte_eth_devices[port_id];
6209 
6210 	if (*dev->dev_ops->timesync_enable == NULL)
6211 		return -ENOTSUP;
6212 	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
6213 
6214 	rte_eth_trace_timesync_enable(port_id, ret);
6215 
6216 	return ret;
6217 }
6218 
6219 int
6220 rte_eth_timesync_disable(uint16_t port_id)
6221 {
6222 	struct rte_eth_dev *dev;
6223 	int ret;
6224 
6225 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6226 	dev = &rte_eth_devices[port_id];
6227 
6228 	if (*dev->dev_ops->timesync_disable == NULL)
6229 		return -ENOTSUP;
6230 	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
6231 
6232 	rte_eth_trace_timesync_disable(port_id, ret);
6233 
6234 	return ret;
6235 }
6236 
6237 int
6238 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
6239 				   uint32_t flags)
6240 {
6241 	struct rte_eth_dev *dev;
6242 	int ret;
6243 
6244 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6245 	dev = &rte_eth_devices[port_id];
6246 
6247 	if (timestamp == NULL) {
6248 		RTE_ETHDEV_LOG_LINE(ERR,
6249 			"Cannot read ethdev port %u Rx timestamp to NULL",
6250 			port_id);
6251 		return -EINVAL;
6252 	}
6253 
6254 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
6255 		return -ENOTSUP;
6256 
6257 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
6258 			       (dev, timestamp, flags));
6259 
6260 	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
6261 						 ret);
6262 
6263 	return ret;
6264 }
6265 
6266 int
6267 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
6268 				   struct timespec *timestamp)
6269 {
6270 	struct rte_eth_dev *dev;
6271 	int ret;
6272 
6273 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6274 	dev = &rte_eth_devices[port_id];
6275 
6276 	if (timestamp == NULL) {
6277 		RTE_ETHDEV_LOG_LINE(ERR,
6278 			"Cannot read ethdev port %u Tx timestamp to NULL",
6279 			port_id);
6280 		return -EINVAL;
6281 	}
6282 
6283 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
6284 		return -ENOTSUP;
6285 
6286 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
6287 			       (dev, timestamp));
6288 
6289 	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);
6290 
6291 	return ret;
6293 }
6294 
6295 int
6296 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
6297 {
6298 	struct rte_eth_dev *dev;
6299 	int ret;
6300 
6301 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6302 	dev = &rte_eth_devices[port_id];
6303 
6304 	if (*dev->dev_ops->timesync_adjust_time == NULL)
6305 		return -ENOTSUP;
6306 	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
6307 
6308 	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);
6309 
6310 	return ret;
6311 }
6312 
6313 int
6314 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
6315 {
6316 	struct rte_eth_dev *dev;
6317 	int ret;
6318 
6319 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6320 	dev = &rte_eth_devices[port_id];
6321 
6322 	if (timestamp == NULL) {
6323 		RTE_ETHDEV_LOG_LINE(ERR,
6324 			"Cannot read ethdev port %u timesync time to NULL",
6325 			port_id);
6326 		return -EINVAL;
6327 	}
6328 
6329 	if (*dev->dev_ops->timesync_read_time == NULL)
6330 		return -ENOTSUP;
6331 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
6332 								timestamp));
6333 
6334 	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);
6335 
6336 	return ret;
6337 }
6338 
6339 int
6340 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
6341 {
6342 	struct rte_eth_dev *dev;
6343 	int ret;
6344 
6345 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6346 	dev = &rte_eth_devices[port_id];
6347 
6348 	if (timestamp == NULL) {
6349 		RTE_ETHDEV_LOG_LINE(ERR,
6350 			"Cannot write ethdev port %u timesync from NULL time",
6351 			port_id);
6352 		return -EINVAL;
6353 	}
6354 
6355 	if (*dev->dev_ops->timesync_write_time == NULL)
6356 		return -ENOTSUP;
6357 	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
6358 								timestamp));
6359 
6360 	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);
6361 
6362 	return ret;
6363 }
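
/*
 * Illustrative sketch tying the timesync calls together (error handling
 * elided); delta is in nanoseconds for the adjust step:
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_read_time(port_id, &ts);	// device clock now
 *	rte_eth_timesync_adjust_time(port_id, -1000);	// step back 1 us
 *	ts.tv_sec += 1;
 *	rte_eth_timesync_write_time(port_id, &ts);	// set absolute time
 *	rte_eth_timesync_disable(port_id);
 */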
6364 
6365 int
6366 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
6367 {
6368 	struct rte_eth_dev *dev;
6369 	int ret;
6370 
6371 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6372 	dev = &rte_eth_devices[port_id];
6373 
6374 	if (clock == NULL) {
6375 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot read ethdev port %u clock to NULL",
6376 			port_id);
6377 		return -EINVAL;
6378 	}
6379 
6380 	if (*dev->dev_ops->read_clock == NULL)
6381 		return -ENOTSUP;
6382 	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
6383 
6384 	rte_eth_trace_read_clock(port_id, clock, ret);
6385 
6386 	return ret;
6387 }
6388 
6389 int
6390 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
6391 {
6392 	struct rte_eth_dev *dev;
6393 	int ret;
6394 
6395 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6396 	dev = &rte_eth_devices[port_id];
6397 
6398 	if (info == NULL) {
6399 		RTE_ETHDEV_LOG_LINE(ERR,
6400 			"Cannot get ethdev port %u register info to NULL",
6401 			port_id);
6402 		return -EINVAL;
6403 	}
6404 
6405 	if (*dev->dev_ops->get_reg == NULL)
6406 		return -ENOTSUP;
6407 	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
6408 
6409 	rte_ethdev_trace_get_reg_info(port_id, info, ret);
6410 
6411 	return ret;
6412 }
6413 
6414 int
6415 rte_eth_dev_get_eeprom_length(uint16_t port_id)
6416 {
6417 	struct rte_eth_dev *dev;
6418 	int ret;
6419 
6420 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6421 	dev = &rte_eth_devices[port_id];
6422 
6423 	if (*dev->dev_ops->get_eeprom_length == NULL)
6424 		return -ENOTSUP;
6425 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
6426 
6427 	rte_ethdev_trace_get_eeprom_length(port_id, ret);
6428 
6429 	return ret;
6430 }
6431 
6432 int
6433 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6434 {
6435 	struct rte_eth_dev *dev;
6436 	int ret;
6437 
6438 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6439 	dev = &rte_eth_devices[port_id];
6440 
6441 	if (info == NULL) {
6442 		RTE_ETHDEV_LOG_LINE(ERR,
6443 			"Cannot get ethdev port %u EEPROM info to NULL",
6444 			port_id);
6445 		return -EINVAL;
6446 	}
6447 
6448 	if (*dev->dev_ops->get_eeprom == NULL)
6449 		return -ENOTSUP;
6450 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
6451 
6452 	rte_ethdev_trace_get_eeprom(port_id, info, ret);
6453 
6454 	return ret;
6455 }
6456 
6457 int
6458 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6459 {
6460 	struct rte_eth_dev *dev;
6461 	int ret;
6462 
6463 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6464 	dev = &rte_eth_devices[port_id];
6465 
6466 	if (info == NULL) {
6467 		RTE_ETHDEV_LOG_LINE(ERR,
6468 			"Cannot set ethdev port %u EEPROM from NULL info",
6469 			port_id);
6470 		return -EINVAL;
6471 	}
6472 
6473 	if (*dev->dev_ops->set_eeprom == NULL)
6474 		return -ENOTSUP;
6475 	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
6476 
6477 	rte_ethdev_trace_set_eeprom(port_id, info, ret);
6478 
6479 	return ret;
6480 }
6481 
6482 int
6483 rte_eth_dev_get_module_info(uint16_t port_id,
6484 			    struct rte_eth_dev_module_info *modinfo)
6485 {
6486 	struct rte_eth_dev *dev;
6487 	int ret;
6488 
6489 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6490 	dev = &rte_eth_devices[port_id];
6491 
6492 	if (modinfo == NULL) {
6493 		RTE_ETHDEV_LOG_LINE(ERR,
6494 			"Cannot get ethdev port %u EEPROM module info to NULL",
6495 			port_id);
6496 		return -EINVAL;
6497 	}
6498 
6499 	if (*dev->dev_ops->get_module_info == NULL)
6500 		return -ENOTSUP;
6501 	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);
6502 
6503 	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);
6504 
6505 	return ret;
6506 }
6507 
6508 int
6509 rte_eth_dev_get_module_eeprom(uint16_t port_id,
6510 			      struct rte_dev_eeprom_info *info)
6511 {
6512 	struct rte_eth_dev *dev;
6513 	int ret;
6514 
6515 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6516 	dev = &rte_eth_devices[port_id];
6517 
6518 	if (info == NULL) {
6519 		RTE_ETHDEV_LOG_LINE(ERR,
6520 			"Cannot get ethdev port %u module EEPROM info to NULL",
6521 			port_id);
6522 		return -EINVAL;
6523 	}
6524 
6525 	if (info->data == NULL) {
6526 		RTE_ETHDEV_LOG_LINE(ERR,
6527 			"Cannot get ethdev port %u module EEPROM data to NULL",
6528 			port_id);
6529 		return -EINVAL;
6530 	}
6531 
6532 	if (info->length == 0) {
6533 		RTE_ETHDEV_LOG_LINE(ERR,
6534 			"Cannot get ethdev port %u module EEPROM into a zero-length buffer",
6535 			port_id);
6536 		return -EINVAL;
6537 	}
6538 
6539 	if (*dev->dev_ops->get_module_eeprom == NULL)
6540 		return -ENOTSUP;
6541 	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);
6542 
6543 	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);
6544 
6545 	return ret;
6546 }
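
/*
 * Illustrative two-step sketch for dumping a transceiver module EEPROM
 * (error handling elided): query the module type and EEPROM size first,
 * then read the full contents.
 *
 *	struct rte_eth_dev_module_info minfo;
 *	struct rte_dev_eeprom_info einfo;
 *
 *	rte_eth_dev_get_module_info(port_id, &minfo);
 *	memset(&einfo, 0, sizeof(einfo));
 *	einfo.offset = 0;
 *	einfo.length = minfo.eeprom_len;
 *	einfo.data = malloc(einfo.length);
 *	if (einfo.data != NULL)
 *		rte_eth_dev_get_module_eeprom(port_id, &einfo);
 */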
6547 
6548 int
6549 rte_eth_dev_get_dcb_info(uint16_t port_id,
6550 			     struct rte_eth_dcb_info *dcb_info)
6551 {
6552 	struct rte_eth_dev *dev;
6553 	int ret;
6554 
6555 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6556 	dev = &rte_eth_devices[port_id];
6557 
6558 	if (dcb_info == NULL) {
6559 		RTE_ETHDEV_LOG_LINE(ERR,
6560 			"Cannot get ethdev port %u DCB info to NULL",
6561 			port_id);
6562 		return -EINVAL;
6563 	}
6564 
6565 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
6566 
6567 	if (*dev->dev_ops->get_dcb_info == NULL)
6568 		return -ENOTSUP;
6569 	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
6570 
6571 	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);
6572 
6573 	return ret;
6574 }
6575 
6576 static void
6577 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
6578 		const struct rte_eth_desc_lim *desc_lim)
6579 {
6580 	if (desc_lim->nb_align != 0)
6581 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
6582 
6583 	if (desc_lim->nb_max != 0)
6584 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
6585 
6586 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
6587 }
6588 
6589 int
6590 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
6591 				 uint16_t *nb_rx_desc,
6592 				 uint16_t *nb_tx_desc)
6593 {
6594 	struct rte_eth_dev_info dev_info;
6595 	int ret;
6596 
6597 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6598 
6599 	ret = rte_eth_dev_info_get(port_id, &dev_info);
6600 	if (ret != 0)
6601 		return ret;
6602 
6603 	if (nb_rx_desc != NULL)
6604 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
6605 
6606 	if (nb_tx_desc != NULL)
6607 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
6608 
6609 	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);
6610 
6611 	return 0;
6612 }
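
/*
 * Illustrative sketch: the adjustment above normally runs right before
 * queue setup so that requested ring sizes are aligned and clamped to the
 * driver's limits ('mbuf_pool' is an application-provided mempool):
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(),
 *			       NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */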
6613 
6614 int
6615 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
6616 				   struct rte_eth_hairpin_cap *cap)
6617 {
6618 	struct rte_eth_dev *dev;
6619 	int ret;
6620 
6621 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6622 	dev = &rte_eth_devices[port_id];
6623 
6624 	if (cap == NULL) {
6625 		RTE_ETHDEV_LOG_LINE(ERR,
6626 			"Cannot get ethdev port %u hairpin capability to NULL",
6627 			port_id);
6628 		return -EINVAL;
6629 	}
6630 
6631 	if (*dev->dev_ops->hairpin_cap_get == NULL)
6632 		return -ENOTSUP;
6633 	memset(cap, 0, sizeof(*cap));
6634 	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
6635 
6636 	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);
6637 
6638 	return ret;
6639 }
6640 
6641 int
6642 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
6643 {
6644 	struct rte_eth_dev *dev;
6645 	int ret;
6646 
6647 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6648 	dev = &rte_eth_devices[port_id];
6649 
6650 	if (pool == NULL) {
6651 		RTE_ETHDEV_LOG_LINE(ERR,
6652 			"Cannot test ethdev port %u mempool operation from NULL pool",
6653 			port_id);
6654 		return -EINVAL;
6655 	}
6656 
6657 	if (*dev->dev_ops->pool_ops_supported == NULL)
6658 		return 1; /* all pools are supported */
6659 
6660 	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);
6661 
6662 	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);
6663 
6664 	return ret;
6665 }
6666 
6667 int
6668 rte_eth_representor_info_get(uint16_t port_id,
6669 			     struct rte_eth_representor_info *info)
6670 {
6671 	struct rte_eth_dev *dev;
6672 	int ret;
6673 
6674 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6675 	dev = &rte_eth_devices[port_id];
6676 
6677 	if (*dev->dev_ops->representor_info_get == NULL)
6678 		return -ENOTSUP;
6679 	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6680 
6681 	rte_eth_trace_representor_info_get(port_id, info, ret);
6682 
6683 	return ret;
6684 }
6685 
6686 int
6687 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
6688 {
6689 	struct rte_eth_dev *dev;
6690 	int ret;
6691 
6692 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6693 	dev = &rte_eth_devices[port_id];
6694 
6695 	if (dev->data->dev_configured != 0) {
6696 		RTE_ETHDEV_LOG_LINE(ERR,
6697 			"The port (ID=%"PRIu16") is already configured",
6698 			port_id);
6699 		return -EBUSY;
6700 	}
6701 
6702 	if (features == NULL) {
6703 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid features (NULL)");
6704 		return -EINVAL;
6705 	}
6706 
6707 	if ((*features & RTE_ETH_RX_METADATA_TUNNEL_ID) != 0 &&
6708 			rte_flow_restore_info_dynflag_register() < 0)
6709 		*features &= ~RTE_ETH_RX_METADATA_TUNNEL_ID;
6710 
6711 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
6712 		return -ENOTSUP;
6713 	ret = eth_err(port_id,
6714 		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
6715 
6716 	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);
6717 
6718 	return ret;
6719 }
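
/*
 * Illustrative sketch: negotiation must precede rte_eth_dev_configure()
 * (enforced by the dev_configured check above), and the device may clear
 * feature bits it cannot deliver:
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
 *		// rte_flow MARK values will be delivered in mbufs
 *	}
 */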
6720 
6721 int
6722 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
6723 		struct rte_eth_ip_reassembly_params *reassembly_capa)
6724 {
6725 	struct rte_eth_dev *dev;
6726 	int ret;
6727 
6728 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6729 	dev = &rte_eth_devices[port_id];
6730 
6731 	if (dev->data->dev_configured == 0) {
6732 		RTE_ETHDEV_LOG_LINE(ERR,
6733 			"port_id=%u is not configured, cannot get IP reassembly capability",
6734 			port_id);
6735 		return -EINVAL;
6736 	}
6737 
6738 	if (reassembly_capa == NULL) {
6739 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly capability to NULL");
6740 		return -EINVAL;
6741 	}
6742 
6743 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
6744 		return -ENOTSUP;
6745 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
6746 
6747 	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
6748 					(dev, reassembly_capa));
6749 
6750 	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
6751 						   ret);
6752 
6753 	return ret;
6754 }
6755 
6756 int
6757 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
6758 		struct rte_eth_ip_reassembly_params *conf)
6759 {
6760 	struct rte_eth_dev *dev;
6761 	int ret;
6762 
6763 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6764 	dev = &rte_eth_devices[port_id];
6765 
6766 	if (dev->data->dev_configured == 0) {
6767 		RTE_ETHDEV_LOG_LINE(ERR,
6768 			"port_id=%u is not configured, cannot get IP reassembly configuration",
6769 			port_id);
6770 		return -EINVAL;
6771 	}
6772 
6773 	if (conf == NULL) {
6774 		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly info to NULL");
6775 		return -EINVAL;
6776 	}
6777 
6778 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
6779 		return -ENOTSUP;
6780 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
6781 	ret = eth_err(port_id,
6782 		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
6783 
6784 	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);
6785 
6786 	return ret;
6787 }
6788 
6789 int
6790 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
6791 		const struct rte_eth_ip_reassembly_params *conf)
6792 {
6793 	struct rte_eth_dev *dev;
6794 	int ret;
6795 
6796 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6797 	dev = &rte_eth_devices[port_id];
6798 
6799 	if (dev->data->dev_configured == 0) {
6800 		RTE_ETHDEV_LOG_LINE(ERR,
6801 			"port_id=%u is not configured, cannot set IP reassembly configuration",
6802 			port_id);
6803 		return -EINVAL;
6804 	}
6805 
6806 	if (dev->data->dev_started != 0) {
6807 		RTE_ETHDEV_LOG_LINE(ERR,
6808 			"port_id=%u is started, cannot configure IP reassembly params",
6809 			port_id);
6810 		return -EINVAL;
6811 	}
6812 
6813 	if (conf == NULL) {
6814 		RTE_ETHDEV_LOG_LINE(ERR,
6815 				"Invalid IP reassembly configuration (NULL)");
6816 		return -EINVAL;
6817 	}
6818 
6819 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6820 		return -ENOTSUP;
6821 	ret = eth_err(port_id,
6822 		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6823 
6824 	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);
6825 
6826 	return ret;
6827 }
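
/*
 * Illustrative sketch: query the capability first, then apply a
 * configuration no larger than the reported limits, after configure and
 * before start (both enforced above):
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *		conf = capa;	// accept the device maximums
 *		conf.timeout_ms = RTE_MIN(capa.timeout_ms, 100u);
 *		rte_eth_ip_reassembly_conf_set(port_id, &conf);
 *	}
 */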
6828 
6829 int
6830 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
6831 {
6832 	struct rte_eth_dev *dev;
6833 
6834 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6835 	dev = &rte_eth_devices[port_id];
6836 
6837 	if (file == NULL) {
6838 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6839 		return -EINVAL;
6840 	}
6841 
6842 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6843 		return -ENOTSUP;
6844 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6845 }
6846 
6847 int
6848 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6849 			   uint16_t offset, uint16_t num, FILE *file)
6850 {
6851 	struct rte_eth_dev *dev;
6852 
6853 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6854 	dev = &rte_eth_devices[port_id];
6855 
6856 	if (queue_id >= dev->data->nb_rx_queues) {
6857 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
6858 		return -EINVAL;
6859 	}
6860 
6861 	if (file == NULL) {
6862 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6863 		return -EINVAL;
6864 	}
6865 
6866 	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
6867 		return -ENOTSUP;
6868 
6869 	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
6870 						queue_id, offset, num, file));
6871 }
6872 
6873 int
6874 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6875 			   uint16_t offset, uint16_t num, FILE *file)
6876 {
6877 	struct rte_eth_dev *dev;
6878 
6879 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6880 	dev = &rte_eth_devices[port_id];
6881 
6882 	if (queue_id >= dev->data->nb_tx_queues) {
6883 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
6884 		return -EINVAL;
6885 	}
6886 
6887 	if (file == NULL) {
6888 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
6889 		return -EINVAL;
6890 	}
6891 
6892 	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
6893 		return -ENOTSUP;
6894 
6895 	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
6896 						queue_id, offset, num, file));
6897 }
6898 
6899 int
6900 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
6901 {
6902 	size_t i;
6903 	int j;
6904 	struct rte_eth_dev *dev;
6905 	const uint32_t *all_types;
6906 	size_t no_of_elements = 0;
6907 
6908 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6909 	dev = &rte_eth_devices[port_id];
6910 
6911 	if (ptypes == NULL && num > 0) {
6912 		RTE_ETHDEV_LOG_LINE(ERR,
6913 			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non-zero",
6914 			port_id);
6915 		return -EINVAL;
6916 	}
6917 
6918 	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
6919 		return -ENOTSUP;
6920 	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev,
6921 							      &no_of_elements);
6922 
6923 	if (all_types == NULL)
6924 		return 0;
6925 
6926 	for (i = 0, j = 0; i < no_of_elements; ++i) {
6927 		if (j < num) {
6928 			ptypes[j] = all_types[i];
6929 
6930 			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
6931 							port_id, j, ptypes[j]);
6932 		}
6933 		j++;
6934 	}
6935 
6936 	return j;
6937 }
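
/*
 * Illustrative sketch: like other ethdev list getters, this call supports a
 * size query; ptypes == NULL with num == 0 passes validation and returns
 * the element count without writing anything.
 *
 *	int n = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
 *							      NULL, 0);
 *	if (n > 0) {
 *		uint32_t *buf = malloc(n * sizeof(*buf));
 *
 *		if (buf != NULL)
 *			n = rte_eth_buffer_split_get_supported_hdr_ptypes(
 *							port_id, buf, n);
 *	}
 */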
6938 
6939 int rte_eth_dev_count_aggr_ports(uint16_t port_id)
6940 {
6941 	struct rte_eth_dev *dev;
6942 	int ret;
6943 
6944 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6945 	dev = &rte_eth_devices[port_id];
6946 
6947 	if (*dev->dev_ops->count_aggr_ports == NULL)
6948 		return 0;
6949 	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));
6950 
6951 	rte_eth_trace_count_aggr_ports(port_id, ret);
6952 
6953 	return ret;
6954 }
6955 
6956 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
6957 				     uint8_t affinity)
6958 {
6959 	struct rte_eth_dev *dev;
6960 	int aggr_ports;
6961 	int ret;
6962 
6963 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6964 	dev = &rte_eth_devices[port_id];
6965 
6966 	if (tx_queue_id >= dev->data->nb_tx_queues) {
6967 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
6968 		return -EINVAL;
6969 	}
6970 
6971 	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
6972 		return -ENOTSUP;
6973 
6974 	if (dev->data->dev_configured == 0) {
6975 		RTE_ETHDEV_LOG_LINE(ERR,
6976 			"Port %u must be configured before Tx affinity mapping",
6977 			port_id);
6978 		return -EINVAL;
6979 	}
6980 
6981 	if (dev->data->dev_started) {
6982 		RTE_ETHDEV_LOG_LINE(ERR,
6983 			"Port %u must be stopped to allow configuration",
6984 			port_id);
6985 		return -EBUSY;
6986 	}
6987 
6988 	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
6989 	if (aggr_ports == 0) {
6990 		RTE_ETHDEV_LOG_LINE(ERR,
6991 			"Port %u has no aggregated port",
6992 			port_id);
6993 		return -ENOTSUP;
6994 	}
6995 
6996 	if (affinity > aggr_ports) {
6997 		RTE_ETHDEV_LOG_LINE(ERR,
6998 			"Port %u: invalid Tx affinity %u, exceeds aggregated port count %u",
6999 			port_id, affinity, aggr_ports);
7000 		return -EINVAL;
7001 	}
7002 
7003 	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
7004 				tx_queue_id, affinity));
7005 
7006 	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);
7007 
7008 	return ret;
7009 }
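
/*
 * Illustrative sketch tying the two aggregation calls together ('nb_txq' is
 * the application's configured Tx queue count); valid affinities run from 1
 * to the aggregated port count, matching the range check above, while 0
 * leaves a queue unpinned:
 *
 *	int n = rte_eth_dev_count_aggr_ports(port_id);
 *	uint16_t q;
 *
 *	for (q = 0; n > 0 && q < nb_txq; q++)
 *		rte_eth_dev_map_aggr_tx_affinity(port_id, q,
 *						 (uint8_t)(q % n) + 1);
 */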
7010 
7011 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
7012