xref: /dpdk/lib/ethdev/rte_ethdev.c (revision ce7a737c8f02cd001f3f66f5c4e73ab7060ed588)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #include <errno.h>
6 #include <inttypes.h>
7 #include <stdbool.h>
8 #include <stdint.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/queue.h>
13 
14 #include <bus_driver.h>
15 #include <rte_log.h>
16 #include <rte_interrupts.h>
17 #include <rte_kvargs.h>
18 #include <rte_memcpy.h>
19 #include <rte_common.h>
20 #include <rte_mempool.h>
21 #include <rte_malloc.h>
22 #include <rte_mbuf.h>
23 #include <rte_errno.h>
24 #include <rte_spinlock.h>
25 #include <rte_string_fns.h>
26 #include <rte_class.h>
27 #include <rte_ether.h>
28 #include <rte_telemetry.h>
29 
30 #include "rte_ethdev.h"
31 #include "rte_ethdev_trace_fp.h"
32 #include "ethdev_driver.h"
33 #include "ethdev_profile.h"
34 #include "ethdev_private.h"
35 #include "ethdev_trace.h"
36 #include "sff_telemetry.h"
37 
38 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
39 
40 /* public fast-path API */
41 struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
42 
43 /* spinlock for add/remove Rx callbacks */
44 static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
45 
46 /* spinlock for add/remove Tx callbacks */
47 static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
48 
49 /* store statistics names and its offset in stats structure  */
50 struct rte_eth_xstats_name_off {
51 	char name[RTE_ETH_XSTATS_NAME_SIZE];
52 	unsigned offset;
53 };
54 
55 static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
56 	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
57 	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
58 	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
59 	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
60 	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
61 	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
62 	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
63 	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
64 		rx_nombuf)},
65 };
66 
67 #define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)
68 
69 static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
70 	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
71 	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
72 	{"errors", offsetof(struct rte_eth_stats, q_errors)},
73 };
74 
75 #define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)
76 
77 static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
78 	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
79 	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
80 };
81 #define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
82 
83 #define RTE_RX_OFFLOAD_BIT2STR(_name)	\
84 	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
85 
86 static const struct {
87 	uint64_t offload;
88 	const char *name;
89 } eth_dev_rx_offload_names[] = {
90 	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
91 	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
92 	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
93 	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
94 	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
95 	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
96 	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
97 	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
98 	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
99 	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
100 	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
101 	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
102 	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
103 	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
104 	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
105 	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
106 	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
107 	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
108 };
109 
110 #undef RTE_RX_OFFLOAD_BIT2STR
111 #undef RTE_ETH_RX_OFFLOAD_BIT2STR
112 
113 #define RTE_TX_OFFLOAD_BIT2STR(_name)	\
114 	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }
115 
116 static const struct {
117 	uint64_t offload;
118 	const char *name;
119 } eth_dev_tx_offload_names[] = {
120 	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
121 	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
122 	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
123 	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
124 	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
125 	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
126 	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
127 	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
128 	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
129 	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
130 	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
131 	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
132 	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
133 	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
134 	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
135 	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
136 	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
137 	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
138 	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
139 	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
140 	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
141 	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
142 };
143 
144 #undef RTE_TX_OFFLOAD_BIT2STR
145 
146 static const struct {
147 	uint64_t offload;
148 	const char *name;
149 } rte_eth_dev_capa_names[] = {
150 	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
151 	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
152 	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
153 	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
154 	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
155 };
156 
157 enum {
158 	STAT_QMAP_TX = 0,
159 	STAT_QMAP_RX
160 };
161 
162 int
163 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
164 {
165 	int ret;
166 	struct rte_devargs devargs;
167 	const char *bus_param_key;
168 	char *bus_str = NULL;
169 	char *cls_str = NULL;
170 	int str_size;
171 
172 	if (iter == NULL) {
173 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
174 		return -EINVAL;
175 	}
176 
177 	if (devargs_str == NULL) {
178 		RTE_ETHDEV_LOG(ERR,
179 			"Cannot initialize iterator from NULL device description string\n");
180 		return -EINVAL;
181 	}
182 
183 	memset(iter, 0, sizeof(*iter));
184 	memset(&devargs, 0, sizeof(devargs));
185 
186 	/*
187 	 * The devargs string may use various syntaxes:
188 	 *   - 0000:08:00.0,representor=[1-3]
189 	 *   - pci:0000:06:00.0,representor=[0,5]
190 	 *   - class=eth,mac=00:11:22:33:44:55
191 	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
192 	 */
193 
194 	/*
195 	 * Handle pure class filter (i.e. without any bus-level argument),
196 	 * from future new syntax.
197 	 * rte_devargs_parse() is not yet supporting the new syntax,
198 	 * that's why this simple case is temporarily parsed here.
199 	 */
200 #define iter_anybus_str "class=eth,"
201 	if (strncmp(devargs_str, iter_anybus_str,
202 			strlen(iter_anybus_str)) == 0) {
203 		iter->cls_str = devargs_str + strlen(iter_anybus_str);
204 		goto end;
205 	}
206 
207 	/* Split bus, device and parameters. */
208 	ret = rte_devargs_parse(&devargs, devargs_str);
209 	if (ret != 0)
210 		goto error;
211 
212 	/*
213 	 * Assume parameters of old syntax can match only at ethdev level.
214 	 * Extra parameters will be ignored, thanks to "+" prefix.
215 	 */
216 	str_size = strlen(devargs.args) + 2;
217 	cls_str = malloc(str_size);
218 	if (cls_str == NULL) {
219 		ret = -ENOMEM;
220 		goto error;
221 	}
222 	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
223 	if (ret != str_size - 1) {
224 		ret = -EINVAL;
225 		goto error;
226 	}
227 	iter->cls_str = cls_str;
228 
229 	iter->bus = devargs.bus;
230 	if (iter->bus->dev_iterate == NULL) {
231 		ret = -ENOTSUP;
232 		goto error;
233 	}
234 
235 	/* Convert bus args to new syntax for use with new API dev_iterate. */
236 	if ((strcmp(iter->bus->name, "vdev") == 0) ||
237 		(strcmp(iter->bus->name, "fslmc") == 0) ||
238 		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
239 		bus_param_key = "name";
240 	} else if (strcmp(iter->bus->name, "pci") == 0) {
241 		bus_param_key = "addr";
242 	} else {
243 		ret = -ENOTSUP;
244 		goto error;
245 	}
246 	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
247 	bus_str = malloc(str_size);
248 	if (bus_str == NULL) {
249 		ret = -ENOMEM;
250 		goto error;
251 	}
252 	ret = snprintf(bus_str, str_size, "%s=%s",
253 			bus_param_key, devargs.name);
254 	if (ret != str_size - 1) {
255 		ret = -EINVAL;
256 		goto error;
257 	}
258 	iter->bus_str = bus_str;
259 
260 end:
261 	iter->cls = rte_class_find_by_name("eth");
262 	rte_devargs_reset(&devargs);
263 
264 	rte_eth_trace_iterator_init(devargs_str);
265 
266 	return 0;
267 
268 error:
269 	if (ret == -ENOTSUP)
270 		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
271 				iter->bus->name);
272 	rte_devargs_reset(&devargs);
273 	free(bus_str);
274 	free(cls_str);
275 	return ret;
276 }
277 
278 uint16_t
279 rte_eth_iterator_next(struct rte_dev_iterator *iter)
280 {
281 	if (iter == NULL) {
282 		RTE_ETHDEV_LOG(ERR,
283 			"Cannot get next device from NULL iterator\n");
284 		return RTE_MAX_ETHPORTS;
285 	}
286 
287 	if (iter->cls == NULL) /* invalid ethdev iterator */
288 		return RTE_MAX_ETHPORTS;
289 
290 	do { /* loop to try all matching rte_device */
291 		/* If not pure ethdev filter and */
292 		if (iter->bus != NULL &&
293 				/* not in middle of rte_eth_dev iteration, */
294 				iter->class_device == NULL) {
295 			/* get next rte_device to try. */
296 			iter->device = iter->bus->dev_iterate(
297 					iter->device, iter->bus_str, iter);
298 			if (iter->device == NULL)
299 				break; /* no more rte_device candidate */
300 		}
301 		/* A device is matching bus part, need to check ethdev part. */
302 		iter->class_device = iter->cls->dev_iterate(
303 				iter->class_device, iter->cls_str, iter);
304 		if (iter->class_device != NULL) {
305 			uint16_t id = eth_dev_to_id(iter->class_device);
306 
307 			rte_eth_trace_iterator_next(iter, id);
308 
309 			return id; /* match */
310 		}
311 	} while (iter->bus != NULL); /* need to try next rte_device */
312 
313 	/* No more ethdev port to iterate. */
314 	rte_eth_iterator_cleanup(iter);
315 	return RTE_MAX_ETHPORTS;
316 }
317 
318 void
319 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
320 {
321 	if (iter == NULL) {
322 		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
323 		return;
324 	}
325 
326 	if (iter->bus_str == NULL)
327 		return; /* nothing to free in pure class filter */
328 
329 	rte_eth_trace_iterator_cleanup(iter);
330 
331 	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
332 	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
333 	memset(iter, 0, sizeof(*iter));
334 }
335 
336 uint16_t
337 rte_eth_find_next(uint16_t port_id)
338 {
339 	while (port_id < RTE_MAX_ETHPORTS &&
340 			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
341 		port_id++;
342 
343 	if (port_id >= RTE_MAX_ETHPORTS)
344 		return RTE_MAX_ETHPORTS;
345 
346 	rte_eth_trace_find_next(port_id);
347 
348 	return port_id;
349 }
350 
351 /*
352  * Macro to iterate over all valid ports for internal usage.
353  * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports.
354  */
355 #define RTE_ETH_FOREACH_VALID_DEV(port_id) \
356 	for (port_id = rte_eth_find_next(0); \
357 	     port_id < RTE_MAX_ETHPORTS; \
358 	     port_id = rte_eth_find_next(port_id + 1))
359 
360 uint16_t
361 rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
362 {
363 	port_id = rte_eth_find_next(port_id);
364 	while (port_id < RTE_MAX_ETHPORTS &&
365 			rte_eth_devices[port_id].device != parent)
366 		port_id = rte_eth_find_next(port_id + 1);
367 
368 	rte_eth_trace_find_next_of(port_id, parent);
369 
370 	return port_id;
371 }
372 
373 uint16_t
374 rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
375 {
376 	uint16_t ret;
377 
378 	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
379 	ret = rte_eth_find_next_of(port_id,
380 			rte_eth_devices[ref_port_id].device);
381 
382 	rte_eth_trace_find_next_sibling(port_id, ref_port_id, ret);
383 
384 	return ret;
385 }
386 
387 static bool
388 eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
389 {
390 	return ethdev->data->name[0] != '\0';
391 }
392 
393 int
394 rte_eth_dev_is_valid_port(uint16_t port_id)
395 {
396 	int is_valid;
397 
398 	if (port_id >= RTE_MAX_ETHPORTS ||
399 	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
400 		is_valid = 0;
401 	else
402 		is_valid = 1;
403 
404 	rte_ethdev_trace_is_valid_port(port_id, is_valid);
405 
406 	return is_valid;
407 }
408 
409 static int
410 eth_is_valid_owner_id(uint64_t owner_id)
411 {
412 	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
413 	    eth_dev_shared_data->next_owner_id <= owner_id)
414 		return 0;
415 	return 1;
416 }
417 
418 uint64_t
419 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
420 {
421 	port_id = rte_eth_find_next(port_id);
422 	while (port_id < RTE_MAX_ETHPORTS &&
423 			rte_eth_devices[port_id].data->owner.id != owner_id)
424 		port_id = rte_eth_find_next(port_id + 1);
425 
426 	rte_eth_trace_find_next_owned_by(port_id, owner_id);
427 
428 	return port_id;
429 }
430 
431 int
432 rte_eth_dev_owner_new(uint64_t *owner_id)
433 {
434 	if (owner_id == NULL) {
435 		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
436 		return -EINVAL;
437 	}
438 
439 	eth_dev_shared_data_prepare();
440 
441 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
442 
443 	*owner_id = eth_dev_shared_data->next_owner_id++;
444 
445 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
446 
447 	rte_ethdev_trace_owner_new(*owner_id);
448 
449 	return 0;
450 }
451 
452 static int
453 eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
454 		       const struct rte_eth_dev_owner *new_owner)
455 {
456 	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
457 	struct rte_eth_dev_owner *port_owner;
458 
459 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
460 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
461 			port_id);
462 		return -ENODEV;
463 	}
464 
465 	if (new_owner == NULL) {
466 		RTE_ETHDEV_LOG(ERR,
467 			"Cannot set ethdev port %u owner from NULL owner\n",
468 			port_id);
469 		return -EINVAL;
470 	}
471 
472 	if (!eth_is_valid_owner_id(new_owner->id) &&
473 	    !eth_is_valid_owner_id(old_owner_id)) {
474 		RTE_ETHDEV_LOG(ERR,
475 			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
476 		       old_owner_id, new_owner->id);
477 		return -EINVAL;
478 	}
479 
480 	port_owner = &rte_eth_devices[port_id].data->owner;
481 	if (port_owner->id != old_owner_id) {
482 		RTE_ETHDEV_LOG(ERR,
483 			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
484 			port_id, port_owner->name, port_owner->id);
485 		return -EPERM;
486 	}
487 
488 	/* can not truncate (same structure) */
489 	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
490 
491 	port_owner->id = new_owner->id;
492 
493 	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
494 		port_id, new_owner->name, new_owner->id);
495 
496 	return 0;
497 }
498 
499 int
500 rte_eth_dev_owner_set(const uint16_t port_id,
501 		      const struct rte_eth_dev_owner *owner)
502 {
503 	int ret;
504 
505 	eth_dev_shared_data_prepare();
506 
507 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
508 
509 	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
510 
511 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
512 
513 	rte_ethdev_trace_owner_set(port_id, owner, ret);
514 
515 	return ret;
516 }
517 
518 int
519 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
520 {
521 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
522 			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
523 	int ret;
524 
525 	eth_dev_shared_data_prepare();
526 
527 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
528 
529 	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
530 
531 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
532 
533 	rte_ethdev_trace_owner_unset(port_id, owner_id, ret);
534 
535 	return ret;
536 }
537 
538 int
539 rte_eth_dev_owner_delete(const uint64_t owner_id)
540 {
541 	uint16_t port_id;
542 	int ret = 0;
543 
544 	eth_dev_shared_data_prepare();
545 
546 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
547 
548 	if (eth_is_valid_owner_id(owner_id)) {
549 		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
550 			struct rte_eth_dev_data *data =
551 				rte_eth_devices[port_id].data;
552 			if (data != NULL && data->owner.id == owner_id)
553 				memset(&data->owner, 0,
554 				       sizeof(struct rte_eth_dev_owner));
555 		}
556 		RTE_ETHDEV_LOG(NOTICE,
557 			"All port owners owned by %016"PRIx64" identifier have removed\n",
558 			owner_id);
559 	} else {
560 		RTE_ETHDEV_LOG(ERR,
561 			       "Invalid owner ID=%016"PRIx64"\n",
562 			       owner_id);
563 		ret = -EINVAL;
564 	}
565 
566 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
567 
568 	rte_ethdev_trace_owner_delete(owner_id, ret);
569 
570 	return ret;
571 }
572 
573 int
574 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
575 {
576 	struct rte_eth_dev *ethdev;
577 
578 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
579 	ethdev = &rte_eth_devices[port_id];
580 
581 	if (!eth_dev_is_allocated(ethdev)) {
582 		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
583 			port_id);
584 		return -ENODEV;
585 	}
586 
587 	if (owner == NULL) {
588 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
589 			port_id);
590 		return -EINVAL;
591 	}
592 
593 	eth_dev_shared_data_prepare();
594 
595 	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
596 	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
597 	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
598 
599 	rte_ethdev_trace_owner_get(port_id, owner);
600 
601 	return 0;
602 }
603 
604 int
605 rte_eth_dev_socket_id(uint16_t port_id)
606 {
607 	int socket_id = SOCKET_ID_ANY;
608 
609 	if (!rte_eth_dev_is_valid_port(port_id)) {
610 		rte_errno = EINVAL;
611 	} else {
612 		socket_id = rte_eth_devices[port_id].data->numa_node;
613 		if (socket_id == SOCKET_ID_ANY)
614 			rte_errno = 0;
615 	}
616 
617 	rte_ethdev_trace_socket_id(port_id, socket_id);
618 
619 	return socket_id;
620 }
621 
622 void *
623 rte_eth_dev_get_sec_ctx(uint16_t port_id)
624 {
625 	void *ctx;
626 
627 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
628 	ctx = rte_eth_devices[port_id].security_ctx;
629 
630 	rte_ethdev_trace_get_sec_ctx(port_id, ctx);
631 
632 	return ctx;
633 }
634 
635 uint16_t
636 rte_eth_dev_count_avail(void)
637 {
638 	uint16_t p;
639 	uint16_t count;
640 
641 	count = 0;
642 
643 	RTE_ETH_FOREACH_DEV(p)
644 		count++;
645 
646 	rte_ethdev_trace_count_avail(count);
647 
648 	return count;
649 }
650 
651 uint16_t
652 rte_eth_dev_count_total(void)
653 {
654 	uint16_t port, count = 0;
655 
656 	RTE_ETH_FOREACH_VALID_DEV(port)
657 		count++;
658 
659 	rte_ethdev_trace_count_total(count);
660 
661 	return count;
662 }
663 
664 int
665 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
666 {
667 	char *tmp;
668 
669 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
670 
671 	if (name == NULL) {
672 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
673 			port_id);
674 		return -EINVAL;
675 	}
676 
677 	/* shouldn't check 'rte_eth_devices[i].data',
678 	 * because it might be overwritten by VDEV PMD */
679 	tmp = eth_dev_shared_data->data[port_id].name;
680 	strcpy(name, tmp);
681 
682 	rte_ethdev_trace_get_name_by_port(port_id, name);
683 
684 	return 0;
685 }
686 
687 int
688 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
689 {
690 	uint16_t pid;
691 
692 	if (name == NULL) {
693 		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name");
694 		return -EINVAL;
695 	}
696 
697 	if (port_id == NULL) {
698 		RTE_ETHDEV_LOG(ERR,
699 			"Cannot get port ID to NULL for %s\n", name);
700 		return -EINVAL;
701 	}
702 
703 	RTE_ETH_FOREACH_VALID_DEV(pid)
704 		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
705 			*port_id = pid;
706 
707 			rte_ethdev_trace_get_port_by_name(name, *port_id);
708 
709 			return 0;
710 		}
711 
712 	return -ENODEV;
713 }
714 
715 int
716 eth_err(uint16_t port_id, int ret)
717 {
718 	if (ret == 0)
719 		return 0;
720 	if (rte_eth_dev_is_removed(port_id))
721 		return -EIO;
722 	return ret;
723 }
724 
725 static int
726 eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
727 {
728 	uint16_t port_id;
729 
730 	if (rx_queue_id >= dev->data->nb_rx_queues) {
731 		port_id = dev->data->port_id;
732 		RTE_ETHDEV_LOG(ERR,
733 			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
734 			       rx_queue_id, port_id);
735 		return -EINVAL;
736 	}
737 
738 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
739 		port_id = dev->data->port_id;
740 		RTE_ETHDEV_LOG(ERR,
741 			       "Queue %u of device with port_id=%u has not been setup\n",
742 			       rx_queue_id, port_id);
743 		return -EINVAL;
744 	}
745 
746 	return 0;
747 }
748 
749 static int
750 eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
751 {
752 	uint16_t port_id;
753 
754 	if (tx_queue_id >= dev->data->nb_tx_queues) {
755 		port_id = dev->data->port_id;
756 		RTE_ETHDEV_LOG(ERR,
757 			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
758 			       tx_queue_id, port_id);
759 		return -EINVAL;
760 	}
761 
762 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
763 		port_id = dev->data->port_id;
764 		RTE_ETHDEV_LOG(ERR,
765 			       "Queue %u of device with port_id=%u has not been setup\n",
766 			       tx_queue_id, port_id);
767 		return -EINVAL;
768 	}
769 
770 	return 0;
771 }
772 
773 int
774 rte_eth_dev_is_valid_rxq(uint16_t port_id, uint16_t queue_id)
775 {
776 	struct rte_eth_dev *dev;
777 
778 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
779 	dev = &rte_eth_devices[port_id];
780 
781 	return eth_dev_validate_rx_queue(dev, queue_id);
782 }
783 
784 int
785 rte_eth_dev_is_valid_txq(uint16_t port_id, uint16_t queue_id)
786 {
787 	struct rte_eth_dev *dev;
788 
789 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
790 	dev = &rte_eth_devices[port_id];
791 
792 	return eth_dev_validate_tx_queue(dev, queue_id);
793 }
794 
795 int
796 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
797 {
798 	struct rte_eth_dev *dev;
799 	int ret;
800 
801 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
802 	dev = &rte_eth_devices[port_id];
803 
804 	if (!dev->data->dev_started) {
805 		RTE_ETHDEV_LOG(ERR,
806 			"Port %u must be started before start any queue\n",
807 			port_id);
808 		return -EINVAL;
809 	}
810 
811 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
812 	if (ret != 0)
813 		return ret;
814 
815 	if (*dev->dev_ops->rx_queue_start == NULL)
816 		return -ENOTSUP;
817 
818 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
819 		RTE_ETHDEV_LOG(INFO,
820 			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
821 			rx_queue_id, port_id);
822 		return -EINVAL;
823 	}
824 
825 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
826 		RTE_ETHDEV_LOG(INFO,
827 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
828 			rx_queue_id, port_id);
829 		return 0;
830 	}
831 
832 	ret = eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
833 
834 	rte_ethdev_trace_rx_queue_start(port_id, rx_queue_id, ret);
835 
836 	return ret;
837 }
838 
839 int
840 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
841 {
842 	struct rte_eth_dev *dev;
843 	int ret;
844 
845 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
846 	dev = &rte_eth_devices[port_id];
847 
848 	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
849 	if (ret != 0)
850 		return ret;
851 
852 	if (*dev->dev_ops->rx_queue_stop == NULL)
853 		return -ENOTSUP;
854 
855 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
856 		RTE_ETHDEV_LOG(INFO,
857 			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
858 			rx_queue_id, port_id);
859 		return -EINVAL;
860 	}
861 
862 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
863 		RTE_ETHDEV_LOG(INFO,
864 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
865 			rx_queue_id, port_id);
866 		return 0;
867 	}
868 
869 	ret = eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
870 
871 	rte_ethdev_trace_rx_queue_stop(port_id, rx_queue_id, ret);
872 
873 	return ret;
874 }
875 
876 int
877 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
878 {
879 	struct rte_eth_dev *dev;
880 	int ret;
881 
882 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
883 	dev = &rte_eth_devices[port_id];
884 
885 	if (!dev->data->dev_started) {
886 		RTE_ETHDEV_LOG(ERR,
887 			"Port %u must be started before start any queue\n",
888 			port_id);
889 		return -EINVAL;
890 	}
891 
892 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
893 	if (ret != 0)
894 		return ret;
895 
896 	if (*dev->dev_ops->tx_queue_start == NULL)
897 		return -ENOTSUP;
898 
899 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
900 		RTE_ETHDEV_LOG(INFO,
901 			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
902 			tx_queue_id, port_id);
903 		return -EINVAL;
904 	}
905 
906 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
907 		RTE_ETHDEV_LOG(INFO,
908 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
909 			tx_queue_id, port_id);
910 		return 0;
911 	}
912 
913 	ret = eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
914 
915 	rte_ethdev_trace_tx_queue_start(port_id, tx_queue_id, ret);
916 
917 	return ret;
918 }
919 
920 int
921 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
922 {
923 	struct rte_eth_dev *dev;
924 	int ret;
925 
926 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
927 	dev = &rte_eth_devices[port_id];
928 
929 	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
930 	if (ret != 0)
931 		return ret;
932 
933 	if (*dev->dev_ops->tx_queue_stop == NULL)
934 		return -ENOTSUP;
935 
936 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
937 		RTE_ETHDEV_LOG(INFO,
938 			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
939 			tx_queue_id, port_id);
940 		return -EINVAL;
941 	}
942 
943 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
944 		RTE_ETHDEV_LOG(INFO,
945 			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
946 			tx_queue_id, port_id);
947 		return 0;
948 	}
949 
950 	ret = eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
951 
952 	rte_ethdev_trace_tx_queue_stop(port_id, tx_queue_id, ret);
953 
954 	return ret;
955 }
956 
957 uint32_t
958 rte_eth_speed_bitflag(uint32_t speed, int duplex)
959 {
960 	uint32_t ret;
961 
962 	switch (speed) {
963 	case RTE_ETH_SPEED_NUM_10M:
964 		ret = duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
965 		break;
966 	case RTE_ETH_SPEED_NUM_100M:
967 		ret = duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
968 		break;
969 	case RTE_ETH_SPEED_NUM_1G:
970 		ret = RTE_ETH_LINK_SPEED_1G;
971 		break;
972 	case RTE_ETH_SPEED_NUM_2_5G:
973 		ret = RTE_ETH_LINK_SPEED_2_5G;
974 		break;
975 	case RTE_ETH_SPEED_NUM_5G:
976 		ret = RTE_ETH_LINK_SPEED_5G;
977 		break;
978 	case RTE_ETH_SPEED_NUM_10G:
979 		ret = RTE_ETH_LINK_SPEED_10G;
980 		break;
981 	case RTE_ETH_SPEED_NUM_20G:
982 		ret = RTE_ETH_LINK_SPEED_20G;
983 		break;
984 	case RTE_ETH_SPEED_NUM_25G:
985 		ret = RTE_ETH_LINK_SPEED_25G;
986 		break;
987 	case RTE_ETH_SPEED_NUM_40G:
988 		ret = RTE_ETH_LINK_SPEED_40G;
989 		break;
990 	case RTE_ETH_SPEED_NUM_50G:
991 		ret = RTE_ETH_LINK_SPEED_50G;
992 		break;
993 	case RTE_ETH_SPEED_NUM_56G:
994 		ret = RTE_ETH_LINK_SPEED_56G;
995 		break;
996 	case RTE_ETH_SPEED_NUM_100G:
997 		ret = RTE_ETH_LINK_SPEED_100G;
998 		break;
999 	case RTE_ETH_SPEED_NUM_200G:
1000 		ret = RTE_ETH_LINK_SPEED_200G;
1001 		break;
1002 	case RTE_ETH_SPEED_NUM_400G:
1003 		ret = RTE_ETH_LINK_SPEED_400G;
1004 		break;
1005 	default:
1006 		ret = 0;
1007 	}
1008 
1009 	rte_eth_trace_speed_bitflag(speed, duplex, ret);
1010 
1011 	return ret;
1012 }
1013 
1014 const char *
1015 rte_eth_dev_rx_offload_name(uint64_t offload)
1016 {
1017 	const char *name = "UNKNOWN";
1018 	unsigned int i;
1019 
1020 	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
1021 		if (offload == eth_dev_rx_offload_names[i].offload) {
1022 			name = eth_dev_rx_offload_names[i].name;
1023 			break;
1024 		}
1025 	}
1026 
1027 	rte_ethdev_trace_rx_offload_name(offload, name);
1028 
1029 	return name;
1030 }
1031 
1032 const char *
1033 rte_eth_dev_tx_offload_name(uint64_t offload)
1034 {
1035 	const char *name = "UNKNOWN";
1036 	unsigned int i;
1037 
1038 	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
1039 		if (offload == eth_dev_tx_offload_names[i].offload) {
1040 			name = eth_dev_tx_offload_names[i].name;
1041 			break;
1042 		}
1043 	}
1044 
1045 	rte_ethdev_trace_tx_offload_name(offload, name);
1046 
1047 	return name;
1048 }
1049 
1050 const char *
1051 rte_eth_dev_capability_name(uint64_t capability)
1052 {
1053 	const char *name = "UNKNOWN";
1054 	unsigned int i;
1055 
1056 	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
1057 		if (capability == rte_eth_dev_capa_names[i].offload) {
1058 			name = rte_eth_dev_capa_names[i].name;
1059 			break;
1060 		}
1061 	}
1062 
1063 	rte_ethdev_trace_capability_name(capability, name);
1064 
1065 	return name;
1066 }
1067 
1068 static inline int
1069 eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1070 		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1071 {
1072 	int ret = 0;
1073 
1074 	if (dev_info_size == 0) {
1075 		if (config_size != max_rx_pkt_len) {
1076 			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1077 				       " %u != %u is not allowed\n",
1078 				       port_id, config_size, max_rx_pkt_len);
1079 			ret = -EINVAL;
1080 		}
1081 	} else if (config_size > dev_info_size) {
1082 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1083 			       "> max allowed value %u\n", port_id, config_size,
1084 			       dev_info_size);
1085 		ret = -EINVAL;
1086 	} else if (config_size < RTE_ETHER_MIN_LEN) {
1087 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1088 			       "< min allowed value %u\n", port_id, config_size,
1089 			       (unsigned int)RTE_ETHER_MIN_LEN);
1090 		ret = -EINVAL;
1091 	}
1092 	return ret;
1093 }
1094 
1095 /*
1096  * Validate offloads that are requested through rte_eth_dev_configure against
1097  * the offloads successfully set by the Ethernet device.
1098  *
1099  * @param port_id
1100  *   The port identifier of the Ethernet device.
1101  * @param req_offloads
1102  *   The offloads that have been requested through `rte_eth_dev_configure`.
1103  * @param set_offloads
1104  *   The offloads successfully set by the Ethernet device.
1105  * @param offload_type
1106  *   The offload type i.e. Rx/Tx string.
1107  * @param offload_name
1108  *   The function that prints the offload name.
1109  * @return
1110  *   - (0) if validation successful.
1111  *   - (-EINVAL) if requested offload has been silently disabled.
1112  *
1113  */
1114 static int
1115 eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
1116 		  uint64_t set_offloads, const char *offload_type,
1117 		  const char *(*offload_name)(uint64_t))
1118 {
1119 	uint64_t offloads_diff = req_offloads ^ set_offloads;
1120 	uint64_t offload;
1121 	int ret = 0;
1122 
1123 	while (offloads_diff != 0) {
1124 		/* Check if any offload is requested but not enabled. */
1125 		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
1126 		if (offload & req_offloads) {
1127 			RTE_ETHDEV_LOG(ERR,
1128 				"Port %u failed to enable %s offload %s\n",
1129 				port_id, offload_type, offload_name(offload));
1130 			ret = -EINVAL;
1131 		}
1132 
1133 		/* Check if offload couldn't be disabled. */
1134 		if (offload & set_offloads) {
1135 			RTE_ETHDEV_LOG(DEBUG,
1136 				"Port %u %s offload %s is not requested but enabled\n",
1137 				port_id, offload_type, offload_name(offload));
1138 		}
1139 
1140 		offloads_diff &= ~offload;
1141 	}
1142 
1143 	return ret;
1144 }
1145 
1146 static uint32_t
1147 eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
1148 {
1149 	uint32_t overhead_len;
1150 
1151 	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
1152 		overhead_len = max_rx_pktlen - max_mtu;
1153 	else
1154 		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1155 
1156 	return overhead_len;
1157 }
1158 
1159 /* rte_eth_dev_info_get() should be called prior to this function */
1160 static int
1161 eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
1162 		uint16_t mtu)
1163 {
1164 	uint32_t overhead_len;
1165 	uint32_t frame_size;
1166 
1167 	if (mtu < dev_info->min_mtu) {
1168 		RTE_ETHDEV_LOG(ERR,
1169 			"MTU (%u) < device min MTU (%u) for port_id %u\n",
1170 			mtu, dev_info->min_mtu, port_id);
1171 		return -EINVAL;
1172 	}
1173 	if (mtu > dev_info->max_mtu) {
1174 		RTE_ETHDEV_LOG(ERR,
1175 			"MTU (%u) > device max MTU (%u) for port_id %u\n",
1176 			mtu, dev_info->max_mtu, port_id);
1177 		return -EINVAL;
1178 	}
1179 
1180 	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
1181 			dev_info->max_mtu);
1182 	frame_size = mtu + overhead_len;
1183 	if (frame_size < RTE_ETHER_MIN_LEN) {
1184 		RTE_ETHDEV_LOG(ERR,
1185 			"Frame size (%u) < min frame size (%u) for port_id %u\n",
1186 			frame_size, RTE_ETHER_MIN_LEN, port_id);
1187 		return -EINVAL;
1188 	}
1189 
1190 	if (frame_size > dev_info->max_rx_pktlen) {
1191 		RTE_ETHDEV_LOG(ERR,
1192 			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
1193 			frame_size, dev_info->max_rx_pktlen, port_id);
1194 		return -EINVAL;
1195 	}
1196 
1197 	return 0;
1198 }
1199 
1200 int
1201 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1202 		      const struct rte_eth_conf *dev_conf)
1203 {
1204 	struct rte_eth_dev *dev;
1205 	struct rte_eth_dev_info dev_info;
1206 	struct rte_eth_conf orig_conf;
1207 	int diag;
1208 	int ret;
1209 	uint16_t old_mtu;
1210 
1211 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1212 	dev = &rte_eth_devices[port_id];
1213 
1214 	if (dev_conf == NULL) {
1215 		RTE_ETHDEV_LOG(ERR,
1216 			"Cannot configure ethdev port %u from NULL config\n",
1217 			port_id);
1218 		return -EINVAL;
1219 	}
1220 
1221 	if (*dev->dev_ops->dev_configure == NULL)
1222 		return -ENOTSUP;
1223 
1224 	if (dev->data->dev_started) {
1225 		RTE_ETHDEV_LOG(ERR,
1226 			"Port %u must be stopped to allow configuration\n",
1227 			port_id);
1228 		return -EBUSY;
1229 	}
1230 
1231 	/*
1232 	 * Ensure that "dev_configured" is always 0 each time prepare to do
1233 	 * dev_configure() to avoid any non-anticipated behaviour.
1234 	 * And set to 1 when dev_configure() is executed successfully.
1235 	 */
1236 	dev->data->dev_configured = 0;
1237 
1238 	 /* Store original config, as rollback required on failure */
1239 	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1240 
1241 	/*
1242 	 * Copy the dev_conf parameter into the dev structure.
1243 	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1244 	 */
1245 	if (dev_conf != &dev->data->dev_conf)
1246 		memcpy(&dev->data->dev_conf, dev_conf,
1247 		       sizeof(dev->data->dev_conf));
1248 
1249 	/* Backup mtu for rollback */
1250 	old_mtu = dev->data->mtu;
1251 
1252 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1253 	if (ret != 0)
1254 		goto rollback;
1255 
1256 	/* If number of queues specified by application for both Rx and Tx is
1257 	 * zero, use driver preferred values. This cannot be done individually
1258 	 * as it is valid for either Tx or Rx (but not both) to be zero.
1259 	 * If driver does not provide any preferred valued, fall back on
1260 	 * EAL defaults.
1261 	 */
1262 	if (nb_rx_q == 0 && nb_tx_q == 0) {
1263 		nb_rx_q = dev_info.default_rxportconf.nb_queues;
1264 		if (nb_rx_q == 0)
1265 			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1266 		nb_tx_q = dev_info.default_txportconf.nb_queues;
1267 		if (nb_tx_q == 0)
1268 			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1269 	}
1270 
1271 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1272 		RTE_ETHDEV_LOG(ERR,
1273 			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
1274 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1275 		ret = -EINVAL;
1276 		goto rollback;
1277 	}
1278 
1279 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1280 		RTE_ETHDEV_LOG(ERR,
1281 			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
1282 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1283 		ret = -EINVAL;
1284 		goto rollback;
1285 	}
1286 
1287 	/*
1288 	 * Check that the numbers of Rx and Tx queues are not greater
1289 	 * than the maximum number of Rx and Tx queues supported by the
1290 	 * configured device.
1291 	 */
1292 	if (nb_rx_q > dev_info.max_rx_queues) {
1293 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1294 			port_id, nb_rx_q, dev_info.max_rx_queues);
1295 		ret = -EINVAL;
1296 		goto rollback;
1297 	}
1298 
1299 	if (nb_tx_q > dev_info.max_tx_queues) {
1300 		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1301 			port_id, nb_tx_q, dev_info.max_tx_queues);
1302 		ret = -EINVAL;
1303 		goto rollback;
1304 	}
1305 
1306 	/* Check that the device supports requested interrupts */
1307 	if ((dev_conf->intr_conf.lsc == 1) &&
1308 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1309 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1310 			dev->device->driver->name);
1311 		ret = -EINVAL;
1312 		goto rollback;
1313 	}
1314 	if ((dev_conf->intr_conf.rmv == 1) &&
1315 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1316 		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1317 			dev->device->driver->name);
1318 		ret = -EINVAL;
1319 		goto rollback;
1320 	}
1321 
1322 	if (dev_conf->rxmode.mtu == 0)
1323 		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
1324 
1325 	ret = eth_dev_validate_mtu(port_id, &dev_info,
1326 			dev->data->dev_conf.rxmode.mtu);
1327 	if (ret != 0)
1328 		goto rollback;
1329 
1330 	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
1331 
1332 	/*
1333 	 * If LRO is enabled, check that the maximum aggregated packet
1334 	 * size is supported by the configured device.
1335 	 */
1336 	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1337 		uint32_t max_rx_pktlen;
1338 		uint32_t overhead_len;
1339 
1340 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1341 				dev_info.max_mtu);
1342 		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
1343 		if (dev_conf->rxmode.max_lro_pkt_size == 0)
1344 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1345 		ret = eth_dev_check_lro_pkt_size(port_id,
1346 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1347 				max_rx_pktlen,
1348 				dev_info.max_lro_pkt_size);
1349 		if (ret != 0)
1350 			goto rollback;
1351 	}
1352 
1353 	/* Any requested offloading must be within its device capabilities */
1354 	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1355 	     dev_conf->rxmode.offloads) {
1356 		RTE_ETHDEV_LOG(ERR,
1357 			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1358 			"capabilities 0x%"PRIx64" in %s()\n",
1359 			port_id, dev_conf->rxmode.offloads,
1360 			dev_info.rx_offload_capa,
1361 			__func__);
1362 		ret = -EINVAL;
1363 		goto rollback;
1364 	}
1365 	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1366 	     dev_conf->txmode.offloads) {
1367 		RTE_ETHDEV_LOG(ERR,
1368 			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1369 			"capabilities 0x%"PRIx64" in %s()\n",
1370 			port_id, dev_conf->txmode.offloads,
1371 			dev_info.tx_offload_capa,
1372 			__func__);
1373 		ret = -EINVAL;
1374 		goto rollback;
1375 	}
1376 
1377 	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1378 		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1379 
1380 	/* Check that device supports requested rss hash functions. */
1381 	if ((dev_info.flow_type_rss_offloads |
1382 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1383 	    dev_info.flow_type_rss_offloads) {
1384 		RTE_ETHDEV_LOG(ERR,
1385 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1386 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1387 			dev_info.flow_type_rss_offloads);
1388 		ret = -EINVAL;
1389 		goto rollback;
1390 	}
1391 
1392 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1393 	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
1394 	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
1395 		RTE_ETHDEV_LOG(ERR,
1396 			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1397 			port_id,
1398 			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
1399 		ret = -EINVAL;
1400 		goto rollback;
1401 	}
1402 
1403 	/*
1404 	 * Setup new number of Rx/Tx queues and reconfigure device.
1405 	 */
1406 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
1407 	if (diag != 0) {
1408 		RTE_ETHDEV_LOG(ERR,
1409 			"Port%u eth_dev_rx_queue_config = %d\n",
1410 			port_id, diag);
1411 		ret = diag;
1412 		goto rollback;
1413 	}
1414 
1415 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
1416 	if (diag != 0) {
1417 		RTE_ETHDEV_LOG(ERR,
1418 			"Port%u eth_dev_tx_queue_config = %d\n",
1419 			port_id, diag);
1420 		eth_dev_rx_queue_config(dev, 0);
1421 		ret = diag;
1422 		goto rollback;
1423 	}
1424 
1425 	diag = (*dev->dev_ops->dev_configure)(dev);
1426 	if (diag != 0) {
1427 		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1428 			port_id, diag);
1429 		ret = eth_err(port_id, diag);
1430 		goto reset_queues;
1431 	}
1432 
1433 	/* Initialize Rx profiling if enabled at compilation time. */
1434 	diag = __rte_eth_dev_profile_init(port_id, dev);
1435 	if (diag != 0) {
1436 		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
1437 			port_id, diag);
1438 		ret = eth_err(port_id, diag);
1439 		goto reset_queues;
1440 	}
1441 
1442 	/* Validate Rx offloads. */
1443 	diag = eth_dev_validate_offloads(port_id,
1444 			dev_conf->rxmode.offloads,
1445 			dev->data->dev_conf.rxmode.offloads, "Rx",
1446 			rte_eth_dev_rx_offload_name);
1447 	if (diag != 0) {
1448 		ret = diag;
1449 		goto reset_queues;
1450 	}
1451 
1452 	/* Validate Tx offloads. */
1453 	diag = eth_dev_validate_offloads(port_id,
1454 			dev_conf->txmode.offloads,
1455 			dev->data->dev_conf.txmode.offloads, "Tx",
1456 			rte_eth_dev_tx_offload_name);
1457 	if (diag != 0) {
1458 		ret = diag;
1459 		goto reset_queues;
1460 	}
1461 
1462 	dev->data->dev_configured = 1;
1463 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1464 	return 0;
1465 reset_queues:
1466 	eth_dev_rx_queue_config(dev, 0);
1467 	eth_dev_tx_queue_config(dev, 0);
1468 rollback:
1469 	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1470 	if (old_mtu != dev->data->mtu)
1471 		dev->data->mtu = old_mtu;
1472 
1473 	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
1474 	return ret;
1475 }
1476 
1477 static void
1478 eth_dev_mac_restore(struct rte_eth_dev *dev,
1479 			struct rte_eth_dev_info *dev_info)
1480 {
1481 	struct rte_ether_addr *addr;
1482 	uint16_t i;
1483 	uint32_t pool = 0;
1484 	uint64_t pool_mask;
1485 
1486 	/* replay MAC address configuration including default MAC */
1487 	addr = &dev->data->mac_addrs[0];
1488 	if (*dev->dev_ops->mac_addr_set != NULL)
1489 		(*dev->dev_ops->mac_addr_set)(dev, addr);
1490 	else if (*dev->dev_ops->mac_addr_add != NULL)
1491 		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1492 
1493 	if (*dev->dev_ops->mac_addr_add != NULL) {
1494 		for (i = 1; i < dev_info->max_mac_addrs; i++) {
1495 			addr = &dev->data->mac_addrs[i];
1496 
1497 			/* skip zero address */
1498 			if (rte_is_zero_ether_addr(addr))
1499 				continue;
1500 
1501 			pool = 0;
1502 			pool_mask = dev->data->mac_pool_sel[i];
1503 
1504 			do {
1505 				if (pool_mask & UINT64_C(1))
1506 					(*dev->dev_ops->mac_addr_add)(dev,
1507 						addr, i, pool);
1508 				pool_mask >>= 1;
1509 				pool++;
1510 			} while (pool_mask);
1511 		}
1512 	}
1513 }
1514 
1515 static int
1516 eth_dev_config_restore(struct rte_eth_dev *dev,
1517 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
1518 {
1519 	int ret;
1520 
1521 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1522 		eth_dev_mac_restore(dev, dev_info);
1523 
1524 	/* replay promiscuous configuration */
1525 	/*
1526 	 * use callbacks directly since we don't need port_id check and
1527 	 * would like to bypass the same value set
1528 	 */
1529 	if (rte_eth_promiscuous_get(port_id) == 1 &&
1530 	    *dev->dev_ops->promiscuous_enable != NULL) {
1531 		ret = eth_err(port_id,
1532 			      (*dev->dev_ops->promiscuous_enable)(dev));
1533 		if (ret != 0 && ret != -ENOTSUP) {
1534 			RTE_ETHDEV_LOG(ERR,
1535 				"Failed to enable promiscuous mode for device (port %u): %s\n",
1536 				port_id, rte_strerror(-ret));
1537 			return ret;
1538 		}
1539 	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
1540 		   *dev->dev_ops->promiscuous_disable != NULL) {
1541 		ret = eth_err(port_id,
1542 			      (*dev->dev_ops->promiscuous_disable)(dev));
1543 		if (ret != 0 && ret != -ENOTSUP) {
1544 			RTE_ETHDEV_LOG(ERR,
1545 				"Failed to disable promiscuous mode for device (port %u): %s\n",
1546 				port_id, rte_strerror(-ret));
1547 			return ret;
1548 		}
1549 	}
1550 
1551 	/* replay all multicast configuration */
1552 	/*
1553 	 * use callbacks directly since we don't need port_id check and
1554 	 * would like to bypass the same value set
1555 	 */
1556 	if (rte_eth_allmulticast_get(port_id) == 1 &&
1557 	    *dev->dev_ops->allmulticast_enable != NULL) {
1558 		ret = eth_err(port_id,
1559 			      (*dev->dev_ops->allmulticast_enable)(dev));
1560 		if (ret != 0 && ret != -ENOTSUP) {
1561 			RTE_ETHDEV_LOG(ERR,
1562 				"Failed to enable allmulticast mode for device (port %u): %s\n",
1563 				port_id, rte_strerror(-ret));
1564 			return ret;
1565 		}
1566 	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
1567 		   *dev->dev_ops->allmulticast_disable != NULL) {
1568 		ret = eth_err(port_id,
1569 			      (*dev->dev_ops->allmulticast_disable)(dev));
1570 		if (ret != 0 && ret != -ENOTSUP) {
1571 			RTE_ETHDEV_LOG(ERR,
1572 				"Failed to disable allmulticast mode for device (port %u): %s\n",
1573 				port_id, rte_strerror(-ret));
1574 			return ret;
1575 		}
1576 	}
1577 
1578 	return 0;
1579 }
1580 
1581 int
1582 rte_eth_dev_start(uint16_t port_id)
1583 {
1584 	struct rte_eth_dev *dev;
1585 	struct rte_eth_dev_info dev_info;
1586 	int diag;
1587 	int ret, ret_stop;
1588 
1589 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1590 	dev = &rte_eth_devices[port_id];
1591 
1592 	if (*dev->dev_ops->dev_start == NULL)
1593 		return -ENOTSUP;
1594 
1595 	if (dev->data->dev_configured == 0) {
1596 		RTE_ETHDEV_LOG(INFO,
1597 			"Device with port_id=%"PRIu16" is not configured.\n",
1598 			port_id);
1599 		return -EINVAL;
1600 	}
1601 
1602 	if (dev->data->dev_started != 0) {
1603 		RTE_ETHDEV_LOG(INFO,
1604 			"Device with port_id=%"PRIu16" already started\n",
1605 			port_id);
1606 		return 0;
1607 	}
1608 
1609 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1610 	if (ret != 0)
1611 		return ret;
1612 
1613 	/* Lets restore MAC now if device does not support live change */
1614 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1615 		eth_dev_mac_restore(dev, &dev_info);
1616 
1617 	diag = (*dev->dev_ops->dev_start)(dev);
1618 	if (diag == 0)
1619 		dev->data->dev_started = 1;
1620 	else
1621 		return eth_err(port_id, diag);
1622 
1623 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
1624 	if (ret != 0) {
1625 		RTE_ETHDEV_LOG(ERR,
1626 			"Error during restoring configuration for device (port %u): %s\n",
1627 			port_id, rte_strerror(-ret));
1628 		ret_stop = rte_eth_dev_stop(port_id);
1629 		if (ret_stop != 0) {
1630 			RTE_ETHDEV_LOG(ERR,
1631 				"Failed to stop device (port %u): %s\n",
1632 				port_id, rte_strerror(-ret_stop));
1633 		}
1634 
1635 		return ret;
1636 	}
1637 
1638 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
1639 		if (*dev->dev_ops->link_update == NULL)
1640 			return -ENOTSUP;
1641 		(*dev->dev_ops->link_update)(dev, 0);
1642 	}
1643 
1644 	/* expose selection of PMD fast-path functions */
1645 	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);
1646 
1647 	rte_ethdev_trace_start(port_id);
1648 	return 0;
1649 }
1650 
1651 int
1652 rte_eth_dev_stop(uint16_t port_id)
1653 {
1654 	struct rte_eth_dev *dev;
1655 	int ret;
1656 
1657 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1658 	dev = &rte_eth_devices[port_id];
1659 
1660 	if (*dev->dev_ops->dev_stop == NULL)
1661 		return -ENOTSUP;
1662 
1663 	if (dev->data->dev_started == 0) {
1664 		RTE_ETHDEV_LOG(INFO,
1665 			"Device with port_id=%"PRIu16" already stopped\n",
1666 			port_id);
1667 		return 0;
1668 	}
1669 
1670 	/* point fast-path functions to dummy ones */
1671 	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);
1672 
1673 	ret = (*dev->dev_ops->dev_stop)(dev);
1674 	if (ret == 0)
1675 		dev->data->dev_started = 0;
1676 	rte_ethdev_trace_stop(port_id, ret);
1677 
1678 	return ret;
1679 }
1680 
1681 int
1682 rte_eth_dev_set_link_up(uint16_t port_id)
1683 {
1684 	struct rte_eth_dev *dev;
1685 	int ret;
1686 
1687 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1688 	dev = &rte_eth_devices[port_id];
1689 
1690 	if (*dev->dev_ops->dev_set_link_up == NULL)
1691 		return -ENOTSUP;
1692 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1693 
1694 	rte_ethdev_trace_set_link_up(port_id, ret);
1695 
1696 	return ret;
1697 }
1698 
1699 int
1700 rte_eth_dev_set_link_down(uint16_t port_id)
1701 {
1702 	struct rte_eth_dev *dev;
1703 	int ret;
1704 
1705 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1706 	dev = &rte_eth_devices[port_id];
1707 
1708 	if (*dev->dev_ops->dev_set_link_down == NULL)
1709 		return -ENOTSUP;
1710 	ret = eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1711 
1712 	rte_ethdev_trace_set_link_down(port_id, ret);
1713 
1714 	return ret;
1715 }
1716 
1717 int
1718 rte_eth_dev_close(uint16_t port_id)
1719 {
1720 	struct rte_eth_dev *dev;
1721 	int firsterr, binerr;
1722 	int *lasterr = &firsterr;
1723 
1724 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1725 	dev = &rte_eth_devices[port_id];
1726 
1727 	/*
1728 	 * Secondary process needs to close device to release process private
1729 	 * resources. But secondary process should not be obliged to wait
1730 	 * for device stop before closing ethdev.
1731 	 */
1732 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1733 			dev->data->dev_started) {
1734 		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
1735 			       port_id);
1736 		return -EINVAL;
1737 	}
1738 
1739 	if (*dev->dev_ops->dev_close == NULL)
1740 		return -ENOTSUP;
1741 	*lasterr = (*dev->dev_ops->dev_close)(dev);
1742 	if (*lasterr != 0)
1743 		lasterr = &binerr;
1744 
1745 	rte_ethdev_trace_close(port_id);
1746 	*lasterr = rte_eth_dev_release_port(dev);
1747 
1748 	return firsterr;
1749 }
1750 
1751 int
1752 rte_eth_dev_reset(uint16_t port_id)
1753 {
1754 	struct rte_eth_dev *dev;
1755 	int ret;
1756 
1757 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1758 	dev = &rte_eth_devices[port_id];
1759 
1760 	if (*dev->dev_ops->dev_reset == NULL)
1761 		return -ENOTSUP;
1762 
1763 	ret = rte_eth_dev_stop(port_id);
1764 	if (ret != 0) {
1765 		RTE_ETHDEV_LOG(ERR,
1766 			"Failed to stop device (port %u) before reset: %s - ignore\n",
1767 			port_id, rte_strerror(-ret));
1768 	}
1769 	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
1770 
1771 	rte_ethdev_trace_reset(port_id, ret);
1772 
1773 	return ret;
1774 }
1775 
1776 int
1777 rte_eth_dev_is_removed(uint16_t port_id)
1778 {
1779 	struct rte_eth_dev *dev;
1780 	int ret;
1781 
1782 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1783 	dev = &rte_eth_devices[port_id];
1784 
1785 	if (dev->state == RTE_ETH_DEV_REMOVED)
1786 		return 1;
1787 
1788 	if (*dev->dev_ops->is_removed == NULL)
1789 		return 0;
1790 
1791 	ret = dev->dev_ops->is_removed(dev);
1792 	if (ret != 0)
1793 		/* Device is physically removed. */
1794 		dev->state = RTE_ETH_DEV_REMOVED;
1795 
1796 	rte_ethdev_trace_is_removed(port_id, ret);
1797 
1798 	return ret;
1799 }
1800 
1801 static int
1802 rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
1803 			 uint16_t min_length)
1804 {
1805 	uint16_t data_room_size;
1806 
1807 	/*
1808 	 * Check the size of the mbuf data buffer, this value
1809 	 * must be provided in the private data of the memory pool.
1810 	 * First check that the memory pool(s) has a valid private data.
1811 	 */
1812 	if (mp->private_data_size <
1813 			sizeof(struct rte_pktmbuf_pool_private)) {
1814 		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
1815 			mp->name, mp->private_data_size,
1816 			(unsigned int)
1817 			sizeof(struct rte_pktmbuf_pool_private));
1818 		return -ENOSPC;
1819 	}
1820 	data_room_size = rte_pktmbuf_data_room_size(mp);
1821 	if (data_room_size < offset + min_length) {
1822 		RTE_ETHDEV_LOG(ERR,
1823 			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
1824 			       mp->name, data_room_size,
1825 			       offset + min_length, offset, min_length);
1826 		return -EINVAL;
1827 	}
1828 	return 0;
1829 }
1830 
1831 static int
1832 eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
1833 {
1834 	int cnt;
1835 
1836 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
1837 	if (cnt <= 0)
1838 		return cnt;
1839 
1840 	*ptypes = malloc(sizeof(uint32_t) * cnt);
1841 	if (*ptypes == NULL)
1842 		return -ENOMEM;
1843 
1844 	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
1845 	if (cnt <= 0) {
1846 		free(*ptypes);
1847 		*ptypes = NULL;
1848 	}
1849 	return cnt;
1850 }
1851 
1852 static int
1853 rte_eth_rx_queue_check_split(uint16_t port_id,
1854 			const struct rte_eth_rxseg_split *rx_seg,
1855 			uint16_t n_seg, uint32_t *mbp_buf_size,
1856 			const struct rte_eth_dev_info *dev_info)
1857 {
1858 	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
1859 	struct rte_mempool *mp_first;
1860 	uint32_t offset_mask;
1861 	uint16_t seg_idx;
1862 	int ret = 0;
1863 	int ptype_cnt;
1864 	uint32_t *ptypes;
1865 	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
1866 	int i;
1867 
1868 	if (n_seg > seg_capa->max_nseg) {
1869 		RTE_ETHDEV_LOG(ERR,
1870 			       "Requested Rx segments %u exceed supported %u\n",
1871 			       n_seg, seg_capa->max_nseg);
1872 		return -EINVAL;
1873 	}
1874 	/*
1875 	 * Check the sizes and offsets against buffer sizes
1876 	 * for each segment specified in extended configuration.
1877 	 */
1878 	mp_first = rx_seg[0].mp;
1879 	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
1880 
1881 	ptypes = NULL;
1882 	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);
1883 
1884 	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
1885 		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
1886 		uint32_t length = rx_seg[seg_idx].length;
1887 		uint32_t offset = rx_seg[seg_idx].offset;
1888 		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;
1889 
1890 		if (mpl == NULL) {
1891 			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
1892 			ret = -EINVAL;
1893 			goto out;
1894 		}
1895 		if (seg_idx != 0 && mp_first != mpl &&
1896 		    seg_capa->multi_pools == 0) {
1897 			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
1898 			ret = -ENOTSUP;
1899 			goto out;
1900 		}
1901 		if (offset != 0) {
1902 			if (seg_capa->offset_allowed == 0) {
1903 				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
1904 				ret = -ENOTSUP;
1905 				goto out;
1906 			}
1907 			if (offset & offset_mask) {
1908 				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
1909 					       offset,
1910 					       seg_capa->offset_align_log2);
1911 				ret = -EINVAL;
1912 				goto out;
1913 			}
1914 		}
1915 
1916 		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
1917 		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
1918 		if (proto_hdr != 0) {
1919 			/* Split based on protocol headers. */
1920 			if (length != 0) {
1921 				RTE_ETHDEV_LOG(ERR,
1922 					"Do not set length split and protocol split within a segment\n"
1923 					);
1924 				ret = -EINVAL;
1925 				goto out;
1926 			}
1927 			if ((proto_hdr & prev_proto_hdrs) != 0) {
1928 				RTE_ETHDEV_LOG(ERR,
1929 					"Repeat with previous protocol headers or proto-split after length-based split\n"
1930 					);
1931 				ret = -EINVAL;
1932 				goto out;
1933 			}
1934 			if (ptype_cnt <= 0) {
1935 				RTE_ETHDEV_LOG(ERR,
1936 					"Port %u failed to get supported buffer split header protocols\n",
1937 					port_id);
1938 				ret = -ENOTSUP;
1939 				goto out;
1940 			}
1941 			for (i = 0; i < ptype_cnt; i++) {
1942 				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
1943 					break;
1944 			}
1945 			if (i == ptype_cnt) {
1946 				RTE_ETHDEV_LOG(ERR,
1947 					"Requested Rx split header protocols 0x%x is not supported.\n",
1948 					proto_hdr);
1949 				ret = -EINVAL;
1950 				goto out;
1951 			}
1952 			prev_proto_hdrs |= proto_hdr;
1953 		} else {
1954 			/* Split at fixed length. */
1955 			length = length != 0 ? length : *mbp_buf_size;
1956 			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
1957 		}
1958 
1959 		ret = rte_eth_check_rx_mempool(mpl, offset, length);
1960 		if (ret != 0)
1961 			goto out;
1962 	}
1963 out:
1964 	free(ptypes);
1965 	return ret;
1966 }
1967 
1968 static int
1969 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
1970 			       uint16_t n_mempools, uint32_t *min_buf_size,
1971 			       const struct rte_eth_dev_info *dev_info)
1972 {
1973 	uint16_t pool_idx;
1974 	int ret;
1975 
1976 	if (n_mempools > dev_info->max_rx_mempools) {
1977 		RTE_ETHDEV_LOG(ERR,
1978 			       "Too many Rx mempools %u vs maximum %u\n",
1979 			       n_mempools, dev_info->max_rx_mempools);
1980 		return -EINVAL;
1981 	}
1982 
1983 	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
1984 		struct rte_mempool *mp = rx_mempools[pool_idx];
1985 
1986 		if (mp == NULL) {
1987 			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
1988 			return -EINVAL;
1989 		}
1990 
1991 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
1992 					       dev_info->min_rx_bufsize);
1993 		if (ret != 0)
1994 			return ret;
1995 
1996 		*min_buf_size = RTE_MIN(*min_buf_size,
1997 					rte_pktmbuf_data_room_size(mp));
1998 	}
1999 
2000 	return 0;
2001 }
2002 
2003 int
2004 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2005 		       uint16_t nb_rx_desc, unsigned int socket_id,
2006 		       const struct rte_eth_rxconf *rx_conf,
2007 		       struct rte_mempool *mp)
2008 {
2009 	int ret;
2010 	uint64_t rx_offloads;
2011 	uint32_t mbp_buf_size = UINT32_MAX;
2012 	struct rte_eth_dev *dev;
2013 	struct rte_eth_dev_info dev_info;
2014 	struct rte_eth_rxconf local_conf;
2015 
2016 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2017 	dev = &rte_eth_devices[port_id];
2018 
2019 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2020 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2021 		return -EINVAL;
2022 	}
2023 
2024 	if (*dev->dev_ops->rx_queue_setup == NULL)
2025 		return -ENOTSUP;
2026 
2027 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2028 	if (ret != 0)
2029 		return ret;
2030 
2031 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
2032 	if (rx_conf != NULL)
2033 		rx_offloads |= rx_conf->offloads;
2034 
2035 	/* Ensure that we have one and only one source of Rx buffers */
2036 	if ((mp != NULL) +
2037 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
2038 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
2039 		RTE_ETHDEV_LOG(ERR,
2040 			       "Ambiguous Rx mempools configuration\n");
2041 		return -EINVAL;
2042 	}
2043 
2044 	if (mp != NULL) {
2045 		/* Single pool configuration check. */
2046 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
2047 					       dev_info.min_rx_bufsize);
2048 		if (ret != 0)
2049 			return ret;
2050 
2051 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
2052 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
2053 		const struct rte_eth_rxseg_split *rx_seg;
2054 		uint16_t n_seg;
2055 
2056 		/* Extended multi-segment configuration check. */
2057 		if (rx_conf->rx_seg == NULL) {
2058 			RTE_ETHDEV_LOG(ERR,
2059 				       "Memory pool is null and no multi-segment configuration provided\n");
2060 			return -EINVAL;
2061 		}
2062 
2063 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
2064 		n_seg = rx_conf->rx_nseg;
2065 
2066 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
2067 			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
2068 							   &mbp_buf_size,
2069 							   &dev_info);
2070 			if (ret != 0)
2071 				return ret;
2072 		} else {
2073 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
2074 			return -EINVAL;
2075 		}
2076 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
2077 		/* Extended multi-pool configuration check. */
2078 		if (rx_conf->rx_mempools == NULL) {
2079 			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
2080 			return -EINVAL;
2081 		}
2082 
2083 		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
2084 						     rx_conf->rx_nmempool,
2085 						     &mbp_buf_size,
2086 						     &dev_info);
2087 		if (ret != 0)
2088 			return ret;
2089 	} else {
2090 		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
2091 		return -EINVAL;
2092 	}
2093 
2094 	/* Use default specified by driver, if nb_rx_desc is zero */
2095 	if (nb_rx_desc == 0) {
2096 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
2097 		/* If driver default is also zero, fall back on EAL default */
2098 		if (nb_rx_desc == 0)
2099 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
2100 	}
2101 
2102 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
2103 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
2104 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
2105 
2106 		RTE_ETHDEV_LOG(ERR,
2107 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2108 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
2109 			dev_info.rx_desc_lim.nb_min,
2110 			dev_info.rx_desc_lim.nb_align);
2111 		return -EINVAL;
2112 	}
2113 
2114 	if (dev->data->dev_started &&
2115 		!(dev_info.dev_capa &
2116 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
2117 		return -EBUSY;
2118 
2119 	if (dev->data->dev_started &&
2120 		(dev->data->rx_queue_state[rx_queue_id] !=
2121 			RTE_ETH_QUEUE_STATE_STOPPED))
2122 		return -EBUSY;
2123 
2124 	eth_dev_rxq_release(dev, rx_queue_id);
2125 
2126 	if (rx_conf == NULL)
2127 		rx_conf = &dev_info.default_rxconf;
2128 
2129 	local_conf = *rx_conf;
2130 
2131 	/*
2132 	 * If an offloading has already been enabled in
2133 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2134 	 * so there is no need to enable it in this queue again.
2135 	 * The local_conf.offloads input to underlying PMD only carries
2136 	 * those offloadings which are only enabled on this queue and
2137 	 * not enabled on all queues.
2138 	 */
2139 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
2140 
2141 	/*
2142 	 * New added offloadings for this queue are those not enabled in
2143 	 * rte_eth_dev_configure() and they must be per-queue type.
2144 	 * A pure per-port offloading can't be enabled on a queue while
2145 	 * disabled on another queue. A pure per-port offloading can't
2146 	 * be enabled for any queue as new added one if it hasn't been
2147 	 * enabled in rte_eth_dev_configure().
2148 	 */
2149 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2150 	     local_conf.offloads) {
2151 		RTE_ETHDEV_LOG(ERR,
2152 			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2153 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2154 			port_id, rx_queue_id, local_conf.offloads,
2155 			dev_info.rx_queue_offload_capa,
2156 			__func__);
2157 		return -EINVAL;
2158 	}
2159 
2160 	if (local_conf.share_group > 0 &&
2161 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2162 		RTE_ETHDEV_LOG(ERR,
2163 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
2164 			port_id, rx_queue_id, local_conf.share_group);
2165 		return -EINVAL;
2166 	}
2167 
2168 	/*
2169 	 * If LRO is enabled, check that the maximum aggregated packet
2170 	 * size is supported by the configured device.
2171 	 */
2172 	/* Get the real Ethernet overhead length */
2173 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2174 		uint32_t overhead_len;
2175 		uint32_t max_rx_pktlen;
2176 		int ret;
2177 
2178 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2179 				dev_info.max_mtu);
2180 		max_rx_pktlen = dev->data->mtu + overhead_len;
2181 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2182 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2183 		ret = eth_dev_check_lro_pkt_size(port_id,
2184 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2185 				max_rx_pktlen,
2186 				dev_info.max_lro_pkt_size);
2187 		if (ret != 0)
2188 			return ret;
2189 	}
2190 
2191 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2192 					      socket_id, &local_conf, mp);
2193 	if (!ret) {
2194 		if (!dev->data->min_rx_buf_size ||
2195 		    dev->data->min_rx_buf_size > mbp_buf_size)
2196 			dev->data->min_rx_buf_size = mbp_buf_size;
2197 	}
2198 
2199 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2200 		rx_conf, ret);
2201 	return eth_err(port_id, ret);
2202 }
2203 
2204 int
2205 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2206 			       uint16_t nb_rx_desc,
2207 			       const struct rte_eth_hairpin_conf *conf)
2208 {
2209 	int ret;
2210 	struct rte_eth_dev *dev;
2211 	struct rte_eth_hairpin_cap cap;
2212 	int i;
2213 	int count;
2214 
2215 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2216 	dev = &rte_eth_devices[port_id];
2217 
2218 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2219 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2220 		return -EINVAL;
2221 	}
2222 
2223 	if (conf == NULL) {
2224 		RTE_ETHDEV_LOG(ERR,
2225 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2226 			port_id);
2227 		return -EINVAL;
2228 	}
2229 
2230 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2231 	if (ret != 0)
2232 		return ret;
2233 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2234 		return -ENOTSUP;
2235 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2236 	if (nb_rx_desc == 0)
2237 		nb_rx_desc = cap.max_nb_desc;
2238 	if (nb_rx_desc > cap.max_nb_desc) {
2239 		RTE_ETHDEV_LOG(ERR,
2240 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
2241 			nb_rx_desc, cap.max_nb_desc);
2242 		return -EINVAL;
2243 	}
2244 	if (conf->peer_count > cap.max_rx_2_tx) {
2245 		RTE_ETHDEV_LOG(ERR,
2246 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
2247 			conf->peer_count, cap.max_rx_2_tx);
2248 		return -EINVAL;
2249 	}
2250 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2251 		RTE_ETHDEV_LOG(ERR,
2252 			"Attempt to use locked device memory for Rx queue, which is not supported");
2253 		return -EINVAL;
2254 	}
2255 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2256 		RTE_ETHDEV_LOG(ERR,
2257 			"Attempt to use DPDK memory for Rx queue, which is not supported");
2258 		return -EINVAL;
2259 	}
2260 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2261 		RTE_ETHDEV_LOG(ERR,
2262 			"Attempt to use mutually exclusive memory settings for Rx queue");
2263 		return -EINVAL;
2264 	}
2265 	if (conf->force_memory &&
2266 	    !conf->use_locked_device_memory &&
2267 	    !conf->use_rte_memory) {
2268 		RTE_ETHDEV_LOG(ERR,
2269 			"Attempt to force Rx queue memory settings, but none is set");
2270 		return -EINVAL;
2271 	}
2272 	if (conf->peer_count == 0) {
2273 		RTE_ETHDEV_LOG(ERR,
2274 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
2275 			conf->peer_count);
2276 		return -EINVAL;
2277 	}
2278 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2279 	     cap.max_nb_queues != UINT16_MAX; i++) {
2280 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2281 			count++;
2282 	}
2283 	if (count > cap.max_nb_queues) {
2284 		RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d",
2285 		cap.max_nb_queues);
2286 		return -EINVAL;
2287 	}
2288 	if (dev->data->dev_started)
2289 		return -EBUSY;
2290 	eth_dev_rxq_release(dev, rx_queue_id);
2291 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2292 						      nb_rx_desc, conf);
2293 	if (ret == 0)
2294 		dev->data->rx_queue_state[rx_queue_id] =
2295 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2296 	ret = eth_err(port_id, ret);
2297 
2298 	rte_eth_trace_rx_hairpin_queue_setup(port_id, rx_queue_id, nb_rx_desc,
2299 					     conf, ret);
2300 
2301 	return ret;
2302 }
2303 
2304 int
2305 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2306 		       uint16_t nb_tx_desc, unsigned int socket_id,
2307 		       const struct rte_eth_txconf *tx_conf)
2308 {
2309 	struct rte_eth_dev *dev;
2310 	struct rte_eth_dev_info dev_info;
2311 	struct rte_eth_txconf local_conf;
2312 	int ret;
2313 
2314 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2315 	dev = &rte_eth_devices[port_id];
2316 
2317 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2318 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2319 		return -EINVAL;
2320 	}
2321 
2322 	if (*dev->dev_ops->tx_queue_setup == NULL)
2323 		return -ENOTSUP;
2324 
2325 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2326 	if (ret != 0)
2327 		return ret;
2328 
2329 	/* Use default specified by driver, if nb_tx_desc is zero */
2330 	if (nb_tx_desc == 0) {
2331 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2332 		/* If driver default is zero, fall back on EAL default */
2333 		if (nb_tx_desc == 0)
2334 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2335 	}
2336 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2337 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2338 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2339 		RTE_ETHDEV_LOG(ERR,
2340 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
2341 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2342 			dev_info.tx_desc_lim.nb_min,
2343 			dev_info.tx_desc_lim.nb_align);
2344 		return -EINVAL;
2345 	}
2346 
2347 	if (dev->data->dev_started &&
2348 		!(dev_info.dev_capa &
2349 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2350 		return -EBUSY;
2351 
2352 	if (dev->data->dev_started &&
2353 		(dev->data->tx_queue_state[tx_queue_id] !=
2354 			RTE_ETH_QUEUE_STATE_STOPPED))
2355 		return -EBUSY;
2356 
2357 	eth_dev_txq_release(dev, tx_queue_id);
2358 
2359 	if (tx_conf == NULL)
2360 		tx_conf = &dev_info.default_txconf;
2361 
2362 	local_conf = *tx_conf;
2363 
2364 	/*
2365 	 * If an offloading has already been enabled in
2366 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2367 	 * so there is no need to enable it in this queue again.
2368 	 * The local_conf.offloads input to underlying PMD only carries
2369 	 * those offloadings which are only enabled on this queue and
2370 	 * not enabled on all queues.
2371 	 */
2372 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2373 
2374 	/*
2375 	 * New added offloadings for this queue are those not enabled in
2376 	 * rte_eth_dev_configure() and they must be per-queue type.
2377 	 * A pure per-port offloading can't be enabled on a queue while
2378 	 * disabled on another queue. A pure per-port offloading can't
2379 	 * be enabled for any queue as new added one if it hasn't been
2380 	 * enabled in rte_eth_dev_configure().
2381 	 */
2382 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2383 	     local_conf.offloads) {
2384 		RTE_ETHDEV_LOG(ERR,
2385 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2386 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2387 			port_id, tx_queue_id, local_conf.offloads,
2388 			dev_info.tx_queue_offload_capa,
2389 			__func__);
2390 		return -EINVAL;
2391 	}
2392 
2393 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2394 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2395 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2396 }
2397 
2398 int
2399 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2400 			       uint16_t nb_tx_desc,
2401 			       const struct rte_eth_hairpin_conf *conf)
2402 {
2403 	struct rte_eth_dev *dev;
2404 	struct rte_eth_hairpin_cap cap;
2405 	int i;
2406 	int count;
2407 	int ret;
2408 
2409 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2410 	dev = &rte_eth_devices[port_id];
2411 
2412 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2413 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2414 		return -EINVAL;
2415 	}
2416 
2417 	if (conf == NULL) {
2418 		RTE_ETHDEV_LOG(ERR,
2419 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2420 			port_id);
2421 		return -EINVAL;
2422 	}
2423 
2424 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2425 	if (ret != 0)
2426 		return ret;
2427 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2428 		return -ENOTSUP;
2429 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2430 	if (nb_tx_desc == 0)
2431 		nb_tx_desc = cap.max_nb_desc;
2432 	if (nb_tx_desc > cap.max_nb_desc) {
2433 		RTE_ETHDEV_LOG(ERR,
2434 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2435 			nb_tx_desc, cap.max_nb_desc);
2436 		return -EINVAL;
2437 	}
2438 	if (conf->peer_count > cap.max_tx_2_rx) {
2439 		RTE_ETHDEV_LOG(ERR,
2440 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2441 			conf->peer_count, cap.max_tx_2_rx);
2442 		return -EINVAL;
2443 	}
2444 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2445 		RTE_ETHDEV_LOG(ERR,
2446 			"Attempt to use locked device memory for Tx queue, which is not supported");
2447 		return -EINVAL;
2448 	}
2449 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2450 		RTE_ETHDEV_LOG(ERR,
2451 			"Attempt to use DPDK memory for Tx queue, which is not supported");
2452 		return -EINVAL;
2453 	}
2454 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2455 		RTE_ETHDEV_LOG(ERR,
2456 			"Attempt to use mutually exclusive memory settings for Tx queue");
2457 		return -EINVAL;
2458 	}
2459 	if (conf->force_memory &&
2460 	    !conf->use_locked_device_memory &&
2461 	    !conf->use_rte_memory) {
2462 		RTE_ETHDEV_LOG(ERR,
2463 			"Attempt to force Tx queue memory settings, but none is set");
2464 		return -EINVAL;
2465 	}
2466 	if (conf->peer_count == 0) {
2467 		RTE_ETHDEV_LOG(ERR,
2468 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2469 			conf->peer_count);
2470 		return -EINVAL;
2471 	}
2472 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2473 	     cap.max_nb_queues != UINT16_MAX; i++) {
2474 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2475 			count++;
2476 	}
2477 	if (count > cap.max_nb_queues) {
2478 		RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d",
2479 		cap.max_nb_queues);
2480 		return -EINVAL;
2481 	}
2482 	if (dev->data->dev_started)
2483 		return -EBUSY;
2484 	eth_dev_txq_release(dev, tx_queue_id);
2485 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2486 		(dev, tx_queue_id, nb_tx_desc, conf);
2487 	if (ret == 0)
2488 		dev->data->tx_queue_state[tx_queue_id] =
2489 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2490 	ret = eth_err(port_id, ret);
2491 
2492 	rte_eth_trace_tx_hairpin_queue_setup(port_id, tx_queue_id, nb_tx_desc,
2493 					     conf, ret);
2494 
2495 	return ret;
2496 }
2497 
2498 int
2499 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2500 {
2501 	struct rte_eth_dev *dev;
2502 	int ret;
2503 
2504 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2505 	dev = &rte_eth_devices[tx_port];
2506 
2507 	if (dev->data->dev_started == 0) {
2508 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2509 		return -EBUSY;
2510 	}
2511 
2512 	if (*dev->dev_ops->hairpin_bind == NULL)
2513 		return -ENOTSUP;
2514 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2515 	if (ret != 0)
2516 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2517 			       " to Rx %d (%d - all ports)\n",
2518 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2519 
2520 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
2521 
2522 	return ret;
2523 }
2524 
2525 int
2526 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2527 {
2528 	struct rte_eth_dev *dev;
2529 	int ret;
2530 
2531 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2532 	dev = &rte_eth_devices[tx_port];
2533 
2534 	if (dev->data->dev_started == 0) {
2535 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2536 		return -EBUSY;
2537 	}
2538 
2539 	if (*dev->dev_ops->hairpin_unbind == NULL)
2540 		return -ENOTSUP;
2541 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2542 	if (ret != 0)
2543 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2544 			       " from Rx %d (%d - all ports)\n",
2545 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2546 
2547 	rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret);
2548 
2549 	return ret;
2550 }
2551 
2552 int
2553 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2554 			       size_t len, uint32_t direction)
2555 {
2556 	struct rte_eth_dev *dev;
2557 	int ret;
2558 
2559 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2560 	dev = &rte_eth_devices[port_id];
2561 
2562 	if (peer_ports == NULL) {
2563 		RTE_ETHDEV_LOG(ERR,
2564 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2565 			port_id);
2566 		return -EINVAL;
2567 	}
2568 
2569 	if (len == 0) {
2570 		RTE_ETHDEV_LOG(ERR,
2571 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2572 			port_id);
2573 		return -EINVAL;
2574 	}
2575 
2576 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2577 		return -ENOTSUP;
2578 
2579 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2580 						      len, direction);
2581 	if (ret < 0)
2582 		RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
2583 			       port_id, direction ? "Rx" : "Tx");
2584 
2585 	rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len,
2586 					     direction, ret);
2587 
2588 	return ret;
2589 }
2590 
2591 void
2592 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2593 		void *userdata __rte_unused)
2594 {
2595 	rte_pktmbuf_free_bulk(pkts, unsent);
2596 
2597 	rte_eth_trace_tx_buffer_drop_callback((void **)pkts, unsent);
2598 }
2599 
2600 void
2601 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2602 		void *userdata)
2603 {
2604 	uint64_t *count = userdata;
2605 
2606 	rte_pktmbuf_free_bulk(pkts, unsent);
2607 	*count += unsent;
2608 
2609 	rte_eth_trace_tx_buffer_count_callback((void **)pkts, unsent, *count);
2610 }
2611 
2612 int
2613 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2614 		buffer_tx_error_fn cbfn, void *userdata)
2615 {
2616 	if (buffer == NULL) {
2617 		RTE_ETHDEV_LOG(ERR,
2618 			"Cannot set Tx buffer error callback to NULL buffer\n");
2619 		return -EINVAL;
2620 	}
2621 
2622 	buffer->error_callback = cbfn;
2623 	buffer->error_userdata = userdata;
2624 
2625 	rte_eth_trace_tx_buffer_set_err_callback(buffer);
2626 
2627 	return 0;
2628 }
2629 
2630 int
2631 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2632 {
2633 	int ret = 0;
2634 
2635 	if (buffer == NULL) {
2636 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2637 		return -EINVAL;
2638 	}
2639 
2640 	buffer->size = size;
2641 	if (buffer->error_callback == NULL) {
2642 		ret = rte_eth_tx_buffer_set_err_callback(
2643 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2644 	}
2645 
2646 	rte_eth_trace_tx_buffer_init(buffer, size, ret);
2647 
2648 	return ret;
2649 }
2650 
2651 int
2652 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2653 {
2654 	struct rte_eth_dev *dev;
2655 	int ret;
2656 
2657 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658 	dev = &rte_eth_devices[port_id];
2659 
2660 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2661 		return -ENOTSUP;
2662 
2663 	/* Call driver to free pending mbufs. */
2664 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2665 					       free_cnt);
2666 	ret = eth_err(port_id, ret);
2667 
2668 	rte_eth_trace_tx_done_cleanup(port_id, queue_id, free_cnt, ret);
2669 
2670 	return ret;
2671 }
2672 
2673 int
2674 rte_eth_promiscuous_enable(uint16_t port_id)
2675 {
2676 	struct rte_eth_dev *dev;
2677 	int diag = 0;
2678 
2679 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2680 	dev = &rte_eth_devices[port_id];
2681 
2682 	if (dev->data->promiscuous == 1)
2683 		return 0;
2684 
2685 	if (*dev->dev_ops->promiscuous_enable == NULL)
2686 		return -ENOTSUP;
2687 
2688 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2689 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2690 
2691 	diag = eth_err(port_id, diag);
2692 
2693 	rte_eth_trace_promiscuous_enable(port_id, dev->data->promiscuous,
2694 					 diag);
2695 
2696 	return diag;
2697 }
2698 
2699 int
2700 rte_eth_promiscuous_disable(uint16_t port_id)
2701 {
2702 	struct rte_eth_dev *dev;
2703 	int diag = 0;
2704 
2705 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2706 	dev = &rte_eth_devices[port_id];
2707 
2708 	if (dev->data->promiscuous == 0)
2709 		return 0;
2710 
2711 	if (*dev->dev_ops->promiscuous_disable == NULL)
2712 		return -ENOTSUP;
2713 
2714 	dev->data->promiscuous = 0;
2715 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2716 	if (diag != 0)
2717 		dev->data->promiscuous = 1;
2718 
2719 	diag = eth_err(port_id, diag);
2720 
2721 	rte_eth_trace_promiscuous_disable(port_id, dev->data->promiscuous,
2722 					  diag);
2723 
2724 	return diag;
2725 }
2726 
2727 int
2728 rte_eth_promiscuous_get(uint16_t port_id)
2729 {
2730 	struct rte_eth_dev *dev;
2731 
2732 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2733 	dev = &rte_eth_devices[port_id];
2734 
2735 	rte_eth_trace_promiscuous_get(port_id, dev->data->promiscuous);
2736 
2737 	return dev->data->promiscuous;
2738 }
2739 
2740 int
2741 rte_eth_allmulticast_enable(uint16_t port_id)
2742 {
2743 	struct rte_eth_dev *dev;
2744 	int diag;
2745 
2746 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2747 	dev = &rte_eth_devices[port_id];
2748 
2749 	if (dev->data->all_multicast == 1)
2750 		return 0;
2751 
2752 	if (*dev->dev_ops->allmulticast_enable == NULL)
2753 		return -ENOTSUP;
2754 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2755 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2756 
2757 	diag = eth_err(port_id, diag);
2758 
2759 	rte_eth_trace_allmulticast_enable(port_id, dev->data->all_multicast,
2760 					  diag);
2761 
2762 	return diag;
2763 }
2764 
2765 int
2766 rte_eth_allmulticast_disable(uint16_t port_id)
2767 {
2768 	struct rte_eth_dev *dev;
2769 	int diag;
2770 
2771 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2772 	dev = &rte_eth_devices[port_id];
2773 
2774 	if (dev->data->all_multicast == 0)
2775 		return 0;
2776 
2777 	if (*dev->dev_ops->allmulticast_disable == NULL)
2778 		return -ENOTSUP;
2779 	dev->data->all_multicast = 0;
2780 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2781 	if (diag != 0)
2782 		dev->data->all_multicast = 1;
2783 
2784 	diag = eth_err(port_id, diag);
2785 
2786 	rte_eth_trace_allmulticast_disable(port_id, dev->data->all_multicast,
2787 					   diag);
2788 
2789 	return diag;
2790 }
2791 
2792 int
2793 rte_eth_allmulticast_get(uint16_t port_id)
2794 {
2795 	struct rte_eth_dev *dev;
2796 
2797 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2798 	dev = &rte_eth_devices[port_id];
2799 
2800 	rte_eth_trace_allmulticast_get(port_id, dev->data->all_multicast);
2801 
2802 	return dev->data->all_multicast;
2803 }
2804 
2805 int
2806 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2807 {
2808 	struct rte_eth_dev *dev;
2809 
2810 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2811 	dev = &rte_eth_devices[port_id];
2812 
2813 	if (eth_link == NULL) {
2814 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2815 			port_id);
2816 		return -EINVAL;
2817 	}
2818 
2819 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2820 		rte_eth_linkstatus_get(dev, eth_link);
2821 	else {
2822 		if (*dev->dev_ops->link_update == NULL)
2823 			return -ENOTSUP;
2824 		(*dev->dev_ops->link_update)(dev, 1);
2825 		*eth_link = dev->data->dev_link;
2826 	}
2827 
2828 	rte_eth_trace_link_get(port_id, eth_link);
2829 
2830 	return 0;
2831 }
2832 
2833 int
2834 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2835 {
2836 	struct rte_eth_dev *dev;
2837 
2838 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2839 	dev = &rte_eth_devices[port_id];
2840 
2841 	if (eth_link == NULL) {
2842 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2843 			port_id);
2844 		return -EINVAL;
2845 	}
2846 
2847 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2848 		rte_eth_linkstatus_get(dev, eth_link);
2849 	else {
2850 		if (*dev->dev_ops->link_update == NULL)
2851 			return -ENOTSUP;
2852 		(*dev->dev_ops->link_update)(dev, 0);
2853 		*eth_link = dev->data->dev_link;
2854 	}
2855 
2856 	rte_eth_trace_link_get_nowait(port_id, eth_link);
2857 
2858 	return 0;
2859 }
2860 
2861 const char *
2862 rte_eth_link_speed_to_str(uint32_t link_speed)
2863 {
2864 	const char *ret;
2865 
2866 	switch (link_speed) {
2867 	case RTE_ETH_SPEED_NUM_NONE:
2868 		ret = "None";
2869 		break;
2870 	case RTE_ETH_SPEED_NUM_10M:
2871 		ret = "10 Mbps";
2872 		break;
2873 	case RTE_ETH_SPEED_NUM_100M:
2874 		ret = "100 Mbps";
2875 		break;
2876 	case RTE_ETH_SPEED_NUM_1G:
2877 		ret = "1 Gbps";
2878 		break;
2879 	case RTE_ETH_SPEED_NUM_2_5G:
2880 		ret = "2.5 Gbps";
2881 		break;
2882 	case RTE_ETH_SPEED_NUM_5G:
2883 		ret = "5 Gbps";
2884 		break;
2885 	case RTE_ETH_SPEED_NUM_10G:
2886 		ret = "10 Gbps";
2887 		break;
2888 	case RTE_ETH_SPEED_NUM_20G:
2889 		ret = "20 Gbps";
2890 		break;
2891 	case RTE_ETH_SPEED_NUM_25G:
2892 		ret = "25 Gbps";
2893 		break;
2894 	case RTE_ETH_SPEED_NUM_40G:
2895 		ret = "40 Gbps";
2896 		break;
2897 	case RTE_ETH_SPEED_NUM_50G:
2898 		ret = "50 Gbps";
2899 		break;
2900 	case RTE_ETH_SPEED_NUM_56G:
2901 		ret = "56 Gbps";
2902 		break;
2903 	case RTE_ETH_SPEED_NUM_100G:
2904 		ret = "100 Gbps";
2905 		break;
2906 	case RTE_ETH_SPEED_NUM_200G:
2907 		ret = "200 Gbps";
2908 		break;
2909 	case RTE_ETH_SPEED_NUM_400G:
2910 		ret = "400 Gbps";
2911 		break;
2912 	case RTE_ETH_SPEED_NUM_UNKNOWN:
2913 		ret = "Unknown";
2914 		break;
2915 	default:
2916 		ret = "Invalid";
2917 	}
2918 
2919 	rte_eth_trace_link_speed_to_str(link_speed, ret);
2920 
2921 	return ret;
2922 }
2923 
2924 int
2925 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2926 {
2927 	int ret;
2928 
2929 	if (str == NULL) {
2930 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2931 		return -EINVAL;
2932 	}
2933 
2934 	if (len == 0) {
2935 		RTE_ETHDEV_LOG(ERR,
2936 			"Cannot convert link to string with zero size\n");
2937 		return -EINVAL;
2938 	}
2939 
2940 	if (eth_link == NULL) {
2941 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2942 		return -EINVAL;
2943 	}
2944 
2945 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2946 		ret = snprintf(str, len, "Link down");
2947 	else
2948 		ret = snprintf(str, len, "Link up at %s %s %s",
2949 			rte_eth_link_speed_to_str(eth_link->link_speed),
2950 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2951 			"FDX" : "HDX",
2952 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2953 			"Autoneg" : "Fixed");
2954 
2955 	rte_eth_trace_link_to_str(len, eth_link, str, ret);
2956 
2957 	return ret;
2958 }
2959 
2960 int
2961 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2962 {
2963 	struct rte_eth_dev *dev;
2964 	int ret;
2965 
2966 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2967 	dev = &rte_eth_devices[port_id];
2968 
2969 	if (stats == NULL) {
2970 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2971 			port_id);
2972 		return -EINVAL;
2973 	}
2974 
2975 	memset(stats, 0, sizeof(*stats));
2976 
2977 	if (*dev->dev_ops->stats_get == NULL)
2978 		return -ENOTSUP;
2979 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2980 	ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2981 
2982 	rte_eth_trace_stats_get(port_id, stats, ret);
2983 
2984 	return ret;
2985 }
2986 
2987 int
2988 rte_eth_stats_reset(uint16_t port_id)
2989 {
2990 	struct rte_eth_dev *dev;
2991 	int ret;
2992 
2993 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2994 	dev = &rte_eth_devices[port_id];
2995 
2996 	if (*dev->dev_ops->stats_reset == NULL)
2997 		return -ENOTSUP;
2998 	ret = (*dev->dev_ops->stats_reset)(dev);
2999 	if (ret != 0)
3000 		return eth_err(port_id, ret);
3001 
3002 	dev->data->rx_mbuf_alloc_failed = 0;
3003 
3004 	rte_eth_trace_stats_reset(port_id);
3005 
3006 	return 0;
3007 }
3008 
3009 static inline int
3010 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
3011 {
3012 	uint16_t nb_rxqs, nb_txqs;
3013 	int count;
3014 
3015 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3016 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3017 
3018 	count = RTE_NB_STATS;
3019 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
3020 		count += nb_rxqs * RTE_NB_RXQ_STATS;
3021 		count += nb_txqs * RTE_NB_TXQ_STATS;
3022 	}
3023 
3024 	return count;
3025 }
3026 
3027 static int
3028 eth_dev_get_xstats_count(uint16_t port_id)
3029 {
3030 	struct rte_eth_dev *dev;
3031 	int count;
3032 
3033 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3034 	dev = &rte_eth_devices[port_id];
3035 	if (dev->dev_ops->xstats_get_names != NULL) {
3036 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
3037 		if (count < 0)
3038 			return eth_err(port_id, count);
3039 	} else
3040 		count = 0;
3041 
3042 
3043 	count += eth_dev_get_xstats_basic_count(dev);
3044 
3045 	return count;
3046 }
3047 
3048 int
3049 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3050 		uint64_t *id)
3051 {
3052 	int cnt_xstats, idx_xstat;
3053 
3054 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3055 
3056 	if (xstat_name == NULL) {
3057 		RTE_ETHDEV_LOG(ERR,
3058 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
3059 			port_id);
3060 		return -ENOMEM;
3061 	}
3062 
3063 	if (id == NULL) {
3064 		RTE_ETHDEV_LOG(ERR,
3065 			"Cannot get ethdev port %u xstats ID to NULL\n",
3066 			port_id);
3067 		return -ENOMEM;
3068 	}
3069 
3070 	/* Get count */
3071 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
3072 	if (cnt_xstats  < 0) {
3073 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
3074 		return -ENODEV;
3075 	}
3076 
3077 	/* Get id-name lookup table */
3078 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
3079 
3080 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
3081 			port_id, xstats_names, cnt_xstats, NULL)) {
3082 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
3083 		return -1;
3084 	}
3085 
3086 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
3087 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
3088 			*id = idx_xstat;
3089 
3090 			rte_eth_trace_xstats_get_id_by_name(port_id,
3091 							    xstat_name, *id);
3092 
3093 			return 0;
3094 		};
3095 	}
3096 
3097 	return -EINVAL;
3098 }
3099 
3100 /* retrieve basic stats names */
3101 static int
3102 eth_basic_stats_get_names(struct rte_eth_dev *dev,
3103 	struct rte_eth_xstat_name *xstats_names)
3104 {
3105 	int cnt_used_entries = 0;
3106 	uint32_t idx, id_queue;
3107 	uint16_t num_q;
3108 
3109 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
3110 		strlcpy(xstats_names[cnt_used_entries].name,
3111 			eth_dev_stats_strings[idx].name,
3112 			sizeof(xstats_names[0].name));
3113 		cnt_used_entries++;
3114 	}
3115 
3116 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3117 		return cnt_used_entries;
3118 
3119 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3120 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3121 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
3122 			snprintf(xstats_names[cnt_used_entries].name,
3123 				sizeof(xstats_names[0].name),
3124 				"rx_q%u_%s",
3125 				id_queue, eth_dev_rxq_stats_strings[idx].name);
3126 			cnt_used_entries++;
3127 		}
3128 
3129 	}
3130 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3131 	for (id_queue = 0; id_queue < num_q; id_queue++) {
3132 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
3133 			snprintf(xstats_names[cnt_used_entries].name,
3134 				sizeof(xstats_names[0].name),
3135 				"tx_q%u_%s",
3136 				id_queue, eth_dev_txq_stats_strings[idx].name);
3137 			cnt_used_entries++;
3138 		}
3139 	}
3140 	return cnt_used_entries;
3141 }
3142 
3143 /* retrieve ethdev extended statistics names */
3144 int
3145 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3146 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3147 	uint64_t *ids)
3148 {
3149 	struct rte_eth_xstat_name *xstats_names_copy;
3150 	unsigned int no_basic_stat_requested = 1;
3151 	unsigned int no_ext_stat_requested = 1;
3152 	unsigned int expected_entries;
3153 	unsigned int basic_count;
3154 	struct rte_eth_dev *dev;
3155 	unsigned int i;
3156 	int ret;
3157 
3158 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3159 	dev = &rte_eth_devices[port_id];
3160 
3161 	basic_count = eth_dev_get_xstats_basic_count(dev);
3162 	ret = eth_dev_get_xstats_count(port_id);
3163 	if (ret < 0)
3164 		return ret;
3165 	expected_entries = (unsigned int)ret;
3166 
3167 	/* Return max number of stats if no ids given */
3168 	if (!ids) {
3169 		if (!xstats_names)
3170 			return expected_entries;
3171 		else if (xstats_names && size < expected_entries)
3172 			return expected_entries;
3173 	}
3174 
3175 	if (ids && !xstats_names)
3176 		return -EINVAL;
3177 
3178 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
3179 		uint64_t ids_copy[size];
3180 
3181 		for (i = 0; i < size; i++) {
3182 			if (ids[i] < basic_count) {
3183 				no_basic_stat_requested = 0;
3184 				break;
3185 			}
3186 
3187 			/*
3188 			 * Convert ids to xstats ids that PMD knows.
3189 			 * ids known by user are basic + extended stats.
3190 			 */
3191 			ids_copy[i] = ids[i] - basic_count;
3192 		}
3193 
3194 		if (no_basic_stat_requested)
3195 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
3196 					ids_copy, xstats_names, size);
3197 	}
3198 
3199 	/* Retrieve all stats */
3200 	if (!ids) {
3201 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
3202 				expected_entries);
3203 		if (num_stats < 0 || num_stats > (int)expected_entries)
3204 			return num_stats;
3205 		else
3206 			return expected_entries;
3207 	}
3208 
3209 	xstats_names_copy = calloc(expected_entries,
3210 		sizeof(struct rte_eth_xstat_name));
3211 
3212 	if (!xstats_names_copy) {
3213 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
3214 		return -ENOMEM;
3215 	}
3216 
3217 	if (ids) {
3218 		for (i = 0; i < size; i++) {
3219 			if (ids[i] >= basic_count) {
3220 				no_ext_stat_requested = 0;
3221 				break;
3222 			}
3223 		}
3224 	}
3225 
3226 	/* Fill xstats_names_copy structure */
3227 	if (ids && no_ext_stat_requested) {
3228 		eth_basic_stats_get_names(dev, xstats_names_copy);
3229 	} else {
3230 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
3231 			expected_entries);
3232 		if (ret < 0) {
3233 			free(xstats_names_copy);
3234 			return ret;
3235 		}
3236 	}
3237 
3238 	/* Filter stats */
3239 	for (i = 0; i < size; i++) {
3240 		if (ids[i] >= expected_entries) {
3241 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3242 			free(xstats_names_copy);
3243 			return -1;
3244 		}
3245 		xstats_names[i] = xstats_names_copy[ids[i]];
3246 
3247 		rte_eth_trace_xstats_get_names_by_id(port_id, &xstats_names[i],
3248 						     ids[i]);
3249 	}
3250 
3251 	free(xstats_names_copy);
3252 	return size;
3253 }
3254 
3255 int
3256 rte_eth_xstats_get_names(uint16_t port_id,
3257 	struct rte_eth_xstat_name *xstats_names,
3258 	unsigned int size)
3259 {
3260 	struct rte_eth_dev *dev;
3261 	int cnt_used_entries;
3262 	int cnt_expected_entries;
3263 	int cnt_driver_entries;
3264 	int i;
3265 
3266 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3267 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
3268 			(int)size < cnt_expected_entries)
3269 		return cnt_expected_entries;
3270 
3271 	/* port_id checked in eth_dev_get_xstats_count() */
3272 	dev = &rte_eth_devices[port_id];
3273 
3274 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3275 
3276 	if (dev->dev_ops->xstats_get_names != NULL) {
3277 		/* If there are any driver-specific xstats, append them
3278 		 * to end of list.
3279 		 */
3280 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3281 			dev,
3282 			xstats_names + cnt_used_entries,
3283 			size - cnt_used_entries);
3284 		if (cnt_driver_entries < 0)
3285 			return eth_err(port_id, cnt_driver_entries);
3286 		cnt_used_entries += cnt_driver_entries;
3287 	}
3288 
3289 	for (i = 0; i < cnt_used_entries; i++)
3290 		rte_eth_trace_xstats_get_names(port_id, i, &xstats_names[i],
3291 					       size, cnt_used_entries);
3292 
3293 	return cnt_used_entries;
3294 }
3295 
3296 
3297 static int
3298 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3299 {
3300 	struct rte_eth_dev *dev;
3301 	struct rte_eth_stats eth_stats;
3302 	unsigned int count = 0, i, q;
3303 	uint64_t val, *stats_ptr;
3304 	uint16_t nb_rxqs, nb_txqs;
3305 	int ret;
3306 
3307 	ret = rte_eth_stats_get(port_id, &eth_stats);
3308 	if (ret < 0)
3309 		return ret;
3310 
3311 	dev = &rte_eth_devices[port_id];
3312 
3313 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3314 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3315 
3316 	/* global stats */
3317 	for (i = 0; i < RTE_NB_STATS; i++) {
3318 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3319 					eth_dev_stats_strings[i].offset);
3320 		val = *stats_ptr;
3321 		xstats[count++].value = val;
3322 	}
3323 
3324 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3325 		return count;
3326 
3327 	/* per-rxq stats */
3328 	for (q = 0; q < nb_rxqs; q++) {
3329 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3330 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3331 					eth_dev_rxq_stats_strings[i].offset +
3332 					q * sizeof(uint64_t));
3333 			val = *stats_ptr;
3334 			xstats[count++].value = val;
3335 		}
3336 	}
3337 
3338 	/* per-txq stats */
3339 	for (q = 0; q < nb_txqs; q++) {
3340 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3341 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3342 					eth_dev_txq_stats_strings[i].offset +
3343 					q * sizeof(uint64_t));
3344 			val = *stats_ptr;
3345 			xstats[count++].value = val;
3346 		}
3347 	}
3348 	return count;
3349 }
3350 
3351 /* retrieve ethdev extended statistics */
3352 int
3353 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3354 			 uint64_t *values, unsigned int size)
3355 {
3356 	unsigned int no_basic_stat_requested = 1;
3357 	unsigned int no_ext_stat_requested = 1;
3358 	unsigned int num_xstats_filled;
3359 	unsigned int basic_count;
3360 	uint16_t expected_entries;
3361 	struct rte_eth_dev *dev;
3362 	unsigned int i;
3363 	int ret;
3364 
3365 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3366 	dev = &rte_eth_devices[port_id];
3367 
3368 	ret = eth_dev_get_xstats_count(port_id);
3369 	if (ret < 0)
3370 		return ret;
3371 	expected_entries = (uint16_t)ret;
3372 	struct rte_eth_xstat xstats[expected_entries];
3373 	basic_count = eth_dev_get_xstats_basic_count(dev);
3374 
3375 	/* Return max number of stats if no ids given */
3376 	if (!ids) {
3377 		if (!values)
3378 			return expected_entries;
3379 		else if (values && size < expected_entries)
3380 			return expected_entries;
3381 	}
3382 
3383 	if (ids && !values)
3384 		return -EINVAL;
3385 
3386 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3387 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3388 		uint64_t ids_copy[size];
3389 
3390 		for (i = 0; i < size; i++) {
3391 			if (ids[i] < basic_count) {
3392 				no_basic_stat_requested = 0;
3393 				break;
3394 			}
3395 
3396 			/*
3397 			 * Convert ids to xstats ids that PMD knows.
3398 			 * ids known by user are basic + extended stats.
3399 			 */
3400 			ids_copy[i] = ids[i] - basic_count;
3401 		}
3402 
3403 		if (no_basic_stat_requested)
3404 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3405 					values, size);
3406 	}
3407 
3408 	if (ids) {
3409 		for (i = 0; i < size; i++) {
3410 			if (ids[i] >= basic_count) {
3411 				no_ext_stat_requested = 0;
3412 				break;
3413 			}
3414 		}
3415 	}
3416 
3417 	/* Fill the xstats structure */
3418 	if (ids && no_ext_stat_requested)
3419 		ret = eth_basic_stats_get(port_id, xstats);
3420 	else
3421 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3422 
3423 	if (ret < 0)
3424 		return ret;
3425 	num_xstats_filled = (unsigned int)ret;
3426 
3427 	/* Return all stats */
3428 	if (!ids) {
3429 		for (i = 0; i < num_xstats_filled; i++)
3430 			values[i] = xstats[i].value;
3431 		return expected_entries;
3432 	}
3433 
3434 	/* Filter stats */
3435 	for (i = 0; i < size; i++) {
3436 		if (ids[i] >= expected_entries) {
3437 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3438 			return -1;
3439 		}
3440 		values[i] = xstats[ids[i]].value;
3441 	}
3442 
3443 	rte_eth_trace_xstats_get_by_id(port_id, ids, values, size);
3444 
3445 	return size;
3446 }
3447 
3448 int
3449 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3450 	unsigned int n)
3451 {
3452 	struct rte_eth_dev *dev;
3453 	unsigned int count, i;
3454 	signed int xcount = 0;
3455 	int ret;
3456 
3457 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3458 	if (xstats == NULL && n > 0)
3459 		return -EINVAL;
3460 	dev = &rte_eth_devices[port_id];
3461 
3462 	count = eth_dev_get_xstats_basic_count(dev);
3463 
3464 	/* implemented by the driver */
3465 	if (dev->dev_ops->xstats_get != NULL) {
3466 		/* Retrieve the xstats from the driver at the end of the
3467 		 * xstats struct.
3468 		 */
3469 		xcount = (*dev->dev_ops->xstats_get)(dev,
3470 				     (n > count) ? xstats + count : NULL,
3471 				     (n > count) ? n - count : 0);
3472 
3473 		if (xcount < 0)
3474 			return eth_err(port_id, xcount);
3475 	}
3476 
3477 	if (n < count + xcount || xstats == NULL)
3478 		return count + xcount;
3479 
3480 	/* now fill the xstats structure */
3481 	ret = eth_basic_stats_get(port_id, xstats);
3482 	if (ret < 0)
3483 		return ret;
3484 	count = ret;
3485 
3486 	for (i = 0; i < count; i++)
3487 		xstats[i].id = i;
3488 	/* add an offset to driver-specific stats */
3489 	for ( ; i < count + xcount; i++)
3490 		xstats[i].id += count;
3491 
3492 	for (i = 0; i < n; i++)
3493 		rte_eth_trace_xstats_get(port_id, xstats[i]);
3494 
3495 	return count + xcount;
3496 }
3497 
3498 /* reset ethdev extended statistics */
3499 int
3500 rte_eth_xstats_reset(uint16_t port_id)
3501 {
3502 	struct rte_eth_dev *dev;
3503 
3504 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3505 	dev = &rte_eth_devices[port_id];
3506 
3507 	/* implemented by the driver */
3508 	if (dev->dev_ops->xstats_reset != NULL) {
3509 		int ret = eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3510 
3511 		rte_eth_trace_xstats_reset(port_id, ret);
3512 
3513 		return ret;
3514 	}
3515 
3516 	/* fallback to default */
3517 	return rte_eth_stats_reset(port_id);
3518 }
3519 
3520 static int
3521 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3522 		uint8_t stat_idx, uint8_t is_rx)
3523 {
3524 	struct rte_eth_dev *dev;
3525 
3526 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3527 	dev = &rte_eth_devices[port_id];
3528 
3529 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3530 		return -EINVAL;
3531 
3532 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3533 		return -EINVAL;
3534 
3535 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3536 		return -EINVAL;
3537 
3538 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3539 		return -ENOTSUP;
3540 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3541 }
3542 
3543 int
3544 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3545 		uint8_t stat_idx)
3546 {
3547 	int ret;
3548 
3549 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3550 						tx_queue_id,
3551 						stat_idx, STAT_QMAP_TX));
3552 
3553 	rte_ethdev_trace_set_tx_queue_stats_mapping(port_id, tx_queue_id,
3554 						    stat_idx, ret);
3555 
3556 	return ret;
3557 }
3558 
3559 int
3560 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3561 		uint8_t stat_idx)
3562 {
3563 	int ret;
3564 
3565 	ret = eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3566 						rx_queue_id,
3567 						stat_idx, STAT_QMAP_RX));
3568 
3569 	rte_ethdev_trace_set_rx_queue_stats_mapping(port_id, rx_queue_id,
3570 						    stat_idx, ret);
3571 
3572 	return ret;
3573 }
3574 
3575 int
3576 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3577 {
3578 	struct rte_eth_dev *dev;
3579 	int ret;
3580 
3581 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3582 	dev = &rte_eth_devices[port_id];
3583 
3584 	if (fw_version == NULL && fw_size > 0) {
3585 		RTE_ETHDEV_LOG(ERR,
3586 			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
3587 			port_id);
3588 		return -EINVAL;
3589 	}
3590 
3591 	if (*dev->dev_ops->fw_version_get == NULL)
3592 		return -ENOTSUP;
3593 	ret = eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3594 							fw_version, fw_size));
3595 
3596 	rte_ethdev_trace_fw_version_get(port_id, fw_version, fw_size, ret);
3597 
3598 	return ret;
3599 }
3600 
3601 int
3602 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3603 {
3604 	struct rte_eth_dev *dev;
3605 	const struct rte_eth_desc_lim lim = {
3606 		.nb_max = UINT16_MAX,
3607 		.nb_min = 0,
3608 		.nb_align = 1,
3609 		.nb_seg_max = UINT16_MAX,
3610 		.nb_mtu_seg_max = UINT16_MAX,
3611 	};
3612 	int diag;
3613 
3614 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3615 	dev = &rte_eth_devices[port_id];
3616 
3617 	if (dev_info == NULL) {
3618 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3619 			port_id);
3620 		return -EINVAL;
3621 	}
3622 
3623 	/*
3624 	 * Init dev_info before port_id check since caller does not have
3625 	 * return status and does not know if get is successful or not.
3626 	 */
3627 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3628 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3629 
3630 	dev_info->rx_desc_lim = lim;
3631 	dev_info->tx_desc_lim = lim;
3632 	dev_info->device = dev->device;
3633 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3634 		RTE_ETHER_CRC_LEN;
3635 	dev_info->max_mtu = UINT16_MAX;
3636 
3637 	if (*dev->dev_ops->dev_infos_get == NULL)
3638 		return -ENOTSUP;
3639 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3640 	if (diag != 0) {
3641 		/* Cleanup already filled in device information */
3642 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3643 		return eth_err(port_id, diag);
3644 	}
3645 
3646 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3647 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3648 			RTE_MAX_QUEUES_PER_PORT);
3649 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3650 			RTE_MAX_QUEUES_PER_PORT);
3651 
3652 	dev_info->driver_name = dev->device->driver->name;
3653 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3654 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3655 
3656 	dev_info->dev_flags = &dev->data->dev_flags;
3657 
3658 	rte_ethdev_trace_info_get(port_id, dev_info);
3659 
3660 	return 0;
3661 }
3662 
3663 int
3664 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3665 {
3666 	struct rte_eth_dev *dev;
3667 
3668 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3669 	dev = &rte_eth_devices[port_id];
3670 
3671 	if (dev_conf == NULL) {
3672 		RTE_ETHDEV_LOG(ERR,
3673 			"Cannot get ethdev port %u configuration to NULL\n",
3674 			port_id);
3675 		return -EINVAL;
3676 	}
3677 
3678 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3679 
3680 	rte_ethdev_trace_conf_get(port_id, dev_conf);
3681 
3682 	return 0;
3683 }
3684 
3685 int
3686 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3687 				 uint32_t *ptypes, int num)
3688 {
3689 	int i, j;
3690 	struct rte_eth_dev *dev;
3691 	const uint32_t *all_ptypes;
3692 
3693 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3694 	dev = &rte_eth_devices[port_id];
3695 
3696 	if (ptypes == NULL && num > 0) {
3697 		RTE_ETHDEV_LOG(ERR,
3698 			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
3699 			port_id);
3700 		return -EINVAL;
3701 	}
3702 
3703 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3704 		return 0;
3705 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3706 
3707 	if (!all_ptypes)
3708 		return 0;
3709 
3710 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3711 		if (all_ptypes[i] & ptype_mask) {
3712 			if (j < num) {
3713 				ptypes[j] = all_ptypes[i];
3714 
3715 				rte_ethdev_trace_get_supported_ptypes(port_id,
3716 						j, num, ptypes[j]);
3717 			}
3718 			j++;
3719 		}
3720 
3721 	return j;
3722 }
3723 
3724 int
3725 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3726 				 uint32_t *set_ptypes, unsigned int num)
3727 {
3728 	const uint32_t valid_ptype_masks[] = {
3729 		RTE_PTYPE_L2_MASK,
3730 		RTE_PTYPE_L3_MASK,
3731 		RTE_PTYPE_L4_MASK,
3732 		RTE_PTYPE_TUNNEL_MASK,
3733 		RTE_PTYPE_INNER_L2_MASK,
3734 		RTE_PTYPE_INNER_L3_MASK,
3735 		RTE_PTYPE_INNER_L4_MASK,
3736 	};
3737 	const uint32_t *all_ptypes;
3738 	struct rte_eth_dev *dev;
3739 	uint32_t unused_mask;
3740 	unsigned int i, j;
3741 	int ret;
3742 
3743 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3744 	dev = &rte_eth_devices[port_id];
3745 
3746 	if (num > 0 && set_ptypes == NULL) {
3747 		RTE_ETHDEV_LOG(ERR,
3748 			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
3749 			port_id);
3750 		return -EINVAL;
3751 	}
3752 
3753 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3754 			*dev->dev_ops->dev_ptypes_set == NULL) {
3755 		ret = 0;
3756 		goto ptype_unknown;
3757 	}
3758 
3759 	if (ptype_mask == 0) {
3760 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3761 				ptype_mask);
3762 		goto ptype_unknown;
3763 	}
3764 
3765 	unused_mask = ptype_mask;
3766 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3767 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3768 		if (mask && mask != valid_ptype_masks[i]) {
3769 			ret = -EINVAL;
3770 			goto ptype_unknown;
3771 		}
3772 		unused_mask &= ~valid_ptype_masks[i];
3773 	}
3774 
3775 	if (unused_mask) {
3776 		ret = -EINVAL;
3777 		goto ptype_unknown;
3778 	}
3779 
3780 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3781 	if (all_ptypes == NULL) {
3782 		ret = 0;
3783 		goto ptype_unknown;
3784 	}
3785 
3786 	/*
3787 	 * Accommodate as many set_ptypes as possible. If the supplied
3788 	 * set_ptypes array is insufficient fill it partially.
3789 	 */
3790 	for (i = 0, j = 0; set_ptypes != NULL &&
3791 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3792 		if (ptype_mask & all_ptypes[i]) {
3793 			if (j < num - 1) {
3794 				set_ptypes[j] = all_ptypes[i];
3795 
3796 				rte_ethdev_trace_set_ptypes(port_id, j, num,
3797 						set_ptypes[j]);
3798 
3799 				j++;
3800 				continue;
3801 			}
3802 			break;
3803 		}
3804 	}
3805 
3806 	if (set_ptypes != NULL && j < num)
3807 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3808 
3809 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3810 
3811 ptype_unknown:
3812 	if (num > 0)
3813 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3814 
3815 	return ret;
3816 }
3817 
3818 int
3819 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3820 	unsigned int num)
3821 {
3822 	int32_t ret;
3823 	struct rte_eth_dev *dev;
3824 	struct rte_eth_dev_info dev_info;
3825 
3826 	if (ma == NULL) {
3827 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3828 		return -EINVAL;
3829 	}
3830 
3831 	/* will check for us that port_id is a valid one */
3832 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3833 	if (ret != 0)
3834 		return ret;
3835 
3836 	dev = &rte_eth_devices[port_id];
3837 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3838 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3839 
3840 	rte_eth_trace_macaddrs_get(port_id, num);
3841 
3842 	return num;
3843 }
3844 
3845 int
3846 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3847 {
3848 	struct rte_eth_dev *dev;
3849 
3850 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3851 	dev = &rte_eth_devices[port_id];
3852 
3853 	if (mac_addr == NULL) {
3854 		RTE_ETHDEV_LOG(ERR,
3855 			"Cannot get ethdev port %u MAC address to NULL\n",
3856 			port_id);
3857 		return -EINVAL;
3858 	}
3859 
3860 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3861 
3862 	rte_eth_trace_macaddr_get(port_id, mac_addr);
3863 
3864 	return 0;
3865 }
3866 
3867 int
3868 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3869 {
3870 	struct rte_eth_dev *dev;
3871 
3872 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3873 	dev = &rte_eth_devices[port_id];
3874 
3875 	if (mtu == NULL) {
3876 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3877 			port_id);
3878 		return -EINVAL;
3879 	}
3880 
3881 	*mtu = dev->data->mtu;
3882 
3883 	rte_ethdev_trace_get_mtu(port_id, *mtu);
3884 
3885 	return 0;
3886 }
3887 
3888 int
3889 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3890 {
3891 	int ret;
3892 	struct rte_eth_dev_info dev_info;
3893 	struct rte_eth_dev *dev;
3894 
3895 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3896 	dev = &rte_eth_devices[port_id];
3897 	if (*dev->dev_ops->mtu_set == NULL)
3898 		return -ENOTSUP;
3899 
3900 	/*
3901 	 * Check if the device supports dev_infos_get, if it does not
3902 	 * skip min_mtu/max_mtu validation here as this requires values
3903 	 * that are populated within the call to rte_eth_dev_info_get()
3904 	 * which relies on dev->dev_ops->dev_infos_get.
3905 	 */
3906 	if (*dev->dev_ops->dev_infos_get != NULL) {
3907 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3908 		if (ret != 0)
3909 			return ret;
3910 
3911 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3912 		if (ret != 0)
3913 			return ret;
3914 	}
3915 
3916 	if (dev->data->dev_configured == 0) {
3917 		RTE_ETHDEV_LOG(ERR,
3918 			"Port %u must be configured before MTU set\n",
3919 			port_id);
3920 		return -EINVAL;
3921 	}
3922 
3923 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3924 	if (ret == 0)
3925 		dev->data->mtu = mtu;
3926 
3927 	ret = eth_err(port_id, ret);
3928 
3929 	rte_ethdev_trace_set_mtu(port_id, mtu, ret);
3930 
3931 	return ret;
3932 }
3933 
3934 int
3935 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3936 {
3937 	struct rte_eth_dev *dev;
3938 	int ret;
3939 
3940 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3941 	dev = &rte_eth_devices[port_id];
3942 
3943 	if (!(dev->data->dev_conf.rxmode.offloads &
3944 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3945 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3946 			port_id);
3947 		return -ENOSYS;
3948 	}
3949 
3950 	if (vlan_id > 4095) {
3951 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3952 			port_id, vlan_id);
3953 		return -EINVAL;
3954 	}
3955 	if (*dev->dev_ops->vlan_filter_set == NULL)
3956 		return -ENOTSUP;
3957 
3958 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3959 	if (ret == 0) {
3960 		struct rte_vlan_filter_conf *vfc;
3961 		int vidx;
3962 		int vbit;
3963 
3964 		vfc = &dev->data->vlan_filter_conf;
3965 		vidx = vlan_id / 64;
3966 		vbit = vlan_id % 64;
3967 
3968 		if (on)
3969 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3970 		else
3971 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3972 	}
3973 
3974 	ret = eth_err(port_id, ret);
3975 
3976 	rte_ethdev_trace_vlan_filter(port_id, vlan_id, on, ret);
3977 
3978 	return ret;
3979 }
3980 
3981 int
3982 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3983 				    int on)
3984 {
3985 	struct rte_eth_dev *dev;
3986 
3987 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3988 	dev = &rte_eth_devices[port_id];
3989 
3990 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3991 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3992 		return -EINVAL;
3993 	}
3994 
3995 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
3996 		return -ENOTSUP;
3997 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3998 
3999 	rte_ethdev_trace_set_vlan_strip_on_queue(port_id, rx_queue_id, on);
4000 
4001 	return 0;
4002 }
4003 
4004 int
4005 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
4006 				enum rte_vlan_type vlan_type,
4007 				uint16_t tpid)
4008 {
4009 	struct rte_eth_dev *dev;
4010 	int ret;
4011 
4012 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4013 	dev = &rte_eth_devices[port_id];
4014 
4015 	if (*dev->dev_ops->vlan_tpid_set == NULL)
4016 		return -ENOTSUP;
4017 	ret = eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
4018 							      tpid));
4019 
4020 	rte_ethdev_trace_set_vlan_ether_type(port_id, vlan_type, tpid, ret);
4021 
4022 	return ret;
4023 }
4024 
4025 int
4026 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
4027 {
4028 	struct rte_eth_dev_info dev_info;
4029 	struct rte_eth_dev *dev;
4030 	int ret = 0;
4031 	int mask = 0;
4032 	int cur, org = 0;
4033 	uint64_t orig_offloads;
4034 	uint64_t dev_offloads;
4035 	uint64_t new_offloads;
4036 
4037 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4038 	dev = &rte_eth_devices[port_id];
4039 
4040 	/* save original values in case of failure */
4041 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
4042 	dev_offloads = orig_offloads;
4043 
4044 	/* check which option changed by application */
4045 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
4046 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
4047 	if (cur != org) {
4048 		if (cur)
4049 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4050 		else
4051 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
4052 		mask |= RTE_ETH_VLAN_STRIP_MASK;
4053 	}
4054 
4055 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
4056 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
4057 	if (cur != org) {
4058 		if (cur)
4059 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4060 		else
4061 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
4062 		mask |= RTE_ETH_VLAN_FILTER_MASK;
4063 	}
4064 
4065 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
4066 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
4067 	if (cur != org) {
4068 		if (cur)
4069 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4070 		else
4071 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
4072 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
4073 	}
4074 
4075 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
4076 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
4077 	if (cur != org) {
4078 		if (cur)
4079 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4080 		else
4081 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
4082 		mask |= RTE_ETH_QINQ_STRIP_MASK;
4083 	}
4084 
4085 	/*no change*/
4086 	if (mask == 0)
4087 		return ret;
4088 
4089 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4090 	if (ret != 0)
4091 		return ret;
4092 
4093 	/* Rx VLAN offloading must be within its device capabilities */
4094 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
4095 		new_offloads = dev_offloads & ~orig_offloads;
4096 		RTE_ETHDEV_LOG(ERR,
4097 			"Ethdev port_id=%u requested new added VLAN offloads "
4098 			"0x%" PRIx64 " must be within Rx offloads capabilities "
4099 			"0x%" PRIx64 " in %s()\n",
4100 			port_id, new_offloads, dev_info.rx_offload_capa,
4101 			__func__);
4102 		return -EINVAL;
4103 	}
4104 
4105 	if (*dev->dev_ops->vlan_offload_set == NULL)
4106 		return -ENOTSUP;
4107 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
4108 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
4109 	if (ret) {
4110 		/* hit an error restore  original values */
4111 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
4112 	}
4113 
4114 	ret = eth_err(port_id, ret);
4115 
4116 	rte_ethdev_trace_set_vlan_offload(port_id, offload_mask, ret);
4117 
4118 	return ret;
4119 }
4120 
4121 int
4122 rte_eth_dev_get_vlan_offload(uint16_t port_id)
4123 {
4124 	struct rte_eth_dev *dev;
4125 	uint64_t *dev_offloads;
4126 	int ret = 0;
4127 
4128 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4129 	dev = &rte_eth_devices[port_id];
4130 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
4131 
4132 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
4133 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
4134 
4135 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
4136 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
4137 
4138 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
4139 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
4140 
4141 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
4142 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
4143 
4144 	rte_ethdev_trace_get_vlan_offload(port_id, ret);
4145 
4146 	return ret;
4147 }
4148 
4149 int
4150 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
4151 {
4152 	struct rte_eth_dev *dev;
4153 	int ret;
4154 
4155 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4156 	dev = &rte_eth_devices[port_id];
4157 
4158 	if (*dev->dev_ops->vlan_pvid_set == NULL)
4159 		return -ENOTSUP;
4160 	ret = eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
4161 
4162 	rte_ethdev_trace_set_vlan_pvid(port_id, pvid, on, ret);
4163 
4164 	return ret;
4165 }
4166 
4167 int
4168 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4169 {
4170 	struct rte_eth_dev *dev;
4171 	int ret;
4172 
4173 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4174 	dev = &rte_eth_devices[port_id];
4175 
4176 	if (fc_conf == NULL) {
4177 		RTE_ETHDEV_LOG(ERR,
4178 			"Cannot get ethdev port %u flow control config to NULL\n",
4179 			port_id);
4180 		return -EINVAL;
4181 	}
4182 
4183 	if (*dev->dev_ops->flow_ctrl_get == NULL)
4184 		return -ENOTSUP;
4185 	memset(fc_conf, 0, sizeof(*fc_conf));
4186 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
4187 
4188 	rte_ethdev_trace_flow_ctrl_get(port_id, fc_conf, ret);
4189 
4190 	return ret;
4191 }
4192 
4193 int
4194 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
4195 {
4196 	struct rte_eth_dev *dev;
4197 	int ret;
4198 
4199 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4200 	dev = &rte_eth_devices[port_id];
4201 
4202 	if (fc_conf == NULL) {
4203 		RTE_ETHDEV_LOG(ERR,
4204 			"Cannot set ethdev port %u flow control from NULL config\n",
4205 			port_id);
4206 		return -EINVAL;
4207 	}
4208 
4209 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
4210 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
4211 		return -EINVAL;
4212 	}
4213 
4214 	if (*dev->dev_ops->flow_ctrl_set == NULL)
4215 		return -ENOTSUP;
4216 	ret = eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
4217 
4218 	rte_ethdev_trace_flow_ctrl_set(port_id, fc_conf, ret);
4219 
4220 	return ret;
4221 }
4222 
4223 int
4224 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4225 				   struct rte_eth_pfc_conf *pfc_conf)
4226 {
4227 	struct rte_eth_dev *dev;
4228 	int ret;
4229 
4230 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4231 	dev = &rte_eth_devices[port_id];
4232 
4233 	if (pfc_conf == NULL) {
4234 		RTE_ETHDEV_LOG(ERR,
4235 			"Cannot set ethdev port %u priority flow control from NULL config\n",
4236 			port_id);
4237 		return -EINVAL;
4238 	}
4239 
4240 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
4241 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
4242 		return -EINVAL;
4243 	}
4244 
4245 	/* High water, low water validation are device specific */
4246 	if  (*dev->dev_ops->priority_flow_ctrl_set == NULL)
4247 		return -ENOTSUP;
4248 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
4249 			       (dev, pfc_conf));
4250 
4251 	rte_ethdev_trace_priority_flow_ctrl_set(port_id, pfc_conf, ret);
4252 
4253 	return ret;
4254 }
4255 
4256 static int
4257 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4258 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4259 {
4260 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
4261 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4262 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
4263 			RTE_ETHDEV_LOG(ERR,
4264 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
4265 				pfc_queue_conf->rx_pause.tx_qid,
4266 				dev_info->nb_tx_queues);
4267 			return -EINVAL;
4268 		}
4269 
4270 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
4271 			RTE_ETHDEV_LOG(ERR,
4272 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
4273 				pfc_queue_conf->rx_pause.tc, tc_max);
4274 			return -EINVAL;
4275 		}
4276 	}
4277 
4278 	return 0;
4279 }
4280 
4281 static int
4282 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
4283 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4284 {
4285 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
4286 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
4287 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
4288 			RTE_ETHDEV_LOG(ERR,
4289 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
4290 				pfc_queue_conf->tx_pause.rx_qid,
4291 				dev_info->nb_rx_queues);
4292 			return -EINVAL;
4293 		}
4294 
4295 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
4296 			RTE_ETHDEV_LOG(ERR,
4297 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
4298 				pfc_queue_conf->tx_pause.tc, tc_max);
4299 			return -EINVAL;
4300 		}
4301 	}
4302 
4303 	return 0;
4304 }
4305 
4306 int
4307 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4308 		struct rte_eth_pfc_queue_info *pfc_queue_info)
4309 {
4310 	struct rte_eth_dev *dev;
4311 	int ret;
4312 
4313 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4314 	dev = &rte_eth_devices[port_id];
4315 
4316 	if (pfc_queue_info == NULL) {
4317 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
4318 			port_id);
4319 		return -EINVAL;
4320 	}
4321 
4322 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
4323 		return -ENOTSUP;
4324 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
4325 			(dev, pfc_queue_info));
4326 
4327 	rte_ethdev_trace_priority_flow_ctrl_queue_info_get(port_id,
4328 						pfc_queue_info, ret);
4329 
4330 	return ret;
4331 }
4332 
4333 int
4334 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4335 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
4336 {
4337 	struct rte_eth_pfc_queue_info pfc_info;
4338 	struct rte_eth_dev_info dev_info;
4339 	struct rte_eth_dev *dev;
4340 	int ret;
4341 
4342 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4343 	dev = &rte_eth_devices[port_id];
4344 
4345 	if (pfc_queue_conf == NULL) {
4346 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
4347 			port_id);
4348 		return -EINVAL;
4349 	}
4350 
4351 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4352 	if (ret != 0)
4353 		return ret;
4354 
4355 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
4356 	if (ret != 0)
4357 		return ret;
4358 
4359 	if (pfc_info.tc_max == 0) {
4360 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
4361 			port_id);
4362 		return -ENOTSUP;
4363 	}
4364 
4365 	/* Check requested mode supported or not */
4366 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
4367 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
4368 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
4369 			port_id);
4370 		return -EINVAL;
4371 	}
4372 
4373 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
4374 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
4375 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
4376 			port_id);
4377 		return -EINVAL;
4378 	}
4379 
4380 	/* Validate Rx pause parameters */
4381 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4382 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
4383 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
4384 				pfc_queue_conf);
4385 		if (ret != 0)
4386 			return ret;
4387 	}
4388 
4389 	/* Validate Tx pause parameters */
4390 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4391 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
4392 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
4393 				pfc_queue_conf);
4394 		if (ret != 0)
4395 			return ret;
4396 	}
4397 
4398 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4399 		return -ENOTSUP;
4400 	ret = eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_config)
4401 			(dev, pfc_queue_conf));
4402 
4403 	rte_ethdev_trace_priority_flow_ctrl_queue_configure(port_id,
4404 						pfc_queue_conf, ret);
4405 
4406 	return ret;
4407 }
4408 
4409 static int
4410 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4411 			uint16_t reta_size)
4412 {
4413 	uint16_t i, num;
4414 
4415 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4416 	for (i = 0; i < num; i++) {
4417 		if (reta_conf[i].mask)
4418 			return 0;
4419 	}
4420 
4421 	return -EINVAL;
4422 }
4423 
4424 static int
4425 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4426 			 uint16_t reta_size,
4427 			 uint16_t max_rxq)
4428 {
4429 	uint16_t i, idx, shift;
4430 
4431 	if (max_rxq == 0) {
4432 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
4433 		return -EINVAL;
4434 	}
4435 
4436 	for (i = 0; i < reta_size; i++) {
4437 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4438 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4439 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4440 			(reta_conf[idx].reta[shift] >= max_rxq)) {
4441 			RTE_ETHDEV_LOG(ERR,
4442 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
4443 				idx, shift,
4444 				reta_conf[idx].reta[shift], max_rxq);
4445 			return -EINVAL;
4446 		}
4447 	}
4448 
4449 	return 0;
4450 }
4451 
4452 int
4453 rte_eth_dev_rss_reta_update(uint16_t port_id,
4454 			    struct rte_eth_rss_reta_entry64 *reta_conf,
4455 			    uint16_t reta_size)
4456 {
4457 	enum rte_eth_rx_mq_mode mq_mode;
4458 	struct rte_eth_dev *dev;
4459 	int ret;
4460 
4461 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4462 	dev = &rte_eth_devices[port_id];
4463 
4464 	if (reta_conf == NULL) {
4465 		RTE_ETHDEV_LOG(ERR,
4466 			"Cannot update ethdev port %u RSS RETA to NULL\n",
4467 			port_id);
4468 		return -EINVAL;
4469 	}
4470 
4471 	if (reta_size == 0) {
4472 		RTE_ETHDEV_LOG(ERR,
4473 			"Cannot update ethdev port %u RSS RETA with zero size\n",
4474 			port_id);
4475 		return -EINVAL;
4476 	}
4477 
4478 	/* Check mask bits */
4479 	ret = eth_check_reta_mask(reta_conf, reta_size);
4480 	if (ret < 0)
4481 		return ret;
4482 
4483 	/* Check entry value */
4484 	ret = eth_check_reta_entry(reta_conf, reta_size,
4485 				dev->data->nb_rx_queues);
4486 	if (ret < 0)
4487 		return ret;
4488 
4489 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4490 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4491 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4492 		return -ENOTSUP;
4493 	}
4494 
4495 	if (*dev->dev_ops->reta_update == NULL)
4496 		return -ENOTSUP;
4497 	ret = eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4498 							    reta_size));
4499 
4500 	rte_ethdev_trace_rss_reta_update(port_id, reta_conf, reta_size, ret);
4501 
4502 	return ret;
4503 }
4504 
4505 int
4506 rte_eth_dev_rss_reta_query(uint16_t port_id,
4507 			   struct rte_eth_rss_reta_entry64 *reta_conf,
4508 			   uint16_t reta_size)
4509 {
4510 	struct rte_eth_dev *dev;
4511 	int ret;
4512 
4513 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4514 	dev = &rte_eth_devices[port_id];
4515 
4516 	if (reta_conf == NULL) {
4517 		RTE_ETHDEV_LOG(ERR,
4518 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
4519 			port_id);
4520 		return -EINVAL;
4521 	}
4522 
4523 	/* Check mask bits */
4524 	ret = eth_check_reta_mask(reta_conf, reta_size);
4525 	if (ret < 0)
4526 		return ret;
4527 
4528 	if (*dev->dev_ops->reta_query == NULL)
4529 		return -ENOTSUP;
4530 	ret = eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4531 							   reta_size));
4532 
4533 	rte_ethdev_trace_rss_reta_query(port_id, reta_conf, reta_size, ret);
4534 
4535 	return ret;
4536 }
4537 
4538 int
4539 rte_eth_dev_rss_hash_update(uint16_t port_id,
4540 			    struct rte_eth_rss_conf *rss_conf)
4541 {
4542 	struct rte_eth_dev *dev;
4543 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4544 	enum rte_eth_rx_mq_mode mq_mode;
4545 	int ret;
4546 
4547 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4548 	dev = &rte_eth_devices[port_id];
4549 
4550 	if (rss_conf == NULL) {
4551 		RTE_ETHDEV_LOG(ERR,
4552 			"Cannot update ethdev port %u RSS hash from NULL config\n",
4553 			port_id);
4554 		return -EINVAL;
4555 	}
4556 
4557 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4558 	if (ret != 0)
4559 		return ret;
4560 
4561 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4562 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4563 	    dev_info.flow_type_rss_offloads) {
4564 		RTE_ETHDEV_LOG(ERR,
4565 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4566 			port_id, rss_conf->rss_hf,
4567 			dev_info.flow_type_rss_offloads);
4568 		return -EINVAL;
4569 	}
4570 
4571 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4572 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4573 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4574 		return -ENOTSUP;
4575 	}
4576 
4577 	if (*dev->dev_ops->rss_hash_update == NULL)
4578 		return -ENOTSUP;
4579 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4580 								rss_conf));
4581 
4582 	rte_ethdev_trace_rss_hash_update(port_id, rss_conf, ret);
4583 
4584 	return ret;
4585 }
4586 
4587 int
4588 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4589 			      struct rte_eth_rss_conf *rss_conf)
4590 {
4591 	struct rte_eth_dev *dev;
4592 	int ret;
4593 
4594 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4595 	dev = &rte_eth_devices[port_id];
4596 
4597 	if (rss_conf == NULL) {
4598 		RTE_ETHDEV_LOG(ERR,
4599 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4600 			port_id);
4601 		return -EINVAL;
4602 	}
4603 
4604 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4605 		return -ENOTSUP;
4606 	ret = eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4607 								  rss_conf));
4608 
4609 	rte_ethdev_trace_rss_hash_conf_get(port_id, rss_conf, ret);
4610 
4611 	return ret;
4612 }
4613 
4614 int
4615 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4616 				struct rte_eth_udp_tunnel *udp_tunnel)
4617 {
4618 	struct rte_eth_dev *dev;
4619 	int ret;
4620 
4621 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4622 	dev = &rte_eth_devices[port_id];
4623 
4624 	if (udp_tunnel == NULL) {
4625 		RTE_ETHDEV_LOG(ERR,
4626 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4627 			port_id);
4628 		return -EINVAL;
4629 	}
4630 
4631 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4632 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4633 		return -EINVAL;
4634 	}
4635 
4636 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4637 		return -ENOTSUP;
4638 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4639 								udp_tunnel));
4640 
4641 	rte_ethdev_trace_udp_tunnel_port_add(port_id, udp_tunnel, ret);
4642 
4643 	return ret;
4644 }
4645 
4646 int
4647 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4648 				   struct rte_eth_udp_tunnel *udp_tunnel)
4649 {
4650 	struct rte_eth_dev *dev;
4651 	int ret;
4652 
4653 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4654 	dev = &rte_eth_devices[port_id];
4655 
4656 	if (udp_tunnel == NULL) {
4657 		RTE_ETHDEV_LOG(ERR,
4658 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4659 			port_id);
4660 		return -EINVAL;
4661 	}
4662 
4663 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4664 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4665 		return -EINVAL;
4666 	}
4667 
4668 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4669 		return -ENOTSUP;
4670 	ret = eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4671 								udp_tunnel));
4672 
4673 	rte_ethdev_trace_udp_tunnel_port_delete(port_id, udp_tunnel, ret);
4674 
4675 	return ret;
4676 }
4677 
4678 int
4679 rte_eth_led_on(uint16_t port_id)
4680 {
4681 	struct rte_eth_dev *dev;
4682 	int ret;
4683 
4684 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4685 	dev = &rte_eth_devices[port_id];
4686 
4687 	if (*dev->dev_ops->dev_led_on == NULL)
4688 		return -ENOTSUP;
4689 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4690 
4691 	rte_eth_trace_led_on(port_id, ret);
4692 
4693 	return ret;
4694 }
4695 
4696 int
4697 rte_eth_led_off(uint16_t port_id)
4698 {
4699 	struct rte_eth_dev *dev;
4700 	int ret;
4701 
4702 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4703 	dev = &rte_eth_devices[port_id];
4704 
4705 	if (*dev->dev_ops->dev_led_off == NULL)
4706 		return -ENOTSUP;
4707 	ret = eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4708 
4709 	rte_eth_trace_led_off(port_id, ret);
4710 
4711 	return ret;
4712 }
4713 
4714 int
4715 rte_eth_fec_get_capability(uint16_t port_id,
4716 			   struct rte_eth_fec_capa *speed_fec_capa,
4717 			   unsigned int num)
4718 {
4719 	struct rte_eth_dev *dev;
4720 	int ret;
4721 
4722 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4723 	dev = &rte_eth_devices[port_id];
4724 
4725 	if (speed_fec_capa == NULL && num > 0) {
4726 		RTE_ETHDEV_LOG(ERR,
4727 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4728 			port_id);
4729 		return -EINVAL;
4730 	}
4731 
4732 	if (*dev->dev_ops->fec_get_capability == NULL)
4733 		return -ENOTSUP;
4734 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4735 
4736 	rte_eth_trace_fec_get_capability(port_id, speed_fec_capa, num, ret);
4737 
4738 	return ret;
4739 }
4740 
4741 int
4742 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4743 {
4744 	struct rte_eth_dev *dev;
4745 	int ret;
4746 
4747 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4748 	dev = &rte_eth_devices[port_id];
4749 
4750 	if (fec_capa == NULL) {
4751 		RTE_ETHDEV_LOG(ERR,
4752 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4753 			port_id);
4754 		return -EINVAL;
4755 	}
4756 
4757 	if (*dev->dev_ops->fec_get == NULL)
4758 		return -ENOTSUP;
4759 	ret = eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4760 
4761 	rte_eth_trace_fec_get(port_id, fec_capa, ret);
4762 
4763 	return ret;
4764 }
4765 
4766 int
4767 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4768 {
4769 	struct rte_eth_dev *dev;
4770 	int ret;
4771 
4772 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4773 	dev = &rte_eth_devices[port_id];
4774 
4775 	if (*dev->dev_ops->fec_set == NULL)
4776 		return -ENOTSUP;
4777 	ret = eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4778 
4779 	rte_eth_trace_fec_set(port_id, fec_capa, ret);
4780 
4781 	return ret;
4782 }
4783 
4784 /*
4785  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4786  * an empty spot.
4787  */
4788 static int
4789 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4790 {
4791 	struct rte_eth_dev_info dev_info;
4792 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4793 	unsigned i;
4794 	int ret;
4795 
4796 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4797 	if (ret != 0)
4798 		return -1;
4799 
4800 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4801 		if (memcmp(addr, &dev->data->mac_addrs[i],
4802 				RTE_ETHER_ADDR_LEN) == 0)
4803 			return i;
4804 
4805 	return -1;
4806 }
4807 
4808 static const struct rte_ether_addr null_mac_addr;
4809 
4810 int
4811 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4812 			uint32_t pool)
4813 {
4814 	struct rte_eth_dev *dev;
4815 	int index;
4816 	uint64_t pool_mask;
4817 	int ret;
4818 
4819 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4820 	dev = &rte_eth_devices[port_id];
4821 
4822 	if (addr == NULL) {
4823 		RTE_ETHDEV_LOG(ERR,
4824 			"Cannot add ethdev port %u MAC address from NULL address\n",
4825 			port_id);
4826 		return -EINVAL;
4827 	}
4828 
4829 	if (*dev->dev_ops->mac_addr_add == NULL)
4830 		return -ENOTSUP;
4831 
4832 	if (rte_is_zero_ether_addr(addr)) {
4833 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4834 			port_id);
4835 		return -EINVAL;
4836 	}
4837 	if (pool >= RTE_ETH_64_POOLS) {
4838 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4839 		return -EINVAL;
4840 	}
4841 
4842 	index = eth_dev_get_mac_addr_index(port_id, addr);
4843 	if (index < 0) {
4844 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4845 		if (index < 0) {
4846 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4847 				port_id);
4848 			return -ENOSPC;
4849 		}
4850 	} else {
4851 		pool_mask = dev->data->mac_pool_sel[index];
4852 
4853 		/* Check if both MAC address and pool is already there, and do nothing */
4854 		if (pool_mask & RTE_BIT64(pool))
4855 			return 0;
4856 	}
4857 
4858 	/* Update NIC */
4859 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4860 
4861 	if (ret == 0) {
4862 		/* Update address in NIC data structure */
4863 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4864 
4865 		/* Update pool bitmap in NIC data structure */
4866 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4867 	}
4868 
4869 	ret = eth_err(port_id, ret);
4870 
4871 	rte_ethdev_trace_mac_addr_add(port_id, addr, pool, ret);
4872 
4873 	return ret;
4874 }
4875 
4876 int
4877 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4878 {
4879 	struct rte_eth_dev *dev;
4880 	int index;
4881 
4882 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4883 	dev = &rte_eth_devices[port_id];
4884 
4885 	if (addr == NULL) {
4886 		RTE_ETHDEV_LOG(ERR,
4887 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4888 			port_id);
4889 		return -EINVAL;
4890 	}
4891 
4892 	if (*dev->dev_ops->mac_addr_remove == NULL)
4893 		return -ENOTSUP;
4894 
4895 	index = eth_dev_get_mac_addr_index(port_id, addr);
4896 	if (index == 0) {
4897 		RTE_ETHDEV_LOG(ERR,
4898 			"Port %u: Cannot remove default MAC address\n",
4899 			port_id);
4900 		return -EADDRINUSE;
4901 	} else if (index < 0)
4902 		return 0;  /* Do nothing if address wasn't found */
4903 
4904 	/* Update NIC */
4905 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4906 
4907 	/* Update address in NIC data structure */
4908 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4909 
4910 	/* reset pool bitmap */
4911 	dev->data->mac_pool_sel[index] = 0;
4912 
4913 	rte_ethdev_trace_mac_addr_remove(port_id, addr);
4914 
4915 	return 0;
4916 }
4917 
4918 int
4919 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4920 {
4921 	struct rte_eth_dev *dev;
4922 	int index;
4923 	int ret;
4924 
4925 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4926 	dev = &rte_eth_devices[port_id];
4927 
4928 	if (addr == NULL) {
4929 		RTE_ETHDEV_LOG(ERR,
4930 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4931 			port_id);
4932 		return -EINVAL;
4933 	}
4934 
4935 	if (!rte_is_valid_assigned_ether_addr(addr))
4936 		return -EINVAL;
4937 
4938 	if (*dev->dev_ops->mac_addr_set == NULL)
4939 		return -ENOTSUP;
4940 
4941 	/* Keep address unique in dev->data->mac_addrs[]. */
4942 	index = eth_dev_get_mac_addr_index(port_id, addr);
4943 	if (index > 0) {
4944 		RTE_ETHDEV_LOG(ERR,
4945 			"New default address for port %u was already in the address list. Please remove it first.\n",
4946 			port_id);
4947 		return -EEXIST;
4948 	}
4949 
4950 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4951 	if (ret < 0)
4952 		return ret;
4953 
4954 	/* Update default address in NIC data structure */
4955 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4956 
4957 	rte_ethdev_trace_default_mac_addr_set(port_id, addr);
4958 
4959 	return 0;
4960 }
4961 
4962 
4963 /*
4964  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4965  * an empty spot.
4966  */
4967 static int
4968 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4969 		const struct rte_ether_addr *addr)
4970 {
4971 	struct rte_eth_dev_info dev_info;
4972 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4973 	unsigned i;
4974 	int ret;
4975 
4976 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4977 	if (ret != 0)
4978 		return -1;
4979 
4980 	if (!dev->data->hash_mac_addrs)
4981 		return -1;
4982 
4983 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4984 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4985 			RTE_ETHER_ADDR_LEN) == 0)
4986 			return i;
4987 
4988 	return -1;
4989 }
4990 
4991 int
4992 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4993 				uint8_t on)
4994 {
4995 	int index;
4996 	int ret;
4997 	struct rte_eth_dev *dev;
4998 
4999 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5000 	dev = &rte_eth_devices[port_id];
5001 
5002 	if (addr == NULL) {
5003 		RTE_ETHDEV_LOG(ERR,
5004 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
5005 			port_id);
5006 		return -EINVAL;
5007 	}
5008 
5009 	if (rte_is_zero_ether_addr(addr)) {
5010 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
5011 			port_id);
5012 		return -EINVAL;
5013 	}
5014 
5015 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
5016 	/* Check if it's already there, and do nothing */
5017 	if ((index >= 0) && on)
5018 		return 0;
5019 
5020 	if (index < 0) {
5021 		if (!on) {
5022 			RTE_ETHDEV_LOG(ERR,
5023 				"Port %u: the MAC address was not set in UTA\n",
5024 				port_id);
5025 			return -EINVAL;
5026 		}
5027 
5028 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
5029 		if (index < 0) {
5030 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
5031 				port_id);
5032 			return -ENOSPC;
5033 		}
5034 	}
5035 
5036 	if (*dev->dev_ops->uc_hash_table_set == NULL)
5037 		return -ENOTSUP;
5038 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
5039 	if (ret == 0) {
5040 		/* Update address in NIC data structure */
5041 		if (on)
5042 			rte_ether_addr_copy(addr,
5043 					&dev->data->hash_mac_addrs[index]);
5044 		else
5045 			rte_ether_addr_copy(&null_mac_addr,
5046 					&dev->data->hash_mac_addrs[index]);
5047 	}
5048 
5049 	ret = eth_err(port_id, ret);
5050 
5051 	rte_ethdev_trace_uc_hash_table_set(port_id, on, ret);
5052 
5053 	return ret;
5054 }
5055 
5056 int
5057 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
5058 {
5059 	struct rte_eth_dev *dev;
5060 	int ret;
5061 
5062 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5063 	dev = &rte_eth_devices[port_id];
5064 
5065 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
5066 		return -ENOTSUP;
5067 	ret = eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev, on));
5068 
5069 	rte_ethdev_trace_uc_all_hash_table_set(port_id, on, ret);
5070 
5071 	return ret;
5072 }
5073 
5074 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
5075 					uint32_t tx_rate)
5076 {
5077 	struct rte_eth_dev *dev;
5078 	struct rte_eth_dev_info dev_info;
5079 	struct rte_eth_link link;
5080 	int ret;
5081 
5082 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5083 	dev = &rte_eth_devices[port_id];
5084 
5085 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5086 	if (ret != 0)
5087 		return ret;
5088 
5089 	link = dev->data->dev_link;
5090 
5091 	if (queue_idx > dev_info.max_tx_queues) {
5092 		RTE_ETHDEV_LOG(ERR,
5093 			"Set queue rate limit:port %u: invalid queue ID=%u\n",
5094 			port_id, queue_idx);
5095 		return -EINVAL;
5096 	}
5097 
5098 	if (tx_rate > link.link_speed) {
5099 		RTE_ETHDEV_LOG(ERR,
5100 			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
5101 			tx_rate, link.link_speed);
5102 		return -EINVAL;
5103 	}
5104 
5105 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
5106 		return -ENOTSUP;
5107 	ret = eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
5108 							queue_idx, tx_rate));
5109 
5110 	rte_eth_trace_set_queue_rate_limit(port_id, queue_idx, tx_rate, ret);
5111 
5112 	return ret;
5113 }
5114 
5115 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
5116 			       uint8_t avail_thresh)
5117 {
5118 	struct rte_eth_dev *dev;
5119 	int ret;
5120 
5121 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5122 	dev = &rte_eth_devices[port_id];
5123 
5124 	if (queue_id > dev->data->nb_rx_queues) {
5125 		RTE_ETHDEV_LOG(ERR,
5126 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
5127 			port_id, queue_id);
5128 		return -EINVAL;
5129 	}
5130 
5131 	if (avail_thresh > 99) {
5132 		RTE_ETHDEV_LOG(ERR,
5133 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
5134 			port_id);
5135 		return -EINVAL;
5136 	}
5137 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
5138 		return -ENOTSUP;
5139 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
5140 							     queue_id, avail_thresh));
5141 
5142 	rte_eth_trace_rx_avail_thresh_set(port_id, queue_id, avail_thresh, ret);
5143 
5144 	return ret;
5145 }
5146 
5147 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
5148 				 uint8_t *avail_thresh)
5149 {
5150 	struct rte_eth_dev *dev;
5151 	int ret;
5152 
5153 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5154 	dev = &rte_eth_devices[port_id];
5155 
5156 	if (queue_id == NULL)
5157 		return -EINVAL;
5158 	if (*queue_id >= dev->data->nb_rx_queues)
5159 		*queue_id = 0;
5160 
5161 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
5162 		return -ENOTSUP;
5163 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
5164 							     queue_id, avail_thresh));
5165 
5166 	rte_eth_trace_rx_avail_thresh_query(port_id, *queue_id, ret);
5167 
5168 	return ret;
5169 }
5170 
5171 RTE_INIT(eth_dev_init_fp_ops)
5172 {
5173 	uint32_t i;
5174 
5175 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
5176 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
5177 }
5178 
5179 RTE_INIT(eth_dev_init_cb_lists)
5180 {
5181 	uint16_t i;
5182 
5183 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
5184 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
5185 }
5186 
5187 int
5188 rte_eth_dev_callback_register(uint16_t port_id,
5189 			enum rte_eth_event_type event,
5190 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5191 {
5192 	struct rte_eth_dev *dev;
5193 	struct rte_eth_dev_callback *user_cb;
5194 	uint16_t next_port;
5195 	uint16_t last_port;
5196 
5197 	if (cb_fn == NULL) {
5198 		RTE_ETHDEV_LOG(ERR,
5199 			"Cannot register ethdev port %u callback from NULL\n",
5200 			port_id);
5201 		return -EINVAL;
5202 	}
5203 
5204 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5205 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
5206 		return -EINVAL;
5207 	}
5208 
5209 	if (port_id == RTE_ETH_ALL) {
5210 		next_port = 0;
5211 		last_port = RTE_MAX_ETHPORTS - 1;
5212 	} else {
5213 		next_port = last_port = port_id;
5214 	}
5215 
5216 	rte_spinlock_lock(&eth_dev_cb_lock);
5217 
5218 	do {
5219 		dev = &rte_eth_devices[next_port];
5220 
5221 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
5222 			if (user_cb->cb_fn == cb_fn &&
5223 				user_cb->cb_arg == cb_arg &&
5224 				user_cb->event == event) {
5225 				break;
5226 			}
5227 		}
5228 
5229 		/* create a new callback. */
5230 		if (user_cb == NULL) {
5231 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
5232 				sizeof(struct rte_eth_dev_callback), 0);
5233 			if (user_cb != NULL) {
5234 				user_cb->cb_fn = cb_fn;
5235 				user_cb->cb_arg = cb_arg;
5236 				user_cb->event = event;
5237 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
5238 						  user_cb, next);
5239 			} else {
5240 				rte_spinlock_unlock(&eth_dev_cb_lock);
5241 				rte_eth_dev_callback_unregister(port_id, event,
5242 								cb_fn, cb_arg);
5243 				return -ENOMEM;
5244 			}
5245 
5246 		}
5247 	} while (++next_port <= last_port);
5248 
5249 	rte_spinlock_unlock(&eth_dev_cb_lock);
5250 
5251 	rte_ethdev_trace_callback_register(port_id, event, cb_fn, cb_arg);
5252 
5253 	return 0;
5254 }
5255 
5256 int
5257 rte_eth_dev_callback_unregister(uint16_t port_id,
5258 			enum rte_eth_event_type event,
5259 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
5260 {
5261 	int ret;
5262 	struct rte_eth_dev *dev;
5263 	struct rte_eth_dev_callback *cb, *next;
5264 	uint16_t next_port;
5265 	uint16_t last_port;
5266 
5267 	if (cb_fn == NULL) {
5268 		RTE_ETHDEV_LOG(ERR,
5269 			"Cannot unregister ethdev port %u callback from NULL\n",
5270 			port_id);
5271 		return -EINVAL;
5272 	}
5273 
5274 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
5275 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
5276 		return -EINVAL;
5277 	}
5278 
5279 	if (port_id == RTE_ETH_ALL) {
5280 		next_port = 0;
5281 		last_port = RTE_MAX_ETHPORTS - 1;
5282 	} else {
5283 		next_port = last_port = port_id;
5284 	}
5285 
5286 	rte_spinlock_lock(&eth_dev_cb_lock);
5287 
5288 	do {
5289 		dev = &rte_eth_devices[next_port];
5290 		ret = 0;
5291 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
5292 		     cb = next) {
5293 
5294 			next = TAILQ_NEXT(cb, next);
5295 
5296 			if (cb->cb_fn != cb_fn || cb->event != event ||
5297 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
5298 				continue;
5299 
5300 			/*
5301 			 * if this callback is not executing right now,
5302 			 * then remove it.
5303 			 */
5304 			if (cb->active == 0) {
5305 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
5306 				rte_free(cb);
5307 			} else {
5308 				ret = -EAGAIN;
5309 			}
5310 		}
5311 	} while (++next_port <= last_port);
5312 
5313 	rte_spinlock_unlock(&eth_dev_cb_lock);
5314 
5315 	rte_ethdev_trace_callback_unregister(port_id, event, cb_fn, cb_arg,
5316 					     ret);
5317 
5318 	return ret;
5319 }
5320 
5321 int
5322 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
5323 {
5324 	uint32_t vec;
5325 	struct rte_eth_dev *dev;
5326 	struct rte_intr_handle *intr_handle;
5327 	uint16_t qid;
5328 	int rc;
5329 
5330 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5331 	dev = &rte_eth_devices[port_id];
5332 
5333 	if (!dev->intr_handle) {
5334 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5335 		return -ENOTSUP;
5336 	}
5337 
5338 	intr_handle = dev->intr_handle;
5339 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5340 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5341 		return -EPERM;
5342 	}
5343 
5344 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
5345 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
5346 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5347 
5348 		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);
5349 
5350 		if (rc && rc != -EEXIST) {
5351 			RTE_ETHDEV_LOG(ERR,
5352 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
5353 				port_id, qid, op, epfd, vec);
5354 		}
5355 	}
5356 
5357 	return 0;
5358 }
5359 
5360 int
5361 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
5362 {
5363 	struct rte_intr_handle *intr_handle;
5364 	struct rte_eth_dev *dev;
5365 	unsigned int efd_idx;
5366 	uint32_t vec;
5367 	int fd;
5368 
5369 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
5370 	dev = &rte_eth_devices[port_id];
5371 
5372 	if (queue_id >= dev->data->nb_rx_queues) {
5373 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5374 		return -1;
5375 	}
5376 
5377 	if (!dev->intr_handle) {
5378 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5379 		return -1;
5380 	}
5381 
5382 	intr_handle = dev->intr_handle;
5383 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5384 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5385 		return -1;
5386 	}
5387 
5388 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5389 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
5390 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
5391 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
5392 
5393 	rte_ethdev_trace_rx_intr_ctl_q_get_fd(port_id, queue_id, fd);
5394 
5395 	return fd;
5396 }
5397 
5398 int
5399 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
5400 			  int epfd, int op, void *data)
5401 {
5402 	uint32_t vec;
5403 	struct rte_eth_dev *dev;
5404 	struct rte_intr_handle *intr_handle;
5405 	int rc;
5406 
5407 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5408 	dev = &rte_eth_devices[port_id];
5409 
5410 	if (queue_id >= dev->data->nb_rx_queues) {
5411 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5412 		return -EINVAL;
5413 	}
5414 
5415 	if (!dev->intr_handle) {
5416 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
5417 		return -ENOTSUP;
5418 	}
5419 
5420 	intr_handle = dev->intr_handle;
5421 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
5422 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
5423 		return -EPERM;
5424 	}
5425 
5426 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
5427 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
5428 
5429 	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);
5430 
5431 	if (rc && rc != -EEXIST) {
5432 		RTE_ETHDEV_LOG(ERR,
5433 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
5434 			port_id, queue_id, op, epfd, vec);
5435 		return rc;
5436 	}
5437 
5438 	return 0;
5439 }
5440 
5441 int
5442 rte_eth_dev_rx_intr_enable(uint16_t port_id,
5443 			   uint16_t queue_id)
5444 {
5445 	struct rte_eth_dev *dev;
5446 	int ret;
5447 
5448 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5449 	dev = &rte_eth_devices[port_id];
5450 
5451 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5452 	if (ret != 0)
5453 		return ret;
5454 
5455 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
5456 		return -ENOTSUP;
5457 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
5458 
5459 	rte_ethdev_trace_rx_intr_enable(port_id, queue_id, ret);
5460 
5461 	return ret;
5462 }
5463 
5464 int
5465 rte_eth_dev_rx_intr_disable(uint16_t port_id,
5466 			    uint16_t queue_id)
5467 {
5468 	struct rte_eth_dev *dev;
5469 	int ret;
5470 
5471 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5472 	dev = &rte_eth_devices[port_id];
5473 
5474 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5475 	if (ret != 0)
5476 		return ret;
5477 
5478 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5479 		return -ENOTSUP;
5480 	ret = eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5481 
5482 	rte_ethdev_trace_rx_intr_disable(port_id, queue_id, ret);
5483 
5484 	return ret;
5485 }
5486 
5487 
5488 const struct rte_eth_rxtx_callback *
5489 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5490 		rte_rx_callback_fn fn, void *user_param)
5491 {
5492 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5493 	rte_errno = ENOTSUP;
5494 	return NULL;
5495 #endif
5496 	struct rte_eth_dev *dev;
5497 
5498 	/* check input parameters */
5499 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5500 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5501 		rte_errno = EINVAL;
5502 		return NULL;
5503 	}
5504 	dev = &rte_eth_devices[port_id];
5505 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5506 		rte_errno = EINVAL;
5507 		return NULL;
5508 	}
5509 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5510 
5511 	if (cb == NULL) {
5512 		rte_errno = ENOMEM;
5513 		return NULL;
5514 	}
5515 
5516 	cb->fn.rx = fn;
5517 	cb->param = user_param;
5518 
5519 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5520 	/* Add the callbacks in fifo order. */
5521 	struct rte_eth_rxtx_callback *tail =
5522 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5523 
5524 	if (!tail) {
5525 		/* Stores to cb->fn and cb->param should complete before
5526 		 * cb is visible to data plane.
5527 		 */
5528 		__atomic_store_n(
5529 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5530 			cb, __ATOMIC_RELEASE);
5531 
5532 	} else {
5533 		while (tail->next)
5534 			tail = tail->next;
5535 		/* Stores to cb->fn and cb->param should complete before
5536 		 * cb is visible to data plane.
5537 		 */
5538 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5539 	}
5540 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5541 
5542 	rte_eth_trace_add_rx_callback(port_id, queue_id, fn, user_param, cb);
5543 
5544 	return cb;
5545 }
5546 
5547 const struct rte_eth_rxtx_callback *
5548 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5549 		rte_rx_callback_fn fn, void *user_param)
5550 {
5551 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5552 	rte_errno = ENOTSUP;
5553 	return NULL;
5554 #endif
5555 	/* check input parameters */
5556 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5557 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5558 		rte_errno = EINVAL;
5559 		return NULL;
5560 	}
5561 
5562 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5563 
5564 	if (cb == NULL) {
5565 		rte_errno = ENOMEM;
5566 		return NULL;
5567 	}
5568 
5569 	cb->fn.rx = fn;
5570 	cb->param = user_param;
5571 
5572 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5573 	/* Add the callbacks at first position */
5574 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5575 	/* Stores to cb->fn, cb->param and cb->next should complete before
5576 	 * cb is visible to data plane threads.
5577 	 */
5578 	__atomic_store_n(
5579 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5580 		cb, __ATOMIC_RELEASE);
5581 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5582 
5583 	rte_eth_trace_add_first_rx_callback(port_id, queue_id, fn, user_param,
5584 					    cb);
5585 
5586 	return cb;
5587 }
5588 
5589 const struct rte_eth_rxtx_callback *
5590 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5591 		rte_tx_callback_fn fn, void *user_param)
5592 {
5593 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5594 	rte_errno = ENOTSUP;
5595 	return NULL;
5596 #endif
5597 	struct rte_eth_dev *dev;
5598 
5599 	/* check input parameters */
5600 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5601 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5602 		rte_errno = EINVAL;
5603 		return NULL;
5604 	}
5605 
5606 	dev = &rte_eth_devices[port_id];
5607 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5608 		rte_errno = EINVAL;
5609 		return NULL;
5610 	}
5611 
5612 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5613 
5614 	if (cb == NULL) {
5615 		rte_errno = ENOMEM;
5616 		return NULL;
5617 	}
5618 
5619 	cb->fn.tx = fn;
5620 	cb->param = user_param;
5621 
5622 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5623 	/* Add the callbacks in fifo order. */
5624 	struct rte_eth_rxtx_callback *tail =
5625 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5626 
5627 	if (!tail) {
5628 		/* Stores to cb->fn and cb->param should complete before
5629 		 * cb is visible to data plane.
5630 		 */
5631 		__atomic_store_n(
5632 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5633 			cb, __ATOMIC_RELEASE);
5634 
5635 	} else {
5636 		while (tail->next)
5637 			tail = tail->next;
5638 		/* Stores to cb->fn and cb->param should complete before
5639 		 * cb is visible to data plane.
5640 		 */
5641 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5642 	}
5643 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5644 
5645 	rte_eth_trace_add_tx_callback(port_id, queue_id, fn, user_param, cb);
5646 
5647 	return cb;
5648 }
5649 
5650 int
5651 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5652 		const struct rte_eth_rxtx_callback *user_cb)
5653 {
5654 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5655 	return -ENOTSUP;
5656 #endif
5657 	/* Check input parameters. */
5658 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5659 	if (user_cb == NULL ||
5660 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5661 		return -EINVAL;
5662 
5663 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5664 	struct rte_eth_rxtx_callback *cb;
5665 	struct rte_eth_rxtx_callback **prev_cb;
5666 	int ret = -EINVAL;
5667 
5668 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5669 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5670 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5671 		cb = *prev_cb;
5672 		if (cb == user_cb) {
5673 			/* Remove the user cb from the callback list. */
5674 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5675 			ret = 0;
5676 			break;
5677 		}
5678 	}
5679 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5680 
5681 	rte_eth_trace_remove_rx_callback(port_id, queue_id, user_cb, ret);
5682 
5683 	return ret;
5684 }
5685 
5686 int
5687 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5688 		const struct rte_eth_rxtx_callback *user_cb)
5689 {
5690 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5691 	return -ENOTSUP;
5692 #endif
5693 	/* Check input parameters. */
5694 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5695 	if (user_cb == NULL ||
5696 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5697 		return -EINVAL;
5698 
5699 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5700 	int ret = -EINVAL;
5701 	struct rte_eth_rxtx_callback *cb;
5702 	struct rte_eth_rxtx_callback **prev_cb;
5703 
5704 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5705 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5706 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5707 		cb = *prev_cb;
5708 		if (cb == user_cb) {
5709 			/* Remove the user cb from the callback list. */
5710 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5711 			ret = 0;
5712 			break;
5713 		}
5714 	}
5715 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5716 
5717 	rte_eth_trace_remove_tx_callback(port_id, queue_id, user_cb, ret);
5718 
5719 	return ret;
5720 }
5721 
5722 int
5723 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5724 	struct rte_eth_rxq_info *qinfo)
5725 {
5726 	struct rte_eth_dev *dev;
5727 
5728 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5729 	dev = &rte_eth_devices[port_id];
5730 
5731 	if (queue_id >= dev->data->nb_rx_queues) {
5732 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5733 		return -EINVAL;
5734 	}
5735 
5736 	if (qinfo == NULL) {
5737 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5738 			port_id, queue_id);
5739 		return -EINVAL;
5740 	}
5741 
5742 	if (dev->data->rx_queues == NULL ||
5743 			dev->data->rx_queues[queue_id] == NULL) {
5744 		RTE_ETHDEV_LOG(ERR,
5745 			       "Rx queue %"PRIu16" of device with port_id=%"
5746 			       PRIu16" has not been setup\n",
5747 			       queue_id, port_id);
5748 		return -EINVAL;
5749 	}
5750 
5751 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5752 		RTE_ETHDEV_LOG(INFO,
5753 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5754 			queue_id, port_id);
5755 		return -EINVAL;
5756 	}
5757 
5758 	if (*dev->dev_ops->rxq_info_get == NULL)
5759 		return -ENOTSUP;
5760 
5761 	memset(qinfo, 0, sizeof(*qinfo));
5762 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5763 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5764 
5765 	rte_eth_trace_rx_queue_info_get(port_id, queue_id, qinfo);
5766 
5767 	return 0;
5768 }
5769 
5770 int
5771 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5772 	struct rte_eth_txq_info *qinfo)
5773 {
5774 	struct rte_eth_dev *dev;
5775 
5776 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5777 	dev = &rte_eth_devices[port_id];
5778 
5779 	if (queue_id >= dev->data->nb_tx_queues) {
5780 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5781 		return -EINVAL;
5782 	}
5783 
5784 	if (qinfo == NULL) {
5785 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5786 			port_id, queue_id);
5787 		return -EINVAL;
5788 	}
5789 
5790 	if (dev->data->tx_queues == NULL ||
5791 			dev->data->tx_queues[queue_id] == NULL) {
5792 		RTE_ETHDEV_LOG(ERR,
5793 			       "Tx queue %"PRIu16" of device with port_id=%"
5794 			       PRIu16" has not been setup\n",
5795 			       queue_id, port_id);
5796 		return -EINVAL;
5797 	}
5798 
5799 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5800 		RTE_ETHDEV_LOG(INFO,
5801 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5802 			queue_id, port_id);
5803 		return -EINVAL;
5804 	}
5805 
5806 	if (*dev->dev_ops->txq_info_get == NULL)
5807 		return -ENOTSUP;
5808 
5809 	memset(qinfo, 0, sizeof(*qinfo));
5810 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5811 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5812 
5813 	rte_eth_trace_tx_queue_info_get(port_id, queue_id, qinfo);
5814 
5815 	return 0;
5816 }
5817 
5818 int
5819 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5820 			  struct rte_eth_burst_mode *mode)
5821 {
5822 	struct rte_eth_dev *dev;
5823 	int ret;
5824 
5825 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5826 	dev = &rte_eth_devices[port_id];
5827 
5828 	if (queue_id >= dev->data->nb_rx_queues) {
5829 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5830 		return -EINVAL;
5831 	}
5832 
5833 	if (mode == NULL) {
5834 		RTE_ETHDEV_LOG(ERR,
5835 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5836 			port_id, queue_id);
5837 		return -EINVAL;
5838 	}
5839 
5840 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5841 		return -ENOTSUP;
5842 	memset(mode, 0, sizeof(*mode));
5843 	ret = eth_err(port_id,
5844 		      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5845 
5846 	rte_eth_trace_rx_burst_mode_get(port_id, queue_id, mode, ret);
5847 
5848 	return ret;
5849 }
5850 
5851 int
5852 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5853 			  struct rte_eth_burst_mode *mode)
5854 {
5855 	struct rte_eth_dev *dev;
5856 	int ret;
5857 
5858 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5859 	dev = &rte_eth_devices[port_id];
5860 
5861 	if (queue_id >= dev->data->nb_tx_queues) {
5862 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5863 		return -EINVAL;
5864 	}
5865 
5866 	if (mode == NULL) {
5867 		RTE_ETHDEV_LOG(ERR,
5868 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5869 			port_id, queue_id);
5870 		return -EINVAL;
5871 	}
5872 
5873 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
5874 		return -ENOTSUP;
5875 	memset(mode, 0, sizeof(*mode));
5876 	ret = eth_err(port_id,
5877 		      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5878 
5879 	rte_eth_trace_tx_burst_mode_get(port_id, queue_id, mode, ret);
5880 
5881 	return ret;
5882 }
5883 
5884 int
5885 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5886 		struct rte_power_monitor_cond *pmc)
5887 {
5888 	struct rte_eth_dev *dev;
5889 	int ret;
5890 
5891 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5892 	dev = &rte_eth_devices[port_id];
5893 
5894 	if (queue_id >= dev->data->nb_rx_queues) {
5895 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5896 		return -EINVAL;
5897 	}
5898 
5899 	if (pmc == NULL) {
5900 		RTE_ETHDEV_LOG(ERR,
5901 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5902 			port_id, queue_id);
5903 		return -EINVAL;
5904 	}
5905 
5906 	if (*dev->dev_ops->get_monitor_addr == NULL)
5907 		return -ENOTSUP;
5908 	ret = eth_err(port_id,
5909 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5910 
5911 	rte_eth_trace_get_monitor_addr(port_id, queue_id, pmc, ret);
5912 
5913 	return ret;
5914 }
5915 
5916 int
5917 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5918 			     struct rte_ether_addr *mc_addr_set,
5919 			     uint32_t nb_mc_addr)
5920 {
5921 	struct rte_eth_dev *dev;
5922 	int ret;
5923 
5924 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5925 	dev = &rte_eth_devices[port_id];
5926 
5927 	if (*dev->dev_ops->set_mc_addr_list == NULL)
5928 		return -ENOTSUP;
5929 	ret = eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5930 						mc_addr_set, nb_mc_addr));
5931 
5932 	rte_ethdev_trace_set_mc_addr_list(port_id, mc_addr_set, nb_mc_addr,
5933 					  ret);
5934 
5935 	return ret;
5936 }
5937 
5938 int
5939 rte_eth_timesync_enable(uint16_t port_id)
5940 {
5941 	struct rte_eth_dev *dev;
5942 	int ret;
5943 
5944 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5945 	dev = &rte_eth_devices[port_id];
5946 
5947 	if (*dev->dev_ops->timesync_enable == NULL)
5948 		return -ENOTSUP;
5949 	ret = eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5950 
5951 	rte_eth_trace_timesync_enable(port_id, ret);
5952 
5953 	return ret;
5954 }
5955 
5956 int
5957 rte_eth_timesync_disable(uint16_t port_id)
5958 {
5959 	struct rte_eth_dev *dev;
5960 	int ret;
5961 
5962 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5963 	dev = &rte_eth_devices[port_id];
5964 
5965 	if (*dev->dev_ops->timesync_disable == NULL)
5966 		return -ENOTSUP;
5967 	ret = eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5968 
5969 	rte_eth_trace_timesync_disable(port_id, ret);
5970 
5971 	return ret;
5972 }
5973 
5974 int
5975 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5976 				   uint32_t flags)
5977 {
5978 	struct rte_eth_dev *dev;
5979 	int ret;
5980 
5981 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5982 	dev = &rte_eth_devices[port_id];
5983 
5984 	if (timestamp == NULL) {
5985 		RTE_ETHDEV_LOG(ERR,
5986 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5987 			port_id);
5988 		return -EINVAL;
5989 	}
5990 
5991 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
5992 		return -ENOTSUP;
5993 
5994 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5995 			       (dev, timestamp, flags));
5996 
5997 	rte_eth_trace_timesync_read_rx_timestamp(port_id, timestamp, flags,
5998 						 ret);
5999 
6000 	return ret;
6001 }
6002 
6003 int
6004 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
6005 				   struct timespec *timestamp)
6006 {
6007 	struct rte_eth_dev *dev;
6008 	int ret;
6009 
6010 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6011 	dev = &rte_eth_devices[port_id];
6012 
6013 	if (timestamp == NULL) {
6014 		RTE_ETHDEV_LOG(ERR,
6015 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
6016 			port_id);
6017 		return -EINVAL;
6018 	}
6019 
6020 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
6021 		return -ENOTSUP;
6022 
6023 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
6024 			       (dev, timestamp));
6025 
6026 	rte_eth_trace_timesync_read_tx_timestamp(port_id, timestamp, ret);
6027 
6028 	return ret;
6029 
6030 }
6031 
6032 int
6033 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
6034 {
6035 	struct rte_eth_dev *dev;
6036 	int ret;
6037 
6038 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6039 	dev = &rte_eth_devices[port_id];
6040 
6041 	if (*dev->dev_ops->timesync_adjust_time == NULL)
6042 		return -ENOTSUP;
6043 	ret = eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
6044 
6045 	rte_eth_trace_timesync_adjust_time(port_id, delta, ret);
6046 
6047 	return ret;
6048 }
6049 
6050 int
6051 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
6052 {
6053 	struct rte_eth_dev *dev;
6054 	int ret;
6055 
6056 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6057 	dev = &rte_eth_devices[port_id];
6058 
6059 	if (timestamp == NULL) {
6060 		RTE_ETHDEV_LOG(ERR,
6061 			"Cannot read ethdev port %u timesync time to NULL\n",
6062 			port_id);
6063 		return -EINVAL;
6064 	}
6065 
6066 	if (*dev->dev_ops->timesync_read_time == NULL)
6067 		return -ENOTSUP;
6068 	ret = eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
6069 								timestamp));
6070 
6071 	rte_eth_trace_timesync_read_time(port_id, timestamp, ret);
6072 
6073 	return ret;
6074 }
6075 
6076 int
6077 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
6078 {
6079 	struct rte_eth_dev *dev;
6080 	int ret;
6081 
6082 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6083 	dev = &rte_eth_devices[port_id];
6084 
6085 	if (timestamp == NULL) {
6086 		RTE_ETHDEV_LOG(ERR,
6087 			"Cannot write ethdev port %u timesync from NULL time\n",
6088 			port_id);
6089 		return -EINVAL;
6090 	}
6091 
6092 	if (*dev->dev_ops->timesync_write_time == NULL)
6093 		return -ENOTSUP;
6094 	ret = eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
6095 								timestamp));
6096 
6097 	rte_eth_trace_timesync_write_time(port_id, timestamp, ret);
6098 
6099 	return ret;
6100 }
6101 
6102 int
6103 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
6104 {
6105 	struct rte_eth_dev *dev;
6106 	int ret;
6107 
6108 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6109 	dev = &rte_eth_devices[port_id];
6110 
6111 	if (clock == NULL) {
6112 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
6113 			port_id);
6114 		return -EINVAL;
6115 	}
6116 
6117 	if (*dev->dev_ops->read_clock == NULL)
6118 		return -ENOTSUP;
6119 	ret = eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
6120 
6121 	rte_eth_trace_read_clock(port_id, clock, ret);
6122 
6123 	return ret;
6124 }
6125 
6126 int
6127 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
6128 {
6129 	struct rte_eth_dev *dev;
6130 	int ret;
6131 
6132 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6133 	dev = &rte_eth_devices[port_id];
6134 
6135 	if (info == NULL) {
6136 		RTE_ETHDEV_LOG(ERR,
6137 			"Cannot get ethdev port %u register info to NULL\n",
6138 			port_id);
6139 		return -EINVAL;
6140 	}
6141 
6142 	if (*dev->dev_ops->get_reg == NULL)
6143 		return -ENOTSUP;
6144 	ret = eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
6145 
6146 	rte_ethdev_trace_get_reg_info(port_id, info, ret);
6147 
6148 	return ret;
6149 }
6150 
6151 int
6152 rte_eth_dev_get_eeprom_length(uint16_t port_id)
6153 {
6154 	struct rte_eth_dev *dev;
6155 	int ret;
6156 
6157 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6158 	dev = &rte_eth_devices[port_id];
6159 
6160 	if (*dev->dev_ops->get_eeprom_length == NULL)
6161 		return -ENOTSUP;
6162 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
6163 
6164 	rte_ethdev_trace_get_eeprom_length(port_id, ret);
6165 
6166 	return ret;
6167 }
6168 
6169 int
6170 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6171 {
6172 	struct rte_eth_dev *dev;
6173 	int ret;
6174 
6175 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6176 	dev = &rte_eth_devices[port_id];
6177 
6178 	if (info == NULL) {
6179 		RTE_ETHDEV_LOG(ERR,
6180 			"Cannot get ethdev port %u EEPROM info to NULL\n",
6181 			port_id);
6182 		return -EINVAL;
6183 	}
6184 
6185 	if (*dev->dev_ops->get_eeprom == NULL)
6186 		return -ENOTSUP;
6187 	ret = eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
6188 
6189 	rte_ethdev_trace_get_eeprom(port_id, info, ret);
6190 
6191 	return ret;
6192 }
6193 
6194 int
6195 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
6196 {
6197 	struct rte_eth_dev *dev;
6198 	int ret;
6199 
6200 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6201 	dev = &rte_eth_devices[port_id];
6202 
6203 	if (info == NULL) {
6204 		RTE_ETHDEV_LOG(ERR,
6205 			"Cannot set ethdev port %u EEPROM from NULL info\n",
6206 			port_id);
6207 		return -EINVAL;
6208 	}
6209 
6210 	if (*dev->dev_ops->set_eeprom == NULL)
6211 		return -ENOTSUP;
6212 	ret = eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
6213 
6214 	rte_ethdev_trace_set_eeprom(port_id, info, ret);
6215 
6216 	return ret;
6217 }
6218 
6219 int
6220 rte_eth_dev_get_module_info(uint16_t port_id,
6221 			    struct rte_eth_dev_module_info *modinfo)
6222 {
6223 	struct rte_eth_dev *dev;
6224 	int ret;
6225 
6226 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6227 	dev = &rte_eth_devices[port_id];
6228 
6229 	if (modinfo == NULL) {
6230 		RTE_ETHDEV_LOG(ERR,
6231 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
6232 			port_id);
6233 		return -EINVAL;
6234 	}
6235 
6236 	if (*dev->dev_ops->get_module_info == NULL)
6237 		return -ENOTSUP;
6238 	ret = (*dev->dev_ops->get_module_info)(dev, modinfo);
6239 
6240 	rte_ethdev_trace_get_module_info(port_id, modinfo, ret);
6241 
6242 	return ret;
6243 }
6244 
6245 int
6246 rte_eth_dev_get_module_eeprom(uint16_t port_id,
6247 			      struct rte_dev_eeprom_info *info)
6248 {
6249 	struct rte_eth_dev *dev;
6250 	int ret;
6251 
6252 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6253 	dev = &rte_eth_devices[port_id];
6254 
6255 	if (info == NULL) {
6256 		RTE_ETHDEV_LOG(ERR,
6257 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
6258 			port_id);
6259 		return -EINVAL;
6260 	}
6261 
6262 	if (info->data == NULL) {
6263 		RTE_ETHDEV_LOG(ERR,
6264 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
6265 			port_id);
6266 		return -EINVAL;
6267 	}
6268 
6269 	if (info->length == 0) {
6270 		RTE_ETHDEV_LOG(ERR,
6271 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
6272 			port_id);
6273 		return -EINVAL;
6274 	}
6275 
6276 	if (*dev->dev_ops->get_module_eeprom == NULL)
6277 		return -ENOTSUP;
6278 	ret = (*dev->dev_ops->get_module_eeprom)(dev, info);
6279 
6280 	rte_ethdev_trace_get_module_eeprom(port_id, info, ret);
6281 
6282 	return ret;
6283 }
6284 
6285 int
6286 rte_eth_dev_get_dcb_info(uint16_t port_id,
6287 			     struct rte_eth_dcb_info *dcb_info)
6288 {
6289 	struct rte_eth_dev *dev;
6290 	int ret;
6291 
6292 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6293 	dev = &rte_eth_devices[port_id];
6294 
6295 	if (dcb_info == NULL) {
6296 		RTE_ETHDEV_LOG(ERR,
6297 			"Cannot get ethdev port %u DCB info to NULL\n",
6298 			port_id);
6299 		return -EINVAL;
6300 	}
6301 
6302 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
6303 
6304 	if (*dev->dev_ops->get_dcb_info == NULL)
6305 		return -ENOTSUP;
6306 	ret = eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
6307 
6308 	rte_ethdev_trace_get_dcb_info(port_id, dcb_info, ret);
6309 
6310 	return ret;
6311 }
6312 
6313 static void
6314 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
6315 		const struct rte_eth_desc_lim *desc_lim)
6316 {
6317 	if (desc_lim->nb_align != 0)
6318 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
6319 
6320 	if (desc_lim->nb_max != 0)
6321 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
6322 
6323 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
6324 }
6325 
6326 int
6327 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
6328 				 uint16_t *nb_rx_desc,
6329 				 uint16_t *nb_tx_desc)
6330 {
6331 	struct rte_eth_dev_info dev_info;
6332 	int ret;
6333 
6334 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6335 
6336 	ret = rte_eth_dev_info_get(port_id, &dev_info);
6337 	if (ret != 0)
6338 		return ret;
6339 
6340 	if (nb_rx_desc != NULL)
6341 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
6342 
6343 	if (nb_tx_desc != NULL)
6344 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
6345 
6346 	rte_ethdev_trace_adjust_nb_rx_tx_desc(port_id);
6347 
6348 	return 0;
6349 }
6350 
6351 int
6352 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
6353 				   struct rte_eth_hairpin_cap *cap)
6354 {
6355 	struct rte_eth_dev *dev;
6356 	int ret;
6357 
6358 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6359 	dev = &rte_eth_devices[port_id];
6360 
6361 	if (cap == NULL) {
6362 		RTE_ETHDEV_LOG(ERR,
6363 			"Cannot get ethdev port %u hairpin capability to NULL\n",
6364 			port_id);
6365 		return -EINVAL;
6366 	}
6367 
6368 	if (*dev->dev_ops->hairpin_cap_get == NULL)
6369 		return -ENOTSUP;
6370 	memset(cap, 0, sizeof(*cap));
6371 	ret = eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
6372 
6373 	rte_ethdev_trace_hairpin_capability_get(port_id, cap, ret);
6374 
6375 	return ret;
6376 }
6377 
6378 int
6379 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
6380 {
6381 	struct rte_eth_dev *dev;
6382 	int ret;
6383 
6384 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6385 	dev = &rte_eth_devices[port_id];
6386 
6387 	if (pool == NULL) {
6388 		RTE_ETHDEV_LOG(ERR,
6389 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
6390 			port_id);
6391 		return -EINVAL;
6392 	}
6393 
6394 	if (*dev->dev_ops->pool_ops_supported == NULL)
6395 		return 1; /* all pools are supported */
6396 
6397 	ret = (*dev->dev_ops->pool_ops_supported)(dev, pool);
6398 
6399 	rte_ethdev_trace_pool_ops_supported(port_id, pool, ret);
6400 
6401 	return ret;
6402 }
6403 
6404 int
6405 rte_eth_representor_info_get(uint16_t port_id,
6406 			     struct rte_eth_representor_info *info)
6407 {
6408 	struct rte_eth_dev *dev;
6409 	int ret;
6410 
6411 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6412 	dev = &rte_eth_devices[port_id];
6413 
6414 	if (*dev->dev_ops->representor_info_get == NULL)
6415 		return -ENOTSUP;
6416 	ret = eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
6417 
6418 	rte_eth_trace_representor_info_get(port_id, info, ret);
6419 
6420 	return ret;
6421 }
6422 
6423 int
6424 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
6425 {
6426 	struct rte_eth_dev *dev;
6427 	int ret;
6428 
6429 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6430 	dev = &rte_eth_devices[port_id];
6431 
6432 	if (dev->data->dev_configured != 0) {
6433 		RTE_ETHDEV_LOG(ERR,
6434 			"The port (ID=%"PRIu16") is already configured\n",
6435 			port_id);
6436 		return -EBUSY;
6437 	}
6438 
6439 	if (features == NULL) {
6440 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
6441 		return -EINVAL;
6442 	}
6443 
6444 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
6445 		return -ENOTSUP;
6446 	ret = eth_err(port_id,
6447 		      (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
6448 
6449 	rte_eth_trace_rx_metadata_negotiate(port_id, *features, ret);
6450 
6451 	return ret;
6452 }
6453 
6454 int
6455 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
6456 		struct rte_eth_ip_reassembly_params *reassembly_capa)
6457 {
6458 	struct rte_eth_dev *dev;
6459 	int ret;
6460 
6461 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6462 	dev = &rte_eth_devices[port_id];
6463 
6464 	if (dev->data->dev_configured == 0) {
6465 		RTE_ETHDEV_LOG(ERR,
6466 			"Device with port_id=%u is not configured.\n"
6467 			"Cannot get IP reassembly capability\n",
6468 			port_id);
6469 		return -EINVAL;
6470 	}
6471 
6472 	if (reassembly_capa == NULL) {
6473 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL");
6474 		return -EINVAL;
6475 	}
6476 
6477 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
6478 		return -ENOTSUP;
6479 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
6480 
6481 	ret = eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
6482 					(dev, reassembly_capa));
6483 
6484 	rte_eth_trace_ip_reassembly_capability_get(port_id, reassembly_capa,
6485 						   ret);
6486 
6487 	return ret;
6488 }
6489 
6490 int
6491 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
6492 		struct rte_eth_ip_reassembly_params *conf)
6493 {
6494 	struct rte_eth_dev *dev;
6495 	int ret;
6496 
6497 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6498 	dev = &rte_eth_devices[port_id];
6499 
6500 	if (dev->data->dev_configured == 0) {
6501 		RTE_ETHDEV_LOG(ERR,
6502 			"Device with port_id=%u is not configured.\n"
6503 			"Cannot get IP reassembly configuration\n",
6504 			port_id);
6505 		return -EINVAL;
6506 	}
6507 
6508 	if (conf == NULL) {
6509 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL");
6510 		return -EINVAL;
6511 	}
6512 
6513 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
6514 		return -ENOTSUP;
6515 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
6516 	ret = eth_err(port_id,
6517 		      (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
6518 
6519 	rte_eth_trace_ip_reassembly_conf_get(port_id, conf, ret);
6520 
6521 	return ret;
6522 }
6523 
6524 int
6525 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
6526 		const struct rte_eth_ip_reassembly_params *conf)
6527 {
6528 	struct rte_eth_dev *dev;
6529 	int ret;
6530 
6531 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6532 	dev = &rte_eth_devices[port_id];
6533 
6534 	if (dev->data->dev_configured == 0) {
6535 		RTE_ETHDEV_LOG(ERR,
6536 			"Device with port_id=%u is not configured.\n"
6537 			"Cannot set IP reassembly configuration",
6538 			port_id);
6539 		return -EINVAL;
6540 	}
6541 
6542 	if (dev->data->dev_started != 0) {
6543 		RTE_ETHDEV_LOG(ERR,
6544 			"Device with port_id=%u started,\n"
6545 			"cannot configure IP reassembly params.\n",
6546 			port_id);
6547 		return -EINVAL;
6548 	}
6549 
6550 	if (conf == NULL) {
6551 		RTE_ETHDEV_LOG(ERR,
6552 				"Invalid IP reassembly configuration (NULL)\n");
6553 		return -EINVAL;
6554 	}
6555 
6556 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6557 		return -ENOTSUP;
6558 	ret = eth_err(port_id,
6559 		      (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6560 
6561 	rte_eth_trace_ip_reassembly_conf_set(port_id, conf, ret);
6562 
6563 	return ret;
6564 }
6565 
6566 int
6567 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
6568 {
6569 	struct rte_eth_dev *dev;
6570 
6571 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6572 	dev = &rte_eth_devices[port_id];
6573 
6574 	if (file == NULL) {
6575 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6576 		return -EINVAL;
6577 	}
6578 
6579 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6580 		return -ENOTSUP;
6581 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6582 }
6583 
6584 int
6585 rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6586 			   uint16_t offset, uint16_t num, FILE *file)
6587 {
6588 	struct rte_eth_dev *dev;
6589 
6590 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6591 	dev = &rte_eth_devices[port_id];
6592 
6593 	if (queue_id >= dev->data->nb_rx_queues) {
6594 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
6595 		return -EINVAL;
6596 	}
6597 
6598 	if (file == NULL) {
6599 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6600 		return -EINVAL;
6601 	}
6602 
6603 	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
6604 		return -ENOTSUP;
6605 
6606 	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
6607 						queue_id, offset, num, file));
6608 }
6609 
6610 int
6611 rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
6612 			   uint16_t offset, uint16_t num, FILE *file)
6613 {
6614 	struct rte_eth_dev *dev;
6615 
6616 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6617 	dev = &rte_eth_devices[port_id];
6618 
6619 	if (queue_id >= dev->data->nb_tx_queues) {
6620 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
6621 		return -EINVAL;
6622 	}
6623 
6624 	if (file == NULL) {
6625 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6626 		return -EINVAL;
6627 	}
6628 
6629 	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
6630 		return -ENOTSUP;
6631 
6632 	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
6633 						queue_id, offset, num, file));
6634 }
6635 
6636 int
6637 rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
6638 {
6639 	int i, j;
6640 	struct rte_eth_dev *dev;
6641 	const uint32_t *all_types;
6642 
6643 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6644 	dev = &rte_eth_devices[port_id];
6645 
6646 	if (ptypes == NULL && num > 0) {
6647 		RTE_ETHDEV_LOG(ERR,
6648 			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
6649 			port_id);
6650 		return -EINVAL;
6651 	}
6652 
6653 	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
6654 		return -ENOTSUP;
6655 	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);
6656 
6657 	if (all_types == NULL)
6658 		return 0;
6659 
6660 	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
6661 		if (j < num) {
6662 			ptypes[j] = all_types[i];
6663 
6664 			rte_eth_trace_buffer_split_get_supported_hdr_ptypes(
6665 							port_id, j, ptypes[j]);
6666 		}
6667 		j++;
6668 	}
6669 
6670 	return j;
6671 }
6672 
6673 int rte_eth_dev_count_aggr_ports(uint16_t port_id)
6674 {
6675 	struct rte_eth_dev *dev;
6676 	int ret;
6677 
6678 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6679 	dev = &rte_eth_devices[port_id];
6680 
6681 	if (*dev->dev_ops->count_aggr_ports == NULL)
6682 		return 0;
6683 	ret = eth_err(port_id, (*dev->dev_ops->count_aggr_ports)(dev));
6684 
6685 	rte_eth_trace_count_aggr_ports(port_id, ret);
6686 
6687 	return ret;
6688 }
6689 
6690 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
6691 				     uint8_t affinity)
6692 {
6693 	struct rte_eth_dev *dev;
6694 	int aggr_ports;
6695 	int ret;
6696 
6697 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6698 	dev = &rte_eth_devices[port_id];
6699 
6700 	if (tx_queue_id >= dev->data->nb_tx_queues) {
6701 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
6702 		return -EINVAL;
6703 	}
6704 
6705 	if (*dev->dev_ops->map_aggr_tx_affinity == NULL)
6706 		return -ENOTSUP;
6707 
6708 	if (dev->data->dev_configured == 0) {
6709 		RTE_ETHDEV_LOG(ERR,
6710 			"Port %u must be configured before Tx affinity mapping\n",
6711 			port_id);
6712 		return -EINVAL;
6713 	}
6714 
6715 	if (dev->data->dev_started) {
6716 		RTE_ETHDEV_LOG(ERR,
6717 			"Port %u must be stopped to allow configuration\n",
6718 			port_id);
6719 		return -EBUSY;
6720 	}
6721 
6722 	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
6723 	if (aggr_ports == 0) {
6724 		RTE_ETHDEV_LOG(ERR,
6725 			"Port %u has no aggregated port\n",
6726 			port_id);
6727 		return -ENOTSUP;
6728 	}
6729 
6730 	if (affinity > aggr_ports) {
6731 		RTE_ETHDEV_LOG(ERR,
6732 			"Port %u map invalid affinity %u exceeds the maximum number %u\n",
6733 			port_id, affinity, aggr_ports);
6734 		return -EINVAL;
6735 	}
6736 
6737 	ret = eth_err(port_id, (*dev->dev_ops->map_aggr_tx_affinity)(dev,
6738 				tx_queue_id, affinity));
6739 
6740 	rte_eth_trace_map_aggr_tx_affinity(port_id, tx_queue_id, affinity, ret);
6741 
6742 	return ret;
6743 }
6744 
6745 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6746