/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* Store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter (i.e. without any bus-level argument)
	 * coming from the future syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at the ethdev
	 * level. Extra parameters will be ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* The device matches the bus part; check the ethdev part now. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate over. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
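
/*
 * Usage sketch (illustrative; not part of the original file): the three
 * iterator functions above are normally driven through the
 * RTE_ETH_FOREACH_MATCHING_DEV() helper from rte_ethdev.h:
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id, "class=eth", &iter)
 *		printf("matched port %u\n", port_id);
 *
 * The macro calls rte_eth_iterator_init(), then rte_eth_iterator_next()
 * until it returns RTE_MAX_ETHPORTS; rte_eth_iterator_next() performs the
 * cleanup itself once the iteration is exhausted.
 */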

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure, same buffer size) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner ID=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
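
/*
 * Usage sketch (illustrative; not part of the original file): a typical
 * ownership handshake with the API above, assuming "my_app" as the owner
 * name:
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... port_id is now reserved for this owner ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */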

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/*
	 * Shouldn't check 'rte_eth_devices[i].data' because it might be
	 * overwritten by a VDEV PMD.
	 */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}
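
/*
 * Example (illustrative; the device name is hypothetical): resolving a
 * port ID from its device name with the helper above.
 *
 *	uint16_t port_id;
 *
 *	if (rte_eth_dev_get_port_by_name("0000:08:00.0", &port_id) == 0)
 *		printf("device is port %u\n", port_id);
 */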

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been set up\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
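
/*
 * Example (illustrative; not part of the original file): building a
 * link_speeds bitmap for struct rte_eth_conf from fixed numeric speeds,
 * e.g. to advertise only 10G and 25G full duplex:
 *
 *	conf.link_speeds = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *						 RTE_ETH_LINK_FULL_DUPLEX) |
 *			   rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_25G,
 *						 RTE_ETH_LINK_FULL_DUPLEX);
 */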

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type, i.e. the "Rx"/"Tx" string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation is successful.
 *   - (-EINVAL) if a requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if any offload is enabled although not requested. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
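
/*
 * Worked example for the helper above: a device reporting
 * max_rx_pktlen = 9618 and max_mtu = 9600 yields an overhead of
 * 9618 - 9600 = 18 bytes; a device not reporting a max MTU
 * (max_mtu == UINT16_MAX) gets the same 18-byte default,
 * RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4).
 */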

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unexpected behaviour.
	 * It is set to 1 when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store the original config, as a rollback is required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, so copy it before the
	 * dev_info get call below.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Back up the MTU for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx
	 * and Tx is zero, use the driver's preferred values. This cannot be
	 * done individually, as it is valid for either Tx or Rx (but not
	 * both) to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that the device supports the requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Set up the new number of Rx/Tx queues and reconfigure the device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port %u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port %u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
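
/*
 * Usage sketch (illustrative; not part of the original file): minimal
 * single-queue port configuration with all-default settings. Queue setup
 * and rte_eth_dev_start() must follow before any traffic can flow.
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
 *		... handle the error ...
 */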

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/*
	 * Replay the promiscuous configuration.
	 * Use the callbacks directly, since we don't need the port_id check
	 * and want to bypass the same-value check.
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/*
	 * Replay the all-multicast configuration.
	 * Use the callbacks directly, since we don't need the port_id check
	 * and want to bypass the same-value check.
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Restore the MAC address now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error while restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
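
/*
 * Usage sketch (illustrative): starting a configured port and then
 * polling its link status with rte_eth_link_get_nowait():
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_dev_start(port_id) == 0 &&
 *	    rte_eth_link_get_nowait(port_id, &link) == 0)
 *		printf("link is %s\n", link.link_status ? "up" : "down");
 */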

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources, but it should not be obliged to wait
	 * for the device to stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			       port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}
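
/*
 * Usage sketch (illustrative): in the primary process the expected
 * teardown order is stop first, then close:
 *
 *	int ret = rte_eth_dev_stop(port_id);
 *	if (ret != 0)
 *		printf("stop failed: %s\n", rte_strerror(-ret));
 *	rte_eth_dev_close(port_id);
 */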

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer: this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool has valid private data.
		 */
		if (mp->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
				mp->name, mp->private_data_size,
				(unsigned int)
				sizeof(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
		if (mbp_buf_size < dev_info.min_rx_bufsize +
				   RTE_PKTMBUF_HEADROOM) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
				       mp->name, mbp_buf_size,
				       RTE_PKTMBUF_HEADROOM +
				       dev_info.min_rx_bufsize,
				       RTE_PKTMBUF_HEADROOM,
				       dev_info.min_rx_bufsize);
			return -EINVAL;
		}
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use the default specified by the driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If the driver default is also zero, fall back on the EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;
1844 
1845 	/*
1846 	 * If an offloading has already been enabled in
1847 	 * rte_eth_dev_configure(), it has been enabled on all queues,
1848 	 * so there is no need to enable it in this queue again.
1849 	 * The local_conf.offloads input to underlying PMD only carries
1850 	 * those offloadings which are only enabled on this queue and
1851 	 * not enabled on all queues.
1852 	 */
1853 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1854 
1855 	/*
1856 	 * Offloads newly added for this queue are those not enabled in
1857 	 * rte_eth_dev_configure(), and they must be of the per-queue type.
1858 	 * A pure per-port offload can't be enabled on one queue while
1859 	 * disabled on another. A pure per-port offload can't be newly
1860 	 * added on any queue if it hasn't been enabled in
1861 	 * rte_eth_dev_configure().
1862 	 */
1863 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1864 	     local_conf.offloads) {
1865 		RTE_ETHDEV_LOG(ERR,
1866 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1867 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1868 			port_id, rx_queue_id, local_conf.offloads,
1869 			dev_info.rx_queue_offload_capa,
1870 			__func__);
1871 		return -EINVAL;
1872 	}
1873 
1874 	if (local_conf.share_group > 0 &&
1875 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1876 		RTE_ETHDEV_LOG(ERR,
1877 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1878 			port_id, rx_queue_id, local_conf.share_group);
1879 		return -EINVAL;
1880 	}
1881 
1882 	/*
1883 	 * If LRO is enabled, check that the maximum aggregated packet
1884 	 * size is supported by the configured device.
1885 	 */
1886 	/* Get the real Ethernet overhead length */
1887 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1888 		uint32_t overhead_len;
1889 		uint32_t max_rx_pktlen;
1890 		int ret;
1891 
1892 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1893 				dev_info.max_mtu);
1894 		max_rx_pktlen = dev->data->mtu + overhead_len;
1895 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1896 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1897 		ret = eth_dev_check_lro_pkt_size(port_id,
1898 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1899 				max_rx_pktlen,
1900 				dev_info.max_lro_pkt_size);
1901 		if (ret != 0)
1902 			return ret;
1903 	}
1904 
1905 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1906 					      socket_id, &local_conf, mp);
1907 	if (!ret) {
1908 		if (!dev->data->min_rx_buf_size ||
1909 		    dev->data->min_rx_buf_size > mbp_buf_size)
1910 			dev->data->min_rx_buf_size = mbp_buf_size;
1911 	}
1912 
1913 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1914 		rx_conf, ret);
1915 	return eth_err(port_id, ret);
1916 }
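
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * Rx queue setup on a configured port. "port_id" and "mbuf_pool" are
 * assumed to exist; the descriptor count and queue index are examples.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_rxconf rxconf;
 *	int ret;
 *
 *	ret = rte_eth_dev_info_get(port_id, &dev_info);
 *	if (ret != 0)
 *		return ret;
 *	rxconf = dev_info.default_rxconf;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), &rxconf, mbuf_pool);
 */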
1917 
1918 int
1919 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1920 			       uint16_t nb_rx_desc,
1921 			       const struct rte_eth_hairpin_conf *conf)
1922 {
1923 	int ret;
1924 	struct rte_eth_dev *dev;
1925 	struct rte_eth_hairpin_cap cap;
1926 	int i;
1927 	int count;
1928 
1929 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1930 	dev = &rte_eth_devices[port_id];
1931 
1932 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1933 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1934 		return -EINVAL;
1935 	}
1936 
1937 	if (conf == NULL) {
1938 		RTE_ETHDEV_LOG(ERR,
1939 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1940 			port_id);
1941 		return -EINVAL;
1942 	}
1943 
1944 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1945 	if (ret != 0)
1946 		return ret;
1947 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
1948 		return -ENOTSUP;
1949 	/* If nb_rx_desc is zero, use the maximum descriptor count from the driver. */
1950 	if (nb_rx_desc == 0)
1951 		nb_rx_desc = cap.max_nb_desc;
1952 	if (nb_rx_desc > cap.max_nb_desc) {
1953 		RTE_ETHDEV_LOG(ERR,
1954 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1955 			nb_rx_desc, cap.max_nb_desc);
1956 		return -EINVAL;
1957 	}
1958 	if (conf->peer_count > cap.max_rx_2_tx) {
1959 		RTE_ETHDEV_LOG(ERR,
1960 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
1961 			conf->peer_count, cap.max_rx_2_tx);
1962 		return -EINVAL;
1963 	}
1964 	if (conf->peer_count == 0) {
1965 		RTE_ETHDEV_LOG(ERR,
1966 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
1967 			conf->peer_count);
1968 		return -EINVAL;
1969 	}
1970 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1971 	     cap.max_nb_queues != UINT16_MAX; i++) {
1972 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1973 			count++;
1974 	}
1975 	if (count > cap.max_nb_queues) {
1976 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
1977 			       cap.max_nb_queues);
1978 		return -EINVAL;
1979 	}
1980 	if (dev->data->dev_started)
1981 		return -EBUSY;
1982 	eth_dev_rxq_release(dev, rx_queue_id);
1983 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1984 						      nb_rx_desc, conf);
1985 	if (ret == 0)
1986 		dev->data->rx_queue_state[rx_queue_id] =
1987 			RTE_ETH_QUEUE_STATE_HAIRPIN;
1988 	return eth_err(port_id, ret);
1989 }
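
/*
 * Illustrative sketch (assumptions: "port_id" is configured and stopped,
 * hairpinning Rx queue 1 back to Tx queue 1 of the same port). Passing
 * nb_rx_desc = 0 picks the driver maximum, as coded above.
 *
 *	struct rte_eth_hairpin_conf hp_conf = { .peer_count = 1 };
 *
 *	hp_conf.peers[0].port = port_id;
 *	hp_conf.peers[0].queue = 1;
 *	ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &hp_conf);
 */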
1990 
1991 int
1992 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1993 		       uint16_t nb_tx_desc, unsigned int socket_id,
1994 		       const struct rte_eth_txconf *tx_conf)
1995 {
1996 	struct rte_eth_dev *dev;
1997 	struct rte_eth_dev_info dev_info;
1998 	struct rte_eth_txconf local_conf;
1999 	int ret;
2000 
2001 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2002 	dev = &rte_eth_devices[port_id];
2003 
2004 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2005 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2006 		return -EINVAL;
2007 	}
2008 
2009 	if (*dev->dev_ops->tx_queue_setup == NULL)
2010 		return -ENOTSUP;
2011 
2012 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2013 	if (ret != 0)
2014 		return ret;
2015 
2016 	/* Use the default specified by the driver if nb_tx_desc is zero */
2017 	if (nb_tx_desc == 0) {
2018 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2019 		/* If driver default is zero, fall back on EAL default */
2020 		if (nb_tx_desc == 0)
2021 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2022 	}
2023 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2024 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2025 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2026 		RTE_ETHDEV_LOG(ERR,
2027 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2028 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2029 			dev_info.tx_desc_lim.nb_min,
2030 			dev_info.tx_desc_lim.nb_align);
2031 		return -EINVAL;
2032 	}
2033 
2034 	if (dev->data->dev_started &&
2035 		!(dev_info.dev_capa &
2036 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2037 		return -EBUSY;
2038 
2039 	if (dev->data->dev_started &&
2040 		(dev->data->tx_queue_state[tx_queue_id] !=
2041 			RTE_ETH_QUEUE_STATE_STOPPED))
2042 		return -EBUSY;
2043 
2044 	eth_dev_txq_release(dev, tx_queue_id);
2045 
2046 	if (tx_conf == NULL)
2047 		tx_conf = &dev_info.default_txconf;
2048 
2049 	local_conf = *tx_conf;
2050 
2051 	/*
2052 	 * If an offload has already been enabled in
2053 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2054 	 * so there is no need to enable it on this queue again.
2055 	 * The local_conf.offloads input to the underlying PMD only carries
2056 	 * those offloads which are enabled on this queue alone and
2057 	 * not on all queues.
2058 	 */
2059 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2060 
2061 	/*
2062 	 * Offloads newly added for this queue are those not enabled in
2063 	 * rte_eth_dev_configure(), and they must be of the per-queue type.
2064 	 * A pure per-port offload can't be enabled on one queue while
2065 	 * disabled on another. A pure per-port offload can't be newly
2066 	 * added on any queue if it hasn't been enabled in
2067 	 * rte_eth_dev_configure().
2068 	 */
2069 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2070 	     local_conf.offloads) {
2071 		RTE_ETHDEV_LOG(ERR,
2072 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2073 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2074 			port_id, tx_queue_id, local_conf.offloads,
2075 			dev_info.tx_queue_offload_capa,
2076 			__func__);
2077 		return -EINVAL;
2078 	}
2079 
2080 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2081 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2082 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2083 }
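
/*
 * Illustrative usage sketch: Tx queue setup mirroring the Rx flow above.
 * Passing nb_tx_desc = 0 and tx_conf = NULL selects the driver (or EAL
 * fallback) defaults, per the logic above.
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 0,
 *			rte_eth_dev_socket_id(port_id), NULL);
 */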
2084 
2085 int
2086 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2087 			       uint16_t nb_tx_desc,
2088 			       const struct rte_eth_hairpin_conf *conf)
2089 {
2090 	struct rte_eth_dev *dev;
2091 	struct rte_eth_hairpin_cap cap;
2092 	int i;
2093 	int count;
2094 	int ret;
2095 
2096 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2097 	dev = &rte_eth_devices[port_id];
2098 
2099 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2100 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2101 		return -EINVAL;
2102 	}
2103 
2104 	if (conf == NULL) {
2105 		RTE_ETHDEV_LOG(ERR,
2106 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2107 			port_id);
2108 		return -EINVAL;
2109 	}
2110 
2111 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2112 	if (ret != 0)
2113 		return ret;
2114 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2115 		return -ENOTSUP;
2116 	/* If nb_tx_desc is zero, use the maximum descriptor count from the driver. */
2117 	if (nb_tx_desc == 0)
2118 		nb_tx_desc = cap.max_nb_desc;
2119 	if (nb_tx_desc > cap.max_nb_desc) {
2120 		RTE_ETHDEV_LOG(ERR,
2121 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2122 			nb_tx_desc, cap.max_nb_desc);
2123 		return -EINVAL;
2124 	}
2125 	if (conf->peer_count > cap.max_tx_2_rx) {
2126 		RTE_ETHDEV_LOG(ERR,
2127 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
2128 			conf->peer_count, cap.max_tx_2_rx);
2129 		return -EINVAL;
2130 	}
2131 	if (conf->peer_count == 0) {
2132 		RTE_ETHDEV_LOG(ERR,
2133 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
2134 			conf->peer_count);
2135 		return -EINVAL;
2136 	}
2137 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2138 	     cap.max_nb_queues != UINT16_MAX; i++) {
2139 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2140 			count++;
2141 	}
2142 	if (count > cap.max_nb_queues) {
2143 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2144 			       cap.max_nb_queues);
2145 		return -EINVAL;
2146 	}
2147 	if (dev->data->dev_started)
2148 		return -EBUSY;
2149 	eth_dev_txq_release(dev, tx_queue_id);
2150 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2151 		(dev, tx_queue_id, nb_tx_desc, conf);
2152 	if (ret == 0)
2153 		dev->data->tx_queue_state[tx_queue_id] =
2154 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2155 	return eth_err(port_id, ret);
2156 }
2157 
2158 int
2159 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2160 {
2161 	struct rte_eth_dev *dev;
2162 	int ret;
2163 
2164 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2165 	dev = &rte_eth_devices[tx_port];
2166 
2167 	if (dev->data->dev_started == 0) {
2168 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2169 		return -EBUSY;
2170 	}
2171 
2172 	if (*dev->dev_ops->hairpin_bind == NULL)
2173 		return -ENOTSUP;
2174 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2175 	if (ret != 0)
2176 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2177 			       " to Rx %d (%d - all ports)\n",
2178 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2179 
2180 	return ret;
2181 }
2182 
2183 int
2184 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2185 {
2186 	struct rte_eth_dev *dev;
2187 	int ret;
2188 
2189 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2190 	dev = &rte_eth_devices[tx_port];
2191 
2192 	if (dev->data->dev_started == 0) {
2193 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2194 		return -EBUSY;
2195 	}
2196 
2197 	if (*dev->dev_ops->hairpin_unbind == NULL)
2198 		return -ENOTSUP;
2199 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2200 	if (ret != 0)
2201 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2202 			       " from Rx %d (%d - all ports)\n",
2203 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2204 
2205 	return ret;
2206 }
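
/*
 * Illustrative sketch of manual hairpin binding between two started
 * ports ("tx_port" and "rx_port" are assumed); per the log messages
 * above, RTE_MAX_ETHPORTS as rx_port means "all ports".
 *
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *	...
 *	ret = rte_eth_hairpin_unbind(tx_port, rx_port);
 */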
2207 
2208 int
2209 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2210 			       size_t len, uint32_t direction)
2211 {
2212 	struct rte_eth_dev *dev;
2213 	int ret;
2214 
2215 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2216 	dev = &rte_eth_devices[port_id];
2217 
2218 	if (peer_ports == NULL) {
2219 		RTE_ETHDEV_LOG(ERR,
2220 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2221 			port_id);
2222 		return -EINVAL;
2223 	}
2224 
2225 	if (len == 0) {
2226 		RTE_ETHDEV_LOG(ERR,
2227 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2228 			port_id);
2229 		return -EINVAL;
2230 	}
2231 
2232 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2233 		return -ENOTSUP;
2234 
2235 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2236 						      len, direction);
2237 	if (ret < 0)
2238 		RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2239 			       port_id, direction ? "Rx" : "Tx");
2240 
2241 	return ret;
2242 }
2243 
2244 void
2245 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2246 		void *userdata __rte_unused)
2247 {
2248 	rte_pktmbuf_free_bulk(pkts, unsent);
2249 }
2250 
2251 void
2252 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2253 		void *userdata)
2254 {
2255 	uint64_t *count = userdata;
2256 
2257 	rte_pktmbuf_free_bulk(pkts, unsent);
2258 	*count += unsent;
2259 }
2260 
2261 int
2262 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2263 		buffer_tx_error_fn cbfn, void *userdata)
2264 {
2265 	if (buffer == NULL) {
2266 		RTE_ETHDEV_LOG(ERR,
2267 			"Cannot set Tx buffer error callback to NULL buffer\n");
2268 		return -EINVAL;
2269 	}
2270 
2271 	buffer->error_callback = cbfn;
2272 	buffer->error_userdata = userdata;
2273 	return 0;
2274 }
2275 
2276 int
2277 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2278 {
2279 	int ret = 0;
2280 
2281 	if (buffer == NULL) {
2282 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2283 		return -EINVAL;
2284 	}
2285 
2286 	buffer->size = size;
2287 	if (buffer->error_callback == NULL) {
2288 		ret = rte_eth_tx_buffer_set_err_callback(
2289 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2290 	}
2291 
2292 	return ret;
2293 }
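
/*
 * Illustrative sketch of the Tx buffer API (assumes "port_id" with Tx
 * queue 0 set up and an mbuf "m"): buffer up to 32 packets, count drops
 * through the stock callback, then flush.
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *txb;
 *
 *	txb = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32),
 *			0, rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(txb, 32);
 *	rte_eth_tx_buffer_set_err_callback(txb,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *
 *	rte_eth_tx_buffer(port_id, 0, txb, m);
 *	rte_eth_tx_buffer_flush(port_id, 0, txb);
 */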
2294 
2295 int
2296 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2297 {
2298 	struct rte_eth_dev *dev;
2299 	int ret;
2300 
2301 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2302 	dev = &rte_eth_devices[port_id];
2303 
2304 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2305 		return -ENOTSUP;
2306 
2307 	/* Call driver to free pending mbufs. */
2308 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2309 					       free_cnt);
2310 	return eth_err(port_id, ret);
2311 }
2312 
2313 int
2314 rte_eth_promiscuous_enable(uint16_t port_id)
2315 {
2316 	struct rte_eth_dev *dev;
2317 	int diag = 0;
2318 
2319 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320 	dev = &rte_eth_devices[port_id];
2321 
2322 	if (dev->data->promiscuous == 1)
2323 		return 0;
2324 
2325 	if (*dev->dev_ops->promiscuous_enable == NULL)
2326 		return -ENOTSUP;
2327 
2328 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2329 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2330 
2331 	return eth_err(port_id, diag);
2332 }
2333 
2334 int
2335 rte_eth_promiscuous_disable(uint16_t port_id)
2336 {
2337 	struct rte_eth_dev *dev;
2338 	int diag = 0;
2339 
2340 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341 	dev = &rte_eth_devices[port_id];
2342 
2343 	if (dev->data->promiscuous == 0)
2344 		return 0;
2345 
2346 	if (*dev->dev_ops->promiscuous_disable == NULL)
2347 		return -ENOTSUP;
2348 
2349 	dev->data->promiscuous = 0;
2350 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2351 	if (diag != 0)
2352 		dev->data->promiscuous = 1;
2353 
2354 	return eth_err(port_id, diag);
2355 }
2356 
2357 int
2358 rte_eth_promiscuous_get(uint16_t port_id)
2359 {
2360 	struct rte_eth_dev *dev;
2361 
2362 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363 	dev = &rte_eth_devices[port_id];
2364 
2365 	return dev->data->promiscuous;
2366 }
2367 
2368 int
2369 rte_eth_allmulticast_enable(uint16_t port_id)
2370 {
2371 	struct rte_eth_dev *dev;
2372 	int diag;
2373 
2374 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2375 	dev = &rte_eth_devices[port_id];
2376 
2377 	if (dev->data->all_multicast == 1)
2378 		return 0;
2379 
2380 	if (*dev->dev_ops->allmulticast_enable == NULL)
2381 		return -ENOTSUP;
2382 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2383 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2384 
2385 	return eth_err(port_id, diag);
2386 }
2387 
2388 int
2389 rte_eth_allmulticast_disable(uint16_t port_id)
2390 {
2391 	struct rte_eth_dev *dev;
2392 	int diag;
2393 
2394 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2395 	dev = &rte_eth_devices[port_id];
2396 
2397 	if (dev->data->all_multicast == 0)
2398 		return 0;
2399 
2400 	if (*dev->dev_ops->allmulticast_disable == NULL)
2401 		return -ENOTSUP;
2402 	dev->data->all_multicast = 0;
2403 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2404 	if (diag != 0)
2405 		dev->data->all_multicast = 1;
2406 
2407 	return eth_err(port_id, diag);
2408 }
2409 
2410 int
2411 rte_eth_allmulticast_get(uint16_t port_id)
2412 {
2413 	struct rte_eth_dev *dev;
2414 
2415 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416 	dev = &rte_eth_devices[port_id];
2417 
2418 	return dev->data->all_multicast;
2419 }
2420 
2421 int
2422 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2423 {
2424 	struct rte_eth_dev *dev;
2425 
2426 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2427 	dev = &rte_eth_devices[port_id];
2428 
2429 	if (eth_link == NULL) {
2430 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2431 			port_id);
2432 		return -EINVAL;
2433 	}
2434 
2435 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2436 		rte_eth_linkstatus_get(dev, eth_link);
2437 	else {
2438 		if (*dev->dev_ops->link_update == NULL)
2439 			return -ENOTSUP;
2440 		(*dev->dev_ops->link_update)(dev, 1);
2441 		*eth_link = dev->data->dev_link;
2442 	}
2443 
2444 	return 0;
2445 }
2446 
2447 int
2448 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2449 {
2450 	struct rte_eth_dev *dev;
2451 
2452 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2453 	dev = &rte_eth_devices[port_id];
2454 
2455 	if (eth_link == NULL) {
2456 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2457 			port_id);
2458 		return -EINVAL;
2459 	}
2460 
2461 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2462 		rte_eth_linkstatus_get(dev, eth_link);
2463 	else {
2464 		if (*dev->dev_ops->link_update == NULL)
2465 			return -ENOTSUP;
2466 		(*dev->dev_ops->link_update)(dev, 0);
2467 		*eth_link = dev->data->dev_link;
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 const char *
2474 rte_eth_link_speed_to_str(uint32_t link_speed)
2475 {
2476 	switch (link_speed) {
2477 	case RTE_ETH_SPEED_NUM_NONE: return "None";
2478 	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2479 	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2480 	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2481 	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2482 	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2483 	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2484 	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2485 	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2486 	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2487 	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2488 	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2489 	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2490 	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2491 	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2492 	default: return "Invalid";
2493 	}
2494 }
2495 
2496 int
2497 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2498 {
2499 	if (str == NULL) {
2500 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2501 		return -EINVAL;
2502 	}
2503 
2504 	if (len == 0) {
2505 		RTE_ETHDEV_LOG(ERR,
2506 			"Cannot convert link to string with zero size\n");
2507 		return -EINVAL;
2508 	}
2509 
2510 	if (eth_link == NULL) {
2511 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2512 		return -EINVAL;
2513 	}
2514 
2515 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2516 		return snprintf(str, len, "Link down");
2517 	else
2518 		return snprintf(str, len, "Link up at %s %s %s",
2519 			rte_eth_link_speed_to_str(eth_link->link_speed),
2520 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2521 			"FDX" : "HDX",
2522 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2523 			"Autoneg" : "Fixed");
2524 }
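
/*
 * Illustrative sketch: query the link without waiting and render it
 * with the helpers above (RTE_ETH_LINK_MAX_STR_LEN sizes the buffer).
 *
 *	struct rte_eth_link link;
 *	char str[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(str, sizeof(str), &link);
 *		printf("Port %u: %s\n", port_id, str);
 *	}
 */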
2525 
2526 int
2527 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2528 {
2529 	struct rte_eth_dev *dev;
2530 
2531 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2532 	dev = &rte_eth_devices[port_id];
2533 
2534 	if (stats == NULL) {
2535 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2536 			port_id);
2537 		return -EINVAL;
2538 	}
2539 
2540 	memset(stats, 0, sizeof(*stats));
2541 
2542 	if (*dev->dev_ops->stats_get == NULL)
2543 		return -ENOTSUP;
2544 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2545 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2546 }
2547 
2548 int
2549 rte_eth_stats_reset(uint16_t port_id)
2550 {
2551 	struct rte_eth_dev *dev;
2552 	int ret;
2553 
2554 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2555 	dev = &rte_eth_devices[port_id];
2556 
2557 	if (*dev->dev_ops->stats_reset == NULL)
2558 		return -ENOTSUP;
2559 	ret = (*dev->dev_ops->stats_reset)(dev);
2560 	if (ret != 0)
2561 		return eth_err(port_id, ret);
2562 
2563 	dev->data->rx_mbuf_alloc_failed = 0;
2564 
2565 	return 0;
2566 }
2567 
2568 static inline int
2569 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2570 {
2571 	uint16_t nb_rxqs, nb_txqs;
2572 	int count;
2573 
2574 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2575 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2576 
2577 	count = RTE_NB_STATS;
2578 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2579 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2580 		count += nb_txqs * RTE_NB_TXQ_STATS;
2581 	}
2582 
2583 	return count;
2584 }
2585 
2586 static int
2587 eth_dev_get_xstats_count(uint16_t port_id)
2588 {
2589 	struct rte_eth_dev *dev;
2590 	int count;
2591 
2592 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2593 	dev = &rte_eth_devices[port_id];
2594 	if (dev->dev_ops->xstats_get_names != NULL) {
2595 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2596 		if (count < 0)
2597 			return eth_err(port_id, count);
2598 	} else
2599 		count = 0;
2600 
2602 	count += eth_dev_get_xstats_basic_count(dev);
2603 
2604 	return count;
2605 }
2606 
2607 int
2608 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2609 		uint64_t *id)
2610 {
2611 	int cnt_xstats, idx_xstat;
2612 
2613 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2614 
2615 	if (xstat_name == NULL) {
2616 		RTE_ETHDEV_LOG(ERR,
2617 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2618 			port_id);
2619 		return -ENOMEM;
2620 	}
2621 
2622 	if (id == NULL) {
2623 		RTE_ETHDEV_LOG(ERR,
2624 			"Cannot get ethdev port %u xstats ID to NULL\n",
2625 			port_id);
2626 		return -ENOMEM;
2627 	}
2628 
2629 	/* Get count */
2630 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2631 	if (cnt_xstats < 0) {
2632 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2633 		return -ENODEV;
2634 	}
2635 
2636 	/* Get id-name lookup table */
2637 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2638 
2639 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2640 			port_id, xstats_names, cnt_xstats, NULL)) {
2641 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2642 		return -1;
2643 	}
2644 
2645 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2646 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2647 			*id = idx_xstat;
2648 			return 0;
2649 		}
2650 	}
2651 
2652 	return -EINVAL;
2653 }
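
/*
 * Illustrative sketch: resolve one xstat by name, then read it by ID
 * ("rx_good_packets" is one of the basic stats listed at the top of
 * this file).
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */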
2654 
2655 /* retrieve basic stats names */
2656 static int
2657 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2658 	struct rte_eth_xstat_name *xstats_names)
2659 {
2660 	int cnt_used_entries = 0;
2661 	uint32_t idx, id_queue;
2662 	uint16_t num_q;
2663 
2664 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2665 		strlcpy(xstats_names[cnt_used_entries].name,
2666 			eth_dev_stats_strings[idx].name,
2667 			sizeof(xstats_names[0].name));
2668 		cnt_used_entries++;
2669 	}
2670 
2671 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2672 		return cnt_used_entries;
2673 
2674 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2675 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2676 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2677 			snprintf(xstats_names[cnt_used_entries].name,
2678 				sizeof(xstats_names[0].name),
2679 				"rx_q%u_%s",
2680 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2681 			cnt_used_entries++;
2682 		}
2683 
2684 	}
2685 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2686 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2687 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2688 			snprintf(xstats_names[cnt_used_entries].name,
2689 				sizeof(xstats_names[0].name),
2690 				"tx_q%u_%s",
2691 				id_queue, eth_dev_txq_stats_strings[idx].name);
2692 			cnt_used_entries++;
2693 		}
2694 	}
2695 	return cnt_used_entries;
2696 }
2697 
2698 /* retrieve ethdev extended statistics names */
2699 int
2700 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2701 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2702 	uint64_t *ids)
2703 {
2704 	struct rte_eth_xstat_name *xstats_names_copy;
2705 	unsigned int no_basic_stat_requested = 1;
2706 	unsigned int no_ext_stat_requested = 1;
2707 	unsigned int expected_entries;
2708 	unsigned int basic_count;
2709 	struct rte_eth_dev *dev;
2710 	unsigned int i;
2711 	int ret;
2712 
2713 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2714 	dev = &rte_eth_devices[port_id];
2715 
2716 	basic_count = eth_dev_get_xstats_basic_count(dev);
2717 	ret = eth_dev_get_xstats_count(port_id);
2718 	if (ret < 0)
2719 		return ret;
2720 	expected_entries = (unsigned int)ret;
2721 
2722 	/* Return max number of stats if no ids given */
2723 	if (!ids) {
2724 		if (!xstats_names)
2725 			return expected_entries;
2726 		else if (size < expected_entries)
2727 			return expected_entries;
2728 	}
2729 
2730 	if (ids && !xstats_names)
2731 		return -EINVAL;
2732 
2733 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2734 		uint64_t ids_copy[size];
2735 
2736 		for (i = 0; i < size; i++) {
2737 			if (ids[i] < basic_count) {
2738 				no_basic_stat_requested = 0;
2739 				break;
2740 			}
2741 
2742 			/*
2743 			 * Convert ids to xstats ids that PMD knows.
2744 			 * ids known by user are basic + extended stats.
2745 			 */
2746 			ids_copy[i] = ids[i] - basic_count;
2747 		}
2748 
2749 		if (no_basic_stat_requested)
2750 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2751 					ids_copy, xstats_names, size);
2752 	}
2753 
2754 	/* Retrieve all stats */
2755 	if (!ids) {
2756 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2757 				expected_entries);
2758 		if (num_stats < 0 || num_stats > (int)expected_entries)
2759 			return num_stats;
2760 		else
2761 			return expected_entries;
2762 	}
2763 
2764 	xstats_names_copy = calloc(expected_entries,
2765 		sizeof(struct rte_eth_xstat_name));
2766 
2767 	if (!xstats_names_copy) {
2768 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2769 		return -ENOMEM;
2770 	}
2771 
2772 	if (ids) {
2773 		for (i = 0; i < size; i++) {
2774 			if (ids[i] >= basic_count) {
2775 				no_ext_stat_requested = 0;
2776 				break;
2777 			}
2778 		}
2779 	}
2780 
2781 	/* Fill xstats_names_copy structure */
2782 	if (ids && no_ext_stat_requested) {
2783 		eth_basic_stats_get_names(dev, xstats_names_copy);
2784 	} else {
2785 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2786 			expected_entries);
2787 		if (ret < 0) {
2788 			free(xstats_names_copy);
2789 			return ret;
2790 		}
2791 	}
2792 
2793 	/* Filter stats */
2794 	for (i = 0; i < size; i++) {
2795 		if (ids[i] >= expected_entries) {
2796 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2797 			free(xstats_names_copy);
2798 			return -1;
2799 		}
2800 		xstats_names[i] = xstats_names_copy[ids[i]];
2801 	}
2802 
2803 	free(xstats_names_copy);
2804 	return size;
2805 }
2806 
2807 int
2808 rte_eth_xstats_get_names(uint16_t port_id,
2809 	struct rte_eth_xstat_name *xstats_names,
2810 	unsigned int size)
2811 {
2812 	struct rte_eth_dev *dev;
2813 	int cnt_used_entries;
2814 	int cnt_expected_entries;
2815 	int cnt_driver_entries;
2816 
2817 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2818 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
2819 			(int)size < cnt_expected_entries)
2820 		return cnt_expected_entries;
2821 
2822 	/* port_id checked in eth_dev_get_xstats_count() */
2823 	dev = &rte_eth_devices[port_id];
2824 
2825 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2826 
2827 	if (dev->dev_ops->xstats_get_names != NULL) {
2828 		/* If there are any driver-specific xstats, append them
2829 		 * to the end of the list.
2830 		 */
2831 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2832 			dev,
2833 			xstats_names + cnt_used_entries,
2834 			size - cnt_used_entries);
2835 		if (cnt_driver_entries < 0)
2836 			return eth_err(port_id, cnt_driver_entries);
2837 		cnt_used_entries += cnt_driver_entries;
2838 	}
2839 
2840 	return cnt_used_entries;
2841 }
2842 
2844 static int
2845 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2846 {
2847 	struct rte_eth_dev *dev;
2848 	struct rte_eth_stats eth_stats;
2849 	unsigned int count = 0, i, q;
2850 	uint64_t val, *stats_ptr;
2851 	uint16_t nb_rxqs, nb_txqs;
2852 	int ret;
2853 
2854 	ret = rte_eth_stats_get(port_id, &eth_stats);
2855 	if (ret < 0)
2856 		return ret;
2857 
2858 	dev = &rte_eth_devices[port_id];
2859 
2860 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2861 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2862 
2863 	/* global stats */
2864 	for (i = 0; i < RTE_NB_STATS; i++) {
2865 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2866 					eth_dev_stats_strings[i].offset);
2867 		val = *stats_ptr;
2868 		xstats[count++].value = val;
2869 	}
2870 
2871 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2872 		return count;
2873 
2874 	/* per-rxq stats */
2875 	for (q = 0; q < nb_rxqs; q++) {
2876 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2877 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2878 					eth_dev_rxq_stats_strings[i].offset +
2879 					q * sizeof(uint64_t));
2880 			val = *stats_ptr;
2881 			xstats[count++].value = val;
2882 		}
2883 	}
2884 
2885 	/* per-txq stats */
2886 	for (q = 0; q < nb_txqs; q++) {
2887 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2888 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2889 					eth_dev_txq_stats_strings[i].offset +
2890 					q * sizeof(uint64_t));
2891 			val = *stats_ptr;
2892 			xstats[count++].value = val;
2893 		}
2894 	}
2895 	return count;
2896 }
2897 
2898 /* retrieve ethdev extended statistics */
2899 int
2900 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2901 			 uint64_t *values, unsigned int size)
2902 {
2903 	unsigned int no_basic_stat_requested = 1;
2904 	unsigned int no_ext_stat_requested = 1;
2905 	unsigned int num_xstats_filled;
2906 	unsigned int basic_count;
2907 	uint16_t expected_entries;
2908 	struct rte_eth_dev *dev;
2909 	unsigned int i;
2910 	int ret;
2911 
2912 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2913 	dev = &rte_eth_devices[port_id];
2914 
2915 	ret = eth_dev_get_xstats_count(port_id);
2916 	if (ret < 0)
2917 		return ret;
2918 	expected_entries = (uint16_t)ret;
2919 	struct rte_eth_xstat xstats[expected_entries];
2920 	basic_count = eth_dev_get_xstats_basic_count(dev);
2921 
2922 	/* Return max number of stats if no ids given */
2923 	if (!ids) {
2924 		if (!values)
2925 			return expected_entries;
2926 		else if (size < expected_entries)
2927 			return expected_entries;
2928 	}
2929 
2930 	if (ids && !values)
2931 		return -EINVAL;
2932 
2933 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2935 		uint64_t ids_copy[size];
2936 
2937 		for (i = 0; i < size; i++) {
2938 			if (ids[i] < basic_count) {
2939 				no_basic_stat_requested = 0;
2940 				break;
2941 			}
2942 
2943 			/*
2944 			 * Convert ids to xstats ids that PMD knows.
2945 			 * ids known by user are basic + extended stats.
2946 			 */
2947 			ids_copy[i] = ids[i] - basic_count;
2948 		}
2949 
2950 		if (no_basic_stat_requested)
2951 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2952 					values, size);
2953 	}
2954 
2955 	if (ids) {
2956 		for (i = 0; i < size; i++) {
2957 			if (ids[i] >= basic_count) {
2958 				no_ext_stat_requested = 0;
2959 				break;
2960 			}
2961 		}
2962 	}
2963 
2964 	/* Fill the xstats structure */
2965 	if (ids && no_ext_stat_requested)
2966 		ret = eth_basic_stats_get(port_id, xstats);
2967 	else
2968 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2969 
2970 	if (ret < 0)
2971 		return ret;
2972 	num_xstats_filled = (unsigned int)ret;
2973 
2974 	/* Return all stats */
2975 	if (!ids) {
2976 		for (i = 0; i < num_xstats_filled; i++)
2977 			values[i] = xstats[i].value;
2978 		return expected_entries;
2979 	}
2980 
2981 	/* Filter stats */
2982 	for (i = 0; i < size; i++) {
2983 		if (ids[i] >= expected_entries) {
2984 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2985 			return -1;
2986 		}
2987 		values[i] = xstats[ids[i]].value;
2988 	}
2989 	return size;
2990 }
2991 
2992 int
2993 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2994 	unsigned int n)
2995 {
2996 	struct rte_eth_dev *dev;
2997 	unsigned int count, i;
2998 	signed int xcount = 0;
2999 	int ret;
3000 
3001 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3002 	if (xstats == NULL && n > 0)
3003 		return -EINVAL;
3004 	dev = &rte_eth_devices[port_id];
3005 
3006 	count = eth_dev_get_xstats_basic_count(dev);
3007 
3008 	/* implemented by the driver */
3009 	if (dev->dev_ops->xstats_get != NULL) {
3010 		/* Retrieve the xstats from the driver at the end of the
3011 		 * xstats struct.
3012 		 */
3013 		xcount = (*dev->dev_ops->xstats_get)(dev,
3014 				     (n > count) ? xstats + count : NULL,
3015 				     (n > count) ? n - count : 0);
3016 
3017 		if (xcount < 0)
3018 			return eth_err(port_id, xcount);
3019 	}
3020 
3021 	if (n < count + xcount || xstats == NULL)
3022 		return count + xcount;
3023 
3024 	/* now fill the xstats structure */
3025 	ret = eth_basic_stats_get(port_id, xstats);
3026 	if (ret < 0)
3027 		return ret;
3028 	count = ret;
3029 
3030 	for (i = 0; i < count; i++)
3031 		xstats[i].id = i;
3032 	/* add an offset to driver-specific stats */
3033 	for ( ; i < count + xcount; i++)
3034 		xstats[i].id += count;
3035 
3036 	return count + xcount;
3037 }
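
/*
 * Illustrative sketch of the usual two-call pattern for this API:
 * probe the count with a NULL array, then allocate and fetch.
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *		if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n) {
 *			... use xs[i].id / xs[i].value ...
 *		}
 *		free(xs);
 *	}
 */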
3038 
3039 /* reset ethdev extended statistics */
3040 int
3041 rte_eth_xstats_reset(uint16_t port_id)
3042 {
3043 	struct rte_eth_dev *dev;
3044 
3045 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046 	dev = &rte_eth_devices[port_id];
3047 
3048 	/* implemented by the driver */
3049 	if (dev->dev_ops->xstats_reset != NULL)
3050 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3051 
3052 	/* fallback to default */
3053 	return rte_eth_stats_reset(port_id);
3054 }
3055 
3056 static int
3057 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3058 		uint8_t stat_idx, uint8_t is_rx)
3059 {
3060 	struct rte_eth_dev *dev;
3061 
3062 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3063 	dev = &rte_eth_devices[port_id];
3064 
3065 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3066 		return -EINVAL;
3067 
3068 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3069 		return -EINVAL;
3070 
3071 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3072 		return -EINVAL;
3073 
3074 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3075 		return -ENOTSUP;
3076 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3077 }
3078 
3079 int
3080 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3081 		uint8_t stat_idx)
3082 {
3083 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3084 						tx_queue_id,
3085 						stat_idx, STAT_QMAP_TX));
3086 }
3087 
3088 int
3089 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3090 		uint8_t stat_idx)
3091 {
3092 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3093 						rx_queue_id,
3094 						stat_idx, STAT_QMAP_RX));
3095 }
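
/*
 * Illustrative sketch (only meaningful on PMDs implementing
 * queue_stats_mapping_set): map Rx queue 2 to stats counter 0 so that
 * q_ipackets[0]/q_ibytes[0] reflect that queue.
 *
 *	ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 2, 0);
 */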
3096 
3097 int
3098 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3099 {
3100 	struct rte_eth_dev *dev;
3101 
3102 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3103 	dev = &rte_eth_devices[port_id];
3104 
3105 	if (fw_version == NULL && fw_size > 0) {
3106 		RTE_ETHDEV_LOG(ERR,
3107 			"Cannot get ethdev port %u FW version to NULL when string size is non-zero\n",
3108 			port_id);
3109 		return -EINVAL;
3110 	}
3111 
3112 	if (*dev->dev_ops->fw_version_get == NULL)
3113 		return -ENOTSUP;
3114 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3115 							fw_version, fw_size));
3116 }
3117 
3118 int
3119 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3120 {
3121 	struct rte_eth_dev *dev;
3122 	const struct rte_eth_desc_lim lim = {
3123 		.nb_max = UINT16_MAX,
3124 		.nb_min = 0,
3125 		.nb_align = 1,
3126 		.nb_seg_max = UINT16_MAX,
3127 		.nb_mtu_seg_max = UINT16_MAX,
3128 	};
3129 	int diag;
3130 
3131 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3132 	dev = &rte_eth_devices[port_id];
3133 
3134 	if (dev_info == NULL) {
3135 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3136 			port_id);
3137 		return -EINVAL;
3138 	}
3139 
3140 	/*
3141 	 * Init dev_info before the port_id check since the caller may not
3142 	 * check the return status and so cannot know if the get succeeded.
3143 	 */
3144 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3145 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3146 
3147 	dev_info->rx_desc_lim = lim;
3148 	dev_info->tx_desc_lim = lim;
3149 	dev_info->device = dev->device;
3150 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3151 		RTE_ETHER_CRC_LEN;
3152 	dev_info->max_mtu = UINT16_MAX;
3153 
3154 	if (*dev->dev_ops->dev_infos_get == NULL)
3155 		return -ENOTSUP;
3156 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3157 	if (diag != 0) {
3158 		/* Cleanup already filled in device information */
3159 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3160 		return eth_err(port_id, diag);
3161 	}
3162 
3163 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3164 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3165 			RTE_MAX_QUEUES_PER_PORT);
3166 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3167 			RTE_MAX_QUEUES_PER_PORT);
3168 
3169 	dev_info->driver_name = dev->device->driver->name;
3170 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3171 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3172 
3173 	dev_info->dev_flags = &dev->data->dev_flags;
3174 
3175 	return 0;
3176 }
3177 
3178 int
3179 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3180 {
3181 	struct rte_eth_dev *dev;
3182 
3183 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3184 	dev = &rte_eth_devices[port_id];
3185 
3186 	if (dev_conf == NULL) {
3187 		RTE_ETHDEV_LOG(ERR,
3188 			"Cannot get ethdev port %u configuration to NULL\n",
3189 			port_id);
3190 		return -EINVAL;
3191 	}
3192 
3193 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3194 
3195 	return 0;
3196 }
3197 
3198 int
3199 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3200 				 uint32_t *ptypes, int num)
3201 {
3202 	int i, j;
3203 	struct rte_eth_dev *dev;
3204 	const uint32_t *all_ptypes;
3205 
3206 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3207 	dev = &rte_eth_devices[port_id];
3208 
3209 	if (ptypes == NULL && num > 0) {
3210 		RTE_ETHDEV_LOG(ERR,
3211 			"Cannot get ethdev port %u supported packet types to NULL when array size is non-zero\n",
3212 			port_id);
3213 		return -EINVAL;
3214 	}
3215 
3216 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3217 		return 0;
3218 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3219 
3220 	if (!all_ptypes)
3221 		return 0;
3222 
3223 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3224 		if (all_ptypes[i] & ptype_mask) {
3225 			if (j < num)
3226 				ptypes[j] = all_ptypes[i];
3227 			j++;
3228 		}
3229 
3230 	return j;
3231 }
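
/*
 * Illustrative sketch of the two-call pattern for this API: size the
 * result first, then fetch the supported L3 packet types.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *ptypes = malloc(n * sizeof(*ptypes));
 *
 *		if (ptypes != NULL)
 *			n = rte_eth_dev_get_supported_ptypes(port_id,
 *					RTE_PTYPE_L3_MASK, ptypes, n);
 *		free(ptypes);
 *	}
 */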
3232 
3233 int
3234 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3235 				 uint32_t *set_ptypes, unsigned int num)
3236 {
3237 	const uint32_t valid_ptype_masks[] = {
3238 		RTE_PTYPE_L2_MASK,
3239 		RTE_PTYPE_L3_MASK,
3240 		RTE_PTYPE_L4_MASK,
3241 		RTE_PTYPE_TUNNEL_MASK,
3242 		RTE_PTYPE_INNER_L2_MASK,
3243 		RTE_PTYPE_INNER_L3_MASK,
3244 		RTE_PTYPE_INNER_L4_MASK,
3245 	};
3246 	const uint32_t *all_ptypes;
3247 	struct rte_eth_dev *dev;
3248 	uint32_t unused_mask;
3249 	unsigned int i, j;
3250 	int ret;
3251 
3252 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3253 	dev = &rte_eth_devices[port_id];
3254 
3255 	if (num > 0 && set_ptypes == NULL) {
3256 		RTE_ETHDEV_LOG(ERR,
3257 			"Cannot get ethdev port %u set packet types to NULL when array size is non-zero\n",
3258 			port_id);
3259 		return -EINVAL;
3260 	}
3261 
3262 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3263 			*dev->dev_ops->dev_ptypes_set == NULL) {
3264 		ret = 0;
3265 		goto ptype_unknown;
3266 	}
3267 
3268 	if (ptype_mask == 0) {
3269 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3270 				ptype_mask);
3271 		goto ptype_unknown;
3272 	}
3273 
3274 	unused_mask = ptype_mask;
3275 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3276 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3277 		if (mask && mask != valid_ptype_masks[i]) {
3278 			ret = -EINVAL;
3279 			goto ptype_unknown;
3280 		}
3281 		unused_mask &= ~valid_ptype_masks[i];
3282 	}
3283 
3284 	if (unused_mask) {
3285 		ret = -EINVAL;
3286 		goto ptype_unknown;
3287 	}
3288 
3289 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3290 	if (all_ptypes == NULL) {
3291 		ret = 0;
3292 		goto ptype_unknown;
3293 	}
3294 
3295 	/*
3296 	 * Accommodate as many set_ptypes as possible. If the supplied
3297 	 * set_ptypes array is insufficient, fill it partially.
3298 	 */
3299 	for (i = 0, j = 0; set_ptypes != NULL &&
3300 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3301 		if (ptype_mask & all_ptypes[i]) {
3302 			if (j < num - 1) {
3303 				set_ptypes[j] = all_ptypes[i];
3304 				j++;
3305 				continue;
3306 			}
3307 			break;
3308 		}
3309 	}
3310 
3311 	if (set_ptypes != NULL && j < num)
3312 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3313 
3314 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3315 
3316 ptype_unknown:
3317 	if (num > 0)
3318 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3319 
3320 	return ret;
3321 }
3322 
3323 int
3324 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3325 	unsigned int num)
3326 {
3327 	int32_t ret;
3328 	struct rte_eth_dev *dev;
3329 	struct rte_eth_dev_info dev_info;
3330 
3331 	if (ma == NULL) {
3332 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3333 		return -EINVAL;
3334 	}
3335 
3336 	/* will check for us that port_id is a valid one */
3337 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3338 	if (ret != 0)
3339 		return ret;
3340 
3341 	dev = &rte_eth_devices[port_id];
3342 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3343 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3344 
3345 	return num;
3346 }
3347 
3348 int
3349 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3350 {
3351 	struct rte_eth_dev *dev;
3352 
3353 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3354 	dev = &rte_eth_devices[port_id];
3355 
3356 	if (mac_addr == NULL) {
3357 		RTE_ETHDEV_LOG(ERR,
3358 			"Cannot get ethdev port %u MAC address to NULL\n",
3359 			port_id);
3360 		return -EINVAL;
3361 	}
3362 
3363 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3364 
3365 	return 0;
3366 }
3367 
3368 int
3369 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3370 {
3371 	struct rte_eth_dev *dev;
3372 
3373 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3374 	dev = &rte_eth_devices[port_id];
3375 
3376 	if (mtu == NULL) {
3377 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3378 			port_id);
3379 		return -EINVAL;
3380 	}
3381 
3382 	*mtu = dev->data->mtu;
3383 	return 0;
3384 }
3385 
3386 int
3387 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3388 {
3389 	int ret;
3390 	struct rte_eth_dev_info dev_info;
3391 	struct rte_eth_dev *dev;
3392 
3393 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3394 	dev = &rte_eth_devices[port_id];
3395 	if (*dev->dev_ops->mtu_set == NULL)
3396 		return -ENOTSUP;
3397 
3398 	/*
3399 	 * Check if the device supports dev_infos_get; if it does not,
3400 	 * skip the min_mtu/max_mtu validation here, as it requires values
3401 	 * that are populated within the call to rte_eth_dev_info_get(),
3402 	 * which relies on dev->dev_ops->dev_infos_get.
3403 	 */
3404 	if (*dev->dev_ops->dev_infos_get != NULL) {
3405 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3406 		if (ret != 0)
3407 			return ret;
3408 
3409 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3410 		if (ret != 0)
3411 			return ret;
3412 	}
3413 
3414 	if (dev->data->dev_configured == 0) {
3415 		RTE_ETHDEV_LOG(ERR,
3416 			"Port %u must be configured before MTU set\n",
3417 			port_id);
3418 		return -EINVAL;
3419 	}
3420 
3421 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3422 	if (ret == 0)
3423 		dev->data->mtu = mtu;
3424 
3425 	return eth_err(port_id, ret);
3426 }
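
/*
 * Illustrative sketch: the port must be configured before the MTU can
 * change, and the value is validated against the driver's
 * min_mtu/max_mtu when dev_infos_get is implemented.
 *
 *	uint16_t mtu;
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 1500);
 *	if (ret == 0)
 *		ret = rte_eth_dev_get_mtu(port_id, &mtu);
 */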
3427 
3428 int
3429 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3430 {
3431 	struct rte_eth_dev *dev;
3432 	int ret;
3433 
3434 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3435 	dev = &rte_eth_devices[port_id];
3436 
3437 	if (!(dev->data->dev_conf.rxmode.offloads &
3438 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3439 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3440 			port_id);
3441 		return -ENOSYS;
3442 	}
3443 
3444 	if (vlan_id > 4095) {
3445 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3446 			port_id, vlan_id);
3447 		return -EINVAL;
3448 	}
3449 	if (*dev->dev_ops->vlan_filter_set == NULL)
3450 		return -ENOTSUP;
3451 
3452 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3453 	if (ret == 0) {
3454 		struct rte_vlan_filter_conf *vfc;
3455 		int vidx;
3456 		int vbit;
3457 
3458 		vfc = &dev->data->vlan_filter_conf;
3459 		vidx = vlan_id / 64;
3460 		vbit = vlan_id % 64;
3461 
3462 		if (on)
3463 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3464 		else
3465 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3466 	}
3467 
3468 	return eth_err(port_id, ret);
3469 }
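
/*
 * Illustrative sketch (assumes RTE_ETH_RX_OFFLOAD_VLAN_FILTER was
 * enabled in rte_eth_dev_configure(), as required above): accept
 * VLAN ID 100 on the port.
 *
 *	ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
 */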
3470 
3471 int
3472 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3473 				    int on)
3474 {
3475 	struct rte_eth_dev *dev;
3476 
3477 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3478 	dev = &rte_eth_devices[port_id];
3479 
3480 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3481 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
3482 		return -EINVAL;
3483 	}
3484 
3485 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
3486 		return -ENOTSUP;
3487 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3488 
3489 	return 0;
3490 }
3491 
3492 int
3493 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3494 				enum rte_vlan_type vlan_type,
3495 				uint16_t tpid)
3496 {
3497 	struct rte_eth_dev *dev;
3498 
3499 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3500 	dev = &rte_eth_devices[port_id];
3501 
3502 	if (*dev->dev_ops->vlan_tpid_set == NULL)
3503 		return -ENOTSUP;
3504 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3505 							       tpid));
3506 }
3507 
3508 int
3509 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3510 {
3511 	struct rte_eth_dev_info dev_info;
3512 	struct rte_eth_dev *dev;
3513 	int ret = 0;
3514 	int mask = 0;
3515 	int cur, org = 0;
3516 	uint64_t orig_offloads;
3517 	uint64_t dev_offloads;
3518 	uint64_t new_offloads;
3519 
3520 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3521 	dev = &rte_eth_devices[port_id];
3522 
3523 	/* save original values in case of failure */
3524 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3525 	dev_offloads = orig_offloads;
3526 
3527 	/* check which option changed by application */
3528 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3529 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3530 	if (cur != org) {
3531 		if (cur)
3532 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3533 		else
3534 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3535 		mask |= RTE_ETH_VLAN_STRIP_MASK;
3536 	}
3537 
3538 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3539 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3540 	if (cur != org) {
3541 		if (cur)
3542 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3543 		else
3544 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3545 		mask |= RTE_ETH_VLAN_FILTER_MASK;
3546 	}
3547 
3548 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3549 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3550 	if (cur != org) {
3551 		if (cur)
3552 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3553 		else
3554 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3555 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
3556 	}
3557 
3558 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3559 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3560 	if (cur != org) {
3561 		if (cur)
3562 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3563 		else
3564 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3565 		mask |= RTE_ETH_QINQ_STRIP_MASK;
3566 	}
3567 
3568 	/* no change */
3569 	if (mask == 0)
3570 		return ret;
3571 
3572 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3573 	if (ret != 0)
3574 		return ret;
3575 
3576 	/* Rx VLAN offloading must be within its device capabilities */
3577 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3578 		new_offloads = dev_offloads & ~orig_offloads;
3579 		RTE_ETHDEV_LOG(ERR,
3580 			"Ethdev port_id=%u requested newly added VLAN offloads "
3581 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3582 			"0x%" PRIx64 " in %s()\n",
3583 			port_id, new_offloads, dev_info.rx_offload_capa,
3584 			__func__);
3585 		return -EINVAL;
3586 	}
3587 
3588 	if (*dev->dev_ops->vlan_offload_set == NULL)
3589 		return -ENOTSUP;
3590 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3591 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3592 	if (ret) {
3593 		/* hit an error, restore original values */
3594 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
3595 	}
3596 
3597 	return eth_err(port_id, ret);
3598 }
3599 
3600 int
3601 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3602 {
3603 	struct rte_eth_dev *dev;
3604 	uint64_t *dev_offloads;
3605 	int ret = 0;
3606 
3607 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3608 	dev = &rte_eth_devices[port_id];
3609 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3610 
3611 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3612 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3613 
3614 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3615 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3616 
3617 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3618 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3619 
3620 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3621 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3622 
3623 	return ret;
3624 }
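
/*
 * Illustrative sketch of a read-modify-write on the VLAN offload flags
 * using the two helpers above:
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (flags >= 0) {
 *		flags |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *		ret = rte_eth_dev_set_vlan_offload(port_id, flags);
 *	}
 */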
3625 
3626 int
3627 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3628 {
3629 	struct rte_eth_dev *dev;
3630 
3631 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3632 	dev = &rte_eth_devices[port_id];
3633 
3634 	if (*dev->dev_ops->vlan_pvid_set == NULL)
3635 		return -ENOTSUP;
3636 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3637 }
3638 
3639 int
3640 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3641 {
3642 	struct rte_eth_dev *dev;
3643 
3644 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3645 	dev = &rte_eth_devices[port_id];
3646 
3647 	if (fc_conf == NULL) {
3648 		RTE_ETHDEV_LOG(ERR,
3649 			"Cannot get ethdev port %u flow control config to NULL\n",
3650 			port_id);
3651 		return -EINVAL;
3652 	}
3653 
3654 	if (*dev->dev_ops->flow_ctrl_get == NULL)
3655 		return -ENOTSUP;
3656 	memset(fc_conf, 0, sizeof(*fc_conf));
3657 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3658 }
3659 
3660 int
3661 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3662 {
3663 	struct rte_eth_dev *dev;
3664 
3665 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3666 	dev = &rte_eth_devices[port_id];
3667 
3668 	if (fc_conf == NULL) {
3669 		RTE_ETHDEV_LOG(ERR,
3670 			"Cannot set ethdev port %u flow control from NULL config\n",
3671 			port_id);
3672 		return -EINVAL;
3673 	}
3674 
3675 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3676 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3677 		return -EINVAL;
3678 	}
3679 
3680 	if (*dev->dev_ops->flow_ctrl_set == NULL)
3681 		return -ENOTSUP;
3682 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3683 }
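
/*
 * Illustrative sketch of the usual get/modify/set flow for flow
 * control configuration:
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	if (ret == 0) {
 *		fc_conf.mode = RTE_ETH_FC_FULL;
 *		ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *	}
 */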
3684 
3685 int
3686 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3687 				   struct rte_eth_pfc_conf *pfc_conf)
3688 {
3689 	struct rte_eth_dev *dev;
3690 
3691 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3692 	dev = &rte_eth_devices[port_id];
3693 
3694 	if (pfc_conf == NULL) {
3695 		RTE_ETHDEV_LOG(ERR,
3696 			"Cannot set ethdev port %u priority flow control from NULL config\n",
3697 			port_id);
3698 		return -EINVAL;
3699 	}
3700 
3701 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3702 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3703 		return -EINVAL;
3704 	}
3705 
3706 	/* High water / low water validation is device-specific */
3707 	if (*dev->dev_ops->priority_flow_ctrl_set)
3708 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3709 					(dev, pfc_conf));
3710 	return -ENOTSUP;
3711 }
3712 
3713 static int
3714 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3715 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3716 {
3717 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3718 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3719 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3720 			RTE_ETHDEV_LOG(ERR,
3721 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3722 				pfc_queue_conf->rx_pause.tx_qid,
3723 				dev_info->nb_tx_queues);
3724 			return -EINVAL;
3725 		}
3726 
3727 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3728 			RTE_ETHDEV_LOG(ERR,
3729 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
3730 				pfc_queue_conf->rx_pause.tc, tc_max);
3731 			return -EINVAL;
3732 		}
3733 	}
3734 
3735 	return 0;
3736 }
3737 
3738 static int
3739 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3740 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3741 {
3742 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3743 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3744 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3745 			RTE_ETHDEV_LOG(ERR,
3746 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3747 				pfc_queue_conf->tx_pause.rx_qid,
3748 				dev_info->nb_rx_queues);
3749 			return -EINVAL;
3750 		}
3751 
3752 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3753 			RTE_ETHDEV_LOG(ERR,
3754 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
3755 				pfc_queue_conf->tx_pause.tc, tc_max);
3756 			return -EINVAL;
3757 		}
3758 	}
3759 
3760 	return 0;
3761 }
3762 
3763 int
3764 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3765 		struct rte_eth_pfc_queue_info *pfc_queue_info)
3766 {
3767 	struct rte_eth_dev *dev;
3768 
3769 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3770 	dev = &rte_eth_devices[port_id];
3771 
3772 	if (pfc_queue_info == NULL) {
3773 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3774 			port_id);
3775 		return -EINVAL;
3776 	}
3777 
3778 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
3779 		return -ENOTSUP;
3780 	return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3781 		(dev, pfc_queue_info));
3782 }
3783 
3784 int
3785 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3786 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3787 {
3788 	struct rte_eth_pfc_queue_info pfc_info;
3789 	struct rte_eth_dev_info dev_info;
3790 	struct rte_eth_dev *dev;
3791 	int ret;
3792 
3793 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3794 	dev = &rte_eth_devices[port_id];
3795 
3796 	if (pfc_queue_conf == NULL) {
3797 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3798 			port_id);
3799 		return -EINVAL;
3800 	}
3801 
3802 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3803 	if (ret != 0)
3804 		return ret;
3805 
3806 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3807 	if (ret != 0)
3808 		return ret;
3809 
3810 	if (pfc_info.tc_max == 0) {
3811 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3812 			port_id);
3813 		return -ENOTSUP;
3814 	}
3815 
3816 	/* Check requested mode supported or not */
3817 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3818 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3819 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3820 			port_id);
3821 		return -EINVAL;
3822 	}
3823 
3824 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3825 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3826 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3827 			port_id);
3828 		return -EINVAL;
3829 	}
3830 
3831 	/* Validate Rx pause parameters */
3832 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3833 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3834 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3835 				pfc_queue_conf);
3836 		if (ret != 0)
3837 			return ret;
3838 	}
3839 
3840 	/* Validate Tx pause parameters */
3841 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3842 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3843 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3844 				pfc_queue_conf);
3845 		if (ret != 0)
3846 			return ret;
3847 	}
3848 
3849 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
3850 		return -ENOTSUP;
3851 	return eth_err(port_id,
3852 		       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3853 			dev, pfc_queue_conf));
3854 }
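
/*
 * Example usage (illustrative sketch): map Rx queue 0 and Tx queue 0 to
 * TC 0 for both pause directions. Queue and TC numbers are hypothetical
 * and must satisfy the tc_max/queue-count checks performed above.
 *
 *	struct rte_eth_pfc_queue_conf q_conf = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.rx_pause = { .tx_qid = 0, .tc = 0 },
 *		.tx_pause = { .rx_qid = 0, .tc = 0 },
 *	};
 *	int ret = rte_eth_dev_priority_flow_ctrl_queue_configure(port_id,
 *								 &q_conf);
 */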
3855 
3856 static int
3857 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3858 			uint16_t reta_size)
3859 {
3860 	uint16_t i, num;
3861 
3862 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3863 	for (i = 0; i < num; i++) {
3864 		if (reta_conf[i].mask)
3865 			return 0;
3866 	}
3867 
3868 	return -EINVAL;
3869 }
3870 
3871 static int
3872 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3873 			 uint16_t reta_size,
3874 			 uint16_t max_rxq)
3875 {
3876 	uint16_t i, idx, shift;
3877 
3878 	if (max_rxq == 0) {
3879 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3880 		return -EINVAL;
3881 	}
3882 
3883 	for (i = 0; i < reta_size; i++) {
3884 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3885 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3886 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3887 			(reta_conf[idx].reta[shift] >= max_rxq)) {
3888 			RTE_ETHDEV_LOG(ERR,
3889 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3890 				idx, shift,
3891 				reta_conf[idx].reta[shift], max_rxq);
3892 			return -EINVAL;
3893 		}
3894 	}
3895 
3896 	return 0;
3897 }
3898 
3899 int
3900 rte_eth_dev_rss_reta_update(uint16_t port_id,
3901 			    struct rte_eth_rss_reta_entry64 *reta_conf,
3902 			    uint16_t reta_size)
3903 {
3904 	enum rte_eth_rx_mq_mode mq_mode;
3905 	struct rte_eth_dev *dev;
3906 	int ret;
3907 
3908 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3909 	dev = &rte_eth_devices[port_id];
3910 
3911 	if (reta_conf == NULL) {
3912 		RTE_ETHDEV_LOG(ERR,
3913 			"Cannot update ethdev port %u RSS RETA to NULL\n",
3914 			port_id);
3915 		return -EINVAL;
3916 	}
3917 
3918 	if (reta_size == 0) {
3919 		RTE_ETHDEV_LOG(ERR,
3920 			"Cannot update ethdev port %u RSS RETA with zero size\n",
3921 			port_id);
3922 		return -EINVAL;
3923 	}
3924 
3925 	/* Check mask bits */
3926 	ret = eth_check_reta_mask(reta_conf, reta_size);
3927 	if (ret < 0)
3928 		return ret;
3929 
3930 	/* Check entry value */
3931 	ret = eth_check_reta_entry(reta_conf, reta_size,
3932 				dev->data->nb_rx_queues);
3933 	if (ret < 0)
3934 		return ret;
3935 
3936 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3937 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3938 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3939 		return -ENOTSUP;
3940 	}
3941 
3942 	if (*dev->dev_ops->reta_update == NULL)
3943 		return -ENOTSUP;
3944 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3945 							     reta_size));
3946 }
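
/*
 * Example usage (illustrative sketch): fill a RETA of "reta_size" entries
 * round-robin over "nb_q" Rx queues. The idx/shift arithmetic mirrors the
 * mask checks above; RETA_CONF_SIZE is a hypothetical sizing macro.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
 *	uint16_t i, idx, shift;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= RTE_BIT64(shift);
 *		reta_conf[idx].reta[shift] = i % nb_q;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */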
3947 
3948 int
3949 rte_eth_dev_rss_reta_query(uint16_t port_id,
3950 			   struct rte_eth_rss_reta_entry64 *reta_conf,
3951 			   uint16_t reta_size)
3952 {
3953 	struct rte_eth_dev *dev;
3954 	int ret;
3955 
3956 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3957 	dev = &rte_eth_devices[port_id];
3958 
3959 	if (reta_conf == NULL) {
3960 		RTE_ETHDEV_LOG(ERR,
3961 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
3962 			port_id);
3963 		return -EINVAL;
3964 	}
3965 
3966 	/* Check mask bits */
3967 	ret = eth_check_reta_mask(reta_conf, reta_size);
3968 	if (ret < 0)
3969 		return ret;
3970 
3971 	if (*dev->dev_ops->reta_query == NULL)
3972 		return -ENOTSUP;
3973 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3974 							    reta_size));
3975 }
3976 
3977 int
3978 rte_eth_dev_rss_hash_update(uint16_t port_id,
3979 			    struct rte_eth_rss_conf *rss_conf)
3980 {
3981 	struct rte_eth_dev *dev;
3982 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3983 	enum rte_eth_rx_mq_mode mq_mode;
3984 	int ret;
3985 
3986 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3987 	dev = &rte_eth_devices[port_id];
3988 
3989 	if (rss_conf == NULL) {
3990 		RTE_ETHDEV_LOG(ERR,
3991 			"Cannot update ethdev port %u RSS hash from NULL config\n",
3992 			port_id);
3993 		return -EINVAL;
3994 	}
3995 
3996 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3997 	if (ret != 0)
3998 		return ret;
3999 
4000 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4001 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4002 	    dev_info.flow_type_rss_offloads) {
4003 		RTE_ETHDEV_LOG(ERR,
4004 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4005 			port_id, rss_conf->rss_hf,
4006 			dev_info.flow_type_rss_offloads);
4007 		return -EINVAL;
4008 	}
4009 
4010 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4011 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4012 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4013 		return -ENOTSUP;
4014 	}
4015 
4016 	if (*dev->dev_ops->rss_hash_update == NULL)
4017 		return -ENOTSUP;
4018 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4019 								 rss_conf));
4020 }
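
/*
 * Example usage (illustrative sketch): restrict the RSS hash to IPv4 and
 * IPv4/TCP flows. rss_hf must stay within dev_info.flow_type_rss_offloads
 * as checked above; a NULL rss_key keeps the current hash key.
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */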
4021 
4022 int
4023 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4024 			      struct rte_eth_rss_conf *rss_conf)
4025 {
4026 	struct rte_eth_dev *dev;
4027 
4028 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4029 	dev = &rte_eth_devices[port_id];
4030 
4031 	if (rss_conf == NULL) {
4032 		RTE_ETHDEV_LOG(ERR,
4033 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4034 			port_id);
4035 		return -EINVAL;
4036 	}
4037 
4038 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4039 		return -ENOTSUP;
4040 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4041 								   rss_conf));
4042 }
4043 
4044 int
4045 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4046 				struct rte_eth_udp_tunnel *udp_tunnel)
4047 {
4048 	struct rte_eth_dev *dev;
4049 
4050 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4051 	dev = &rte_eth_devices[port_id];
4052 
4053 	if (udp_tunnel == NULL) {
4054 		RTE_ETHDEV_LOG(ERR,
4055 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4056 			port_id);
4057 		return -EINVAL;
4058 	}
4059 
4060 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4061 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4062 		return -EINVAL;
4063 	}
4064 
4065 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4066 		return -ENOTSUP;
4067 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4068 								udp_tunnel));
4069 }
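
/*
 * Example usage (illustrative sketch): register the IANA-assigned VXLAN
 * UDP port so the device recognizes it as a tunnel.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */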
4070 
4071 int
4072 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4073 				   struct rte_eth_udp_tunnel *udp_tunnel)
4074 {
4075 	struct rte_eth_dev *dev;
4076 
4077 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4078 	dev = &rte_eth_devices[port_id];
4079 
4080 	if (udp_tunnel == NULL) {
4081 		RTE_ETHDEV_LOG(ERR,
4082 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4083 			port_id);
4084 		return -EINVAL;
4085 	}
4086 
4087 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4088 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4089 		return -EINVAL;
4090 	}
4091 
4092 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4093 		return -ENOTSUP;
4094 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4095 								udp_tunnel));
4096 }
4097 
4098 int
4099 rte_eth_led_on(uint16_t port_id)
4100 {
4101 	struct rte_eth_dev *dev;
4102 
4103 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4104 	dev = &rte_eth_devices[port_id];
4105 
4106 	if (*dev->dev_ops->dev_led_on == NULL)
4107 		return -ENOTSUP;
4108 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4109 }
4110 
4111 int
4112 rte_eth_led_off(uint16_t port_id)
4113 {
4114 	struct rte_eth_dev *dev;
4115 
4116 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4117 	dev = &rte_eth_devices[port_id];
4118 
4119 	if (*dev->dev_ops->dev_led_off == NULL)
4120 		return -ENOTSUP;
4121 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4122 }
4123 
4124 int
4125 rte_eth_fec_get_capability(uint16_t port_id,
4126 			   struct rte_eth_fec_capa *speed_fec_capa,
4127 			   unsigned int num)
4128 {
4129 	struct rte_eth_dev *dev;
4130 	int ret;
4131 
4132 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4133 	dev = &rte_eth_devices[port_id];
4134 
4135 	if (speed_fec_capa == NULL && num > 0) {
4136 		RTE_ETHDEV_LOG(ERR,
4137 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4138 			port_id);
4139 		return -EINVAL;
4140 	}
4141 
4142 	if (*dev->dev_ops->fec_get_capability == NULL)
4143 		return -ENOTSUP;
4144 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4145 
4146 	return ret;
4147 }
4148 
4149 int
4150 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4151 {
4152 	struct rte_eth_dev *dev;
4153 
4154 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4155 	dev = &rte_eth_devices[port_id];
4156 
4157 	if (fec_capa == NULL) {
4158 		RTE_ETHDEV_LOG(ERR,
4159 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4160 			port_id);
4161 		return -EINVAL;
4162 	}
4163 
4164 	if (*dev->dev_ops->fec_get == NULL)
4165 		return -ENOTSUP;
4166 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4167 }
4168 
4169 int
4170 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4171 {
4172 	struct rte_eth_dev *dev;
4173 
4174 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4175 	dev = &rte_eth_devices[port_id];
4176 
4177 	if (*dev->dev_ops->fec_set == NULL)
4178 		return -ENOTSUP;
4179 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4180 }
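
/*
 * Example usage (illustrative sketch): query the per-speed FEC capabilities,
 * then request automatic FEC mode selection. The array size 8 is arbitrary.
 *
 *	struct rte_eth_fec_capa capa[8];
 *	int n = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
 *
 *	if (n > 0)
 *		ret = rte_eth_fec_set(port_id,
 *				      RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 */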
4181 
4182 /*
4183  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4184  * an empty spot.
4185  */
4186 static int
4187 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4188 {
4189 	struct rte_eth_dev_info dev_info;
4190 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4191 	unsigned i;
4192 	int ret;
4193 
4194 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4195 	if (ret != 0)
4196 		return -1;
4197 
4198 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4199 		if (memcmp(addr, &dev->data->mac_addrs[i],
4200 				RTE_ETHER_ADDR_LEN) == 0)
4201 			return i;
4202 
4203 	return -1;
4204 }
4205 
4206 static const struct rte_ether_addr null_mac_addr;
4207 
4208 int
4209 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4210 			uint32_t pool)
4211 {
4212 	struct rte_eth_dev *dev;
4213 	int index;
4214 	uint64_t pool_mask;
4215 	int ret;
4216 
4217 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4218 	dev = &rte_eth_devices[port_id];
4219 
4220 	if (addr == NULL) {
4221 		RTE_ETHDEV_LOG(ERR,
4222 			"Cannot add ethdev port %u MAC address from NULL address\n",
4223 			port_id);
4224 		return -EINVAL;
4225 	}
4226 
4227 	if (*dev->dev_ops->mac_addr_add == NULL)
4228 		return -ENOTSUP;
4229 
4230 	if (rte_is_zero_ether_addr(addr)) {
4231 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4232 			port_id);
4233 		return -EINVAL;
4234 	}
4235 	if (pool >= RTE_ETH_64_POOLS) {
4236 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4237 		return -EINVAL;
4238 	}
4239 
4240 	index = eth_dev_get_mac_addr_index(port_id, addr);
4241 	if (index < 0) {
4242 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4243 		if (index < 0) {
4244 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4245 				port_id);
4246 			return -ENOSPC;
4247 		}
4248 	} else {
4249 		pool_mask = dev->data->mac_pool_sel[index];
4250 
4251 		/* If both the MAC address and pool are already there, do nothing */
4252 		if (pool_mask & RTE_BIT64(pool))
4253 			return 0;
4254 	}
4255 
4256 	/* Update NIC */
4257 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4258 
4259 	if (ret == 0) {
4260 		/* Update address in NIC data structure */
4261 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4262 
4263 		/* Update pool bitmap in NIC data structure */
4264 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4265 	}
4266 
4267 	return eth_err(port_id, ret);
4268 }
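
/*
 * Example usage (illustrative sketch): add a secondary unicast address to
 * pool 0. The bytes below are an example locally administered MAC.
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */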
4269 
4270 int
4271 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4272 {
4273 	struct rte_eth_dev *dev;
4274 	int index;
4275 
4276 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4277 	dev = &rte_eth_devices[port_id];
4278 
4279 	if (addr == NULL) {
4280 		RTE_ETHDEV_LOG(ERR,
4281 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4282 			port_id);
4283 		return -EINVAL;
4284 	}
4285 
4286 	if (*dev->dev_ops->mac_addr_remove == NULL)
4287 		return -ENOTSUP;
4288 
4289 	index = eth_dev_get_mac_addr_index(port_id, addr);
4290 	if (index == 0) {
4291 		RTE_ETHDEV_LOG(ERR,
4292 			"Port %u: Cannot remove default MAC address\n",
4293 			port_id);
4294 		return -EADDRINUSE;
4295 	} else if (index < 0)
4296 		return 0;  /* Do nothing if address wasn't found */
4297 
4298 	/* Update NIC */
4299 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4300 
4301 	/* Update address in NIC data structure */
4302 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4303 
4304 	/* reset pool bitmap */
4305 	dev->data->mac_pool_sel[index] = 0;
4306 
4307 	return 0;
4308 }
4309 
4310 int
4311 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4312 {
4313 	struct rte_eth_dev *dev;
4314 	int ret;
4315 
4316 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4317 	dev = &rte_eth_devices[port_id];
4318 
4319 	if (addr == NULL) {
4320 		RTE_ETHDEV_LOG(ERR,
4321 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4322 			port_id);
4323 		return -EINVAL;
4324 	}
4325 
4326 	if (!rte_is_valid_assigned_ether_addr(addr))
4327 		return -EINVAL;
4328 
4329 	if (*dev->dev_ops->mac_addr_set == NULL)
4330 		return -ENOTSUP;
4331 
4332 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4333 	if (ret < 0)
4334 		return ret;
4335 
4336 	/* Update default address in NIC data structure */
4337 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4338 
4339 	return 0;
4340 }
4341 
4342 
4343 /*
4344  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4345  * an empty spot.
4346  */
4347 static int
4348 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4349 		const struct rte_ether_addr *addr)
4350 {
4351 	struct rte_eth_dev_info dev_info;
4352 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4353 	unsigned i;
4354 	int ret;
4355 
4356 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4357 	if (ret != 0)
4358 		return -1;
4359 
4360 	if (!dev->data->hash_mac_addrs)
4361 		return -1;
4362 
4363 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4364 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4365 			RTE_ETHER_ADDR_LEN) == 0)
4366 			return i;
4367 
4368 	return -1;
4369 }
4370 
4371 int
4372 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4373 				uint8_t on)
4374 {
4375 	int index;
4376 	int ret;
4377 	struct rte_eth_dev *dev;
4378 
4379 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4380 	dev = &rte_eth_devices[port_id];
4381 
4382 	if (addr == NULL) {
4383 		RTE_ETHDEV_LOG(ERR,
4384 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
4385 			port_id);
4386 		return -EINVAL;
4387 	}
4388 
4389 	if (rte_is_zero_ether_addr(addr)) {
4390 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4391 			port_id);
4392 		return -EINVAL;
4393 	}
4394 
4395 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4396 	/* Check if it's already there, and do nothing */
4397 	if ((index >= 0) && on)
4398 		return 0;
4399 
4400 	if (index < 0) {
4401 		if (!on) {
4402 			RTE_ETHDEV_LOG(ERR,
4403 				"Port %u: the MAC address was not set in UTA\n",
4404 				port_id);
4405 			return -EINVAL;
4406 		}
4407 
4408 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4409 		if (index < 0) {
4410 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4411 				port_id);
4412 			return -ENOSPC;
4413 		}
4414 	}
4415 
4416 	if (*dev->dev_ops->uc_hash_table_set == NULL)
4417 		return -ENOTSUP;
4418 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4419 	if (ret == 0) {
4420 		/* Update address in NIC data structure */
4421 		if (on)
4422 			rte_ether_addr_copy(addr,
4423 					&dev->data->hash_mac_addrs[index]);
4424 		else
4425 			rte_ether_addr_copy(&null_mac_addr,
4426 					&dev->data->hash_mac_addrs[index]);
4427 	}
4428 
4429 	return eth_err(port_id, ret);
4430 }
4431 
4432 int
4433 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4434 {
4435 	struct rte_eth_dev *dev;
4436 
4437 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4438 	dev = &rte_eth_devices[port_id];
4439 
4440 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
4441 		return -ENOTSUP;
4442 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4443 								       on));
4444 }
4445 
4446 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4447 					uint16_t tx_rate)
4448 {
4449 	struct rte_eth_dev *dev;
4450 	struct rte_eth_dev_info dev_info;
4451 	struct rte_eth_link link;
4452 	int ret;
4453 
4454 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4455 	dev = &rte_eth_devices[port_id];
4456 
4457 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4458 	if (ret != 0)
4459 		return ret;
4460 
4461 	link = dev->data->dev_link;
4462 
4463 	if (queue_idx >= dev_info.max_tx_queues) {
4464 		RTE_ETHDEV_LOG(ERR,
4465 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
4466 			port_id, queue_idx);
4467 		return -EINVAL;
4468 	}
4469 
4470 	if (tx_rate > link.link_speed) {
4471 		RTE_ETHDEV_LOG(ERR,
4472 			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
4473 			tx_rate, link.link_speed);
4474 		return -EINVAL;
4475 	}
4476 
4477 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
4478 		return -ENOTSUP;
4479 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4480 							queue_idx, tx_rate));
4481 }
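
/*
 * Example usage (illustrative sketch): cap Tx queue 0 at 1000 Mbps. The
 * rate uses the same unit as link.link_speed (Mbps) and may not exceed
 * the current link speed, as checked above.
 *
 *	ret = rte_eth_set_queue_rate_limit(port_id, 0, 1000);
 */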
4482 
4483 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4484 			       uint8_t avail_thresh)
4485 {
4486 	struct rte_eth_dev *dev;
4487 
4488 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4489 	dev = &rte_eth_devices[port_id];
4490 
4491 	if (queue_id >= dev->data->nb_rx_queues) {
4492 		RTE_ETHDEV_LOG(ERR,
4493 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
4494 			port_id, queue_id);
4495 		return -EINVAL;
4496 	}
4497 
4498 	if (avail_thresh > 99) {
4499 		RTE_ETHDEV_LOG(ERR,
4500 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
4501 			port_id);
4502 		return -EINVAL;
4503 	}
4504 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
4505 		return -ENOTSUP;
4506 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4507 							     queue_id, avail_thresh));
4508 }
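
/*
 * Example usage (illustrative sketch): request a notification once the
 * available descriptors of Rx queue 0 fall below 50% of the ring size;
 * passing 0 disables the threshold event.
 *
 *	ret = rte_eth_rx_avail_thresh_set(port_id, 0, 50);
 */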
4509 
4510 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4511 				 uint8_t *avail_thresh)
4512 {
4513 	struct rte_eth_dev *dev;
4514 
4515 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4516 	dev = &rte_eth_devices[port_id];
4517 
4518 	if (queue_id == NULL)
4519 		return -EINVAL;
4520 	if (*queue_id >= dev->data->nb_rx_queues)
4521 		*queue_id = 0;
4522 
4523 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
4524 		return -ENOTSUP;
4525 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4526 							     queue_id, avail_thresh));
4527 }
4528 
4529 RTE_INIT(eth_dev_init_fp_ops)
4530 {
4531 	uint32_t i;
4532 
4533 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4534 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4535 }
4536 
4537 RTE_INIT(eth_dev_init_cb_lists)
4538 {
4539 	uint16_t i;
4540 
4541 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4542 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4543 }
4544 
4545 int
4546 rte_eth_dev_callback_register(uint16_t port_id,
4547 			enum rte_eth_event_type event,
4548 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4549 {
4550 	struct rte_eth_dev *dev;
4551 	struct rte_eth_dev_callback *user_cb;
4552 	uint16_t next_port;
4553 	uint16_t last_port;
4554 
4555 	if (cb_fn == NULL) {
4556 		RTE_ETHDEV_LOG(ERR,
4557 			"Cannot register ethdev port %u callback from NULL\n",
4558 			port_id);
4559 		return -EINVAL;
4560 	}
4561 
4562 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4563 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4564 		return -EINVAL;
4565 	}
4566 
4567 	if (port_id == RTE_ETH_ALL) {
4568 		next_port = 0;
4569 		last_port = RTE_MAX_ETHPORTS - 1;
4570 	} else {
4571 		next_port = last_port = port_id;
4572 	}
4573 
4574 	rte_spinlock_lock(&eth_dev_cb_lock);
4575 
4576 	do {
4577 		dev = &rte_eth_devices[next_port];
4578 
4579 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4580 			if (user_cb->cb_fn == cb_fn &&
4581 				user_cb->cb_arg == cb_arg &&
4582 				user_cb->event == event) {
4583 				break;
4584 			}
4585 		}
4586 
4587 		/* create a new callback. */
4588 		if (user_cb == NULL) {
4589 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4590 				sizeof(struct rte_eth_dev_callback), 0);
4591 			if (user_cb != NULL) {
4592 				user_cb->cb_fn = cb_fn;
4593 				user_cb->cb_arg = cb_arg;
4594 				user_cb->event = event;
4595 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4596 						  user_cb, next);
4597 			} else {
4598 				rte_spinlock_unlock(&eth_dev_cb_lock);
4599 				rte_eth_dev_callback_unregister(port_id, event,
4600 								cb_fn, cb_arg);
4601 				return -ENOMEM;
4602 			}
4603 
4604 		}
4605 	} while (++next_port <= last_port);
4606 
4607 	rte_spinlock_unlock(&eth_dev_cb_lock);
4608 	return 0;
4609 }
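
/*
 * Example usage (illustrative sketch): register one link-state handler for
 * all ports. "link_event_cb" is a hypothetical application function with
 * the rte_eth_dev_cb_fn signature.
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		      void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, link_event_cb, NULL);
 */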
4610 
4611 int
4612 rte_eth_dev_callback_unregister(uint16_t port_id,
4613 			enum rte_eth_event_type event,
4614 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4615 {
4616 	int ret;
4617 	struct rte_eth_dev *dev;
4618 	struct rte_eth_dev_callback *cb, *next;
4619 	uint16_t next_port;
4620 	uint16_t last_port;
4621 
4622 	if (cb_fn == NULL) {
4623 		RTE_ETHDEV_LOG(ERR,
4624 			"Cannot unregister ethdev port %u callback from NULL\n",
4625 			port_id);
4626 		return -EINVAL;
4627 	}
4628 
4629 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4630 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4631 		return -EINVAL;
4632 	}
4633 
4634 	if (port_id == RTE_ETH_ALL) {
4635 		next_port = 0;
4636 		last_port = RTE_MAX_ETHPORTS - 1;
4637 	} else {
4638 		next_port = last_port = port_id;
4639 	}
4640 
4641 	rte_spinlock_lock(&eth_dev_cb_lock);
4642 
4643 	do {
4644 		dev = &rte_eth_devices[next_port];
4645 		ret = 0;
4646 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4647 		     cb = next) {
4648 
4649 			next = TAILQ_NEXT(cb, next);
4650 
4651 			if (cb->cb_fn != cb_fn || cb->event != event ||
4652 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4653 				continue;
4654 
4655 			/*
4656 			 * if this callback is not executing right now,
4657 			 * then remove it.
4658 			 */
4659 			if (cb->active == 0) {
4660 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4661 				rte_free(cb);
4662 			} else {
4663 				ret = -EAGAIN;
4664 			}
4665 		}
4666 	} while (++next_port <= last_port);
4667 
4668 	rte_spinlock_unlock(&eth_dev_cb_lock);
4669 	return ret;
4670 }
4671 
4672 int
4673 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4674 {
4675 	uint32_t vec;
4676 	struct rte_eth_dev *dev;
4677 	struct rte_intr_handle *intr_handle;
4678 	uint16_t qid;
4679 	int rc;
4680 
4681 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4682 	dev = &rte_eth_devices[port_id];
4683 
4684 	if (!dev->intr_handle) {
4685 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4686 		return -ENOTSUP;
4687 	}
4688 
4689 	intr_handle = dev->intr_handle;
4690 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4691 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4692 		return -EPERM;
4693 	}
4694 
4695 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4696 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4697 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4698 		if (rc && rc != -EEXIST) {
4699 			RTE_ETHDEV_LOG(ERR,
4700 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4701 				port_id, qid, op, epfd, vec);
4702 		}
4703 	}
4704 
4705 	return 0;
4706 }
4707 
4708 int
4709 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4710 {
4711 	struct rte_intr_handle *intr_handle;
4712 	struct rte_eth_dev *dev;
4713 	unsigned int efd_idx;
4714 	uint32_t vec;
4715 	int fd;
4716 
4717 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4718 	dev = &rte_eth_devices[port_id];
4719 
4720 	if (queue_id >= dev->data->nb_rx_queues) {
4721 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4722 		return -1;
4723 	}
4724 
4725 	if (!dev->intr_handle) {
4726 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4727 		return -1;
4728 	}
4729 
4730 	intr_handle = dev->intr_handle;
4731 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4732 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4733 		return -1;
4734 	}
4735 
4736 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4737 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4738 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4739 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4740 
4741 	return fd;
4742 }
4743 
4744 int
4745 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4746 			  int epfd, int op, void *data)
4747 {
4748 	uint32_t vec;
4749 	struct rte_eth_dev *dev;
4750 	struct rte_intr_handle *intr_handle;
4751 	int rc;
4752 
4753 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4754 	dev = &rte_eth_devices[port_id];
4755 
4756 	if (queue_id >= dev->data->nb_rx_queues) {
4757 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4758 		return -EINVAL;
4759 	}
4760 
4761 	if (!dev->intr_handle) {
4762 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4763 		return -ENOTSUP;
4764 	}
4765 
4766 	intr_handle = dev->intr_handle;
4767 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4768 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4769 		return -EPERM;
4770 	}
4771 
4772 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4773 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4774 	if (rc && rc != -EEXIST) {
4775 		RTE_ETHDEV_LOG(ERR,
4776 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4777 			port_id, queue_id, op, epfd, vec);
4778 		return rc;
4779 	}
4780 
4781 	return 0;
4782 }
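
/*
 * Example usage (illustrative sketch): arm an epoll event for Rx queue 0 on
 * the per-thread epoll instance, enable the queue interrupt, and block until
 * a packet arrives (the l3fwd-power style pattern).
 *
 *	struct rte_epoll_event ev;
 *
 *	ret = rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *					RTE_INTR_EVENT_ADD, NULL);
 *	if (ret == 0 && rte_eth_dev_rx_intr_enable(port_id, 0) == 0)
 *		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 */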
4783 
4784 int
4785 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4786 			   uint16_t queue_id)
4787 {
4788 	struct rte_eth_dev *dev;
4789 	int ret;
4790 
4791 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4792 	dev = &rte_eth_devices[port_id];
4793 
4794 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4795 	if (ret != 0)
4796 		return ret;
4797 
4798 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
4799 		return -ENOTSUP;
4800 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4801 }
4802 
4803 int
4804 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4805 			    uint16_t queue_id)
4806 {
4807 	struct rte_eth_dev *dev;
4808 	int ret;
4809 
4810 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4811 	dev = &rte_eth_devices[port_id];
4812 
4813 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4814 	if (ret != 0)
4815 		return ret;
4816 
4817 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
4818 		return -ENOTSUP;
4819 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4820 }
4821 
4822 
4823 const struct rte_eth_rxtx_callback *
4824 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4825 		rte_rx_callback_fn fn, void *user_param)
4826 {
4827 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4828 	rte_errno = ENOTSUP;
4829 	return NULL;
4830 #endif
4831 	struct rte_eth_dev *dev;
4832 
4833 	/* check input parameters */
4834 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4835 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4836 		rte_errno = EINVAL;
4837 		return NULL;
4838 	}
4839 	dev = &rte_eth_devices[port_id];
4840 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4841 		rte_errno = EINVAL;
4842 		return NULL;
4843 	}
4844 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4845 
4846 	if (cb == NULL) {
4847 		rte_errno = ENOMEM;
4848 		return NULL;
4849 	}
4850 
4851 	cb->fn.rx = fn;
4852 	cb->param = user_param;
4853 
4854 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4855 	/* Add the callbacks in FIFO order. */
4856 	struct rte_eth_rxtx_callback *tail =
4857 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4858 
4859 	if (!tail) {
4860 		/* Stores to cb->fn and cb->param should complete before
4861 		 * cb is visible to data plane.
4862 		 */
4863 		__atomic_store_n(
4864 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4865 			cb, __ATOMIC_RELEASE);
4866 
4867 	} else {
4868 		while (tail->next)
4869 			tail = tail->next;
4870 		/* Stores to cb->fn and cb->param should complete before
4871 		 * cb is visible to data plane.
4872 		 */
4873 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4874 	}
4875 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4876 
4877 	return cb;
4878 }
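
/*
 * Example usage (illustrative sketch): attach a post-Rx callback that counts
 * packets on queue 0. "count_cb" is a hypothetical function matching the
 * rte_rx_callback_fn signature; it passes the burst through unchanged.
 *
 *	static uint16_t
 *	count_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_count);
 */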
4879 
4880 const struct rte_eth_rxtx_callback *
4881 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4882 		rte_rx_callback_fn fn, void *user_param)
4883 {
4884 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4885 	rte_errno = ENOTSUP;
4886 	return NULL;
4887 #endif
4888 	/* check input parameters */
4889 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4890 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4891 		rte_errno = EINVAL;
4892 		return NULL;
4893 	}
4894 
4895 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4896 
4897 	if (cb == NULL) {
4898 		rte_errno = ENOMEM;
4899 		return NULL;
4900 	}
4901 
4902 	cb->fn.rx = fn;
4903 	cb->param = user_param;
4904 
4905 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4906 	/* Add the callbacks at first position */
4907 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4908 	/* Stores to cb->fn, cb->param and cb->next should complete before
4909 	 * cb is visible to data plane threads.
4910 	 */
4911 	__atomic_store_n(
4912 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4913 		cb, __ATOMIC_RELEASE);
4914 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4915 
4916 	return cb;
4917 }
4918 
4919 const struct rte_eth_rxtx_callback *
4920 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4921 		rte_tx_callback_fn fn, void *user_param)
4922 {
4923 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4924 	rte_errno = ENOTSUP;
4925 	return NULL;
4926 #endif
4927 	struct rte_eth_dev *dev;
4928 
4929 	/* check input parameters */
4930 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4931 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4932 		rte_errno = EINVAL;
4933 		return NULL;
4934 	}
4935 
4936 	dev = &rte_eth_devices[port_id];
4937 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4938 		rte_errno = EINVAL;
4939 		return NULL;
4940 	}
4941 
4942 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4943 
4944 	if (cb == NULL) {
4945 		rte_errno = ENOMEM;
4946 		return NULL;
4947 	}
4948 
4949 	cb->fn.tx = fn;
4950 	cb->param = user_param;
4951 
4952 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4953 	/* Add the callbacks in FIFO order. */
4954 	struct rte_eth_rxtx_callback *tail =
4955 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4956 
4957 	if (!tail) {
4958 		/* Stores to cb->fn and cb->param should complete before
4959 		 * cb is visible to data plane.
4960 		 */
4961 		__atomic_store_n(
4962 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4963 			cb, __ATOMIC_RELEASE);
4964 
4965 	} else {
4966 		while (tail->next)
4967 			tail = tail->next;
4968 		/* Stores to cb->fn and cb->param should complete before
4969 		 * cb is visible to data plane.
4970 		 */
4971 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4972 	}
4973 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4974 
4975 	return cb;
4976 }
4977 
4978 int
4979 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4980 		const struct rte_eth_rxtx_callback *user_cb)
4981 {
4982 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4983 	return -ENOTSUP;
4984 #endif
4985 	/* Check input parameters. */
4986 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4987 	if (user_cb == NULL ||
4988 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4989 		return -EINVAL;
4990 
4991 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4992 	struct rte_eth_rxtx_callback *cb;
4993 	struct rte_eth_rxtx_callback **prev_cb;
4994 	int ret = -EINVAL;
4995 
4996 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4997 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
4998 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4999 		cb = *prev_cb;
5000 		if (cb == user_cb) {
5001 			/* Remove the user cb from the callback list. */
5002 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5003 			ret = 0;
5004 			break;
5005 		}
5006 	}
5007 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5008 
5009 	return ret;
5010 }
5011 
5012 int
5013 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5014 		const struct rte_eth_rxtx_callback *user_cb)
5015 {
5016 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5017 	return -ENOTSUP;
5018 #endif
5019 	/* Check input parameters. */
5020 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5021 	if (user_cb == NULL ||
5022 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5023 		return -EINVAL;
5024 
5025 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5026 	int ret = -EINVAL;
5027 	struct rte_eth_rxtx_callback *cb;
5028 	struct rte_eth_rxtx_callback **prev_cb;
5029 
5030 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5031 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5032 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5033 		cb = *prev_cb;
5034 		if (cb == user_cb) {
5035 			/* Remove the user cb from the callback list. */
5036 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5037 			ret = 0;
5038 			break;
5039 		}
5040 	}
5041 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5042 
5043 	return ret;
5044 }
5045 
5046 int
5047 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5048 	struct rte_eth_rxq_info *qinfo)
5049 {
5050 	struct rte_eth_dev *dev;
5051 
5052 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5053 	dev = &rte_eth_devices[port_id];
5054 
5055 	if (queue_id >= dev->data->nb_rx_queues) {
5056 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5057 		return -EINVAL;
5058 	}
5059 
5060 	if (qinfo == NULL) {
5061 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5062 			port_id, queue_id);
5063 		return -EINVAL;
5064 	}
5065 
5066 	if (dev->data->rx_queues == NULL ||
5067 			dev->data->rx_queues[queue_id] == NULL) {
5068 		RTE_ETHDEV_LOG(ERR,
5069 			       "Rx queue %"PRIu16" of device with port_id=%"
5070 			       PRIu16" has not been setup\n",
5071 			       queue_id, port_id);
5072 		return -EINVAL;
5073 	}
5074 
5075 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5076 		RTE_ETHDEV_LOG(INFO,
5077 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5078 			queue_id, port_id);
5079 		return -EINVAL;
5080 	}
5081 
5082 	if (*dev->dev_ops->rxq_info_get == NULL)
5083 		return -ENOTSUP;
5084 
5085 	memset(qinfo, 0, sizeof(*qinfo));
5086 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5087 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5088 
5089 	return 0;
5090 }
5091 
5092 int
5093 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5094 	struct rte_eth_txq_info *qinfo)
5095 {
5096 	struct rte_eth_dev *dev;
5097 
5098 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5099 	dev = &rte_eth_devices[port_id];
5100 
5101 	if (queue_id >= dev->data->nb_tx_queues) {
5102 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5103 		return -EINVAL;
5104 	}
5105 
5106 	if (qinfo == NULL) {
5107 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5108 			port_id, queue_id);
5109 		return -EINVAL;
5110 	}
5111 
5112 	if (dev->data->tx_queues == NULL ||
5113 			dev->data->tx_queues[queue_id] == NULL) {
5114 		RTE_ETHDEV_LOG(ERR,
5115 			       "Tx queue %"PRIu16" of device with port_id=%"
5116 			       PRIu16" has not been setup\n",
5117 			       queue_id, port_id);
5118 		return -EINVAL;
5119 	}
5120 
5121 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5122 		RTE_ETHDEV_LOG(INFO,
5123 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5124 			queue_id, port_id);
5125 		return -EINVAL;
5126 	}
5127 
5128 	if (*dev->dev_ops->txq_info_get == NULL)
5129 		return -ENOTSUP;
5130 
5131 	memset(qinfo, 0, sizeof(*qinfo));
5132 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5133 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5134 
5135 	return 0;
5136 }
5137 
5138 int
5139 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5140 			  struct rte_eth_burst_mode *mode)
5141 {
5142 	struct rte_eth_dev *dev;
5143 
5144 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5145 	dev = &rte_eth_devices[port_id];
5146 
5147 	if (queue_id >= dev->data->nb_rx_queues) {
5148 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5149 		return -EINVAL;
5150 	}
5151 
5152 	if (mode == NULL) {
5153 		RTE_ETHDEV_LOG(ERR,
5154 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5155 			port_id, queue_id);
5156 		return -EINVAL;
5157 	}
5158 
5159 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5160 		return -ENOTSUP;
5161 	memset(mode, 0, sizeof(*mode));
5162 	return eth_err(port_id,
5163 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5164 }
5165 
5166 int
5167 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5168 			  struct rte_eth_burst_mode *mode)
5169 {
5170 	struct rte_eth_dev *dev;
5171 
5172 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5173 	dev = &rte_eth_devices[port_id];
5174 
5175 	if (queue_id >= dev->data->nb_tx_queues) {
5176 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5177 		return -EINVAL;
5178 	}
5179 
5180 	if (mode == NULL) {
5181 		RTE_ETHDEV_LOG(ERR,
5182 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5183 			port_id, queue_id);
5184 		return -EINVAL;
5185 	}
5186 
5187 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
5188 		return -ENOTSUP;
5189 	memset(mode, 0, sizeof(*mode));
5190 	return eth_err(port_id,
5191 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5192 }
5193 
5194 int
5195 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5196 		struct rte_power_monitor_cond *pmc)
5197 {
5198 	struct rte_eth_dev *dev;
5199 
5200 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5201 	dev = &rte_eth_devices[port_id];
5202 
5203 	if (queue_id >= dev->data->nb_rx_queues) {
5204 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5205 		return -EINVAL;
5206 	}
5207 
5208 	if (pmc == NULL) {
5209 		RTE_ETHDEV_LOG(ERR,
5210 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5211 			port_id, queue_id);
5212 		return -EINVAL;
5213 	}
5214 
5215 	if (*dev->dev_ops->get_monitor_addr == NULL)
5216 		return -ENOTSUP;
5217 	return eth_err(port_id,
5218 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5219 }
5220 
5221 int
5222 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5223 			     struct rte_ether_addr *mc_addr_set,
5224 			     uint32_t nb_mc_addr)
5225 {
5226 	struct rte_eth_dev *dev;
5227 
5228 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5229 	dev = &rte_eth_devices[port_id];
5230 
5231 	if (*dev->dev_ops->set_mc_addr_list == NULL)
5232 		return -ENOTSUP;
5233 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5234 						mc_addr_set, nb_mc_addr));
5235 }
5236 
5237 int
5238 rte_eth_timesync_enable(uint16_t port_id)
5239 {
5240 	struct rte_eth_dev *dev;
5241 
5242 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5243 	dev = &rte_eth_devices[port_id];
5244 
5245 	if (*dev->dev_ops->timesync_enable == NULL)
5246 		return -ENOTSUP;
5247 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5248 }
5249 
5250 int
5251 rte_eth_timesync_disable(uint16_t port_id)
5252 {
5253 	struct rte_eth_dev *dev;
5254 
5255 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5256 	dev = &rte_eth_devices[port_id];
5257 
5258 	if (*dev->dev_ops->timesync_disable == NULL)
5259 		return -ENOTSUP;
5260 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5261 }
5262 
5263 int
5264 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5265 				   uint32_t flags)
5266 {
5267 	struct rte_eth_dev *dev;
5268 
5269 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5270 	dev = &rte_eth_devices[port_id];
5271 
5272 	if (timestamp == NULL) {
5273 		RTE_ETHDEV_LOG(ERR,
5274 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5275 			port_id);
5276 		return -EINVAL;
5277 	}
5278 
5279 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
5280 		return -ENOTSUP;
5281 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5282 				(dev, timestamp, flags));
5283 }
5284 
5285 int
5286 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5287 				   struct timespec *timestamp)
5288 {
5289 	struct rte_eth_dev *dev;
5290 
5291 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5292 	dev = &rte_eth_devices[port_id];
5293 
5294 	if (timestamp == NULL) {
5295 		RTE_ETHDEV_LOG(ERR,
5296 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
5297 			port_id);
5298 		return -EINVAL;
5299 	}
5300 
5301 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
5302 		return -ENOTSUP;
5303 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5304 				(dev, timestamp));
5305 }
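
/*
 * Example usage (illustrative sketch): enable timesync, then poll for the Rx
 * timestamp latched for a received PTP frame. The "flags" argument (register
 * index on some devices) is device specific; 0 is a placeholder here.
 *
 *	struct timespec ts;
 *
 *	ret = rte_eth_timesync_enable(port_id);
 *	if (ret == 0 &&
 *	    rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
 *		printf("Rx timestamp: %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */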
5306 
5307 int
5308 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5309 {
5310 	struct rte_eth_dev *dev;
5311 
5312 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5313 	dev = &rte_eth_devices[port_id];
5314 
5315 	if (*dev->dev_ops->timesync_adjust_time == NULL)
5316 		return -ENOTSUP;
5317 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5318 }
5319 
5320 int
5321 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5322 {
5323 	struct rte_eth_dev *dev;
5324 
5325 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5326 	dev = &rte_eth_devices[port_id];
5327 
5328 	if (timestamp == NULL) {
5329 		RTE_ETHDEV_LOG(ERR,
5330 			"Cannot read ethdev port %u timesync time to NULL\n",
5331 			port_id);
5332 		return -EINVAL;
5333 	}
5334 
5335 	if (*dev->dev_ops->timesync_read_time == NULL)
5336 		return -ENOTSUP;
5337 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5338 								timestamp));
5339 }
5340 
5341 int
5342 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5343 {
5344 	struct rte_eth_dev *dev;
5345 
5346 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5347 	dev = &rte_eth_devices[port_id];
5348 
5349 	if (timestamp == NULL) {
5350 		RTE_ETHDEV_LOG(ERR,
5351 			"Cannot write ethdev port %u timesync from NULL time\n",
5352 			port_id);
5353 		return -EINVAL;
5354 	}
5355 
5356 	if (*dev->dev_ops->timesync_write_time == NULL)
5357 		return -ENOTSUP;
5358 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5359 								timestamp));
5360 }
5361 
5362 int
5363 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5364 {
5365 	struct rte_eth_dev *dev;
5366 
5367 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5368 	dev = &rte_eth_devices[port_id];
5369 
5370 	if (clock == NULL) {
5371 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5372 			port_id);
5373 		return -EINVAL;
5374 	}
5375 
5376 	if (*dev->dev_ops->read_clock == NULL)
5377 		return -ENOTSUP;
5378 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5379 }
5380 
5381 int
5382 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5383 {
5384 	struct rte_eth_dev *dev;
5385 
5386 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5387 	dev = &rte_eth_devices[port_id];
5388 
5389 	if (info == NULL) {
5390 		RTE_ETHDEV_LOG(ERR,
5391 			"Cannot get ethdev port %u register info to NULL\n",
5392 			port_id);
5393 		return -EINVAL;
5394 	}
5395 
5396 	if (*dev->dev_ops->get_reg == NULL)
5397 		return -ENOTSUP;
5398 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5399 }
5400 
5401 int
5402 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5403 {
5404 	struct rte_eth_dev *dev;
5405 
5406 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5407 	dev = &rte_eth_devices[port_id];
5408 
5409 	if (*dev->dev_ops->get_eeprom_length == NULL)
5410 		return -ENOTSUP;
5411 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5412 }
5413 
5414 int
5415 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5416 {
5417 	struct rte_eth_dev *dev;
5418 
5419 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5420 	dev = &rte_eth_devices[port_id];
5421 
5422 	if (info == NULL) {
5423 		RTE_ETHDEV_LOG(ERR,
5424 			"Cannot get ethdev port %u EEPROM info to NULL\n",
5425 			port_id);
5426 		return -EINVAL;
5427 	}
5428 
5429 	if (*dev->dev_ops->get_eeprom == NULL)
5430 		return -ENOTSUP;
5431 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5432 }
5433 
5434 int
5435 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5436 {
5437 	struct rte_eth_dev *dev;
5438 
5439 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5440 	dev = &rte_eth_devices[port_id];
5441 
5442 	if (info == NULL) {
5443 		RTE_ETHDEV_LOG(ERR,
5444 			"Cannot set ethdev port %u EEPROM from NULL info\n",
5445 			port_id);
5446 		return -EINVAL;
5447 	}
5448 
5449 	if (*dev->dev_ops->set_eeprom == NULL)
5450 		return -ENOTSUP;
5451 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5452 }
5453 
5454 int
5455 rte_eth_dev_get_module_info(uint16_t port_id,
5456 			    struct rte_eth_dev_module_info *modinfo)
5457 {
5458 	struct rte_eth_dev *dev;
5459 
5460 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5461 	dev = &rte_eth_devices[port_id];
5462 
5463 	if (modinfo == NULL) {
5464 		RTE_ETHDEV_LOG(ERR,
5465 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
5466 			port_id);
5467 		return -EINVAL;
5468 	}
5469 
5470 	if (*dev->dev_ops->get_module_info == NULL)
5471 		return -ENOTSUP;
5472 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
5473 }
5474 
5475 int
5476 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5477 			      struct rte_dev_eeprom_info *info)
5478 {
5479 	struct rte_eth_dev *dev;
5480 
5481 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5482 	dev = &rte_eth_devices[port_id];
5483 
5484 	if (info == NULL) {
5485 		RTE_ETHDEV_LOG(ERR,
5486 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
5487 			port_id);
5488 		return -EINVAL;
5489 	}
5490 
5491 	if (info->data == NULL) {
5492 		RTE_ETHDEV_LOG(ERR,
5493 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
5494 			port_id);
5495 		return -EINVAL;
5496 	}
5497 
5498 	if (info->length == 0) {
5499 		RTE_ETHDEV_LOG(ERR,
5500 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
5501 			port_id);
5502 		return -EINVAL;
5503 	}
5504 
5505 	if (*dev->dev_ops->get_module_eeprom == NULL)
5506 		return -ENOTSUP;
5507 	return (*dev->dev_ops->get_module_eeprom)(dev, info);
5508 }
5509 
5510 int
5511 rte_eth_dev_get_dcb_info(uint16_t port_id,
5512 			     struct rte_eth_dcb_info *dcb_info)
5513 {
5514 	struct rte_eth_dev *dev;
5515 
5516 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5517 	dev = &rte_eth_devices[port_id];
5518 
5519 	if (dcb_info == NULL) {
5520 		RTE_ETHDEV_LOG(ERR,
5521 			"Cannot get ethdev port %u DCB info to NULL\n",
5522 			port_id);
5523 		return -EINVAL;
5524 	}
5525 
5526 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5527 
5528 	if (*dev->dev_ops->get_dcb_info == NULL)
5529 		return -ENOTSUP;
5530 	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5531 }
5532 
5533 static void
5534 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5535 		const struct rte_eth_desc_lim *desc_lim)
5536 {
5537 	if (desc_lim->nb_align != 0)
5538 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5539 
5540 	if (desc_lim->nb_max != 0)
5541 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5542 
5543 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5544 }
5545 
5546 int
5547 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5548 				 uint16_t *nb_rx_desc,
5549 				 uint16_t *nb_tx_desc)
5550 {
5551 	struct rte_eth_dev_info dev_info;
5552 	int ret;
5553 
5554 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5555 
5556 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5557 	if (ret != 0)
5558 		return ret;
5559 
5560 	if (nb_rx_desc != NULL)
5561 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5562 
5563 	if (nb_tx_desc != NULL)
5564 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5565 
5566 	return 0;
5567 }
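
/*
 * Example usage (illustrative sketch): clamp requested ring sizes to the
 * device descriptor limits before queue setup; 4096 is an arbitrary request.
 * On success, nb_rxd and nb_txd satisfy the nb_min/nb_max/nb_align limits.
 *
 *	uint16_t nb_rxd = 4096, nb_txd = 4096;
 *
 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 */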
5568 
5569 int
5570 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5571 				   struct rte_eth_hairpin_cap *cap)
5572 {
5573 	struct rte_eth_dev *dev;
5574 
5575 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5576 	dev = &rte_eth_devices[port_id];
5577 
5578 	if (cap == NULL) {
5579 		RTE_ETHDEV_LOG(ERR,
5580 			"Cannot get ethdev port %u hairpin capability to NULL\n",
5581 			port_id);
5582 		return -EINVAL;
5583 	}
5584 
5585 	if (*dev->dev_ops->hairpin_cap_get == NULL)
5586 		return -ENOTSUP;
5587 	memset(cap, 0, sizeof(*cap));
5588 	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5589 }
5590 
5591 int
5592 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5593 {
5594 	struct rte_eth_dev *dev;
5595 
5596 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5597 	dev = &rte_eth_devices[port_id];
5598 
5599 	if (pool == NULL) {
5600 		RTE_ETHDEV_LOG(ERR,
5601 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
5602 			port_id);
5603 		return -EINVAL;
5604 	}
5605 
5606 	if (*dev->dev_ops->pool_ops_supported == NULL)
5607 		return 1; /* all pools are supported */
5608 
5609 	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5610 }
5611 
5612 static int
5613 eth_dev_handle_port_list(const char *cmd __rte_unused,
5614 		const char *params __rte_unused,
5615 		struct rte_tel_data *d)
5616 {
5617 	int port_id;
5618 
5619 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5620 	RTE_ETH_FOREACH_DEV(port_id)
5621 		rte_tel_data_add_array_int(d, port_id);
5622 	return 0;
5623 }
5624 
5625 static void
5626 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5627 		const char *stat_name)
5628 {
5629 	int q;
5630 	struct rte_tel_data *q_data = rte_tel_data_alloc();
5631 	if (q_data == NULL)
5632 		return;
5633 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5634 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5635 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
5636 	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5637 }
5638 
5639 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5640 
5641 static int
5642 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5643 		const char *params,
5644 		struct rte_tel_data *d)
5645 {
5646 	struct rte_eth_stats stats;
5647 	int port_id, ret;
5648 
5649 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5650 		return -1;
5651 
5652 	port_id = atoi(params);
5653 	if (!rte_eth_dev_is_valid_port(port_id))
5654 		return -1;
5655 
5656 	ret = rte_eth_stats_get(port_id, &stats);
5657 	if (ret < 0)
5658 		return -1;
5659 
5660 	rte_tel_data_start_dict(d);
5661 	ADD_DICT_STAT(stats, ipackets);
5662 	ADD_DICT_STAT(stats, opackets);
5663 	ADD_DICT_STAT(stats, ibytes);
5664 	ADD_DICT_STAT(stats, obytes);
5665 	ADD_DICT_STAT(stats, imissed);
5666 	ADD_DICT_STAT(stats, ierrors);
5667 	ADD_DICT_STAT(stats, oerrors);
5668 	ADD_DICT_STAT(stats, rx_nombuf);
5669 	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5670 	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5671 	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5672 	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5673 	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5674 
5675 	return 0;
5676 }
5677 
5678 static int
5679 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5680 		const char *params,
5681 		struct rte_tel_data *d)
5682 {
5683 	struct rte_eth_xstat *eth_xstats;
5684 	struct rte_eth_xstat_name *xstat_names;
5685 	int port_id, num_xstats;
5686 	int i, ret;
5687 	char *end_param;
5688 
5689 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5690 		return -1;
5691 
5692 	port_id = strtoul(params, &end_param, 0);
5693 	if (*end_param != '\0')
5694 		RTE_ETHDEV_LOG(NOTICE,
5695 			"Extra parameters passed to ethdev telemetry command, ignoring");
5696 	if (!rte_eth_dev_is_valid_port(port_id))
5697 		return -1;
5698 
5699 	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5700 	if (num_xstats < 0)
5701 		return -1;
5702 
5703 	/* use one malloc for both names and stats */
5704 	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5705 			sizeof(struct rte_eth_xstat_name)) * num_xstats);
5706 	if (eth_xstats == NULL)
5707 		return -1;
5708 	xstat_names = (void *)&eth_xstats[num_xstats];
5709 
5710 	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5711 	if (ret < 0 || ret > num_xstats) {
5712 		free(eth_xstats);
5713 		return -1;
5714 	}
5715 
5716 	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5717 	if (ret < 0 || ret > num_xstats) {
5718 		free(eth_xstats);
5719 		return -1;
5720 	}
5721 
5722 	rte_tel_data_start_dict(d);
5723 	for (i = 0; i < num_xstats; i++)
5724 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5725 				eth_xstats[i].value);
5726 	free(eth_xstats);
5727 	return 0;
5728 }
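/*
 * Application-side sketch of the same two-call xstats pattern the handler
 * above uses: size the arrays first, then fetch (illustrative only; error
 * handling abridged):
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *	n = rte_eth_xstats_get(port_id, xs, n);
 *	... xs[0..n-1] now holds the xstat ids and values ...
 *	free(xs);
 */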
5729 
5730 #ifndef RTE_EXEC_ENV_WINDOWS
5731 static int
5732 eth_dev_handle_port_dump_priv(const char *cmd __rte_unused,
5733 			const char *params,
5734 			struct rte_tel_data *d)
5735 {
5736 	char *buf, *end_param;
5737 	int port_id, ret;
5738 	FILE *f;
5739 
5740 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5741 		return -EINVAL;
5742 
5743 	port_id = strtoul(params, &end_param, 0);
5744 	if (*end_param != '\0')
5745 		RTE_ETHDEV_LOG(NOTICE,
5746 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5747 	if (!rte_eth_dev_is_valid_port(port_id))
5748 		return -EINVAL;
5749 
5750 	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
5751 	if (buf == NULL)
5752 		return -ENOMEM;
5753 
5754 	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
5755 	if (f == NULL) {
5756 		free(buf);
5757 		return -EINVAL;
5758 	}
5759 
5760 	ret = rte_eth_dev_priv_dump(port_id, f);
5761 	fclose(f);
5762 	if (ret == 0)
5763 		/* the whole dump is returned to the telemetry client
5764 		 * as a single string value */
5765 		rte_tel_data_string(d, buf);
5766 
5767 	free(buf);
5768 	return ret; /* propagate the dump status to the telemetry caller */
5769 }
5770 #endif /* !RTE_EXEC_ENV_WINDOWS */
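/*
 * The handler above relies on the POSIX fmemopen() idiom to capture
 * FILE*-oriented output in memory; a minimal standalone sketch
 * (POSIX-only, illustrative):
 *
 *	char buf[256];
 *	FILE *f = fmemopen(buf, sizeof(buf) - 1, "w+");
 *	if (f != NULL) {
 *		fprintf(f, "hello");
 *		fclose(f);
 *		... buf now holds "hello", NUL-terminated on close ...
 *	}
 */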
5771 
5772 static int
5773 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5774 		const char *params,
5775 		struct rte_tel_data *d)
5776 {
5777 	static const char *status_str = "status";
5778 	int ret, port_id;
5779 	struct rte_eth_link link;
5780 	char *end_param;
5781 
5782 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5783 		return -1;
5784 
5785 	port_id = strtoul(params, &end_param, 0);
5786 	if (*end_param != '\0')
5787 		RTE_ETHDEV_LOG(NOTICE,
5788 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5789 	if (!rte_eth_dev_is_valid_port(port_id))
5790 		return -1;
5791 
5792 	ret = rte_eth_link_get_nowait(port_id, &link);
5793 	if (ret < 0)
5794 		return -1;
5795 
5796 	rte_tel_data_start_dict(d);
5797 	if (!link.link_status) {
5798 		rte_tel_data_add_dict_string(d, status_str, "DOWN");
5799 		return 0;
5800 	}
5801 	rte_tel_data_add_dict_string(d, status_str, "UP");
5802 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5803 	rte_tel_data_add_dict_string(d, "duplex",
5804 			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
5805 				"full-duplex" : "half-duplex");
5806 	return 0;
5807 }
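/*
 * Illustrative exchange for the handler above (values depend on the
 * attached device):
 *
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *	    "duplex": "full-duplex"}}
 */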
5808 
5809 static int
5810 eth_dev_handle_port_info(const char *cmd __rte_unused,
5811 		const char *params,
5812 		struct rte_tel_data *d)
5813 {
5814 	struct rte_tel_data *rxq_state, *txq_state;
5815 	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
5816 	struct rte_eth_dev *eth_dev;
5817 	char *end_param;
5818 	int port_id, i;
5819 
5820 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5821 		return -EINVAL;
5822 
5823 	port_id = strtoul(params, &end_param, 0);
5824 	if (*end_param != '\0')
5825 		RTE_ETHDEV_LOG(NOTICE,
5826 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5827 
5828 	if (!rte_eth_dev_is_valid_port(port_id))
5829 		return -EINVAL;
5830 
5831 	eth_dev = &rte_eth_devices[port_id];
5832 
5833 	rxq_state = rte_tel_data_alloc();
5834 	if (rxq_state == NULL)
5835 		return -ENOMEM;
5836 
5837 	txq_state = rte_tel_data_alloc();
5838 	if (txq_state == NULL) {
5839 		rte_tel_data_free(rxq_state);
5840 		return -ENOMEM;
5841 	}
5842 
5843 	rte_tel_data_start_dict(d);
5844 	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
5845 	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
5846 	rte_tel_data_add_dict_int(d, "nb_rx_queues",
5847 			eth_dev->data->nb_rx_queues);
5848 	rte_tel_data_add_dict_int(d, "nb_tx_queues",
5849 			eth_dev->data->nb_tx_queues);
5850 	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
5851 	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
5852 	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
5853 			eth_dev->data->min_rx_buf_size);
5854 	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
5855 			eth_dev->data->rx_mbuf_alloc_failed);
5856 	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
5857 			eth_dev->data->mac_addrs);
5858 	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
5859 	rte_tel_data_add_dict_int(d, "promiscuous",
5860 			eth_dev->data->promiscuous);
5861 	rte_tel_data_add_dict_int(d, "scattered_rx",
5862 			eth_dev->data->scattered_rx);
5863 	rte_tel_data_add_dict_int(d, "all_multicast",
5864 			eth_dev->data->all_multicast);
5865 	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
5866 	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
5867 	rte_tel_data_add_dict_int(d, "dev_configured",
5868 			eth_dev->data->dev_configured);
5869 
5870 	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
5871 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
5872 		rte_tel_data_add_array_int(rxq_state,
5873 				eth_dev->data->rx_queue_state[i]);
5874 
5875 	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
5876 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
5877 		rte_tel_data_add_array_int(txq_state,
5878 				eth_dev->data->tx_queue_state[i]);
5879 
5880 	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
5881 	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
5882 	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
5883 	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
5884 	rte_tel_data_add_dict_int(d, "rx_offloads",
5885 			eth_dev->data->dev_conf.rxmode.offloads);
5886 	rte_tel_data_add_dict_int(d, "tx_offloads",
5887 			eth_dev->data->dev_conf.txmode.offloads);
5888 	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
5889 			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
5890 
5891 	return 0;
5892 }
5893 
5894 int
5895 rte_eth_representor_info_get(uint16_t port_id,
5896 			     struct rte_eth_representor_info *info)
5897 {
5898 	struct rte_eth_dev *dev;
5899 
5900 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5901 	dev = &rte_eth_devices[port_id];
5902 
5903 	if (*dev->dev_ops->representor_info_get == NULL)
5904 		return -ENOTSUP;
5905 	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
5906 }
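/*
 * Usage sketch (illustrative; assumes the driver follows the documented
 * convention of returning the number of representor ranges when info is
 * NULL). The count sizes the flexible "ranges" array for a second call:
 *
 *	int n = rte_eth_representor_info_get(port_id, NULL);
 *	struct rte_eth_representor_info *info =
 *		calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
 *	info->nb_ranges_alloc = n;
 *	n = rte_eth_representor_info_get(port_id, info);
 */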
5907 
5908 int
5909 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
5910 {
5911 	struct rte_eth_dev *dev;
5912 
5913 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5914 	dev = &rte_eth_devices[port_id];
5915 
5916 	if (dev->data->dev_configured != 0) {
5917 		RTE_ETHDEV_LOG(ERR,
5918 			"The port (ID=%"PRIu16") is already configured\n",
5919 			port_id);
5920 		return -EBUSY;
5921 	}
5922 
5923 	if (features == NULL) {
5924 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
5925 		return -EINVAL;
5926 	}
5927 
5928 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
5929 		return -ENOTSUP;
5930 	return eth_err(port_id,
5931 		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
5932 }
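/*
 * Usage sketch (illustrative): negotiation must precede the first
 * rte_eth_dev_configure() call, per the dev_configured check above; the
 * driver clears any feature bits it cannot deliver:
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *			(features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *		... Rx MARK delivery is available ...
 */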
5933 
5934 int
5935 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5936 		struct rte_eth_ip_reassembly_params *reassembly_capa)
5937 {
5938 	struct rte_eth_dev *dev;
5939 
5940 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5941 	dev = &rte_eth_devices[port_id];
5942 
5943 	if (dev->data->dev_configured == 0) {
5944 		RTE_ETHDEV_LOG(ERR,
5945 			"Device with port_id=%u is not configured.\n"
5946 			"Cannot get IP reassembly capability\n",
5947 			port_id);
5948 		return -EINVAL;
5949 	}
5950 
5951 	if (reassembly_capa == NULL) {
5952 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
5953 		return -EINVAL;
5954 	}
5955 
5956 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
5957 		return -ENOTSUP;
5958 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
5959 
5960 	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
5961 					(dev, reassembly_capa));
5962 }
5963 
5964 int
5965 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5966 		struct rte_eth_ip_reassembly_params *conf)
5967 {
5968 	struct rte_eth_dev *dev;
5969 
5970 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5971 	dev = &rte_eth_devices[port_id];
5972 
5973 	if (dev->data->dev_configured == 0) {
5974 		RTE_ETHDEV_LOG(ERR,
5975 			"Device with port_id=%u is not configured.\n"
5976 			"Cannot get IP reassembly configuration\n",
5977 			port_id);
5978 		return -EINVAL;
5979 	}
5980 
5981 	if (conf == NULL) {
5982 		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
5983 		return -EINVAL;
5984 	}
5985 
5986 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
5987 		return -ENOTSUP;
5988 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
5989 	return eth_err(port_id,
5990 		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
5991 }
5992 
5993 int
5994 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5995 		const struct rte_eth_ip_reassembly_params *conf)
5996 {
5997 	struct rte_eth_dev *dev;
5998 
5999 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6000 	dev = &rte_eth_devices[port_id];
6001 
6002 	if (dev->data->dev_configured == 0) {
6003 		RTE_ETHDEV_LOG(ERR,
6004 			"Device with port_id=%u is not configured.\n"
6005 			"Cannot set IP reassembly configuration\n",
6006 			port_id);
6007 		return -EINVAL;
6008 	}
6009 
6010 	if (dev->data->dev_started != 0) {
6011 		RTE_ETHDEV_LOG(ERR,
6012 			"Device with port_id=%u is started,\n"
6013 			"cannot configure IP reassembly params.\n",
6014 			port_id);
6015 		return -EINVAL;
6016 	}
6017 
6018 	if (conf == NULL) {
6019 		RTE_ETHDEV_LOG(ERR,
6020 				"Invalid IP reassembly configuration (NULL)\n");
6021 		return -EINVAL;
6022 	}
6023 
6024 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
6025 		return -ENOTSUP;
6026 	return eth_err(port_id,
6027 		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
6028 }
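/*
 * Usage sketch for the IP reassembly calls above (illustrative; error
 * handling abridged). Per the checks above, this must run after
 * rte_eth_dev_configure() but before rte_eth_dev_start(), and the
 * configuration should stay within the reported capability:
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	rte_eth_ip_reassembly_capability_get(port_id, &capa);
 *	rte_eth_ip_reassembly_conf_get(port_id, &conf);
 *	conf.max_frags = RTE_MIN(conf.max_frags, capa.max_frags);
 *	rte_eth_ip_reassembly_conf_set(port_id, &conf);
 */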
6029 
6030 int
6031 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
6032 {
6033 	struct rte_eth_dev *dev;
6034 
6035 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6036 	dev = &rte_eth_devices[port_id];
6037 
6038 	if (file == NULL) {
6039 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
6040 		return -EINVAL;
6041 	}
6042 
6043 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6044 		return -ENOTSUP;
6045 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6046 }
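/*
 * Usage sketch (illustrative): any stdio stream can serve as the sink,
 * e.g. dumping a port's driver-private state to the console:
 *
 *	if (rte_eth_dev_priv_dump(port_id, stdout) != 0)
 *		... the driver lacks the dump op or the dump failed ...
 */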
6047 
6048 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6049 
6050 RTE_INIT(ethdev_init_telemetry)
6051 {
6052 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6053 			"Returns list of available ethdev ports. Takes no parameters");
6054 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6055 			"Returns the common stats for a port. Parameters: int port_id");
6056 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6057 			"Returns the extended stats for a port. Parameters: int port_id");
6058 #ifndef RTE_EXEC_ENV_WINDOWS
6059 	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
6060 			"Returns a dump of private information for a port. Parameters: int port_id");
6061 #endif
6062 	rte_telemetry_register_cmd("/ethdev/link_status",
6063 			eth_dev_handle_port_link_status,
6064 			"Returns the link status for a port. Parameters: int port_id");
6065 	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
6066 			"Returns the device info for a port. Parameters: int port_id");
6067 	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
6068 			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
6069 }
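/*
 * The commands registered above are reachable over the telemetry socket,
 * e.g. via usertools/dpdk-telemetry.py (illustrative session; output
 * depends on the running application):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	{"/ethdev/list": [0]}
 *	--> /ethdev/xstats,0
 *	{"/ethdev/xstats": {"rx_good_packets": 100, ...}}
 */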
6070