/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR
#undef RTE_ETH_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle a pure class filter (i.e. without any bus-level argument)
	 * from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is temporarily parsed here.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume that parameters of the old syntax can match only at the
	 * ethdev level. Extra parameters will be ignored, thanks to the
	 * "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* The device matches the bus part; now check the ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev ports to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}

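/*
 * A minimal usage sketch for the iterator API above (illustrative only;
 * the RTE_ETH_FOREACH_MATCHING_DEV convenience macro from rte_ethdev.h
 * wraps the same init/next sequence):
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 *
 * rte_eth_iterator_next() cleans up automatically once iteration is
 * exhausted; call rte_eth_iterator_cleanup() only when stopping early.
 */
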
uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}
	/* cannot truncate, as both name fields have the same size */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All port owners owned by %016"PRIx64" identifier have been removed\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner ID=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}

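/*
 * A minimal sketch of the ownership API above (illustrative only; assumes
 * port_id refers to a valid, currently unowned port):
 *
 *	uint64_t owner_id;
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner_id) == 0) {
 *		owner.id = owner_id;
 *		if (rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *			// ... port is now reserved for this owner ...
 *			rte_eth_dev_owner_unset(port_id, owner_id);
 *		}
 *	}
 */
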
int
rte_eth_dev_socket_id(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}

	/* Don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a vdev PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

static int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}

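/*
 * For example (a sketch of the mapping above):
 * rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_100M, 1) yields
 * RTE_ETH_LINK_SPEED_100M, while duplex = 0 yields
 * RTE_ETH_LINK_SPEED_100M_HD. Speeds of 1G and above have no half-duplex
 * variant, and an unknown speed maps to 0, i.e. RTE_ETH_LINK_SPEED_AUTONEG.
 */
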
const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 *
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}

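/*
 * Worked example for the check above (illustrative): if the application
 * requested req_offloads = 0b0101 and the PMD ended up with
 * set_offloads = 0b0110, then offloads_diff = 0b0011. Bit 0 was requested
 * but not set (error, -EINVAL); bit 1 is set although it was not requested
 * (debug log only).
 */
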
static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}

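/*
 * For example (a sketch with common values): a device reporting
 * max_rx_pktlen = 1518 and max_mtu = 1500 yields an overhead of 18 bytes,
 * i.e. RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4); if the device does
 * not report a usable max_mtu, the same 18-byte default is assumed.
 */
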
/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is always 0 each time we prepare to
	 * call dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 when dev_configure() executes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf; copy it before the
	 * dev_info get call.
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If number of queues specified by application for both Rx and Tx is
	 * zero, use driver preferred values. This cannot be done individually
	 * as it is valid for either Tx or Rx (but not both) to be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported (%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported (%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested rss hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}

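/*
 * A minimal configuration sketch (illustrative only; one Rx and one Tx
 * queue, all other settings left at their zeroed defaults):
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		// handle the error; the original config has been rolled back
 *
 * Queue setup (rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup()) and
 * rte_eth_dev_start() must follow before the port can pass traffic.
 */
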
static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/* replay promiscuous configuration */
	/*
	 * Use the callbacks directly since we don't need the port_id check
	 * and would like to bypass setting the same value again.
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/* replay all multicast configuration */
	/*
	 * Use the callbacks directly since we don't need the port_id check
	 * and would like to bypass setting the same value again.
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Let's restore the MAC now if the device does not support live change */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * Secondary process needs to close device to release process private
	 * resources. But secondary process should not be obliged to wait
	 * for device stop before closing ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			       port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg,
			     uint16_t n_seg, uint32_t *mbp_buf_size,
			     const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			return -EINVAL;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			return -ENOTSUP;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				return -ENOTSUP;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				return -EINVAL;
			}
		}
		if (mpl->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR,
				       "%s private_data_size %u < %u\n",
				       mpl->name, mpl->private_data_size,
				       (unsigned int)sizeof
					(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		length = length != 0 ? length : *mbp_buf_size;
		if (*mbp_buf_size < length + offset) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (segment length=%u + segment offset=%u)\n",
				       mpl->name, *mbp_buf_size,
				       length + offset, length, offset);
			return -EINVAL;
		}
	}
	return 0;
}

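/*
 * A sketch of a two-segment buffer-split configuration validated above
 * (illustrative only; hdr_pool and pay_pool are hypothetical mempools
 * created by the application):
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128, .offset = 0 },
 *		{ .mp = pay_pool, .length = 0, .offset = 0 }, // 0: pool buf size
 *	};
 *	struct rte_eth_rxconf rxconf = dev_info.default_rxconf;
 *
 *	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 *	// then: rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket,
 *	//                              &rxconf, NULL);  // mp == NULL
 */
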
int
rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf local_conf;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_queue_setup == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (mp != NULL) {
		/* Single pool configuration check. */
		if (rx_conf != NULL && rx_conf->rx_nseg != 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Ambiguous segment configuration\n");
			return -EINVAL;
		}
		/*
		 * Check the size of the mbuf data buffer; this value
		 * must be provided in the private data of the memory pool.
		 * First check that the memory pool has valid private data.
		 */
		if (mp->private_data_size <
				sizeof(struct rte_pktmbuf_pool_private)) {
			RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
				mp->name, mp->private_data_size,
				(unsigned int)
				sizeof(struct rte_pktmbuf_pool_private));
			return -ENOSPC;
		}
		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
		if (mbp_buf_size < dev_info.min_rx_bufsize +
				   RTE_PKTMBUF_HEADROOM) {
			RTE_ETHDEV_LOG(ERR,
				       "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n",
				       mp->name, mbp_buf_size,
				       RTE_PKTMBUF_HEADROOM +
				       dev_info.min_rx_bufsize,
				       RTE_PKTMBUF_HEADROOM,
				       dev_info.min_rx_bufsize);
			return -EINVAL;
		}
	} else {
		const struct rte_eth_rxseg_split *rx_seg;
		uint16_t n_seg;

		/* Extended multi-segment configuration check. */
		if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) {
			RTE_ETHDEV_LOG(ERR,
				       "Memory pool is null and no extended configuration provided\n");
			return -EINVAL;
		}

		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
		n_seg = rx_conf->rx_nseg;

		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
			ret = rte_eth_rx_queue_check_split(rx_seg, n_seg,
							   &mbp_buf_size,
							   &dev_info);
			if (ret != 0)
				return ret;
		} else {
			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
			return -EINVAL;
		}
	}

	/* Use default specified by driver, if nb_rx_desc is zero */
	if (nb_rx_desc == 0) {
		nb_rx_desc = dev_info.default_rxportconf.ring_size;
		/* If driver default is also zero, fall back on EAL default */
		if (nb_rx_desc == 0)
			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_ETHDEV_LOG(ERR,
			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (dev->data->dev_started &&
		!(dev_info.dev_capa &
			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
		return -EBUSY;

	if (dev->data->dev_started &&
		(dev->data->rx_queue_state[rx_queue_id] !=
			RTE_ETH_QUEUE_STATE_STOPPED))
		return -EBUSY;

	eth_dev_rxq_release(dev, rx_queue_id);

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	local_conf = *rx_conf;

1844 	/*
1845 	 * If an offloading has already been enabled in
1846 	 * rte_eth_dev_configure(), it has been enabled on all queues,
1847 	 * so there is no need to enable it in this queue again.
1848 	 * The local_conf.offloads input to underlying PMD only carries
1849 	 * those offloadings which are only enabled on this queue and
1850 	 * not enabled on all queues.
1851 	 */
1852 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1853 
1854 	/*
1855 	 * The offloads newly added for this queue are those not enabled in
1856 	 * rte_eth_dev_configure(), and they must be of a per-queue type.
1857 	 * A pure per-port offload can't be enabled on one queue while
1858 	 * disabled on another. Likewise, a pure per-port offload can't be
1859 	 * newly added for a single queue if it hasn't been enabled in
1860 	 * rte_eth_dev_configure().
1861 	 */
1862 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1863 	     local_conf.offloads) {
1864 		RTE_ETHDEV_LOG(ERR,
1865 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
1866 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1867 			port_id, rx_queue_id, local_conf.offloads,
1868 			dev_info.rx_queue_offload_capa,
1869 			__func__);
1870 		return -EINVAL;
1871 	}
1872 
1873 	if (local_conf.share_group > 0 &&
1874 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
1875 		RTE_ETHDEV_LOG(ERR,
1876 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
1877 			port_id, rx_queue_id, local_conf.share_group);
1878 		return -EINVAL;
1879 	}
1880 
1881 	/*
1882 	 * If LRO is enabled, check that the maximum aggregated packet
1883 	 * size is supported by the configured device.
1884 	 */
1885 	/* Get the real Ethernet overhead length */
1886 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
1887 		uint32_t overhead_len;
1888 		uint32_t max_rx_pktlen;
1889 		int ret;
1890 
1891 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
1892 				dev_info.max_mtu);
1893 		max_rx_pktlen = dev->data->mtu + overhead_len;
1894 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1895 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
1896 		ret = eth_dev_check_lro_pkt_size(port_id,
1897 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
1898 				max_rx_pktlen,
1899 				dev_info.max_lro_pkt_size);
1900 		if (ret != 0)
1901 			return ret;
1902 	}
1903 
1904 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1905 					      socket_id, &local_conf, mp);
1906 	if (!ret) {
1907 		if (!dev->data->min_rx_buf_size ||
1908 		    dev->data->min_rx_buf_size > mbp_buf_size)
1909 			dev->data->min_rx_buf_size = mbp_buf_size;
1910 	}
1911 
1912 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1913 		rx_conf, ret);
1914 	return eth_err(port_id, ret);
1915 }
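
/*
 * Usage sketch (illustrative; assumes the port has been configured and
 * "mb_pool" is an existing pktmbuf pool):
 *
 *	struct rte_eth_dev_info info;
 *	struct rte_eth_rxconf rxq_conf;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	rxq_conf = info.default_rxconf;
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), &rxq_conf, mb_pool);
 */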
1916 
1917 int
1918 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1919 			       uint16_t nb_rx_desc,
1920 			       const struct rte_eth_hairpin_conf *conf)
1921 {
1922 	int ret;
1923 	struct rte_eth_dev *dev;
1924 	struct rte_eth_hairpin_cap cap;
1925 	int i;
1926 	int count;
1927 
1928 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1929 	dev = &rte_eth_devices[port_id];
1930 
1931 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1932 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1933 		return -EINVAL;
1934 	}
1935 
1936 	if (conf == NULL) {
1937 		RTE_ETHDEV_LOG(ERR,
1938 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
1939 			port_id);
1940 		return -EINVAL;
1941 	}
1942 
1943 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1944 	if (ret != 0)
1945 		return ret;
1946 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
1947 		return -ENOTSUP;
1948 	/* if nb_rx_desc is zero use max number of desc from the driver. */
1949 	if (nb_rx_desc == 0)
1950 		nb_rx_desc = cap.max_nb_desc;
1951 	if (nb_rx_desc > cap.max_nb_desc) {
1952 		RTE_ETHDEV_LOG(ERR,
1953 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
1954 			nb_rx_desc, cap.max_nb_desc);
1955 		return -EINVAL;
1956 	}
1957 	if (conf->peer_count > cap.max_rx_2_tx) {
1958 		RTE_ETHDEV_LOG(ERR,
1959 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
1960 			conf->peer_count, cap.max_rx_2_tx);
1961 		return -EINVAL;
1962 	}
1963 	if (conf->peer_count == 0) {
1964 		RTE_ETHDEV_LOG(ERR,
1965 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
1966 			conf->peer_count);
1967 		return -EINVAL;
1968 	}
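	/*
	 * Count this queue plus every Rx queue already set up as hairpin;
	 * the loop is skipped entirely when the driver reports no limit
	 * (cap.max_nb_queues == UINT16_MAX).
	 */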
1969 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1970 	     cap.max_nb_queues != UINT16_MAX; i++) {
1971 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1972 			count++;
1973 	}
1974 	if (count > cap.max_nb_queues) {
1975 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
1976 			cap.max_nb_queues);
1977 		return -EINVAL;
1978 	}
1979 	if (dev->data->dev_started)
1980 		return -EBUSY;
1981 	eth_dev_rxq_release(dev, rx_queue_id);
1982 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
1983 						      nb_rx_desc, conf);
1984 	if (ret == 0)
1985 		dev->data->rx_queue_state[rx_queue_id] =
1986 			RTE_ETH_QUEUE_STATE_HAIRPIN;
1987 	return eth_err(port_id, ret);
1988 }
1989 
1990 int
1991 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1992 		       uint16_t nb_tx_desc, unsigned int socket_id,
1993 		       const struct rte_eth_txconf *tx_conf)
1994 {
1995 	struct rte_eth_dev *dev;
1996 	struct rte_eth_dev_info dev_info;
1997 	struct rte_eth_txconf local_conf;
1998 	int ret;
1999 
2000 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2001 	dev = &rte_eth_devices[port_id];
2002 
2003 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2004 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2005 		return -EINVAL;
2006 	}
2007 
2008 	if (*dev->dev_ops->tx_queue_setup == NULL)
2009 		return -ENOTSUP;
2010 
2011 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2012 	if (ret != 0)
2013 		return ret;
2014 
2015 	/* Use default specified by driver, if nb_tx_desc is zero */
2016 	if (nb_tx_desc == 0) {
2017 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2018 		/* If driver default is zero, fall back on EAL default */
2019 		if (nb_tx_desc == 0)
2020 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2021 	}
2022 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2023 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2024 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2025 		RTE_ETHDEV_LOG(ERR,
2026 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2027 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2028 			dev_info.tx_desc_lim.nb_min,
2029 			dev_info.tx_desc_lim.nb_align);
2030 		return -EINVAL;
2031 	}
2032 
2033 	if (dev->data->dev_started &&
2034 		!(dev_info.dev_capa &
2035 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2036 		return -EBUSY;
2037 
2038 	if (dev->data->dev_started &&
2039 		(dev->data->tx_queue_state[tx_queue_id] !=
2040 			RTE_ETH_QUEUE_STATE_STOPPED))
2041 		return -EBUSY;
2042 
2043 	eth_dev_txq_release(dev, tx_queue_id);
2044 
2045 	if (tx_conf == NULL)
2046 		tx_conf = &dev_info.default_txconf;
2047 
2048 	local_conf = *tx_conf;
2049 
2050 	/*
2051 	 * If an offload has already been enabled in
2052 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2053 	 * so there is no need to enable it on this queue again.
2054 	 * The local_conf.offloads input to the underlying PMD only
2055 	 * carries those offloads that are enabled on this queue alone
2056 	 * and not on all queues.
2057 	 */
2058 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2059 
2060 	/*
2061 	 * The offloads newly added for this queue are those not enabled in
2062 	 * rte_eth_dev_configure(), and they must be of a per-queue type.
2063 	 * A pure per-port offload can't be enabled on one queue while
2064 	 * disabled on another. Likewise, a pure per-port offload can't be
2065 	 * newly added for a single queue if it hasn't been enabled in
2066 	 * rte_eth_dev_configure().
2067 	 */
2068 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2069 	     local_conf.offloads) {
2070 		RTE_ETHDEV_LOG(ERR,
2071 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2072 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2073 			port_id, tx_queue_id, local_conf.offloads,
2074 			dev_info.tx_queue_offload_capa,
2075 			__func__);
2076 		return -EINVAL;
2077 	}
2078 
2079 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2080 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2081 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2082 }
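
/*
 * Usage sketch (illustrative): a Tx queue is commonly created with the
 * driver defaults by passing a NULL tx_conf:
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL);
 */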
2083 
2084 int
2085 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2086 			       uint16_t nb_tx_desc,
2087 			       const struct rte_eth_hairpin_conf *conf)
2088 {
2089 	struct rte_eth_dev *dev;
2090 	struct rte_eth_hairpin_cap cap;
2091 	int i;
2092 	int count;
2093 	int ret;
2094 
2095 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2096 	dev = &rte_eth_devices[port_id];
2097 
2098 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2099 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2100 		return -EINVAL;
2101 	}
2102 
2103 	if (conf == NULL) {
2104 		RTE_ETHDEV_LOG(ERR,
2105 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2106 			port_id);
2107 		return -EINVAL;
2108 	}
2109 
2110 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2111 	if (ret != 0)
2112 		return ret;
2113 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2114 		return -ENOTSUP;
2115 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2116 	if (nb_tx_desc == 0)
2117 		nb_tx_desc = cap.max_nb_desc;
2118 	if (nb_tx_desc > cap.max_nb_desc) {
2119 		RTE_ETHDEV_LOG(ERR,
2120 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2121 			nb_tx_desc, cap.max_nb_desc);
2122 		return -EINVAL;
2123 	}
2124 	if (conf->peer_count > cap.max_tx_2_rx) {
2125 		RTE_ETHDEV_LOG(ERR,
2126 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2127 			conf->peer_count, cap.max_tx_2_rx);
2128 		return -EINVAL;
2129 	}
2130 	if (conf->peer_count == 0) {
2131 		RTE_ETHDEV_LOG(ERR,
2132 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2133 			conf->peer_count);
2134 		return -EINVAL;
2135 	}
2136 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2137 	     cap.max_nb_queues != UINT16_MAX; i++) {
2138 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2139 			count++;
2140 	}
2141 	if (count > cap.max_nb_queues) {
2142 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2143 			cap.max_nb_queues);
2144 		return -EINVAL;
2145 	}
2146 	if (dev->data->dev_started)
2147 		return -EBUSY;
2148 	eth_dev_txq_release(dev, tx_queue_id);
2149 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2150 		(dev, tx_queue_id, nb_tx_desc, conf);
2151 	if (ret == 0)
2152 		dev->data->tx_queue_state[tx_queue_id] =
2153 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2154 	return eth_err(port_id, ret);
2155 }
2156 
2157 int
2158 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2159 {
2160 	struct rte_eth_dev *dev;
2161 	int ret;
2162 
2163 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2164 	dev = &rte_eth_devices[tx_port];
2165 
2166 	if (dev->data->dev_started == 0) {
2167 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2168 		return -EBUSY;
2169 	}
2170 
2171 	if (*dev->dev_ops->hairpin_bind == NULL)
2172 		return -ENOTSUP;
2173 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2174 	if (ret != 0)
2175 		RTE_ETHDEV_LOG(ERR,
2176 			"Failed to bind hairpin Tx %d to Rx %d (%d - all ports)\n",
2177 			tx_port, rx_port, RTE_MAX_ETHPORTS);
2178 
2179 	return ret;
2180 }
2181 
2182 int
2183 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2184 {
2185 	struct rte_eth_dev *dev;
2186 	int ret;
2187 
2188 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2189 	dev = &rte_eth_devices[tx_port];
2190 
2191 	if (dev->data->dev_started == 0) {
2192 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2193 		return -EBUSY;
2194 	}
2195 
2196 	if (*dev->dev_ops->hairpin_unbind == NULL)
2197 		return -ENOTSUP;
2198 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2199 	if (ret != 0)
2200 		RTE_ETHDEV_LOG(ERR,
2201 			"Failed to unbind hairpin Tx %d from Rx %d (%d - all ports)\n",
2202 			tx_port, rx_port, RTE_MAX_ETHPORTS);
2203 
2204 	return ret;
2205 }
2206 
2207 int
2208 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2209 			       size_t len, uint32_t direction)
2210 {
2211 	struct rte_eth_dev *dev;
2212 	int ret;
2213 
2214 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2215 	dev = &rte_eth_devices[port_id];
2216 
2217 	if (peer_ports == NULL) {
2218 		RTE_ETHDEV_LOG(ERR,
2219 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2220 			port_id);
2221 		return -EINVAL;
2222 	}
2223 
2224 	if (len == 0) {
2225 		RTE_ETHDEV_LOG(ERR,
2226 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2227 			port_id);
2228 		return -EINVAL;
2229 	}
2230 
2231 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2232 		return -ENOTSUP;
2233 
2234 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2235 						      len, direction);
2236 	if (ret < 0)
2237 		RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2238 			       port_id, direction ? "Rx" : "Tx");
2239 
2240 	return ret;
2241 }
2242 
2243 void
2244 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2245 		void *userdata __rte_unused)
2246 {
2247 	rte_pktmbuf_free_bulk(pkts, unsent);
2248 }
2249 
2250 void
2251 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2252 		void *userdata)
2253 {
2254 	uint64_t *count = userdata;
2255 
2256 	rte_pktmbuf_free_bulk(pkts, unsent);
2257 	*count += unsent;
2258 }
2259 
2260 int
2261 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2262 		buffer_tx_error_fn cbfn, void *userdata)
2263 {
2264 	if (buffer == NULL) {
2265 		RTE_ETHDEV_LOG(ERR,
2266 			"Cannot set Tx buffer error callback to NULL buffer\n");
2267 		return -EINVAL;
2268 	}
2269 
2270 	buffer->error_callback = cbfn;
2271 	buffer->error_userdata = userdata;
2272 	return 0;
2273 }
2274 
2275 int
2276 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2277 {
2278 	int ret = 0;
2279 
2280 	if (buffer == NULL) {
2281 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2282 		return -EINVAL;
2283 	}
2284 
2285 	buffer->size = size;
2286 	if (buffer->error_callback == NULL) {
2287 		ret = rte_eth_tx_buffer_set_err_callback(
2288 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2289 	}
2290 
2291 	return ret;
2292 }
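
/*
 * Usage sketch (illustrative): size a buffer for up to 32 packets and
 * count drops instead of silently freeing them:
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *txb = rte_zmalloc("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *
 *	rte_eth_tx_buffer_init(txb, 32);
 *	rte_eth_tx_buffer_set_err_callback(txb,
 *			rte_eth_tx_buffer_count_callback, &drops);
 */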
2293 
2294 int
2295 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2296 {
2297 	struct rte_eth_dev *dev;
2298 	int ret;
2299 
2300 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2301 	dev = &rte_eth_devices[port_id];
2302 
2303 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2304 		return -ENOTSUP;
2305 
2306 	/* Call driver to free pending mbufs. */
2307 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2308 					       free_cnt);
2309 	return eth_err(port_id, ret);
2310 }
2311 
2312 int
2313 rte_eth_promiscuous_enable(uint16_t port_id)
2314 {
2315 	struct rte_eth_dev *dev;
2316 	int diag = 0;
2317 
2318 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2319 	dev = &rte_eth_devices[port_id];
2320 
2321 	if (dev->data->promiscuous == 1)
2322 		return 0;
2323 
2324 	if (*dev->dev_ops->promiscuous_enable == NULL)
2325 		return -ENOTSUP;
2326 
2327 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2328 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2329 
2330 	return eth_err(port_id, diag);
2331 }
2332 
2333 int
2334 rte_eth_promiscuous_disable(uint16_t port_id)
2335 {
2336 	struct rte_eth_dev *dev;
2337 	int diag = 0;
2338 
2339 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2340 	dev = &rte_eth_devices[port_id];
2341 
2342 	if (dev->data->promiscuous == 0)
2343 		return 0;
2344 
2345 	if (*dev->dev_ops->promiscuous_disable == NULL)
2346 		return -ENOTSUP;
2347 
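	/*
	 * Clear the flag before calling into the driver so the PMD sees
	 * the intended state; it is restored below if the driver fails.
	 */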
2348 	dev->data->promiscuous = 0;
2349 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2350 	if (diag != 0)
2351 		dev->data->promiscuous = 1;
2352 
2353 	return eth_err(port_id, diag);
2354 }
2355 
2356 int
2357 rte_eth_promiscuous_get(uint16_t port_id)
2358 {
2359 	struct rte_eth_dev *dev;
2360 
2361 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2362 	dev = &rte_eth_devices[port_id];
2363 
2364 	return dev->data->promiscuous;
2365 }
2366 
2367 int
2368 rte_eth_allmulticast_enable(uint16_t port_id)
2369 {
2370 	struct rte_eth_dev *dev;
2371 	int diag;
2372 
2373 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2374 	dev = &rte_eth_devices[port_id];
2375 
2376 	if (dev->data->all_multicast == 1)
2377 		return 0;
2378 
2379 	if (*dev->dev_ops->allmulticast_enable == NULL)
2380 		return -ENOTSUP;
2381 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2382 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2383 
2384 	return eth_err(port_id, diag);
2385 }
2386 
2387 int
2388 rte_eth_allmulticast_disable(uint16_t port_id)
2389 {
2390 	struct rte_eth_dev *dev;
2391 	int diag;
2392 
2393 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2394 	dev = &rte_eth_devices[port_id];
2395 
2396 	if (dev->data->all_multicast == 0)
2397 		return 0;
2398 
2399 	if (*dev->dev_ops->allmulticast_disable == NULL)
2400 		return -ENOTSUP;
2401 	dev->data->all_multicast = 0;
2402 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2403 	if (diag != 0)
2404 		dev->data->all_multicast = 1;
2405 
2406 	return eth_err(port_id, diag);
2407 }
2408 
2409 int
2410 rte_eth_allmulticast_get(uint16_t port_id)
2411 {
2412 	struct rte_eth_dev *dev;
2413 
2414 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2415 	dev = &rte_eth_devices[port_id];
2416 
2417 	return dev->data->all_multicast;
2418 }
2419 
2420 int
2421 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2422 {
2423 	struct rte_eth_dev *dev;
2424 
2425 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2426 	dev = &rte_eth_devices[port_id];
2427 
2428 	if (eth_link == NULL) {
2429 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2430 			port_id);
2431 		return -EINVAL;
2432 	}
2433 
2434 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2435 		rte_eth_linkstatus_get(dev, eth_link);
2436 	else {
2437 		if (*dev->dev_ops->link_update == NULL)
2438 			return -ENOTSUP;
2439 		(*dev->dev_ops->link_update)(dev, 1);
2440 		*eth_link = dev->data->dev_link;
2441 	}
2442 
2443 	return 0;
2444 }
2445 
2446 int
2447 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2448 {
2449 	struct rte_eth_dev *dev;
2450 
2451 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2452 	dev = &rte_eth_devices[port_id];
2453 
2454 	if (eth_link == NULL) {
2455 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2456 			port_id);
2457 		return -EINVAL;
2458 	}
2459 
2460 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2461 		rte_eth_linkstatus_get(dev, eth_link);
2462 	else {
2463 		if (*dev->dev_ops->link_update == NULL)
2464 			return -ENOTSUP;
2465 		(*dev->dev_ops->link_update)(dev, 0);
2466 		*eth_link = dev->data->dev_link;
2467 	}
2468 
2469 	return 0;
2470 }
2471 
2472 const char *
2473 rte_eth_link_speed_to_str(uint32_t link_speed)
2474 {
2475 	switch (link_speed) {
2476 	case RTE_ETH_SPEED_NUM_NONE: return "None";
2477 	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2478 	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2479 	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2480 	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2481 	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2482 	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2483 	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2484 	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2485 	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2486 	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2487 	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2488 	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2489 	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2490 	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2491 	default: return "Invalid";
2492 	}
2493 }
2494 
2495 int
2496 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2497 {
2498 	if (str == NULL) {
2499 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2500 		return -EINVAL;
2501 	}
2502 
2503 	if (len == 0) {
2504 		RTE_ETHDEV_LOG(ERR,
2505 			"Cannot convert link to string with zero size\n");
2506 		return -EINVAL;
2507 	}
2508 
2509 	if (eth_link == NULL) {
2510 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2511 		return -EINVAL;
2512 	}
2513 
2514 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2515 		return snprintf(str, len, "Link down");
2516 	else
2517 		return snprintf(str, len, "Link up at %s %s %s",
2518 			rte_eth_link_speed_to_str(eth_link->link_speed),
2519 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2520 			"FDX" : "HDX",
2521 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2522 			"Autoneg" : "Fixed");
2523 }
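
/*
 * Usage sketch (illustrative):
 *
 *	char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(buf, sizeof(buf), &link);
 *		printf("%s\n", buf);
 *	}
 */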
2524 
2525 int
2526 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2527 {
2528 	struct rte_eth_dev *dev;
2529 
2530 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2531 	dev = &rte_eth_devices[port_id];
2532 
2533 	if (stats == NULL) {
2534 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2535 			port_id);
2536 		return -EINVAL;
2537 	}
2538 
2539 	memset(stats, 0, sizeof(*stats));
2540 
2541 	if (*dev->dev_ops->stats_get == NULL)
2542 		return -ENOTSUP;
2543 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2544 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2545 }
2546 
2547 int
2548 rte_eth_stats_reset(uint16_t port_id)
2549 {
2550 	struct rte_eth_dev *dev;
2551 	int ret;
2552 
2553 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2554 	dev = &rte_eth_devices[port_id];
2555 
2556 	if (*dev->dev_ops->stats_reset == NULL)
2557 		return -ENOTSUP;
2558 	ret = (*dev->dev_ops->stats_reset)(dev);
2559 	if (ret != 0)
2560 		return eth_err(port_id, ret);
2561 
2562 	dev->data->rx_mbuf_alloc_failed = 0;
2563 
2564 	return 0;
2565 }
2566 
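/*
 * Number of xstats handled by the ethdev layer itself: the basic
 * per-port counters plus, when the PMD lets ethdev autofill queue stats
 * (RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS), one entry per basic per-queue
 * counter.
 */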
2567 static inline int
2568 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2569 {
2570 	uint16_t nb_rxqs, nb_txqs;
2571 	int count;
2572 
2573 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2574 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2575 
2576 	count = RTE_NB_STATS;
2577 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2578 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2579 		count += nb_txqs * RTE_NB_TXQ_STATS;
2580 	}
2581 
2582 	return count;
2583 }
2584 
2585 static int
2586 eth_dev_get_xstats_count(uint16_t port_id)
2587 {
2588 	struct rte_eth_dev *dev;
2589 	int count;
2590 
2591 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2592 	dev = &rte_eth_devices[port_id];
2593 	if (dev->dev_ops->xstats_get_names != NULL) {
2594 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2595 		if (count < 0)
2596 			return eth_err(port_id, count);
2597 	} else
2598 		count = 0;
2599 
2601 	count += eth_dev_get_xstats_basic_count(dev);
2602 
2603 	return count;
2604 }
2605 
2606 int
2607 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2608 		uint64_t *id)
2609 {
2610 	int cnt_xstats, idx_xstat;
2611 
2612 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2613 
2614 	if (xstat_name == NULL) {
2615 		RTE_ETHDEV_LOG(ERR,
2616 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2617 			port_id);
2618 		return -ENOMEM;
2619 	}
2620 
2621 	if (id == NULL) {
2622 		RTE_ETHDEV_LOG(ERR,
2623 			"Cannot get ethdev port %u xstats ID to NULL\n",
2624 			port_id);
2625 		return -ENOMEM;
2626 	}
2627 
2628 	/* Get count */
2629 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2630 	if (cnt_xstats < 0) {
2631 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2632 		return -ENODEV;
2633 	}
2634 
2635 	/* Get id-name lookup table */
2636 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2637 
2638 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2639 			port_id, xstats_names, cnt_xstats, NULL)) {
2640 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2641 		return -1;
2642 	}
2643 
2644 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2645 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2646 			*id = idx_xstat;
2647 			return 0;
2648 		}
2649 	}
2650 
2651 	return -EINVAL;
2652 }
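
/*
 * Usage sketch (illustrative): resolve a counter name once, then read
 * its current value by id:
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0)
 *		rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
 */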
2653 
2654 /* retrieve basic stats names */
2655 static int
2656 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2657 	struct rte_eth_xstat_name *xstats_names)
2658 {
2659 	int cnt_used_entries = 0;
2660 	uint32_t idx, id_queue;
2661 	uint16_t num_q;
2662 
2663 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2664 		strlcpy(xstats_names[cnt_used_entries].name,
2665 			eth_dev_stats_strings[idx].name,
2666 			sizeof(xstats_names[0].name));
2667 		cnt_used_entries++;
2668 	}
2669 
2670 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2671 		return cnt_used_entries;
2672 
2673 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2674 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2675 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2676 			snprintf(xstats_names[cnt_used_entries].name,
2677 				sizeof(xstats_names[0].name),
2678 				"rx_q%u_%s",
2679 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2680 			cnt_used_entries++;
2681 		}
2682 
2683 	}
2684 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2685 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2686 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2687 			snprintf(xstats_names[cnt_used_entries].name,
2688 				sizeof(xstats_names[0].name),
2689 				"tx_q%u_%s",
2690 				id_queue, eth_dev_txq_stats_strings[idx].name);
2691 			cnt_used_entries++;
2692 		}
2693 	}
2694 	return cnt_used_entries;
2695 }
2696 
2697 /* retrieve ethdev extended statistics names */
2698 int
2699 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2700 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2701 	uint64_t *ids)
2702 {
2703 	struct rte_eth_xstat_name *xstats_names_copy;
2704 	unsigned int no_basic_stat_requested = 1;
2705 	unsigned int no_ext_stat_requested = 1;
2706 	unsigned int expected_entries;
2707 	unsigned int basic_count;
2708 	struct rte_eth_dev *dev;
2709 	unsigned int i;
2710 	int ret;
2711 
2712 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2713 	dev = &rte_eth_devices[port_id];
2714 
2715 	basic_count = eth_dev_get_xstats_basic_count(dev);
2716 	ret = eth_dev_get_xstats_count(port_id);
2717 	if (ret < 0)
2718 		return ret;
2719 	expected_entries = (unsigned int)ret;
2720 
2721 	/* Return max number of stats if no ids given */
2722 	if (!ids) {
2723 		if (!xstats_names)
2724 			return expected_entries;
2725 		else if (size < expected_entries)
2726 			return expected_entries;
2727 	}
2728 
2729 	if (ids && !xstats_names)
2730 		return -EINVAL;
2731 
2732 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2733 		uint64_t ids_copy[size];
2734 
2735 		for (i = 0; i < size; i++) {
2736 			if (ids[i] < basic_count) {
2737 				no_basic_stat_requested = 0;
2738 				break;
2739 			}
2740 
2741 			/*
2742 			 * Convert ids to xstats ids that PMD knows.
2743 			 * ids known by user are basic + extended stats.
2744 			 */
2745 			ids_copy[i] = ids[i] - basic_count;
2746 		}
2747 
2748 		if (no_basic_stat_requested)
2749 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2750 					ids_copy, xstats_names, size);
2751 	}
2752 
2753 	/* Retrieve all stats */
2754 	if (!ids) {
2755 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2756 				expected_entries);
2757 		if (num_stats < 0 || num_stats > (int)expected_entries)
2758 			return num_stats;
2759 		else
2760 			return expected_entries;
2761 	}
2762 
2763 	xstats_names_copy = calloc(expected_entries,
2764 		sizeof(struct rte_eth_xstat_name));
2765 
2766 	if (!xstats_names_copy) {
2767 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2768 		return -ENOMEM;
2769 	}
2770 
2771 	if (ids) {
2772 		for (i = 0; i < size; i++) {
2773 			if (ids[i] >= basic_count) {
2774 				no_ext_stat_requested = 0;
2775 				break;
2776 			}
2777 		}
2778 	}
2779 
2780 	/* Fill xstats_names_copy structure */
2781 	if (ids && no_ext_stat_requested) {
2782 		eth_basic_stats_get_names(dev, xstats_names_copy);
2783 	} else {
2784 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2785 			expected_entries);
2786 		if (ret < 0) {
2787 			free(xstats_names_copy);
2788 			return ret;
2789 		}
2790 	}
2791 
2792 	/* Filter stats */
2793 	for (i = 0; i < size; i++) {
2794 		if (ids[i] >= expected_entries) {
2795 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2796 			free(xstats_names_copy);
2797 			return -1;
2798 		}
2799 		xstats_names[i] = xstats_names_copy[ids[i]];
2800 	}
2801 
2802 	free(xstats_names_copy);
2803 	return size;
2804 }
2805 
2806 int
2807 rte_eth_xstats_get_names(uint16_t port_id,
2808 	struct rte_eth_xstat_name *xstats_names,
2809 	unsigned int size)
2810 {
2811 	struct rte_eth_dev *dev;
2812 	int cnt_used_entries;
2813 	int cnt_expected_entries;
2814 	int cnt_driver_entries;
2815 
2816 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
2817 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
2818 			(int)size < cnt_expected_entries)
2819 		return cnt_expected_entries;
2820 
2821 	/* port_id checked in eth_dev_get_xstats_count() */
2822 	dev = &rte_eth_devices[port_id];
2823 
2824 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
2825 
2826 	if (dev->dev_ops->xstats_get_names != NULL) {
2827 		/* If there are any driver-specific xstats, append them
2828 		 * to end of list.
2829 		 */
2830 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2831 			dev,
2832 			xstats_names + cnt_used_entries,
2833 			size - cnt_used_entries);
2834 		if (cnt_driver_entries < 0)
2835 			return eth_err(port_id, cnt_driver_entries);
2836 		cnt_used_entries += cnt_driver_entries;
2837 	}
2838 
2839 	return cnt_used_entries;
2840 }
2841 
2843 static int
2844 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2845 {
2846 	struct rte_eth_dev *dev;
2847 	struct rte_eth_stats eth_stats;
2848 	unsigned int count = 0, i, q;
2849 	uint64_t val, *stats_ptr;
2850 	uint16_t nb_rxqs, nb_txqs;
2851 	int ret;
2852 
2853 	ret = rte_eth_stats_get(port_id, &eth_stats);
2854 	if (ret < 0)
2855 		return ret;
2856 
2857 	dev = &rte_eth_devices[port_id];
2858 
2859 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2860 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2861 
2862 	/* global stats */
2863 	for (i = 0; i < RTE_NB_STATS; i++) {
2864 		stats_ptr = RTE_PTR_ADD(&eth_stats,
2865 					eth_dev_stats_strings[i].offset);
2866 		val = *stats_ptr;
2867 		xstats[count++].value = val;
2868 	}
2869 
2870 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2871 		return count;
2872 
2873 	/* per-rxq stats */
2874 	for (q = 0; q < nb_rxqs; q++) {
2875 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2876 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2877 					eth_dev_rxq_stats_strings[i].offset +
2878 					q * sizeof(uint64_t));
2879 			val = *stats_ptr;
2880 			xstats[count++].value = val;
2881 		}
2882 	}
2883 
2884 	/* per-txq stats */
2885 	for (q = 0; q < nb_txqs; q++) {
2886 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2887 			stats_ptr = RTE_PTR_ADD(&eth_stats,
2888 					eth_dev_txq_stats_strings[i].offset +
2889 					q * sizeof(uint64_t));
2890 			val = *stats_ptr;
2891 			xstats[count++].value = val;
2892 		}
2893 	}
2894 	return count;
2895 }
2896 
2897 /* retrieve ethdev extended statistics */
2898 int
2899 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2900 			 uint64_t *values, unsigned int size)
2901 {
2902 	unsigned int no_basic_stat_requested = 1;
2903 	unsigned int no_ext_stat_requested = 1;
2904 	unsigned int num_xstats_filled;
2905 	unsigned int basic_count;
2906 	uint16_t expected_entries;
2907 	struct rte_eth_dev *dev;
2908 	unsigned int i;
2909 	int ret;
2910 
2911 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2912 	dev = &rte_eth_devices[port_id];
2913 
2914 	ret = eth_dev_get_xstats_count(port_id);
2915 	if (ret < 0)
2916 		return ret;
2917 	expected_entries = (uint16_t)ret;
2918 	struct rte_eth_xstat xstats[expected_entries];
2919 	basic_count = eth_dev_get_xstats_basic_count(dev);
2920 
2921 	/* Return max number of stats if no ids given */
2922 	if (!ids) {
2923 		if (!values)
2924 			return expected_entries;
2925 		else if (size < expected_entries)
2926 			return expected_entries;
2927 	}
2928 
2929 	if (ids && !values)
2930 		return -EINVAL;
2931 
2932 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2934 		uint64_t ids_copy[size];
2935 
2936 		for (i = 0; i < size; i++) {
2937 			if (ids[i] < basic_count) {
2938 				no_basic_stat_requested = 0;
2939 				break;
2940 			}
2941 
2942 			/*
2943 			 * Convert ids to xstats ids that PMD knows.
2944 			 * ids known by user are basic + extended stats.
2945 			 */
2946 			ids_copy[i] = ids[i] - basic_count;
2947 		}
2948 
2949 		if (no_basic_stat_requested)
2950 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2951 					values, size);
2952 	}
2953 
2954 	if (ids) {
2955 		for (i = 0; i < size; i++) {
2956 			if (ids[i] >= basic_count) {
2957 				no_ext_stat_requested = 0;
2958 				break;
2959 			}
2960 		}
2961 	}
2962 
2963 	/* Fill the xstats structure */
2964 	if (ids && no_ext_stat_requested)
2965 		ret = eth_basic_stats_get(port_id, xstats);
2966 	else
2967 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2968 
2969 	if (ret < 0)
2970 		return ret;
2971 	num_xstats_filled = (unsigned int)ret;
2972 
2973 	/* Return all stats */
2974 	if (!ids) {
2975 		for (i = 0; i < num_xstats_filled; i++)
2976 			values[i] = xstats[i].value;
2977 		return expected_entries;
2978 	}
2979 
2980 	/* Filter stats */
2981 	for (i = 0; i < size; i++) {
2982 		if (ids[i] >= expected_entries) {
2983 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2984 			return -1;
2985 		}
2986 		values[i] = xstats[ids[i]].value;
2987 	}
2988 	return size;
2989 }
2990 
2991 int
2992 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2993 	unsigned int n)
2994 {
2995 	struct rte_eth_dev *dev;
2996 	unsigned int count, i;
2997 	signed int xcount = 0;
2998 	int ret;
2999 
3000 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3001 	if (xstats == NULL && n > 0)
3002 		return -EINVAL;
3003 	dev = &rte_eth_devices[port_id];
3004 
3005 	count = eth_dev_get_xstats_basic_count(dev);
3006 
3007 	/* implemented by the driver */
3008 	if (dev->dev_ops->xstats_get != NULL) {
3009 		/* Retrieve the xstats from the driver at the end of the
3010 		 * xstats struct.
3011 		 */
3012 		xcount = (*dev->dev_ops->xstats_get)(dev,
3013 				     (n > count) ? xstats + count : NULL,
3014 				     (n > count) ? n - count : 0);
3015 
3016 		if (xcount < 0)
3017 			return eth_err(port_id, xcount);
3018 	}
3019 
3020 	if (n < count + xcount || xstats == NULL)
3021 		return count + xcount;
3022 
3023 	/* now fill the xstats structure */
3024 	ret = eth_basic_stats_get(port_id, xstats);
3025 	if (ret < 0)
3026 		return ret;
3027 	count = ret;
3028 
3029 	for (i = 0; i < count; i++)
3030 		xstats[i].id = i;
3031 	/* add an offset to driver-specific stats */
3032 	for ( ; i < count + xcount; i++)
3033 		xstats[i].id += count;
3034 
3035 	return count + xcount;
3036 }
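
/*
 * Usage sketch (illustrative): the usual two-call pattern, first sizing
 * the array, then fetching ids and values together:
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = malloc(n * sizeof(*xs));
 *
 *	if (xs != NULL && rte_eth_xstats_get(port_id, xs, n) == n)
 *		for (i = 0; i < n; i++)
 *			... read xs[i].id and xs[i].value ...
 */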
3037 
3038 /* reset ethdev extended statistics */
3039 int
3040 rte_eth_xstats_reset(uint16_t port_id)
3041 {
3042 	struct rte_eth_dev *dev;
3043 
3044 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3045 	dev = &rte_eth_devices[port_id];
3046 
3047 	/* implemented by the driver */
3048 	if (dev->dev_ops->xstats_reset != NULL)
3049 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3050 
3051 	/* fallback to default */
3052 	return rte_eth_stats_reset(port_id);
3053 }
3054 
3055 static int
3056 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3057 		uint8_t stat_idx, uint8_t is_rx)
3058 {
3059 	struct rte_eth_dev *dev;
3060 
3061 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3062 	dev = &rte_eth_devices[port_id];
3063 
3064 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3065 		return -EINVAL;
3066 
3067 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3068 		return -EINVAL;
3069 
3070 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3071 		return -EINVAL;
3072 
3073 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3074 		return -ENOTSUP;
3075 	return (*dev->dev_ops->queue_stats_mapping_set)(dev, queue_id, stat_idx, is_rx);
3076 }
3077 
3078 int
3079 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3080 		uint8_t stat_idx)
3081 {
3082 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3083 						tx_queue_id,
3084 						stat_idx, STAT_QMAP_TX));
3085 }
3086 
3087 int
3088 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3089 		uint8_t stat_idx)
3090 {
3091 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3092 						rx_queue_id,
3093 						stat_idx, STAT_QMAP_RX));
3094 }
3095 
3096 int
3097 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3098 {
3099 	struct rte_eth_dev *dev;
3100 
3101 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3102 	dev = &rte_eth_devices[port_id];
3103 
3104 	if (fw_version == NULL && fw_size > 0) {
3105 		RTE_ETHDEV_LOG(ERR,
3106 			"Cannot get ethdev port %u FW version to NULL when string size is non-zero\n",
3107 			port_id);
3108 		return -EINVAL;
3109 	}
3110 
3111 	if (*dev->dev_ops->fw_version_get == NULL)
3112 		return -ENOTSUP;
3113 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3114 							fw_version, fw_size));
3115 }
3116 
3117 int
3118 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3119 {
3120 	struct rte_eth_dev *dev;
3121 	const struct rte_eth_desc_lim lim = {
3122 		.nb_max = UINT16_MAX,
3123 		.nb_min = 0,
3124 		.nb_align = 1,
3125 		.nb_seg_max = UINT16_MAX,
3126 		.nb_mtu_seg_max = UINT16_MAX,
3127 	};
3128 	int diag;
3129 
3130 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3131 	dev = &rte_eth_devices[port_id];
3132 
3133 	if (dev_info == NULL) {
3134 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3135 			port_id);
3136 		return -EINVAL;
3137 	}
3138 
3139 	/*
3140 	 * Initialize dev_info up front so the caller still sees sane,
3141 	 * zeroed defaults even if it ignores this function's return status.
3142 	 */
3143 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3144 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3145 
3146 	dev_info->rx_desc_lim = lim;
3147 	dev_info->tx_desc_lim = lim;
3148 	dev_info->device = dev->device;
3149 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3150 		RTE_ETHER_CRC_LEN;
3151 	dev_info->max_mtu = UINT16_MAX;
3152 
3153 	if (*dev->dev_ops->dev_infos_get == NULL)
3154 		return -ENOTSUP;
3155 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3156 	if (diag != 0) {
3157 		/* Cleanup already filled in device information */
3158 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3159 		return eth_err(port_id, diag);
3160 	}
3161 
3162 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3163 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3164 			RTE_MAX_QUEUES_PER_PORT);
3165 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3166 			RTE_MAX_QUEUES_PER_PORT);
3167 
3168 	dev_info->driver_name = dev->device->driver->name;
3169 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3170 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3171 
3172 	dev_info->dev_flags = &dev->data->dev_flags;
3173 
3174 	return 0;
3175 }
3176 
3177 int
3178 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3179 {
3180 	struct rte_eth_dev *dev;
3181 
3182 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3183 	dev = &rte_eth_devices[port_id];
3184 
3185 	if (dev_conf == NULL) {
3186 		RTE_ETHDEV_LOG(ERR,
3187 			"Cannot get ethdev port %u configuration to NULL\n",
3188 			port_id);
3189 		return -EINVAL;
3190 	}
3191 
3192 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3193 
3194 	return 0;
3195 }
3196 
3197 int
3198 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3199 				 uint32_t *ptypes, int num)
3200 {
3201 	int i, j;
3202 	struct rte_eth_dev *dev;
3203 	const uint32_t *all_ptypes;
3204 
3205 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3206 	dev = &rte_eth_devices[port_id];
3207 
3208 	if (ptypes == NULL && num > 0) {
3209 		RTE_ETHDEV_LOG(ERR,
3210 			"Cannot get ethdev port %u supported packet types to NULL when array size is non-zero\n",
3211 			port_id);
3212 		return -EINVAL;
3213 	}
3214 
3215 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3216 		return 0;
3217 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3218 
3219 	if (!all_ptypes)
3220 		return 0;
3221 
3222 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3223 		if (all_ptypes[i] & ptype_mask) {
3224 			if (j < num)
3225 				ptypes[j] = all_ptypes[i];
3226 			j++;
3227 		}
3228 
3229 	return j;
3230 }
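
/*
 * Usage sketch (illustrative): the return value is the number of
 * matching ptypes even when it exceeds the supplied array size, so the
 * same two-call sizing pattern applies:
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id,
 *			RTE_PTYPE_L3_MASK, NULL, 0);
 *	... allocate n entries and call again to fill them ...
 */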
3231 
3232 int
3233 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3234 				 uint32_t *set_ptypes, unsigned int num)
3235 {
3236 	const uint32_t valid_ptype_masks[] = {
3237 		RTE_PTYPE_L2_MASK,
3238 		RTE_PTYPE_L3_MASK,
3239 		RTE_PTYPE_L4_MASK,
3240 		RTE_PTYPE_TUNNEL_MASK,
3241 		RTE_PTYPE_INNER_L2_MASK,
3242 		RTE_PTYPE_INNER_L3_MASK,
3243 		RTE_PTYPE_INNER_L4_MASK,
3244 	};
3245 	const uint32_t *all_ptypes;
3246 	struct rte_eth_dev *dev;
3247 	uint32_t unused_mask;
3248 	unsigned int i, j;
3249 	int ret;
3250 
3251 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252 	dev = &rte_eth_devices[port_id];
3253 
3254 	if (num > 0 && set_ptypes == NULL) {
3255 		RTE_ETHDEV_LOG(ERR,
3256 			"Cannot get ethdev port %u set packet types to NULL when array size is non-zero\n",
3257 			port_id);
3258 		return -EINVAL;
3259 	}
3260 
3261 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3262 			*dev->dev_ops->dev_ptypes_set == NULL) {
3263 		ret = 0;
3264 		goto ptype_unknown;
3265 	}
3266 
3267 	if (ptype_mask == 0) {
3268 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3269 				ptype_mask);
3270 		goto ptype_unknown;
3271 	}
3272 
3273 	unused_mask = ptype_mask;
3274 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3275 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3276 		if (mask && mask != valid_ptype_masks[i]) {
3277 			ret = -EINVAL;
3278 			goto ptype_unknown;
3279 		}
3280 		unused_mask &= ~valid_ptype_masks[i];
3281 	}
3282 
3283 	if (unused_mask) {
3284 		ret = -EINVAL;
3285 		goto ptype_unknown;
3286 	}
3287 
3288 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3289 	if (all_ptypes == NULL) {
3290 		ret = 0;
3291 		goto ptype_unknown;
3292 	}
3293 
3294 	/*
3295 	 * Accommodate as many set_ptypes as possible; if the array is too
3296 	 * small, fill it partially, keeping one slot for the terminator.
3297 	 */
3298 	for (i = 0, j = 0; set_ptypes != NULL &&
3299 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3300 		if (ptype_mask & all_ptypes[i]) {
3301 			if (j < num - 1) {
3302 				set_ptypes[j] = all_ptypes[i];
3303 				j++;
3304 				continue;
3305 			}
3306 			break;
3307 		}
3308 	}
3309 
3310 	if (set_ptypes != NULL && j < num)
3311 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3312 
3313 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3314 
3315 ptype_unknown:
3316 	if (num > 0)
3317 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3318 
3319 	return ret;
3320 }
3321 
3322 int
3323 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3324 	unsigned int num)
3325 {
3326 	int32_t ret;
3327 	struct rte_eth_dev *dev;
3328 	struct rte_eth_dev_info dev_info;
3329 
3330 	if (ma == NULL) {
3331 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3332 		return -EINVAL;
3333 	}
3334 
3335 	/* rte_eth_dev_info_get() also validates port_id for us */
3336 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3337 	if (ret != 0)
3338 		return ret;
3339 
3340 	dev = &rte_eth_devices[port_id];
3341 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3342 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3343 
3344 	return num;
3345 }
3346 
3347 int
3348 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3349 {
3350 	struct rte_eth_dev *dev;
3351 
3352 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3353 	dev = &rte_eth_devices[port_id];
3354 
3355 	if (mac_addr == NULL) {
3356 		RTE_ETHDEV_LOG(ERR,
3357 			"Cannot get ethdev port %u MAC address to NULL\n",
3358 			port_id);
3359 		return -EINVAL;
3360 	}
3361 
3362 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3363 
3364 	return 0;
3365 }
3366 
3367 int
3368 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3369 {
3370 	struct rte_eth_dev *dev;
3371 
3372 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3373 	dev = &rte_eth_devices[port_id];
3374 
3375 	if (mtu == NULL) {
3376 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3377 			port_id);
3378 		return -EINVAL;
3379 	}
3380 
3381 	*mtu = dev->data->mtu;
3382 	return 0;
3383 }
3384 
3385 int
3386 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3387 {
3388 	int ret;
3389 	struct rte_eth_dev_info dev_info;
3390 	struct rte_eth_dev *dev;
3391 
3392 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3393 	dev = &rte_eth_devices[port_id];
3394 	if (*dev->dev_ops->mtu_set == NULL)
3395 		return -ENOTSUP;
3396 
3397 	/*
3398 	 * Check if the device supports dev_infos_get, if it does not
3399 	 * skip min_mtu/max_mtu validation here as this requires values
3400 	 * that are populated within the call to rte_eth_dev_info_get()
3401 	 * which relies on dev->dev_ops->dev_infos_get.
3402 	 */
3403 	if (*dev->dev_ops->dev_infos_get != NULL) {
3404 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3405 		if (ret != 0)
3406 			return ret;
3407 
3408 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3409 		if (ret != 0)
3410 			return ret;
3411 	}
3412 
3413 	if (dev->data->dev_configured == 0) {
3414 		RTE_ETHDEV_LOG(ERR,
3415 			"Port %u must be configured before MTU set\n",
3416 			port_id);
3417 		return -EINVAL;
3418 	}
3419 
3420 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3421 	if (ret == 0)
3422 		dev->data->mtu = mtu;
3423 
3424 	return eth_err(port_id, ret);
3425 }
3426 
3427 int
3428 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3429 {
3430 	struct rte_eth_dev *dev;
3431 	int ret;
3432 
3433 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3434 	dev = &rte_eth_devices[port_id];
3435 
3436 	if (!(dev->data->dev_conf.rxmode.offloads &
3437 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3438 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3439 			port_id);
3440 		return -ENOSYS;
3441 	}
3442 
3443 	if (vlan_id > 4095) {
3444 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3445 			port_id, vlan_id);
3446 		return -EINVAL;
3447 	}
3448 	if (*dev->dev_ops->vlan_filter_set == NULL)
3449 		return -ENOTSUP;
3450 
3451 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3452 	if (ret == 0) {
3453 		struct rte_vlan_filter_conf *vfc;
3454 		int vidx;
3455 		int vbit;
3456 
3457 		vfc = &dev->data->vlan_filter_conf;
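		/* 4096 VLAN ids are tracked as a bitmap of 64 uint64_t words */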
3458 		vidx = vlan_id / 64;
3459 		vbit = vlan_id % 64;
3460 
3461 		if (on)
3462 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3463 		else
3464 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3465 	}
3466 
3467 	return eth_err(port_id, ret);
3468 }
3469 
3470 int
3471 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3472 				    int on)
3473 {
3474 	struct rte_eth_dev *dev;
3475 
3476 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3477 	dev = &rte_eth_devices[port_id];
3478 
3479 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3480 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3481 		return -EINVAL;
3482 	}
3483 
3484 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
3485 		return -ENOTSUP;
3486 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3487 
3488 	return 0;
3489 }
3490 
3491 int
3492 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3493 				enum rte_vlan_type vlan_type,
3494 				uint16_t tpid)
3495 {
3496 	struct rte_eth_dev *dev;
3497 
3498 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3499 	dev = &rte_eth_devices[port_id];
3500 
3501 	if (*dev->dev_ops->vlan_tpid_set == NULL)
3502 		return -ENOTSUP;
3503 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3504 							       tpid));
3505 }
3506 
3507 int
3508 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3509 {
3510 	struct rte_eth_dev_info dev_info;
3511 	struct rte_eth_dev *dev;
3512 	int ret = 0;
3513 	int mask = 0;
3514 	int cur, org = 0;
3515 	uint64_t orig_offloads;
3516 	uint64_t dev_offloads;
3517 	uint64_t new_offloads;
3518 
3519 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3520 	dev = &rte_eth_devices[port_id];
3521 
3522 	/* save original values in case of failure */
3523 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3524 	dev_offloads = orig_offloads;
3525 
3526 	/* check which option changed by application */
3527 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3528 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3529 	if (cur != org) {
3530 		if (cur)
3531 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3532 		else
3533 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3534 		mask |= RTE_ETH_VLAN_STRIP_MASK;
3535 	}
3536 
3537 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3538 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3539 	if (cur != org) {
3540 		if (cur)
3541 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3542 		else
3543 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3544 		mask |= RTE_ETH_VLAN_FILTER_MASK;
3545 	}
3546 
3547 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3548 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3549 	if (cur != org) {
3550 		if (cur)
3551 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3552 		else
3553 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3554 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
3555 	}
3556 
3557 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3558 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3559 	if (cur != org) {
3560 		if (cur)
3561 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3562 		else
3563 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3564 		mask |= RTE_ETH_QINQ_STRIP_MASK;
3565 	}
3566 
3567 	/* no change */
3568 	if (mask == 0)
3569 		return ret;
3570 
3571 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3572 	if (ret != 0)
3573 		return ret;
3574 
3575 	/* Rx VLAN offloading must be within its device capabilities */
3576 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3577 		new_offloads = dev_offloads & ~orig_offloads;
3578 		RTE_ETHDEV_LOG(ERR,
3579 			"Ethdev port_id=%u requested newly added VLAN offloads "
3580 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3581 			"0x%" PRIx64 " in %s()\n",
3582 			port_id, new_offloads, dev_info.rx_offload_capa,
3583 			__func__);
3584 		return -EINVAL;
3585 	}
3586 
3587 	if (*dev->dev_ops->vlan_offload_set == NULL)
3588 		return -ENOTSUP;
3589 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3590 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3591 	if (ret) {
3592 		/* hit an error, restore the original values */
3593 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
3594 	}
3595 
3596 	return eth_err(port_id, ret);
3597 }
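
/*
 * Usage sketch (illustrative): the offload_mask argument describes the
 * full desired VLAN offload state, so callers usually read-modify-write:
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, mask);
 */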
3598 
3599 int
3600 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3601 {
3602 	struct rte_eth_dev *dev;
3603 	uint64_t *dev_offloads;
3604 	int ret = 0;
3605 
3606 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3607 	dev = &rte_eth_devices[port_id];
3608 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3609 
3610 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3611 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3612 
3613 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3614 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3615 
3616 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3617 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3618 
3619 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3620 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3621 
3622 	return ret;
3623 }
3624 
3625 int
3626 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3627 {
3628 	struct rte_eth_dev *dev;
3629 
3630 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3631 	dev = &rte_eth_devices[port_id];
3632 
3633 	if (*dev->dev_ops->vlan_pvid_set == NULL)
3634 		return -ENOTSUP;
3635 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3636 }
3637 
3638 int
3639 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3640 {
3641 	struct rte_eth_dev *dev;
3642 
3643 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3644 	dev = &rte_eth_devices[port_id];
3645 
3646 	if (fc_conf == NULL) {
3647 		RTE_ETHDEV_LOG(ERR,
3648 			"Cannot get ethdev port %u flow control config to NULL\n",
3649 			port_id);
3650 		return -EINVAL;
3651 	}
3652 
3653 	if (*dev->dev_ops->flow_ctrl_get == NULL)
3654 		return -ENOTSUP;
3655 	memset(fc_conf, 0, sizeof(*fc_conf));
3656 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3657 }
3658 
3659 int
3660 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3661 {
3662 	struct rte_eth_dev *dev;
3663 
3664 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3665 	dev = &rte_eth_devices[port_id];
3666 
3667 	if (fc_conf == NULL) {
3668 		RTE_ETHDEV_LOG(ERR,
3669 			"Cannot set ethdev port %u flow control from NULL config\n",
3670 			port_id);
3671 		return -EINVAL;
3672 	}
3673 
3674 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3675 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3676 		return -EINVAL;
3677 	}
3678 
3679 	if (*dev->dev_ops->flow_ctrl_set == NULL)
3680 		return -ENOTSUP;
3681 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3682 }
3683 
3684 int
3685 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3686 				   struct rte_eth_pfc_conf *pfc_conf)
3687 {
3688 	struct rte_eth_dev *dev;
3689 
3690 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3691 	dev = &rte_eth_devices[port_id];
3692 
3693 	if (pfc_conf == NULL) {
3694 		RTE_ETHDEV_LOG(ERR,
3695 			"Cannot set ethdev port %u priority flow control from NULL config\n",
3696 			port_id);
3697 		return -EINVAL;
3698 	}
3699 
3700 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3701 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3702 		return -EINVAL;
3703 	}
3704 
3705 	/* High and low water mark validation is device-specific */
3706 	if (*dev->dev_ops->priority_flow_ctrl_set)
3707 		return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3708 					(dev, pfc_conf));
3709 	return -ENOTSUP;
3710 }
3711 
3712 static int
3713 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3714 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3715 {
3716 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3717 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3718 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3719 			RTE_ETHDEV_LOG(ERR,
3720 				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
3721 				pfc_queue_conf->rx_pause.tx_qid,
3722 				dev_info->nb_tx_queues);
3723 			return -EINVAL;
3724 		}
3725 
3726 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3727 			RTE_ETHDEV_LOG(ERR,
3728 				"PFC TC not in range for Rx pause requested:%d max:%d\n",
3729 				pfc_queue_conf->rx_pause.tc, tc_max);
3730 			return -EINVAL;
3731 		}
3732 	}
3733 
3734 	return 0;
3735 }
3736 
3737 static int
3738 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3739 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3740 {
3741 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3742 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3743 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3744 			RTE_ETHDEV_LOG(ERR,
3745 				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
3746 				pfc_queue_conf->tx_pause.rx_qid,
3747 				dev_info->nb_rx_queues);
3748 			return -EINVAL;
3749 		}
3750 
3751 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3752 			RTE_ETHDEV_LOG(ERR,
3753 				"PFC TC not in range for Tx pause requested:%d max:%d\n",
3754 				pfc_queue_conf->tx_pause.tc, tc_max);
3755 			return -EINVAL;
3756 		}
3757 	}
3758 
3759 	return 0;
3760 }
3761 
3762 int
3763 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3764 		struct rte_eth_pfc_queue_info *pfc_queue_info)
3765 {
3766 	struct rte_eth_dev *dev;
3767 
3768 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3769 	dev = &rte_eth_devices[port_id];
3770 
3771 	if (pfc_queue_info == NULL) {
3772 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3773 			port_id);
3774 		return -EINVAL;
3775 	}
3776 
3777 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
3778 		return -ENOTSUP;
3779 	return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3780 			(dev, pfc_queue_info));
3781 }
3782 
3783 int
3784 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3785 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3786 {
3787 	struct rte_eth_pfc_queue_info pfc_info;
3788 	struct rte_eth_dev_info dev_info;
3789 	struct rte_eth_dev *dev;
3790 	int ret;
3791 
3792 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3793 	dev = &rte_eth_devices[port_id];
3794 
3795 	if (pfc_queue_conf == NULL) {
3796 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3797 			port_id);
3798 		return -EINVAL;
3799 	}
3800 
3801 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3802 	if (ret != 0)
3803 		return ret;
3804 
3805 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3806 	if (ret != 0)
3807 		return ret;
3808 
3809 	if (pfc_info.tc_max == 0) {
3810 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
3811 			port_id);
3812 		return -ENOTSUP;
3813 	}
3814 
3815 	/* Check whether the requested flow control mode is supported */
3816 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
3817 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
3818 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
3819 			port_id);
3820 		return -EINVAL;
3821 	}
3822 
3823 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
3824 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
3825 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
3826 			port_id);
3827 		return -EINVAL;
3828 	}
3829 
3830 	/* Validate Rx pause parameters */
3831 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3832 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
3833 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
3834 				pfc_queue_conf);
3835 		if (ret != 0)
3836 			return ret;
3837 	}
3838 
3839 	/* Validate Tx pause parameters */
3840 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
3841 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
3842 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
3843 				pfc_queue_conf);
3844 		if (ret != 0)
3845 			return ret;
3846 	}
3847 
3848 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
3849 		return -ENOTSUP;
3850 	return eth_err(port_id,
3851 		       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
3852 			dev, pfc_queue_conf));
3853 }
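
/*
 * Sketch of a per-queue PFC request that passes the checks above
 * (assumes the port reports RTE_ETH_FC_FULL in mode_capa and a
 * non-zero tc_max; the queue and TC numbers are illustrative):
 *
 *	struct rte_eth_pfc_queue_conf conf = {
 *		.mode = RTE_ETH_FC_FULL,
 *		.rx_pause = { .tx_qid = 0, .tc = 0 },
 *		.tx_pause = { .rx_qid = 0, .tc = 0 },
 *	};
 *
 *	ret = rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &conf);
 */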
3854 
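/*
 * A RETA update/query is usable only if at least one mask bit is set in
 * one of the 64-entry groups: return 0 in that case, -EINVAL otherwise.
 */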
3855 static int
3856 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3857 			uint16_t reta_size)
3858 {
3859 	uint16_t i, num;
3860 
3861 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
3862 	for (i = 0; i < num; i++) {
3863 		if (reta_conf[i].mask)
3864 			return 0;
3865 	}
3866 
3867 	return -EINVAL;
3868 }
3869 
3870 static int
3871 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3872 			 uint16_t reta_size,
3873 			 uint16_t max_rxq)
3874 {
3875 	uint16_t i, idx, shift;
3876 
3877 	if (max_rxq == 0) {
3878 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3879 		return -EINVAL;
3880 	}
3881 
3882 	for (i = 0; i < reta_size; i++) {
3883 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
3884 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
3885 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
3886 			(reta_conf[idx].reta[shift] >= max_rxq)) {
3887 			RTE_ETHDEV_LOG(ERR,
3888 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3889 				idx, shift,
3890 				reta_conf[idx].reta[shift], max_rxq);
3891 			return -EINVAL;
3892 		}
3893 	}
3894 
3895 	return 0;
3896 }
3897 
3898 int
3899 rte_eth_dev_rss_reta_update(uint16_t port_id,
3900 			    struct rte_eth_rss_reta_entry64 *reta_conf,
3901 			    uint16_t reta_size)
3902 {
3903 	enum rte_eth_rx_mq_mode mq_mode;
3904 	struct rte_eth_dev *dev;
3905 	int ret;
3906 
3907 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3908 	dev = &rte_eth_devices[port_id];
3909 
3910 	if (reta_conf == NULL) {
3911 		RTE_ETHDEV_LOG(ERR,
3912 			"Cannot update ethdev port %u RSS RETA to NULL\n",
3913 			port_id);
3914 		return -EINVAL;
3915 	}
3916 
3917 	if (reta_size == 0) {
3918 		RTE_ETHDEV_LOG(ERR,
3919 			"Cannot update ethdev port %u RSS RETA with zero size\n",
3920 			port_id);
3921 		return -EINVAL;
3922 	}
3923 
3924 	/* Check mask bits */
3925 	ret = eth_check_reta_mask(reta_conf, reta_size);
3926 	if (ret < 0)
3927 		return ret;
3928 
3929 	/* Check entry value */
3930 	ret = eth_check_reta_entry(reta_conf, reta_size,
3931 				dev->data->nb_rx_queues);
3932 	if (ret < 0)
3933 		return ret;
3934 
3935 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
3936 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
3937 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
3938 		return -ENOTSUP;
3939 	}
3940 
3941 	if (*dev->dev_ops->reta_update == NULL)
3942 		return -ENOTSUP;
3943 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3944 							     reta_size));
3945 }
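
/*
 * Sketch of how a caller fills the 64-entry groups consumed above,
 * spreading dev_info.reta_size entries across nb_queues Rx queues
 * (assumes reta_size is a multiple of RTE_ETH_RETA_GROUP_SIZE, as
 * PMDs report it; "nb_queues" is an illustrative name):
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[reta_size /
 *						  RTE_ETH_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < reta_size; i++) {
 *		uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
 *		uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;
 *
 *		reta_conf[idx].mask |= RTE_BIT64(shift);
 *		reta_conf[idx].reta[shift] = i % nb_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */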
3946 
3947 int
3948 rte_eth_dev_rss_reta_query(uint16_t port_id,
3949 			   struct rte_eth_rss_reta_entry64 *reta_conf,
3950 			   uint16_t reta_size)
3951 {
3952 	struct rte_eth_dev *dev;
3953 	int ret;
3954 
3955 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3956 	dev = &rte_eth_devices[port_id];
3957 
3958 	if (reta_conf == NULL) {
3959 		RTE_ETHDEV_LOG(ERR,
3960 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
3961 			port_id);
3962 		return -EINVAL;
3963 	}
3964 
3965 	/* Check mask bits */
3966 	ret = eth_check_reta_mask(reta_conf, reta_size);
3967 	if (ret < 0)
3968 		return ret;
3969 
3970 	if (*dev->dev_ops->reta_query == NULL)
3971 		return -ENOTSUP;
3972 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3973 							    reta_size));
3974 }
3975 
3976 int
3977 rte_eth_dev_rss_hash_update(uint16_t port_id,
3978 			    struct rte_eth_rss_conf *rss_conf)
3979 {
3980 	struct rte_eth_dev *dev;
3981 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3982 	enum rte_eth_rx_mq_mode mq_mode;
3983 	int ret;
3984 
3985 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3986 	dev = &rte_eth_devices[port_id];
3987 
3988 	if (rss_conf == NULL) {
3989 		RTE_ETHDEV_LOG(ERR,
3990 			"Cannot update ethdev port %u RSS hash from NULL config\n",
3991 			port_id);
3992 		return -EINVAL;
3993 	}
3994 
3995 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3996 	if (ret != 0)
3997 		return ret;
3998 
3999 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
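	/*
	 * The requested hash types must be a subset of the types the
	 * device reports in flow_type_rss_offloads: OR-ing rss_hf in
	 * must not add any new bits.
	 */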
4000 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4001 	    dev_info.flow_type_rss_offloads) {
4002 		RTE_ETHDEV_LOG(ERR,
4003 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4004 			port_id, rss_conf->rss_hf,
4005 			dev_info.flow_type_rss_offloads);
4006 		return -EINVAL;
4007 	}
4008 
4009 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4010 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4011 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4012 		return -ENOTSUP;
4013 	}
4014 
4015 	if (*dev->dev_ops->rss_hash_update == NULL)
4016 		return -ENOTSUP;
4017 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4018 								 rss_conf));
4019 }
4020 
4021 int
4022 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4023 			      struct rte_eth_rss_conf *rss_conf)
4024 {
4025 	struct rte_eth_dev *dev;
4026 
4027 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4028 	dev = &rte_eth_devices[port_id];
4029 
4030 	if (rss_conf == NULL) {
4031 		RTE_ETHDEV_LOG(ERR,
4032 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4033 			port_id);
4034 		return -EINVAL;
4035 	}
4036 
4037 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4038 		return -ENOTSUP;
4039 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4040 								   rss_conf));
4041 }
4042 
4043 int
4044 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4045 				struct rte_eth_udp_tunnel *udp_tunnel)
4046 {
4047 	struct rte_eth_dev *dev;
4048 
4049 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4050 	dev = &rte_eth_devices[port_id];
4051 
4052 	if (udp_tunnel == NULL) {
4053 		RTE_ETHDEV_LOG(ERR,
4054 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4055 			port_id);
4056 		return -EINVAL;
4057 	}
4058 
4059 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4060 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4061 		return -EINVAL;
4062 	}
4063 
4064 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4065 		return -ENOTSUP;
4066 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4067 								udp_tunnel));
4068 }
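
/*
 * Example: make the device recognize VXLAN on its conventional UDP
 * port (a sketch; 4789 is the IANA-assigned VXLAN port):
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 */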
4069 
4070 int
4071 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4072 				   struct rte_eth_udp_tunnel *udp_tunnel)
4073 {
4074 	struct rte_eth_dev *dev;
4075 
4076 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4077 	dev = &rte_eth_devices[port_id];
4078 
4079 	if (udp_tunnel == NULL) {
4080 		RTE_ETHDEV_LOG(ERR,
4081 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4082 			port_id);
4083 		return -EINVAL;
4084 	}
4085 
4086 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4087 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4088 		return -EINVAL;
4089 	}
4090 
4091 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4092 		return -ENOTSUP;
4093 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4094 								udp_tunnel));
4095 }
4096 
4097 int
4098 rte_eth_led_on(uint16_t port_id)
4099 {
4100 	struct rte_eth_dev *dev;
4101 
4102 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4103 	dev = &rte_eth_devices[port_id];
4104 
4105 	if (*dev->dev_ops->dev_led_on == NULL)
4106 		return -ENOTSUP;
4107 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4108 }
4109 
4110 int
4111 rte_eth_led_off(uint16_t port_id)
4112 {
4113 	struct rte_eth_dev *dev;
4114 
4115 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4116 	dev = &rte_eth_devices[port_id];
4117 
4118 	if (*dev->dev_ops->dev_led_off == NULL)
4119 		return -ENOTSUP;
4120 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4121 }
4122 
4123 int
4124 rte_eth_fec_get_capability(uint16_t port_id,
4125 			   struct rte_eth_fec_capa *speed_fec_capa,
4126 			   unsigned int num)
4127 {
4128 	struct rte_eth_dev *dev;
4129 	int ret;
4130 
4131 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4132 	dev = &rte_eth_devices[port_id];
4133 
4134 	if (speed_fec_capa == NULL && num > 0) {
4135 		RTE_ETHDEV_LOG(ERR,
4136 			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
4137 			port_id);
4138 		return -EINVAL;
4139 	}
4140 
4141 	if (*dev->dev_ops->fec_get_capability == NULL)
4142 		return -ENOTSUP;
4143 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4144 
4145 	return ret;
4146 }
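
/*
 * The usual two-step query for the call above (a sketch: with num == 0
 * a driver is expected to report the required array size as a positive
 * return value greater than num; error handling trimmed):
 *
 *	int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_fec_capa *capa = malloc(n * sizeof(*capa));
 *
 *		if (capa != NULL)
 *			n = rte_eth_fec_get_capability(port_id, capa, n);
 *	}
 */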
4147 
4148 int
4149 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4150 {
4151 	struct rte_eth_dev *dev;
4152 
4153 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4154 	dev = &rte_eth_devices[port_id];
4155 
4156 	if (fec_capa == NULL) {
4157 		RTE_ETHDEV_LOG(ERR,
4158 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4159 			port_id);
4160 		return -EINVAL;
4161 	}
4162 
4163 	if (*dev->dev_ops->fec_get == NULL)
4164 		return -ENOTSUP;
4165 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4166 }
4167 
4168 int
4169 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4170 {
4171 	struct rte_eth_dev *dev;
4172 
4173 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4174 	dev = &rte_eth_devices[port_id];
4175 
4176 	if (*dev->dev_ops->fec_set == NULL)
4177 		return -ENOTSUP;
4178 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4179 }
4180 
4181 /*
4182  * Returns the index of addr in the port's MAC address array, or -1 if it is
4183  * not found. Use 00:00:00:00:00:00 to find an empty spot.
4184  */
4185 static int
4186 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4187 {
4188 	struct rte_eth_dev_info dev_info;
4189 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4190 	unsigned int i;
4191 	int ret;
4192 
4193 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4194 	if (ret != 0)
4195 		return -1;
4196 
4197 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4198 		if (memcmp(addr, &dev->data->mac_addrs[i],
4199 				RTE_ETHER_ADDR_LEN) == 0)
4200 			return i;
4201 
4202 	return -1;
4203 }
4204 
4205 static const struct rte_ether_addr null_mac_addr;
4206 
4207 int
4208 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4209 			uint32_t pool)
4210 {
4211 	struct rte_eth_dev *dev;
4212 	int index;
4213 	uint64_t pool_mask;
4214 	int ret;
4215 
4216 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4217 	dev = &rte_eth_devices[port_id];
4218 
4219 	if (addr == NULL) {
4220 		RTE_ETHDEV_LOG(ERR,
4221 			"Cannot add ethdev port %u MAC address from NULL address\n",
4222 			port_id);
4223 		return -EINVAL;
4224 	}
4225 
4226 	if (*dev->dev_ops->mac_addr_add == NULL)
4227 		return -ENOTSUP;
4228 
4229 	if (rte_is_zero_ether_addr(addr)) {
4230 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4231 			port_id);
4232 		return -EINVAL;
4233 	}
4234 	if (pool >= RTE_ETH_64_POOLS) {
4235 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4236 		return -EINVAL;
4237 	}
4238 
4239 	index = eth_dev_get_mac_addr_index(port_id, addr);
4240 	if (index < 0) {
4241 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4242 		if (index < 0) {
4243 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4244 				port_id);
4245 			return -ENOSPC;
4246 		}
4247 	} else {
4248 		pool_mask = dev->data->mac_pool_sel[index];
4249 
4250 		/* If both the MAC address and pool are already there, do nothing */
4251 		if (pool_mask & RTE_BIT64(pool))
4252 			return 0;
4253 	}
4254 
4255 	/* Update NIC */
4256 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4257 
4258 	if (ret == 0) {
4259 		/* Update address in NIC data structure */
4260 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4261 
4262 		/* Update pool bitmap in NIC data structure */
4263 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4264 	}
4265 
4266 	return eth_err(port_id, ret);
4267 }
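
/*
 * Example: add a secondary, locally administered unicast address to
 * pool 0 (a sketch; the address bytes are illustrative):
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 */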
4268 
4269 int
4270 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4271 {
4272 	struct rte_eth_dev *dev;
4273 	int index;
4274 
4275 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4276 	dev = &rte_eth_devices[port_id];
4277 
4278 	if (addr == NULL) {
4279 		RTE_ETHDEV_LOG(ERR,
4280 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4281 			port_id);
4282 		return -EINVAL;
4283 	}
4284 
4285 	if (*dev->dev_ops->mac_addr_remove == NULL)
4286 		return -ENOTSUP;
4287 
4288 	index = eth_dev_get_mac_addr_index(port_id, addr);
4289 	if (index == 0) {
4290 		RTE_ETHDEV_LOG(ERR,
4291 			"Port %u: Cannot remove default MAC address\n",
4292 			port_id);
4293 		return -EADDRINUSE;
4294 	} else if (index < 0)
4295 		return 0;  /* Do nothing if address wasn't found */
4296 
4297 	/* Update NIC */
4298 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4299 
4300 	/* Update address in NIC data structure */
4301 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4302 
4303 	/* reset pool bitmap */
4304 	dev->data->mac_pool_sel[index] = 0;
4305 
4306 	return 0;
4307 }
4308 
4309 int
4310 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4311 {
4312 	struct rte_eth_dev *dev;
4313 	int ret;
4314 
4315 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4316 	dev = &rte_eth_devices[port_id];
4317 
4318 	if (addr == NULL) {
4319 		RTE_ETHDEV_LOG(ERR,
4320 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4321 			port_id);
4322 		return -EINVAL;
4323 	}
4324 
4325 	if (!rte_is_valid_assigned_ether_addr(addr))
4326 		return -EINVAL;
4327 
4328 	if (*dev->dev_ops->mac_addr_set == NULL)
4329 		return -ENOTSUP;
4330 
4331 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4332 	if (ret < 0)
4333 		return ret;
4334 
4335 	/* Update default address in NIC data structure */
4336 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4337 
4338 	return 0;
4339 }
4340 
4341 
4342 /*
4343  * Returns the index of addr in the port's unicast hash MAC address array,
4344  * or -1 if it is not found. Use 00:00:00:00:00:00 to find an empty spot.
4345  */
4346 static int
4347 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4348 		const struct rte_ether_addr *addr)
4349 {
4350 	struct rte_eth_dev_info dev_info;
4351 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4352 	unsigned int i;
4353 	int ret;
4354 
4355 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4356 	if (ret != 0)
4357 		return -1;
4358 
4359 	if (!dev->data->hash_mac_addrs)
4360 		return -1;
4361 
4362 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4363 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4364 			RTE_ETHER_ADDR_LEN) == 0)
4365 			return i;
4366 
4367 	return -1;
4368 }
4369 
4370 int
4371 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4372 				uint8_t on)
4373 {
4374 	int index;
4375 	int ret;
4376 	struct rte_eth_dev *dev;
4377 
4378 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4379 	dev = &rte_eth_devices[port_id];
4380 
4381 	if (addr == NULL) {
4382 		RTE_ETHDEV_LOG(ERR,
4383 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
4384 			port_id);
4385 		return -EINVAL;
4386 	}
4387 
4388 	if (rte_is_zero_ether_addr(addr)) {
4389 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4390 			port_id);
4391 		return -EINVAL;
4392 	}
4393 
4394 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4395 	/* If the address is already there and 'on' is requested, do nothing */
4396 	if ((index >= 0) && on)
4397 		return 0;
4398 
4399 	if (index < 0) {
4400 		if (!on) {
4401 			RTE_ETHDEV_LOG(ERR,
4402 				"Port %u: the MAC address was not set in UTA\n",
4403 				port_id);
4404 			return -EINVAL;
4405 		}
4406 
4407 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4408 		if (index < 0) {
4409 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4410 				port_id);
4411 			return -ENOSPC;
4412 		}
4413 	}
4414 
4415 	if (*dev->dev_ops->uc_hash_table_set == NULL)
4416 		return -ENOTSUP;
4417 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4418 	if (ret == 0) {
4419 		/* Update address in NIC data structure */
4420 		if (on)
4421 			rte_ether_addr_copy(addr,
4422 					&dev->data->hash_mac_addrs[index]);
4423 		else
4424 			rte_ether_addr_copy(&null_mac_addr,
4425 					&dev->data->hash_mac_addrs[index]);
4426 	}
4427 
4428 	return eth_err(port_id, ret);
4429 }
4430 
4431 int
4432 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4433 {
4434 	struct rte_eth_dev *dev;
4435 
4436 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4437 	dev = &rte_eth_devices[port_id];
4438 
4439 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
4440 		return -ENOTSUP;
4441 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4442 								       on));
4443 }
4444 
4445 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4446 					uint16_t tx_rate)
4447 {
4448 	struct rte_eth_dev *dev;
4449 	struct rte_eth_dev_info dev_info;
4450 	struct rte_eth_link link;
4451 	int ret;
4452 
4453 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4454 	dev = &rte_eth_devices[port_id];
4455 
4456 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4457 	if (ret != 0)
4458 		return ret;
4459 
4460 	link = dev->data->dev_link;
4461 
4462 	if (queue_idx >= dev_info.max_tx_queues) {
4463 		RTE_ETHDEV_LOG(ERR,
4464 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
4465 			port_id, queue_idx);
4466 		return -EINVAL;
4467 	}
4468 
4469 	if (tx_rate > link.link_speed) {
4470 		RTE_ETHDEV_LOG(ERR,
4471 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed %u\n",
4472 			tx_rate, link.link_speed);
4473 		return -EINVAL;
4474 	}
4475 
4476 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
4477 		return -ENOTSUP;
4478 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4479 							queue_idx, tx_rate));
4480 }
4481 
4482 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4483 			       uint8_t avail_thresh)
4484 {
4485 	struct rte_eth_dev *dev;
4486 
4487 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4488 	dev = &rte_eth_devices[port_id];
4489 
4490 	if (queue_id >= dev->data->nb_rx_queues) {
4491 		RTE_ETHDEV_LOG(ERR,
4492 			"Set queue avail thresh: port %u: invalid queue ID=%u\n",
4493 			port_id, queue_id);
4494 		return -EINVAL;
4495 	}
4496 
4497 	if (avail_thresh > 99) {
4498 		RTE_ETHDEV_LOG(ERR,
4499 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
4500 			port_id);
4501 		return -EINVAL;
4502 	}
4503 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
4504 		return -ENOTSUP;
4505 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4506 							     queue_id, avail_thresh));
4507 }
4508 
4509 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4510 				 uint8_t *avail_thresh)
4511 {
4512 	struct rte_eth_dev *dev;
4513 
4514 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4515 	dev = &rte_eth_devices[port_id];
4516 
4517 	if (queue_id == NULL)
4518 		return -EINVAL;
4519 	if (*queue_id >= dev->data->nb_rx_queues)
4520 		*queue_id = 0;
4521 
4522 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
4523 		return -ENOTSUP;
4524 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4525 							     queue_id, avail_thresh));
4526 }
4527 
4528 RTE_INIT(eth_dev_init_fp_ops)
4529 {
4530 	uint32_t i;
4531 
4532 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4533 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4534 }
4535 
4536 RTE_INIT(eth_dev_init_cb_lists)
4537 {
4538 	uint16_t i;
4539 
4540 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4541 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4542 }
4543 
4544 int
4545 rte_eth_dev_callback_register(uint16_t port_id,
4546 			enum rte_eth_event_type event,
4547 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4548 {
4549 	struct rte_eth_dev *dev;
4550 	struct rte_eth_dev_callback *user_cb;
4551 	uint16_t next_port;
4552 	uint16_t last_port;
4553 
4554 	if (cb_fn == NULL) {
4555 		RTE_ETHDEV_LOG(ERR,
4556 			"Cannot register ethdev port %u callback from NULL\n",
4557 			port_id);
4558 		return -EINVAL;
4559 	}
4560 
4561 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4562 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4563 		return -EINVAL;
4564 	}
4565 
4566 	if (port_id == RTE_ETH_ALL) {
4567 		next_port = 0;
4568 		last_port = RTE_MAX_ETHPORTS - 1;
4569 	} else {
4570 		next_port = last_port = port_id;
4571 	}
4572 
4573 	rte_spinlock_lock(&eth_dev_cb_lock);
4574 
4575 	do {
4576 		dev = &rte_eth_devices[next_port];
4577 
4578 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4579 			if (user_cb->cb_fn == cb_fn &&
4580 				user_cb->cb_arg == cb_arg &&
4581 				user_cb->event == event) {
4582 				break;
4583 			}
4584 		}
4585 
4586 		/* Create a new callback */
4587 		if (user_cb == NULL) {
4588 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4589 				sizeof(struct rte_eth_dev_callback), 0);
4590 			if (user_cb != NULL) {
4591 				user_cb->cb_fn = cb_fn;
4592 				user_cb->cb_arg = cb_arg;
4593 				user_cb->event = event;
4594 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4595 						  user_cb, next);
4596 			} else {
4597 				rte_spinlock_unlock(&eth_dev_cb_lock);
4598 				rte_eth_dev_callback_unregister(port_id, event,
4599 								cb_fn, cb_arg);
4600 				return -ENOMEM;
4601 			}
4602 
4603 		}
4604 	} while (++next_port <= last_port);
4605 
4606 	rte_spinlock_unlock(&eth_dev_cb_lock);
4607 	return 0;
4608 }
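
/*
 * Sketch of a link-status handler registered through the call above
 * ("link_event_cb" is an illustrative name; the handler runs from the
 * interrupt thread, so it should do little work and return quickly):
 *
 *	static int
 *	link_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *			void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, (int)event);
 *		return 0;
 *	}
 *
 *	ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
 *			RTE_ETH_EVENT_INTR_LSC, link_event_cb, NULL);
 */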
4609 
4610 int
4611 rte_eth_dev_callback_unregister(uint16_t port_id,
4612 			enum rte_eth_event_type event,
4613 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4614 {
4615 	int ret;
4616 	struct rte_eth_dev *dev;
4617 	struct rte_eth_dev_callback *cb, *next;
4618 	uint16_t next_port;
4619 	uint16_t last_port;
4620 
4621 	if (cb_fn == NULL) {
4622 		RTE_ETHDEV_LOG(ERR,
4623 			"Cannot unregister ethdev port %u callback from NULL\n",
4624 			port_id);
4625 		return -EINVAL;
4626 	}
4627 
4628 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4629 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4630 		return -EINVAL;
4631 	}
4632 
4633 	if (port_id == RTE_ETH_ALL) {
4634 		next_port = 0;
4635 		last_port = RTE_MAX_ETHPORTS - 1;
4636 	} else {
4637 		next_port = last_port = port_id;
4638 	}
4639 
4640 	rte_spinlock_lock(&eth_dev_cb_lock);
4641 
4642 	do {
4643 		dev = &rte_eth_devices[next_port];
4644 		ret = 0;
4645 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4646 		     cb = next) {
4647 
4648 			next = TAILQ_NEXT(cb, next);
4649 
4650 			if (cb->cb_fn != cb_fn || cb->event != event ||
4651 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4652 				continue;
4653 
4654 			/*
4655 			 * if this callback is not executing right now,
4656 			 * then remove it.
4657 			 */
4658 			if (cb->active == 0) {
4659 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4660 				rte_free(cb);
4661 			} else {
4662 				ret = -EAGAIN;
4663 			}
4664 		}
4665 	} while (++next_port <= last_port);
4666 
4667 	rte_spinlock_unlock(&eth_dev_cb_lock);
4668 	return ret;
4669 }
4670 
4671 int
4672 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4673 {
4674 	uint32_t vec;
4675 	struct rte_eth_dev *dev;
4676 	struct rte_intr_handle *intr_handle;
4677 	uint16_t qid;
4678 	int rc;
4679 
4680 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4681 	dev = &rte_eth_devices[port_id];
4682 
4683 	if (!dev->intr_handle) {
4684 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4685 		return -ENOTSUP;
4686 	}
4687 
4688 	intr_handle = dev->intr_handle;
4689 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4690 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4691 		return -EPERM;
4692 	}
4693 
4694 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4695 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4696 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4697 		if (rc && rc != -EEXIST) {
4698 			RTE_ETHDEV_LOG(ERR,
4699 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4700 				port_id, qid, op, epfd, vec);
4701 		}
4702 	}
4703 
4704 	return 0;
4705 }
4706 
4707 int
4708 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4709 {
4710 	struct rte_intr_handle *intr_handle;
4711 	struct rte_eth_dev *dev;
4712 	unsigned int efd_idx;
4713 	uint32_t vec;
4714 	int fd;
4715 
4716 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4717 	dev = &rte_eth_devices[port_id];
4718 
4719 	if (queue_id >= dev->data->nb_rx_queues) {
4720 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4721 		return -1;
4722 	}
4723 
4724 	if (!dev->intr_handle) {
4725 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4726 		return -1;
4727 	}
4728 
4729 	intr_handle = dev->intr_handle;
4730 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4731 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4732 		return -1;
4733 	}
4734 
4735 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4736 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4737 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4738 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4739 
4740 	return fd;
4741 }
4742 
4743 int
4744 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4745 			  int epfd, int op, void *data)
4746 {
4747 	uint32_t vec;
4748 	struct rte_eth_dev *dev;
4749 	struct rte_intr_handle *intr_handle;
4750 	int rc;
4751 
4752 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4753 	dev = &rte_eth_devices[port_id];
4754 
4755 	if (queue_id >= dev->data->nb_rx_queues) {
4756 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4757 		return -EINVAL;
4758 	}
4759 
4760 	if (!dev->intr_handle) {
4761 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4762 		return -ENOTSUP;
4763 	}
4764 
4765 	intr_handle = dev->intr_handle;
4766 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4767 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4768 		return -EPERM;
4769 	}
4770 
4771 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4772 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4773 	if (rc && rc != -EEXIST) {
4774 		RTE_ETHDEV_LOG(ERR,
4775 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4776 			port_id, queue_id, op, epfd, vec);
4777 		return rc;
4778 	}
4779 
4780 	return 0;
4781 }
4782 
4783 int
4784 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4785 			   uint16_t queue_id)
4786 {
4787 	struct rte_eth_dev *dev;
4788 	int ret;
4789 
4790 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4791 	dev = &rte_eth_devices[port_id];
4792 
4793 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4794 	if (ret != 0)
4795 		return ret;
4796 
4797 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
4798 		return -ENOTSUP;
4799 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4800 }
4801 
4802 int
4803 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4804 			    uint16_t queue_id)
4805 {
4806 	struct rte_eth_dev *dev;
4807 	int ret;
4808 
4809 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4810 	dev = &rte_eth_devices[port_id];
4811 
4812 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4813 	if (ret != 0)
4814 		return ret;
4815 
4816 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
4817 		return -ENOTSUP;
4818 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
4819 }
4820 
4821 
4822 const struct rte_eth_rxtx_callback *
4823 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4824 		rte_rx_callback_fn fn, void *user_param)
4825 {
4826 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4827 	rte_errno = ENOTSUP;
4828 	return NULL;
4829 #endif
4830 	struct rte_eth_dev *dev;
4831 
4832 	/* check input parameters */
4833 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4834 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4835 		rte_errno = EINVAL;
4836 		return NULL;
4837 	}
4838 	dev = &rte_eth_devices[port_id];
4839 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4840 		rte_errno = EINVAL;
4841 		return NULL;
4842 	}
4843 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4844 
4845 	if (cb == NULL) {
4846 		rte_errno = ENOMEM;
4847 		return NULL;
4848 	}
4849 
4850 	cb->fn.rx = fn;
4851 	cb->param = user_param;
4852 
4853 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4854 	/* Add the callback in FIFO order. */
4855 	struct rte_eth_rxtx_callback *tail =
4856 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4857 
4858 	if (!tail) {
4859 		/* Stores to cb->fn and cb->param should complete before
4860 		 * cb is visible to data plane.
4861 		 */
4862 		__atomic_store_n(
4863 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4864 			cb, __ATOMIC_RELEASE);
4865 
4866 	} else {
4867 		while (tail->next)
4868 			tail = tail->next;
4869 		/* Stores to cb->fn and cb->param should complete before
4870 		 * cb is visible to data plane.
4871 		 */
4872 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4873 	}
4874 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4875 
4876 	return cb;
4877 }
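
/*
 * Sketch of a post-Rx callback counting received packets ("rx_count_cb"
 * and "pkt_count" are illustrative names; the function must match
 * rte_rx_callback_fn and runs inside rte_eth_rx_burst()):
 *
 *	static uint16_t
 *	rx_count_cb(uint16_t port_id, uint16_t queue_id,
 *			struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *			uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *pkt_count = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*pkt_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	cb = rte_eth_add_rx_callback(port_id, 0, rx_count_cb, &pkt_count);
 */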
4878 
4879 const struct rte_eth_rxtx_callback *
4880 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4881 		rte_rx_callback_fn fn, void *user_param)
4882 {
4883 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4884 	rte_errno = ENOTSUP;
4885 	return NULL;
4886 #endif
4887 	/* check input parameters */
4888 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4889 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4890 		rte_errno = EINVAL;
4891 		return NULL;
4892 	}
4893 
4894 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4895 
4896 	if (cb == NULL) {
4897 		rte_errno = ENOMEM;
4898 		return NULL;
4899 	}
4900 
4901 	cb->fn.rx = fn;
4902 	cb->param = user_param;
4903 
4904 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4905 	/* Add the callback at the first position */
4906 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4907 	/* Stores to cb->fn, cb->param and cb->next should complete before
4908 	 * cb is visible to data plane threads.
4909 	 */
4910 	__atomic_store_n(
4911 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
4912 		cb, __ATOMIC_RELEASE);
4913 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
4914 
4915 	return cb;
4916 }
4917 
4918 const struct rte_eth_rxtx_callback *
4919 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4920 		rte_tx_callback_fn fn, void *user_param)
4921 {
4922 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4923 	rte_errno = ENOTSUP;
4924 	return NULL;
4925 #endif
4926 	struct rte_eth_dev *dev;
4927 
4928 	/* check input parameters */
4929 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4930 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4931 		rte_errno = EINVAL;
4932 		return NULL;
4933 	}
4934 
4935 	dev = &rte_eth_devices[port_id];
4936 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4937 		rte_errno = EINVAL;
4938 		return NULL;
4939 	}
4940 
4941 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4942 
4943 	if (cb == NULL) {
4944 		rte_errno = ENOMEM;
4945 		return NULL;
4946 	}
4947 
4948 	cb->fn.tx = fn;
4949 	cb->param = user_param;
4950 
4951 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
4952 	/* Add the callback in FIFO order. */
4953 	struct rte_eth_rxtx_callback *tail =
4954 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4955 
4956 	if (!tail) {
4957 		/* Stores to cb->fn and cb->param should complete before
4958 		 * cb is visible to data plane.
4959 		 */
4960 		__atomic_store_n(
4961 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
4962 			cb, __ATOMIC_RELEASE);
4963 
4964 	} else {
4965 		while (tail->next)
4966 			tail = tail->next;
4967 		/* Stores to cb->fn and cb->param should complete before
4968 		 * cb is visible to data plane.
4969 		 */
4970 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
4971 	}
4972 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
4973 
4974 	return cb;
4975 }
4976 
4977 int
4978 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4979 		const struct rte_eth_rxtx_callback *user_cb)
4980 {
4981 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
4982 	return -ENOTSUP;
4983 #endif
4984 	/* Check input parameters. */
4985 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4986 	if (user_cb == NULL ||
4987 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4988 		return -EINVAL;
4989 
4990 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4991 	struct rte_eth_rxtx_callback *cb;
4992 	struct rte_eth_rxtx_callback **prev_cb;
4993 	int ret = -EINVAL;
4994 
4995 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
4996 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
4997 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
4998 		cb = *prev_cb;
4999 		if (cb == user_cb) {
5000 			/* Remove the user cb from the callback list. */
5001 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5002 			ret = 0;
5003 			break;
5004 		}
5005 	}
5006 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5007 
5008 	return ret;
5009 }
5010 
5011 int
5012 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5013 		const struct rte_eth_rxtx_callback *user_cb)
5014 {
5015 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5016 	return -ENOTSUP;
5017 #endif
5018 	/* Check input parameters. */
5019 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5020 	if (user_cb == NULL ||
5021 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5022 		return -EINVAL;
5023 
5024 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5025 	int ret = -EINVAL;
5026 	struct rte_eth_rxtx_callback *cb;
5027 	struct rte_eth_rxtx_callback **prev_cb;
5028 
5029 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5030 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5031 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5032 		cb = *prev_cb;
5033 		if (cb == user_cb) {
5034 			/* Remove the user cb from the callback list. */
5035 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5036 			ret = 0;
5037 			break;
5038 		}
5039 	}
5040 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5041 
5042 	return ret;
5043 }
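
/*
 * Note for both remove functions above: the callback structure is
 * unlinked but not freed here, since a data-plane thread may still be
 * executing it. The application owns the memory and should release it
 * only once it can guarantee that no rte_eth_rx_burst()/rte_eth_tx_burst()
 * call is still using the callback (e.g. after stopping the queues).
 */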
5044 
5045 int
5046 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5047 	struct rte_eth_rxq_info *qinfo)
5048 {
5049 	struct rte_eth_dev *dev;
5050 
5051 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5052 	dev = &rte_eth_devices[port_id];
5053 
5054 	if (queue_id >= dev->data->nb_rx_queues) {
5055 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5056 		return -EINVAL;
5057 	}
5058 
5059 	if (qinfo == NULL) {
5060 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5061 			port_id, queue_id);
5062 		return -EINVAL;
5063 	}
5064 
5065 	if (dev->data->rx_queues == NULL ||
5066 			dev->data->rx_queues[queue_id] == NULL) {
5067 		RTE_ETHDEV_LOG(ERR,
5068 			       "Rx queue %"PRIu16" of device with port_id=%"
5069 			       PRIu16" has not been setup\n",
5070 			       queue_id, port_id);
5071 		return -EINVAL;
5072 	}
5073 
5074 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5075 		RTE_ETHDEV_LOG(INFO,
5076 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5077 			queue_id, port_id);
5078 		return -EINVAL;
5079 	}
5080 
5081 	if (*dev->dev_ops->rxq_info_get == NULL)
5082 		return -ENOTSUP;
5083 
5084 	memset(qinfo, 0, sizeof(*qinfo));
5085 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5086 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5087 
5088 	return 0;
5089 }
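
/*
 * Example query against the call above (a sketch; reports the ring
 * size actually chosen by the PMD for queue 0):
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("rxq0: %u descriptors\n", qinfo.nb_desc);
 */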
5090 
5091 int
5092 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5093 	struct rte_eth_txq_info *qinfo)
5094 {
5095 	struct rte_eth_dev *dev;
5096 
5097 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5098 	dev = &rte_eth_devices[port_id];
5099 
5100 	if (queue_id >= dev->data->nb_tx_queues) {
5101 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5102 		return -EINVAL;
5103 	}
5104 
5105 	if (qinfo == NULL) {
5106 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5107 			port_id, queue_id);
5108 		return -EINVAL;
5109 	}
5110 
5111 	if (dev->data->tx_queues == NULL ||
5112 			dev->data->tx_queues[queue_id] == NULL) {
5113 		RTE_ETHDEV_LOG(ERR,
5114 			       "Tx queue %"PRIu16" of device with port_id=%"
5115 			       PRIu16" has not been setup\n",
5116 			       queue_id, port_id);
5117 		return -EINVAL;
5118 	}
5119 
5120 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5121 		RTE_ETHDEV_LOG(INFO,
5122 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5123 			queue_id, port_id);
5124 		return -EINVAL;
5125 	}
5126 
5127 	if (*dev->dev_ops->txq_info_get == NULL)
5128 		return -ENOTSUP;
5129 
5130 	memset(qinfo, 0, sizeof(*qinfo));
5131 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5132 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5133 
5134 	return 0;
5135 }
5136 
5137 int
5138 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5139 			  struct rte_eth_burst_mode *mode)
5140 {
5141 	struct rte_eth_dev *dev;
5142 
5143 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5144 	dev = &rte_eth_devices[port_id];
5145 
5146 	if (queue_id >= dev->data->nb_rx_queues) {
5147 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5148 		return -EINVAL;
5149 	}
5150 
5151 	if (mode == NULL) {
5152 		RTE_ETHDEV_LOG(ERR,
5153 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5154 			port_id, queue_id);
5155 		return -EINVAL;
5156 	}
5157 
5158 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5159 		return -ENOTSUP;
5160 	memset(mode, 0, sizeof(*mode));
5161 	return eth_err(port_id,
5162 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5163 }
5164 
5165 int
5166 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5167 			  struct rte_eth_burst_mode *mode)
5168 {
5169 	struct rte_eth_dev *dev;
5170 
5171 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5172 	dev = &rte_eth_devices[port_id];
5173 
5174 	if (queue_id >= dev->data->nb_tx_queues) {
5175 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5176 		return -EINVAL;
5177 	}
5178 
5179 	if (mode == NULL) {
5180 		RTE_ETHDEV_LOG(ERR,
5181 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5182 			port_id, queue_id);
5183 		return -EINVAL;
5184 	}
5185 
5186 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
5187 		return -ENOTSUP;
5188 	memset(mode, 0, sizeof(*mode));
5189 	return eth_err(port_id,
5190 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5191 }
5192 
5193 int
5194 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5195 		struct rte_power_monitor_cond *pmc)
5196 {
5197 	struct rte_eth_dev *dev;
5198 
5199 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5200 	dev = &rte_eth_devices[port_id];
5201 
5202 	if (queue_id >= dev->data->nb_rx_queues) {
5203 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5204 		return -EINVAL;
5205 	}
5206 
5207 	if (pmc == NULL) {
5208 		RTE_ETHDEV_LOG(ERR,
5209 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5210 			port_id, queue_id);
5211 		return -EINVAL;
5212 	}
5213 
5214 	if (*dev->dev_ops->get_monitor_addr == NULL)
5215 		return -ENOTSUP;
5216 	return eth_err(port_id,
5217 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5218 }
5219 
5220 int
5221 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5222 			     struct rte_ether_addr *mc_addr_set,
5223 			     uint32_t nb_mc_addr)
5224 {
5225 	struct rte_eth_dev *dev;
5226 
5227 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5228 	dev = &rte_eth_devices[port_id];
5229 
5230 	if (*dev->dev_ops->set_mc_addr_list == NULL)
5231 		return -ENOTSUP;
5232 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5233 						mc_addr_set, nb_mc_addr));
5234 }
5235 
5236 int
5237 rte_eth_timesync_enable(uint16_t port_id)
5238 {
5239 	struct rte_eth_dev *dev;
5240 
5241 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5242 	dev = &rte_eth_devices[port_id];
5243 
5244 	if (*dev->dev_ops->timesync_enable == NULL)
5245 		return -ENOTSUP;
5246 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5247 }
5248 
5249 int
5250 rte_eth_timesync_disable(uint16_t port_id)
5251 {
5252 	struct rte_eth_dev *dev;
5253 
5254 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5255 	dev = &rte_eth_devices[port_id];
5256 
5257 	if (*dev->dev_ops->timesync_disable == NULL)
5258 		return -ENOTSUP;
5259 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5260 }
5261 
5262 int
5263 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5264 				   uint32_t flags)
5265 {
5266 	struct rte_eth_dev *dev;
5267 
5268 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5269 	dev = &rte_eth_devices[port_id];
5270 
5271 	if (timestamp == NULL) {
5272 		RTE_ETHDEV_LOG(ERR,
5273 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5274 			port_id);
5275 		return -EINVAL;
5276 	}
5277 
5278 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
5279 		return -ENOTSUP;
5280 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5281 				(dev, timestamp, flags));
5282 }
5283 
5284 int
5285 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5286 				   struct timespec *timestamp)
5287 {
5288 	struct rte_eth_dev *dev;
5289 
5290 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5291 	dev = &rte_eth_devices[port_id];
5292 
5293 	if (timestamp == NULL) {
5294 		RTE_ETHDEV_LOG(ERR,
5295 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
5296 			port_id);
5297 		return -EINVAL;
5298 	}
5299 
5300 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
5301 		return -ENOTSUP;
5302 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5303 				(dev, timestamp));
5304 }
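
/*
 * Sketch of the Tx timestamp flow (assumes timesync is enabled and the
 * transmitted packet was marked for timestamping; the hardware latches
 * the value asynchronously, so callers typically poll):
 *
 *	struct timespec ts;
 *
 *	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) != 0)
 *		rte_delay_us(10);
 */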
5305 
5306 int
5307 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5308 {
5309 	struct rte_eth_dev *dev;
5310 
5311 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5312 	dev = &rte_eth_devices[port_id];
5313 
5314 	if (*dev->dev_ops->timesync_adjust_time == NULL)
5315 		return -ENOTSUP;
5316 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5317 }
5318 
5319 int
5320 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5321 {
5322 	struct rte_eth_dev *dev;
5323 
5324 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5325 	dev = &rte_eth_devices[port_id];
5326 
5327 	if (timestamp == NULL) {
5328 		RTE_ETHDEV_LOG(ERR,
5329 			"Cannot read ethdev port %u timesync time to NULL\n",
5330 			port_id);
5331 		return -EINVAL;
5332 	}
5333 
5334 	if (*dev->dev_ops->timesync_read_time == NULL)
5335 		return -ENOTSUP;
5336 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5337 								timestamp));
5338 }
5339 
5340 int
5341 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5342 {
5343 	struct rte_eth_dev *dev;
5344 
5345 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5346 	dev = &rte_eth_devices[port_id];
5347 
5348 	if (timestamp == NULL) {
5349 		RTE_ETHDEV_LOG(ERR,
5350 			"Cannot write ethdev port %u timesync from NULL time\n",
5351 			port_id);
5352 		return -EINVAL;
5353 	}
5354 
5355 	if (*dev->dev_ops->timesync_write_time == NULL)
5356 		return -ENOTSUP;
5357 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5358 								timestamp));
5359 }
5360 
5361 int
5362 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5363 {
5364 	struct rte_eth_dev *dev;
5365 
5366 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5367 	dev = &rte_eth_devices[port_id];
5368 
5369 	if (clock == NULL) {
5370 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5371 			port_id);
5372 		return -EINVAL;
5373 	}
5374 
5375 	if (*dev->dev_ops->read_clock == NULL)
5376 		return -ENOTSUP;
5377 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5378 }
5379 
5380 int
5381 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5382 {
5383 	struct rte_eth_dev *dev;
5384 
5385 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5386 	dev = &rte_eth_devices[port_id];
5387 
5388 	if (info == NULL) {
5389 		RTE_ETHDEV_LOG(ERR,
5390 			"Cannot get ethdev port %u register info to NULL\n",
5391 			port_id);
5392 		return -EINVAL;
5393 	}
5394 
5395 	if (*dev->dev_ops->get_reg == NULL)
5396 		return -ENOTSUP;
5397 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5398 }
5399 
5400 int
5401 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5402 {
5403 	struct rte_eth_dev *dev;
5404 
5405 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5406 	dev = &rte_eth_devices[port_id];
5407 
5408 	if (*dev->dev_ops->get_eeprom_length == NULL)
5409 		return -ENOTSUP;
5410 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5411 }
5412 
5413 int
5414 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5415 {
5416 	struct rte_eth_dev *dev;
5417 
5418 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5419 	dev = &rte_eth_devices[port_id];
5420 
5421 	if (info == NULL) {
5422 		RTE_ETHDEV_LOG(ERR,
5423 			"Cannot get ethdev port %u EEPROM info to NULL\n",
5424 			port_id);
5425 		return -EINVAL;
5426 	}
5427 
5428 	if (*dev->dev_ops->get_eeprom == NULL)
5429 		return -ENOTSUP;
5430 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5431 }
5432 
5433 int
5434 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5435 {
5436 	struct rte_eth_dev *dev;
5437 
5438 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5439 	dev = &rte_eth_devices[port_id];
5440 
5441 	if (info == NULL) {
5442 		RTE_ETHDEV_LOG(ERR,
5443 			"Cannot set ethdev port %u EEPROM from NULL info\n",
5444 			port_id);
5445 		return -EINVAL;
5446 	}
5447 
5448 	if (*dev->dev_ops->set_eeprom == NULL)
5449 		return -ENOTSUP;
5450 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5451 }
5452 
5453 int
5454 rte_eth_dev_get_module_info(uint16_t port_id,
5455 			    struct rte_eth_dev_module_info *modinfo)
5456 {
5457 	struct rte_eth_dev *dev;
5458 
5459 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5460 	dev = &rte_eth_devices[port_id];
5461 
5462 	if (modinfo == NULL) {
5463 		RTE_ETHDEV_LOG(ERR,
5464 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
5465 			port_id);
5466 		return -EINVAL;
5467 	}
5468 
5469 	if (*dev->dev_ops->get_module_info == NULL)
5470 		return -ENOTSUP;
5471 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
5472 }
5473 
5474 int
5475 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5476 			      struct rte_dev_eeprom_info *info)
5477 {
5478 	struct rte_eth_dev *dev;
5479 
5480 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5481 	dev = &rte_eth_devices[port_id];
5482 
5483 	if (info == NULL) {
5484 		RTE_ETHDEV_LOG(ERR,
5485 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
5486 			port_id);
5487 		return -EINVAL;
5488 	}
5489 
5490 	if (info->data == NULL) {
5491 		RTE_ETHDEV_LOG(ERR,
5492 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
5493 			port_id);
5494 		return -EINVAL;
5495 	}
5496 
5497 	if (info->length == 0) {
5498 		RTE_ETHDEV_LOG(ERR,
5499 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
5500 			port_id);
5501 		return -EINVAL;
5502 	}
5503 
5504 	if (*dev->dev_ops->get_module_eeprom == NULL)
5505 		return -ENOTSUP;
5506 	return (*dev->dev_ops->get_module_eeprom)(dev, info);
5507 }
5508 
5509 int
5510 rte_eth_dev_get_dcb_info(uint16_t port_id,
5511 			     struct rte_eth_dcb_info *dcb_info)
5512 {
5513 	struct rte_eth_dev *dev;
5514 
5515 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5516 	dev = &rte_eth_devices[port_id];
5517 
5518 	if (dcb_info == NULL) {
5519 		RTE_ETHDEV_LOG(ERR,
5520 			"Cannot get ethdev port %u DCB info to NULL\n",
5521 			port_id);
5522 		return -EINVAL;
5523 	}
5524 
5525 	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
5526 
5527 	if (*dev->dev_ops->get_dcb_info == NULL)
5528 		return -ENOTSUP;
5529 	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
5530 }
5531 
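/*
 * Clamp a requested descriptor count to the limits reported by the
 * driver: round up to nb_align, cap at nb_max, then raise to at least
 * nb_min.
 */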
5532 static void
5533 eth_dev_adjust_nb_desc(uint16_t *nb_desc,
5534 		const struct rte_eth_desc_lim *desc_lim)
5535 {
5536 	if (desc_lim->nb_align != 0)
5537 		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
5538 
5539 	if (desc_lim->nb_max != 0)
5540 		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
5541 
5542 	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
5543 }
5544 
5545 int
5546 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5547 				 uint16_t *nb_rx_desc,
5548 				 uint16_t *nb_tx_desc)
5549 {
5550 	struct rte_eth_dev_info dev_info;
5551 	int ret;
5552 
5553 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5554 
5555 	ret = rte_eth_dev_info_get(port_id, &dev_info);
5556 	if (ret != 0)
5557 		return ret;
5558 
5559 	if (nb_rx_desc != NULL)
5560 		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
5561 
5562 	if (nb_tx_desc != NULL)
5563 		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5564 
5565 	return 0;
5566 }
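
/*
 * Typical call before queue setup (a sketch: starts from the ring sizes
 * the application prefers and lets the limits above clamp them;
 * "mb_pool" is an illustrative mempool pointer):
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *	if (ret == 0)
 *		ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 */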
5567 
5568 int
5569 rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5570 				   struct rte_eth_hairpin_cap *cap)
5571 {
5572 	struct rte_eth_dev *dev;
5573 
5574 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5575 	dev = &rte_eth_devices[port_id];
5576 
5577 	if (cap == NULL) {
5578 		RTE_ETHDEV_LOG(ERR,
5579 			"Cannot get ethdev port %u hairpin capability to NULL\n",
5580 			port_id);
5581 		return -EINVAL;
5582 	}
5583 
5584 	if (*dev->dev_ops->hairpin_cap_get == NULL)
5585 		return -ENOTSUP;
5586 	memset(cap, 0, sizeof(*cap));
5587 	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5588 }
5589 
5590 int
5591 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5592 {
5593 	struct rte_eth_dev *dev;
5594 
5595 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5596 	dev = &rte_eth_devices[port_id];
5597 
5598 	if (pool == NULL) {
5599 		RTE_ETHDEV_LOG(ERR,
5600 			"Cannot test ethdev port %u mempool operation from NULL pool\n",
5601 			port_id);
5602 		return -EINVAL;
5603 	}
5604 
5605 	if (*dev->dev_ops->pool_ops_supported == NULL)
5606 		return 1; /* all pools are supported */
5607 
5608 	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5609 }
5610 
5611 static int
5612 eth_dev_handle_port_list(const char *cmd __rte_unused,
5613 		const char *params __rte_unused,
5614 		struct rte_tel_data *d)
5615 {
5616 	int port_id;
5617 
5618 	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5619 	RTE_ETH_FOREACH_DEV(port_id)
5620 		rte_tel_data_add_array_int(d, port_id);
5621 	return 0;
5622 }
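
/*
 * These telemetry handlers back the "/ethdev/*" commands; once the
 * telemetry socket is enabled they can be exercised from the usertools
 * client, e.g. (a sketch):
 *
 *	$ ./usertools/dpdk-telemetry.py
 *	--> /ethdev/list
 *	--> /ethdev/stats,0
 */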
5623 
5624 static void
5625 eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
5626 		const char *stat_name)
5627 {
5628 	int q;
5629 	struct rte_tel_data *q_data = rte_tel_data_alloc();
5630 	if (q_data == NULL)
5631 		return;
5632 	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
5633 	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
5634 		rte_tel_data_add_array_u64(q_data, q_stats[q]);
5635 	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
5636 }
5637 
5638 #define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
5639 
5640 static int
5641 eth_dev_handle_port_stats(const char *cmd __rte_unused,
5642 		const char *params,
5643 		struct rte_tel_data *d)
5644 {
5645 	struct rte_eth_stats stats;
5646 	int port_id, ret;
5647 
5648 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5649 		return -1;
5650 
5651 	port_id = atoi(params);
5652 	if (!rte_eth_dev_is_valid_port(port_id))
5653 		return -1;
5654 
5655 	ret = rte_eth_stats_get(port_id, &stats);
5656 	if (ret < 0)
5657 		return -1;
5658 
5659 	rte_tel_data_start_dict(d);
5660 	ADD_DICT_STAT(stats, ipackets);
5661 	ADD_DICT_STAT(stats, opackets);
5662 	ADD_DICT_STAT(stats, ibytes);
5663 	ADD_DICT_STAT(stats, obytes);
5664 	ADD_DICT_STAT(stats, imissed);
5665 	ADD_DICT_STAT(stats, ierrors);
5666 	ADD_DICT_STAT(stats, oerrors);
5667 	ADD_DICT_STAT(stats, rx_nombuf);
5668 	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
5669 	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
5670 	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
5671 	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
5672 	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");
5673 
5674 	return 0;
5675 }
5676 
5677 static int
5678 eth_dev_handle_port_xstats(const char *cmd __rte_unused,
5679 		const char *params,
5680 		struct rte_tel_data *d)
5681 {
5682 	struct rte_eth_xstat *eth_xstats;
5683 	struct rte_eth_xstat_name *xstat_names;
5684 	int port_id, num_xstats;
5685 	int i, ret;
5686 	char *end_param;
5687 
5688 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5689 		return -1;
5690 
5691 	port_id = strtoul(params, &end_param, 0);
5692 	if (*end_param != '\0')
5693 		RTE_ETHDEV_LOG(NOTICE,
5694 			"Extra parameters passed to ethdev telemetry command, ignoring");
5695 	if (!rte_eth_dev_is_valid_port(port_id))
5696 		return -1;
5697 
5698 	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5699 	if (num_xstats < 0)
5700 		return -1;
5701 
5702 	/* use one malloc for both names and stats */
5703 	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5704 			sizeof(struct rte_eth_xstat_name)) * num_xstats);
5705 	if (eth_xstats == NULL)
5706 		return -1;
5707 	xstat_names = (void *)&eth_xstats[num_xstats];
5708 
5709 	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5710 	if (ret < 0 || ret > num_xstats) {
5711 		free(eth_xstats);
5712 		return -1;
5713 	}
5714 
5715 	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5716 	if (ret < 0 || ret > num_xstats) {
5717 		free(eth_xstats);
5718 		return -1;
5719 	}
5720 
5721 	rte_tel_data_start_dict(d);
5722 	for (i = 0; i < num_xstats; i++)
5723 		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5724 				eth_xstats[i].value);
5725 	free(eth_xstats);
5726 	return 0;
5727 }
5728 
5729 static int
5730 eth_dev_handle_port_link_status(const char *cmd __rte_unused,
5731 		const char *params,
5732 		struct rte_tel_data *d)
5733 {
5734 	static const char *status_str = "status";
5735 	int ret, port_id;
5736 	struct rte_eth_link link;
5737 	char *end_param;
5738 
5739 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5740 		return -1;
5741 
5742 	port_id = strtoul(params, &end_param, 0);
5743 	if (*end_param != '\0')
5744 		RTE_ETHDEV_LOG(NOTICE,
5745 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5746 	if (!rte_eth_dev_is_valid_port(port_id))
5747 		return -1;
5748 
5749 	ret = rte_eth_link_get_nowait(port_id, &link);
5750 	if (ret < 0)
5751 		return -1;
5752 
5753 	rte_tel_data_start_dict(d);
5754 	if (!link.link_status) {
5755 		rte_tel_data_add_dict_string(d, status_str, "DOWN");
5756 		return 0;
5757 	}
5758 	rte_tel_data_add_dict_string(d, status_str, "UP");
5759 	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5760 	rte_tel_data_add_dict_string(d, "duplex",
5761 			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
5762 				"full-duplex" : "half-duplex");
5763 	return 0;
5764 }
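
/*
 * Example output (values illustrative only):
 *
 *   --> /ethdev/link_status,0
 *   {"/ethdev/link_status": {"status": "UP", "speed": 10000,
 *    "duplex": "full-duplex"}}
 */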
5765 
5766 static int
5767 eth_dev_handle_port_info(const char *cmd __rte_unused,
5768 		const char *params,
5769 		struct rte_tel_data *d)
5770 {
5771 	struct rte_tel_data *rxq_state, *txq_state;
5772 	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
5773 	struct rte_eth_dev *eth_dev;
5774 	char *end_param;
5775 	int port_id, i;
5776 
5777 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5778 		return -1;
5779 
5780 	port_id = strtoul(params, &end_param, 0);
5781 	if (*end_param != '\0')
5782 		RTE_ETHDEV_LOG(NOTICE,
5783 			"Extra parameters passed to ethdev telemetry command, ignoring\n");
5784 
5785 	if (!rte_eth_dev_is_valid_port(port_id))
5786 		return -EINVAL;
5787 
5788 	eth_dev = &rte_eth_devices[port_id];
5789 
5790 	rxq_state = rte_tel_data_alloc();
5791 	if (rxq_state == NULL)
5792 		return -ENOMEM;
5793 
5794 	txq_state = rte_tel_data_alloc();
5795 	if (txq_state == NULL) {
5796 		rte_tel_data_free(rxq_state);
5797 		return -ENOMEM;
5798 	}
5799 
5800 	rte_tel_data_start_dict(d);
5801 	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
5802 	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
5803 	rte_tel_data_add_dict_int(d, "nb_rx_queues",
5804 			eth_dev->data->nb_rx_queues);
5805 	rte_tel_data_add_dict_int(d, "nb_tx_queues",
5806 			eth_dev->data->nb_tx_queues);
5807 	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
5808 	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
5809 	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
5810 			eth_dev->data->min_rx_buf_size);
5811 	rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
5812 			eth_dev->data->rx_mbuf_alloc_failed);
5813 	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
5814 			eth_dev->data->mac_addrs);
5815 	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
5816 	rte_tel_data_add_dict_int(d, "promiscuous",
5817 			eth_dev->data->promiscuous);
5818 	rte_tel_data_add_dict_int(d, "scattered_rx",
5819 			eth_dev->data->scattered_rx);
5820 	rte_tel_data_add_dict_int(d, "all_multicast",
5821 			eth_dev->data->all_multicast);
5822 	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
5823 	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
5824 	rte_tel_data_add_dict_int(d, "dev_configured",
5825 			eth_dev->data->dev_configured);
5826 
5827 	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
5828 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
5829 		rte_tel_data_add_array_int(rxq_state,
5830 				eth_dev->data->rx_queue_state[i]);
5831 
5832 	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
5833 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
5834 		rte_tel_data_add_array_int(txq_state,
5835 				eth_dev->data->tx_queue_state[i]);
5836 
5837 	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
5838 	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
5839 	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
5840 	rte_tel_data_add_dict_int(d, "dev_flags", eth_dev->data->dev_flags);
5841 	rte_tel_data_add_dict_int(d, "rx_offloads",
5842 			eth_dev->data->dev_conf.rxmode.offloads);
5843 	rte_tel_data_add_dict_int(d, "tx_offloads",
5844 			eth_dev->data->dev_conf.txmode.offloads);
5845 	rte_tel_data_add_dict_int(d, "ethdev_rss_hf",
5846 			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
5847 
5848 	return 0;
5849 }
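
/*
 * Example output (heavily trimmed; values illustrative only):
 *
 *   --> /ethdev/info,0
 *   {"/ethdev/info": {"name": "0000:3b:00.0", "state": 1,
 *    "nb_rx_queues": 1, "nb_tx_queues": 1, "mtu": 1500, ...,
 *    "rxq_state": [1], "txq_state": [1], ...}}
 */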
5850 
5851 int
5852 rte_eth_representor_info_get(uint16_t port_id,
5853 			     struct rte_eth_representor_info *info)
5854 {
5855 	struct rte_eth_dev *dev;
5856 
5857 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5858 	dev = &rte_eth_devices[port_id];
5859 
5860 	if (*dev->dev_ops->representor_info_get == NULL)
5861 		return -ENOTSUP;
5862 	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
5863 }
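
/*
 * A minimal usage sketch (not part of this file; error handling trimmed).
 * It assumes the PMD implements the op and follows the query-then-fill
 * convention, where passing a NULL info returns the number of ranges:
 *
 *   int n = rte_eth_representor_info_get(port_id, NULL);
 *   if (n > 0) {
 *       struct rte_eth_representor_info *info =
 *           calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
 *       info->nb_ranges_alloc = n;
 *       if (rte_eth_representor_info_get(port_id, info) >= 0)
 *           printf("%u representor range(s)\n", info->nb_ranges);
 *       free(info);
 *   }
 */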
5864 
5865 int
5866 rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
5867 {
5868 	struct rte_eth_dev *dev;
5869 
5870 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5871 	dev = &rte_eth_devices[port_id];
5872 
5873 	if (dev->data->dev_configured != 0) {
5874 		RTE_ETHDEV_LOG(ERR,
5875 			"The port (ID=%"PRIu16") is already configured\n",
5876 			port_id);
5877 		return -EBUSY;
5878 	}
5879 
5880 	if (features == NULL) {
5881 		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
5882 		return -EINVAL;
5883 	}
5884 
5885 	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
5886 		return -ENOTSUP;
5887 	return eth_err(port_id,
5888 		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
5889 }
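
/*
 * A minimal usage sketch (illustrative): negotiation must happen before
 * rte_eth_dev_configure(), as enforced above. On successful return the
 * features word is reduced to the subset the driver actually supports:
 *
 *   uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                       RTE_ETH_RX_METADATA_USER_MARK;
 *   if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *       (features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *       setup_mark_based_flow_rules(port_id);  (hypothetical app helper)
 */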
5890 
5891 int
5892 rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5893 		struct rte_eth_ip_reassembly_params *reassembly_capa)
5894 {
5895 	struct rte_eth_dev *dev;
5896 
5897 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5898 	dev = &rte_eth_devices[port_id];
5899 
5900 	if (dev->data->dev_configured == 0) {
5901 		RTE_ETHDEV_LOG(ERR,
5902 			"Device with port_id=%u is not configured, "
5903 			"cannot get IP reassembly capability\n",
5904 			port_id);
5905 		return -EINVAL;
5906 	}
5907 
5908 	if (reassembly_capa == NULL) {
5909 		RTE_ETHDEV_LOG(ERR, "Cannot get IP reassembly capability to NULL\n");
5910 		return -EINVAL;
5911 	}
5912 
5913 	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
5914 		return -ENOTSUP;
5915 	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
5916 
5917 	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
5918 					(dev, reassembly_capa));
5919 }
5920 
5921 int
5922 rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5923 		struct rte_eth_ip_reassembly_params *conf)
5924 {
5925 	struct rte_eth_dev *dev;
5926 
5927 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5928 	dev = &rte_eth_devices[port_id];
5929 
5930 	if (dev->data->dev_configured == 0) {
5931 		RTE_ETHDEV_LOG(ERR,
5932 			"Device with port_id=%u is not configured, "
5933 			"cannot get IP reassembly configuration\n",
5934 			port_id);
5935 		return -EINVAL;
5936 	}
5937 
5938 	if (conf == NULL) {
5939 		RTE_ETHDEV_LOG(ERR, "Cannot get IP reassembly configuration to NULL\n");
5940 		return -EINVAL;
5941 	}
5942 
5943 	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
5944 		return -ENOTSUP;
5945 	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
5946 	return eth_err(port_id,
5947 		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
5948 }
5949 
5950 int
5951 rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5952 		const struct rte_eth_ip_reassembly_params *conf)
5953 {
5954 	struct rte_eth_dev *dev;
5955 
5956 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5957 	dev = &rte_eth_devices[port_id];
5958 
5959 	if (dev->data->dev_configured == 0) {
5960 		RTE_ETHDEV_LOG(ERR,
5961 			"Device with port_id=%u is not configured, "
5962 			"cannot set IP reassembly configuration\n",
5963 			port_id);
5964 		return -EINVAL;
5965 	}
5966 
5967 	if (dev->data->dev_started != 0) {
5968 		RTE_ETHDEV_LOG(ERR,
5969 			"Device with port_id=%u is started, "
5970 			"cannot configure IP reassembly params\n",
5971 			port_id);
5972 		return -EINVAL;
5973 	}
5974 
5975 	if (conf == NULL) {
5976 		RTE_ETHDEV_LOG(ERR,
5977 				"Invalid IP reassembly configuration (NULL)\n");
5978 		return -EINVAL;
5979 	}
5980 
5981 	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
5982 		return -ENOTSUP;
5983 	return eth_err(port_id,
5984 		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
5985 }
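
/*
 * A minimal usage sketch tying the three IP reassembly calls together
 * (illustrative; the port must be configured but not yet started, and
 * field names follow struct rte_eth_ip_reassembly_params):
 *
 *   struct rte_eth_ip_reassembly_params capa, conf;
 *
 *   if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *       conf = capa;              (start from the device maximums)
 *       conf.timeout_ms = 500;    (then scale down as required)
 *       rte_eth_ip_reassembly_conf_set(port_id, &conf);
 *   }
 */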
5986 
5987 int
5988 rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
5989 {
5990 	struct rte_eth_dev *dev;
5991 
5992 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5993 	dev = &rte_eth_devices[port_id];
5994 
5995 	if (file == NULL) {
5996 		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
5997 		return -EINVAL;
5998 	}
5999 
6000 	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
6001 		return -ENOTSUP;
6002 	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
6003 }
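
/*
 * A minimal usage sketch (illustrative): dump a PMD's private state to
 * the console for debugging, assuming the driver implements the op:
 *
 *   rte_eth_dev_priv_dump(port_id, stdout);
 */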
6004 
6005 RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);
6006 
6007 RTE_INIT(ethdev_init_telemetry)
6008 {
6009 	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
6010 			"Returns list of available ethdev ports. Takes no parameters");
6011 	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
6012 			"Returns the common stats for a port. Parameters: int port_id");
6013 	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
6014 			"Returns the extended stats for a port. Parameters: int port_id");
6015 	rte_telemetry_register_cmd("/ethdev/link_status",
6016 			eth_dev_handle_port_link_status,
6017 			"Returns the link status for a port. Parameters: int port_id");
6018 	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
6019 			"Returns the device info for a port. Parameters: int port_id");
6020 	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
6021 			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
6022 }
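
/*
 * All commands registered above can be exercised at runtime with
 * usertools/dpdk-telemetry.py, which connects to the application's
 * telemetry socket, e.g.:
 *
 *   $ ./usertools/dpdk-telemetry.py
 *   --> /ethdev/info,0
 */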
6023