/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <ctype.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

#include <bus_driver.h>
#include <rte_log.h>
#include <rte_interrupts.h>
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>

#include "rte_ethdev_trace.h"
#include "rte_ethdev.h"
#include "ethdev_driver.h"
#include "ethdev_profile.h"
#include "ethdev_private.h"
#include "sff_telemetry.h"

struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];

/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];

/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS RTE_DIM(eth_dev_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS RTE_DIM(eth_dev_rxq_stats_strings)

static const struct rte_eth_xstats_name_off eth_dev_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};

#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
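
/*
 * Illustrative sketch (not part of this file): the name/offset tables above
 * let the basic xstats code expose struct rte_eth_stats fields by name.
 * A value is fetched from its recorded offset roughly as follows:
 *
 *	struct rte_eth_stats stats;
 *	uint64_t value;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	value = *(const uint64_t *)((const char *)&stats +
 *			eth_dev_stats_strings[i].offset);
 */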

#define RTE_RX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_RX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_rx_offload_names[] = {
	RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
	RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
	RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
	RTE_RX_OFFLOAD_BIT2STR(SCATTER),
	RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
	RTE_RX_OFFLOAD_BIT2STR(SECURITY),
	RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
	RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
	RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};

#undef RTE_RX_OFFLOAD_BIT2STR

#define RTE_TX_OFFLOAD_BIT2STR(_name)	\
	{ RTE_ETH_TX_OFFLOAD_##_name, #_name }

static const struct {
	uint64_t offload;
	const char *name;
} eth_dev_tx_offload_names[] = {
	RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
	RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
	RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
	RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
	RTE_TX_OFFLOAD_BIT2STR(SECURITY),
	RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
	RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
	RTE_TX_OFFLOAD_BIT2STR(SEND_ON_TIMESTAMP),
};

#undef RTE_TX_OFFLOAD_BIT2STR

static const struct {
	uint64_t offload;
	const char *name;
} rte_eth_dev_capa_names[] = {
	{RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
	{RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
	{RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
	{RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
	int ret;
	struct rte_devargs devargs;
	const char *bus_param_key;
	char *bus_str = NULL;
	char *cls_str = NULL;
	int str_size;

	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
		return -EINVAL;
	}

	if (devargs_str == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot initialize iterator from NULL device description string\n");
		return -EINVAL;
	}

	memset(iter, 0, sizeof(*iter));
	memset(&devargs, 0, sizeof(devargs));

	/*
	 * The devargs string may use various syntaxes:
	 *   - 0000:08:00.0,representor=[1-3]
	 *   - pci:0000:06:00.0,representor=[0,5]
	 *   - class=eth,mac=00:11:22:33:44:55
	 *   - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
	 */

	/*
	 * Handle the pure class filter case (i.e. without any bus-level
	 * argument), coming from the future new syntax.
	 * rte_devargs_parse() does not yet support the new syntax,
	 * which is why this simple case is parsed here for now.
	 */
#define iter_anybus_str "class=eth,"
	if (strncmp(devargs_str, iter_anybus_str,
			strlen(iter_anybus_str)) == 0) {
		iter->cls_str = devargs_str + strlen(iter_anybus_str);
		goto end;
	}

	/* Split bus, device and parameters. */
	ret = rte_devargs_parse(&devargs, devargs_str);
	if (ret != 0)
		goto error;

	/*
	 * Assume parameters of the old syntax can match only at the ethdev level.
	 * Extra parameters are ignored, thanks to the "+" prefix.
	 */
	str_size = strlen(devargs.args) + 2;
	cls_str = malloc(str_size);
	if (cls_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(cls_str, str_size, "+%s", devargs.args);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->cls_str = cls_str;

	iter->bus = devargs.bus;
	if (iter->bus->dev_iterate == NULL) {
		ret = -ENOTSUP;
		goto error;
	}

	/* Convert bus args to new syntax for use with new API dev_iterate. */
	if ((strcmp(iter->bus->name, "vdev") == 0) ||
		(strcmp(iter->bus->name, "fslmc") == 0) ||
		(strcmp(iter->bus->name, "dpaa_bus") == 0)) {
		bus_param_key = "name";
	} else if (strcmp(iter->bus->name, "pci") == 0) {
		bus_param_key = "addr";
	} else {
		ret = -ENOTSUP;
		goto error;
	}
	str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
	bus_str = malloc(str_size);
	if (bus_str == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	ret = snprintf(bus_str, str_size, "%s=%s",
			bus_param_key, devargs.name);
	if (ret != str_size - 1) {
		ret = -EINVAL;
		goto error;
	}
	iter->bus_str = bus_str;

end:
	iter->cls = rte_class_find_by_name("eth");
	rte_devargs_reset(&devargs);
	return 0;

error:
	if (ret == -ENOTSUP)
		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
				iter->bus->name);
	rte_devargs_reset(&devargs);
	free(bus_str);
	free(cls_str);
	return ret;
}

uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get next device from NULL iterator\n");
		return RTE_MAX_ETHPORTS;
	}

	if (iter->cls == NULL) /* invalid ethdev iterator */
		return RTE_MAX_ETHPORTS;

	do { /* loop to try all matching rte_device */
		/* If not pure ethdev filter and */
		if (iter->bus != NULL &&
				/* not in middle of rte_eth_dev iteration, */
				iter->class_device == NULL) {
			/* get next rte_device to try. */
			iter->device = iter->bus->dev_iterate(
					iter->device, iter->bus_str, iter);
			if (iter->device == NULL)
				break; /* no more rte_device candidate */
		}
		/* A device is matching bus part, need to check ethdev part. */
		iter->class_device = iter->cls->dev_iterate(
				iter->class_device, iter->cls_str, iter);
		if (iter->class_device != NULL)
			return eth_dev_to_id(iter->class_device); /* match */
	} while (iter->bus != NULL); /* need to try next rte_device */

	/* No more ethdev port to iterate. */
	rte_eth_iterator_cleanup(iter);
	return RTE_MAX_ETHPORTS;
}

void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
	if (iter == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
		return;
	}

	if (iter->bus_str == NULL)
		return; /* nothing to free in pure class filter */
	free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
	free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
	memset(iter, 0, sizeof(*iter));
}
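
/*
 * Illustrative usage of the iterator API above (a sketch; applications
 * normally use the RTE_ETH_FOREACH_MATCHING_DEV() macro, which wraps this
 * very loop):
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	if (rte_eth_iterator_init(&iter, "class=eth,mac=00:11:22:33:44:55") == 0)
 *		for (port_id = rte_eth_iterator_next(&iter);
 *		     port_id != RTE_MAX_ETHPORTS;
 *		     port_id = rte_eth_iterator_next(&iter))
 *			printf("matched port %u\n", port_id);
 */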

uint16_t
rte_eth_find_next(uint16_t port_id)
{
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
		port_id++;

	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;

	return port_id;
}

/*
 * Macro to iterate over all valid ports for internal usage.
 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
 */
#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
	for (port_id = rte_eth_find_next(0); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next(port_id + 1))

uint16_t
rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].device != parent)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

uint16_t
rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
	return rte_eth_find_next_of(port_id,
			rte_eth_devices[ref_port_id].device);
}

static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
	return ethdev->data->name[0] != '\0';
}

int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
		return 0;
	else
		return 1;
}

static int
eth_is_valid_owner_id(uint64_t owner_id)
{
	if (owner_id == RTE_ETH_DEV_NO_OWNER ||
	    eth_dev_shared_data->next_owner_id <= owner_id)
		return 0;
	return 1;
}

uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
	port_id = rte_eth_find_next(port_id);
	while (port_id < RTE_MAX_ETHPORTS &&
			rte_eth_devices[port_id].data->owner.id != owner_id)
		port_id = rte_eth_find_next(port_id + 1);

	return port_id;
}

int
rte_eth_dev_owner_new(uint64_t *owner_id)
{
	if (owner_id == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	*owner_id = eth_dev_shared_data->next_owner_id++;

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return 0;
}

static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
		       const struct rte_eth_dev_owner *new_owner)
{
	struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
	struct rte_eth_dev_owner *port_owner;

	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (new_owner == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set ethdev port %u owner from NULL owner\n",
			port_id);
		return -EINVAL;
	}

	if (!eth_is_valid_owner_id(new_owner->id) &&
	    !eth_is_valid_owner_id(old_owner_id)) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
		       old_owner_id, new_owner->id);
		return -EINVAL;
	}

	port_owner = &rte_eth_devices[port_id].data->owner;
	if (port_owner->id != old_owner_id) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
			port_id, port_owner->name, port_owner->id);
		return -EPERM;
	}

	/* cannot truncate (same structure) */
	strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);

	port_owner->id = new_owner->id;

	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
		port_id, new_owner->name, new_owner->id);

	return 0;
}

int
rte_eth_dev_owner_set(const uint16_t port_id,
		      const struct rte_eth_dev_owner *owner)
{
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{
	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
			{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
	int ret;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	ret = eth_dev_owner_set(port_id, owner_id, &new_owner);

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
	return ret;
}

int
rte_eth_dev_owner_delete(const uint64_t owner_id)
{
	uint16_t port_id;
	int ret = 0;

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);

	if (eth_is_valid_owner_id(owner_id)) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
			struct rte_eth_dev_data *data =
				rte_eth_devices[port_id].data;
			if (data != NULL && data->owner.id == owner_id)
				memset(&data->owner, 0,
				       sizeof(struct rte_eth_dev_owner));
		}
		RTE_ETHDEV_LOG(NOTICE,
			"All ports owned by %016"PRIx64" identifier have been released\n",
			owner_id);
	} else {
		RTE_ETHDEV_LOG(ERR,
			       "Invalid owner ID=%016"PRIx64"\n",
			       owner_id);
		ret = -EINVAL;
	}

	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return ret;
}

int
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
	struct rte_eth_dev *ethdev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ethdev = &rte_eth_devices[port_id];

	if (!eth_dev_is_allocated(ethdev)) {
		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
			port_id);
		return -ENODEV;
	}

	if (owner == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
			port_id);
		return -EINVAL;
	}

	eth_dev_shared_data_prepare();

	rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
	rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
	rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);

	return 0;
}
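
/*
 * Illustrative ownership workflow using the API above (a sketch; "my_app"
 * is a hypothetical owner name and port_id is assumed to be a valid port):
 *
 *	struct rte_eth_dev_owner owner = { .name = "my_app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0 &&
 *	    rte_eth_dev_owner_set(port_id, &owner) == 0) {
 *		... exclusive management of the port ...
 *		rte_eth_dev_owner_unset(port_id, owner.id);
 *	}
 */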

int
rte_eth_dev_socket_id(uint16_t port_id)
{
	int socket_id = SOCKET_ID_ANY;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_errno = EINVAL;
	} else {
		socket_id = rte_eth_devices[port_id].data->numa_node;
		if (socket_id == SOCKET_ID_ANY)
			rte_errno = 0;
	}
	return socket_id;
}

void *
rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	return rte_eth_devices[port_id].security_ctx;
}

uint16_t
rte_eth_dev_count_avail(void)
{
	uint16_t p;
	uint16_t count;

	count = 0;

	RTE_ETH_FOREACH_DEV(p)
		count++;

	return count;
}

uint16_t
rte_eth_dev_count_total(void)
{
	uint16_t port, count = 0;

	RTE_ETH_FOREACH_VALID_DEV(port)
		count++;

	return count;
}

int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
			port_id);
		return -EINVAL;
	}
	/* Do not check 'rte_eth_devices[port_id].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = eth_dev_shared_data->data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
	uint16_t pid;

	if (name == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
		return -EINVAL;
	}

	if (port_id == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get port ID to NULL for %s\n", name);
		return -EINVAL;
	}

	RTE_ETH_FOREACH_VALID_DEV(pid)
		if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
			*port_id = pid;
			return 0;
		}

	return -ENODEV;
}

int
eth_err(uint16_t port_id, int ret)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return -EIO;
	return ret;
}

static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	uint16_t port_id;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queues[rx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       rx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

static int
eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	uint16_t port_id;

	if (tx_queue_id >= dev->data->nb_tx_queues) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queues[tx_queue_id] == NULL) {
		port_id = dev->data->port_id;
		RTE_ETHDEV_LOG(ERR,
			       "Queue %u of device with port_id=%u has not been setup\n",
			       tx_queue_id, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_start(dev, rx_queue_id));
}

int
rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_rx_queue(dev, rx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->rx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			rx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
}

int
rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (!dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be started before starting any queue\n",
			port_id);
		return -EINVAL;
	}

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_start == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}

int
rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	ret = eth_dev_validate_tx_queue(dev, tx_queue_id);
	if (ret != 0)
		return ret;

	if (*dev->dev_ops->tx_queue_stop == NULL)
		return -ENOTSUP;

	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
		RTE_ETHDEV_LOG(INFO,
			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
			tx_queue_id, port_id);
		return -EINVAL;
	}

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_ETHDEV_LOG(INFO,
			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case RTE_ETH_SPEED_NUM_10M:
		return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
	case RTE_ETH_SPEED_NUM_100M:
		return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
	case RTE_ETH_SPEED_NUM_1G:
		return RTE_ETH_LINK_SPEED_1G;
	case RTE_ETH_SPEED_NUM_2_5G:
		return RTE_ETH_LINK_SPEED_2_5G;
	case RTE_ETH_SPEED_NUM_5G:
		return RTE_ETH_LINK_SPEED_5G;
	case RTE_ETH_SPEED_NUM_10G:
		return RTE_ETH_LINK_SPEED_10G;
	case RTE_ETH_SPEED_NUM_20G:
		return RTE_ETH_LINK_SPEED_20G;
	case RTE_ETH_SPEED_NUM_25G:
		return RTE_ETH_LINK_SPEED_25G;
	case RTE_ETH_SPEED_NUM_40G:
		return RTE_ETH_LINK_SPEED_40G;
	case RTE_ETH_SPEED_NUM_50G:
		return RTE_ETH_LINK_SPEED_50G;
	case RTE_ETH_SPEED_NUM_56G:
		return RTE_ETH_LINK_SPEED_56G;
	case RTE_ETH_SPEED_NUM_100G:
		return RTE_ETH_LINK_SPEED_100G;
	case RTE_ETH_SPEED_NUM_200G:
		return RTE_ETH_LINK_SPEED_200G;
	default:
		return 0;
	}
}
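
/*
 * Example (illustrative): building a link_speeds mask for
 * rte_eth_conf.link_speeds, here fixed 10G full duplex:
 *
 *	uint32_t link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				RTE_ETH_LINK_FULL_DUPLEX);
 */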

const char *
rte_eth_dev_rx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_rx_offload_names); ++i) {
		if (offload == eth_dev_rx_offload_names[i].offload) {
			name = eth_dev_rx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_tx_offload_name(uint64_t offload)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(eth_dev_tx_offload_names); ++i) {
		if (offload == eth_dev_tx_offload_names[i].offload) {
			name = eth_dev_tx_offload_names[i].name;
			break;
		}
	}

	return name;
}

const char *
rte_eth_dev_capability_name(uint64_t capability)
{
	const char *name = "UNKNOWN";
	unsigned int i;

	for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
		if (capability == rte_eth_dev_capa_names[i].offload) {
			name = rte_eth_dev_capa_names[i].name;
			break;
		}
	}

	return name;
}

static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
		   uint32_t max_rx_pkt_len, uint32_t dev_info_size)
{
	int ret = 0;

	if (dev_info_size == 0) {
		if (config_size != max_rx_pkt_len) {
			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
				       " %u != %u is not allowed\n",
				       port_id, config_size, max_rx_pkt_len);
			ret = -EINVAL;
		}
	} else if (config_size > dev_info_size) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "> max allowed value %u\n", port_id, config_size,
			       dev_info_size);
		ret = -EINVAL;
	} else if (config_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
			       "< min allowed value %u\n", port_id, config_size,
			       (unsigned int)RTE_ETHER_MIN_LEN);
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Validate offloads that are requested through rte_eth_dev_configure against
 * the offloads successfully set by the Ethernet device.
 *
 * @param port_id
 *   The port identifier of the Ethernet device.
 * @param req_offloads
 *   The offloads that have been requested through `rte_eth_dev_configure`.
 * @param set_offloads
 *   The offloads successfully set by the Ethernet device.
 * @param offload_type
 *   The offload type i.e. Rx/Tx string.
 * @param offload_name
 *   The function that prints the offload name.
 * @return
 *   - (0) if validation successful.
 *   - (-EINVAL) if requested offload has been silently disabled.
 */
static int
eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
		  uint64_t set_offloads, const char *offload_type,
		  const char *(*offload_name)(uint64_t))
{
	uint64_t offloads_diff = req_offloads ^ set_offloads;
	uint64_t offload;
	int ret = 0;

	while (offloads_diff != 0) {
		/* Check if any offload is requested but not enabled. */
		offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
		if (offload & req_offloads) {
			RTE_ETHDEV_LOG(ERR,
				"Port %u failed to enable %s offload %s\n",
				port_id, offload_type, offload_name(offload));
			ret = -EINVAL;
		}

		/* Check if offload couldn't be disabled. */
		if (offload & set_offloads) {
			RTE_ETHDEV_LOG(DEBUG,
				"Port %u %s offload %s is not requested but enabled\n",
				port_id, offload_type, offload_name(offload));
		}

		offloads_diff &= ~offload;
	}

	return ret;
}
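
/*
 * Worked example of the XOR logic above: with req_offloads = 0b0110 and
 * set_offloads = 0b0011, offloads_diff = 0b0101. Bit 2 is requested but not
 * set, so -EINVAL is returned; bit 0 is set but was not requested, which is
 * only reported at DEBUG level.
 */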

static uint32_t
eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
{
	uint32_t overhead_len;

	if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
		overhead_len = max_rx_pktlen - max_mtu;
	else
		overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	return overhead_len;
}
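
/*
 * Example (illustrative): a device reporting max_rx_pktlen = 9018 and
 * max_mtu = 9000 yields overhead_len = 18 (Ethernet header plus CRC),
 * so an MTU of 1500 corresponds to a frame size of 1518.
 */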

/* rte_eth_dev_info_get() should be called prior to this function */
static int
eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
		uint16_t mtu)
{
	uint32_t overhead_len;
	uint32_t frame_size;

	if (mtu < dev_info->min_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) < device min MTU (%u) for port_id %u\n",
			mtu, dev_info->min_mtu, port_id);
		return -EINVAL;
	}
	if (mtu > dev_info->max_mtu) {
		RTE_ETHDEV_LOG(ERR,
			"MTU (%u) > device max MTU (%u) for port_id %u\n",
			mtu, dev_info->max_mtu, port_id);
		return -EINVAL;
	}

	overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
			dev_info->max_mtu);
	frame_size = mtu + overhead_len;
	if (frame_size < RTE_ETHER_MIN_LEN) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) < min frame size (%u) for port_id %u\n",
			frame_size, RTE_ETHER_MIN_LEN, port_id);
		return -EINVAL;
	}

	if (frame_size > dev_info->max_rx_pktlen) {
		RTE_ETHDEV_LOG(ERR,
			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
			frame_size, dev_info->max_rx_pktlen, port_id);
		return -EINVAL;
	}

	return 0;
}

int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf orig_conf;
	int diag;
	int ret;
	uint16_t old_mtu;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev_conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot configure ethdev port %u from NULL config\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_configure == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR,
			"Port %u must be stopped to allow configuration\n",
			port_id);
		return -EBUSY;
	}

	/*
	 * Ensure that "dev_configured" is reset to 0 each time we prepare to
	 * call dev_configure(), to avoid any unanticipated behaviour.
	 * It is set to 1 when dev_configure() completes successfully.
	 */
	dev->data->dev_configured = 0;

	/* Store original config, as rollback required on failure */
	memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Copy the dev_conf parameter into the dev structure.
	 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
	 */
	if (dev_conf != &dev->data->dev_conf)
		memcpy(&dev->data->dev_conf, dev_conf,
		       sizeof(dev->data->dev_conf));

	/* Backup mtu for rollback */
	old_mtu = dev->data->mtu;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		goto rollback;

	/* If the number of queues specified by the application for both Rx and
	 * Tx is zero, use driver preferred values. This cannot be done
	 * individually, as it is valid for either Tx or Rx (but not both) to
	 * be zero.
	 * If the driver does not provide any preferred values, fall back on
	 * EAL defaults.
	 */
	if (nb_rx_q == 0 && nb_tx_q == 0) {
		nb_rx_q = dev_info.default_rxportconf.nb_queues;
		if (nb_rx_q == 0)
			nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
		nb_tx_q = dev_info.default_txportconf.nb_queues;
		if (nb_tx_q == 0)
			nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
	}

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Check that the numbers of Rx and Tx queues are not greater
	 * than the maximum number of Rx and Tx queues supported by the
	 * configured device.
	 */
	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
			port_id, nb_rx_q, dev_info.max_rx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
			port_id, nb_tx_q, dev_info.max_tx_queues);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check that the device supports requested interrupts */
	if ((dev_conf->intr_conf.lsc == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->intr_conf.rmv == 1) &&
			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
			dev->device->driver->name);
		ret = -EINVAL;
		goto rollback;
	}

	if (dev_conf->rxmode.mtu == 0)
		dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;

	ret = eth_dev_validate_mtu(port_id, &dev_info,
			dev->data->dev_conf.rxmode.mtu);
	if (ret != 0)
		goto rollback;

	dev->data->mtu = dev->data->dev_conf.rxmode.mtu;

	/*
	 * If LRO is enabled, check that the maximum aggregated packet
	 * size is supported by the configured device.
	 */
	if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
		uint32_t max_rx_pktlen;
		uint32_t overhead_len;

		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
				dev_info.max_mtu);
		max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
		if (dev_conf->rxmode.max_lro_pkt_size == 0)
			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
		ret = eth_dev_check_lro_pkt_size(port_id,
				dev->data->dev_conf.rxmode.max_lro_pkt_size,
				max_rx_pktlen,
				dev_info.max_lro_pkt_size);
		if (ret != 0)
			goto rollback;
	}

	/* Any requested offloading must be within its device capabilities */
	if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
	     dev_conf->rxmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" do not match Rx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->rxmode.offloads,
			dev_info.rx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}
	if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
	     dev_conf->txmode.offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" do not match Tx offloads "
			"capabilities 0x%"PRIx64" in %s()\n",
			port_id, dev_conf->txmode.offloads,
			dev_info.tx_offload_capa,
			__func__);
		ret = -EINVAL;
		goto rollback;
	}

	dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
		rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);

	/* Check that device supports requested RSS hash functions. */
	if ((dev_info.flow_type_rss_offloads |
	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
	    dev_info.flow_type_rss_offloads) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
			dev_info.flow_type_rss_offloads);
		ret = -EINVAL;
		goto rollback;
	}

	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
		RTE_ETHDEV_LOG(ERR,
			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
			port_id,
			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
		ret = -EINVAL;
		goto rollback;
	}

	/*
	 * Setup new number of Rx/Tx queues and reconfigure device.
	 */
	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_rx_queue_config = %d\n",
			port_id, diag);
		ret = diag;
		goto rollback;
	}

	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Port%u eth_dev_tx_queue_config = %d\n",
			port_id, diag);
		eth_dev_rx_queue_config(dev, 0);
		ret = diag;
		goto rollback;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Initialize Rx profiling if enabled at compilation time. */
	diag = __rte_eth_dev_profile_init(port_id, dev);
	if (diag != 0) {
		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
			port_id, diag);
		ret = eth_err(port_id, diag);
		goto reset_queues;
	}

	/* Validate Rx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->rxmode.offloads,
			dev->data->dev_conf.rxmode.offloads, "Rx",
			rte_eth_dev_rx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	/* Validate Tx offloads. */
	diag = eth_dev_validate_offloads(port_id,
			dev_conf->txmode.offloads,
			dev->data->dev_conf.txmode.offloads, "Tx",
			rte_eth_dev_tx_offload_name);
	if (diag != 0) {
		ret = diag;
		goto reset_queues;
	}

	dev->data->dev_configured = 1;
	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
	return 0;
reset_queues:
	eth_dev_rx_queue_config(dev, 0);
	eth_dev_tx_queue_config(dev, 0);
rollback:
	memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
	if (old_mtu != dev->data->mtu)
		dev->data->mtu = old_mtu;

	rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
	return ret;
}
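
/*
 * Illustrative caller sketch for the function above (error handling
 * shortened; the single-queue configuration is only an example):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &conf) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */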

static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
			struct rte_eth_dev_info *dev_info)
{
	struct rte_ether_addr *addr;
	uint16_t i;
	uint32_t pool = 0;
	uint64_t pool_mask;

	/* replay MAC address configuration including default MAC */
	addr = &dev->data->mac_addrs[0];
	if (*dev->dev_ops->mac_addr_set != NULL)
		(*dev->dev_ops->mac_addr_set)(dev, addr);
	else if (*dev->dev_ops->mac_addr_add != NULL)
		(*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);

	if (*dev->dev_ops->mac_addr_add != NULL) {
		for (i = 1; i < dev_info->max_mac_addrs; i++) {
			addr = &dev->data->mac_addrs[i];

			/* skip zero address */
			if (rte_is_zero_ether_addr(addr))
				continue;

			pool = 0;
			pool_mask = dev->data->mac_pool_sel[i];

			do {
				if (pool_mask & UINT64_C(1))
					(*dev->dev_ops->mac_addr_add)(dev,
						addr, i, pool);
				pool_mask >>= 1;
				pool++;
			} while (pool_mask);
		}
	}
}

static int
eth_dev_config_restore(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info, uint16_t port_id)
{
	int ret;

	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
		eth_dev_mac_restore(dev, dev_info);

	/*
	 * Replay the promiscuous configuration.
	 * Use the callbacks directly, since the port_id check is not needed
	 * here and we want to bypass the short-circuit for an unchanged value.
	 */
	if (rte_eth_promiscuous_get(port_id) == 1 &&
	    *dev->dev_ops->promiscuous_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
		   *dev->dev_ops->promiscuous_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->promiscuous_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable promiscuous mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	/*
	 * Replay the allmulticast configuration.
	 * Use the callbacks directly, since the port_id check is not needed
	 * here and we want to bypass the short-circuit for an unchanged value.
	 */
	if (rte_eth_allmulticast_get(port_id) == 1 &&
	    *dev->dev_ops->allmulticast_enable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_enable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to enable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	} else if (rte_eth_allmulticast_get(port_id) == 0 &&
		   *dev->dev_ops->allmulticast_disable != NULL) {
		ret = eth_err(port_id,
			      (*dev->dev_ops->allmulticast_disable)(dev));
		if (ret != 0 && ret != -ENOTSUP) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to disable allmulticast mode for device (port %u): %s\n",
				port_id, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
rte_eth_dev_start(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;
	int ret, ret_stop;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_start == NULL)
		return -ENOTSUP;

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already started\n",
			port_id);
		return 0;
	}

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Restore the MAC address now if the device cannot change it live */
	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
		eth_dev_mac_restore(dev, &dev_info);

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return eth_err(port_id, diag);

	ret = eth_dev_config_restore(dev, &dev_info, port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Error during restoring configuration for device (port %u): %s\n",
			port_id, rte_strerror(-ret));
		ret_stop = rte_eth_dev_stop(port_id);
		if (ret_stop != 0) {
			RTE_ETHDEV_LOG(ERR,
				"Failed to stop device (port %u): %s\n",
				port_id, rte_strerror(-ret_stop));
		}

		return ret;
	}

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		if (*dev->dev_ops->link_update == NULL)
			return -ENOTSUP;
		(*dev->dev_ops->link_update)(dev, 0);
	}

	/* expose selection of PMD fast-path functions */
	eth_dev_fp_ops_setup(rte_eth_fp_ops + port_id, dev);

	rte_ethdev_trace_start(port_id);
	return 0;
}
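
/*
 * Illustrative bring-up order for the start path above (a sketch; conf and
 * mb_pool are assumed to exist, and the queue setup functions are defined
 * later in this file):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL, mb_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY, NULL);
 *	rte_eth_dev_start(port_id);
 */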

int
rte_eth_dev_stop(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_stop == NULL)
		return -ENOTSUP;

	if (dev->data->dev_started == 0) {
		RTE_ETHDEV_LOG(INFO,
			"Device with port_id=%"PRIu16" already stopped\n",
			port_id);
		return 0;
	}

	/* point fast-path functions to dummy ones */
	eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret == 0)
		dev->data->dev_started = 0;
	rte_ethdev_trace_stop(port_id, ret);

	return ret;
}

int
rte_eth_dev_set_link_up(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_up == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
}

int
rte_eth_dev_set_link_down(uint16_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_set_link_down == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
}

int
rte_eth_dev_close(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int firsterr, binerr;
	int *lasterr = &firsterr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*
	 * A secondary process needs to close the device to release its
	 * process-private resources. However, a secondary process should not
	 * be obliged to wait for the device to stop before closing the ethdev.
	 */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			dev->data->dev_started) {
		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
			       port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->dev_close == NULL)
		return -ENOTSUP;
	*lasterr = (*dev->dev_ops->dev_close)(dev);
	if (*lasterr != 0)
		lasterr = &binerr;

	rte_ethdev_trace_close(port_id);
	*lasterr = rte_eth_dev_release_port(dev);

	return firsterr;
}

int
rte_eth_dev_reset(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->dev_reset == NULL)
		return -ENOTSUP;

	ret = rte_eth_dev_stop(port_id);
	if (ret != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Failed to stop device (port %u) before reset: %s - ignore\n",
			port_id, rte_strerror(-ret));
	}
	ret = dev->dev_ops->dev_reset(dev);

	return eth_err(port_id, ret);
}

int
rte_eth_dev_is_removed(uint16_t port_id)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
	dev = &rte_eth_devices[port_id];

	if (dev->state == RTE_ETH_DEV_REMOVED)
		return 1;

	if (*dev->dev_ops->is_removed == NULL)
		return 0;

	ret = dev->dev_ops->is_removed(dev);
	if (ret != 0)
		/* Device is physically removed. */
		dev->state = RTE_ETH_DEV_REMOVED;

	return ret;
}

static int
rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
			 uint16_t min_length)
{
	uint16_t data_room_size;

	/*
	 * Check the size of the mbuf data buffer: this value
	 * must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	if (mp->private_data_size <
			sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
			mp->name, mp->private_data_size,
			(unsigned int)
			sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	data_room_size = rte_pktmbuf_data_room_size(mp);
	if (data_room_size < offset + min_length) {
		RTE_ETHDEV_LOG(ERR,
			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
			       mp->name, data_room_size,
			       offset + min_length, offset, min_length);
		return -EINVAL;
	}
	return 0;
}
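
/*
 * Example (illustrative, "rx_pool" is a hypothetical name): for a device
 * with min_rx_bufsize = 1024, the check above passes with
 * offset = RTE_PKTMBUF_HEADROOM for a pool created e.g. as:
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_PKTMBUF_HEADROOM + 1024, rte_socket_id());
 */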

static int
eth_dev_buffer_split_get_supported_hdrs_helper(uint16_t port_id, uint32_t **ptypes)
{
	int cnt;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, NULL, 0);
	if (cnt <= 0)
		return cnt;

	*ptypes = malloc(sizeof(uint32_t) * cnt);
	if (*ptypes == NULL)
		return -ENOMEM;

	cnt = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, *ptypes, cnt);
	if (cnt <= 0) {
		free(*ptypes);
		*ptypes = NULL;
	}
	return cnt;
}

static int
rte_eth_rx_queue_check_split(uint16_t port_id,
			const struct rte_eth_rxseg_split *rx_seg,
			uint16_t n_seg, uint32_t *mbp_buf_size,
			const struct rte_eth_dev_info *dev_info)
{
	const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa;
	struct rte_mempool *mp_first;
	uint32_t offset_mask;
	uint16_t seg_idx;
	int ret = 0;
	int ptype_cnt;
	uint32_t *ptypes;
	uint32_t prev_proto_hdrs = RTE_PTYPE_UNKNOWN;
	int i;

	if (n_seg > seg_capa->max_nseg) {
		RTE_ETHDEV_LOG(ERR,
			       "Requested Rx segments %u exceed supported %u\n",
			       n_seg, seg_capa->max_nseg);
		return -EINVAL;
	}
	/*
	 * Check the sizes and offsets against buffer sizes
	 * for each segment specified in extended configuration.
	 */
	mp_first = rx_seg[0].mp;
	offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;

	ptypes = NULL;
	ptype_cnt = eth_dev_buffer_split_get_supported_hdrs_helper(port_id, &ptypes);

	for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
		struct rte_mempool *mpl = rx_seg[seg_idx].mp;
		uint32_t length = rx_seg[seg_idx].length;
		uint32_t offset = rx_seg[seg_idx].offset;
		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;

		if (mpl == NULL) {
			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
			ret = -EINVAL;
			goto out;
		}
		if (seg_idx != 0 && mp_first != mpl &&
		    seg_capa->multi_pools == 0) {
			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
			ret = -ENOTSUP;
			goto out;
		}
		if (offset != 0) {
			if (seg_capa->offset_allowed == 0) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
				ret = -ENOTSUP;
				goto out;
			}
			if (offset & offset_mask) {
				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
					       offset,
					       seg_capa->offset_align_log2);
				ret = -EINVAL;
				goto out;
			}
		}

		offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM;
		*mbp_buf_size = rte_pktmbuf_data_room_size(mpl);
		if (proto_hdr != 0) {
			/* Split based on protocol headers. */
			if (length != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Do not set length split and protocol split within a segment\n"
					);
				ret = -EINVAL;
				goto out;
			}
			if ((proto_hdr & prev_proto_hdrs) != 0) {
				RTE_ETHDEV_LOG(ERR,
					"Repeat with previous protocol headers or proto-split after length-based split\n"
					);
				ret = -EINVAL;
				goto out;
			}
			if (ptype_cnt <= 0) {
				RTE_ETHDEV_LOG(ERR,
					"Port %u failed to get supported buffer split header protocols\n",
					port_id);
				ret = -ENOTSUP;
				goto out;
			}
			for (i = 0; i < ptype_cnt; i++) {
				if ((prev_proto_hdrs | proto_hdr) == ptypes[i])
					break;
			}
			if (i == ptype_cnt) {
				RTE_ETHDEV_LOG(ERR,
					"Requested Rx split header protocols 0x%x are not supported.\n",
					proto_hdr);
				ret = -EINVAL;
				goto out;
			}
			prev_proto_hdrs |= proto_hdr;
		} else {
			/* Split at fixed length. */
			length = length != 0 ? length : *mbp_buf_size;
			prev_proto_hdrs = RTE_PTYPE_ALL_MASK;
		}

		ret = rte_eth_check_rx_mempool(mpl, offset, length);
		if (ret != 0)
			goto out;
	}
out:
	free(ptypes);
	return ret;
}
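
/*
 * Illustrative buffer-split configuration validated by the function above
 * (a sketch; hdr_pool and pay_pool are assumed, pre-created mempools):
 *
 *	struct rte_eth_rxseg_split segs[2] = {
 *		{ .mp = hdr_pool, .length = 128 },
 *		{ .mp = pay_pool, .length = 0 },	(0: remainder of buffer)
 *	};
 *	rxconf.rx_seg = (union rte_eth_rxseg *)segs;
 *	rxconf.rx_nseg = 2;
 */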
1825 
1826 static int
1827 rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
1828 			       uint16_t n_mempools, uint32_t *min_buf_size,
1829 			       const struct rte_eth_dev_info *dev_info)
1830 {
1831 	uint16_t pool_idx;
1832 	int ret;
1833 
1834 	if (n_mempools > dev_info->max_rx_mempools) {
1835 		RTE_ETHDEV_LOG(ERR,
1836 			       "Too many Rx mempools %u vs maximum %u\n",
1837 			       n_mempools, dev_info->max_rx_mempools);
1838 		return -EINVAL;
1839 	}
1840 
1841 	for (pool_idx = 0; pool_idx < n_mempools; pool_idx++) {
1842 		struct rte_mempool *mp = rx_mempools[pool_idx];
1843 
1844 		if (mp == NULL) {
1845 			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
1846 			return -EINVAL;
1847 		}
1848 
1849 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
1850 					       dev_info->min_rx_bufsize);
1851 		if (ret != 0)
1852 			return ret;
1853 
1854 		*min_buf_size = RTE_MIN(*min_buf_size,
1855 					rte_pktmbuf_data_room_size(mp));
1856 	}
1857 
1858 	return 0;
1859 }
1860 
1861 int
1862 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1863 		       uint16_t nb_rx_desc, unsigned int socket_id,
1864 		       const struct rte_eth_rxconf *rx_conf,
1865 		       struct rte_mempool *mp)
1866 {
1867 	int ret;
1868 	uint64_t rx_offloads;
1869 	uint32_t mbp_buf_size = UINT32_MAX;
1870 	struct rte_eth_dev *dev;
1871 	struct rte_eth_dev_info dev_info;
1872 	struct rte_eth_rxconf local_conf;
1873 
1874 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1875 	dev = &rte_eth_devices[port_id];
1876 
1877 	if (rx_queue_id >= dev->data->nb_rx_queues) {
1878 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
1879 		return -EINVAL;
1880 	}
1881 
1882 	if (*dev->dev_ops->rx_queue_setup == NULL)
1883 		return -ENOTSUP;
1884 
1885 	ret = rte_eth_dev_info_get(port_id, &dev_info);
1886 	if (ret != 0)
1887 		return ret;
1888 
1889 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
1890 	if (rx_conf != NULL)
1891 		rx_offloads |= rx_conf->offloads;
1892 
1893 	/* Ensure that we have one and only one source of Rx buffers */
1894 	if ((mp != NULL) +
1895 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
1896 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
1897 		RTE_ETHDEV_LOG(ERR,
1898 			       "Ambiguous Rx mempools configuration\n");
1899 		return -EINVAL;
1900 	}
1901 
1902 	if (mp != NULL) {
1903 		/* Single pool configuration check. */
1904 		ret = rte_eth_check_rx_mempool(mp, RTE_PKTMBUF_HEADROOM,
1905 					       dev_info.min_rx_bufsize);
1906 		if (ret != 0)
1907 			return ret;
1908 
1909 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1910 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
1911 		const struct rte_eth_rxseg_split *rx_seg;
1912 		uint16_t n_seg;
1913 
1914 		/* Extended multi-segment configuration check. */
1915 		if (rx_conf->rx_seg == NULL) {
1916 			RTE_ETHDEV_LOG(ERR,
1917 				       "Multi-segment configuration requested but the rx_seg array is NULL\n");
1918 			return -EINVAL;
1919 		}
1920 
1921 		rx_seg = (const struct rte_eth_rxseg_split *)rx_conf->rx_seg;
1922 		n_seg = rx_conf->rx_nseg;
1923 
1924 		if (rx_offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
1925 			ret = rte_eth_rx_queue_check_split(port_id, rx_seg, n_seg,
1926 							   &mbp_buf_size,
1927 							   &dev_info);
1928 			if (ret != 0)
1929 				return ret;
1930 		} else {
1931 			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
1932 			return -EINVAL;
1933 		}
1934 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
1935 		/* Extended multi-pool configuration check. */
1936 		if (rx_conf->rx_mempools == NULL) {
1937 			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
1938 			return -EINVAL;
1939 		}
1940 
1941 		ret = rte_eth_rx_queue_check_mempools(rx_conf->rx_mempools,
1942 						     rx_conf->rx_nmempool,
1943 						     &mbp_buf_size,
1944 						     &dev_info);
1945 		if (ret != 0)
1946 			return ret;
1947 	} else {
1948 		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
1949 		return -EINVAL;
1950 	}
1951 
1952 	/* Use default specified by driver, if nb_rx_desc is zero */
1953 	if (nb_rx_desc == 0) {
1954 		nb_rx_desc = dev_info.default_rxportconf.ring_size;
1955 		/* If driver default is also zero, fall back on EAL default */
1956 		if (nb_rx_desc == 0)
1957 			nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1958 	}
1959 
1960 	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1961 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1962 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1963 
1964 		RTE_ETHDEV_LOG(ERR,
1965 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
1966 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1967 			dev_info.rx_desc_lim.nb_min,
1968 			dev_info.rx_desc_lim.nb_align);
1969 		return -EINVAL;
1970 	}
1971 
1972 	if (dev->data->dev_started &&
1973 		!(dev_info.dev_capa &
1974 			RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1975 		return -EBUSY;
1976 
1977 	if (dev->data->dev_started &&
1978 		(dev->data->rx_queue_state[rx_queue_id] !=
1979 			RTE_ETH_QUEUE_STATE_STOPPED))
1980 		return -EBUSY;
1981 
1982 	eth_dev_rxq_release(dev, rx_queue_id);
1983 
1984 	if (rx_conf == NULL)
1985 		rx_conf = &dev_info.default_rxconf;
1986 
1987 	local_conf = *rx_conf;
1988 
1989 	/*
1990 	 * If an offload has already been enabled in
1991 	 * rte_eth_dev_configure(), it has been enabled on all queues,
1992 	 * so there is no need to enable it on this queue again.
1993 	 * The local_conf.offloads passed to the underlying PMD carries
1994 	 * only those offloads that are enabled on this queue alone and
1995 	 * not already enabled on all queues.
1996 	 */
1997 	local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1998 
1999 	/*
2000 	 * Offloads newly added for this queue are those not enabled in
2001 	 * rte_eth_dev_configure(), and they must be of the per-queue type.
2002 	 * A pure per-port offload can't be enabled on one queue while
2003 	 * disabled on another. Likewise, a pure per-port offload can't be
2004 	 * newly added on a single queue if it hasn't already been
2005 	 * enabled in rte_eth_dev_configure().
2006 	 */
2007 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
2008 	     local_conf.offloads) {
2009 		RTE_ETHDEV_LOG(ERR,
2010 			"Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2011 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2012 			port_id, rx_queue_id, local_conf.offloads,
2013 			dev_info.rx_queue_offload_capa,
2014 			__func__);
2015 		return -EINVAL;
2016 	}
2017 
2018 	if (local_conf.share_group > 0 &&
2019 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
2020 		RTE_ETHDEV_LOG(ERR,
2021 			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
2022 			port_id, rx_queue_id, local_conf.share_group);
2023 		return -EINVAL;
2024 	}
2025 
2026 	/*
2027 	 * If LRO is enabled, check that the maximum aggregated packet
2028 	 * size is supported by the configured device.
2029 	 */
2030 	/* Get the real Ethernet overhead length */
2031 	if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2032 		uint32_t overhead_len;
2033 		uint32_t max_rx_pktlen;
2034 		int ret;
2035 
2036 		overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
2037 				dev_info.max_mtu);
2038 		max_rx_pktlen = dev->data->mtu + overhead_len;
2039 		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
2040 			dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
2041 		ret = eth_dev_check_lro_pkt_size(port_id,
2042 				dev->data->dev_conf.rxmode.max_lro_pkt_size,
2043 				max_rx_pktlen,
2044 				dev_info.max_lro_pkt_size);
2045 		if (ret != 0)
2046 			return ret;
2047 	}
2048 
2049 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
2050 					      socket_id, &local_conf, mp);
2051 	if (!ret) {
2052 		if (!dev->data->min_rx_buf_size ||
2053 		    dev->data->min_rx_buf_size > mbp_buf_size)
2054 			dev->data->min_rx_buf_size = mbp_buf_size;
2055 	}
2056 
2057 	rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
2058 		rx_conf, ret);
2059 	return eth_err(port_id, ret);
2060 }
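
/*
 * Usage sketch for the function above (illustrative only; assumes the port
 * was configured with rte_eth_dev_configure(), that "port_id" is valid,
 * and that the pool sizing values are placeholders):
 *
 *	struct rte_mempool *pool;
 *	int rc;
 *
 *	pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	if (pool == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, pool);
 *	if (rc < 0)
 *		rte_exit(EXIT_FAILURE, "Rx queue setup failed: %d\n", rc);
 */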
2061 
2062 int
2063 rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2064 			       uint16_t nb_rx_desc,
2065 			       const struct rte_eth_hairpin_conf *conf)
2066 {
2067 	int ret;
2068 	struct rte_eth_dev *dev;
2069 	struct rte_eth_hairpin_cap cap;
2070 	int i;
2071 	int count;
2072 
2073 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2074 	dev = &rte_eth_devices[port_id];
2075 
2076 	if (rx_queue_id >= dev->data->nb_rx_queues) {
2077 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
2078 		return -EINVAL;
2079 	}
2080 
2081 	if (conf == NULL) {
2082 		RTE_ETHDEV_LOG(ERR,
2083 			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
2084 			port_id);
2085 		return -EINVAL;
2086 	}
2087 
2088 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2089 	if (ret != 0)
2090 		return ret;
2091 	if (*dev->dev_ops->rx_hairpin_queue_setup == NULL)
2092 		return -ENOTSUP;
2093 	/* if nb_rx_desc is zero use max number of desc from the driver. */
2094 	if (nb_rx_desc == 0)
2095 		nb_rx_desc = cap.max_nb_desc;
2096 	if (nb_rx_desc > cap.max_nb_desc) {
2097 		RTE_ETHDEV_LOG(ERR,
2098 			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
2099 			nb_rx_desc, cap.max_nb_desc);
2100 		return -EINVAL;
2101 	}
2102 	if (conf->peer_count > cap.max_rx_2_tx) {
2103 		RTE_ETHDEV_LOG(ERR,
2104 			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
2105 			conf->peer_count, cap.max_rx_2_tx);
2106 		return -EINVAL;
2107 	}
2108 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
2109 		RTE_ETHDEV_LOG(ERR,
2110 			"Attempt to use locked device memory for Rx queue, which is not supported\n");
2111 		return -EINVAL;
2112 	}
2113 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
2114 		RTE_ETHDEV_LOG(ERR,
2115 			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
2116 		return -EINVAL;
2117 	}
2118 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2119 		RTE_ETHDEV_LOG(ERR,
2120 			"Attempt to use mutually exclusive memory settings for Rx queue\n");
2121 		return -EINVAL;
2122 	}
2123 	if (conf->force_memory &&
2124 	    !conf->use_locked_device_memory &&
2125 	    !conf->use_rte_memory) {
2126 		RTE_ETHDEV_LOG(ERR,
2127 			"Attempt to force Rx queue memory settings, but none is set\n");
2128 		return -EINVAL;
2129 	}
2130 	if (conf->peer_count == 0) {
2131 		RTE_ETHDEV_LOG(ERR,
2132 			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
2133 			conf->peer_count);
2134 		return -EINVAL;
2135 	}
2136 	for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
2137 	     cap.max_nb_queues != UINT16_MAX; i++) {
2138 		if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
2139 			count++;
2140 	}
2141 	if (count > cap.max_nb_queues) {
2142 		RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d\n",
2143 			       cap.max_nb_queues);
2144 		return -EINVAL;
2145 	}
2146 	if (dev->data->dev_started)
2147 		return -EBUSY;
2148 	eth_dev_rxq_release(dev, rx_queue_id);
2149 	ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2150 						      nb_rx_desc, conf);
2151 	if (ret == 0)
2152 		dev->data->rx_queue_state[rx_queue_id] =
2153 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2154 	return eth_err(port_id, ret);
2155 }
2156 
2157 int
2158 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2159 		       uint16_t nb_tx_desc, unsigned int socket_id,
2160 		       const struct rte_eth_txconf *tx_conf)
2161 {
2162 	struct rte_eth_dev *dev;
2163 	struct rte_eth_dev_info dev_info;
2164 	struct rte_eth_txconf local_conf;
2165 	int ret;
2166 
2167 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2168 	dev = &rte_eth_devices[port_id];
2169 
2170 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2171 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2172 		return -EINVAL;
2173 	}
2174 
2175 	if (*dev->dev_ops->tx_queue_setup == NULL)
2176 		return -ENOTSUP;
2177 
2178 	ret = rte_eth_dev_info_get(port_id, &dev_info);
2179 	if (ret != 0)
2180 		return ret;
2181 
2182 	/* Use default specified by driver, if nb_tx_desc is zero */
2183 	if (nb_tx_desc == 0) {
2184 		nb_tx_desc = dev_info.default_txportconf.ring_size;
2185 		/* If driver default is zero, fall back on EAL default */
2186 		if (nb_tx_desc == 0)
2187 			nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2188 	}
2189 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2190 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2191 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2192 		RTE_ETHDEV_LOG(ERR,
2193 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2194 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2195 			dev_info.tx_desc_lim.nb_min,
2196 			dev_info.tx_desc_lim.nb_align);
2197 		return -EINVAL;
2198 	}
2199 
2200 	if (dev->data->dev_started &&
2201 		!(dev_info.dev_capa &
2202 			RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2203 		return -EBUSY;
2204 
2205 	if (dev->data->dev_started &&
2206 		(dev->data->tx_queue_state[tx_queue_id] !=
2207 			RTE_ETH_QUEUE_STATE_STOPPED))
2208 		return -EBUSY;
2209 
2210 	eth_dev_txq_release(dev, tx_queue_id);
2211 
2212 	if (tx_conf == NULL)
2213 		tx_conf = &dev_info.default_txconf;
2214 
2215 	local_conf = *tx_conf;
2216 
2217 	/*
2218 	 * If an offload has already been enabled in
2219 	 * rte_eth_dev_configure(), it has been enabled on all queues,
2220 	 * so there is no need to enable it on this queue again.
2221 	 * The local_conf.offloads passed to the underlying PMD carries
2222 	 * only those offloads that are enabled on this queue alone and
2223 	 * not already enabled on all queues.
2224 	 */
2225 	local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2226 
2227 	/*
2228 	 * Offloads newly added for this queue are those not enabled in
2229 	 * rte_eth_dev_configure(), and they must be of the per-queue type.
2230 	 * A pure per-port offload can't be enabled on one queue while
2231 	 * disabled on another. Likewise, a pure per-port offload can't be
2232 	 * newly added on a single queue if it hasn't already been
2233 	 * enabled in rte_eth_dev_configure().
2234 	 */
2235 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2236 	     local_conf.offloads) {
2237 		RTE_ETHDEV_LOG(ERR,
2238 			"Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
2239 			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2240 			port_id, tx_queue_id, local_conf.offloads,
2241 			dev_info.tx_queue_offload_capa,
2242 			__func__);
2243 		return -EINVAL;
2244 	}
2245 
2246 	rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2247 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2248 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2249 }
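
/*
 * Usage sketch for the function above (illustrative only): no mempool is
 * needed on the Tx side, and passing NULL for tx_conf selects the driver
 * defaults reported by rte_eth_dev_info_get().
 *
 *	int rc = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL);
 *	if (rc < 0)
 *		rte_exit(EXIT_FAILURE, "Tx queue setup failed: %d\n", rc);
 */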
2250 
2251 int
2252 rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2253 			       uint16_t nb_tx_desc,
2254 			       const struct rte_eth_hairpin_conf *conf)
2255 {
2256 	struct rte_eth_dev *dev;
2257 	struct rte_eth_hairpin_cap cap;
2258 	int i;
2259 	int count;
2260 	int ret;
2261 
2262 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2263 	dev = &rte_eth_devices[port_id];
2264 
2265 	if (tx_queue_id >= dev->data->nb_tx_queues) {
2266 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
2267 		return -EINVAL;
2268 	}
2269 
2270 	if (conf == NULL) {
2271 		RTE_ETHDEV_LOG(ERR,
2272 			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
2273 			port_id);
2274 		return -EINVAL;
2275 	}
2276 
2277 	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2278 	if (ret != 0)
2279 		return ret;
2280 	if (*dev->dev_ops->tx_hairpin_queue_setup == NULL)
2281 		return -ENOTSUP;
2282 	/* if nb_tx_desc is zero use max number of desc from the driver. */
2283 	if (nb_tx_desc == 0)
2284 		nb_tx_desc = cap.max_nb_desc;
2285 	if (nb_tx_desc > cap.max_nb_desc) {
2286 		RTE_ETHDEV_LOG(ERR,
2287 			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
2288 			nb_tx_desc, cap.max_nb_desc);
2289 		return -EINVAL;
2290 	}
2291 	if (conf->peer_count > cap.max_tx_2_rx) {
2292 		RTE_ETHDEV_LOG(ERR,
2293 			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
2294 			conf->peer_count, cap.max_tx_2_rx);
2295 		return -EINVAL;
2296 	}
2297 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
2298 		RTE_ETHDEV_LOG(ERR,
2299 			"Attempt to use locked device memory for Tx queue, which is not supported\n");
2300 		return -EINVAL;
2301 	}
2302 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
2303 		RTE_ETHDEV_LOG(ERR,
2304 			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
2305 		return -EINVAL;
2306 	}
2307 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
2308 		RTE_ETHDEV_LOG(ERR,
2309 			"Attempt to use mutually exclusive memory settings for Tx queue\n");
2310 		return -EINVAL;
2311 	}
2312 	if (conf->force_memory &&
2313 	    !conf->use_locked_device_memory &&
2314 	    !conf->use_rte_memory) {
2315 			"Attempt to force Tx queue memory settings, but none is set\n");
2316 			"Attempt to force Tx queue memory settings, but none is set");
2317 		return -EINVAL;
2318 	}
2319 	if (conf->peer_count == 0) {
2320 		RTE_ETHDEV_LOG(ERR,
2321 			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
2322 			conf->peer_count);
2323 		return -EINVAL;
2324 	}
2325 	for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2326 	     cap.max_nb_queues != UINT16_MAX; i++) {
2327 		if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2328 			count++;
2329 	}
2330 	if (count > cap.max_nb_queues) {
2331 		RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d\n",
2332 			       cap.max_nb_queues);
2333 		return -EINVAL;
2334 	}
2335 	if (dev->data->dev_started)
2336 		return -EBUSY;
2337 	eth_dev_txq_release(dev, tx_queue_id);
2338 	ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2339 		(dev, tx_queue_id, nb_tx_desc, conf);
2340 	if (ret == 0)
2341 		dev->data->tx_queue_state[tx_queue_id] =
2342 			RTE_ETH_QUEUE_STATE_HAIRPIN;
2343 	return eth_err(port_id, ret);
2344 }
2345 
2346 int
2347 rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
2348 {
2349 	struct rte_eth_dev *dev;
2350 	int ret;
2351 
2352 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2353 	dev = &rte_eth_devices[tx_port];
2354 
2355 	if (dev->data->dev_started == 0) {
2356 		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
2357 		return -EBUSY;
2358 	}
2359 
2360 	if (*dev->dev_ops->hairpin_bind == NULL)
2361 		return -ENOTSUP;
2362 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
2363 	if (ret != 0)
2364 		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
2365 			       " to Rx %d (%d - all ports)\n",
2366 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2367 
2368 	return ret;
2369 }
2370 
2371 int
2372 rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
2373 {
2374 	struct rte_eth_dev *dev;
2375 	int ret;
2376 
2377 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port, -ENODEV);
2378 	dev = &rte_eth_devices[tx_port];
2379 
2380 	if (dev->data->dev_started == 0) {
2381 		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
2382 		return -EBUSY;
2383 	}
2384 
2385 	if (*dev->dev_ops->hairpin_unbind == NULL)
2386 		return -ENOTSUP;
2387 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
2388 	if (ret != 0)
2389 		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
2390 			       " from Rx %d (%d - all ports)\n",
2391 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
2392 
2393 	return ret;
2394 }
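
/*
 * Sketch of the manual two-port hairpin bind flow (illustrative only;
 * both ports are assumed to already have hairpin queues set up, and
 * handle_error() stands in for application error handling):
 *
 *	rte_eth_dev_start(tx_port);
 *	rte_eth_dev_start(rx_port);
 *	if (rte_eth_hairpin_bind(tx_port, rx_port) != 0)
 *		handle_error();
 *	...
 *	rte_eth_hairpin_unbind(tx_port, rx_port);
 */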
2395 
2396 int
2397 rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2398 			       size_t len, uint32_t direction)
2399 {
2400 	struct rte_eth_dev *dev;
2401 	int ret;
2402 
2403 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404 	dev = &rte_eth_devices[port_id];
2405 
2406 	if (peer_ports == NULL) {
2407 		RTE_ETHDEV_LOG(ERR,
2408 			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
2409 			port_id);
2410 		return -EINVAL;
2411 	}
2412 
2413 	if (len == 0) {
2414 		RTE_ETHDEV_LOG(ERR,
2415 			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
2416 			port_id);
2417 		return -EINVAL;
2418 	}
2419 
2420 	if (*dev->dev_ops->hairpin_get_peer_ports == NULL)
2421 		return -ENOTSUP;
2422 
2423 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
2424 						      len, direction);
2425 	if (ret < 0)
2426 		RTE_ETHDEV_LOG(ERR, "Failed to get port %d hairpin peer %s ports\n",
2427 			       port_id, direction ? "Rx" : "Tx");
2428 
2429 	return ret;
2430 }
2431 
2432 void
2433 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2434 		void *userdata __rte_unused)
2435 {
2436 	rte_pktmbuf_free_bulk(pkts, unsent);
2437 }
2438 
2439 void
2440 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2441 		void *userdata)
2442 {
2443 	uint64_t *count = userdata;
2444 
2445 	rte_pktmbuf_free_bulk(pkts, unsent);
2446 	*count += unsent;
2447 }
2448 
2449 int
2450 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2451 		buffer_tx_error_fn cbfn, void *userdata)
2452 {
2453 	if (buffer == NULL) {
2454 		RTE_ETHDEV_LOG(ERR,
2455 			"Cannot set Tx buffer error callback to NULL buffer\n");
2456 		return -EINVAL;
2457 	}
2458 
2459 	buffer->error_callback = cbfn;
2460 	buffer->error_userdata = userdata;
2461 	return 0;
2462 }
2463 
2464 int
2465 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2466 {
2467 	int ret = 0;
2468 
2469 	if (buffer == NULL) {
2470 		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
2471 		return -EINVAL;
2472 	}
2473 
2474 	buffer->size = size;
2475 	if (buffer->error_callback == NULL) {
2476 		ret = rte_eth_tx_buffer_set_err_callback(
2477 			buffer, rte_eth_tx_buffer_drop_callback, NULL);
2478 	}
2479 
2480 	return ret;
2481 }
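
/*
 * Typical Tx buffer usage (illustrative sketch; "pkt" is an mbuf obtained
 * elsewhere and error checks are trimmed). A drop counter is attached via
 * the count callback defined above.
 *
 *	static uint64_t tx_drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc_socket("tx_buf", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &tx_drops);
 *
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);	(enqueue one mbuf)
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);	(send what is left)
 */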
2482 
2483 int
2484 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2485 {
2486 	struct rte_eth_dev *dev;
2487 	int ret;
2488 
2489 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2490 	dev = &rte_eth_devices[port_id];
2491 
2492 	if (*dev->dev_ops->tx_done_cleanup == NULL)
2493 		return -ENOTSUP;
2494 
2495 	/* Call driver to free pending mbufs. */
2496 	ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2497 					       free_cnt);
2498 	return eth_err(port_id, ret);
2499 }
2500 
2501 int
2502 rte_eth_promiscuous_enable(uint16_t port_id)
2503 {
2504 	struct rte_eth_dev *dev;
2505 	int diag = 0;
2506 
2507 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2508 	dev = &rte_eth_devices[port_id];
2509 
2510 	if (dev->data->promiscuous == 1)
2511 		return 0;
2512 
2513 	if (*dev->dev_ops->promiscuous_enable == NULL)
2514 		return -ENOTSUP;
2515 
2516 	diag = (*dev->dev_ops->promiscuous_enable)(dev);
2517 	dev->data->promiscuous = (diag == 0) ? 1 : 0;
2518 
2519 	return eth_err(port_id, diag);
2520 }
2521 
2522 int
2523 rte_eth_promiscuous_disable(uint16_t port_id)
2524 {
2525 	struct rte_eth_dev *dev;
2526 	int diag = 0;
2527 
2528 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2529 	dev = &rte_eth_devices[port_id];
2530 
2531 	if (dev->data->promiscuous == 0)
2532 		return 0;
2533 
2534 	if (*dev->dev_ops->promiscuous_disable == NULL)
2535 		return -ENOTSUP;
2536 
2537 	dev->data->promiscuous = 0;
2538 	diag = (*dev->dev_ops->promiscuous_disable)(dev);
2539 	if (diag != 0)
2540 		dev->data->promiscuous = 1;
2541 
2542 	return eth_err(port_id, diag);
2543 }
2544 
2545 int
2546 rte_eth_promiscuous_get(uint16_t port_id)
2547 {
2548 	struct rte_eth_dev *dev;
2549 
2550 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2551 	dev = &rte_eth_devices[port_id];
2552 
2553 	return dev->data->promiscuous;
2554 }
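
/*
 * Usage sketch (illustrative only): enable promiscuous mode and confirm
 * that it took effect; the getter simply reflects the cached device state.
 *
 *	if (rte_eth_promiscuous_enable(port_id) != 0 ||
 *	    rte_eth_promiscuous_get(port_id) != 1)
 *		printf("promiscuous mode not enabled on port %u\n", port_id);
 */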
2555 
2556 int
2557 rte_eth_allmulticast_enable(uint16_t port_id)
2558 {
2559 	struct rte_eth_dev *dev;
2560 	int diag;
2561 
2562 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2563 	dev = &rte_eth_devices[port_id];
2564 
2565 	if (dev->data->all_multicast == 1)
2566 		return 0;
2567 
2568 	if (*dev->dev_ops->allmulticast_enable == NULL)
2569 		return -ENOTSUP;
2570 	diag = (*dev->dev_ops->allmulticast_enable)(dev);
2571 	dev->data->all_multicast = (diag == 0) ? 1 : 0;
2572 
2573 	return eth_err(port_id, diag);
2574 }
2575 
2576 int
2577 rte_eth_allmulticast_disable(uint16_t port_id)
2578 {
2579 	struct rte_eth_dev *dev;
2580 	int diag;
2581 
2582 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2583 	dev = &rte_eth_devices[port_id];
2584 
2585 	if (dev->data->all_multicast == 0)
2586 		return 0;
2587 
2588 	if (*dev->dev_ops->allmulticast_disable == NULL)
2589 		return -ENOTSUP;
2590 	dev->data->all_multicast = 0;
2591 	diag = (*dev->dev_ops->allmulticast_disable)(dev);
2592 	if (diag != 0)
2593 		dev->data->all_multicast = 1;
2594 
2595 	return eth_err(port_id, diag);
2596 }
2597 
2598 int
2599 rte_eth_allmulticast_get(uint16_t port_id)
2600 {
2601 	struct rte_eth_dev *dev;
2602 
2603 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2604 	dev = &rte_eth_devices[port_id];
2605 
2606 	return dev->data->all_multicast;
2607 }
2608 
2609 int
2610 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2611 {
2612 	struct rte_eth_dev *dev;
2613 
2614 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2615 	dev = &rte_eth_devices[port_id];
2616 
2617 	if (eth_link == NULL) {
2618 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2619 			port_id);
2620 		return -EINVAL;
2621 	}
2622 
2623 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2624 		rte_eth_linkstatus_get(dev, eth_link);
2625 	else {
2626 		if (*dev->dev_ops->link_update == NULL)
2627 			return -ENOTSUP;
2628 		(*dev->dev_ops->link_update)(dev, 1);
2629 		*eth_link = dev->data->dev_link;
2630 	}
2631 
2632 	return 0;
2633 }
2634 
2635 int
2636 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2637 {
2638 	struct rte_eth_dev *dev;
2639 
2640 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2641 	dev = &rte_eth_devices[port_id];
2642 
2643 	if (eth_link == NULL) {
2644 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
2645 			port_id);
2646 		return -EINVAL;
2647 	}
2648 
2649 	if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started)
2650 		rte_eth_linkstatus_get(dev, eth_link);
2651 	else {
2652 		if (*dev->dev_ops->link_update == NULL)
2653 			return -ENOTSUP;
2654 		(*dev->dev_ops->link_update)(dev, 0);
2655 		*eth_link = dev->data->dev_link;
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 const char *
2662 rte_eth_link_speed_to_str(uint32_t link_speed)
2663 {
2664 	switch (link_speed) {
2665 	case RTE_ETH_SPEED_NUM_NONE: return "None";
2666 	case RTE_ETH_SPEED_NUM_10M:  return "10 Mbps";
2667 	case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
2668 	case RTE_ETH_SPEED_NUM_1G:   return "1 Gbps";
2669 	case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
2670 	case RTE_ETH_SPEED_NUM_5G:   return "5 Gbps";
2671 	case RTE_ETH_SPEED_NUM_10G:  return "10 Gbps";
2672 	case RTE_ETH_SPEED_NUM_20G:  return "20 Gbps";
2673 	case RTE_ETH_SPEED_NUM_25G:  return "25 Gbps";
2674 	case RTE_ETH_SPEED_NUM_40G:  return "40 Gbps";
2675 	case RTE_ETH_SPEED_NUM_50G:  return "50 Gbps";
2676 	case RTE_ETH_SPEED_NUM_56G:  return "56 Gbps";
2677 	case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
2678 	case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
2679 	case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
2680 	default: return "Invalid";
2681 	}
2682 }
2683 
2684 int
2685 rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
2686 {
2687 	if (str == NULL) {
2688 		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
2689 		return -EINVAL;
2690 	}
2691 
2692 	if (len == 0) {
2693 		RTE_ETHDEV_LOG(ERR,
2694 			"Cannot convert link to string with zero size\n");
2695 		return -EINVAL;
2696 	}
2697 
2698 	if (eth_link == NULL) {
2699 		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
2700 		return -EINVAL;
2701 	}
2702 
2703 	if (eth_link->link_status == RTE_ETH_LINK_DOWN)
2704 		return snprintf(str, len, "Link down");
2705 	else
2706 		return snprintf(str, len, "Link up at %s %s %s",
2707 			rte_eth_link_speed_to_str(eth_link->link_speed),
2708 			(eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
2709 			"FDX" : "HDX",
2710 			(eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
2711 			"Autoneg" : "Fixed");
2712 }
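
/*
 * Usage sketch (illustrative only): poll the link without waiting and
 * render it with the helpers above.
 *
 *	struct rte_eth_link link;
 *	char buf[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *		rte_eth_link_to_str(buf, sizeof(buf), &link);
 *		printf("Port %u: %s\n", port_id, buf);
 *	}
 */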
2713 
2714 int
2715 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2716 {
2717 	struct rte_eth_dev *dev;
2718 
2719 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2720 	dev = &rte_eth_devices[port_id];
2721 
2722 	if (stats == NULL) {
2723 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
2724 			port_id);
2725 		return -EINVAL;
2726 	}
2727 
2728 	memset(stats, 0, sizeof(*stats));
2729 
2730 	if (*dev->dev_ops->stats_get == NULL)
2731 		return -ENOTSUP;
2732 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2733 	return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2734 }
2735 
2736 int
2737 rte_eth_stats_reset(uint16_t port_id)
2738 {
2739 	struct rte_eth_dev *dev;
2740 	int ret;
2741 
2742 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743 	dev = &rte_eth_devices[port_id];
2744 
2745 	if (*dev->dev_ops->stats_reset == NULL)
2746 		return -ENOTSUP;
2747 	ret = (*dev->dev_ops->stats_reset)(dev);
2748 	if (ret != 0)
2749 		return eth_err(port_id, ret);
2750 
2751 	dev->data->rx_mbuf_alloc_failed = 0;
2752 
2753 	return 0;
2754 }
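
/*
 * Usage sketch (illustrative only): read the basic counters, report them,
 * then reset both the driver counters and the library-maintained rx_nombuf.
 *
 *	struct rte_eth_stats st;
 *
 *	if (rte_eth_stats_get(port_id, &st) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " nombuf=%" PRIu64 "\n",
 *		       st.ipackets, st.opackets, st.rx_nombuf);
 *	rte_eth_stats_reset(port_id);
 */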
2755 
2756 static inline int
2757 eth_dev_get_xstats_basic_count(struct rte_eth_dev *dev)
2758 {
2759 	uint16_t nb_rxqs, nb_txqs;
2760 	int count;
2761 
2762 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2763 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2764 
2765 	count = RTE_NB_STATS;
2766 	if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
2767 		count += nb_rxqs * RTE_NB_RXQ_STATS;
2768 		count += nb_txqs * RTE_NB_TXQ_STATS;
2769 	}
2770 
2771 	return count;
2772 }
2773 
2774 static int
2775 eth_dev_get_xstats_count(uint16_t port_id)
2776 {
2777 	struct rte_eth_dev *dev;
2778 	int count;
2779 
2780 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2781 	dev = &rte_eth_devices[port_id];
2782 	if (dev->dev_ops->xstats_get_names != NULL) {
2783 		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2784 		if (count < 0)
2785 			return eth_err(port_id, count);
2786 	} else
2787 		count = 0;
2788 
2789 
2790 	count += eth_dev_get_xstats_basic_count(dev);
2791 
2792 	return count;
2793 }
2794 
2795 int
2796 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2797 		uint64_t *id)
2798 {
2799 	int cnt_xstats, idx_xstat;
2800 
2801 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2802 
2803 	if (xstat_name == NULL) {
2804 		RTE_ETHDEV_LOG(ERR,
2805 			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
2806 			port_id);
2807 		return -ENOMEM;
2808 	}
2809 
2810 	if (id == NULL) {
2811 		RTE_ETHDEV_LOG(ERR,
2812 			"Cannot get ethdev port %u xstats ID to NULL\n",
2813 			port_id);
2814 		return -ENOMEM;
2815 	}
2816 
2817 	/* Get count */
2818 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2819 	if (cnt_xstats < 0) {
2820 		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2821 		return -ENODEV;
2822 	}
2823 
2824 	/* Get id-name lookup table */
2825 	struct rte_eth_xstat_name xstats_names[cnt_xstats];
2826 
2827 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2828 			port_id, xstats_names, cnt_xstats, NULL)) {
2829 		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2830 		return -1;
2831 	}
2832 
2833 	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2834 		if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2835 			*id = idx_xstat;
2836 			return 0;
2837 		}
2838 	}
2839 
2840 	return -EINVAL;
2841 }
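
/*
 * Usage sketch (illustrative only): resolve one counter name to its id,
 * then fetch just that value with rte_eth_xstats_get_by_id().
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %" PRIu64 "\n", value);
 */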
2842 
2843 /* retrieve basic stats names */
2844 static int
2845 eth_basic_stats_get_names(struct rte_eth_dev *dev,
2846 	struct rte_eth_xstat_name *xstats_names)
2847 {
2848 	int cnt_used_entries = 0;
2849 	uint32_t idx, id_queue;
2850 	uint16_t num_q;
2851 
2852 	for (idx = 0; idx < RTE_NB_STATS; idx++) {
2853 		strlcpy(xstats_names[cnt_used_entries].name,
2854 			eth_dev_stats_strings[idx].name,
2855 			sizeof(xstats_names[0].name));
2856 		cnt_used_entries++;
2857 	}
2858 
2859 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
2860 		return cnt_used_entries;
2861 
2862 	num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2863 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2864 		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2865 			snprintf(xstats_names[cnt_used_entries].name,
2866 				sizeof(xstats_names[0].name),
2867 				"rx_q%u_%s",
2868 				id_queue, eth_dev_rxq_stats_strings[idx].name);
2869 			cnt_used_entries++;
2870 		}
2871 
2872 	}
2873 	num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2874 	for (id_queue = 0; id_queue < num_q; id_queue++) {
2875 		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2876 			snprintf(xstats_names[cnt_used_entries].name,
2877 				sizeof(xstats_names[0].name),
2878 				"tx_q%u_%s",
2879 				id_queue, eth_dev_txq_stats_strings[idx].name);
2880 			cnt_used_entries++;
2881 		}
2882 	}
2883 	return cnt_used_entries;
2884 }
2885 
2886 /* retrieve ethdev extended statistics names */
2887 int
2888 rte_eth_xstats_get_names_by_id(uint16_t port_id,
2889 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
2890 	uint64_t *ids)
2891 {
2892 	struct rte_eth_xstat_name *xstats_names_copy;
2893 	unsigned int no_basic_stat_requested = 1;
2894 	unsigned int no_ext_stat_requested = 1;
2895 	unsigned int expected_entries;
2896 	unsigned int basic_count;
2897 	struct rte_eth_dev *dev;
2898 	unsigned int i;
2899 	int ret;
2900 
2901 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2902 	dev = &rte_eth_devices[port_id];
2903 
2904 	basic_count = eth_dev_get_xstats_basic_count(dev);
2905 	ret = eth_dev_get_xstats_count(port_id);
2906 	if (ret < 0)
2907 		return ret;
2908 	expected_entries = (unsigned int)ret;
2909 
2910 	/* Return max number of stats if no ids given */
2911 	if (!ids) {
2912 		if (!xstats_names)
2913 			return expected_entries;
2914 		else if (xstats_names && size < expected_entries)
2915 			return expected_entries;
2916 	}
2917 
2918 	if (ids && !xstats_names)
2919 		return -EINVAL;
2920 
2921 	if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2922 		uint64_t ids_copy[size];
2923 
2924 		for (i = 0; i < size; i++) {
2925 			if (ids[i] < basic_count) {
2926 				no_basic_stat_requested = 0;
2927 				break;
2928 			}
2929 
2930 			/*
2931 			 * Convert ids to xstats ids that PMD knows.
2932 			 * ids known by user are basic + extended stats.
2933 			 */
2934 			ids_copy[i] = ids[i] - basic_count;
2935 		}
2936 
2937 		if (no_basic_stat_requested)
2938 			return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2939 					ids_copy, xstats_names, size);
2940 	}
2941 
2942 	/* Retrieve all stats */
2943 	if (!ids) {
2944 		int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2945 				expected_entries);
2946 		if (num_stats < 0 || num_stats > (int)expected_entries)
2947 			return num_stats;
2948 		else
2949 			return expected_entries;
2950 	}
2951 
2952 	xstats_names_copy = calloc(expected_entries,
2953 		sizeof(struct rte_eth_xstat_name));
2954 
2955 	if (!xstats_names_copy) {
2956 		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2957 		return -ENOMEM;
2958 	}
2959 
2960 	if (ids) {
2961 		for (i = 0; i < size; i++) {
2962 			if (ids[i] >= basic_count) {
2963 				no_ext_stat_requested = 0;
2964 				break;
2965 			}
2966 		}
2967 	}
2968 
2969 	/* Fill xstats_names_copy structure */
2970 	if (ids && no_ext_stat_requested) {
2971 		eth_basic_stats_get_names(dev, xstats_names_copy);
2972 	} else {
2973 		ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2974 			expected_entries);
2975 		if (ret < 0) {
2976 			free(xstats_names_copy);
2977 			return ret;
2978 		}
2979 	}
2980 
2981 	/* Filter stats */
2982 	for (i = 0; i < size; i++) {
2983 		if (ids[i] >= expected_entries) {
2984 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2985 			free(xstats_names_copy);
2986 			return -1;
2987 		}
2988 		xstats_names[i] = xstats_names_copy[ids[i]];
2989 	}
2990 
2991 	free(xstats_names_copy);
2992 	return size;
2993 }
2994 
2995 int
2996 rte_eth_xstats_get_names(uint16_t port_id,
2997 	struct rte_eth_xstat_name *xstats_names,
2998 	unsigned int size)
2999 {
3000 	struct rte_eth_dev *dev;
3001 	int cnt_used_entries;
3002 	int cnt_expected_entries;
3003 	int cnt_driver_entries;
3004 
3005 	cnt_expected_entries = eth_dev_get_xstats_count(port_id);
3006 	if (xstats_names == NULL || cnt_expected_entries < 0 ||
3007 			(int)size < cnt_expected_entries)
3008 		return cnt_expected_entries;
3009 
3010 	/* port_id checked in eth_dev_get_xstats_count() */
3011 	dev = &rte_eth_devices[port_id];
3012 
3013 	cnt_used_entries = eth_basic_stats_get_names(dev, xstats_names);
3014 
3015 	if (dev->dev_ops->xstats_get_names != NULL) {
3016 		/* If there are any driver-specific xstats, append them
3017 		 * to end of list.
3018 		 */
3019 		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
3020 			dev,
3021 			xstats_names + cnt_used_entries,
3022 			size - cnt_used_entries);
3023 		if (cnt_driver_entries < 0)
3024 			return eth_err(port_id, cnt_driver_entries);
3025 		cnt_used_entries += cnt_driver_entries;
3026 	}
3027 
3028 	return cnt_used_entries;
3029 }
3030 
3031 
3032 static int
3033 eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
3034 {
3035 	struct rte_eth_dev *dev;
3036 	struct rte_eth_stats eth_stats;
3037 	unsigned int count = 0, i, q;
3038 	uint64_t val, *stats_ptr;
3039 	uint16_t nb_rxqs, nb_txqs;
3040 	int ret;
3041 
3042 	ret = rte_eth_stats_get(port_id, &eth_stats);
3043 	if (ret < 0)
3044 		return ret;
3045 
3046 	dev = &rte_eth_devices[port_id];
3047 
3048 	nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3049 	nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
3050 
3051 	/* global stats */
3052 	for (i = 0; i < RTE_NB_STATS; i++) {
3053 		stats_ptr = RTE_PTR_ADD(&eth_stats,
3054 					eth_dev_stats_strings[i].offset);
3055 		val = *stats_ptr;
3056 		xstats[count++].value = val;
3057 	}
3058 
3059 	if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
3060 		return count;
3061 
3062 	/* per-rxq stats */
3063 	for (q = 0; q < nb_rxqs; q++) {
3064 		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
3065 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3066 					eth_dev_rxq_stats_strings[i].offset +
3067 					q * sizeof(uint64_t));
3068 			val = *stats_ptr;
3069 			xstats[count++].value = val;
3070 		}
3071 	}
3072 
3073 	/* per-txq stats */
3074 	for (q = 0; q < nb_txqs; q++) {
3075 		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
3076 			stats_ptr = RTE_PTR_ADD(&eth_stats,
3077 					eth_dev_txq_stats_strings[i].offset +
3078 					q * sizeof(uint64_t));
3079 			val = *stats_ptr;
3080 			xstats[count++].value = val;
3081 		}
3082 	}
3083 	return count;
3084 }
3085 
3086 /* retrieve ethdev extended statistics */
3087 int
3088 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3089 			 uint64_t *values, unsigned int size)
3090 {
3091 	unsigned int no_basic_stat_requested = 1;
3092 	unsigned int no_ext_stat_requested = 1;
3093 	unsigned int num_xstats_filled;
3094 	unsigned int basic_count;
3095 	uint16_t expected_entries;
3096 	struct rte_eth_dev *dev;
3097 	unsigned int i;
3098 	int ret;
3099 
3100 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3101 	dev = &rte_eth_devices[port_id];
3102 
3103 	ret = eth_dev_get_xstats_count(port_id);
3104 	if (ret < 0)
3105 		return ret;
3106 	expected_entries = (uint16_t)ret;
3107 	struct rte_eth_xstat xstats[expected_entries];
3108 	basic_count = eth_dev_get_xstats_basic_count(dev);
3109 
3110 	/* Return max number of stats if no ids given */
3111 	if (!ids) {
3112 		if (!values)
3113 			return expected_entries;
3114 		else if (values && size < expected_entries)
3115 			return expected_entries;
3116 	}
3117 
3118 	if (ids && !values)
3119 		return -EINVAL;
3120 
3121 	if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
3122 		unsigned int basic_count = eth_dev_get_xstats_basic_count(dev);
3123 		uint64_t ids_copy[size];
3124 
3125 		for (i = 0; i < size; i++) {
3126 			if (ids[i] < basic_count) {
3127 				no_basic_stat_requested = 0;
3128 				break;
3129 			}
3130 
3131 			/*
3132 			 * Convert ids to xstats ids that PMD knows.
3133 			 * ids known by user are basic + extended stats.
3134 			 */
3135 			ids_copy[i] = ids[i] - basic_count;
3136 		}
3137 
3138 		if (no_basic_stat_requested)
3139 			return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
3140 					values, size);
3141 	}
3142 
3143 	if (ids) {
3144 		for (i = 0; i < size; i++) {
3145 			if (ids[i] >= basic_count) {
3146 				no_ext_stat_requested = 0;
3147 				break;
3148 			}
3149 		}
3150 	}
3151 
3152 	/* Fill the xstats structure */
3153 	if (ids && no_ext_stat_requested)
3154 		ret = eth_basic_stats_get(port_id, xstats);
3155 	else
3156 		ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
3157 
3158 	if (ret < 0)
3159 		return ret;
3160 	num_xstats_filled = (unsigned int)ret;
3161 
3162 	/* Return all stats */
3163 	if (!ids) {
3164 		for (i = 0; i < num_xstats_filled; i++)
3165 			values[i] = xstats[i].value;
3166 		return expected_entries;
3167 	}
3168 
3169 	/* Filter stats */
3170 	for (i = 0; i < size; i++) {
3171 		if (ids[i] >= expected_entries) {
3172 			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
3173 			return -1;
3174 		}
3175 		values[i] = xstats[ids[i]].value;
3176 	}
3177 	return size;
3178 }
3179 
3180 int
3181 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3182 	unsigned int n)
3183 {
3184 	struct rte_eth_dev *dev;
3185 	unsigned int count, i;
3186 	signed int xcount = 0;
3187 	int ret;
3188 
3189 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3190 	if (xstats == NULL && n > 0)
3191 		return -EINVAL;
3192 	dev = &rte_eth_devices[port_id];
3193 
3194 	count = eth_dev_get_xstats_basic_count(dev);
3195 
3196 	/* implemented by the driver */
3197 	if (dev->dev_ops->xstats_get != NULL) {
3198 		/* Retrieve the xstats from the driver at the end of the
3199 		 * xstats array.
3200 		 */
3201 		xcount = (*dev->dev_ops->xstats_get)(dev,
3202 				     (n > count) ? xstats + count : NULL,
3203 				     (n > count) ? n - count : 0);
3204 
3205 		if (xcount < 0)
3206 			return eth_err(port_id, xcount);
3207 	}
3208 
3209 	if (n < count + xcount || xstats == NULL)
3210 		return count + xcount;
3211 
3212 	/* now fill the xstats structure */
3213 	ret = eth_basic_stats_get(port_id, xstats);
3214 	if (ret < 0)
3215 		return ret;
3216 	count = ret;
3217 
3218 	for (i = 0; i < count; i++)
3219 		xstats[i].id = i;
3220 	/* add an offset to driver-specific stats */
3221 	for ( ; i < count + xcount; i++)
3222 		xstats[i].id += count;
3223 
3224 	return count + xcount;
3225 }
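
/*
 * Usage sketch (illustrative only): the usual two-call pattern - query the
 * count with NULL arrays first, then allocate and fetch names and values.
 * Allocation failure handling is trimmed.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *vals;
 *	struct rte_eth_xstat_name *names;
 *
 *	if (n <= 0)
 *		return;
 *	vals = malloc(n * sizeof(*vals));
 *	names = malloc(n * sizeof(*names));
 *	if (rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, vals, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[vals[i].id].name, vals[i].value);
 *	free(vals);
 *	free(names);
 */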
3226 
3227 /* reset ethdev extended statistics */
3228 int
3229 rte_eth_xstats_reset(uint16_t port_id)
3230 {
3231 	struct rte_eth_dev *dev;
3232 
3233 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3234 	dev = &rte_eth_devices[port_id];
3235 
3236 	/* implemented by the driver */
3237 	if (dev->dev_ops->xstats_reset != NULL)
3238 		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
3239 
3240 	/* fallback to default */
3241 	return rte_eth_stats_reset(port_id);
3242 }
3243 
3244 static int
3245 eth_dev_set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id,
3246 		uint8_t stat_idx, uint8_t is_rx)
3247 {
3248 	struct rte_eth_dev *dev;
3249 
3250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3251 	dev = &rte_eth_devices[port_id];
3252 
3253 	if (is_rx && (queue_id >= dev->data->nb_rx_queues))
3254 		return -EINVAL;
3255 
3256 	if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
3257 		return -EINVAL;
3258 
3259 	if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
3260 		return -EINVAL;
3261 
3262 	if (*dev->dev_ops->queue_stats_mapping_set == NULL)
3263 		return -ENOTSUP;
3264 	return (*dev->dev_ops->queue_stats_mapping_set) (dev, queue_id, stat_idx, is_rx);
3265 }
3266 
3267 int
3268 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
3269 		uint8_t stat_idx)
3270 {
3271 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3272 						tx_queue_id,
3273 						stat_idx, STAT_QMAP_TX));
3274 }
3275 
3276 int
3277 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
3278 		uint8_t stat_idx)
3279 {
3280 	return eth_err(port_id, eth_dev_set_queue_stats_mapping(port_id,
3281 						rx_queue_id,
3282 						stat_idx, STAT_QMAP_RX));
3283 }
3284 
3285 int
3286 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3287 {
3288 	struct rte_eth_dev *dev;
3289 
3290 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3291 	dev = &rte_eth_devices[port_id];
3292 
3293 	if (fw_version == NULL && fw_size > 0) {
3294 		RTE_ETHDEV_LOG(ERR,
3295 			"Cannot get ethdev port %u FW version to NULL when string size is non-zero\n",
3296 			port_id);
3297 		return -EINVAL;
3298 	}
3299 
3300 	if (*dev->dev_ops->fw_version_get == NULL)
3301 		return -ENOTSUP;
3302 	return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
3303 							fw_version, fw_size));
3304 }
3305 
3306 int
3307 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3308 {
3309 	struct rte_eth_dev *dev;
3310 	const struct rte_eth_desc_lim lim = {
3311 		.nb_max = UINT16_MAX,
3312 		.nb_min = 0,
3313 		.nb_align = 1,
3314 		.nb_seg_max = UINT16_MAX,
3315 		.nb_mtu_seg_max = UINT16_MAX,
3316 	};
3317 	int diag;
3318 
3319 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3320 	dev = &rte_eth_devices[port_id];
3321 
3322 	if (dev_info == NULL) {
3323 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
3324 			port_id);
3325 		return -EINVAL;
3326 	}
3327 
3328 	/*
3329 	 * Zero dev_info up front: a caller that ignores the return status
3330 	 * must not see stale or partially filled contents on failure.
3331 	 */
3332 	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3333 	dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
3334 
3335 	dev_info->rx_desc_lim = lim;
3336 	dev_info->tx_desc_lim = lim;
3337 	dev_info->device = dev->device;
3338 	dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
3339 		RTE_ETHER_CRC_LEN;
3340 	dev_info->max_mtu = UINT16_MAX;
3341 
3342 	if (*dev->dev_ops->dev_infos_get == NULL)
3343 		return -ENOTSUP;
3344 	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
3345 	if (diag != 0) {
3346 		/* Cleanup already filled in device information */
3347 		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
3348 		return eth_err(port_id, diag);
3349 	}
3350 
3351 	/* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3352 	dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3353 			RTE_MAX_QUEUES_PER_PORT);
3354 	dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3355 			RTE_MAX_QUEUES_PER_PORT);
3356 
3357 	dev_info->driver_name = dev->device->driver->name;
3358 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3359 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3360 
3361 	dev_info->dev_flags = &dev->data->dev_flags;
3362 
3363 	return 0;
3364 }
3365 
3366 int
3367 rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3368 {
3369 	struct rte_eth_dev *dev;
3370 
3371 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3372 	dev = &rte_eth_devices[port_id];
3373 
3374 	if (dev_conf == NULL) {
3375 		RTE_ETHDEV_LOG(ERR,
3376 			"Cannot get ethdev port %u configuration to NULL\n",
3377 			port_id);
3378 		return -EINVAL;
3379 	}
3380 
3381 	memcpy(dev_conf, &dev->data->dev_conf, sizeof(struct rte_eth_conf));
3382 
3383 	return 0;
3384 }
3385 
3386 int
3387 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3388 				 uint32_t *ptypes, int num)
3389 {
3390 	int i, j;
3391 	struct rte_eth_dev *dev;
3392 	const uint32_t *all_ptypes;
3393 
3394 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3395 	dev = &rte_eth_devices[port_id];
3396 
3397 	if (ptypes == NULL && num > 0) {
3398 		RTE_ETHDEV_LOG(ERR,
3399 			"Cannot get ethdev port %u supported packet types to NULL when array size is non-zero\n",
3400 			port_id);
3401 		return -EINVAL;
3402 	}
3403 
3404 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL)
3405 		return 0;
3406 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3407 
3408 	if (!all_ptypes)
3409 		return 0;
3410 
3411 	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3412 		if (all_ptypes[i] & ptype_mask) {
3413 			if (j < num)
3414 				ptypes[j] = all_ptypes[i];
3415 			j++;
3416 		}
3417 
3418 	return j;
3419 }
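
/*
 * Usage sketch (illustrative only): count, then fetch, the L3/L4 packet
 * types the driver can recognize.
 *
 *	uint32_t mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;
 *	int i, n = rte_eth_dev_get_supported_ptypes(port_id, mask, NULL, 0);
 *
 *	if (n > 0) {
 *		uint32_t *pt = malloc(n * sizeof(*pt));
 *		if (pt != NULL) {
 *			n = rte_eth_dev_get_supported_ptypes(port_id, mask,
 *							     pt, n);
 *			for (i = 0; i < n; i++)
 *				printf("ptype 0x%08x\n", pt[i]);
 *			free(pt);
 *		}
 *	}
 */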
3420 
3421 int
3422 rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3423 				 uint32_t *set_ptypes, unsigned int num)
3424 {
3425 	const uint32_t valid_ptype_masks[] = {
3426 		RTE_PTYPE_L2_MASK,
3427 		RTE_PTYPE_L3_MASK,
3428 		RTE_PTYPE_L4_MASK,
3429 		RTE_PTYPE_TUNNEL_MASK,
3430 		RTE_PTYPE_INNER_L2_MASK,
3431 		RTE_PTYPE_INNER_L3_MASK,
3432 		RTE_PTYPE_INNER_L4_MASK,
3433 	};
3434 	const uint32_t *all_ptypes;
3435 	struct rte_eth_dev *dev;
3436 	uint32_t unused_mask;
3437 	unsigned int i, j;
3438 	int ret;
3439 
3440 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3441 	dev = &rte_eth_devices[port_id];
3442 
3443 	if (num > 0 && set_ptypes == NULL) {
3444 		RTE_ETHDEV_LOG(ERR,
3445 			"Cannot get ethdev port %u set packet types to NULL when array size is non-zero\n",
3446 			port_id);
3447 		return -EINVAL;
3448 	}
3449 
3450 	if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3451 			*dev->dev_ops->dev_ptypes_set == NULL) {
3452 		ret = 0;
3453 		goto ptype_unknown;
3454 	}
3455 
3456 	if (ptype_mask == 0) {
3457 		ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3458 				ptype_mask);
3459 		goto ptype_unknown;
3460 	}
3461 
3462 	unused_mask = ptype_mask;
3463 	for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3464 		uint32_t mask = ptype_mask & valid_ptype_masks[i];
3465 		if (mask && mask != valid_ptype_masks[i]) {
3466 			ret = -EINVAL;
3467 			goto ptype_unknown;
3468 		}
3469 		unused_mask &= ~valid_ptype_masks[i];
3470 	}
3471 
3472 	if (unused_mask) {
3473 		ret = -EINVAL;
3474 		goto ptype_unknown;
3475 	}
3476 
3477 	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3478 	if (all_ptypes == NULL) {
3479 		ret = 0;
3480 		goto ptype_unknown;
3481 	}
3482 
3483 	/*
3484 	 * Accommodate as many set_ptypes as possible. If the supplied
3485 	 * set_ptypes array is insufficient, fill it partially.
3486 	 */
3487 	for (i = 0, j = 0; set_ptypes != NULL &&
3488 				(all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3489 		if (ptype_mask & all_ptypes[i]) {
3490 			if (j < num - 1) {
3491 				set_ptypes[j] = all_ptypes[i];
3492 				j++;
3493 				continue;
3494 			}
3495 			break;
3496 		}
3497 	}
3498 
3499 	if (set_ptypes != NULL && j < num)
3500 		set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3501 
3502 	return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3503 
3504 ptype_unknown:
3505 	if (num > 0)
3506 		set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3507 
3508 	return ret;
3509 }
3510 
3511 int
3512 rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3513 	unsigned int num)
3514 {
3515 	int32_t ret;
3516 	struct rte_eth_dev *dev;
3517 	struct rte_eth_dev_info dev_info;
3518 
3519 	if (ma == NULL) {
3520 		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
3521 		return -EINVAL;
3522 	}
3523 
3524 	/* will check for us that port_id is a valid one */
3525 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3526 	if (ret != 0)
3527 		return ret;
3528 
3529 	dev = &rte_eth_devices[port_id];
3530 	num = RTE_MIN(dev_info.max_mac_addrs, num);
3531 	memcpy(ma, dev->data->mac_addrs, num * sizeof(ma[0]));
3532 
3533 	return num;
3534 }
3535 
3536 int
3537 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3538 {
3539 	struct rte_eth_dev *dev;
3540 
3541 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3542 	dev = &rte_eth_devices[port_id];
3543 
3544 	if (mac_addr == NULL) {
3545 		RTE_ETHDEV_LOG(ERR,
3546 			"Cannot get ethdev port %u MAC address to NULL\n",
3547 			port_id);
3548 		return -EINVAL;
3549 	}
3550 
3551 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3552 
3553 	return 0;
3554 }
3555 
3556 int
3557 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3558 {
3559 	struct rte_eth_dev *dev;
3560 
3561 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3562 	dev = &rte_eth_devices[port_id];
3563 
3564 	if (mtu == NULL) {
3565 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
3566 			port_id);
3567 		return -EINVAL;
3568 	}
3569 
3570 	*mtu = dev->data->mtu;
3571 	return 0;
3572 }
3573 
3574 int
3575 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3576 {
3577 	int ret;
3578 	struct rte_eth_dev_info dev_info;
3579 	struct rte_eth_dev *dev;
3580 
3581 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3582 	dev = &rte_eth_devices[port_id];
3583 	if (*dev->dev_ops->mtu_set == NULL)
3584 		return -ENOTSUP;
3585 
3586 	/*
3587 	 * Check if the device supports dev_infos_get; if it does not,
3588 	 * skip min_mtu/max_mtu validation here as this requires values
3589 	 * that are populated within the call to rte_eth_dev_info_get()
3590 	 * which relies on dev->dev_ops->dev_infos_get.
3591 	 */
3592 	if (*dev->dev_ops->dev_infos_get != NULL) {
3593 		ret = rte_eth_dev_info_get(port_id, &dev_info);
3594 		if (ret != 0)
3595 			return ret;
3596 
3597 		ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
3598 		if (ret != 0)
3599 			return ret;
3600 	}
3601 
3602 	if (dev->data->dev_configured == 0) {
3603 		RTE_ETHDEV_LOG(ERR,
3604 			"Port %u must be configured before MTU set\n",
3605 			port_id);
3606 		return -EINVAL;
3607 	}
3608 
3609 	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3610 	if (ret == 0)
3611 		dev->data->mtu = mtu;
3612 
3613 	return eth_err(port_id, ret);
3614 }
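
/*
 * Usage sketch (illustrative only): clamp a requested MTU to the limits
 * reported by rte_eth_dev_info_get() before applying it.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t mtu = 9000;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *		mtu = RTE_MIN(mtu, info.max_mtu);
 *		mtu = RTE_MAX(mtu, info.min_mtu);
 *		if (rte_eth_dev_set_mtu(port_id, mtu) != 0)
 *			printf("cannot set MTU %u on port %u\n",
 *			       mtu, port_id);
 *	}
 */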
3615 
3616 int
3617 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3618 {
3619 	struct rte_eth_dev *dev;
3620 	int ret;
3621 
3622 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3623 	dev = &rte_eth_devices[port_id];
3624 
3625 	if (!(dev->data->dev_conf.rxmode.offloads &
3626 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
3627 		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
3628 			port_id);
3629 		return -ENOSYS;
3630 	}
3631 
3632 	if (vlan_id > 4095) {
3633 		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3634 			port_id, vlan_id);
3635 		return -EINVAL;
3636 	}
3637 	if (*dev->dev_ops->vlan_filter_set == NULL)
3638 		return -ENOTSUP;
3639 
3640 	ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3641 	if (ret == 0) {
3642 		struct rte_vlan_filter_conf *vfc;
3643 		int vidx;
3644 		int vbit;
3645 
3646 		vfc = &dev->data->vlan_filter_conf;
3647 		vidx = vlan_id / 64;
3648 		vbit = vlan_id % 64;
3649 
3650 		if (on)
3651 			vfc->ids[vidx] |= RTE_BIT64(vbit);
3652 		else
3653 			vfc->ids[vidx] &= ~RTE_BIT64(vbit);
3654 	}
3655 
3656 	return eth_err(port_id, ret);
3657 }
3658 
3659 int
3660 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3661 				    int on)
3662 {
3663 	struct rte_eth_dev *dev;
3664 
3665 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3666 	dev = &rte_eth_devices[port_id];
3667 
3668 	if (rx_queue_id >= dev->data->nb_rx_queues) {
3669 		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3670 		return -EINVAL;
3671 	}
3672 
3673 	if (*dev->dev_ops->vlan_strip_queue_set == NULL)
3674 		return -ENOTSUP;
3675 	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3676 
3677 	return 0;
3678 }
3679 
3680 int
3681 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3682 				enum rte_vlan_type vlan_type,
3683 				uint16_t tpid)
3684 {
3685 	struct rte_eth_dev *dev;
3686 
3687 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3688 	dev = &rte_eth_devices[port_id];
3689 
3690 	if (*dev->dev_ops->vlan_tpid_set == NULL)
3691 		return -ENOTSUP;
3692 	return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3693 							       tpid));
3694 }
3695 
3696 int
3697 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3698 {
3699 	struct rte_eth_dev_info dev_info;
3700 	struct rte_eth_dev *dev;
3701 	int ret = 0;
3702 	int mask = 0;
3703 	int cur, org = 0;
3704 	uint64_t orig_offloads;
3705 	uint64_t dev_offloads;
3706 	uint64_t new_offloads;
3707 
3708 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3709 	dev = &rte_eth_devices[port_id];
3710 
3711 	/* save original values in case of failure */
3712 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
3713 	dev_offloads = orig_offloads;
3714 
3715 	/* check which option changed by application */
3716 	cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
3717 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
3718 	if (cur != org) {
3719 		if (cur)
3720 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3721 		else
3722 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
3723 		mask |= RTE_ETH_VLAN_STRIP_MASK;
3724 	}
3725 
3726 	cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
3727 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
3728 	if (cur != org) {
3729 		if (cur)
3730 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3731 		else
3732 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
3733 		mask |= RTE_ETH_VLAN_FILTER_MASK;
3734 	}
3735 
3736 	cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
3737 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
3738 	if (cur != org) {
3739 		if (cur)
3740 			dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3741 		else
3742 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
3743 		mask |= RTE_ETH_VLAN_EXTEND_MASK;
3744 	}
3745 
3746 	cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
3747 	org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
3748 	if (cur != org) {
3749 		if (cur)
3750 			dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3751 		else
3752 			dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
3753 		mask |= RTE_ETH_QINQ_STRIP_MASK;
3754 	}
3755 
3756 	/* no change */
3757 	if (mask == 0)
3758 		return ret;
3759 
3760 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3761 	if (ret != 0)
3762 		return ret;
3763 
3764 	/* Rx VLAN offloading must be within its device capabilities */
3765 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
3766 		new_offloads = dev_offloads & ~orig_offloads;
3767 		RTE_ETHDEV_LOG(ERR,
3768 			"Ethdev port_id=%u newly requested VLAN offloads "
3769 			"0x%" PRIx64 " must be within Rx offloads capabilities "
3770 			"0x%" PRIx64 " in %s()\n",
3771 			port_id, new_offloads, dev_info.rx_offload_capa,
3772 			__func__);
3773 		return -EINVAL;
3774 	}
3775 
3776 	if (*dev->dev_ops->vlan_offload_set == NULL)
3777 		return -ENOTSUP;
3778 	dev->data->dev_conf.rxmode.offloads = dev_offloads;
3779 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3780 	if (ret) {
3781 		/* hit an error, restore the original values */
3782 		dev->data->dev_conf.rxmode.offloads = orig_offloads;
3783 	}
3784 
3785 	return eth_err(port_id, ret);
3786 }
3787 
3788 int
3789 rte_eth_dev_get_vlan_offload(uint16_t port_id)
3790 {
3791 	struct rte_eth_dev *dev;
3792 	uint64_t *dev_offloads;
3793 	int ret = 0;
3794 
3795 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3796 	dev = &rte_eth_devices[port_id];
3797 	dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3798 
3799 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
3800 		ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
3801 
3802 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
3803 		ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
3804 
3805 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
3806 		ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
3807 
3808 	if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
3809 		ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
3810 
3811 	return ret;
3812 }
3813 
3814 int
3815 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3816 {
3817 	struct rte_eth_dev *dev;
3818 
3819 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3820 	dev = &rte_eth_devices[port_id];
3821 
3822 	if (*dev->dev_ops->vlan_pvid_set == NULL)
3823 		return -ENOTSUP;
3824 	return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3825 }
3826 
3827 int
3828 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3829 {
3830 	struct rte_eth_dev *dev;
3831 
3832 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3833 	dev = &rte_eth_devices[port_id];
3834 
3835 	if (fc_conf == NULL) {
3836 		RTE_ETHDEV_LOG(ERR,
3837 			"Cannot get ethdev port %u flow control config to NULL\n",
3838 			port_id);
3839 		return -EINVAL;
3840 	}
3841 
3842 	if (*dev->dev_ops->flow_ctrl_get == NULL)
3843 		return -ENOTSUP;
3844 	memset(fc_conf, 0, sizeof(*fc_conf));
3845 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3846 }
3847 
3848 int
3849 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3850 {
3851 	struct rte_eth_dev *dev;
3852 
3853 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3854 	dev = &rte_eth_devices[port_id];
3855 
3856 	if (fc_conf == NULL) {
3857 		RTE_ETHDEV_LOG(ERR,
3858 			"Cannot set ethdev port %u flow control from NULL config\n",
3859 			port_id);
3860 		return -EINVAL;
3861 	}
3862 
3863 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3864 		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3865 		return -EINVAL;
3866 	}
3867 
3868 	if (*dev->dev_ops->flow_ctrl_set == NULL)
3869 		return -ENOTSUP;
3870 	return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3871 }
3872 
3873 int
3874 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3875 				   struct rte_eth_pfc_conf *pfc_conf)
3876 {
3877 	struct rte_eth_dev *dev;
3878 
3879 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3880 	dev = &rte_eth_devices[port_id];
3881 
3882 	if (pfc_conf == NULL) {
3883 		RTE_ETHDEV_LOG(ERR,
3884 			"Cannot set ethdev port %u priority flow control from NULL config\n",
3885 			port_id);
3886 		return -EINVAL;
3887 	}
3888 
3889 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3890 		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3891 		return -EINVAL;
3892 	}
3893 
3894 	/* High-water/low-water validation is device-specific */
3895 	if (*dev->dev_ops->priority_flow_ctrl_set == NULL)
3896 		return -ENOTSUP;
3897 	return eth_err(port_id,
3898 		       (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf));
3899 }
3900 
3901 static int
3902 validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3903 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3904 {
3905 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
3906 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3907 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
3908 			RTE_ETHDEV_LOG(ERR,
3909 				"PFC Tx queue not in range for Rx pause: requested %d, configured %d\n",
3910 				pfc_queue_conf->rx_pause.tx_qid,
3911 				dev_info->nb_tx_queues);
3912 			return -EINVAL;
3913 		}
3914 
3915 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
3916 			RTE_ETHDEV_LOG(ERR,
3917 				"PFC TC not in range for Rx pause: requested %d, max %d\n",
3918 				pfc_queue_conf->rx_pause.tc, tc_max);
3919 			return -EINVAL;
3920 		}
3921 	}
3922 
3923 	return 0;
3924 }
3925 
3926 static int
3927 validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
3928 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3929 {
3930 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
3931 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
3932 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
3933 			RTE_ETHDEV_LOG(ERR,
3934 				"PFC Rx queue not in range for Tx pause: requested %d, configured %d\n",
3935 				pfc_queue_conf->tx_pause.rx_qid,
3936 				dev_info->nb_rx_queues);
3937 			return -EINVAL;
3938 		}
3939 
3940 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
3941 			RTE_ETHDEV_LOG(ERR,
3942 				"PFC TC not in range for Tx pause: requested %d, max %d\n",
3943 				pfc_queue_conf->tx_pause.tc, tc_max);
3944 			return -EINVAL;
3945 		}
3946 	}
3947 
3948 	return 0;
3949 }
3950 
3951 int
3952 rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
3953 		struct rte_eth_pfc_queue_info *pfc_queue_info)
3954 {
3955 	struct rte_eth_dev *dev;
3956 
3957 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3958 	dev = &rte_eth_devices[port_id];
3959 
3960 	if (pfc_queue_info == NULL) {
3961 		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
3962 			port_id);
3963 		return -EINVAL;
3964 	}
3965 
3966 	if (*dev->dev_ops->priority_flow_ctrl_queue_info_get == NULL)
3967 		return -ENOTSUP;
3968 	return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
3969 			(dev, pfc_queue_info));
3970 }
3971 
3972 int
3973 rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
3974 		struct rte_eth_pfc_queue_conf *pfc_queue_conf)
3975 {
3976 	struct rte_eth_pfc_queue_info pfc_info;
3977 	struct rte_eth_dev_info dev_info;
3978 	struct rte_eth_dev *dev;
3979 	int ret;
3980 
3981 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3982 	dev = &rte_eth_devices[port_id];
3983 
3984 	if (pfc_queue_conf == NULL) {
3985 		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
3986 			port_id);
3987 		return -EINVAL;
3988 	}
3989 
3990 	ret = rte_eth_dev_info_get(port_id, &dev_info);
3991 	if (ret != 0)
3992 		return ret;
3993 
3994 	ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
3995 	if (ret != 0)
3996 		return ret;
3997 
3998 	if (pfc_info.tc_max == 0) {
3999 		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
4000 			port_id);
4001 		return -ENOTSUP;
4002 	}
4003 
4004 	/* Check requested mode supported or not */
4005 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
4006 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
4007 		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
4008 			port_id);
4009 		return -EINVAL;
4010 	}
4011 
4012 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
4013 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
4014 		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
4015 			port_id);
4016 		return -EINVAL;
4017 	}
4018 
4019 	/* Validate Rx pause parameters */
4020 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4021 			pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
4022 		ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
4023 				pfc_queue_conf);
4024 		if (ret != 0)
4025 			return ret;
4026 	}
4027 
4028 	/* Validate Tx pause parameters */
4029 	if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
4030 			pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
4031 		ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
4032 				pfc_queue_conf);
4033 		if (ret != 0)
4034 			return ret;
4035 	}
4036 
4037 	if (*dev->dev_ops->priority_flow_ctrl_queue_config == NULL)
4038 		return -ENOTSUP;
4039 	return eth_err(port_id,
4040 		       (*dev->dev_ops->priority_flow_ctrl_queue_config)(
4041 			dev, pfc_queue_conf));
4042 }
4043 
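/*
 * A RETA update/query is accepted only if at least one of the 64-entry
 * groups has a non-zero mask, i.e. the caller selected at least one
 * indirection table entry.
 */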
4044 static int
4045 eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
4046 			uint16_t reta_size)
4047 {
4048 	uint16_t i, num;
4049 
4050 	num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
4051 	for (i = 0; i < num; i++) {
4052 		if (reta_conf[i].mask)
4053 			return 0;
4054 	}
4055 
4056 	return -EINVAL;
4057 }
4058 
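/*
 * Every selected RETA entry must map to an existing Rx queue: entry i
 * lives in group i / RTE_ETH_RETA_GROUP_SIZE at bit
 * i % RTE_ETH_RETA_GROUP_SIZE of that group's mask.
 */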
4059 static int
4060 eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
4061 			 uint16_t reta_size,
4062 			 uint16_t max_rxq)
4063 {
4064 	uint16_t i, idx, shift;
4065 
4066 	if (max_rxq == 0) {
4067 		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
4068 		return -EINVAL;
4069 	}
4070 
4071 	for (i = 0; i < reta_size; i++) {
4072 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
4073 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
4074 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
4075 			(reta_conf[idx].reta[shift] >= max_rxq)) {
4076 			RTE_ETHDEV_LOG(ERR,
4077 				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
4078 				idx, shift,
4079 				reta_conf[idx].reta[shift], max_rxq);
4080 			return -EINVAL;
4081 		}
4082 	}
4083 
4084 	return 0;
4085 }
4086 
4087 int
4088 rte_eth_dev_rss_reta_update(uint16_t port_id,
4089 			    struct rte_eth_rss_reta_entry64 *reta_conf,
4090 			    uint16_t reta_size)
4091 {
4092 	enum rte_eth_rx_mq_mode mq_mode;
4093 	struct rte_eth_dev *dev;
4094 	int ret;
4095 
4096 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4097 	dev = &rte_eth_devices[port_id];
4098 
4099 	if (reta_conf == NULL) {
4100 		RTE_ETHDEV_LOG(ERR,
4101 			"Cannot update ethdev port %u RSS RETA to NULL\n",
4102 			port_id);
4103 		return -EINVAL;
4104 	}
4105 
4106 	if (reta_size == 0) {
4107 		RTE_ETHDEV_LOG(ERR,
4108 			"Cannot update ethdev port %u RSS RETA with zero size\n",
4109 			port_id);
4110 		return -EINVAL;
4111 	}
4112 
4113 	/* Check mask bits */
4114 	ret = eth_check_reta_mask(reta_conf, reta_size);
4115 	if (ret < 0)
4116 		return ret;
4117 
4118 	/* Check entry value */
4119 	ret = eth_check_reta_entry(reta_conf, reta_size,
4120 				dev->data->nb_rx_queues);
4121 	if (ret < 0)
4122 		return ret;
4123 
4124 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4125 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4126 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4127 		return -ENOTSUP;
4128 	}
4129 
4130 	if (*dev->dev_ops->reta_update == NULL)
4131 		return -ENOTSUP;
4132 	return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
4133 							     reta_size));
4134 }
4135 
4136 int
4137 rte_eth_dev_rss_reta_query(uint16_t port_id,
4138 			   struct rte_eth_rss_reta_entry64 *reta_conf,
4139 			   uint16_t reta_size)
4140 {
4141 	struct rte_eth_dev *dev;
4142 	int ret;
4143 
4144 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4145 	dev = &rte_eth_devices[port_id];
4146 
4147 	if (reta_conf == NULL) {
4148 		RTE_ETHDEV_LOG(ERR,
4149 			"Cannot query ethdev port %u RSS RETA from NULL config\n",
4150 			port_id);
4151 		return -EINVAL;
4152 	}
4153 
4154 	/* Check mask bits */
4155 	ret = eth_check_reta_mask(reta_conf, reta_size);
4156 	if (ret < 0)
4157 		return ret;
4158 
4159 	if (*dev->dev_ops->reta_query == NULL)
4160 		return -ENOTSUP;
4161 	return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
4162 							    reta_size));
4163 }
4164 
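/*
 * The requested hash types must be a subset of
 * dev_info.flow_type_rss_offloads, and an RSS multi-queue Rx mode must
 * have been selected at configure time.
 */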
4165 int
4166 rte_eth_dev_rss_hash_update(uint16_t port_id,
4167 			    struct rte_eth_rss_conf *rss_conf)
4168 {
4169 	struct rte_eth_dev *dev;
4170 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
4171 	enum rte_eth_rx_mq_mode mq_mode;
4172 	int ret;
4173 
4174 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4175 	dev = &rte_eth_devices[port_id];
4176 
4177 	if (rss_conf == NULL) {
4178 		RTE_ETHDEV_LOG(ERR,
4179 			"Cannot update ethdev port %u RSS hash from NULL config\n",
4180 			port_id);
4181 		return -EINVAL;
4182 	}
4183 
4184 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4185 	if (ret != 0)
4186 		return ret;
4187 
4188 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
4189 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
4190 	    dev_info.flow_type_rss_offloads) {
4191 		RTE_ETHDEV_LOG(ERR,
4192 			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
4193 			port_id, rss_conf->rss_hf,
4194 			dev_info.flow_type_rss_offloads);
4195 		return -EINVAL;
4196 	}
4197 
4198 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
4199 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
4200 		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
4201 		return -ENOTSUP;
4202 	}
4203 
4204 	if (*dev->dev_ops->rss_hash_update == NULL)
4205 		return -ENOTSUP;
4206 	return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
4207 								 rss_conf));
4208 }
4209 
4210 int
4211 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4212 			      struct rte_eth_rss_conf *rss_conf)
4213 {
4214 	struct rte_eth_dev *dev;
4215 
4216 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4217 	dev = &rte_eth_devices[port_id];
4218 
4219 	if (rss_conf == NULL) {
4220 		RTE_ETHDEV_LOG(ERR,
4221 			"Cannot get ethdev port %u RSS hash config to NULL\n",
4222 			port_id);
4223 		return -EINVAL;
4224 	}
4225 
4226 	if (*dev->dev_ops->rss_hash_conf_get == NULL)
4227 		return -ENOTSUP;
4228 	return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
4229 								   rss_conf));
4230 }
4231 
4232 int
4233 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4234 				struct rte_eth_udp_tunnel *udp_tunnel)
4235 {
4236 	struct rte_eth_dev *dev;
4237 
4238 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4239 	dev = &rte_eth_devices[port_id];
4240 
4241 	if (udp_tunnel == NULL) {
4242 		RTE_ETHDEV_LOG(ERR,
4243 			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4244 			port_id);
4245 		return -EINVAL;
4246 	}
4247 
4248 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4249 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4250 		return -EINVAL;
4251 	}
4252 
4253 	if (*dev->dev_ops->udp_tunnel_port_add == NULL)
4254 		return -ENOTSUP;
4255 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
4256 								udp_tunnel));
4257 }
4258 
4259 int
4260 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4261 				   struct rte_eth_udp_tunnel *udp_tunnel)
4262 {
4263 	struct rte_eth_dev *dev;
4264 
4265 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4266 	dev = &rte_eth_devices[port_id];
4267 
4268 	if (udp_tunnel == NULL) {
4269 		RTE_ETHDEV_LOG(ERR,
4270 			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
4271 			port_id);
4272 		return -EINVAL;
4273 	}
4274 
4275 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
4276 		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4277 		return -EINVAL;
4278 	}
4279 
4280 	if (*dev->dev_ops->udp_tunnel_port_del == NULL)
4281 		return -ENOTSUP;
4282 	return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
4283 								udp_tunnel));
4284 }
4285 
4286 int
4287 rte_eth_led_on(uint16_t port_id)
4288 {
4289 	struct rte_eth_dev *dev;
4290 
4291 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4292 	dev = &rte_eth_devices[port_id];
4293 
4294 	if (*dev->dev_ops->dev_led_on == NULL)
4295 		return -ENOTSUP;
4296 	return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
4297 }
4298 
4299 int
4300 rte_eth_led_off(uint16_t port_id)
4301 {
4302 	struct rte_eth_dev *dev;
4303 
4304 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4305 	dev = &rte_eth_devices[port_id];
4306 
4307 	if (*dev->dev_ops->dev_led_off == NULL)
4308 		return -ENOTSUP;
4309 	return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
4310 }
4311 
4312 int
4313 rte_eth_fec_get_capability(uint16_t port_id,
4314 			   struct rte_eth_fec_capa *speed_fec_capa,
4315 			   unsigned int num)
4316 {
4317 	struct rte_eth_dev *dev;
4318 	int ret;
4319 
4320 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4321 	dev = &rte_eth_devices[port_id];
4322 
4323 	if (speed_fec_capa == NULL && num > 0) {
4324 		RTE_ETHDEV_LOG(ERR,
4325 			"Cannot get ethdev port %u FEC capability to NULL when array size is non-zero\n",
4326 			port_id);
4327 		return -EINVAL;
4328 	}
4329 
4330 	if (*dev->dev_ops->fec_get_capability == NULL)
4331 		return -ENOTSUP;
4332 	ret = (*dev->dev_ops->fec_get_capability)(dev, speed_fec_capa, num);
4333 
4334 	return ret;
4335 }
4336 
4337 int
4338 rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
4339 {
4340 	struct rte_eth_dev *dev;
4341 
4342 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4343 	dev = &rte_eth_devices[port_id];
4344 
4345 	if (fec_capa == NULL) {
4346 		RTE_ETHDEV_LOG(ERR,
4347 			"Cannot get ethdev port %u current FEC mode to NULL\n",
4348 			port_id);
4349 		return -EINVAL;
4350 	}
4351 
4352 	if (*dev->dev_ops->fec_get == NULL)
4353 		return -ENOTSUP;
4354 	return eth_err(port_id, (*dev->dev_ops->fec_get)(dev, fec_capa));
4355 }
4356 
4357 int
4358 rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
4359 {
4360 	struct rte_eth_dev *dev;
4361 
4362 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4363 	dev = &rte_eth_devices[port_id];
4364 
4365 	if (*dev->dev_ops->fec_set == NULL)
4366 		return -ENOTSUP;
4367 	return eth_err(port_id, (*dev->dev_ops->fec_set)(dev, fec_capa));
4368 }
4369 
4370 /*
4371  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
4372  * an empty spot.
4373  */
4374 static int
4375 eth_dev_get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
4376 {
4377 	struct rte_eth_dev_info dev_info;
4378 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4379 	unsigned i;
4380 	int ret;
4381 
4382 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4383 	if (ret != 0)
4384 		return -1;
4385 
4386 	for (i = 0; i < dev_info.max_mac_addrs; i++)
4387 		if (memcmp(addr, &dev->data->mac_addrs[i],
4388 				RTE_ETHER_ADDR_LEN) == 0)
4389 			return i;
4390 
4391 	return -1;
4392 }
4393 
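/* All-zero address used to mark free slots in the MAC address arrays */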
4394 static const struct rte_ether_addr null_mac_addr;
4395 
4396 int
4397 rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
4398 			uint32_t pool)
4399 {
4400 	struct rte_eth_dev *dev;
4401 	int index;
4402 	uint64_t pool_mask;
4403 	int ret;
4404 
4405 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4406 	dev = &rte_eth_devices[port_id];
4407 
4408 	if (addr == NULL) {
4409 		RTE_ETHDEV_LOG(ERR,
4410 			"Cannot add ethdev port %u MAC address from NULL address\n",
4411 			port_id);
4412 		return -EINVAL;
4413 	}
4414 
4415 	if (*dev->dev_ops->mac_addr_add == NULL)
4416 		return -ENOTSUP;
4417 
4418 	if (rte_is_zero_ether_addr(addr)) {
4419 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4420 			port_id);
4421 		return -EINVAL;
4422 	}
4423 	if (pool >= RTE_ETH_64_POOLS) {
4424 		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
4425 		return -EINVAL;
4426 	}
4427 
4428 	index = eth_dev_get_mac_addr_index(port_id, addr);
4429 	if (index < 0) {
4430 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
4431 		if (index < 0) {
4432 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4433 				port_id);
4434 			return -ENOSPC;
4435 		}
4436 	} else {
4437 		pool_mask = dev->data->mac_pool_sel[index];
4438 
4439 		/* If both the MAC address and the pool are already there, do nothing */
4440 		if (pool_mask & RTE_BIT64(pool))
4441 			return 0;
4442 	}
4443 
4444 	/* Update NIC */
4445 	ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
4446 
4447 	if (ret == 0) {
4448 		/* Update address in NIC data structure */
4449 		rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
4450 
4451 		/* Update pool bitmap in NIC data structure */
4452 		dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
4453 	}
4454 
4455 	return eth_err(port_id, ret);
4456 }
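
/*
 * Illustrative usage sketch (not part of this file); port_id is assumed
 * to be a valid port owned by the caller:
 *
 *	struct rte_ether_addr mac = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &mac, 0) == 0)
 *		rte_eth_dev_mac_addr_remove(port_id, &mac);
 */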
4457 
4458 int
4459 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
4460 {
4461 	struct rte_eth_dev *dev;
4462 	int index;
4463 
4464 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4465 	dev = &rte_eth_devices[port_id];
4466 
4467 	if (addr == NULL) {
4468 		RTE_ETHDEV_LOG(ERR,
4469 			"Cannot remove ethdev port %u MAC address from NULL address\n",
4470 			port_id);
4471 		return -EINVAL;
4472 	}
4473 
4474 	if (*dev->dev_ops->mac_addr_remove == NULL)
4475 		return -ENOTSUP;
4476 
4477 	index = eth_dev_get_mac_addr_index(port_id, addr);
4478 	if (index == 0) {
4479 		RTE_ETHDEV_LOG(ERR,
4480 			"Port %u: Cannot remove default MAC address\n",
4481 			port_id);
4482 		return -EADDRINUSE;
4483 	} else if (index < 0)
4484 		return 0;  /* Do nothing if address wasn't found */
4485 
4486 	/* Update NIC */
4487 	(*dev->dev_ops->mac_addr_remove)(dev, index);
4488 
4489 	/* Update address in NIC data structure */
4490 	rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
4491 
4492 	/* reset pool bitmap */
4493 	dev->data->mac_pool_sel[index] = 0;
4494 
4495 	return 0;
4496 }
4497 
4498 int
4499 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
4500 {
4501 	struct rte_eth_dev *dev;
4502 	int ret;
4503 
4504 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4505 	dev = &rte_eth_devices[port_id];
4506 
4507 	if (addr == NULL) {
4508 		RTE_ETHDEV_LOG(ERR,
4509 			"Cannot set ethdev port %u default MAC address from NULL address\n",
4510 			port_id);
4511 		return -EINVAL;
4512 	}
4513 
4514 	if (!rte_is_valid_assigned_ether_addr(addr))
4515 		return -EINVAL;
4516 
4517 	if (*dev->dev_ops->mac_addr_set == NULL)
4518 		return -ENOTSUP;
4519 
4520 	ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
4521 	if (ret < 0)
4522 		return ret;
4523 
4524 	/* Update default address in NIC data structure */
4525 	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
4526 
4527 	return 0;
4528 }
4529 
4530 
4531 /*
4532  * Returns index into the unicast hash MAC address array of addr.
4533  * Use 00:00:00:00:00:00 to find an empty spot.
4534  */
4535 static int
4536 eth_dev_get_hash_mac_addr_index(uint16_t port_id,
4537 		const struct rte_ether_addr *addr)
4538 {
4539 	struct rte_eth_dev_info dev_info;
4540 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4541 	unsigned i;
4542 	int ret;
4543 
4544 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4545 	if (ret != 0)
4546 		return -1;
4547 
4548 	if (!dev->data->hash_mac_addrs)
4549 		return -1;
4550 
4551 	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
4552 		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
4553 			RTE_ETHER_ADDR_LEN) == 0)
4554 			return i;
4555 
4556 	return -1;
4557 }
4558 
4559 int
4560 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4561 				uint8_t on)
4562 {
4563 	int index;
4564 	int ret;
4565 	struct rte_eth_dev *dev;
4566 
4567 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4568 	dev = &rte_eth_devices[port_id];
4569 
4570 	if (addr == NULL) {
4571 		RTE_ETHDEV_LOG(ERR,
4572 			"Cannot set ethdev port %u unicast hash table from NULL address\n",
4573 			port_id);
4574 		return -EINVAL;
4575 	}
4576 
4577 	if (rte_is_zero_ether_addr(addr)) {
4578 		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
4579 			port_id);
4580 		return -EINVAL;
4581 	}
4582 
4583 	index = eth_dev_get_hash_mac_addr_index(port_id, addr);
4584 	/* Check if it's already there, and do nothing */
4585 	if ((index >= 0) && on)
4586 		return 0;
4587 
4588 	if (index < 0) {
4589 		if (!on) {
4590 			RTE_ETHDEV_LOG(ERR,
4591 				"Port %u: the MAC address was not set in UTA\n",
4592 				port_id);
4593 			return -EINVAL;
4594 		}
4595 
4596 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
4597 		if (index < 0) {
4598 			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
4599 				port_id);
4600 			return -ENOSPC;
4601 		}
4602 	}
4603 
4604 	if (*dev->dev_ops->uc_hash_table_set == NULL)
4605 		return -ENOTSUP;
4606 	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
4607 	if (ret == 0) {
4608 		/* Update address in NIC data structure */
4609 		if (on)
4610 			rte_ether_addr_copy(addr,
4611 					&dev->data->hash_mac_addrs[index]);
4612 		else
4613 			rte_ether_addr_copy(&null_mac_addr,
4614 					&dev->data->hash_mac_addrs[index]);
4615 	}
4616 
4617 	return eth_err(port_id, ret);
4618 }
4619 
4620 int
4621 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
4622 {
4623 	struct rte_eth_dev *dev;
4624 
4625 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4626 	dev = &rte_eth_devices[port_id];
4627 
4628 	if (*dev->dev_ops->uc_all_hash_table_set == NULL)
4629 		return -ENOTSUP;
4630 	return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
4631 								       on));
4632 }
4633 
4634 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4635 					uint32_t tx_rate)
4636 {
4637 	struct rte_eth_dev *dev;
4638 	struct rte_eth_dev_info dev_info;
4639 	struct rte_eth_link link;
4640 	int ret;
4641 
4642 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4643 	dev = &rte_eth_devices[port_id];
4644 
4645 	ret = rte_eth_dev_info_get(port_id, &dev_info);
4646 	if (ret != 0)
4647 		return ret;
4648 
4649 	link = dev->data->dev_link;
4650 
4651 	if (queue_idx > dev_info.max_tx_queues) {
4652 			"Set queue rate limit: port %u: invalid queue ID=%u\n",
4653 			"Set queue rate limit:port %u: invalid queue ID=%u\n",
4654 			port_id, queue_idx);
4655 		return -EINVAL;
4656 	}
4657 
4658 	if (tx_rate > link.link_speed) {
4659 		RTE_ETHDEV_LOG(ERR,
4660 			"Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
4661 			tx_rate, link.link_speed);
4662 		return -EINVAL;
4663 	}
4664 
4665 	if (*dev->dev_ops->set_queue_rate_limit == NULL)
4666 		return -ENOTSUP;
4667 	return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
4668 							queue_idx, tx_rate));
4669 }
4670 
4671 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
4672 			       uint8_t avail_thresh)
4673 {
4674 	struct rte_eth_dev *dev;
4675 
4676 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4677 	dev = &rte_eth_devices[port_id];
4678 
4679 	if (queue_id > dev->data->nb_rx_queues) {
4680 		RTE_ETHDEV_LOG(ERR,
4681 			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
4682 			port_id, queue_id);
4683 		return -EINVAL;
4684 	}
4685 
4686 	if (avail_thresh > 99) {
4687 		RTE_ETHDEV_LOG(ERR,
4688 			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
4689 			port_id);
4690 		return -EINVAL;
4691 	}
4692 	if (*dev->dev_ops->rx_queue_avail_thresh_set == NULL)
4693 		return -ENOTSUP;
4694 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
4695 							     queue_id, avail_thresh));
4696 }
4697 
4698 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
4699 				 uint8_t *avail_thresh)
4700 {
4701 	struct rte_eth_dev *dev;
4702 
4703 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4704 	dev = &rte_eth_devices[port_id];
4705 
4706 	if (queue_id == NULL)
4707 		return -EINVAL;
4708 	if (*queue_id >= dev->data->nb_rx_queues)
4709 		*queue_id = 0;
4710 
4711 	if (*dev->dev_ops->rx_queue_avail_thresh_query == NULL)
4712 		return -ENOTSUP;
4713 	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
4714 							     queue_id, avail_thresh));
4715 }
4716 
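/*
 * Constructor: point every port's fast-path ops at safe defaults before
 * any device is probed.
 */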
4717 RTE_INIT(eth_dev_init_fp_ops)
4718 {
4719 	uint32_t i;
4720 
4721 	for (i = 0; i != RTE_DIM(rte_eth_fp_ops); i++)
4722 		eth_dev_fp_ops_reset(rte_eth_fp_ops + i);
4723 }
4724 
4725 RTE_INIT(eth_dev_init_cb_lists)
4726 {
4727 	uint16_t i;
4728 
4729 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
4730 		TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
4731 }
4732 
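/*
 * Event callbacks can be registered for one port or, with RTE_ETH_ALL,
 * for every possible port; an already registered (fn, arg, event)
 * triple is not added twice.
 */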
4733 int
4734 rte_eth_dev_callback_register(uint16_t port_id,
4735 			enum rte_eth_event_type event,
4736 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4737 {
4738 	struct rte_eth_dev *dev;
4739 	struct rte_eth_dev_callback *user_cb;
4740 	uint16_t next_port;
4741 	uint16_t last_port;
4742 
4743 	if (cb_fn == NULL) {
4744 		RTE_ETHDEV_LOG(ERR,
4745 			"Cannot register ethdev port %u callback from NULL\n",
4746 			port_id);
4747 		return -EINVAL;
4748 	}
4749 
4750 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4751 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4752 		return -EINVAL;
4753 	}
4754 
4755 	if (port_id == RTE_ETH_ALL) {
4756 		next_port = 0;
4757 		last_port = RTE_MAX_ETHPORTS - 1;
4758 	} else {
4759 		next_port = last_port = port_id;
4760 	}
4761 
4762 	rte_spinlock_lock(&eth_dev_cb_lock);
4763 
4764 	do {
4765 		dev = &rte_eth_devices[next_port];
4766 
4767 		TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
4768 			if (user_cb->cb_fn == cb_fn &&
4769 				user_cb->cb_arg == cb_arg &&
4770 				user_cb->event == event) {
4771 				break;
4772 			}
4773 		}
4774 
4775 		/* create a new callback. */
4776 		if (user_cb == NULL) {
4777 			user_cb = rte_zmalloc("INTR_USER_CALLBACK",
4778 				sizeof(struct rte_eth_dev_callback), 0);
4779 			if (user_cb != NULL) {
4780 				user_cb->cb_fn = cb_fn;
4781 				user_cb->cb_arg = cb_arg;
4782 				user_cb->event = event;
4783 				TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4784 						  user_cb, next);
4785 			} else {
4786 				rte_spinlock_unlock(&eth_dev_cb_lock);
4787 				rte_eth_dev_callback_unregister(port_id, event,
4788 								cb_fn, cb_arg);
4789 				return -ENOMEM;
4790 			}
4791 
4792 		}
4793 	} while (++next_port <= last_port);
4794 
4795 	rte_spinlock_unlock(&eth_dev_cb_lock);
4796 	return 0;
4797 }
4798 
4799 int
4800 rte_eth_dev_callback_unregister(uint16_t port_id,
4801 			enum rte_eth_event_type event,
4802 			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4803 {
4804 	int ret;
4805 	struct rte_eth_dev *dev;
4806 	struct rte_eth_dev_callback *cb, *next;
4807 	uint16_t next_port;
4808 	uint16_t last_port;
4809 
4810 	if (cb_fn == NULL) {
4811 		RTE_ETHDEV_LOG(ERR,
4812 			"Cannot unregister ethdev port %u callback from NULL\n",
4813 			port_id);
4814 		return -EINVAL;
4815 	}
4816 
4817 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4818 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4819 		return -EINVAL;
4820 	}
4821 
4822 	if (port_id == RTE_ETH_ALL) {
4823 		next_port = 0;
4824 		last_port = RTE_MAX_ETHPORTS - 1;
4825 	} else {
4826 		next_port = last_port = port_id;
4827 	}
4828 
4829 	rte_spinlock_lock(&eth_dev_cb_lock);
4830 
4831 	do {
4832 		dev = &rte_eth_devices[next_port];
4833 		ret = 0;
4834 		for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4835 		     cb = next) {
4836 
4837 			next = TAILQ_NEXT(cb, next);
4838 
4839 			if (cb->cb_fn != cb_fn || cb->event != event ||
4840 			    (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4841 				continue;
4842 
4843 			/*
4844 			 * if this callback is not executing right now,
4845 			 * then remove it.
4846 			 */
4847 			if (cb->active == 0) {
4848 				TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4849 				rte_free(cb);
4850 			} else {
4851 				ret = -EAGAIN;
4852 			}
4853 		}
4854 	} while (++next_port <= last_port);
4855 
4856 	rte_spinlock_unlock(&eth_dev_cb_lock);
4857 	return ret;
4858 }
4859 
4860 int
4861 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4862 {
4863 	uint32_t vec;
4864 	struct rte_eth_dev *dev;
4865 	struct rte_intr_handle *intr_handle;
4866 	uint16_t qid;
4867 	int rc;
4868 
4869 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4870 	dev = &rte_eth_devices[port_id];
4871 
4872 	if (!dev->intr_handle) {
4873 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4874 		return -ENOTSUP;
4875 	}
4876 
4877 	intr_handle = dev->intr_handle;
4878 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4879 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4880 		return -EPERM;
4881 	}
4882 
4883 	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4884 		vec = rte_intr_vec_list_index_get(intr_handle, qid);
4885 		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4886 		if (rc && rc != -EEXIST) {
4887 			RTE_ETHDEV_LOG(ERR,
4888 				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4889 				port_id, qid, op, epfd, vec);
4890 		}
4891 	}
4892 
4893 	return 0;
4894 }
4895 
4896 int
4897 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4898 {
4899 	struct rte_intr_handle *intr_handle;
4900 	struct rte_eth_dev *dev;
4901 	unsigned int efd_idx;
4902 	uint32_t vec;
4903 	int fd;
4904 
4905 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4906 	dev = &rte_eth_devices[port_id];
4907 
4908 	if (queue_id >= dev->data->nb_rx_queues) {
4909 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4910 		return -1;
4911 	}
4912 
4913 	if (!dev->intr_handle) {
4914 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4915 		return -1;
4916 	}
4917 
4918 	intr_handle = dev->intr_handle;
4919 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4920 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4921 		return -1;
4922 	}
4923 
4924 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4925 	efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4926 		(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4927 	fd = rte_intr_efds_index_get(intr_handle, efd_idx);
4928 
4929 	return fd;
4930 }
4931 
4932 int
4933 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4934 			  int epfd, int op, void *data)
4935 {
4936 	uint32_t vec;
4937 	struct rte_eth_dev *dev;
4938 	struct rte_intr_handle *intr_handle;
4939 	int rc;
4940 
4941 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4942 	dev = &rte_eth_devices[port_id];
4943 
4944 	if (queue_id >= dev->data->nb_rx_queues) {
4945 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
4946 		return -EINVAL;
4947 	}
4948 
4949 	if (!dev->intr_handle) {
4950 		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
4951 		return -ENOTSUP;
4952 	}
4953 
4954 	intr_handle = dev->intr_handle;
4955 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
4956 		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
4957 		return -EPERM;
4958 	}
4959 
4960 	vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
4961 	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4962 	if (rc && rc != -EEXIST) {
4963 		RTE_ETHDEV_LOG(ERR,
4964 			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
4965 			port_id, queue_id, op, epfd, vec);
4966 		return rc;
4967 	}
4968 
4969 	return 0;
4970 }
4971 
4972 int
4973 rte_eth_dev_rx_intr_enable(uint16_t port_id,
4974 			   uint16_t queue_id)
4975 {
4976 	struct rte_eth_dev *dev;
4977 	int ret;
4978 
4979 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4980 	dev = &rte_eth_devices[port_id];
4981 
4982 	ret = eth_dev_validate_rx_queue(dev, queue_id);
4983 	if (ret != 0)
4984 		return ret;
4985 
4986 	if (*dev->dev_ops->rx_queue_intr_enable == NULL)
4987 		return -ENOTSUP;
4988 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id));
4989 }
4990 
4991 int
4992 rte_eth_dev_rx_intr_disable(uint16_t port_id,
4993 			    uint16_t queue_id)
4994 {
4995 	struct rte_eth_dev *dev;
4996 	int ret;
4997 
4998 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4999 	dev = &rte_eth_devices[port_id];
5000 
5001 	ret = eth_dev_validate_rx_queue(dev, queue_id);
5002 	if (ret != 0)
5003 		return ret;
5004 
5005 	if (*dev->dev_ops->rx_queue_intr_disable == NULL)
5006 		return -ENOTSUP;
5007 	return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id));
5008 }
5009 
5010 
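/*
 * Rx/Tx callback lists are singly linked. Writers serialize on a
 * spinlock, while data-plane readers traverse lock-free; new nodes are
 * therefore published with release stores only after being fully
 * initialized.
 */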
5011 const struct rte_eth_rxtx_callback *
5012 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
5013 		rte_rx_callback_fn fn, void *user_param)
5014 {
5015 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5016 	rte_errno = ENOTSUP;
5017 	return NULL;
5018 #endif
5019 	struct rte_eth_dev *dev;
5020 
5021 	/* check input parameters */
5022 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5023 		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5024 		rte_errno = EINVAL;
5025 		return NULL;
5026 	}
5027 	dev = &rte_eth_devices[port_id];
5028 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5029 		rte_errno = EINVAL;
5030 		return NULL;
5031 	}
5032 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5033 
5034 	if (cb == NULL) {
5035 		rte_errno = ENOMEM;
5036 		return NULL;
5037 	}
5038 
5039 	cb->fn.rx = fn;
5040 	cb->param = user_param;
5041 
5042 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5043 	/* Add the callback at the tail to keep FIFO order. */
5044 	struct rte_eth_rxtx_callback *tail =
5045 		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5046 
5047 	if (!tail) {
5048 		/* Stores to cb->fn and cb->param should complete before
5049 		 * cb is visible to data plane.
5050 		 */
5051 		__atomic_store_n(
5052 			&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5053 			cb, __ATOMIC_RELEASE);
5054 
5055 	} else {
5056 		while (tail->next)
5057 			tail = tail->next;
5058 		/* Stores to cb->fn and cb->param should complete before
5059 		 * cb is visible to data plane.
5060 		 */
5061 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5062 	}
5063 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5064 
5065 	return cb;
5066 }
5067 
5068 const struct rte_eth_rxtx_callback *
5069 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
5070 		rte_rx_callback_fn fn, void *user_param)
5071 {
5072 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5073 	rte_errno = ENOTSUP;
5074 	return NULL;
5075 #endif
5076 	/* check input parameters */
5077 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5078 		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
5079 		rte_errno = EINVAL;
5080 		return NULL;
5081 	}
5082 
5083 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5084 
5085 	if (cb == NULL) {
5086 		rte_errno = ENOMEM;
5087 		return NULL;
5088 	}
5089 
5090 	cb->fn.rx = fn;
5091 	cb->param = user_param;
5092 
5093 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5094 	/* Add the callback at the first position */
5095 	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
5096 	/* Stores to cb->fn, cb->param and cb->next should complete before
5097 	 * cb is visible to data plane threads.
5098 	 */
5099 	__atomic_store_n(
5100 		&rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
5101 		cb, __ATOMIC_RELEASE);
5102 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5103 
5104 	return cb;
5105 }
5106 
5107 const struct rte_eth_rxtx_callback *
5108 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
5109 		rte_tx_callback_fn fn, void *user_param)
5110 {
5111 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5112 	rte_errno = ENOTSUP;
5113 	return NULL;
5114 #endif
5115 	struct rte_eth_dev *dev;
5116 
5117 	/* check input parameters */
5118 	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
5119 		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
5120 		rte_errno = EINVAL;
5121 		return NULL;
5122 	}
5123 
5124 	dev = &rte_eth_devices[port_id];
5125 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5126 		rte_errno = EINVAL;
5127 		return NULL;
5128 	}
5129 
5130 	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
5131 
5132 	if (cb == NULL) {
5133 		rte_errno = ENOMEM;
5134 		return NULL;
5135 	}
5136 
5137 	cb->fn.tx = fn;
5138 	cb->param = user_param;
5139 
5140 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5141 	/* Add the callback at the tail to keep FIFO order. */
5142 	struct rte_eth_rxtx_callback *tail =
5143 		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
5144 
5145 	if (!tail) {
5146 		/* Stores to cb->fn and cb->param should complete before
5147 		 * cb is visible to data plane.
5148 		 */
5149 		__atomic_store_n(
5150 			&rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
5151 			cb, __ATOMIC_RELEASE);
5152 
5153 	} else {
5154 		while (tail->next)
5155 			tail = tail->next;
5156 		/* Stores to cb->fn and cb->param should complete before
5157 		 * cb is visible to data plane.
5158 		 */
5159 		__atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
5160 	}
5161 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5162 
5163 	return cb;
5164 }
5165 
5166 int
5167 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
5168 		const struct rte_eth_rxtx_callback *user_cb)
5169 {
5170 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5171 	return -ENOTSUP;
5172 #endif
5173 	/* Check input parameters. */
5174 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5175 	if (user_cb == NULL ||
5176 			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
5177 		return -EINVAL;
5178 
5179 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5180 	struct rte_eth_rxtx_callback *cb;
5181 	struct rte_eth_rxtx_callback **prev_cb;
5182 	int ret = -EINVAL;
5183 
5184 	rte_spinlock_lock(&eth_dev_rx_cb_lock);
5185 	prev_cb = &dev->post_rx_burst_cbs[queue_id];
5186 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5187 		cb = *prev_cb;
5188 		if (cb == user_cb) {
5189 			/* Remove the user cb from the callback list. */
5190 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5191 			ret = 0;
5192 			break;
5193 		}
5194 	}
5195 	rte_spinlock_unlock(&eth_dev_rx_cb_lock);
5196 
5197 	return ret;
5198 }
5199 
5200 int
5201 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5202 		const struct rte_eth_rxtx_callback *user_cb)
5203 {
5204 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
5205 	return -ENOTSUP;
5206 #endif
5207 	/* Check input parameters. */
5208 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5209 	if (user_cb == NULL ||
5210 			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
5211 		return -EINVAL;
5212 
5213 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
5214 	int ret = -EINVAL;
5215 	struct rte_eth_rxtx_callback *cb;
5216 	struct rte_eth_rxtx_callback **prev_cb;
5217 
5218 	rte_spinlock_lock(&eth_dev_tx_cb_lock);
5219 	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
5220 	for (; *prev_cb != NULL; prev_cb = &cb->next) {
5221 		cb = *prev_cb;
5222 		if (cb == user_cb) {
5223 			/* Remove the user cb from the callback list. */
5224 			__atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
5225 			ret = 0;
5226 			break;
5227 		}
5228 	}
5229 	rte_spinlock_unlock(&eth_dev_tx_cb_lock);
5230 
5231 	return ret;
5232 }
5233 
5234 int
5235 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5236 	struct rte_eth_rxq_info *qinfo)
5237 {
5238 	struct rte_eth_dev *dev;
5239 
5240 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5241 	dev = &rte_eth_devices[port_id];
5242 
5243 	if (queue_id >= dev->data->nb_rx_queues) {
5244 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5245 		return -EINVAL;
5246 	}
5247 
5248 	if (qinfo == NULL) {
5249 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
5250 			port_id, queue_id);
5251 		return -EINVAL;
5252 	}
5253 
5254 	if (dev->data->rx_queues == NULL ||
5255 			dev->data->rx_queues[queue_id] == NULL) {
5256 		RTE_ETHDEV_LOG(ERR,
5257 			       "Rx queue %"PRIu16" of device with port_id=%"
5258 			       PRIu16" has not been setup\n",
5259 			       queue_id, port_id);
5260 		return -EINVAL;
5261 	}
5262 
5263 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
5264 		RTE_ETHDEV_LOG(INFO,
5265 			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5266 			queue_id, port_id);
5267 		return -EINVAL;
5268 	}
5269 
5270 	if (*dev->dev_ops->rxq_info_get == NULL)
5271 		return -ENOTSUP;
5272 
5273 	memset(qinfo, 0, sizeof(*qinfo));
5274 	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
5275 	qinfo->queue_state = dev->data->rx_queue_state[queue_id];
5276 
5277 	return 0;
5278 }
5279 
5280 int
5281 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5282 	struct rte_eth_txq_info *qinfo)
5283 {
5284 	struct rte_eth_dev *dev;
5285 
5286 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5287 	dev = &rte_eth_devices[port_id];
5288 
5289 	if (queue_id >= dev->data->nb_tx_queues) {
5290 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5291 		return -EINVAL;
5292 	}
5293 
5294 	if (qinfo == NULL) {
5295 		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
5296 			port_id, queue_id);
5297 		return -EINVAL;
5298 	}
5299 
5300 	if (dev->data->tx_queues == NULL ||
5301 			dev->data->tx_queues[queue_id] == NULL) {
5302 		RTE_ETHDEV_LOG(ERR,
5303 			       "Tx queue %"PRIu16" of device with port_id=%"
5304 			       PRIu16" has not been setup\n",
5305 			       queue_id, port_id);
5306 		return -EINVAL;
5307 	}
5308 
5309 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
5310 		RTE_ETHDEV_LOG(INFO,
5311 			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
5312 			queue_id, port_id);
5313 		return -EINVAL;
5314 	}
5315 
5316 	if (*dev->dev_ops->txq_info_get == NULL)
5317 		return -ENOTSUP;
5318 
5319 	memset(qinfo, 0, sizeof(*qinfo));
5320 	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
5321 	qinfo->queue_state = dev->data->tx_queue_state[queue_id];
5322 
5323 	return 0;
5324 }
5325 
5326 int
5327 rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5328 			  struct rte_eth_burst_mode *mode)
5329 {
5330 	struct rte_eth_dev *dev;
5331 
5332 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5333 	dev = &rte_eth_devices[port_id];
5334 
5335 	if (queue_id >= dev->data->nb_rx_queues) {
5336 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5337 		return -EINVAL;
5338 	}
5339 
5340 	if (mode == NULL) {
5341 		RTE_ETHDEV_LOG(ERR,
5342 			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
5343 			port_id, queue_id);
5344 		return -EINVAL;
5345 	}
5346 
5347 	if (*dev->dev_ops->rx_burst_mode_get == NULL)
5348 		return -ENOTSUP;
5349 	memset(mode, 0, sizeof(*mode));
5350 	return eth_err(port_id,
5351 		       dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
5352 }
5353 
5354 int
5355 rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5356 			  struct rte_eth_burst_mode *mode)
5357 {
5358 	struct rte_eth_dev *dev;
5359 
5360 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5361 	dev = &rte_eth_devices[port_id];
5362 
5363 	if (queue_id >= dev->data->nb_tx_queues) {
5364 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
5365 		return -EINVAL;
5366 	}
5367 
5368 	if (mode == NULL) {
5369 		RTE_ETHDEV_LOG(ERR,
5370 			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
5371 			port_id, queue_id);
5372 		return -EINVAL;
5373 	}
5374 
5375 	if (*dev->dev_ops->tx_burst_mode_get == NULL)
5376 		return -ENOTSUP;
5377 	memset(mode, 0, sizeof(*mode));
5378 	return eth_err(port_id,
5379 		       dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
5380 }
5381 
5382 int
5383 rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5384 		struct rte_power_monitor_cond *pmc)
5385 {
5386 	struct rte_eth_dev *dev;
5387 
5388 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5389 	dev = &rte_eth_devices[port_id];
5390 
5391 	if (queue_id >= dev->data->nb_rx_queues) {
5392 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
5393 		return -EINVAL;
5394 	}
5395 
5396 	if (pmc == NULL) {
5397 		RTE_ETHDEV_LOG(ERR,
5398 			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
5399 			port_id, queue_id);
5400 		return -EINVAL;
5401 	}
5402 
5403 	if (*dev->dev_ops->get_monitor_addr == NULL)
5404 		return -ENOTSUP;
5405 	return eth_err(port_id,
5406 		dev->dev_ops->get_monitor_addr(dev->data->rx_queues[queue_id], pmc));
5407 }
5408 
5409 int
5410 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5411 			     struct rte_ether_addr *mc_addr_set,
5412 			     uint32_t nb_mc_addr)
5413 {
5414 	struct rte_eth_dev *dev;
5415 
5416 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5417 	dev = &rte_eth_devices[port_id];
5418 
5419 	if (*dev->dev_ops->set_mc_addr_list == NULL)
5420 		return -ENOTSUP;
5421 	return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
5422 						mc_addr_set, nb_mc_addr));
5423 }
5424 
5425 int
5426 rte_eth_timesync_enable(uint16_t port_id)
5427 {
5428 	struct rte_eth_dev *dev;
5429 
5430 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5431 	dev = &rte_eth_devices[port_id];
5432 
5433 	if (*dev->dev_ops->timesync_enable == NULL)
5434 		return -ENOTSUP;
5435 	return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
5436 }
5437 
5438 int
5439 rte_eth_timesync_disable(uint16_t port_id)
5440 {
5441 	struct rte_eth_dev *dev;
5442 
5443 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5444 	dev = &rte_eth_devices[port_id];
5445 
5446 	if (*dev->dev_ops->timesync_disable == NULL)
5447 		return -ENOTSUP;
5448 	return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
5449 }
5450 
5451 int
5452 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
5453 				   uint32_t flags)
5454 {
5455 	struct rte_eth_dev *dev;
5456 
5457 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5458 	dev = &rte_eth_devices[port_id];
5459 
5460 	if (timestamp == NULL) {
5461 		RTE_ETHDEV_LOG(ERR,
5462 			"Cannot read ethdev port %u Rx timestamp to NULL\n",
5463 			port_id);
5464 		return -EINVAL;
5465 	}
5466 
5467 	if (*dev->dev_ops->timesync_read_rx_timestamp == NULL)
5468 		return -ENOTSUP;
5469 	return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
5470 				(dev, timestamp, flags));
5471 }
5472 
5473 int
5474 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5475 				   struct timespec *timestamp)
5476 {
5477 	struct rte_eth_dev *dev;
5478 
5479 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5480 	dev = &rte_eth_devices[port_id];
5481 
5482 	if (timestamp == NULL) {
5483 		RTE_ETHDEV_LOG(ERR,
5484 			"Cannot read ethdev port %u Tx timestamp to NULL\n",
5485 			port_id);
5486 		return -EINVAL;
5487 	}
5488 
5489 	if (*dev->dev_ops->timesync_read_tx_timestamp == NULL)
5490 		return -ENOTSUP;
5491 	return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
5492 				(dev, timestamp));
5493 }
5494 
5495 int
5496 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
5497 {
5498 	struct rte_eth_dev *dev;
5499 
5500 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5501 	dev = &rte_eth_devices[port_id];
5502 
5503 	if (*dev->dev_ops->timesync_adjust_time == NULL)
5504 		return -ENOTSUP;
5505 	return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev, delta));
5506 }
5507 
5508 int
5509 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
5510 {
5511 	struct rte_eth_dev *dev;
5512 
5513 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5514 	dev = &rte_eth_devices[port_id];
5515 
5516 	if (timestamp == NULL) {
5517 		RTE_ETHDEV_LOG(ERR,
5518 			"Cannot read ethdev port %u timesync time to NULL\n",
5519 			port_id);
5520 		return -EINVAL;
5521 	}
5522 
5523 	if (*dev->dev_ops->timesync_read_time == NULL)
5524 		return -ENOTSUP;
5525 	return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
5526 								timestamp));
5527 }
5528 
5529 int
5530 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
5531 {
5532 	struct rte_eth_dev *dev;
5533 
5534 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5535 	dev = &rte_eth_devices[port_id];
5536 
5537 	if (timestamp == NULL) {
5538 		RTE_ETHDEV_LOG(ERR,
5539 			"Cannot write ethdev port %u timesync from NULL time\n",
5540 			port_id);
5541 		return -EINVAL;
5542 	}
5543 
5544 	if (*dev->dev_ops->timesync_write_time == NULL)
5545 		return -ENOTSUP;
5546 	return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
5547 								timestamp));
5548 }
5549 
5550 int
5551 rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
5552 {
5553 	struct rte_eth_dev *dev;
5554 
5555 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5556 	dev = &rte_eth_devices[port_id];
5557 
5558 	if (clock == NULL) {
5559 		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
5560 			port_id);
5561 		return -EINVAL;
5562 	}
5563 
5564 	if (*dev->dev_ops->read_clock == NULL)
5565 		return -ENOTSUP;
5566 	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
5567 }
5568 
5569 int
5570 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5571 {
5572 	struct rte_eth_dev *dev;
5573 
5574 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5575 	dev = &rte_eth_devices[port_id];
5576 
5577 	if (info == NULL) {
5578 		RTE_ETHDEV_LOG(ERR,
5579 			"Cannot get ethdev port %u register info to NULL\n",
5580 			port_id);
5581 		return -EINVAL;
5582 	}
5583 
5584 	if (*dev->dev_ops->get_reg == NULL)
5585 		return -ENOTSUP;
5586 	return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
5587 }
5588 
5589 int
5590 rte_eth_dev_get_eeprom_length(uint16_t port_id)
5591 {
5592 	struct rte_eth_dev *dev;
5593 
5594 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5595 	dev = &rte_eth_devices[port_id];
5596 
5597 	if (*dev->dev_ops->get_eeprom_length == NULL)
5598 		return -ENOTSUP;
5599 	return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
5600 }
5601 
5602 int
5603 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5604 {
5605 	struct rte_eth_dev *dev;
5606 
5607 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5608 	dev = &rte_eth_devices[port_id];
5609 
5610 	if (info == NULL) {
5611 		RTE_ETHDEV_LOG(ERR,
5612 			"Cannot get ethdev port %u EEPROM info to NULL\n",
5613 			port_id);
5614 		return -EINVAL;
5615 	}
5616 
5617 	if (*dev->dev_ops->get_eeprom == NULL)
5618 		return -ENOTSUP;
5619 	return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
5620 }
5621 
5622 int
5623 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5624 {
5625 	struct rte_eth_dev *dev;
5626 
5627 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5628 	dev = &rte_eth_devices[port_id];
5629 
5630 	if (info == NULL) {
5631 		RTE_ETHDEV_LOG(ERR,
5632 			"Cannot set ethdev port %u EEPROM from NULL info\n",
5633 			port_id);
5634 		return -EINVAL;
5635 	}
5636 
5637 	if (*dev->dev_ops->set_eeprom == NULL)
5638 		return -ENOTSUP;
5639 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
5640 }
5641 
5642 int
5643 rte_eth_dev_get_module_info(uint16_t port_id,
5644 			    struct rte_eth_dev_module_info *modinfo)
5645 {
5646 	struct rte_eth_dev *dev;
5647 
5648 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5649 	dev = &rte_eth_devices[port_id];
5650 
5651 	if (modinfo == NULL) {
5652 		RTE_ETHDEV_LOG(ERR,
5653 			"Cannot get ethdev port %u EEPROM module info to NULL\n",
5654 			port_id);
5655 		return -EINVAL;
5656 	}
5657 
5658 	if (*dev->dev_ops->get_module_info == NULL)
5659 		return -ENOTSUP;
5660 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
5661 }
5662 
5663 int
5664 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5665 			      struct rte_dev_eeprom_info *info)
5666 {
5667 	struct rte_eth_dev *dev;
5668 
5669 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5670 	dev = &rte_eth_devices[port_id];
5671 
5672 	if (info == NULL) {
5673 		RTE_ETHDEV_LOG(ERR,
5674 			"Cannot get ethdev port %u module EEPROM info to NULL\n",
5675 			port_id);
5676 		return -EINVAL;
5677 	}
5678 
5679 	if (info->data == NULL) {
5680 		RTE_ETHDEV_LOG(ERR,
5681 			"Cannot get ethdev port %u module EEPROM data to NULL\n",
5682 			port_id);
5683 		return -EINVAL;
5684 	}
5685 
5686 	if (info->length == 0) {
5687 		RTE_ETHDEV_LOG(ERR,
5688 			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
5689 			port_id);
5690 		return -EINVAL;
5691 	}
5692 
5693 	if (*dev->dev_ops->get_module_eeprom == NULL)
5694 		return -ENOTSUP;
5695 	return (*dev->dev_ops->get_module_eeprom)(dev, info);
5696 }
5697 
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dcb_info == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u DCB info to NULL\n",
			port_id);
		return -EINVAL;
	}

	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	if (*dev->dev_ops->get_dcb_info == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
}

static void
eth_dev_adjust_nb_desc(uint16_t *nb_desc,
		const struct rte_eth_desc_lim *desc_lim)
{
	if (desc_lim->nb_align != 0)
		*nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);

	if (desc_lim->nb_max != 0)
		*nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);

	*nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
}

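/*
 * Usage sketch (illustrative only, not part of this file): clamp the
 * requested ring sizes to the driver limits before setting up queue 0;
 * "mb_pool" is an assumed, already-created mempool.
 *
 *	uint16_t nb_rxd = 4096, nb_txd = 4096;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *		rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
 *				rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *		rte_eth_tx_queue_setup(port_id, 0, nb_txd,
 *				rte_eth_dev_socket_id(port_id), NULL);
 *	}
 */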
int
rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
				 uint16_t *nb_rx_desc,
				 uint16_t *nb_tx_desc)
{
	struct rte_eth_dev_info dev_info;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	if (nb_rx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);

	if (nb_tx_desc != NULL)
		eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);

	return 0;
}

int
rte_eth_dev_hairpin_capability_get(uint16_t port_id,
				   struct rte_eth_hairpin_cap *cap)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (cap == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u hairpin capability to NULL\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->hairpin_cap_get == NULL)
		return -ENOTSUP;
	memset(cap, 0, sizeof(*cap));
	return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}

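/*
 * Usage sketch (illustrative only): check whether the port's driver is
 * happy with a given mempool ops name before creating the Rx pool;
 * "ring_mp_mc" is just an example name here.
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") == 1)
 *		(create the mbuf pool with those ops)
 *
 * A return of 1 means the ops are supported (including, as below, when
 * the driver expresses no preference at all); any other value comes
 * straight from the driver callback.
 */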
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (pool == NULL) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot test ethdev port %u mempool operation from NULL pool\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->pool_ops_supported == NULL)
		return 1; /* all pools are supported */

	return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}

static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	int port_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	RTE_ETH_FOREACH_DEV(port_id)
		rte_tel_data_add_array_int(d, port_id);
	return 0;
}

static void
eth_dev_add_port_queue_stats(struct rte_tel_data *d, uint64_t *q_stats,
		const char *stat_name)
{
	int q;
	struct rte_tel_data *q_data = rte_tel_data_alloc();

	if (q_data == NULL)
		return;
	rte_tel_data_start_array(q_data, RTE_TEL_U64_VAL);
	for (q = 0; q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		rte_tel_data_add_array_u64(q_data, q_stats[q]);
	rte_tel_data_add_dict_container(d, stat_name, q_data, 0);
}

#define ADD_DICT_STAT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)

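/*
 * ADD_DICT_STAT(stats, ipackets) expands to
 * rte_tel_data_add_dict_u64(d, "ipackets", stats.ipackets), i.e. each
 * dictionary key below is simply the C field name.
 *
 * Illustrative "/ethdev/stats" exchange over the telemetry socket (the
 * numbers are made up):
 *
 *	--> /ethdev/stats,0
 *	{"/ethdev/stats": {"ipackets": 1024, "opackets": 1024, ...,
 *	    "q_ipackets": [1024, 0, 0, ...], ...}}
 */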
static int
eth_dev_handle_port_stats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_stats stats;
	int port_id, ret;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = atoi(params);
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_stats_get(port_id, &stats);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	ADD_DICT_STAT(stats, ipackets);
	ADD_DICT_STAT(stats, opackets);
	ADD_DICT_STAT(stats, ibytes);
	ADD_DICT_STAT(stats, obytes);
	ADD_DICT_STAT(stats, imissed);
	ADD_DICT_STAT(stats, ierrors);
	ADD_DICT_STAT(stats, oerrors);
	ADD_DICT_STAT(stats, rx_nombuf);
	eth_dev_add_port_queue_stats(d, stats.q_ipackets, "q_ipackets");
	eth_dev_add_port_queue_stats(d, stats.q_opackets, "q_opackets");
	eth_dev_add_port_queue_stats(d, stats.q_ibytes, "q_ibytes");
	eth_dev_add_port_queue_stats(d, stats.q_obytes, "q_obytes");
	eth_dev_add_port_queue_stats(d, stats.q_errors, "q_errors");

	return 0;
}

static int
eth_dev_handle_port_xstats(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_eth_xstat *eth_xstats;
	struct rte_eth_xstat_name *xstat_names;
	int port_id, num_xstats;
	int i, ret;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
	if (num_xstats < 0)
		return -1;

	/* use one malloc for both names and stats */
	eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
			sizeof(struct rte_eth_xstat_name)) * num_xstats);
	if (eth_xstats == NULL)
		return -1;
	xstat_names = (void *)&eth_xstats[num_xstats];

	ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
	if (ret < 0 || ret > num_xstats) {
		free(eth_xstats);
		return -1;
	}

	rte_tel_data_start_dict(d);
	for (i = 0; i < num_xstats; i++)
		rte_tel_data_add_dict_u64(d, xstat_names[i].name,
				eth_xstats[i].value);
	free(eth_xstats);
	return 0;
}

#ifndef RTE_EXEC_ENV_WINDOWS
static int
eth_dev_handle_port_dump_priv(const char *cmd __rte_unused,
			const char *params,
			struct rte_tel_data *d)
{
	char *buf, *end_param;
	int port_id, ret;
	FILE *f;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -EINVAL;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	buf = calloc(RTE_TEL_MAX_SINGLE_STRING_LEN, sizeof(char));
	if (buf == NULL)
		return -ENOMEM;

	f = fmemopen(buf, RTE_TEL_MAX_SINGLE_STRING_LEN - 1, "w+");
	if (f == NULL) {
		free(buf);
		return -EINVAL;
	}

	ret = rte_eth_dev_priv_dump(port_id, f);
	fclose(f);
	if (ret == 0) {
		rte_tel_data_start_dict(d);
		rte_tel_data_string(d, buf);
	}

	free(buf);
	return 0;
}
#endif /* !RTE_EXEC_ENV_WINDOWS */

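/*
 * Telemetry callback for "/ethdev/link_status". Illustrative reply for a
 * port whose link is up (the numbers are made up):
 *
 *	--> /ethdev/link_status,0
 *	{"/ethdev/link_status": {"status": "UP", "speed": 25000,
 *	    "duplex": "full-duplex"}}
 */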
static int
eth_dev_handle_port_link_status(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	static const char *status_str = "status";
	int ret, port_id;
	struct rte_eth_link link;
	char *end_param;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");
	if (!rte_eth_dev_is_valid_port(port_id))
		return -1;

	ret = rte_eth_link_get_nowait(port_id, &link);
	if (ret < 0)
		return -1;

	rte_tel_data_start_dict(d);
	if (!link.link_status) {
		rte_tel_data_add_dict_string(d, status_str, "DOWN");
		return 0;
	}
	rte_tel_data_add_dict_string(d, status_str, "UP");
	rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
	rte_tel_data_add_dict_string(d, "duplex",
			(link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
				"full-duplex" : "half-duplex");
	return 0;
}

static int
eth_dev_handle_port_info(const char *cmd __rte_unused,
		const char *params,
		struct rte_tel_data *d)
{
	struct rte_tel_data *rxq_state, *txq_state;
	char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_eth_dev *eth_dev;
	char *end_param;
	int port_id, i;

	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
		return -1;

	port_id = strtoul(params, &end_param, 0);
	if (*end_param != '\0')
		RTE_ETHDEV_LOG(NOTICE,
			"Extra parameters passed to ethdev telemetry command, ignoring\n");

	if (!rte_eth_dev_is_valid_port(port_id))
		return -EINVAL;

	eth_dev = &rte_eth_devices[port_id];

	rxq_state = rte_tel_data_alloc();
	if (rxq_state == NULL)
		return -ENOMEM;

	txq_state = rte_tel_data_alloc();
	if (txq_state == NULL) {
		rte_tel_data_free(rxq_state);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
	rte_tel_data_add_dict_int(d, "state", eth_dev->state);
	rte_tel_data_add_dict_int(d, "nb_rx_queues",
			eth_dev->data->nb_rx_queues);
	rte_tel_data_add_dict_int(d, "nb_tx_queues",
			eth_dev->data->nb_tx_queues);
	rte_tel_data_add_dict_int(d, "port_id", eth_dev->data->port_id);
	rte_tel_data_add_dict_int(d, "mtu", eth_dev->data->mtu);
	rte_tel_data_add_dict_int(d, "rx_mbuf_size_min",
			eth_dev->data->min_rx_buf_size);
	rte_tel_data_add_dict_u64(d, "rx_mbuf_alloc_fail",
			eth_dev->data->rx_mbuf_alloc_failed);
	rte_ether_format_addr(mac_addr, sizeof(mac_addr),
			eth_dev->data->mac_addrs);
	rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
	rte_tel_data_add_dict_int(d, "promiscuous",
			eth_dev->data->promiscuous);
	rte_tel_data_add_dict_int(d, "scattered_rx",
			eth_dev->data->scattered_rx);
	rte_tel_data_add_dict_int(d, "all_multicast",
			eth_dev->data->all_multicast);
	rte_tel_data_add_dict_int(d, "dev_started", eth_dev->data->dev_started);
	rte_tel_data_add_dict_int(d, "lro", eth_dev->data->lro);
	rte_tel_data_add_dict_int(d, "dev_configured",
			eth_dev->data->dev_configured);

	rte_tel_data_start_array(rxq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
		rte_tel_data_add_array_int(rxq_state,
				eth_dev->data->rx_queue_state[i]);

	rte_tel_data_start_array(txq_state, RTE_TEL_INT_VAL);
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		rte_tel_data_add_array_int(txq_state,
				eth_dev->data->tx_queue_state[i]);

	rte_tel_data_add_dict_container(d, "rxq_state", rxq_state, 0);
	rte_tel_data_add_dict_container(d, "txq_state", txq_state, 0);
	rte_tel_data_add_dict_int(d, "numa_node", eth_dev->data->numa_node);
	rte_tel_data_add_dict_u64(d, "dev_flags", eth_dev->data->dev_flags);
	rte_tel_data_add_dict_u64(d, "rx_offloads",
			eth_dev->data->dev_conf.rxmode.offloads);
	rte_tel_data_add_dict_u64(d, "tx_offloads",
			eth_dev->data->dev_conf.txmode.offloads);
	rte_tel_data_add_dict_u64(d, "ethdev_rss_hf",
			eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);

	return 0;
}

int
rte_eth_representor_info_get(uint16_t port_id,
			     struct rte_eth_representor_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (*dev->dev_ops->representor_info_get == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}

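/*
 * Usage sketch (illustrative only): negotiate delivery of flow metadata
 * before the first rte_eth_dev_configure() call; on success the driver
 * trims "features" down to the subset it can actually deliver.
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			RTE_ETH_RX_METADATA_USER_MARK;
 *
 *	if (rte_eth_rx_metadata_negotiate(port_id, &features) == 0 &&
 *	    (features & RTE_ETH_RX_METADATA_USER_MARK) != 0)
 *		(MARK delivery can be relied upon from now on)
 */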
int
rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured != 0) {
		RTE_ETHDEV_LOG(ERR,
			"The port (ID=%"PRIu16") is already configured\n",
			port_id);
		return -EBUSY;
	}

	if (features == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->rx_metadata_negotiate == NULL)
		return -ENOTSUP;
	return eth_err(port_id,
		       (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}

int
rte_eth_ip_reassembly_capability_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *reassembly_capa)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured, cannot get IP reassembly capability\n",
			port_id);
		return -EINVAL;
	}

	if (reassembly_capa == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_capability_get == NULL)
		return -ENOTSUP;
	memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));

	return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
					(dev, reassembly_capa));
}

int
rte_eth_ip_reassembly_conf_get(uint16_t port_id,
		struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured, cannot get IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_get == NULL)
		return -ENOTSUP;
	memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
	return eth_err(port_id,
		       (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
}

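/*
 * Usage sketch (illustrative only): enable IP reassembly within the
 * driver's advertised limits, after rte_eth_dev_configure() but before
 * rte_eth_dev_start(); the 500 ms cap is an arbitrary example value.
 *
 *	struct rte_eth_ip_reassembly_params capa;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *		capa.timeout_ms = RTE_MIN(capa.timeout_ms, 500u);
 *		(void)rte_eth_ip_reassembly_conf_set(port_id, &capa);
 *	}
 */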
int
rte_eth_ip_reassembly_conf_set(uint16_t port_id,
		const struct rte_eth_ip_reassembly_params *conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_configured == 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is not configured, cannot set IP reassembly configuration\n",
			port_id);
		return -EINVAL;
	}

	if (dev->data->dev_started != 0) {
		RTE_ETHDEV_LOG(ERR,
			"Device with port_id=%u is started, cannot configure IP reassembly params\n",
			port_id);
		return -EINVAL;
	}

	if (conf == NULL) {
		RTE_ETHDEV_LOG(ERR,
				"Invalid IP reassembly configuration (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->ip_reassembly_conf_set == NULL)
		return -ENOTSUP;
	return eth_err(port_id,
		       (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
}

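/*
 * Usage sketch (illustrative only): dump driver-private state to stdout,
 * e.g. from an application debug command.
 *
 *	(void)rte_eth_dev_priv_dump(port_id, stdout);
 */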
int
rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_dev_priv_dump == NULL)
		return -ENOTSUP;
	return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}

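/*
 * Usage sketch for the two descriptor dumpers below (illustrative only):
 * print the first 16 descriptors of Rx queue 0 and Tx queue 0; only some
 * drivers implement the underlying callbacks.
 *
 *	(void)rte_eth_rx_descriptor_dump(port_id, 0, 0, 16, stderr);
 *	(void)rte_eth_tx_descriptor_dump(port_id, 0, 0, 16, stderr);
 */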
int
rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_rx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_rx_descriptor_dump)(dev,
						queue_id, offset, num, file));
}

int
rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
			   uint16_t offset, uint16_t num, FILE *file)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	if (file == NULL) {
		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
		return -EINVAL;
	}

	if (*dev->dev_ops->eth_tx_descriptor_dump == NULL)
		return -ENOTSUP;

	return eth_err(port_id, (*dev->dev_ops->eth_tx_descriptor_dump)(dev,
						queue_id, offset, num, file));
}

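/*
 * Usage sketch (illustrative only): the usual two-call pattern, sizing
 * the result array first by passing a NULL array of size zero.
 *
 *	int num = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id,
 *			NULL, 0);
 *	if (num > 0) {
 *		uint32_t *ptypes = calloc(num, sizeof(*ptypes));
 *		if (ptypes != NULL)
 *			num = rte_eth_buffer_split_get_supported_hdr_ptypes(
 *					port_id, ptypes, num);
 *	}
 */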
int
rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_types;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (ptypes == NULL && num > 0) {
		RTE_ETHDEV_LOG(ERR,
			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
			port_id);
		return -EINVAL;
	}

	if (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get == NULL)
		return -ENOTSUP;
	all_types = (*dev->dev_ops->buffer_split_supported_hdr_ptypes_get)(dev);

	if (all_types == NULL)
		return 0;

	for (i = 0, j = 0; all_types[i] != RTE_PTYPE_UNKNOWN; ++i) {
		if (j < num)
			ptypes[j] = all_types[i];
		j++;
	}

	return j;
}

RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);

RTE_INIT(ethdev_init_telemetry)
{
	rte_telemetry_register_cmd("/ethdev/list", eth_dev_handle_port_list,
			"Returns list of available ethdev ports. Takes no parameters");
	rte_telemetry_register_cmd("/ethdev/stats", eth_dev_handle_port_stats,
			"Returns the common stats for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/xstats", eth_dev_handle_port_xstats,
			"Returns the extended stats for a port. Parameters: int port_id");
#ifndef RTE_EXEC_ENV_WINDOWS
	rte_telemetry_register_cmd("/ethdev/dump_priv", eth_dev_handle_port_dump_priv,
			"Returns private information dump for a port. Parameters: int port_id");
#endif
	rte_telemetry_register_cmd("/ethdev/link_status",
			eth_dev_handle_port_link_status,
			"Returns the link status for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/info", eth_dev_handle_port_info,
			"Returns the device info for a port. Parameters: int port_id");
	rte_telemetry_register_cmd("/ethdev/module_eeprom", eth_dev_handle_port_module_eeprom,
			"Returns module EEPROM info with SFF specs. Parameters: int port_id");
}
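
/*
 * All commands registered above are reachable through the telemetry
 * socket, e.g. with usertools/dpdk-telemetry.py (illustrative session,
 * the values are made up):
 *
 *	--> /ethdev/list
 *	{"/ethdev/list": [0, 1]}
 *	--> /ethdev/info,0
 *	{"/ethdev/info": {"name": "0000:3b:00.0", "state": 1, ...}}
 */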