xref: /dpdk/lib/ethdev/rte_ethdev.h (revision 1ff8b9a6ef248dddebd07a8df7b47f4de9ffab62)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
8 /**
9  * @file
10  *
11  * RTE Ethernet Device API
12  *
13  * The Ethernet Device API is composed of two parts:
14  *
15  * - The application-oriented Ethernet API that includes functions to setup
16  *   an Ethernet device (configure it, setup its Rx and Tx queues and start it),
17  *   to get its MAC address, the speed and the status of its physical link,
18  *   to receive and to transmit packets, and so on.
19  *
20  * - The driver-oriented Ethernet API that exports functions allowing
21  *   an Ethernet Poll Mode Driver (PMD) to allocate an Ethernet device instance,
22  *   create memzone for HW rings and process registered callbacks, and so on.
23  *   PMDs should include ethdev_driver.h instead of this header.
24  *
25  * By default, all the functions of the Ethernet Device API exported by a PMD
26  * are lock-free functions which are assumed not to be invoked in parallel
27  * from different logical cores on the same target object.  For instance,
28  * the receive function of a PMD cannot be invoked in parallel on two logical
29  * cores to poll the same Rx queue [of the same port]. Of course, this function
30  * can be invoked in parallel by different logical cores on different Rx queues.
31  * It is the responsibility of the upper level application to enforce this rule.
32  *
33  * If needed, parallel accesses by multiple logical cores to shared queues
34  * shall be explicitly protected by dedicated inline lock-aware functions
35  * built on top of their corresponding lock-free functions of the PMD API.
36  *
37  * In all functions of the Ethernet API, the Ethernet device is
38  * designated by an integer >= 0 named the device port identifier.
39  *
40  * At the Ethernet driver level, Ethernet devices are represented by a generic
41  * data structure of type *rte_eth_dev*.
42  *
43  * Ethernet devices are dynamically registered during the PCI probing phase
44  * performed at EAL initialization time.
45  * When an Ethernet device is being probed, an *rte_eth_dev* structure and
46  * a new port identifier are allocated for that device. Then, the eth_dev_init()
47  * function supplied by the Ethernet driver matching the probed PCI
48  * device is invoked to properly initialize the device.
49  *
50  * The role of the device init function consists of resetting the hardware,
51  * checking access to Non-volatile Memory (NVM), reading the MAC address
52  * from NVM etc.
53  *
54  * If the device init operation is successful, the correspondence between
55  * the port identifier assigned to the new device and its associated
56  * *rte_eth_dev* structure is effectively registered.
57  * Otherwise, both the *rte_eth_dev* structure and the port identifier are
58  * freed.
59  *
60  * The functions exported by the application Ethernet API to setup a device
61  * designated by its port identifier must be invoked in the following order:
62  *     - rte_eth_dev_configure()
63  *     - rte_eth_tx_queue_setup()
64  *     - rte_eth_rx_queue_setup()
65  *     - rte_eth_dev_start()
66  *
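 * As an illustration, a minimal single-queue setup could look as follows
 * (a sketch only: error handling is omitted, and the mempool *mb_pool* and
 * the descriptor count of 512 are application-provided values):
 *
 *     struct rte_eth_conf port_conf = {0};
 *     int socket_id = rte_eth_dev_socket_id(port_id);
 *
 *     rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *     rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
 *     rte_eth_rx_queue_setup(port_id, 0, 512, socket_id, NULL, mb_pool);
 *     rte_eth_dev_start(port_id);
 *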
67  * Then, the network application can invoke, in any order, the functions
68  * exported by the Ethernet API to get the MAC address of a given device, to
69  * get the speed and the status of a device physical link, to receive/transmit
70  * [burst of] packets, and so on.
71  *
72  * If the application wants to change the configuration (i.e. call
73  * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
74  * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
75  * device and then do the reconfiguration before calling rte_eth_dev_start()
76  * again. The transmit and receive functions should not be invoked when the
77  * device or the queue is stopped.
78  *
79  * Please note that some configuration is not stored between calls to
80  * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
81  * be retained:
82  *
83  *     - MTU
84  *     - flow control settings
85  *     - receive mode configuration (promiscuous mode, all-multicast mode,
86  *       hardware checksum mode, RSS/VMDq settings etc.)
87  *     - VLAN filtering configuration
88  *     - default MAC address
89  *     - MAC addresses supplied to MAC address array
90  *     - flow director filtering mode (but not filtering rules)
91  *     - NIC queue statistics mappings
92  *
93  * The following configuration may be retained or not
94  * depending on the device capabilities:
95  *
96  *     - flow rules
97  *     - flow-related shared objects, e.g. indirect actions
98  *
99  * Any other configuration will not be stored and will need to be re-entered
100  * before a call to rte_eth_dev_start().
101  *
102  * Finally, a network application can close an Ethernet device by invoking the
103  * rte_eth_dev_close() function.
104  *
105  * Each function of the application Ethernet API invokes a specific function
106  * of the PMD that controls the target device designated by its port
107  * identifier.
108  * For this purpose, all device-specific functions of an Ethernet driver are
109  * supplied through a set of pointers contained in a generic structure of type
110  * *eth_dev_ops*.
111  * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev*
112  * structure by the device init function of the Ethernet driver, which is
113  * invoked during the PCI probing phase, as explained earlier.
114  *
115  * In other words, each function of the Ethernet API simply retrieves the
116  * *rte_eth_dev* structure associated with the device port identifier and
117  * performs an indirect invocation of the corresponding driver function
118  * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure.
119  *
120  * For performance reasons, the address of the burst-oriented Rx and Tx
121  * functions of the Ethernet driver are not contained in the *eth_dev_ops*
122  * structure. Instead, they are directly stored at the beginning of the
123  * *rte_eth_dev* structure to avoid an extra indirect memory access during
124  * their invocation.
125  *
126  * RTE Ethernet device drivers do not use interrupts for transmitting or
127  * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit
128  * functions to applications.
129  * Both receive and transmit functions are packet-burst oriented to minimize
130  * their cost per packet through the following optimizations:
131  *
132  * - Sharing among multiple packets the incompressible cost of the
133  *   invocation of receive/transmit functions.
134  *
135  * - Enabling receive/transmit functions to take advantage of burst-oriented
136  *   hardware features (L1 cache, prefetch instructions, NIC head/tail
137  *   registers) to minimize the number of CPU cycles per packet, for instance,
138  *   by avoiding useless read memory accesses to ring descriptors, or by
139  *   systematically using arrays of pointers that exactly fit L1 cache line
140  *   boundaries and sizes.
141  *
142  * The burst-oriented receive function does not provide any error notification,
143  * to avoid the corresponding overhead. As a hint, the upper-level application
144  * might check the status of the device link once the receive function of the
145  * driver has systematically returned a 0 value for a given number of tries.
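 *
 * A typical polling loop therefore receives a burst, transmits it, and frees
 * whatever could not be sent. This is only a sketch; BURST_SIZE is an
 * application-defined constant:
 *
 *     struct rte_mbuf *pkts[BURST_SIZE];
 *     uint16_t nb_rx, nb_tx;
 *
 *     nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
 *     nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);
 *     while (nb_tx < nb_rx)
 *         rte_pktmbuf_free(pkts[nb_tx++]);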
146  */
147 
148 #include <stdint.h>
149 
150 /* Use this macro to check if LRO API is supported */
151 #define RTE_ETHDEV_HAS_LRO_SUPPORT
152 
153 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
155 #define RTE_ETHDEV_DEBUG_RX
156 #define RTE_ETHDEV_DEBUG_TX
157 #endif
158 
159 #include <rte_cman.h>
160 #include <rte_compat.h>
161 #include <rte_log.h>
162 #include <rte_interrupts.h>
163 #include <rte_dev.h>
164 #include <rte_devargs.h>
165 #include <rte_bitops.h>
166 #include <rte_errno.h>
167 #include <rte_common.h>
168 #include <rte_config.h>
169 #include <rte_power_intrinsics.h>
170 
171 #include "rte_ethdev_trace_fp.h"
172 #include "rte_dev_info.h"
173 
174 #ifdef __cplusplus
175 extern "C" {
176 #endif
177 
178 extern int rte_eth_dev_logtype;
179 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
180 
181 #define RTE_ETHDEV_LOG_LINE(level, ...) \
182 	RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
183 
184 struct rte_mbuf;
185 
186 /**
187  * Initializes a device iterator.
188  *
189  * This iterator allows accessing a list of devices matching some devargs.
190  *
191  * @param iter
192  *   Device iterator handle initialized by the function.
193  *   The fields bus_str and cls_str might be dynamically allocated,
194  *   and could be freed by calling rte_eth_iterator_cleanup().
195  *
196  * @param devargs
197  *   Device description string.
198  *
199  * @return
200  *   0 on successful initialization, negative otherwise.
201  */
202 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
203 
204 /**
205  * Iterates on devices with devargs filter.
206  * The ownership is not checked.
207  *
208  * The next port ID is returned, and the iterator is updated.
209  *
210  * @param iter
211  *   Device iterator handle initialized by rte_eth_iterator_init().
212  *   The fields bus_str and cls_str might be freed when no more port is found,
213  *   through an automatic call to rte_eth_iterator_cleanup().
214  *
215  * @return
216  *   A port ID if found, RTE_MAX_ETHPORTS otherwise.
217  */
218 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
219 
220 /**
221  * Free some allocated fields of the iterator.
222  *
223  * This function is automatically called by rte_eth_iterator_next()
224  * on the last iteration (i.e. when no more matching port is found).
225  *
226  * It is safe to call this function twice; it will do nothing more.
227  *
228  * @param iter
229  *   Device iterator handle initialized by rte_eth_iterator_init().
230  *   The fields bus_str and cls_str are freed if needed.
231  */
232 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
233 
234 /**
235  * Macro to iterate over all ethdev ports matching some devargs.
236  *
237  * If the loop is exited with a break before reaching the end,
238  * the function rte_eth_iterator_cleanup() must be called.
239  *
240  * @param id
241  *   Iterated port ID of type uint16_t.
242  * @param devargs
243  *   Device parameters input as string of type char*.
244  * @param iter
245  *   Iterator handle of type struct rte_dev_iterator, used internally.
246  */
247 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
248 	for (rte_eth_iterator_init(iter, devargs), \
249 	     id = rte_eth_iterator_next(iter); \
250 	     id != RTE_MAX_ETHPORTS; \
251 	     id = rte_eth_iterator_next(iter))
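
/*
 * Usage sketch for the iteration macro above (illustrative only; the devargs
 * string "net_null0" and the unwanted_port condition are hypothetical).
 * rte_eth_iterator_cleanup() is only required when breaking out of the loop
 * early:
 *
 *     struct rte_dev_iterator iterator;
 *     uint16_t port_id;
 *
 *     RTE_ETH_FOREACH_MATCHING_DEV(port_id, "net_null0", &iterator) {
 *         if (port_id == unwanted_port) {
 *             rte_eth_iterator_cleanup(&iterator);
 *             break;
 *         }
 *     }
 */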
252 
253 /**
254  * A structure used to retrieve statistics for an Ethernet port.
255  * Not all statistics fields in struct rte_eth_stats are supported
256  * by every type of network interface card (NIC). If a statistics
257  * field is not supported, its value is 0.
258  * All byte-related statistics do not include Ethernet FCS regardless
259  * of whether these bytes have been delivered to the application
260  * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
261  */
262 struct rte_eth_stats {
263 	uint64_t ipackets;  /**< Total number of successfully received packets. */
264 	uint64_t opackets;  /**< Total number of successfully transmitted packets.*/
265 	uint64_t ibytes;    /**< Total number of successfully received bytes. */
266 	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
267 	/**
268 	 * Total of Rx packets dropped by the HW,
269 	 * because there are no available buffers (i.e. Rx queues are full).
270 	 */
271 	uint64_t imissed;
272 	uint64_t ierrors;   /**< Total number of erroneous received packets. */
273 	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
274 	uint64_t rx_nombuf; /**< Total number of Rx mbuf allocation failures. */
275 	/* Queue stats are limited to max 256 queues */
276 	/** Total number of queue Rx packets. */
277 	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278 	/** Total number of queue Tx packets. */
279 	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280 	/** Total number of successfully received queue bytes. */
281 	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 	/** Total number of successfully transmitted queue bytes. */
283 	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
284 	/** Total number of queue packets received that are dropped. */
285 	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
286 };
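
/*
 * Reading these counters is a matter of filling the structure with
 * rte_eth_stats_get(), declared later in this file (sketch, error handling
 * reduced to a single check):
 *
 *     struct rte_eth_stats stats;
 *
 *     if (rte_eth_stats_get(port_id, &stats) == 0)
 *         printf("rx=%" PRIu64 " missed=%" PRIu64 "\n",
 *                stats.ipackets, stats.imissed);
 */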
287 
288 /**@{@name Link speed capabilities
289  * Device supported speeds bitmap flags
290  */
291 #define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
292 #define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
293 #define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
294 #define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
295 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
296 #define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
297 #define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
298 #define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
299 #define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
300 #define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
301 #define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
302 #define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
303 #define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
304 #define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
305 #define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
306 #define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
307 #define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
308 #define RTE_ETH_LINK_SPEED_400G    RTE_BIT32(16) /**< 400 Gbps */
309 /**@}*/
310 
311 /**@{@name Link speed
312  * Ethernet numeric link speeds in Mbps
313  */
314 #define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
315 #define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
316 #define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
317 #define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
318 #define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
319 #define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
320 #define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
321 #define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
322 #define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
323 #define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
324 #define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
325 #define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
326 #define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
327 #define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
328 #define RTE_ETH_SPEED_NUM_400G    400000 /**< 400 Gbps */
329 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
330 /**@}*/
331 
332 /**
333  * A structure used to retrieve link-level information of an Ethernet port.
334  */
335 struct rte_eth_link {
336 	union {
337 		RTE_ATOMIC(uint64_t) val64; /**< used for atomic64 read/write */
338 		__extension__
339 		struct {
340 			uint32_t link_speed;	    /**< RTE_ETH_SPEED_NUM_ */
341 			uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
342 			uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
343 			uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
344 		};
345 	};
346 };
347 
348 /**@{@name Link negotiation
349  * Constants used in link management.
350  */
351 #define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
352 #define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
353 #define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
354 #define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
355 #define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
356 #define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
357 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
358 /**@}*/
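
/*
 * Sketch: query the link without waiting and format it as a string, using
 * rte_eth_link_get_nowait() and rte_eth_link_to_str(), both declared later in
 * this file:
 *
 *     struct rte_eth_link link;
 *     char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *     if (rte_eth_link_get_nowait(port_id, &link) == 0) {
 *         rte_eth_link_to_str(text, sizeof(text), &link);
 *         printf("Port %u: %s\n", port_id, text);
 *     }
 */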
359 
360 /** Translate from link speed lanes to speed lanes capabilities. */
361 #define RTE_ETH_SPEED_LANES_TO_CAPA(x) RTE_BIT32(x)
362 
363 /** A structure used to get and set lanes capabilities per link speed. */
364 struct rte_eth_speed_lanes_capa {
365 	uint32_t speed;
366 	uint32_t capa;
367 };
368 
369 /**
370  * A structure used to configure the ring threshold registers of an Rx/Tx
371  * queue for an Ethernet port.
372  */
373 struct rte_eth_thresh {
374 	uint8_t pthresh; /**< Ring prefetch threshold. */
375 	uint8_t hthresh; /**< Ring host threshold. */
376 	uint8_t wthresh; /**< Ring writeback threshold. */
377 };
378 
379 /**@{@name Multi-queue mode
380  * @see rte_eth_conf.rxmode.mq_mode.
381  */
382 #define RTE_ETH_MQ_RX_RSS_FLAG  RTE_BIT32(0) /**< Enable RSS. @see rte_eth_rss_conf */
383 #define RTE_ETH_MQ_RX_DCB_FLAG  RTE_BIT32(1) /**< Enable DCB. */
384 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2) /**< Enable VMDq. */
385 /**@}*/
386 
387 /**
388  *  A set of values to identify what method is to be used to route
389  *  packets to multiple queues.
390  */
391 enum rte_eth_rx_mq_mode {
392 	/** None of DCB, RSS or VMDq mode */
393 	RTE_ETH_MQ_RX_NONE = 0,
394 
395 	/** For Rx side, only RSS is on */
396 	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
397 	/** For Rx side, only DCB is on. */
398 	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
399 	/** Both DCB and RSS enabled */
400 	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
401 
402 	/** Only VMDq, no RSS nor DCB */
403 	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
404 	/** RSS mode with VMDq */
405 	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
406 	/** Use VMDq+DCB to route traffic to queues */
407 	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
408 	/** Enable RSS, DCB and VMDq together */
409 	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
410 				 RTE_ETH_MQ_RX_VMDQ_FLAG,
411 };
412 
413 /**
414  * A set of values to identify what method is to be used to transmit
415  * packets using multi-TCs.
416  */
417 enum rte_eth_tx_mq_mode {
418 	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
419 	RTE_ETH_MQ_TX_DCB,          /**< For Tx side, only DCB is on. */
420 	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For Tx side, both DCB and VT are on. */
421 	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
422 };
423 
424 /**
425  * A structure used to configure the Rx features of an Ethernet port.
426  */
427 struct rte_eth_rxmode {
428 	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
429 	enum rte_eth_rx_mq_mode mq_mode;
430 	uint32_t mtu;  /**< Requested MTU. */
431 	/** Maximum allowed size of LRO aggregated packet. */
432 	uint32_t max_lro_pkt_size;
433 	/**
434 	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
435 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
436 	 * structure are allowed to be set.
437 	 */
438 	uint64_t offloads;
439 
440 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
441 	void *reserved_ptrs[2];   /**< Reserved for future fields */
442 };
443 
444 /**
445  * VLAN types to indicate if it is for single VLAN, inner VLAN or outer VLAN.
446  * Note that single VLAN is treated the same as inner VLAN.
447  */
448 enum rte_vlan_type {
449 	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
450 	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
451 	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
452 	RTE_ETH_VLAN_TYPE_MAX,
453 };
454 
455 /**
456  * A structure used to describe a VLAN filter.
457  * If the bit corresponding to a VID is set, that VID is enabled.
458  */
459 struct rte_vlan_filter_conf {
460 	uint64_t ids[64];
461 };
462 
463 /**
464  * Hash function types.
465  */
466 enum rte_eth_hash_function {
467 	/** DEFAULT means driver decides which hash algorithm to pick. */
468 	RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
469 	RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
470 	RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
471 	/**
472 	 * Symmetric Toeplitz: src, dst will be replaced by
473 	 * xor(src, dst). For the case with src/dst only,
474 	 * src or dst address will xor with zero pair.
475 	 */
476 	RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
477 	/**
478 	 * Symmetric Toeplitz: L3 and L4 fields are sorted prior to
479 	 * the hash function.
480 	 *  If src_ip > dst_ip, swap src_ip and dst_ip.
481 	 *  If src_port > dst_port, swap src_port and dst_port.
482 	 */
483 	RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT,
484 	RTE_ETH_HASH_FUNCTION_MAX,
485 };
486 
487 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
488 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
489 
490 /**
491  * A structure used to configure the Receive Side Scaling (RSS) feature
492  * of an Ethernet port.
493  */
494 struct rte_eth_rss_conf {
495 	/**
496 	 * In rte_eth_dev_rss_hash_conf_get(), the *rss_key_len* should be
497 	 * greater than or equal to the *hash_key_size* obtained from the
498 	 * rte_eth_dev_info_get() API, and *rss_key* should point to a buffer of
499 	 * at least *hash_key_size* bytes. If these requirements are not met, the
500 	 * query result is unreliable even if the operation returns success.
501 	 *
502 	 * In rte_eth_dev_rss_hash_update() or rte_eth_dev_configure(), if
503 	 * *rss_key* is not NULL, the *rss_key_len* indicates the length of the
504 	 * *rss_key* in bytes and it should be equal to *hash_key_size*.
505 	 * If *rss_key* is NULL, drivers are free to use a random or a default key.
506 	 */
507 	uint8_t *rss_key;
508 	uint8_t rss_key_len; /**< hash key length in bytes. */
509 	/**
510 	 * Indicates the type of packets or the specific part of packets to
511 	 * which RSS hashing is to be applied.
512 	 */
513 	uint64_t rss_hf;
514 	enum rte_eth_hash_function algorithm;	/**< Hash algorithm. */
515 };
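
/*
 * RSS is usually requested at configuration time through the
 * rx_adv_conf.rss_conf member of struct rte_eth_conf (declared later in this
 * file). A hedged sketch, with a NULL key so the driver picks its own, and a
 * hash mask that should be limited to the bits advertised in
 * rte_eth_dev_info.flow_type_rss_offloads:
 *
 *     struct rte_eth_conf conf = {
 *         .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *         .rx_adv_conf.rss_conf = {
 *             .rss_key = NULL,
 *             .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
 *         },
 *     };
 *     rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues, &conf);
 */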
516 
517 /*
518  * A packet can be identified by hardware as different flow types. Different
519  * NIC hardware may support different flow types.
520  * Basically, the NIC hardware identifies the flow type using the deepest
521  * protocol possible, and exclusively. For example, if a packet is identified
522  * as 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be reported as any other
523  * flow type, even though it is also an IPv4 packet.
524  */
525 #define RTE_ETH_FLOW_UNKNOWN             0
526 #define RTE_ETH_FLOW_RAW                 1
527 #define RTE_ETH_FLOW_IPV4                2
528 #define RTE_ETH_FLOW_FRAG_IPV4           3
529 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
530 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
531 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
532 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
533 #define RTE_ETH_FLOW_IPV6                8
534 #define RTE_ETH_FLOW_FRAG_IPV6           9
535 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
536 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
537 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
538 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
539 #define RTE_ETH_FLOW_L2_PAYLOAD         14
540 #define RTE_ETH_FLOW_IPV6_EX            15
541 #define RTE_ETH_FLOW_IPV6_TCP_EX        16
542 #define RTE_ETH_FLOW_IPV6_UDP_EX        17
543 /** Consider device port number as a flow differentiator */
544 #define RTE_ETH_FLOW_PORT               18
545 #define RTE_ETH_FLOW_VXLAN              19 /**< VXLAN protocol based flow */
546 #define RTE_ETH_FLOW_GENEVE             20 /**< GENEVE protocol based flow */
547 #define RTE_ETH_FLOW_NVGRE              21 /**< NVGRE protocol based flow */
548 #define RTE_ETH_FLOW_VXLAN_GPE          22 /**< VXLAN-GPE protocol based flow */
549 #define RTE_ETH_FLOW_GTPU               23 /**< GTPU protocol based flow */
550 #define RTE_ETH_FLOW_MAX                24
551 
552 /*
553  * Below macros are defined for RSS offload types, they can be used to
554  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
555  */
556 #define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
557 #define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
558 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
559 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
560 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
561 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
562 #define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
563 #define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
564 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
565 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
566 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
567 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
568 #define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
569 #define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
570 #define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
571 #define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
572 #define RTE_ETH_RSS_PORT               RTE_BIT64(18)
573 #define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
574 #define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
575 #define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
576 #define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
577 #define RTE_ETH_RSS_ETH                RTE_BIT64(24)
578 #define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
579 #define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
580 #define RTE_ETH_RSS_ESP                RTE_BIT64(27)
581 #define RTE_ETH_RSS_AH                 RTE_BIT64(28)
582 #define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
583 #define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
584 #define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
585 #define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
586 #define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
587 #define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
588 
589 /**
590  * RTE_ETH_RSS_L4_CHKSUM works on the checksum field of any L4 header.
591  * Like RTE_ETH_RSS_PORT, it does not specify a particular type of
592  * L4 header. This macro is defined to replace the specific L4 (TCP/UDP/SCTP)
593  * checksum types when constructing the set of RSS offload bits.
594  *
595  * Due to above reason, some old APIs (and configuration) don't support
596  * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
597  *
598  * If the checksum is not used in a UDP header,
599  * the reserved value 0 is taken as input for the hash function.
600  */
601 #define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
602 
603 #define RTE_ETH_RSS_L2TPV2             RTE_BIT64(36)
604 #define RTE_ETH_RSS_IPV6_FLOW_LABEL    RTE_BIT64(37)
605 
606 /*
607  * We use the following macros to combine with above RTE_ETH_RSS_* for
608  * more specific input set selection. These bits are defined starting
609  * from the high end of the 64 bits.
610  * Note: if the RTE_ETH_RSS_* flags above are used without SRC/DST_ONLY, both
611  * SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
612  * the same level are used simultaneously, the behavior is the same as if
613  * neither of them were set.
614  */
615 #define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
616 #define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
617 #define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
618 #define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
619 #define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
620 #define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
621 
622 /*
623  * Select only an IPv6 address prefix as the RSS input set, according to
624  * https://tools.ietf.org/html/rfc6052
625  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
626  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
627  */
628 #define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
629 #define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
630 #define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
631 #define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
632 #define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
633 #define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)
634 
635 /*
636  * Use the following macros to combine with the above layers
637  * to choose inner and outer layers or both for RSS computation.
638  * Bits 50 and 51 are reserved for this.
639  */
640 
641 /**
642  * level 0, requests the default behavior.
643  * Depending on the packet type, it can mean outermost, innermost,
644  * anything in between or even no RSS.
645  * It basically stands for the innermost encapsulation level RSS
646  * can be performed on according to PMD and device capabilities.
647  */
648 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (UINT64_C(0) << 50)
649 
650 /**
651  * level 1, requests RSS to be performed on the outermost packet
652  * encapsulation level.
653  */
654 #define RTE_ETH_RSS_LEVEL_OUTERMOST    (UINT64_C(1) << 50)
655 
656 /**
657  * level 2, requests RSS to be performed on the specified inner packet
658  * encapsulation level, from outermost to innermost (lower to higher values).
659  */
660 #define RTE_ETH_RSS_LEVEL_INNERMOST    (UINT64_C(2) << 50)
661 #define RTE_ETH_RSS_LEVEL_MASK         (UINT64_C(3) << 50)
662 
663 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
664 
665 /**
666  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
667  * the same level are used simultaneously, it is the same case as
668  * none of them are added.
669  *
670  * @param rss_hf
671  *   RSS types with SRC/DST_ONLY.
672  * @return
673  *   RSS types.
674  */
675 static inline uint64_t
676 rte_eth_rss_hf_refine(uint64_t rss_hf)
677 {
678 	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
679 		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
680 
681 	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
682 		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
683 
684 	return rss_hf;
685 }
686 
687 #define RTE_ETH_RSS_IPV6_PRE32 ( \
688 		RTE_ETH_RSS_IPV6 | \
689 		RTE_ETH_RSS_L3_PRE32)
690 
691 #define RTE_ETH_RSS_IPV6_PRE40 ( \
692 		RTE_ETH_RSS_IPV6 | \
693 		RTE_ETH_RSS_L3_PRE40)
694 
695 #define RTE_ETH_RSS_IPV6_PRE48 ( \
696 		RTE_ETH_RSS_IPV6 | \
697 		RTE_ETH_RSS_L3_PRE48)
698 
699 #define RTE_ETH_RSS_IPV6_PRE56 ( \
700 		RTE_ETH_RSS_IPV6 | \
701 		RTE_ETH_RSS_L3_PRE56)
702 
703 #define RTE_ETH_RSS_IPV6_PRE64 ( \
704 		RTE_ETH_RSS_IPV6 | \
705 		RTE_ETH_RSS_L3_PRE64)
706 
707 #define RTE_ETH_RSS_IPV6_PRE96 ( \
708 		RTE_ETH_RSS_IPV6 | \
709 		RTE_ETH_RSS_L3_PRE96)
710 
711 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
712 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
713 		RTE_ETH_RSS_L3_PRE32)
714 
715 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
716 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
717 		RTE_ETH_RSS_L3_PRE40)
718 
719 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
720 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
721 		RTE_ETH_RSS_L3_PRE48)
722 
723 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
724 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
725 		RTE_ETH_RSS_L3_PRE56)
726 
727 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
728 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
729 		RTE_ETH_RSS_L3_PRE64)
730 
731 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
732 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
733 		RTE_ETH_RSS_L3_PRE96)
734 
735 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
736 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
737 		RTE_ETH_RSS_L3_PRE32)
738 
739 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
740 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
741 		RTE_ETH_RSS_L3_PRE40)
742 
743 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
744 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
745 		RTE_ETH_RSS_L3_PRE48)
746 
747 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
748 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
749 		RTE_ETH_RSS_L3_PRE56)
750 
751 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
752 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
753 		RTE_ETH_RSS_L3_PRE64)
754 
755 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
756 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
757 		RTE_ETH_RSS_L3_PRE96)
758 
759 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
760 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
761 		RTE_ETH_RSS_L3_PRE32)
762 
763 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
764 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
765 		RTE_ETH_RSS_L3_PRE40)
766 
767 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
768 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
769 		RTE_ETH_RSS_L3_PRE48)
770 
771 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
772 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
773 		RTE_ETH_RSS_L3_PRE56)
774 
775 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
776 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
777 		RTE_ETH_RSS_L3_PRE64)
778 
779 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
780 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
781 		RTE_ETH_RSS_L3_PRE96)
782 
783 #define RTE_ETH_RSS_IP ( \
784 	RTE_ETH_RSS_IPV4 | \
785 	RTE_ETH_RSS_FRAG_IPV4 | \
786 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
787 	RTE_ETH_RSS_IPV6 | \
788 	RTE_ETH_RSS_FRAG_IPV6 | \
789 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
790 	RTE_ETH_RSS_IPV6_EX)
791 
792 #define RTE_ETH_RSS_UDP ( \
793 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
794 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
795 	RTE_ETH_RSS_IPV6_UDP_EX)
796 
797 #define RTE_ETH_RSS_TCP ( \
798 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
799 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
800 	RTE_ETH_RSS_IPV6_TCP_EX)
801 
802 #define RTE_ETH_RSS_SCTP ( \
803 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
804 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
805 
806 #define RTE_ETH_RSS_TUNNEL ( \
807 	RTE_ETH_RSS_VXLAN  | \
808 	RTE_ETH_RSS_GENEVE | \
809 	RTE_ETH_RSS_NVGRE)
810 
811 #define RTE_ETH_RSS_VLAN ( \
812 	RTE_ETH_RSS_S_VLAN  | \
813 	RTE_ETH_RSS_C_VLAN)
814 
815 /** Mask of valid RSS hash protocols */
816 #define RTE_ETH_RSS_PROTO_MASK ( \
817 	RTE_ETH_RSS_IPV4 | \
818 	RTE_ETH_RSS_FRAG_IPV4 | \
819 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
820 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
821 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
822 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
823 	RTE_ETH_RSS_IPV6 | \
824 	RTE_ETH_RSS_FRAG_IPV6 | \
825 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
826 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
827 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
828 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
829 	RTE_ETH_RSS_L2_PAYLOAD | \
830 	RTE_ETH_RSS_IPV6_EX | \
831 	RTE_ETH_RSS_IPV6_TCP_EX | \
832 	RTE_ETH_RSS_IPV6_UDP_EX | \
833 	RTE_ETH_RSS_PORT  | \
834 	RTE_ETH_RSS_VXLAN | \
835 	RTE_ETH_RSS_GENEVE | \
836 	RTE_ETH_RSS_NVGRE | \
837 	RTE_ETH_RSS_MPLS)
838 
839 /*
840  * Definitions used for redirection table entry size.
841  * Some RSS RETA sizes may not be supported by some drivers, check the
842  * documentation or the description of relevant functions for more details.
843  */
844 #define RTE_ETH_RSS_RETA_SIZE_64  64
845 #define RTE_ETH_RSS_RETA_SIZE_128 128
846 #define RTE_ETH_RSS_RETA_SIZE_256 256
847 #define RTE_ETH_RSS_RETA_SIZE_512 512
848 #define RTE_ETH_RETA_GROUP_SIZE   64
849 
850 /**@{@name VMDq and DCB maximums */
851 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
852 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
853 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
854 #define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
855 /**@}*/
856 
857 /**@{@name DCB capabilities */
858 #define RTE_ETH_DCB_PG_SUPPORT      RTE_BIT32(0) /**< Priority Group(ETS) support. */
859 #define RTE_ETH_DCB_PFC_SUPPORT     RTE_BIT32(1) /**< Priority Flow Control support. */
860 /**@}*/
861 
862 /**@{@name VLAN offload bits */
863 #define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
864 #define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
865 #define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
866 #define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
867 
868 #define RTE_ETH_VLAN_STRIP_MASK      0x0001 /**< VLAN Strip  setting mask */
869 #define RTE_ETH_VLAN_FILTER_MASK     0x0002 /**< VLAN Filter  setting mask*/
870 #define RTE_ETH_VLAN_EXTEND_MASK     0x0004 /**< VLAN Extend  setting mask*/
871 #define RTE_ETH_QINQ_STRIP_MASK      0x0008 /**< QINQ Strip  setting mask */
872 #define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits*/
873 /**@}*/
874 
875 /* Definitions used for receive MAC address */
876 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
877 
878 /* Definitions used for unicast hash */
879 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
880 
881 /**@{@name VMDq Rx mode
882  * @see rte_eth_vmdq_rx_conf.rx_mode
883  */
884 /** Accept untagged packets. */
885 #define RTE_ETH_VMDQ_ACCEPT_UNTAG      RTE_BIT32(0)
886 /** Accept packets in multicast table. */
887 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC    RTE_BIT32(1)
888 /** Accept packets in unicast table. */
889 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC    RTE_BIT32(2)
890 /** Accept broadcast packets. */
891 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST  RTE_BIT32(3)
892 /** Multicast promiscuous. */
893 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST  RTE_BIT32(4)
894 /**@}*/
895 
896 /**
897  * A structure used to configure 64 entries of Redirection Table of the
898  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
899  * more than 64 entries supported by hardware, an array of this structure
900  * is needed.
901  */
902 struct rte_eth_rss_reta_entry64 {
903 	/** Mask bits indicate which entries need to be updated/queried. */
904 	uint64_t mask;
905 	/** Group of 64 redirection table entries. */
906 	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
907 };
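
/*
 * Sketch (illustrative): program a 128-entry redirection table round-robin
 * over nb_queues queues with rte_eth_dev_rss_reta_update(), declared later in
 * this file. The table size of 128 and nb_queues are assumptions; the real
 * size comes from rte_eth_dev_info.reta_size:
 *
 *     struct rte_eth_rss_reta_entry64 reta_conf[2]; // 2 groups of 64 entries
 *     uint16_t i;
 *
 *     memset(reta_conf, 0, sizeof(reta_conf));
 *     for (i = 0; i < 128; i++) {
 *         reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *                 RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
 *         reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *                 i % nb_queues;
 *     }
 *     rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 */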
908 
909 /**
910  * This enum indicates the possible number of traffic classes
911  * in DCB configurations
912  */
913 enum rte_eth_nb_tcs {
914 	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
915 	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
916 };
917 
918 /**
919  * This enum indicates the possible number of queue pools
920  * in VMDq configurations.
921  */
922 enum rte_eth_nb_pools {
923 	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
924 	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
925 	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
926 	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
927 };
928 
929 /* This structure may be extended in future. */
930 struct rte_eth_dcb_rx_conf {
931 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
932 	/** Traffic class each UP mapped to. */
933 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
934 };
935 
936 struct rte_eth_vmdq_dcb_tx_conf {
937 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
938 	/** Traffic class each UP mapped to. */
939 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
940 };
941 
942 struct rte_eth_dcb_tx_conf {
943 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
944 	/** Traffic class each UP mapped to. */
945 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
946 };
947 
948 struct rte_eth_vmdq_tx_conf {
949 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
950 };
951 
952 /**
953  * A structure used to configure the VMDq+DCB feature
954  * of an Ethernet port.
955  *
956  * Using this feature, packets are routed to a pool of queues, based
957  * on the VLAN ID in the VLAN tag, and then to a specific queue within
958  * that pool, using the user priority VLAN tag field.
959  *
960  * A default pool may be used, if desired, to route all traffic which
961  * does not match the VLAN filter rules.
962  */
963 struct rte_eth_vmdq_dcb_conf {
964 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
965 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
966 	uint8_t default_pool; /**< The default pool, if applicable */
967 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
968 	struct {
969 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
970 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
971 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
972 	/** Selects a queue in a pool */
973 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
974 };
975 
976 /**
977  * A structure used to configure the VMDq feature of an Ethernet port when
978  * not combined with the DCB feature.
979  *
980  * Using this feature, packets are routed to a pool of queues. By default,
981  * the pool selection is based on the MAC address and the VLAN ID in the
982  * VLAN tag, as specified in the pool_map array.
983  * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
984  * selection using only the MAC address. MAC address to pool mapping is done
985  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
986  * corresponding to the pool ID.
987  *
988  * Queue selection within the selected pool will be done using RSS when
989  * it is enabled, or will revert to the first queue of the pool if not.
990  *
991  * A default pool may be used, if desired, to route all traffic which
992  * does not match the VLAN filter rules or any pool MAC address.
993  */
994 struct rte_eth_vmdq_rx_conf {
995 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
996 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
997 	uint8_t default_pool; /**< The default pool, if applicable */
998 	uint8_t enable_loop_back; /**< Enable VT loop back */
999 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
1000 	uint32_t rx_mode; /**< Flags from RTE_ETH_VMDQ_ACCEPT_* */
1001 	struct {
1002 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
1003 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
1004 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
1005 };
1006 
1007 /**
1008  * A structure used to configure the Tx features of an Ethernet port.
1009  */
1010 struct rte_eth_txmode {
1011 	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
1012 	/**
1013 	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
1014 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
1015 	 * structure are allowed to be set.
1016 	 */
1017 	uint64_t offloads;
1018 
1019 	uint16_t pvid;
1020 	__extension__
1021 	uint8_t /** If set, reject sending out tagged pkts */
1022 		hw_vlan_reject_tagged : 1,
1023 		/** If set, reject sending out untagged pkts */
1024 		hw_vlan_reject_untagged : 1,
1025 		/** If set, enable port based VLAN insertion */
1026 		hw_vlan_insert_pvid : 1;
1027 
1028 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1029 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1030 };
1031 
1032 /**
1033  * @warning
1034  * @b EXPERIMENTAL: this structure may change without prior notice.
1035  *
1036  * A structure used to configure an Rx packet segment to split.
1037  *
1038  * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field,
1039  * the PMD will split the received packets into multiple segments
1040  * according to the specification in the description array:
1041  *
1042  * - The first network buffer will be allocated from the memory pool,
1043  *   specified in the first array element, the second buffer, from the
1044  *   pool in the second element, and so on.
1045  *
1046  * - The proto_hdrs in the elements define the split position of
1047  *   received packets.
1048  *
1049  * - The offsets from the segment description elements specify
1050  *   the data offset from the buffer beginning except the first mbuf.
1051  *   The first segment offset is added with RTE_PKTMBUF_HEADROOM.
1052  *
1053  * - The lengths in the elements define the maximal amount of data
1054  *   received into each segment. Receiving starts with filling
1055  *   up the first mbuf data buffer up to the specified length. If
1056  *   there are data remaining (the packet is longer than the buffer in the
1057  *   first mbuf), the following data will be pushed to the next segment
1058  *   up to its own length, and so on.
1059  *
1060  * - If the length in the segment description element is zero
1061  *   the actual buffer size will be deduced from the appropriate
1062  *   memory pool properties.
1063  *
1064  * - If there are not enough elements to describe the buffer for an entire
1065  *   packet of maximal length, the following parameters will be used
1066  *   for all remaining segments:
1067  *     - pool from the last valid element
1068  *     - the buffer size from this pool
1069  *     - zero offset
1070  *
1071  * - Length based buffer split:
1072  *     - mp, length, offset should be configured.
1073  *     - The proto_hdr field must be 0.
1074  *
1075  * - Protocol header based buffer split:
1076  *     - mp, offset, proto_hdr should be configured.
1077  *     - The length field must be 0.
1078  *     - The proto_hdr field in the last segment should be 0.
1079  *
1080  * - When protocol header split is enabled, the NIC may receive packets
1081  *   which do not match all the protocol headers within the Rx segments.
1082  *   In that case, the NIC has two possible split behaviors depending on the
1083  *   matching result: exact match or longest match.
1084  *   The split result of the NIC must be one of the two.
1085  *   Exact match means the NIC only splits when the packets exactly match all
1086  *   the protocol headers in the segments.
1087  *   Otherwise, the whole packet is put into the last valid mempool.
1088  *   Longest match means the NIC splits until the packets mismatch
1089  *   a protocol header in the segments.
1090  *   The rest is put into the last valid pool.
1091  */
1092 struct rte_eth_rxseg_split {
1093 	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
1094 	uint16_t length; /**< Segment data length, configures split point. */
1095 	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
1096 	/**
1097 	 * proto_hdr defines a bit mask of the protocol sequence as RTE_PTYPE_*.
1098 	 * The last RTE_PTYPE* in the mask indicates the split position.
1099 	 *
1100 	 * If one protocol header is defined to split packets into two segments,
1101 	 * for non-tunneling packets, the complete protocol sequence should be defined.
1102 	 * For tunneling packets, for simplicity, only the tunnel and inner part of
1103 	 * complete protocol sequence is required.
1104 	 * If several protocol headers are defined to split packets into multi-segments,
1105 	 * the repeated parts of adjacent segments should be omitted.
1106 	 */
1107 	uint32_t proto_hdr;
1108 };
1109 
1110 /**
1111  * @warning
1112  * @b EXPERIMENTAL: this structure may change without prior notice.
1113  *
1114  * A common structure used to describe Rx packet segment properties.
1115  */
1116 union rte_eth_rxseg {
1117 	/* The settings for buffer split offload. */
1118 	struct rte_eth_rxseg_split split;
1119 	/* The other features settings should be added here. */
1120 };
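
/*
 * A length-based buffer split sketch (illustrative; hdr_pool and pay_pool are
 * application-created mempools and nb_rxd/socket_id are placeholders). The
 * description is passed through the rx_seg/rx_nseg fields of struct
 * rte_eth_rxconf below, together with the RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT
 * offload flag:
 *
 *     union rte_eth_rxseg segs[2] = {
 *         { .split = { .mp = hdr_pool, .length = 128 } },
 *         { .split = { .mp = pay_pool } }, // length 0: use pool buffer size
 *     };
 *     struct rte_eth_rxconf rxconf = {0};
 *
 *     rxconf.rx_seg = segs;
 *     rxconf.rx_nseg = 2;
 *     rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *     rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxconf, NULL);
 */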
1121 
1122 /**
1123  * A structure used to configure an Rx ring of an Ethernet port.
1124  */
1125 struct rte_eth_rxconf {
1126 	struct rte_eth_thresh rx_thresh; /**< Rx ring threshold registers. */
1127 	uint16_t rx_free_thresh; /**< Drives the freeing of Rx descriptors. */
1128 	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
1129 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1130 	uint16_t rx_nseg; /**< Number of segment descriptions in the rx_seg array. */
1131 	/**
1132 	 * Share group index in Rx domain and switch domain.
1133 	 * A non-zero value enables Rx queue sharing; zero disables it.
1134 	 * The PMD is responsible for Rx queue consistency checks to avoid member
1135 	 * ports' configurations contradicting each other.
1136 	 */
1137 	uint16_t share_group;
1138 	uint16_t share_qid; /**< Shared Rx queue ID in group */
1139 	/**
1140 	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
1141 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
1142 	 * fields on rte_eth_dev_info structure are allowed to be set.
1143 	 */
1144 	uint64_t offloads;
1145 	/**
1146 	 * Points to the array of segment descriptions for an entire packet.
1147 	 * Array elements are properties for consecutive Rx segments.
1148 	 *
1149 	 * The supported Rx segmentation capabilities are reported
1150 	 * in the rte_eth_dev_info.rx_seg_capa field.
1151 	 */
1152 	union rte_eth_rxseg *rx_seg;
1153 
1154 	/**
1155 	 * Array of mempools to allocate Rx buffers from.
1156 	 *
1157 	 * This provides support for multiple mbuf pools per Rx queue.
1158 	 * The capability is reported in device info via positive
1159 	 * max_rx_mempools.
1160 	 *
1161 	 * It can be useful for more efficient memory usage when an
1162 	 * application creates different mempools to steer packets of
1163 	 * specific sizes.
1164 	 *
1165 	 * If many mempools are specified, packets received using Rx
1166 	 * burst may belong to any provided mempool. From the ethdev user's point
1167 	 * of view it is undefined how the PMD/NIC chooses a mempool for a packet.
1168 	 *
1169 	 * If Rx scatter is enabled, a packet may be delivered using a chain
1170 	 * of mbufs obtained from single mempool or multiple mempools based
1171 	 * on the NIC implementation.
1172 	 */
1173 	struct rte_mempool **rx_mempools;
1174 	uint16_t rx_nmempool; /**< Number of Rx mempools. */
1175 
1176 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1177 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1178 };
1179 
1180 /**
1181  * A structure used to configure a Tx ring of an Ethernet port.
1182  */
1183 struct rte_eth_txconf {
1184 	struct rte_eth_thresh tx_thresh; /**< Tx ring threshold registers. */
1185 	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
1186 	uint16_t tx_free_thresh; /**< Start freeing Tx buffers if there are
1187 				      less free descriptors than this value. */
1188 
1189 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1190 	/**
1191 	 * Per-queue Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
1192 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
1193 	 * fields on rte_eth_dev_info structure are allowed to be set.
1194 	 */
1195 	uint64_t offloads;
1196 
1197 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1198 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1199 };
1200 
1201 /**
1202  * @warning
1203  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1204  *
1205  * A structure used to return the Tx or Rx hairpin queue capabilities.
1206  */
1207 struct rte_eth_hairpin_queue_cap {
1208 	/**
1209 	 * When set, PMD supports placing descriptors and/or data buffers
1210 	 * in dedicated device memory.
1211 	 */
1212 	uint32_t locked_device_memory:1;
1213 
1214 	/**
1215 	 * When set, PMD supports placing descriptors and/or data buffers
1216 	 * in host memory managed by DPDK.
1217 	 */
1218 	uint32_t rte_memory:1;
1219 
1220 	uint32_t reserved:30; /**< Reserved for future fields */
1221 };
1222 
1223 /**
1224  * @warning
1225  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1226  *
1227  * A structure used to return the hairpin capabilities that are supported.
1228  */
1229 struct rte_eth_hairpin_cap {
1230 	/** The max number of hairpin queues (different bindings). */
1231 	uint16_t max_nb_queues;
1232 	/** Max number of Rx queues to be connected to one Tx queue. */
1233 	uint16_t max_rx_2_tx;
1234 	/** Max number of Tx queues to be connected to one Rx queue. */
1235 	uint16_t max_tx_2_rx;
1236 	uint16_t max_nb_desc; /**< The max num of descriptors. */
1237 	struct rte_eth_hairpin_queue_cap rx_cap; /**< Rx hairpin queue capabilities. */
1238 	struct rte_eth_hairpin_queue_cap tx_cap; /**< Tx hairpin queue capabilities. */
1239 };
1240 
1241 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1242 
1243 /**
1244  * @warning
1245  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1246  *
1247  * A structure used to hold hairpin peer data.
1248  */
1249 struct rte_eth_hairpin_peer {
1250 	uint16_t port; /**< Peer port. */
1251 	uint16_t queue; /**< Peer queue. */
1252 };
1253 
1254 /**
1255  * @warning
1256  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1257  *
1258  * A structure used to configure hairpin binding.
1259  */
1260 struct rte_eth_hairpin_conf {
1261 	uint32_t peer_count:16; /**< The number of peers. */
1262 
1263 	/**
1264 	 * Explicit Tx flow rule mode.
1265 	 * One hairpin pair of queues should have the same attribute.
1266 	 *
1267 	 * - When set, the user should be responsible for inserting the hairpin
1268 	 *   Tx part flows and removing them.
1269 	 * - When clear, the PMD will try to handle the Tx part of the flows,
1270 	 *   e.g., by splitting one flow into two parts.
1271 	 */
1272 	uint32_t tx_explicit:1;
1273 
1274 	/**
1275 	 * Manually bind hairpin queues.
1276 	 * One hairpin pair of queues should have the same attribute.
1277 	 *
1278 	 * - When set, to enable hairpin, the user should call the hairpin bind
1279 	 *   function after all the queues are set up properly and the ports are
1280 	 *   started. Also, the hairpin unbind function should be called
1281 	 *   accordingly before stopping a port that has hairpin configured.
1282 	 * - When cleared, the PMD will try to enable the hairpin with the queues
1283 	 *   configured automatically during port start.
1284 	 */
1285 	uint32_t manual_bind:1;
1286 
1287 	/**
1288 	 * Use locked device memory as a backing storage.
1289 	 *
1290 	 * - When set, PMD will attempt to place descriptors and/or data buffers
1291 	 *   in dedicated device memory.
1292 	 * - When cleared, PMD will use default memory type as a backing storage.
1293 	 *   Please refer to PMD documentation for details.
1294 	 *
1295 	 * API user should check if PMD supports this configuration flag using
1296 	 * @see rte_eth_dev_hairpin_capability_get.
1297 	 */
1298 	uint32_t use_locked_device_memory:1;
1299 
1300 	/**
1301 	 * Use DPDK memory as backing storage.
1302 	 *
1303 	 * - When set, PMD will attempt to place descriptors and/or data buffers
1304 	 *   in host memory managed by DPDK.
1305 	 * - When cleared, PMD will use default memory type as a backing storage.
1306 	 *   Please refer to PMD documentation for details.
1307 	 *
1308 	 * API user should check if PMD supports this configuration flag using
1309 	 * @see rte_eth_dev_hairpin_capability_get.
1310 	 */
1311 	uint32_t use_rte_memory:1;
1312 
1313 	/**
1314 	 * Force usage of hairpin memory configuration.
1315 	 *
1316 	 * - When set, PMD will attempt to use specified memory settings.
1317 	 *   If resource allocation fails, then hairpin queue allocation
1318 	 *   will result in an error.
1319 	 * - When clear, PMD will attempt to use specified memory settings.
1320 	 *   If resource allocation fails, then PMD will retry
1321 	 *   allocation with default configuration.
1322 	 */
1323 	uint32_t force_memory:1;
1324 
1325 	uint32_t reserved:11; /**< Reserved bits. */
1326 
1327 	struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1328 };
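
/*
 * Sketch (illustrative): bind Rx queue 1 and Tx queue 1 of the same port as a
 * hairpin pair with default memory settings, using the hairpin queue setup
 * functions declared later in this file. The descriptor count of 1024 is an
 * assumption; the real limits come from rte_eth_dev_hairpin_capability_get():
 *
 *     struct rte_eth_hairpin_conf hp_conf = { .peer_count = 1 };
 *
 *     hp_conf.peers[0].port = port_id;
 *     hp_conf.peers[0].queue = 1; // peer Tx queue of this Rx queue
 *     rte_eth_rx_hairpin_queue_setup(port_id, 1, 1024, &hp_conf);
 *     hp_conf.peers[0].queue = 1; // peer Rx queue of this Tx queue
 *     rte_eth_tx_hairpin_queue_setup(port_id, 1, 1024, &hp_conf);
 */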
1329 
1330 /**
1331  * A structure contains information about HW descriptor ring limitations.
1332  */
1333 struct rte_eth_desc_lim {
1334 	uint16_t nb_max;   /**< Max allowed number of descriptors. */
1335 	uint16_t nb_min;   /**< Min allowed number of descriptors. */
1336 	uint16_t nb_align; /**< Number of descriptors should be aligned to. */
1337 
1338 	/**
1339 	 * Max allowed number of segments per whole packet.
1340 	 *
1341 	 * - For TSO packet this is the total number of data descriptors allowed
1342 	 *   by device.
1343 	 *
1344 	 * @see nb_mtu_seg_max
1345 	 */
1346 	uint16_t nb_seg_max;
1347 
1348 	/**
1349 	 * Max number of segments per one MTU.
1350 	 *
1351 	 * - For non-TSO packet, this is the maximum allowed number of segments
1352 	 *   in a single transmit packet.
1353 	 *
1354 	 * - For TSO packet each segment within the TSO may span up to this
1355 	 *   value.
1356 	 *
1357 	 * @see nb_seg_max
1358 	 */
1359 	uint16_t nb_mtu_seg_max;
1360 };
1361 
1362 /**
1363  * This enum indicates the flow control mode
1364  */
1365 enum rte_eth_fc_mode {
1366 	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
1367 	RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
1368 	RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
1369 	RTE_ETH_FC_FULL      /**< Enable flow control on both sides. */
1370 };
1371 
1372 /**
1373  * A structure used to configure Ethernet flow control parameters.
1374  * These parameters will be configured into the registers of the NIC.
1375  * Please refer to the corresponding data sheet for proper values.
1376  */
1377 struct rte_eth_fc_conf {
1378 	uint32_t high_water;  /**< High threshold value to trigger XOFF */
1379 	uint32_t low_water;   /**< Low threshold value to trigger XON */
1380 	uint16_t pause_time;  /**< Pause quota in the Pause frame */
1381 	uint16_t send_xon;    /**< Whether an XON frame needs to be sent */
1382 	enum rte_eth_fc_mode mode;  /**< Link flow control mode */
1383 	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames */
1384 	uint8_t autoneg;      /**< Use Pause autoneg */
1385 };
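/*
 * Usage sketch (illustrative, not part of this header): filling the structure
 * above and applying it with rte_eth_dev_flow_ctrl_set(), the ethdev call
 * assumed to consume it. The threshold values are placeholders only; proper
 * values depend on the NIC and should be taken from its data sheet.
 *
 *	uint16_t port_id = 0;			// assumed valid port
 *	struct rte_eth_fc_conf fc_conf = {
 *		.mode = RTE_ETH_FC_FULL,	// pause in both directions
 *		.high_water = 1024,		// illustrative XOFF threshold
 *		.low_water = 512,		// illustrative XON threshold
 *		.pause_time = 0x680,		// pause quota in the frame
 *		.send_xon = 1,
 *		.autoneg = 1,
 *	};
 *	int ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */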
1386 
1387 /**
1388  * A structure used to configure Ethernet priority flow control parameter.
1389  * These parameters will be configured into the register of the NIC.
1390  * Please refer to the corresponding data sheet for proper value.
1391  */
1392 struct rte_eth_pfc_conf {
1393 	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
1394 	uint8_t priority;          /**< VLAN User Priority. */
1395 };
1396 
1397 /**
1398  * @warning
1399  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1400  *
1401  * A structure used to retrieve information of queue based PFC.
1402  */
1403 struct rte_eth_pfc_queue_info {
1404 	/**
1405 	 * Maximum supported traffic class as per PFC (802.1Qbb) specification.
1406 	 */
1407 	uint8_t tc_max;
1408 	/** PFC queue mode capabilities. */
1409 	enum rte_eth_fc_mode mode_capa;
1410 };
1411 
1412 /**
1413  * @warning
1414  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1415  *
1416  * A structure used to configure Ethernet priority flow control parameters for
1417  * ethdev queues.
1418  *
1419  * rte_eth_pfc_queue_conf::rx_pause structure shall be used to configure given
1420  * tx_qid with corresponding tc. When ethdev device receives PFC frame with
1421  * rte_eth_pfc_queue_conf::rx_pause::tc, traffic will be paused on
1422  * rte_eth_pfc_queue_conf::rx_pause::tx_qid for that tc.
1423  *
1424  * rte_eth_pfc_queue_conf::tx_pause structure shall be used to configure given
1425  * rx_qid. When rx_qid is congested, PFC frames are generated with
1426  * rte_eth_pfc_queue_conf::tx_pause::tc and
1427  * rte_eth_pfc_queue_conf::tx_pause::pause_time to the peer.
1428  */
1429 struct rte_eth_pfc_queue_conf {
1430 	enum rte_eth_fc_mode mode; /**< Link flow control mode */
1431 
1432 	struct {
1433 		uint16_t tx_qid; /**< Tx queue ID */
1434 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1435 		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1]
1436 		 */
1437 		uint8_t tc;
1438 	} rx_pause; /* Valid when (mode == RTE_ETH_FC_RX_PAUSE || mode == RTE_ETH_FC_FULL) */
1439 
1440 	struct {
1441 		uint16_t pause_time; /**< Pause quota in the Pause frame */
1442 		uint16_t rx_qid;     /**< Rx queue ID */
1443 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1444 		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1]
1445 		 */
1446 		uint8_t tc;
1447 	} tx_pause; /* Valid when (mode == RTE_ETH_FC_TX_PAUSE || mode == RTE_ETH_FC_FULL) */
1448 };
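/*
 * Usage sketch (illustrative): pausing one traffic class on a given Tx queue
 * when a matching PFC frame is received. The experimental call
 * rte_eth_dev_priority_flow_ctrl_queue_configure() is assumed to consume this
 * structure; tc_max and mode_capa from rte_eth_pfc_queue_info should be
 * checked first.
 *
 *	struct rte_eth_pfc_queue_conf pfc_conf = {
 *		.mode = RTE_ETH_FC_RX_PAUSE,
 *		.rx_pause = { .tx_qid = 0, .tc = 3 },	// pause Tx queue 0 on TC 3
 *	};
 *	int ret = rte_eth_dev_priority_flow_ctrl_queue_configure(0, &pfc_conf);
 */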
1449 
1450 /**
1451  * Tunnel type for device-specific classifier configuration.
1452  * @see rte_eth_udp_tunnel
1453  */
1454 enum rte_eth_tunnel_type {
1455 	RTE_ETH_TUNNEL_TYPE_NONE = 0,
1456 	RTE_ETH_TUNNEL_TYPE_VXLAN,
1457 	RTE_ETH_TUNNEL_TYPE_GENEVE,
1458 	RTE_ETH_TUNNEL_TYPE_TEREDO,
1459 	RTE_ETH_TUNNEL_TYPE_NVGRE,
1460 	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1461 	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1462 	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1463 	RTE_ETH_TUNNEL_TYPE_ECPRI,
1464 	RTE_ETH_TUNNEL_TYPE_MAX,
1465 };
1466 
1467 #ifdef __cplusplus
1468 }
1469 #endif
1470 
1471 /* Deprecated API file for rte_eth_dev_filter_* functions */
1472 #include "rte_eth_ctrl.h"
1473 
1474 #ifdef __cplusplus
1475 extern "C" {
1476 #endif
1477 
1478 /**
1479  * UDP tunneling configuration.
1480  *
1481  * Used to configure the classifier of a device,
1482  * associating a UDP port with a type of tunnel.
1483  *
1484  * Some NICs may need such configuration to properly parse a tunnel
1485  * with any standard or custom UDP port.
1486  */
1487 struct rte_eth_udp_tunnel {
1488 	uint16_t udp_port; /**< UDP port used for the tunnel. */
1489 	uint8_t prot_type; /**< Tunnel type. @see rte_eth_tunnel_type */
1490 };
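/*
 * Usage sketch (illustrative): registering a non-default VXLAN UDP port so
 * that the device classifier recognizes the tunnel;
 * rte_eth_dev_udp_tunnel_port_add() is the ethdev call assumed here.
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4790,			// custom VXLAN port
 *		.prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *	};
 *	int ret = rte_eth_dev_udp_tunnel_port_add(0, &tunnel);
 */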
1491 
1492 /**
1493  * A structure used to enable/disable specific device interrupts.
1494  */
1495 struct rte_eth_intr_conf {
1496 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
1497 	uint32_t lsc:1;
1498 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
1499 	uint32_t rxq:1;
1500 	/** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
1501 	uint32_t rmv:1;
1502 };
1503 
1504 #define rte_intr_conf rte_eth_intr_conf
1505 
1506 /**
1507  * A structure used to configure an Ethernet port.
1508  * Depending upon the Rx multi-queue mode, extra advanced
1509  * configuration settings may be needed.
1510  */
1511 struct rte_eth_conf {
1512 	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
1513 				used. RTE_ETH_LINK_SPEED_FIXED disables link
1514 				autonegotiation, and a unique speed shall be
1515 				set. Otherwise, the bitmap defines the set of
1516 				speeds to be advertised. If the special value
1517 				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
1518 				supported are advertised. */
1519 	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
1520 	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
1521 	uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
1522 			         is 0, meaning the loopback mode is disabled.
1523 				 Read the datasheet of given Ethernet controller
1524 				 for details. The possible values of this field
1525 				 are defined in implementation of each driver. */
1526 	struct {
1527 		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
1528 		/** Port VMDq+DCB configuration. */
1529 		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1530 		/** Port DCB Rx configuration. */
1531 		struct rte_eth_dcb_rx_conf dcb_rx_conf;
1532 		/** Port VMDq Rx configuration. */
1533 		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1534 	} rx_adv_conf; /**< Port Rx filtering configuration. */
1535 	union {
1536 		/** Port VMDq+DCB Tx configuration. */
1537 		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1538 		/** Port DCB Tx configuration. */
1539 		struct rte_eth_dcb_tx_conf dcb_tx_conf;
1540 		/** Port VMDq Tx configuration. */
1541 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1542 	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
1543 	/** Currently, Priority Flow Control (PFC) is supported. If DCB with PFC
1544 	    is needed, this variable must be set to RTE_ETH_DCB_PFC_SUPPORT. */
1545 	uint32_t dcb_capability_en;
1546 	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
1547 };
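/*
 * Usage sketch (illustrative): a minimal port configuration enabling RSS
 * across four Rx queues. The offloads and RSS types are examples only and
 * must be validated against the capabilities reported by
 * rte_eth_dev_info_get() before being requested.
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.mq_mode = RTE_ETH_MQ_RX_RSS,
 *			.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
 *		},
 *		.rx_adv_conf = {
 *			.rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
 *		},
 *		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
 *	};
 *	int ret = rte_eth_dev_configure(0, 4, 4, &port_conf);
 */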
1548 
1549 /**
1550  * Rx offload capabilities of a device.
1551  */
1552 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       RTE_BIT64(0)
1553 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1554 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1555 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1556 #define RTE_ETH_RX_OFFLOAD_TCP_LRO          RTE_BIT64(4)
1557 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       RTE_BIT64(5)
1558 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1559 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     RTE_BIT64(7)
1560 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      RTE_BIT64(9)
1561 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      RTE_BIT64(10)
1562 #define RTE_ETH_RX_OFFLOAD_SCATTER          RTE_BIT64(13)
1563 /**
1564  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1565  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
1566  * The mbuf field and flag are registered when the offload is configured.
1567  */
1568 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP        RTE_BIT64(14)
1569 #define RTE_ETH_RX_OFFLOAD_SECURITY         RTE_BIT64(15)
1570 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC         RTE_BIT64(16)
1571 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(17)
1572 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(18)
1573 #define RTE_ETH_RX_OFFLOAD_RSS_HASH         RTE_BIT64(19)
1574 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     RTE_BIT64(20)
1575 
1576 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1577 				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1578 				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1579 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1580 			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1581 			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1582 			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1583 
1584 /*
1585  * If new Rx offload capabilities are defined, they also must be
1586  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1587  */
1588 
1589 /**
1590  * Tx offload capabilities of a device.
1591  */
1592 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      RTE_BIT64(0)
1593 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1594 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1595 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1596 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)
1597 #define RTE_ETH_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)
1598 #define RTE_ETH_TX_OFFLOAD_UDP_TSO          RTE_BIT64(6)
1599 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)  /**< Used for tunneling packet. */
1600 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      RTE_BIT64(8)
1601 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    RTE_BIT64(9)  /**< Used for tunneling packet. */
1602 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      RTE_BIT64(10) /**< Used for tunneling packet. */
1603 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     RTE_BIT64(11) /**< Used for tunneling packet. */
1604 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   RTE_BIT64(12) /**< Used for tunneling packet. */
1605 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    RTE_BIT64(13)
1606 /**
1607  * Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
1608  * Tx queue without SW lock.
1609  */
1610 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      RTE_BIT64(14)
1611 /** Device supports multi segment send. */
1612 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)
1613 /**
1614  * Device supports optimization for fast release of mbufs.
1615  * When set, the application must guarantee that, per queue, all mbufs come
1616  * from the same mempool and have refcnt = 1.
1617  */
1618 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)
1619 #define RTE_ETH_TX_OFFLOAD_SECURITY         RTE_BIT64(17)
1620 /**
1621  * Device supports generic UDP tunneled packet TSO.
1622  * Application must set RTE_MBUF_F_TX_TUNNEL_UDP and other mbuf fields required
1623  * for tunnel TSO.
1624  */
1625 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      RTE_BIT64(18)
1626 /**
1627  * Device supports generic IP tunneled packet TSO.
1628  * Application must set RTE_MBUF_F_TX_TUNNEL_IP and other mbuf fields required
1629  * for tunnel TSO.
1630  */
1631 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       RTE_BIT64(19)
1632 /** Device supports outer UDP checksum */
1633 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(20)
1634 /**
1635  * Device sends packets at the time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1636  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
1637  * The mbuf field and flag are registered when the offload is configured.
1638  */
1639 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1640 /*
1641  * If new Tx offload capabilities are defined, they also must be
1642  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1643  */
1644 
1645 /**@{@name Device capabilities
1646  * Non-offload capabilities reported in rte_eth_dev_info.dev_capa.
1647  */
1648 /** Device supports Rx queue setup after device started. */
1649 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1650 /** Device supports Tx queue setup after device started. */
1651 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1652 /**
1653  * Device supports shared Rx queue among ports within Rx domain and
1654  * switch domain. Mbufs are consumed by shared Rx queue instead of
1655  * each queue. Multiple groups are supported by share_group of Rx
1656  * queue configuration. Shared Rx queue is identified by PMD using
1657  * share_qid of Rx queue configuration. Polling any port in the group
1658  * receives packets of all member ports; the source port is identified by
1659  * the mbuf->port field.
1660  */
1661 #define RTE_ETH_DEV_CAPA_RXQ_SHARE              RTE_BIT64(2)
1662 /** Device supports keeping flow rules across restart. */
1663 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP         RTE_BIT64(3)
1664 /** Device supports keeping shared flow objects across restart. */
1665 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1666 /**@}*/
1667 
1668 /*
1669  * Fallback default preferred Rx/Tx port parameters.
1670  * These are used if an application requests default parameters
1671  * but the PMD does not provide preferred values.
1672  */
1673 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1674 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1675 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1676 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1677 
1678 /**
1679  * Preferred Rx/Tx port parameters.
1680  * There are separate instances of this structure for transmission
1681  * and reception respectively.
1682  */
1683 struct rte_eth_dev_portconf {
1684 	uint16_t burst_size; /**< Device-preferred burst size */
1685 	uint16_t ring_size; /**< Device-preferred size of queue rings */
1686 	uint16_t nb_queues; /**< Device-preferred number of queues */
1687 };
1688 
1689 /**
1690  * Default values for switch domain ID when ethdev does not support switch
1691  * domain definitions.
1692  */
1693 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID	(UINT16_MAX)
1694 
1695 /**
1696  * Ethernet device associated switch information
1697  */
1698 struct rte_eth_switch_info {
1699 	const char *name;	/**< switch name */
1700 	uint16_t domain_id;	/**< switch domain ID */
1701 	/**
1702 	 * Mapping to the device's physical switch port as enumerated from the
1703 	 * perspective of the embedded interconnect/switch. For SR-IOV enabled
1704 	 * device this may correspond to the VF_ID of each virtual function,
1705 	 * but each driver should explicitly define the mapping of switch
1706 	 * port identifier to that physical interconnect/switch
1707 	 */
1708 	uint16_t port_id;
1709 	/**
1710 	 * Shared Rx queue sub-domain boundary. Only ports in same Rx domain
1711 	 * and switch domain can share Rx queue. Valid only if device advertised
1712 	 * RTE_ETH_DEV_CAPA_RXQ_SHARE capability.
1713 	 */
1714 	uint16_t rx_domain;
1715 };
1716 
1717 /**
1718  * @warning
1719  * @b EXPERIMENTAL: this structure may change without prior notice.
1720  *
1721  * Ethernet device Rx buffer segmentation capabilities.
1722  */
1723 struct rte_eth_rxseg_capa {
1724 	__extension__
1725 	uint32_t multi_pools:1; /**< Supports receiving to multiple pools.*/
1726 	uint32_t offset_allowed:1; /**< Supports buffer offsets. */
1727 	uint32_t offset_align_log2:4; /**< Required offset alignment. */
1728 	uint16_t max_nseg; /**< Maximum amount of segments to split. */
1729 	uint16_t reserved; /**< Reserved field. */
1730 };
1731 
1732 /**
1733  * Ethernet device information
1734  */
1735 
1736 /**
1737  * Ethernet device representor port type.
1738  */
1739 enum rte_eth_representor_type {
1740 	RTE_ETH_REPRESENTOR_NONE, /**< not a representor. */
1741 	RTE_ETH_REPRESENTOR_VF,   /**< representor of Virtual Function. */
1742 	RTE_ETH_REPRESENTOR_SF,   /**< representor of Sub Function. */
1743 	RTE_ETH_REPRESENTOR_PF,   /**< representor of Physical Function. */
1744 };
1745 
1746 /**
1747  * @warning
1748  * @b EXPERIMENTAL: this enumeration may change without prior notice.
1749  *
1750  * Ethernet device error handling mode.
1751  */
1752 enum rte_eth_err_handle_mode {
1753 	/** No error handling modes are supported. */
1754 	RTE_ETH_ERROR_HANDLE_MODE_NONE,
1755 	/** Passive error handling, after the PMD detects that a reset is required,
1756 	 * the PMD reports @see RTE_ETH_EVENT_INTR_RESET event,
1757 	 * and the application invokes @see rte_eth_dev_reset to recover the port.
1758 	 */
1759 	RTE_ETH_ERROR_HANDLE_MODE_PASSIVE,
1760 	/** Proactive error handling, after the PMD detects that a reset is required,
1761 	 * the PMD reports @see RTE_ETH_EVENT_ERR_RECOVERING event,
1762 	 * do recovery internally, and finally reports the recovery result event
1763 	 * (@see RTE_ETH_EVENT_RECOVERY_*).
1764 	 */
1765 	RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE,
1766 };
1767 
1768 /**
1769  * A structure used to retrieve the contextual information of
1770  * an Ethernet device, such as the controlling driver of the
1771  * device, etc...
1772  */
1773 struct rte_eth_dev_info {
1774 	struct rte_device *device; /**< Generic device information */
1775 	const char *driver_name; /**< Device Driver name. */
1776 	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
1777 		Use if_indextoname() to translate into an interface name. */
1778 	uint16_t min_mtu;	/**< Minimum MTU allowed */
1779 	uint16_t max_mtu;	/**< Maximum MTU allowed */
1780 	const uint32_t *dev_flags; /**< Device flags */
1781 	/** Minimum Rx buffer size per descriptor supported by HW. */
1782 	uint32_t min_rx_bufsize;
1783 	/**
1784 	 * Maximum Rx buffer size per descriptor supported by HW.
1785 	 * The value is not enforced, information only to application to
1786 	 * optimize mbuf size.
1787 	 * Its value is UINT32_MAX when not specified by the driver.
1788 	 */
1789 	uint32_t max_rx_bufsize;
1790 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
1791 	/** Maximum configurable size of LRO aggregated packet. */
1792 	uint32_t max_lro_pkt_size;
1793 	uint16_t max_rx_queues; /**< Maximum number of Rx queues. */
1794 	uint16_t max_tx_queues; /**< Maximum number of Tx queues. */
1795 	uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
1796 	/** Maximum number of hash MAC addresses for MTA and UTA. */
1797 	uint32_t max_hash_mac_addrs;
1798 	uint16_t max_vfs; /**< Maximum number of VFs. */
1799 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
1800 	struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/
1801 	/** All Rx offload capabilities including all per-queue ones */
1802 	uint64_t rx_offload_capa;
1803 	/** All Tx offload capabilities including all per-queue ones */
1804 	uint64_t tx_offload_capa;
1805 	/** Device per-queue Rx offload capabilities. */
1806 	uint64_t rx_queue_offload_capa;
1807 	/** Device per-queue Tx offload capabilities. */
1808 	uint64_t tx_queue_offload_capa;
1809 	/** Device redirection table size, the total number of entries. */
1810 	uint16_t reta_size;
1811 	uint8_t hash_key_size; /**< Hash key size in bytes */
1812 	uint32_t rss_algo_capa; /**< RSS hash algorithms capabilities */
1813 	/** Bit mask of RSS offloads, the bit offset also means flow type */
1814 	uint64_t flow_type_rss_offloads;
1815 	struct rte_eth_rxconf default_rxconf; /**< Default Rx configuration */
1816 	struct rte_eth_txconf default_txconf; /**< Default Tx configuration */
1817 	uint16_t vmdq_queue_base; /**< First queue ID for VMDq pools. */
1818 	uint16_t vmdq_queue_num;  /**< Queue number for VMDq pools. */
1819 	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
1820 	struct rte_eth_desc_lim rx_desc_lim;  /**< Rx descriptors limits */
1821 	struct rte_eth_desc_lim tx_desc_lim;  /**< Tx descriptors limits */
1822 	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
1823 	/** Configured number of Rx/Tx queues */
1824 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
1825 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
1826 	/**
1827 	 * Maximum number of Rx mempools supported per Rx queue.
1828 	 *
1829 	 * Value greater than 0 means that the driver supports Rx queue
1830 	 * mempools specification via rx_conf->rx_mempools.
1831 	 */
1832 	uint16_t max_rx_mempools;
1833 	/** Rx parameter recommendations */
1834 	struct rte_eth_dev_portconf default_rxportconf;
1835 	/** Tx parameter recommendations */
1836 	struct rte_eth_dev_portconf default_txportconf;
1837 	/** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
1838 	uint64_t dev_capa;
1839 	/**
1840 	 * Switching information for ports on a device with an
1841 	 * embedded managed interconnect/switch.
1842 	 */
1843 	struct rte_eth_switch_info switch_info;
1844 	/** Supported error handling mode. */
1845 	enum rte_eth_err_handle_mode err_handle_mode;
1846 
1847 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1848 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1849 };
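/*
 * Usage sketch (illustrative): querying the structure above with
 * rte_eth_dev_info_get() and checking an offload capability before requesting
 * it in the port configuration.
 *
 *	struct rte_eth_dev_info dev_info;
 *	if (rte_eth_dev_info_get(0, &dev_info) == 0 &&
 *	    (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0) {
 *		// the offload may be added to eth_conf->txmode.offloads
 *	}
 */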
1850 
1851 /**@{@name Rx/Tx queue states */
1852 #define RTE_ETH_QUEUE_STATE_STOPPED 0 /**< Queue stopped. */
1853 #define RTE_ETH_QUEUE_STATE_STARTED 1 /**< Queue started. */
1854 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2 /**< Queue used for hairpin. */
1855 /**@}*/
1856 
1857 /**
1858  * Ethernet device Rx queue information structure.
1859  * Used to retrieve information about configured queue.
1860  */
1861 struct __rte_cache_min_aligned rte_eth_rxq_info {
1862 	struct rte_mempool *mp;     /**< mempool used by that queue. */
1863 	struct rte_eth_rxconf conf; /**< queue config parameters. */
1864 	uint8_t scattered_rx;       /**< scattered packets Rx supported. */
1865 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1866 	uint16_t nb_desc;           /**< configured number of RXDs. */
1867 	uint16_t rx_buf_size;       /**< hardware receive buffer size. */
1868 	/**
1869 	 * Available Rx descriptors threshold defined as percentage
1870 	 * of Rx queue size. If number of available descriptors is lower,
1871 	 * the event RTE_ETH_EVENT_RX_AVAIL_THRESH is generated.
1872 	 * Value 0 means that the threshold monitoring is disabled.
1873 	 */
1874 	uint8_t avail_thresh;
1875 };
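/*
 * Usage sketch (illustrative): retrieving the information above for a
 * configured Rx queue via rte_eth_rx_queue_info_get() (assumes <stdio.h>).
 *
 *	struct rte_eth_rxq_info qinfo;
 *	if (rte_eth_rx_queue_info_get(0, 0, &qinfo) == 0)
 *		printf("Rx queue 0: %u descriptors, %u B buffers\n",
 *		       qinfo.nb_desc, qinfo.rx_buf_size);
 */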
1876 
1877 /**
1878  * Ethernet device Tx queue information structure.
1879  * Used to retrieve information about configured queue.
1880  */
1881 struct __rte_cache_min_aligned rte_eth_txq_info {
1882 	struct rte_eth_txconf conf; /**< queue config parameters. */
1883 	uint16_t nb_desc;           /**< configured number of TXDs. */
1884 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1885 };
1886 
1887 /**
1888  * @warning
1889  * @b EXPERIMENTAL: this structure may change without prior notice.
1890  *
1891  * Ethernet device Rx queue information structure for recycling mbufs.
1892  * Used to retrieve Rx queue information when the Tx queue is reusing mbufs
1893  * and moving them into the Rx mbuf ring.
1894  */
1895 struct __rte_cache_min_aligned rte_eth_recycle_rxq_info {
1896 	struct rte_mbuf **mbuf_ring; /**< mbuf ring of Rx queue. */
1897 	struct rte_mempool *mp;     /**< mempool of Rx queue. */
1898 	uint16_t *refill_head;      /**< head of Rx queue refilling mbufs. */
1899 	uint16_t *receive_tail;     /**< tail of Rx queue receiving pkts. */
1900 	uint16_t mbuf_ring_size;     /**< configured size of the mbuf ring. */
1901 	/**
1902 	 * Requirement on mbuf refilling batch size of Rx mbuf ring.
1903 	 * For some PMD drivers, the number of Rx mbuf ring refilling mbufs
1904 	 * should be aligned with mbuf ring size, in order to simplify
1905 	 * ring wrapping around.
1906 	 * Value 0 means that PMD drivers have no requirement for this.
1907 	 */
1908 	uint16_t refill_requirement;
1909 };
1910 
1911 /* Generic Burst mode flag definition, values can be ORed. */
1912 
1913 /**
1914  * If the queues have different burst mode descriptions, this bit will be set
1915  * by PMD, then the application can iterate to retrieve burst description for
1916  * all other queues.
1917  */
1918 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1919 
1920 /**
1921  * Ethernet device Rx/Tx queue packet burst mode information structure.
1922  * Used to retrieve information about packet burst mode setting.
1923  */
1924 struct rte_eth_burst_mode {
1925 	uint64_t flags; /**< The ORed values of RTE_ETH_BURST_FLAG_xxx */
1926 
1927 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024 /**< Maximum size for information */
1928 	char info[RTE_ETH_BURST_MODE_INFO_SIZE]; /**< burst mode information */
1929 };
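/*
 * Usage sketch (illustrative): reading the burst mode description of Rx queue
 * 0 with rte_eth_rx_burst_mode_get() (assumes <stdio.h>).
 *
 *	struct rte_eth_burst_mode mode;
 *	if (rte_eth_rx_burst_mode_get(0, 0, &mode) == 0)
 *		printf("Rx burst mode: %s\n", mode.info);
 */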
1930 
1931 /** Maximum name length for extended statistics counters */
1932 #define RTE_ETH_XSTATS_NAME_SIZE 64
1933 
1934 /**
1935  * An Ethernet device extended statistic structure
1936  *
1937  * This structure is used by rte_eth_xstats_get() to provide
1938  * statistics that are not provided in the generic *rte_eth_stats*
1939  * structure.
1940  * It maps a name ID, corresponding to an index in the array returned
1941  * by rte_eth_xstats_get_names(), to a statistic value.
1942  */
1943 struct rte_eth_xstat {
1944 	uint64_t id;        /**< The index in xstats name array. */
1945 	uint64_t value;     /**< The statistic counter value. */
1946 };
1947 
1948 /**
1949  * A name element for extended statistics.
1950  *
1951  * An array of this structure is returned by rte_eth_xstats_get_names().
1952  * It lists the names of extended statistics for a PMD. The *rte_eth_xstat*
1953  * structure references these names by their array index.
1954  *
1955  * The xstats should follow a common naming scheme.
1956  * Some names are standardized in rte_stats_strings.
1957  * Examples:
1958  *     - rx_missed_errors
1959  *     - tx_q3_bytes
1960  *     - tx_size_128_to_255_packets
1961  */
1962 struct rte_eth_xstat_name {
1963 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
1964 };
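/*
 * Usage sketch (illustrative): pairing rte_eth_xstats_get_names() with
 * rte_eth_xstats_get(); the id field of each rte_eth_xstat indexes the name
 * array. Assumes <stdio.h>, <stdlib.h> and <inttypes.h>, and that the first
 * call returns a positive counter count.
 *
 *	int nb = rte_eth_xstats_get_names(0, NULL, 0);	// number of counters
 *	struct rte_eth_xstat_name *names = calloc(nb, sizeof(*names));
 *	struct rte_eth_xstat *stats = calloc(nb, sizeof(*stats));
 *	if (names != NULL && stats != NULL &&
 *	    rte_eth_xstats_get_names(0, names, nb) == nb &&
 *	    rte_eth_xstats_get(0, stats, nb) == nb) {
 *		for (int i = 0; i < nb; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[stats[i].id].name, stats[i].value);
 *	}
 *	free(names);
 *	free(stats);
 */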
1965 
1966 #define RTE_ETH_DCB_NUM_TCS    8
1967 #define RTE_ETH_MAX_VMDQ_POOL  64
1968 
1969 /**
1970  * A structure used to get the information of queue and
1971  * TC mapping on both Tx and Rx paths.
1972  */
1973 struct rte_eth_dcb_tc_queue_mapping {
1974 	/** Rx queues assigned to tc per Pool */
1975 	struct {
1976 		uint16_t base;
1977 		uint16_t nb_queue;
1978 	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1979 	/** Tx queues assigned to tc per Pool */
1980 	struct {
1981 		uint16_t base;
1982 		uint16_t nb_queue;
1983 	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1984 };
1985 
1986 /**
1987  * A structure used to get the information of DCB.
1988  * It includes TC UP mapping and queue TC mapping.
1989  */
1990 struct rte_eth_dcb_info {
1991 	uint8_t nb_tcs;        /**< number of TCs */
1992 	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
1993 	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
1994 	/** Rx and Tx queues assigned to tc */
1995 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
1996 };
1997 
1998 /**
1999  * This enum indicates the possible Forward Error Correction (FEC) modes
2000  * of an ethdev port.
2001  */
2002 enum rte_eth_fec_mode {
2003 	RTE_ETH_FEC_NOFEC = 0,      /**< FEC is off */
2004 	RTE_ETH_FEC_AUTO,	    /**< FEC autonegotiation modes */
2005 	RTE_ETH_FEC_BASER,          /**< FEC using common algorithm */
2006 	RTE_ETH_FEC_RS,             /**< FEC using RS algorithm */
2007 	RTE_ETH_FEC_LLRS,           /**< FEC using LLRS algorithm */
2008 };
2009 
2010 /* Translate from FEC mode to FEC capa */
2011 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
2012 
2013 /* This macro indicates FEC capa mask */
2014 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
2015 
2016 /* A structure used to get capabilities per link speed */
2017 struct rte_eth_fec_capa {
2018 	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
2019 	uint32_t capa;  /**< FEC capabilities bitmask */
2020 };
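/*
 * Usage sketch (illustrative): querying FEC capabilities per link speed and
 * requesting RS FEC when some speed supports it. rte_eth_fec_get_capability()
 * and rte_eth_fec_set() are the ethdev calls assumed here.
 *
 *	struct rte_eth_fec_capa capa[8];
 *	int n = rte_eth_fec_get_capability(0, capa, 8);
 *	if (n > 8)
 *		n = 8;		// only the first 8 entries were filled
 *	for (int i = 0; i < n; i++) {
 *		if (capa[i].capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS)) {
 *			rte_eth_fec_set(0, RTE_ETH_FEC_MODE_CAPA_MASK(RS));
 *			break;
 *		}
 *	}
 */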
2021 
2022 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2023 
2024 /* Macros to check for valid port */
2025 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2026 	if (!rte_eth_dev_is_valid_port(port_id)) { \
2027 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2028 		return retval; \
2029 	} \
2030 } while (0)
2031 
2032 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2033 	if (!rte_eth_dev_is_valid_port(port_id)) { \
2034 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2035 		return; \
2036 	} \
2037 } while (0)
2038 
2039 /**
2040  * Function type used for Rx packet processing packet callbacks.
2041  *
2042  * Function type used for Rx packet processing callbacks.
2043  * been received on the given port and queue.
2044  *
2045  * @param port_id
2046  *   The Ethernet port on which Rx is being performed.
2047  * @param queue
2048  *   The queue on the Ethernet port which is being used to receive the packets.
2049  * @param pkts
2050  *   The burst of packets that have just been received.
2051  * @param nb_pkts
2052  *   The number of packets in the burst pointed to by "pkts".
2053  * @param max_pkts
2054  *   The max number of packets that can be stored in the "pkts" array.
2055  * @param user_param
2056  *   The arbitrary user parameter passed in by the application when the callback
2057  *   was originally configured.
2058  * @return
2059  *   The number of packets returned to the user.
2060  */
2061 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2062 	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2063 	void *user_param);
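/*
 * Usage sketch (illustrative): a callback matching the type above that counts
 * received packets, installed with rte_eth_add_rx_callback(), which is the
 * ethdev call assumed here.
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;	// application-owned counter
 *		*counter += nb_pkts;
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		return nb_pkts;			// keep all packets
 *	}
 *
 *	static uint64_t rx_count;
 *	// after the Rx queue has been set up:
 *	rte_eth_add_rx_callback(0, 0, count_rx_cb, &rx_count);
 */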
2064 
2065 /**
2066  * Function type used for Tx packet processing callbacks.
2067  *
2068  * The callback function is called on Tx with a burst of packets immediately
2069  * before the packets are put onto the hardware queue for transmission.
2070  *
2071  * @param port_id
2072  *   The Ethernet port on which Tx is being performed.
2073  * @param queue
2074  *   The queue on the Ethernet port which is being used to transmit the packets.
2075  * @param pkts
2076  *   The burst of packets that are about to be transmitted.
2077  * @param nb_pkts
2078  *   The number of packets in the burst pointed to by "pkts".
2079  * @param user_param
2080  *   The arbitrary user parameter passed in by the application when the callback
2081  *   was originally configured.
2082  * @return
2083  *   The number of packets to be written to the NIC.
2084  */
2085 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2086 	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2087 
2088 /**
2089  * Possible states of an ethdev port.
2090  */
2091 enum rte_eth_dev_state {
2092 	/** Device is unused before being probed. */
2093 	RTE_ETH_DEV_UNUSED = 0,
2094 	/** Device is attached when allocated in probing. */
2095 	RTE_ETH_DEV_ATTACHED,
2096 	/** Device is in removed state when plug-out is detected. */
2097 	RTE_ETH_DEV_REMOVED,
2098 };
2099 
2100 struct rte_eth_dev_sriov {
2101 	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
2102 	uint8_t nb_q_per_pool;        /**< Rx queue number per pool */
2103 	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
2104 	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
2105 };
2106 #define RTE_ETH_DEV_SRIOV(dev)         ((dev)->data->sriov)
2107 
2108 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2109 
2110 #define RTE_ETH_DEV_NO_OWNER 0
2111 
2112 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2113 
2114 struct rte_eth_dev_owner {
2115 	uint64_t id; /**< The owner unique identifier. */
2116 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
2117 };
2118 
2119 /**@{@name Device flags
2120  * Flags internally saved in rte_eth_dev_data.dev_flags
2121  * and reported in rte_eth_dev_info.dev_flags.
2122  */
2123 /** PMD supports thread-safe flow operations */
2124 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  RTE_BIT32(0)
2125 /** Device supports link state interrupt */
2126 #define RTE_ETH_DEV_INTR_LSC              RTE_BIT32(1)
2127 /** Device is a bonding member */
2128 #define RTE_ETH_DEV_BONDING_MEMBER        RTE_BIT32(2)
2129 /** Device supports device removal interrupt */
2130 #define RTE_ETH_DEV_INTR_RMV              RTE_BIT32(3)
2131 /** Device is port representor */
2132 #define RTE_ETH_DEV_REPRESENTOR           RTE_BIT32(4)
2133 /** Device does not support MAC change after started */
2134 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR       RTE_BIT32(5)
2135 /**
2136  * Queue xstats filled automatically by ethdev layer.
2137  * PMDs filling the queue xstats themselves should not set this flag
2138  */
2139 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2140 /**@}*/
2141 
2142 /**
2143  * Iterates over valid ethdev ports owned by a specific owner.
2144  *
2145  * @param port_id
2146  *   The ID of the next possible valid owned port.
2147  * @param	owner_id
2148  *  The owner identifier.
2149  *  RTE_ETH_DEV_NO_OWNER means iterate over all valid ownerless ports.
2150  * @return
2151  *   Next valid port ID owned by owner_id, RTE_MAX_ETHPORTS if there is none.
2152  */
2153 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2154 		const uint64_t owner_id);
2155 
2156 /**
2157  * Macro to iterate over all enabled ethdev ports owned by a specific owner.
2158  */
2159 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2160 	for (p = rte_eth_find_next_owned_by(0, o); \
2161 	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2162 	     p = rte_eth_find_next_owned_by(p + 1, o))
2163 
2164 /**
2165  * Iterates over valid ethdev ports.
2166  *
2167  * @param port_id
2168  *   The ID of the next possible valid port.
2169  * @return
2170  *   Next valid port ID, RTE_MAX_ETHPORTS if there is none.
2171  */
2172 uint16_t rte_eth_find_next(uint16_t port_id);
2173 
2174 /**
2175  * Macro to iterate over all enabled and ownerless ethdev ports.
2176  */
2177 #define RTE_ETH_FOREACH_DEV(p) \
2178 	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
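/*
 * Usage sketch (illustrative): iterating over all usable, ownerless ports
 * with the macro above (assumes <stdio.h>).
 *
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_DEV(port_id)
 *		printf("port %u is available\n", port_id);
 */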
2179 
2180 /**
2181  * Iterates over ethdev ports of a specified device.
2182  *
2183  * @param port_id_start
2184  *   The ID of the next possible valid port.
2185  * @param parent
2186  *   The generic device behind the ports to iterate.
2187  * @return
2188  *   Next port ID of the device, possibly port_id_start,
2189  *   RTE_MAX_ETHPORTS if there is none.
2190  */
2191 uint16_t
2192 rte_eth_find_next_of(uint16_t port_id_start,
2193 		const struct rte_device *parent);
2194 
2195 /**
2196  * Macro to iterate over all ethdev ports of a specified device.
2197  *
2198  * @param port_id
2199  *   The ID of the matching port being iterated.
2200  * @param parent
2201  *   The rte_device pointer matching the iterated ports.
2202  */
2203 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2204 	for (port_id = rte_eth_find_next_of(0, parent); \
2205 		port_id < RTE_MAX_ETHPORTS; \
2206 		port_id = rte_eth_find_next_of(port_id + 1, parent))
2207 
2208 /**
2209  * Iterates over sibling ethdev ports (i.e. sharing the same rte_device).
2210  *
2211  * @param port_id_start
2212  *   The ID of the next possible valid sibling port.
2213  * @param ref_port_id
2214  *   The ID of a reference port to compare rte_device with.
2215  * @return
2216  *   Next sibling port ID, possibly port_id_start or ref_port_id itself,
2217  *   RTE_MAX_ETHPORTS if there is none.
2218  */
2219 uint16_t
2220 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2221 
2222 /**
2223  * Macro to iterate over all ethdev ports sharing the same rte_device
2224  * as the specified port.
2225  * Note: the specified reference port is part of the loop iterations.
2226  *
2227  * @param port_id
2228  *   The ID of the matching port being iterated.
2229  * @param ref_port_id
2230  *   The ID of the port being compared.
2231  */
2232 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2233 	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2234 		port_id < RTE_MAX_ETHPORTS; \
2235 		port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2236 
2237 /**
2238  * Get a new unique owner identifier.
2239  * An owner identifier is used so that Ethernet devices are owned by only one
2240  * DPDK entity, to avoid multiple entities managing the same device.
2241  *
2242  * @param	owner_id
2243  *   Owner identifier pointer.
2244  * @return
2245  *   Negative errno value on error, 0 on success.
2246  */
2247 int rte_eth_dev_owner_new(uint64_t *owner_id);
2248 
2249 /**
2250  * Set an Ethernet device owner.
2251  *
2252  * @param	port_id
2253  *  The identifier of the port to own.
2254  * @param	owner
2255  *  The owner pointer.
2256  * @return
2257  *  Negative errno value on error, 0 on success.
2258  */
2259 int rte_eth_dev_owner_set(const uint16_t port_id,
2260 		const struct rte_eth_dev_owner *owner);
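/*
 * Usage sketch (illustrative): claiming ownership of port 0 so that other
 * DPDK entities skip it when iterating with RTE_ETH_FOREACH_DEV().
 * The owner name "my-app" is an arbitrary example.
 *
 *	struct rte_eth_dev_owner owner = { .name = "my-app" };
 *
 *	if (rte_eth_dev_owner_new(&owner.id) == 0)
 *		rte_eth_dev_owner_set(0, &owner);
 */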
2261 
2262 /**
2263  * Unset Ethernet device owner to make the device ownerless.
2264  *
2265  * @param	port_id
2266  *  The identifier of port to make ownerless.
2267  * @param	owner_id
2268  *  The owner identifier.
2269  * @return
2270  *  0 on success, negative errno value on error.
2271  */
2272 int rte_eth_dev_owner_unset(const uint16_t port_id,
2273 		const uint64_t owner_id);
2274 
2275 /**
2276  * Remove owner from all Ethernet devices owned by a specific owner.
2277  *
2278  * @param	owner_id
2279  *  The owner identifier.
2280  * @return
2281  *  0 on success, negative errno value on error.
2282  */
2283 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2284 
2285 /**
2286  * Get the owner of an Ethernet device.
2287  *
2288  * @param	port_id
2289  *  The port identifier.
2290  * @param	owner
2291  *  The owner structure pointer to fill.
2292  * @return
2293  *  0 on success, negative errno value on error.
2294  */
2295 int rte_eth_dev_owner_get(const uint16_t port_id,
2296 		struct rte_eth_dev_owner *owner);
2297 
2298 /**
2299  * Get the number of ports which are usable for the application.
2300  *
2301  * These devices must be iterated by using the macro
2302  * ``RTE_ETH_FOREACH_DEV`` or ``RTE_ETH_FOREACH_DEV_OWNED_BY``
2303  * to deal with non-contiguous ranges of devices.
2304  *
2305  * @return
2306  *   The count of available Ethernet devices.
2307  */
2308 uint16_t rte_eth_dev_count_avail(void);
2309 
2310 /**
2311  * Get the total number of ports which are allocated.
2312  *
2313  * Some devices may not be available for the application.
2314  *
2315  * @return
2316  *   The total count of Ethernet devices.
2317  */
2318 uint16_t rte_eth_dev_count_total(void);
2319 
2320 /**
2321  * Convert a numerical speed in Mbps to a bitmap flag that can be used in
2322  * the bitmap link_speeds of the struct rte_eth_conf
2323  *
2324  * @param speed
2325  *   Numerical speed value in Mbps
2326  * @param duplex
2327  *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
2328  * @return
2329  *   0 if the speed cannot be mapped
2330  */
2331 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
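/*
 * Usage sketch (illustrative): restricting the link to a fixed 10 Gbps speed
 * by combining the returned flag with RTE_ETH_LINK_SPEED_FIXED.
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED |
 *		rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *				      RTE_ETH_LINK_FULL_DUPLEX);
 */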
2332 
2333 /**
2334  * Get RTE_ETH_RX_OFFLOAD_* flag name.
2335  *
2336  * @param offload
2337  *   Offload flag.
2338  * @return
2339  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2340  */
2341 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2342 
2343 /**
2344  * Get RTE_ETH_TX_OFFLOAD_* flag name.
2345  *
2346  * @param offload
2347  *   Offload flag.
2348  * @return
2349  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2350  */
2351 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2352 
2353 /**
2354  * @warning
2355  * @b EXPERIMENTAL: this API may change without prior notice.
2356  *
2357  * Get RTE_ETH_DEV_CAPA_* flag name.
2358  *
2359  * @param capability
2360  *   Capability flag.
2361  * @return
2362  *   Capability name or 'UNKNOWN' if the flag cannot be recognized.
2363  */
2364 __rte_experimental
2365 const char *rte_eth_dev_capability_name(uint64_t capability);
2366 
2367 /**
2368  * Configure an Ethernet device.
2369  * This function must be invoked first before any other function in the
2370  * Ethernet API. This function can also be re-invoked when a device is in the
2371  * stopped state.
2372  *
2373  * @param port_id
2374  *   The port identifier of the Ethernet device to configure.
2375  * @param nb_rx_queue
2376  *   The number of receive queues to set up for the Ethernet device.
2377  * @param nb_tx_queue
2378  *   The number of transmit queues to set up for the Ethernet device.
2379  * @param eth_conf
2380  *   The pointer to the configuration data to be used for the Ethernet device.
2381  *   The *rte_eth_conf* structure includes:
2382  *     -  the hardware offload features to activate, with dedicated fields for
2383  *        each statically configurable offload hardware feature provided by
2384  *        Ethernet devices, such as IP checksum or VLAN tag stripping for
2385  *        example.
2386  *        The Rx offload bitfield API is obsolete and will be deprecated.
2387  *        Applications should set the ignore_bitfield_offloads bit on *rxmode*
2388  *        structure and use offloads field to set per-port offloads instead.
2389  *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
2390  *        the [rt]x_offload_capa returned from rte_eth_dev_info_get().
2391  *        Any type of device supported offloading set in the input argument
2392  *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
2393  *        on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup()
2394  *     -  the Receive Side Scaling (RSS) configuration when using multiple Rx
2395  *        queues per port. Any RSS hash function set in eth_conf->rss_conf.rss_hf
2396  *        must be within the flow_type_rss_offloads provided by drivers via
2397  *        rte_eth_dev_info_get() API.
2398  *
2399  *   Embedding all configuration information in a single data structure
2400  *   is the more flexible method that allows the addition of new features
2401  *   without changing the syntax of the API.
2402  * @return
2403  *   - 0: Success, device configured.
2404  *   - <0: Error code returned by the driver configuration function.
2405  */
2406 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2407 		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2408 
2409 /**
2410  * Check if an Ethernet device was physically removed.
2411  *
2412  * @param port_id
2413  *   The port identifier of the Ethernet device.
2414  * @return
2415  *   1 when the Ethernet device is removed, otherwise 0.
2416  */
2417 int
2418 rte_eth_dev_is_removed(uint16_t port_id);
2419 
2420 /**
2421  * Allocate and set up a receive queue for an Ethernet device.
2422  *
2423  * The function allocates a contiguous block of memory for *nb_rx_desc*
2424  * receive descriptors from a memory zone associated with *socket_id*
2425  * and initializes each receive descriptor with a network buffer allocated
2426  * from the memory pool *mb_pool*.
2427  *
2428  * @param port_id
2429  *   The port identifier of the Ethernet device.
2430  * @param rx_queue_id
2431  *   The index of the receive queue to set up.
2432  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2433  *   to rte_eth_dev_configure().
2434  * @param nb_rx_desc
2435  *   The number of receive descriptors to allocate for the receive ring.
2436  * @param socket_id
2437  *   The *socket_id* argument is the socket identifier in case of NUMA.
2438  *   The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2439  *   the DMA memory allocated for the receive descriptors of the ring.
2440  * @param rx_conf
2441  *   The pointer to the configuration data to be used for the receive queue.
2442  *   NULL value is allowed, in which case default Rx configuration
2443  *   will be used.
2444  *   The *rx_conf* structure contains an *rx_thresh* structure with the values
2445  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
2446  *   ring.
2447  *   In addition it contains the hardware offloads features to activate using
2448  *   the RTE_ETH_RX_OFFLOAD_* flags.
2449  *   If an offloading set in rx_conf->offloads
2450  *   hasn't been set in the input argument eth_conf->rxmode.offloads
2451  *   to rte_eth_dev_configure(), it is a newly added offload; it must be
2452  *   of a per-queue type and it is enabled for the queue.
2453  *   No need to repeat any bit in rx_conf->offloads which has already been
2454  *   enabled in rte_eth_dev_configure() at port level. An offloading enabled
2455  *   at port level can't be disabled at queue level.
2456  *   The configuration structure also contains the pointer to the array
2457  *   of the receiving buffer segment descriptions, see rx_seg and rx_nseg
2458  *   fields, this extended configuration might be used by split offloads like
2459  *   RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mb_pool is not NULL,
2460  *   the extended configuration fields must be set to NULL and zero.
2461  * @param mb_pool
2462  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
2463  *   memory buffers to populate each descriptor of the receive ring. There are
2464  *   two options to provide Rx buffer configuration:
2465  *   - single pool:
2466  *     mb_pool is not NULL, rx_conf.rx_nseg is 0.
2467  *   - multiple segments description:
2468  *     mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0.
2469  *     Taken only if flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads.
2470  *
2471  * @return
2472  *   - 0: Success, receive queue correctly set up.
2473  *   - -EIO: if device is removed.
2474  *   - -ENODEV: if *port_id* is invalid.
2475  *   - -EINVAL: The memory pool pointer is null or the size of network buffers
2476  *      which can be allocated from this memory pool does not fit the various
2477  *      buffer sizes allowed by the device controller.
2478  *   - -ENOMEM: Unable to allocate the receive ring descriptors or to
2479  *      allocate network memory buffers from the memory pool when
2480  *      initializing receive descriptors.
2481  */
2482 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2483 		uint16_t nb_rx_desc, unsigned int socket_id,
2484 		const struct rte_eth_rxconf *rx_conf,
2485 		struct rte_mempool *mb_pool);
2486 
2487 /**
2488  * @warning
2489  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2490  *
2491  * Allocate and set up a hairpin receive queue for an Ethernet device.
2492  *
2493  * The function sets up the selected queue to be used in hairpin.
2494  *
2495  * @param port_id
2496  *   The port identifier of the Ethernet device.
2497  * @param rx_queue_id
2498  *   The index of the receive queue to set up.
2499  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2500  *   to rte_eth_dev_configure().
2501  * @param nb_rx_desc
2502  *   The number of receive descriptors to allocate for the receive ring.
2503  *   0 means the PMD will use default value.
2504  * @param conf
2505  *   The pointer to the hairpin configuration.
2506  *
2507  * @return
2508  *   - (0) if successful.
2509  *   - (-ENODEV) if *port_id* is invalid.
2510  *   - (-ENOTSUP) if hardware doesn't support.
2511  *   - (-EINVAL) if bad parameter.
2512  *   - (-ENOMEM) if unable to allocate the resources.
2513  */
2514 __rte_experimental
2515 int rte_eth_rx_hairpin_queue_setup
2516 	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2517 	 const struct rte_eth_hairpin_conf *conf);
2518 
2519 /**
2520  * Allocate and set up a transmit queue for an Ethernet device.
2521  *
2522  * @param port_id
2523  *   The port identifier of the Ethernet device.
2524  * @param tx_queue_id
2525  *   The index of the transmit queue to set up.
2526  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2527  *   to rte_eth_dev_configure().
2528  * @param nb_tx_desc
2529  *   The number of transmit descriptors to allocate for the transmit ring.
2530  * @param socket_id
2531  *   The *socket_id* argument is the socket identifier in case of NUMA.
2532  *   Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2533  *   the DMA memory allocated for the transmit descriptors of the ring.
2534  * @param tx_conf
2535  *   The pointer to the configuration data to be used for the transmit queue.
2536  *   NULL value is allowed, in which case default Tx configuration
2537  *   will be used.
2538  *   The *tx_conf* structure contains the following data:
2539  *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
2540  *     Write-Back threshold registers of the transmit ring.
2541  *     When setting the Write-Back threshold to a value greater than zero,
2542  *     *tx_rs_thresh* value should be explicitly set to one.
2543  *   - The *tx_free_thresh* value indicates the [minimum] number of network
2544  *     buffers that must be pending in the transmit ring to trigger their
2545  *     [implicit] freeing by the driver transmit function.
2546  *   - The *tx_rs_thresh* value indicates the [minimum] number of transmit
2547  *     descriptors that must be pending in the transmit ring before setting the
2548  *     RS bit on a descriptor by the driver transmit function.
2549  *     The *tx_rs_thresh* value should be less than or equal to the
2550  *     *tx_free_thresh* value, and both of them should be less than
2551  *     *nb_tx_desc* - 3.
2552  *   - The *offloads* member contains Tx offloads to be enabled.
2553  *     If an offloading set in tx_conf->offloads
2554  *     hasn't been set in the input argument eth_conf->txmode.offloads
2555  *     to rte_eth_dev_configure(), it is a newly added offload; it must be
2556  *     of a per-queue type and it is enabled for the queue.
2557  *     No need to repeat any bit in tx_conf->offloads which has already been
2558  *     enabled in rte_eth_dev_configure() at port level. An offloading enabled
2559  *     at port level can't be disabled at queue level.
2560  *
2561  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
2562  *     the transmit function to use default values.
2563  * @return
2564  *   - 0: Success, the transmit queue is correctly set up.
2565  *   - -ENOMEM: Unable to allocate the transmit ring descriptors.
2566  */
2567 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2568 		uint16_t nb_tx_desc, unsigned int socket_id,
2569 		const struct rte_eth_txconf *tx_conf);
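/*
 * Usage sketch (illustrative): the typical bring-up sequence combining the
 * setup calls documented in this file. Ring sizes and the mempool parameters
 * are placeholders; rte_pktmbuf_pool_create() comes from rte_mbuf.h and
 * rte_eth_dev_start() is declared further below. Error checking is omitted
 * for brevity.
 *
 *	uint16_t port_id = 0;
 *	int socket = rte_eth_dev_socket_id(port_id);
 *	struct rte_eth_conf conf = {0};
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *		0, RTE_MBUF_DEFAULT_BUF_SIZE, socket);
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, socket, NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 1024, socket, NULL);
 *	rte_eth_dev_start(port_id);
 */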
2570 
2571 /**
2572  * @warning
2573  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2574  *
2575  * Allocate and set up a transmit hairpin queue for an Ethernet device.
2576  *
2577  * @param port_id
2578  *   The port identifier of the Ethernet device.
2579  * @param tx_queue_id
2580  *   The index of the transmit queue to set up.
2581  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2582  *   to rte_eth_dev_configure().
2583  * @param nb_tx_desc
2584  *   The number of transmit descriptors to allocate for the transmit ring.
2585  *   0 to set default PMD value.
2586  * @param conf
2587  *   The hairpin configuration.
2588  *
2589  * @return
2590  *   - (0) if successful.
2591  *   - (-ENODEV) if *port_id* is invalid.
2592  *   - (-ENOTSUP) if hardware doesn't support.
2593  *   - (-EINVAL) if bad parameter.
2594  *   - (-ENOMEM) if unable to allocate the resources.
2595  */
2596 __rte_experimental
2597 int rte_eth_tx_hairpin_queue_setup
2598 	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2599 	 const struct rte_eth_hairpin_conf *conf);
2600 
2601 /**
2602  * @warning
2603  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2604  *
2605  * Get all the hairpin peer Rx / Tx ports of the current port.
2606  * The caller should ensure that the array is large enough to save the ports
2607  * list.
2608  *
2609  * @param port_id
2610  *   The port identifier of the Ethernet device.
2611  * @param peer_ports
2612  *   Pointer to the array to store the peer ports list.
2613  * @param len
2614  *   Length of the array to store the port identifiers.
2615  * @param direction
2616  *   Current port to peer port direction
2617  *   positive - current used as Tx to get all peer Rx ports.
2618  *   zero - current used as Rx to get all peer Tx ports.
2619  *
2620  * @return
2621  *   - (0 or positive) actual peer ports number.
2622  *   - (-EINVAL) if bad parameter.
2623  *   - (-ENODEV) if *port_id* invalid
2624  *   - (-ENOTSUP) if hardware doesn't support.
2625  *   - Others detailed errors from PMDs.
2626  */
2627 __rte_experimental
2628 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2629 				   size_t len, uint32_t direction);
2630 
2631 /**
2632  * @warning
2633  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2634  *
2635  * Bind all hairpin Tx queues of one port to the Rx queues of the peer port.
2636  * It is only allowed to call this function after all hairpin queues are
2637  * configured properly and the devices are in started state.
2638  *
2639  * @param tx_port
2640  *   The identifier of the Tx port.
2641  * @param rx_port
2642  *   The identifier of peer Rx port.
2643  *   RTE_MAX_ETHPORTS is allowed for the traversal of all devices.
2644  *   Rx port ID could have the same value as Tx port ID.
2645  *
2646  * @return
2647  *   - (0) if successful.
2648  *   - (-ENODEV) if Tx port ID is invalid.
2649  *   - (-EBUSY) if device is not in started state.
2650  *   - (-ENOTSUP) if hardware doesn't support.
2651  *   - Others detailed errors from PMDs.
2652  */
2653 __rte_experimental
2654 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2655 
2656 /**
2657  * @warning
2658  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2659  *
2660  * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port.
2661  * This should be called before closing the Tx or Rx devices, if the bind
2662  * function has been called before.
2663  * After unbinding the hairpin ports pair, it is allowed to bind them again.
2664  * Changing queues configuration should be after stopping the device(s).
2665  *
2666  * @param tx_port
2667  *   The identifier of the Tx port.
2668  * @param rx_port
2669  *   The identifier of peer Rx port.
2670  *   RTE_MAX_ETHPORTS is allowed for traversal of all devices.
2671  *   Rx port ID could have the same value as Tx port ID.
2672  *
2673  * @return
2674  *   - (0) if successful.
2675  *   - (-ENODEV) if Tx port ID is invalid.
2676  *   - (-EBUSY) if device is in stopped state.
2677  *   - (-ENOTSUP) if hardware doesn't support.
2678  *   - Others detailed errors from PMDs.
2679  */
2680 __rte_experimental
2681 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2682 
2683 /**
2684  * @warning
2685  * @b EXPERIMENTAL: this API may change without prior notice.
2686  *
2687  *  Get the number of aggregated ports of the DPDK port (specified with port_id).
2688  *  It is used when multiple ports are aggregated into a single one.
2689  *
2690  *  For a regular physical port that doesn't have aggregated ports,
2691  *  the number of aggregated ports is reported as 0.
2692  *
2693  * @param port_id
2694  *   The port identifier of the Ethernet device.
2695  * @return
2696  *   - (>=0) the number of aggregated port if success.
2697  */
2698 __rte_experimental
2699 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2700 
2701 /**
2702  * @warning
2703  * @b EXPERIMENTAL: this API may change without prior notice.
2704  *
2705  *  Map a Tx queue with an aggregated port of the DPDK port (specified with port_id).
2706  *  When multiple ports are aggregated into a single one,
2707  *  it allows to choose which port to use for Tx via a queue.
2708  *
2709  *  The application should use rte_eth_dev_map_aggr_tx_affinity()
2710  *  after rte_eth_dev_configure(), rte_eth_tx_queue_setup(), and
2711  *  before rte_eth_dev_start().
2712  *
2713  * @param port_id
2714  *   The identifier of the port used in rte_eth_tx_burst().
2715  * @param tx_queue_id
2716  *   The index of the transmit queue used in rte_eth_tx_burst().
2717  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2718  *   to rte_eth_dev_configure().
2719  * @param affinity
2720  *   The number of the aggregated port.
2721  *   Value 0 means no affinity and traffic could be routed to any aggregated port.
2722  *   The first aggregated port is number 1 and so on.
2723  *   The maximum number is given by rte_eth_dev_count_aggr_ports().
2724  *
2725  * @return
2726  *   Zero if successful. Non-zero otherwise.
2727  */
2728 __rte_experimental
2729 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2730 				     uint8_t affinity);
2731 
2732 /**
2733  * Return the NUMA socket to which an Ethernet device is connected
2734  *
2735  * @param port_id
2736  *   The port identifier of the Ethernet device
2737  * @return
2738  *   - The NUMA socket ID which the Ethernet device is connected to.
2739  *   - -1 (which translates to SOCKET_ID_ANY) if the socket could not be
2740  *     determined. rte_errno is then set to:
2741  *     - EINVAL if the port_id is invalid,
2742  *     - 0 if the socket could not be determined.
2743  */
2744 int rte_eth_dev_socket_id(uint16_t port_id);
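
/*
 * Illustrative sketch (assumption: one mbuf pool per NUMA node is desired):
 * use the device socket ID to create the packet mbuf pool on the same NUMA
 * node as the port. rte_pktmbuf_pool_create() comes from rte_mbuf.h; the pool
 * name, size and data room below are example values.
 *
 *   int socket = rte_eth_dev_socket_id(port_id); // -1 translates to SOCKET_ID_ANY
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                                    RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                                    socket);
 */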
2745 
2746 /**
2747  * Check if port_id of device is attached
2748  *
2749  * @param port_id
2750  *   The port identifier of the Ethernet device
2751  * @return
2752  *   - 0 if port is out of range or not attached
2753  *   - 1 if device is attached
2754  */
2755 int rte_eth_dev_is_valid_port(uint16_t port_id);
2756 
2757 /**
2758  * @warning
2759  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
2760  *
2761  * Check if Rx queue is valid.
2762  * If the queue has been setup, it is considered valid.
2763  *
2764  * @param port_id
2765  *   The port identifier of the Ethernet device.
2766  * @param queue_id
2767  *   The index of the receive queue.
2768  * @return
2769  *   - -ENODEV: if port_id is invalid.
2770  *   - -EINVAL: if queue_id is out of range or queue has not been setup.
2771  *   - 0 if Rx queue is valid.
2772  */
2773 __rte_experimental
2774 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2775 
2776 /**
2777  * @warning
2778  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
2779  *
2780  * Check if Tx queue is valid.
2781  * If the queue has been setup, it is considered valid.
2782  *
2783  * @param port_id
2784  *   The port identifier of the Ethernet device.
2785  * @param queue_id
2786  *   The index of the transmit queue.
2787  * @return
2788  *   - -ENODEV: if port_id is invalid.
2789  *   - -EINVAL: if queue_id is out of range or queue has not been setup.
2790  *   - 0 if Tx queue is valid.
2791  */
2792 __rte_experimental
2793 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2794 
2795 /**
2796  * Start specified Rx queue of a port. It is used when rx_deferred_start
2797  * flag of the specified queue is true.
2798  *
2799  * @param port_id
2800  *   The port identifier of the Ethernet device
2801  * @param rx_queue_id
2802  *   The index of the Rx queue to update the ring.
2803  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2804  *   to rte_eth_dev_configure().
2805  * @return
2806  *   - 0: Success, the receive queue is started.
2807  *   - -ENODEV: if *port_id* is invalid.
2808  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2809  *   - -EIO: if device is removed.
2810  *   - -ENOTSUP: The function is not supported by the PMD.
2811  */
2812 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
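
/*
 * Illustrative sketch of a deferred-start Rx queue (queue index, descriptor
 * count and the mb_pool mempool are example assumptions): the queue is set up
 * with rx_deferred_start = 1, so rte_eth_dev_start() does not start it, and
 * the application starts it later explicitly.
 *
 *   struct rte_eth_dev_info info;
 *   rte_eth_dev_info_get(port_id, &info);
 *   struct rte_eth_rxconf rxconf = info.default_rxconf;
 *   rxconf.rx_deferred_start = 1;
 *   rte_eth_rx_queue_setup(port_id, 0, 1024,
 *                          rte_eth_dev_socket_id(port_id), &rxconf, mb_pool);
 *   rte_eth_dev_start(port_id);
 *   // ... later, when the application is ready to receive on this queue:
 *   rte_eth_dev_rx_queue_start(port_id, 0);
 */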
2813 
2814 /**
2815  * Stop specified Rx queue of a port
2816  *
2817  * @param port_id
2818  *   The port identifier of the Ethernet device
2819  * @param rx_queue_id
2820  *   The index of the Rx queue to update the ring.
2821  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2822  *   to rte_eth_dev_configure().
2823  * @return
2824  *   - 0: Success, the receive queue is stopped.
2825  *   - -ENODEV: if *port_id* is invalid.
2826  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2827  *   - -EIO: if device is removed.
2828  *   - -ENOTSUP: The function is not supported by the PMD.
2829  */
2830 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2831 
2832 /**
2833  * Start Tx for specified queue of a port. It is used when tx_deferred_start
2834  * flag of the specified queue is true.
2835  *
2836  * @param port_id
2837  *   The port identifier of the Ethernet device
2838  * @param tx_queue_id
2839  *   The index of the Tx queue to update the ring.
2840  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2841  *   to rte_eth_dev_configure().
2842  * @return
2843  *   - 0: Success, the transmit queue is started.
2844  *   - -ENODEV: if *port_id* is invalid.
2845  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2846  *   - -EIO: if device is removed.
2847  *   - -ENOTSUP: The function is not supported by the PMD.
2848  */
2849 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2850 
2851 /**
2852  * Stop specified Tx queue of a port
2853  *
2854  * @param port_id
2855  *   The port identifier of the Ethernet device
2856  * @param tx_queue_id
2857  *   The index of the Tx queue to update the ring.
2858  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2859  *   to rte_eth_dev_configure().
2860  * @return
2861  *   - 0: Success, the transmit queue is stopped.
2862  *   - -ENODEV: if *port_id* is invalid.
2863  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2864  *   - -EIO: if device is removed.
2865  *   - -ENOTSUP: The function is not supported by the PMD.
2866  */
2867 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2868 
2869 /**
2870  * Start an Ethernet device.
2871  *
2872  * The device start step is the last one and consists of setting the configured
2873  * offload features and in starting the transmit and the receive units of the
2874  * device.
2875  *
2876  * Device RTE_ETH_DEV_NOLIVE_MAC_ADDR flag causes MAC address to be set before
2877  * PMD port start callback function is invoked.
2878  *
2879  * All device queues (except for deferred-start queues) should have status
2880  * `RTE_ETH_QUEUE_STATE_STARTED` after start.
2881  *
2882  * On success, all basic functions exported by the Ethernet API (link status,
2883  * receive/transmit, and so on) can be invoked.
2884  *
2885  * @param port_id
2886  *   The port identifier of the Ethernet device.
2887  * @return
2888  *   - 0: Success, Ethernet device started.
2889  *   - -EAGAIN: If start operation must be retried.
2890  *   - <0: Error code of the driver device start function.
2891  */
2892 int rte_eth_dev_start(uint16_t port_id);
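
/*
 * Minimal bring-up sketch following the required call order (configure, queue
 * setup, start). Error handling is omitted and the single Rx/Tx queue, ring
 * sizes and the mb_pool mempool are example assumptions.
 *
 *   struct rte_eth_conf conf = {0};
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 *   rte_eth_rx_queue_setup(port_id, 0, 1024,
 *                          rte_eth_dev_socket_id(port_id), NULL, mb_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, 1024,
 *                          rte_eth_dev_socket_id(port_id), NULL);
 *   if (rte_eth_dev_start(port_id) == 0)
 *       rte_eth_promiscuous_enable(port_id); // optional, e.g. for traffic capture
 */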
2893 
2894 /**
2895  * Stop an Ethernet device. The device can be restarted with a call to
2896  * rte_eth_dev_start()
2897  *
2898  * All device queues status should be `RTE_ETH_QUEUE_STATE_STOPPED` after stop.
2899  *
2900  * @param port_id
2901  *   The port identifier of the Ethernet device.
2902  * @return
2903  *   - 0: Success, Ethernet device stopped.
2904  *   - -EBUSY: If stopping the port is not allowed in current state.
2905  *   - <0: Error code of the driver device stop function.
2906  */
2907 int rte_eth_dev_stop(uint16_t port_id);
2908 
2909 /**
2910  * Link up an Ethernet device.
2911  *
2912  * Setting the device link up re-enables the device Rx/Tx
2913  * functionality after the link was previously set down.
2914  *
2915  * @param port_id
2916  *   The port identifier of the Ethernet device.
2917  * @return
2918  *   - 0: Success, Ethernet device linked up.
2919  *   - <0: Error code of the driver device link up function.
2920  */
2921 int rte_eth_dev_set_link_up(uint16_t port_id);
2922 
2923 /**
2924  * Link down an Ethernet device.
2925  * The device Rx/Tx functionality will be disabled on success,
2926  * and it can be re-enabled with a call to
2927  * rte_eth_dev_set_link_up().
2928  *
2929  * @param port_id
2930  *   The port identifier of the Ethernet device.
2931  */
2932 int rte_eth_dev_set_link_down(uint16_t port_id);
2933 
2934 /**
2935  * Close a stopped Ethernet device. The device cannot be restarted!
2936  * The function frees all port resources.
2937  *
2938  * @param port_id
2939  *   The port identifier of the Ethernet device.
2940  * @return
2941  *   - Zero if the port is closed successfully.
2942  *   - Negative if something went wrong.
2943  */
2944 int rte_eth_dev_close(uint16_t port_id);
2945 
2946 /**
2947  * Reset an Ethernet device and keep its port ID.
2948  *
2949  * When a port has to be reset passively, the DPDK application can invoke
2950  * this function. For example when a PF is reset, all its VFs should also
2951  * be reset. Normally a DPDK application can invoke this function when
2952  * RTE_ETH_EVENT_INTR_RESET event is detected, but can also use it to start
2953  * a port reset in other circumstances.
2954  *
2955  * When this function is called, it first stops the port and then calls the
2956  * PMD specific dev_uninit() and dev_init() to return the port to its initial
2957  * state, in which no Tx and Rx queues are set up, as if the port has been
2958  * reset and not started. The port keeps the port ID it had before the
2959  * function call.
2960  *
2961  * After calling rte_eth_dev_reset(), the application should use
2962  * rte_eth_dev_configure(), rte_eth_rx_queue_setup(),
2963  * rte_eth_tx_queue_setup(), and rte_eth_dev_start()
2964  * to reconfigure the device as appropriate.
2965  *
2966  * Note: To avoid unexpected behavior, the application should stop calling
2967  * Tx and Rx functions before calling rte_eth_dev_reset(). For thread
2968  * safety, all these controlling functions should be called from the same
2969  * thread.
2970  *
2971  * @param port_id
2972  *   The port identifier of the Ethernet device.
2973  *
2974  * @return
2975  *   - (0) if successful.
2976  *   - (-ENODEV) if *port_id* is invalid.
2977  *   - (-ENOTSUP) if hardware doesn't support this function.
2978  *   - (-EPERM) if not ran from the primary process.
2979  *   - (-EIO) if re-initialisation failed or device is removed.
2980  *   - (-ENOMEM) if the reset failed due to OOM.
2981  *   - (-EAGAIN) if the reset temporarily failed and should be retried later.
2982  */
2983 int rte_eth_dev_reset(uint16_t port_id);
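
/*
 * Illustrative recovery sketch: the event callback only records that a reset
 * was requested, and the application's control thread later performs the
 * reset and reconfiguration. The reconfigure_port() helper stands for the
 * application's own configure/queue-setup/start sequence and is an assumption
 * of this example.
 *
 *   static volatile int reset_needed;
 *
 *   static int
 *   reset_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                  void *cb_arg, void *ret_param)
 *   {
 *       (void)port_id; (void)event; (void)cb_arg; (void)ret_param;
 *       reset_needed = 1;
 *       return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
 *                                 reset_event_cb, NULL);
 *
 *   // later, in the control thread, after Rx/Tx has been quiesced:
 *   if (reset_needed && rte_eth_dev_reset(port_id) == 0)
 *       reconfigure_port(port_id); // application-defined
 */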
2984 
2985 /**
2986  * Enable receipt in promiscuous mode for an Ethernet device.
2987  *
2988  * @param port_id
2989  *   The port identifier of the Ethernet device.
2990  * @return
2991  *   - (0) if successful.
2992  *   - (-ENOTSUP) if support for promiscuous_enable() does not exist
2993  *     for the device.
2994  *   - (-ENODEV) if *port_id* invalid.
2995  */
2996 int rte_eth_promiscuous_enable(uint16_t port_id);
2997 
2998 /**
2999  * Disable receipt in promiscuous mode for an Ethernet device.
3000  *
3001  * @param port_id
3002  *   The port identifier of the Ethernet device.
3003  * @return
3004  *   - (0) if successful.
3005  *   - (-ENOTSUP) if support for promiscuous_disable() does not exist
3006  *     for the device.
3007  *   - (-ENODEV) if *port_id* invalid.
3008  */
3009 int rte_eth_promiscuous_disable(uint16_t port_id);
3010 
3011 /**
3012  * Return the value of promiscuous mode for an Ethernet device.
3013  *
3014  * @param port_id
3015  *   The port identifier of the Ethernet device.
3016  * @return
3017  *   - (1) if promiscuous is enabled
3018  *   - (0) if promiscuous is disabled.
3019  *   - (-1) on error
3020  */
3021 int rte_eth_promiscuous_get(uint16_t port_id);
3022 
3023 /**
3024  * Enable the receipt of any multicast frame by an Ethernet device.
3025  *
3026  * @param port_id
3027  *   The port identifier of the Ethernet device.
3028  * @return
3029  *   - (0) if successful.
3030  *   - (-ENOTSUP) if support for allmulticast_enable() does not exist
3031  *     for the device.
3032  *   - (-ENODEV) if *port_id* invalid.
3033  */
3034 int rte_eth_allmulticast_enable(uint16_t port_id);
3035 
3036 /**
3037  * Disable the receipt of all multicast frames by an Ethernet device.
3038  *
3039  * @param port_id
3040  *   The port identifier of the Ethernet device.
3041  * @return
3042  *   - (0) if successful.
3043  *   - (-ENOTSUP) if support for allmulticast_disable() does not exist
3044  *     for the device.
3045  *   - (-ENODEV) if *port_id* invalid.
3046  */
3047 int rte_eth_allmulticast_disable(uint16_t port_id);
3048 
3049 /**
3050  * Return the value of allmulticast mode for an Ethernet device.
3051  *
3052  * @param port_id
3053  *   The port identifier of the Ethernet device.
3054  * @return
3055  *   - (1) if allmulticast is enabled
3056  *   - (0) if allmulticast is disabled.
3057  *   - (-1) on error
3058  */
3059 int rte_eth_allmulticast_get(uint16_t port_id);
3060 
3061 /**
3062  * Retrieve the link status (up/down), the duplex mode (half/full),
3063  * the negotiation (auto/fixed), and if available, the speed (Mbps).
3064  *
3065  * It might need to wait up to 9 seconds.
3066  * @see rte_eth_link_get_nowait.
3067  *
3068  * @param port_id
3069  *   The port identifier of the Ethernet device.
3070  * @param link
3071  *   Link information written back.
3072  * @return
3073  *   - (0) if successful.
3074  *   - (-ENOTSUP) if the function is not supported in PMD.
3075  *   - (-ENODEV) if *port_id* invalid.
3076  *   - (-EINVAL) if bad parameter.
3077  */
3078 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link)
3079 	__rte_warn_unused_result;
3080 
3081 /**
3082  * Retrieve the link status (up/down), the duplex mode (half/full),
3083  * the negotiation (auto/fixed), and if available, the speed (Mbps).
3084  *
3085  * @param port_id
3086  *   The port identifier of the Ethernet device.
3087  * @param link
3088  *   Link information written back.
3089  * @return
3090  *   - (0) if successful.
3091  *   - (-ENOTSUP) if the function is not supported in PMD.
3092  *   - (-ENODEV) if *port_id* invalid.
3093  *   - (-EINVAL) if bad parameter.
3094  */
3095 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link)
3096 	__rte_warn_unused_result;
3097 
3098 /**
3099  * @warning
3100  * @b EXPERIMENTAL: this API may change without prior notice.
3101  *
3102  * The function converts a link_speed to a string. It handles all special
3103  * values like unknown speed or no speed.
3104  *
3105  * @param link_speed
3106  *   link_speed of rte_eth_link struct
3107  * @return
3108  *   Link speed in textual format. It is a pointer to immutable memory.
3109  *   No free is required.
3110  */
3111 __rte_experimental
3112 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3113 
3114 /**
3115  * @warning
3116  * @b EXPERIMENTAL: this API may change without prior notice.
3117  *
3118  * The function converts a rte_eth_link struct representing a link status to
3119  * a string.
3120  *
3121  * @param str
3122  *   A pointer to a string to be filled with textual representation of
3123  *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
3124  *   store default link status text.
3125  * @param len
3126  *   Length of available memory at 'str' string.
3127  * @param eth_link
3128  *   Link status returned by rte_eth_link_get function
3129  * @return
3130  *   Number of bytes written to str array or -EINVAL if bad parameter.
3131  */
3132 __rte_experimental
3133 int rte_eth_link_to_str(char *str, size_t len,
3134 			const struct rte_eth_link *eth_link);
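
/*
 * Illustrative sketch: retrieve the link status and print it in textual form
 * using the helpers above (printf and the port_id variable are assumptions of
 * the example).
 *
 *   struct rte_eth_link link;
 *   char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *   if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *       rte_eth_link_to_str(text, sizeof(text), &link) >= 0)
 *       printf("Port %u: %s\n", port_id, text);
 */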
3135 
3136 /**
3137  * @warning
3138  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
3139  *
3140  * Get Active lanes.
3141  *
3142  * @param port_id
3143  *   The port identifier of the Ethernet device.
3144  * @param lanes
3145  *   Driver updates lanes with the number of active lanes.
3146  *   On a supported NIC with link up, lanes will be a non-zero value irrespective of whether
3147  *   the link speed is autonegotiated or fixed. No information is provided on error.
3148  *
3149  * @return
3150  *   - (0) if successful.
3151  *   - (-ENOTSUP) if underlying hardware or driver doesn't support
3152  *     that operation.
3153  *   - (-EIO) if device is removed.
3154  *   - (-ENODEV)  if *port_id* invalid.
3155  */
3156 __rte_experimental
3157 int rte_eth_speed_lanes_get(uint16_t port_id, uint32_t *lanes);
3158 
3159 /**
3160  * @warning
3161  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
3162  *
3163  * Set speed lanes supported by the NIC.
3164  *
3165  * @param port_id
3166  *   The port identifier of the Ethernet device.
3167  * @param speed_lanes
3168  *   A non-zero number of speed lanes that will be applied to the Ethernet PHY
3169  *   along with the fixed speed configuration. The driver returns an error if the
3170  *   requested number of lanes is not in the speed capability list.
3171  *
3172  * @return
3173  *   - (0) if successful.
3174  *   - (-ENOTSUP) if underlying hardware or driver doesn't support
3175  *     that operation.
3176  *   - (-EIO) if device is removed.
3177  *   - (-ENODEV)  if *port_id* invalid.
3178  *   - (-EINVAL)  if *lanes* count not in speeds capability list.
3179  */
3180 __rte_experimental
3181 int rte_eth_speed_lanes_set(uint16_t port_id, uint32_t speed_lanes);
3182 
3183 /**
3184  * @warning
3185  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
3186  *
3187  * Get speed lanes supported by the NIC.
3188  *
3189  * @param port_id
3190  *   The port identifier of the Ethernet device.
3191  * @param speed_lanes_capa
3192  *   An array of supported speed and its supported lanes.
3193  * @param num
3194  *   Size of the speed_lanes_capa array. The size is equal to the supported speeds list size.
3195  *   The value of num can be derived by calling this API with speed_lanes_capa=NULL and num=0.
3196  *
3197  * @return
3198  *   - (0) if successful.
3199  *   - (-ENOTSUP) if underlying hardware or driver doesn't support
3200  *     that operation.
3201  *   - (-EIO) if device is removed.
3202  *   - (-ENODEV)  if *port_id* invalid.
3203  *   - (-EINVAL)  if *speed_lanes* invalid
3204  */
3205 __rte_experimental
3206 int rte_eth_speed_lanes_get_capability(uint16_t port_id,
3207 				       struct rte_eth_speed_lanes_capa *speed_lanes_capa,
3208 				       unsigned int num);
3209 
3210 /**
3211  * Retrieve the general I/O statistics of an Ethernet device.
3212  *
3213  * @param port_id
3214  *   The port identifier of the Ethernet device.
3215  * @param stats
3216  *   A pointer to a structure of type *rte_eth_stats* to be filled with
3217  *   the values of device counters for the following set of statistics:
3218  *   - *ipackets* with the total of successfully received packets.
3219  *   - *opackets* with the total of successfully transmitted packets.
3220  *   - *ibytes*   with the total of successfully received bytes.
3221  *   - *obytes*   with the total of successfully transmitted bytes.
3222  *   - *ierrors*  with the total of erroneous received packets.
3223  *   - *oerrors*  with the total of failed transmitted packets.
3224  * @return
3225  *   Zero if successful. Non-zero otherwise.
3226  */
3227 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
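
/*
 * Illustrative sketch: read and print the basic counters listed above
 * (printf and inttypes.h format macros are assumptions of the example).
 *
 *   struct rte_eth_stats stats;
 *
 *   if (rte_eth_stats_get(port_id, &stats) == 0)
 *       printf("rx %" PRIu64 " pkts (%" PRIu64 " errors), "
 *              "tx %" PRIu64 " pkts (%" PRIu64 " errors)\n",
 *              stats.ipackets, stats.ierrors, stats.opackets, stats.oerrors);
 */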
3228 
3229 /**
3230  * Reset the general I/O statistics of an Ethernet device.
3231  *
3232  * @param port_id
3233  *   The port identifier of the Ethernet device.
3234  * @return
3235  *   - (0) if device notified to reset stats.
3236  *   - (-ENOTSUP) if hardware doesn't support.
3237  *   - (-ENODEV) if *port_id* invalid.
3238  *   - (<0): Error code of the driver stats reset function.
3239  */
3240 int rte_eth_stats_reset(uint16_t port_id);
3241 
3242 /**
3243  * Retrieve names of extended statistics of an Ethernet device.
3244  *
3245  * There is an assumption that 'xstats_names' and 'xstats' arrays are matched
3246  * by array index:
3247  *  xstats_names[i].name => xstats[i].value
3248  *
3249  * And the array index is the same as the id field of 'struct rte_eth_xstat':
3250  *  xstats[i].id == i
3251  *
3252  * This assumption makes key-value pair matching less flexible but simpler.
3253  *
3254  * @param port_id
3255  *   The port identifier of the Ethernet device.
3256  * @param xstats_names
3257  *   An rte_eth_xstat_name array of at least *size* elements to
3258  *   be filled. If set to NULL, the function returns the required number
3259  *   of elements.
3260  * @param size
3261  *   The size of the xstats_names array (number of elements).
3262  * @return
3263  *   - A positive value lower or equal to size: success. The return value
3264  *     is the number of entries filled in the stats table.
3265  *   - A positive value higher than size: error, the given statistics table
3266  *     is too small. The return value corresponds to the size that should
3267  *     be given to succeed. The entries in the table are not valid and
3268  *     shall not be used by the caller.
3269  *   - A negative value on error (invalid port ID).
3270  */
3271 int rte_eth_xstats_get_names(uint16_t port_id,
3272 		struct rte_eth_xstat_name *xstats_names,
3273 		unsigned int size);
3274 
3275 /**
3276  * Retrieve extended statistics of an Ethernet device.
3277  *
3278  * There is an assumption that 'xstats_names' and 'xstats' arrays are matched
3279  * by array index:
3280  *  xstats_names[i].name => xstats[i].value
3281  *
3282  * And the array index is the same as the id field of 'struct rte_eth_xstat':
3283  *  xstats[i].id == i
3284  *
3285  * This assumption makes key-value pair matching less flexible but simpler.
3286  *
3287  * @param port_id
3288  *   The port identifier of the Ethernet device.
3289  * @param xstats
3290  *   A pointer to a table of structure of type *rte_eth_xstat*
3291  *   to be filled with device statistics ids and values.
3292  *   This parameter can be set to NULL if and only if n is 0.
3293  * @param n
3294  *   The size of the xstats array (number of elements).
3295  *   If lower than the required number of elements, the function returns
3296  *   the required number of elements.
3297  *   If equal to zero, the xstats must be NULL, the function returns the
3298  *   required number of elements.
3299  * @return
3300  *   - A positive value lower or equal to n: success. The return value
3301  *     is the number of entries filled in the stats table.
3302  *   - A positive value higher than n: error, the given statistics table
3303  *     is too small. The return value corresponds to the size that should
3304  *     be given to succeed. The entries in the table are not valid and
3305  *     shall not be used by the caller.
3306  *   - A negative value on error (invalid port ID).
3307  */
3308 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3309 		unsigned int n);
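
/*
 * Illustrative sketch of the usual two-call pattern: first query the number of
 * xstats, then allocate matching name/value arrays and fetch both (allocation
 * failure handling and includes are omitted for brevity).
 *
 *   int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *   if (nb > 0) {
 *       struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));
 *       struct rte_eth_xstat *values = malloc(nb * sizeof(*values));
 *       if (rte_eth_xstats_get_names(port_id, names, nb) == nb &&
 *           rte_eth_xstats_get(port_id, values, nb) == nb) {
 *           for (int i = 0; i < nb; i++)
 *               printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);
 *       }
 *       free(names);
 *       free(values);
 *   }
 */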
3310 
3311 /**
3312  * Retrieve names of extended statistics of an Ethernet device.
3313  *
3314  * @param port_id
3315  *   The port identifier of the Ethernet device.
3316  * @param xstats_names
3317  *   Array to be filled in with names of requested device statistics.
3318  *   Must not be NULL if @p ids are specified (not NULL).
3319  * @param size
3320  *   Number of elements in @p xstats_names array (if not NULL) and in
3321  *   @p ids array (if not NULL). Must be 0 if both array pointers are NULL.
3322  * @param ids
3323  *   IDs array given by app to retrieve specific statistics. May be NULL to
3324  *   retrieve names of all available statistics or, if @p xstats_names is
3325  *   NULL as well, just the number of available statistics.
3326  * @return
3327  *   - A positive value lower or equal to size: success. The return value
3328  *     is the number of entries filled in the stats table.
3329  *   - A positive value higher than size: success. The given statistics table
3330  *     is too small. The return value corresponds to the size that should
3331  *     be given to succeed. The entries in the table are not valid and
3332  *     shall not be used by the caller.
3333  *   - A negative value on error.
3334  */
3335 int
3336 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3337 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3338 	uint64_t *ids);
3339 
3340 /**
3341  * Retrieve extended statistics of an Ethernet device.
3342  *
3343  * @param port_id
3344  *   The port identifier of the Ethernet device.
3345  * @param ids
3346  *   IDs array given by app to retrieve specific statistics. May be NULL to
3347  *   retrieve all available statistics or, if @p values is NULL as well,
3348  *   just the number of available statistics.
3349  * @param values
3350  *   Array to be filled in with requested device statistics.
3351  *   Must not be NULL if ids are specified (not NULL).
3352  * @param size
3353  *   Number of elements in @p values array (if not NULL) and in @p ids
3354  *   array (if not NULL). Must be 0 if both array pointers are NULL.
3355  * @return
3356  *   - A positive value lower or equal to size: success. The return value
3357  *     is the number of entries filled in the stats table.
3358  *   - A positive value higher than size: success: The given statistics table
3359  *     is too small. The return value corresponds to the size that should
3360  *     be given to succeed. The entries in the table are not valid and
3361  *     shall not be used by the caller.
3362  *   - A negative value on error.
3363  */
3364 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3365 			     uint64_t *values, unsigned int size);
3366 
3367 /**
3368  * Gets the ID of a statistic from its name.
3369  *
3370  * This function searches for the statistics using string compares, and
3371  * as such should not be used on the fast-path. For fast-path retrieval of
3372  * specific statistics, store the ID as provided in *id* from this function,
3373  * and pass the ID to rte_eth_xstats_get()
3374  *
3375  * @param port_id The port to look up statistics from
3376  * @param xstat_name The name of the statistic to return
3377  * @param[out] id A pointer to an app-supplied uint64_t which should be
3378  *                set to the ID of the stat if the stat exists.
3379  * @return
3380  *    0 on success
3381  *    -ENODEV for invalid port_id,
3382  *    -EIO if device is removed,
3383  *    -EINVAL if the xstat_name doesn't exist in port_id
3384  *    -ENOMEM if bad parameter.
3385  */
3386 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3387 		uint64_t *id);
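
/*
 * Illustrative sketch: resolve the ID of one statistic by name at setup time,
 * then read only that counter on the fast path with rte_eth_xstats_get_by_id().
 * The "rx_missed_errors" name is an example and may not exist on every PMD.
 *
 *   uint64_t id, value;
 *
 *   if (rte_eth_xstats_get_id_by_name(port_id, "rx_missed_errors", &id) == 0) {
 *       // later, periodically:
 *       if (rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *           printf("rx_missed_errors = %" PRIu64 "\n", value);
 *   }
 */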
3388 
3389 /**
3390  * Reset extended statistics of an Ethernet device.
3391  *
3392  * @param port_id
3393  *   The port identifier of the Ethernet device.
3394  * @return
3395  *   - (0) if device notified to reset extended stats.
3396  *   - (-ENOTSUP) if pmd doesn't support both
3397  *     extended stats and basic stats reset.
3398  *   - (-ENODEV) if *port_id* invalid.
3399  *   - (<0): Error code of the driver xstats reset function.
3400  */
3401 int rte_eth_xstats_reset(uint16_t port_id);
3402 
3403 /**
3404  *  Set a mapping for the specified transmit queue to the specified per-queue
3405  *  statistics counter.
3406  *
3407  * @param port_id
3408  *   The port identifier of the Ethernet device.
3409  * @param tx_queue_id
3410  *   The index of the transmit queue for which a queue stats mapping is required.
3411  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3412  *   to rte_eth_dev_configure().
3413  * @param stat_idx
3414  *   The per-queue packet statistics functionality number that the transmit
3415  *   queue is to be assigned.
3416  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3417  *   The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
3418  * @return
3419  *   Zero if successful. Non-zero otherwise.
3420  */
3421 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3422 		uint16_t tx_queue_id, uint8_t stat_idx);
3423 
3424 /**
3425  *  Set a mapping for the specified receive queue to the specified per-queue
3426  *  statistics counter.
3427  *
3428  * @param port_id
3429  *   The port identifier of the Ethernet device.
3430  * @param rx_queue_id
3431  *   The index of the receive queue for which a queue stats mapping is required.
3432  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3433  *   to rte_eth_dev_configure().
3434  * @param stat_idx
3435  *   The per-queue packet statistics functionality number that the receive
3436  *   queue is to be assigned.
3437  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3438  *   The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
3439  * @return
3440  *   Zero if successful. Non-zero otherwise.
3441  */
3442 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3443 					   uint16_t rx_queue_id,
3444 					   uint8_t stat_idx);
3445 
3446 /**
3447  * Retrieve the Ethernet address of an Ethernet device.
3448  *
3449  * @param port_id
3450  *   The port identifier of the Ethernet device.
3451  * @param mac_addr
3452  *   A pointer to a structure of type *ether_addr* to be filled with
3453  *   the Ethernet address of the Ethernet device.
3454  * @return
3455  *   - (0) if successful
3456  *   - (-ENODEV) if *port_id* invalid.
3457  *   - (-EINVAL) if bad parameter.
3458  */
3459 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3460 
3461 /**
3462  * @warning
3463  * @b EXPERIMENTAL: this API may change without prior notice
3464  *
3465  * Retrieve the Ethernet addresses of an Ethernet device.
3466  *
3467  * @param port_id
3468  *   The port identifier of the Ethernet device.
3469  * @param ma
3470  *   A pointer to an array of structures of type *ether_addr* to be filled with
3471  *   the Ethernet addresses of the Ethernet device.
3472  * @param num
3473  *   Number of elements in the @p ma array.
3474  *   Note that rte_eth_dev_info::max_mac_addrs can be used to retrieve
3475  *   the maximum number of Ethernet addresses for a given port.
3476  * @return
3477  *   - number of retrieved addresses if successful
3478  *   - (-ENODEV) if *port_id* invalid.
3479  *   - (-EINVAL) if bad parameter.
3480  */
3481 __rte_experimental
3482 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3483 	unsigned int num);
3484 
3485 /**
3486  * Retrieve the contextual information of an Ethernet device.
3487  *
3488  * This function returns the Ethernet device information based
3489  * on the values stored internally in the device specific data.
3490  * For example: number of queues, descriptor limits, device
3491  * capabilities and offload flags.
3492  *
3493  * @param port_id
3494  *   The port identifier of the Ethernet device.
3495  * @param dev_info
3496  *   A pointer to a structure of type *rte_eth_dev_info* to be filled with
3497  *   the contextual information of the Ethernet device.
3498  * @return
3499  *   - (0) if successful.
3500  *   - (-ENOTSUP) if support for dev_infos_get() does not exist for the device.
3501  *   - (-ENODEV) if *port_id* invalid.
3502  *   - (-EINVAL) if bad parameter.
3503  */
3504 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
3505 	__rte_warn_unused_result;
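
/*
 * Illustrative sketch: query device capabilities before configuring, e.g. to
 * clamp the requested queue counts and to check whether an Rx offload is
 * supported (the requested counts and the chosen offload are example values).
 *
 *   struct rte_eth_dev_info info;
 *
 *   if (rte_eth_dev_info_get(port_id, &info) == 0) {
 *       uint16_t nb_rxq = RTE_MIN((uint16_t)4, info.max_rx_queues);
 *       uint16_t nb_txq = RTE_MIN((uint16_t)4, info.max_tx_queues);
 *       int rx_csum = (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0;
 *       // use nb_rxq/nb_txq in rte_eth_dev_configure() and enable the offload
 *       // in rte_eth_conf only when rx_csum is non-zero
 *   }
 */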
3506 
3507 /**
3508  * @warning
3509  * @b EXPERIMENTAL: this API may change without prior notice.
3510  *
3511  * Retrieve the configuration of an Ethernet device.
3512  *
3513  * @param port_id
3514  *   The port identifier of the Ethernet device.
3515  * @param dev_conf
3516  *   Location for Ethernet device configuration to be filled in.
3517  * @return
3518  *   - (0) if successful.
3519  *   - (-ENODEV) if *port_id* invalid.
3520  *   - (-EINVAL) if bad parameter.
3521  */
3522 __rte_experimental
3523 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
3524 	__rte_warn_unused_result;
3525 
3526 /**
3527  * Retrieve the firmware version of a device.
3528  *
3529  * @param port_id
3530  *   The port identifier of the device.
3531  * @param fw_version
3532  *   A pointer to a string array storing the firmware version of a device,
3533  *   the string includes the terminating null byte. This pointer is allocated by the caller.
3534  * @param fw_size
3535  *   The size of the string array pointed to by fw_version, which should be
3536  *   large enough to store the firmware version of the device.
3537  * @return
3538  *   - (0) if successful.
3539  *   - (-ENOTSUP) if operation is not supported.
3540  *   - (-ENODEV) if *port_id* invalid.
3541  *   - (-EIO) if device is removed.
3542  *   - (-EINVAL) if bad parameter.
3543  *   - (>0) if *fw_size* is not enough to store firmware version, return
3544  *          the size of the non-truncated string.
3545  */
3546 int rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
3547 	__rte_warn_unused_result;
3548 
3549 /**
3550  * Retrieve the supported packet types of an Ethernet device.
3551  *
3552  * When a packet type is announced as supported, it *must* be recognized by
3553  * the PMD. For instance, if RTE_PTYPE_L2_ETHER, RTE_PTYPE_L2_ETHER_VLAN
3554  * and RTE_PTYPE_L3_IPV4 are announced, the PMD must return the following
3555  * packet types for these packets:
3556  * - Ether/IPv4              -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
3557  * - Ether/VLAN/IPv4         -> RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4
3558  * - Ether/[anything else]   -> RTE_PTYPE_L2_ETHER
3559  * - Ether/VLAN/[anything else] -> RTE_PTYPE_L2_ETHER_VLAN
3560  *
3561  * When a packet is received by a PMD, the most precise type must be
3562  * returned among the ones supported. However, a PMD is allowed to set a
3563  * packet type that is not in the supported list, on the condition that it
3564  * is more precise. Therefore, a PMD announcing no supported packet types
3565  * can still set a matching packet type in a received packet.
3566  *
3567  * @note
3568  *   It is better to invoke this API after the device is started or the Rx burst
3569  *   function is decided, in order to obtain the correct supported ptypes.
3570  * @note
3571  *   If a given PMD does not report which ptypes it supports, then the supported
3572  *   ptype count is reported as 0.
3573  * @param port_id
3574  *   The port identifier of the Ethernet device.
3575  * @param ptype_mask
3576  *   A hint of what kind of packet types the caller is interested in.
3577  * @param ptypes
3578  *   An array pointer to store adequate packet types, allocated by caller.
3579  * @param num
3580  *  Size of the array pointed to by param ptypes.
3581  * @return
3582  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
3583  *           only num entries will be filled into the ptypes array, but the full
3584  *           count of supported ptypes will be returned.
3585  *   - (-ENODEV) if *port_id* invalid.
3586  *   - (-EINVAL) if bad parameter.
3587  */
3588 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3589 				     uint32_t *ptypes, int num)
3590 	__rte_warn_unused_result;
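
/*
 * Illustrative sketch: query the ptypes the PMD reports for a mask into a
 * caller-allocated array. The L4 mask and the array capacity of 32 are
 * example choices; the result may be truncated if the PMD reports more.
 *
 *   uint32_t ptypes[32];
 *   int nb = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *                                             ptypes, RTE_DIM(ptypes));
 *   for (int i = 0; i < nb && i < (int)RTE_DIM(ptypes); i++)
 *       printf("supported ptype: 0x%08" PRIx32 "\n", ptypes[i]);
 */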
3591 
3592 /**
3593  * Inform Ethernet device about reduced range of packet types to handle.
3594  *
3595  * The application can use this function to set only the specific ptypes that it
3596  * is interested in. This information can be used by the PMD to optimize the Rx path.
3597  *
3598  * The function accepts an array `set_ptypes` allocated by the caller to
3599  * store the packet types set by the driver, the last element of the array
3600  * is set to RTE_PTYPE_UNKNOWN. The size of the `set_ptypes` array should be
3601  * `rte_eth_dev_get_supported_ptypes() + 1`, otherwise it might only be filled
3602  * partially.
3603  *
3604  * @param port_id
3605  *   The port identifier of the Ethernet device.
3606  * @param ptype_mask
3607  *   The ptype family that application is interested in should be bitwise OR of
3608  *   RTE_PTYPE_*_MASK or 0.
3609  * @param set_ptypes
3610  *   An array pointer to store set packet types, allocated by caller. The
3611  *   function marks the end of array with RTE_PTYPE_UNKNOWN.
3612  * @param num
3613  *   Size of the array pointed to by param set_ptypes.
3614  *   Should be rte_eth_dev_get_supported_ptypes() + 1 to accommodate the
3615  *   set ptypes.
3616  * @return
3617  *   - (0) if Success.
3618  *   - (-ENODEV) if *port_id* invalid.
3619  *   - (-EINVAL) if *ptype_mask* is invalid (or) set_ptypes is NULL and
3620  *     num > 0.
3621  */
3622 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3623 			   uint32_t *set_ptypes, unsigned int num);
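
/*
 * Illustrative sketch: an application only interested in L3/L4 classification
 * can narrow the ptype work the PMD has to do. The array size of 16 is an
 * example; as described above, it should ideally be the supported-ptypes
 * count plus one.
 *
 *   uint32_t set_ptypes[16];
 *
 *   rte_eth_dev_set_ptypes(port_id, RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *                          set_ptypes, RTE_DIM(set_ptypes));
 *   // set_ptypes[] now lists the ptypes the PMD will report,
 *   // terminated by RTE_PTYPE_UNKNOWN.
 */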
3624 
3625 /**
3626  * Retrieve the MTU of an Ethernet device.
3627  *
3628  * @param port_id
3629  *   The port identifier of the Ethernet device.
3630  * @param mtu
3631  *   A pointer to a uint16_t where the retrieved MTU is to be stored.
3632  * @return
3633  *   - (0) if successful.
3634  *   - (-ENODEV) if *port_id* invalid.
3635  *   - (-EINVAL) if bad parameter.
3636  */
3637 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3638 
3639 /**
3640  * Change the MTU of an Ethernet device.
3641  *
3642  * @param port_id
3643  *   The port identifier of the Ethernet device.
3644  * @param mtu
3645  *   A uint16_t for the MTU to be applied.
3646  * @return
3647  *   - (0) if successful.
3648  *   - (-ENOTSUP) if operation is not supported.
3649  *   - (-ENODEV) if *port_id* invalid.
3650  *   - (-EIO) if device is removed.
3651  *   - (-EINVAL) if *mtu* is invalid; validation of the MTU can occur within
3652  *     rte_eth_dev_set_mtu() if dev_infos_get is supported by the device or
3653  *     when the MTU is set using dev->dev_ops->mtu_set.
3654  *   - (-EBUSY) if operation is not allowed when the port is running
3655  */
3656 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3657 
3658 /**
3659  * Enable/Disable hardware filtering by an Ethernet device of received
3660  * VLAN packets tagged with a given VLAN Tag Identifier.
3661  *
3662  * @param port_id
3663  *   The port identifier of the Ethernet device.
3664  * @param vlan_id
3665  *   The VLAN Tag Identifier whose filtering must be enabled or disabled.
3666  * @param on
3667  *   If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*.
3668  *   Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
3669  * @return
3670  *   - (0) if successful.
3671  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3672  *   - (-ENODEV) if *port_id* invalid.
3673  *   - (-EIO) if device is removed.
3674  *   - (-ENOSYS) if VLAN filtering on *port_id* disabled.
3675  *   - (-EINVAL) if *vlan_id* > 4095.
3676  */
3677 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3678 
3679 /**
3680  * Enable/Disable hardware VLAN Strip by a Rx queue of an Ethernet device.
3681  *
3682  * @param port_id
3683  *   The port identifier of the Ethernet device.
3684  * @param rx_queue_id
3685  *   The index of the receive queue on which VLAN stripping is to be enabled or disabled.
3686  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3687  *   to rte_eth_dev_configure().
3688  * @param on
3689  *   If 1, Enable VLAN Stripping of the receive queue of the Ethernet port.
3690  *   If 0, Disable VLAN Stripping of the receive queue of the Ethernet port.
3691  * @return
3692  *   - (0) if successful.
3693  *   - (-ENOTSUP) if hardware-assisted VLAN stripping not configured.
3694  *   - (-ENODEV) if *port_id* invalid.
3695  *   - (-EINVAL) if *rx_queue_id* invalid.
3696  */
3697 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3698 		int on);
3699 
3700 /**
3701  * Set the outer VLAN Ether Type of an Ethernet device; it can be inserted into
3702  * the VLAN header.
3703  *
3704  * @param port_id
3705  *   The port identifier of the Ethernet device.
3706  * @param vlan_type
3707  *   The VLAN type.
3708  * @param tag_type
3709  *   The Tag Protocol ID
3710  * @return
3711  *   - (0) if successful.
3712  *   - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
3713  *   - (-ENODEV) if *port_id* invalid.
3714  *   - (-EIO) if device is removed.
3715  */
3716 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3717 				    enum rte_vlan_type vlan_type,
3718 				    uint16_t tag_type);
3719 
3720 /**
3721  * Set VLAN offload configuration on an Ethernet device.
3722  *
3723  * @param port_id
3724  *   The port identifier of the Ethernet device.
3725  * @param offload_mask
3726  *   The VLAN offload bit mask, which can be a bitwise OR of:
3727  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3728  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3729  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3730  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3731  * @return
3732  *   - (0) if successful.
3733  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3734  *   - (-ENODEV) if *port_id* invalid.
3735  *   - (-EIO) if device is removed.
3736  */
3737 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3738 
3739 /**
3740  * Read VLAN Offload configuration from an Ethernet device
3741  *
3742  * @param port_id
3743  *   The port identifier of the Ethernet device.
3744  * @return
3745  *   - (>0) if successful. Bit mask to indicate
3746  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3747  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3748  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3749  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3750  *   - (-ENODEV) if *port_id* invalid.
3751  */
3752 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3753 
3754 /**
3755  * Set port based Tx VLAN insertion on or off.
3756  *
3757  * @param port_id
3758  *  The port identifier of the Ethernet device.
3759  * @param pvid
3760  *  Port based Tx VLAN identifier together with user priority.
3761  * @param on
3762  *  Turn on or off the port based Tx VLAN insertion.
3763  *
3764  * @return
3765  *   - (0) if successful.
3766  *   - negative if failed.
3767  */
3768 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3769 
3770 /**
3771  * @warning
3772  * @b EXPERIMENTAL: this API may change without prior notice.
3773  *
3774  * Set Rx queue available descriptors threshold.
3775  *
3776  * @param port_id
3777  *  The port identifier of the Ethernet device.
3778  * @param queue_id
3779  *  The index of the receive queue.
3780  * @param avail_thresh
3781  *  The available descriptors threshold is a percentage of the Rx queue size
3782  *  which describes the availability of the Rx queue for hardware.
3783  *  If the Rx queue availability is below it,
3784  *  the event RTE_ETH_EVENT_RX_AVAIL_THRESH is triggered.
3785  *  [1-99] to set a new available descriptors threshold.
3786  *  0 to disable threshold monitoring.
3787  *
3788  * @return
3789  *   - 0 if successful.
3790  *   - (-ENODEV) if @p port_id is invalid.
3791  *   - (-EINVAL) if bad parameter.
3792  *   - (-ENOTSUP) if available Rx descriptors threshold is not supported.
3793  *   - (-EIO) if device is removed.
3794  */
3795 __rte_experimental
3796 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3797 			       uint8_t avail_thresh);
3798 
3799 /**
3800  * @warning
3801  * @b EXPERIMENTAL: this API may change without prior notice.
3802  *
3803  * Find Rx queue with RTE_ETH_EVENT_RX_AVAIL_THRESH event pending.
3804  *
3805  * @param port_id
3806  *  The port identifier of the Ethernet device.
3807  * @param[inout] queue_id
3808  *  On input, the starting Rx queue index to search from.
3809  *  If the queue_id is bigger than the maximum queue ID of the port,
3810  *  the search starts from 0, so that the application can keep calling
3811  *  this function to handle all pending events with a simple increment
3812  *  of queue_id on the next call.
3813  *  On output, if the return value is 1, the Rx queue index with the event pending.
3814  * @param[out] avail_thresh
3815  *  Location for available descriptors threshold of the found Rx queue.
3816  *
3817  * @return
3818  *   - 1 if an Rx queue with pending event is found.
3819  *   - 0 if no Rx queue with pending event is found.
3820  *   - (-ENODEV) if @p port_id is invalid.
3821  *   - (-EINVAL) if bad parameter (e.g. @p queue_id is NULL).
3822  *   - (-ENOTSUP) if operation is not supported.
3823  *   - (-EIO) if device is removed.
3824  */
3825 __rte_experimental
3826 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3827 				 uint8_t *avail_thresh);
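
/*
 * Illustrative sketch: arm a 10% availability threshold on every Rx queue and,
 * when the RTE_ETH_EVENT_RX_AVAIL_THRESH event fires, walk all pending queues.
 * nb_rx_queues is an example variable holding the configured queue count.
 *
 *   for (uint16_t q = 0; q < nb_rx_queues; q++)
 *       rte_eth_rx_avail_thresh_set(port_id, q, 10);
 *
 *   // in the event handler:
 *   uint16_t queue_id = 0;
 *   uint8_t thresh;
 *   while (rte_eth_rx_avail_thresh_query(port_id, &queue_id, &thresh) == 1) {
 *       // drain or service queue_id here
 *       queue_id++; // continue the search from the next queue
 *   }
 */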
3828 
3829 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3830 		void *userdata);
3831 
3832 /**
3833  * Structure used to buffer packets for future Tx
3834  * Used by APIs rte_eth_tx_buffer and rte_eth_tx_buffer_flush
3835  */
3836 struct rte_eth_dev_tx_buffer {
3837 	buffer_tx_error_fn error_callback;
3838 	void *error_userdata;
3839 	uint16_t size;           /**< Size of buffer for buffered Tx */
3840 	uint16_t length;         /**< Number of packets in the array */
3841 	/** Pending packets to be sent on explicit flush or when full */
3842 	struct rte_mbuf *pkts[];
3843 };
3844 
3845 /**
3846  * Calculate the size of the Tx buffer.
3847  *
3848  * @param sz
3849  *   Number of stored packets.
3850  */
3851 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3852 	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3853 
3854 /**
3855  * Initialize default values for buffered transmitting
3856  *
3857  * @param buffer
3858  *   Tx buffer to be initialized.
3859  * @param size
3860  *   Buffer size
3861  * @return
3862  *   0 if no error
3863  */
3864 int
3865 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
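
/*
 * Illustrative sketch: allocate a Tx buffer for up to 32 packets on the port's
 * NUMA node and initialize it. rte_zmalloc_socket() comes from rte_malloc.h;
 * the burst size of 32 is an example value.
 *
 *   struct rte_eth_dev_tx_buffer *txb =
 *       rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                          rte_eth_dev_socket_id(port_id));
 *   if (txb != NULL)
 *       rte_eth_tx_buffer_init(txb, 32);
 */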
3866 
3867 /**
3868  * Configure a callback for buffered packets which cannot be sent
3869  *
3870  * Register a specific callback to be called when an attempt is made to send
3871  * all packets buffered on an Ethernet port, but not all packets can
3872  * successfully be sent. The callback registered here will be called only
3873  * from calls to rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() APIs.
3874  * The default callback configured for each queue simply frees the
3875  * packets back to their owning mempool. If additional behaviour is required,
3876  * for example, to count dropped packets, or to retry transmission of packets
3877  * which cannot be sent, this function should be used to register a suitable
3878  * callback function to implement the desired behaviour.
3879  * The example callback "rte_eth_tx_buffer_count_callback()" is also
3880  * provided as reference.
3881  *
3882  * @param buffer
3883  *   The Tx buffer in which to register the callback.
3884  * @param callback
3885  *   The function to be used as the callback.
3886  * @param userdata
3887  *   Arbitrary parameter to be passed to the callback function
3888  * @return
3889  *   0 on success, or -EINVAL if bad parameter
3890  */
3891 int
3892 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
3893 		buffer_tx_error_fn callback, void *userdata);
3894 
3895 /**
3896  * Callback function for silently dropping unsent buffered packets.
3897  *
3898  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3899  * adjust the default behavior when buffered packets cannot be sent. This
3900  * function drops any unsent packets silently and is used by Tx buffered
3901  * operations as default behavior.
3902  *
3903  * NOTE: this function should not be called directly, instead it should be used
3904  *       as a callback for packet buffering.
3905  *
3906  * NOTE: when configuring this function as a callback with
3907  *       rte_eth_tx_buffer_set_err_callback(), the final, userdata parameter
3908  *       should point to a uint64_t value.
3909  *
3910  * @param pkts
3911  *   The previously buffered packets which could not be sent
3912  * @param unsent
3913  *   The number of unsent packets in the pkts array
3914  * @param userdata
3915  *   Not used
3916  */
3917 void
3918 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3919 		void *userdata);
3920 
3921 /**
3922  * Callback function for tracking unsent buffered packets.
3923  *
3924  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3925  * adjust the default behavior when buffered packets cannot be sent. This
3926  * function drops any unsent packets, but also updates a user-supplied counter
3927  * to track the overall number of packets dropped. The counter should be a
3928  * uint64_t variable.
3929  *
3930  * NOTE: this function should not be called directly, instead it should be used
3931  *       as a callback for packet buffering.
3932  *
3933  * NOTE: when configuring this function as a callback with
3934  *       rte_eth_tx_buffer_set_err_callback(), the final, userdata parameter
3935  *       should point to a uint64_t value.
3936  *
3937  * @param pkts
3938  *   The previously buffered packets which could not be sent
3939  * @param unsent
3940  *   The number of unsent packets in the pkts array
3941  * @param userdata
3942  *   Pointer to a uint64_t value, which will be incremented by unsent
3943  */
3944 void
3945 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3946 		void *userdata);
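
/*
 * Illustrative sketch: count packets dropped on flush by registering the
 * counting callback above, then buffer and flush packets. txb is the buffer
 * from rte_eth_tx_buffer_init() and pkt an mbuf to send; both are assumptions
 * of this example.
 *
 *   static uint64_t tx_dropped;
 *
 *   rte_eth_tx_buffer_set_err_callback(txb, rte_eth_tx_buffer_count_callback,
 *                                      &tx_dropped);
 *
 *   uint16_t sent = rte_eth_tx_buffer(port_id, 0, txb, pkt);
 *   sent += rte_eth_tx_buffer_flush(port_id, 0, txb);
 *   // tx_dropped is incremented for every packet the flush could not send
 */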
3947 
3948 /**
3949  * Request the driver to free mbufs currently cached by the driver. The
3950  * driver will only free the mbuf if it is no longer in use. It is the
3951  * application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
3952  * called if needed.
3953  *
3954  * @param port_id
3955  *   The port identifier of the Ethernet device.
3956  * @param queue_id
3957  *   The index of the transmit queue through which output packets must be
3958  *   sent.
3959  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3960  *   to rte_eth_dev_configure().
3961  * @param free_cnt
3962  *   Maximum number of packets to free. Use 0 to indicate all possible packets
3963  *   should be freed. Note that a packet may be using multiple mbufs.
3964  * @return
3965  *   Failure: < 0
3966  *     -ENODEV: Invalid interface
3967  *     -EIO: device is removed
3968  *     -ENOTSUP: Driver does not support function
3969  *   Success: >= 0
3970  *     0-n: Number of packets freed. More packets may still remain in ring that
3971  *     are in use.
3972  */
3973 int
3974 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3975 
3976 /**
3977  * Subtypes for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
3978  * raised by Ethernet device.
3979  */
3980 enum rte_eth_event_macsec_subtype {
3981 	/** Notifies unknown MACsec subevent. */
3982 	RTE_ETH_SUBEVENT_MACSEC_UNKNOWN,
3983 	/**
3984 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3985 	 *	Validation check: SecTag.TCI.V = 1
3986 	 */
3987 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1,
3988 	/**
3989 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3990 	 *	Validation check: SecTag.TCI.E = 0 && SecTag.TCI.C = 1
3991 	 */
3992 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1,
3993 	/**
3994 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3995 	 *	Validation check: SecTag.SL >= 'd48
3996 	 */
3997 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48,
3998 	/**
3999 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
4000 	 *	Validation check: SecTag.TCI.ES = 1 && SecTag.TCI.SC = 1
4001 	 */
4002 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1,
4003 	/**
4004 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
4005 	 *	Validation check: SecTag.TCI.SC = 1 && SecTag.TCI.SCB = 1
4006 	 */
4007 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1,
4008 };
4009 
4010 /**
4011  * Event types for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
4012  * raised by eth device.
4013  */
4014 enum rte_eth_event_macsec_type {
4015 	/** Notifies unknown MACsec event. */
4016 	RTE_ETH_EVENT_MACSEC_UNKNOWN,
4017 	/** Notifies Sectag validation failure events. */
4018 	RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR,
4019 	/** Notifies Rx SA hard expiry events. */
4020 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP,
4021 	/** Notifies Rx SA soft expiry events. */
4022 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP,
4023 	/** Notifies Tx SA hard expiry events. */
4024 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP,
4025 	/** Notifies Tx SA soft expiry events. */
4026 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP,
4027 	/** Notifies Invalid SA event. */
4028 	RTE_ETH_EVENT_MACSEC_SA_NOT_VALID,
4029 };
4030 
4031 /**
4032  * Descriptor for @ref RTE_ETH_EVENT_MACSEC event.
4033  * Used by ethdev to send extra information of the MACsec offload event.
4034  */
4035 struct rte_eth_event_macsec_desc {
4036 	/** Type of RTE_ETH_EVENT_MACSEC_* event. */
4037 	enum rte_eth_event_macsec_type type;
4038 	/** Type of RTE_ETH_SUBEVENT_MACSEC_* subevent. */
4039 	enum rte_eth_event_macsec_subtype subtype;
4040 	/**
4041 	 * Event specific metadata.
4042 	 *
4043 	 * For the following events, *userdata* registered
4044 	 * with the *rte_security_session* would be returned
4045 	 * as metadata.
4046 	 *
4047 	 * @see struct rte_security_session_conf
4048 	 */
4049 	uint64_t metadata;
4050 };
4051 
4052 /**
4053  * Subtypes for IPsec offload event(@ref RTE_ETH_EVENT_IPSEC) raised by
4054  * eth device.
4055  */
4056 enum rte_eth_event_ipsec_subtype {
4057 	/**  PMD specific error start */
4058 	RTE_ETH_EVENT_IPSEC_PMD_ERROR_START = -256,
4059 	/**  PMD specific error end */
4060 	RTE_ETH_EVENT_IPSEC_PMD_ERROR_END = -1,
4061 	/** Unknown event type */
4062 	RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,
4063 	/** Sequence number overflow */
4064 	RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,
4065 	/** Soft time expiry of SA */
4066 	RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY,
4067 	/**
4068 	 * Soft byte expiry of SA determined by
4069 	 * @ref rte_security_ipsec_lifetime::bytes_soft_limit
4070 	 */
4071 	RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY,
4072 	/**
4073 	 * Soft packet expiry of SA determined by
4074 	 * @ref rte_security_ipsec_lifetime::packets_soft_limit
4075 	 */
4076 	RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY,
4077 	/**
4078 	 * Hard byte expiry of SA determined by
4079 	 * @ref rte_security_ipsec_lifetime::bytes_hard_limit
4080 	 */
4081 	RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY,
4082 	/**
4083 	 * Hard packet expiry of SA determined by
4084 	 * @ref rte_security_ipsec_lifetime::packets_hard_limit
4085 	 */
4086 	RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY,
4087 	/** Max value of this enum */
4088 	RTE_ETH_EVENT_IPSEC_MAX
4089 };
4090 
4091 /**
4092  * Descriptor for @ref RTE_ETH_EVENT_IPSEC event. Used by eth dev to send extra
4093  * information of the IPsec offload event.
4094  */
4095 struct rte_eth_event_ipsec_desc {
4096 	/** Type of RTE_ETH_EVENT_IPSEC_* event */
4097 	enum rte_eth_event_ipsec_subtype subtype;
4098 	/**
4099 	 * Event specific metadata.
4100 	 *
4101 	 * For the following events, *userdata* registered
4102 	 * with the *rte_security_session* would be returned
4103 	 * as metadata,
4104 	 *
4105 	 * - @ref RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
4106 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
4107 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
4108 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
4109 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
4110 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
4111 	 *
4112 	 * @see struct rte_security_session_conf
4113 	 *
4114 	 */
4115 	uint64_t metadata;
4116 };
4117 
4118 /**
4119  * The eth device event type for interrupt, and maybe others in the future.
4120  */
4121 enum rte_eth_event_type {
4122 	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
4123 	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
4124 	/** queue state event (enabled/disabled) */
4125 	RTE_ETH_EVENT_QUEUE_STATE,
4126 	/** reset interrupt event, sent to VF on PF reset */
4127 	RTE_ETH_EVENT_INTR_RESET,
4128 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
4129 	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
4130 	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
4131 	RTE_ETH_EVENT_NEW,      /**< port is probed */
4132 	RTE_ETH_EVENT_DESTROY,  /**< port is released */
4133 	RTE_ETH_EVENT_IPSEC,    /**< IPsec offload related event */
4134 	RTE_ETH_EVENT_FLOW_AGED,/**< New aged-out flows are detected */
4135 	/**
4136 	 * Number of available Rx descriptors is smaller than the threshold.
4137 	 * @see rte_eth_rx_avail_thresh_set()
4138 	 */
4139 	RTE_ETH_EVENT_RX_AVAIL_THRESH,
4140 	/** Port recovering from a hardware or firmware error.
4141 	 * If PMD supports proactive error recovery,
4142 	 * it should trigger this event to notify the application
4143 	 * that it detected an error and that the recovery is being started.
4144 	 * Upon receiving the event, the application should not invoke any control path API
4145 	 * (such as rte_eth_dev_configure/rte_eth_dev_stop...) until receiving
4146 	 * RTE_ETH_EVENT_RECOVERY_SUCCESS or RTE_ETH_EVENT_RECOVERY_FAILED event.
4147 	 * The PMD will set the data path pointers to dummy functions,
4148 	 * and re-set the data path pointers to non-dummy functions
4149 	 * before reporting RTE_ETH_EVENT_RECOVERY_SUCCESS event.
4150 	 * It means that the application cannot send or receive any packets
4151 	 * during this period.
4152 	 * @note Before the PMD reports the recovery result,
4153 	 * the PMD may report the RTE_ETH_EVENT_ERR_RECOVERING event again,
4154 	 * because a more severe error may occur during the recovery.
4155 	 */
4156 	RTE_ETH_EVENT_ERR_RECOVERING,
4157 	/** Port recovers successfully from the error.
4158 	 * The PMD already re-configured the port,
4159 	 * and the effect is the same as a restart operation.
4160 	 * a) The following configuration will be retained (alphabetically):
4161 	 *    - DCB configuration
4162 	 *    - FEC configuration
4163 	 *    - Flow control configuration
4164 	 *    - LRO configuration
4165 	 *    - LSC configuration
4166 	 *    - MTU
4167 	 *    - MAC address (default and those supplied by MAC address array)
4168 	 *    - Promiscuous and allmulticast mode
4169 	 *    - PTP configuration
4170 	 *    - Queue (Rx/Tx) settings
4171 	 *    - Queue statistics mappings
4172 	 *    - RSS configuration by rte_eth_dev_rss_xxx() family
4173 	 *    - Rx checksum configuration
4174 	 *    - Rx interrupt settings
4175 	 *    - Traffic management configuration
4176 	 *    - VLAN configuration (including filtering, tpid, strip, pvid)
4177 	 *    - VMDq configuration
4178 	 * b) The following configuration may or may not
4179 	 *    be retained, depending on the device capabilities:
4180 	 *    - flow rules
4181 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP)
4182 	 *    - shared flow objects
4183 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP)
4184 	 * c) Any other configuration will not be stored
4185 	 *    and will need to be re-configured.
4186 	 */
4187 	RTE_ETH_EVENT_RECOVERY_SUCCESS,
4188 	/** Port recovery failed.
4189 	 * The port is no longer usable,
4190 	 * and the application should close it.
4191 	 */
4192 	RTE_ETH_EVENT_RECOVERY_FAILED,
4193 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
4194 };
4195 
4196 /**
4197  * User application callback to be registered for interrupts.
4198  *
4199  * Note: there is no guarantee in the DPDK drivers that a callback won't be
4200  *       called in the middle of other parts of the ethdev API. For example,
4201  *       imagine that thread A calls rte_eth_dev_start() and as part of this
4202  *       call, a RTE_ETH_EVENT_INTR_RESET event gets generated and the
4203  *       associated callback is run on thread A. In that example, if the
4204  *       application protects its internal data using locks before calling
4205  *       rte_eth_dev_start(), and the callback takes the same lock, a deadlock
4206  *       occurs. Because of this, it is highly recommended NOT to take locks in
4207  *       those callbacks.
4208  */
4209 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4210 		enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4211 
4212 /**
4213  * Register a callback function for port event.
4214  *
4215  * @param port_id
4216  *  Port ID.
4217  *  RTE_ETH_ALL means register the event for all port ids.
4218  * @param event
4219  *  The event of interest.
4220  * @param cb_fn
4221  *  User supplied callback function to be called.
4222  * @param cb_arg
4223  *  Pointer to the parameters for the registered callback.
4224  *
4225  * @return
4226  *  - On success, zero.
4227  *  - On failure, a negative value.
4228  */
4229 int rte_eth_dev_callback_register(uint16_t port_id,
4230 			enum rte_eth_event_type event,
4231 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
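
/*
 * Example (illustrative sketch, not part of the ethdev API): registering a
 * handler for link status change events on all ports. The handler name
 * lsc_event_handler is hypothetical.
 *
 *   static int
 *   lsc_event_handler(uint16_t port_id, enum rte_eth_event_type event,
 *                     void *cb_arg, void *ret_param)
 *   {
 *       RTE_SET_USED(cb_arg);
 *       RTE_SET_USED(ret_param);
 *       if (event == RTE_ETH_EVENT_INTR_LSC)
 *           printf("port %u: link status changed\n", port_id);
 *       return 0;
 *   }
 *
 *   rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *                                 lsc_event_handler, NULL);
 *
 * As noted above, the handler should avoid taking locks that may also be held
 * around other ethdev calls.
 */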
4232 
4233 /**
4234  * Unregister a callback function for port event.
4235  *
4236  * @param port_id
4237  *  Port ID.
4238  *  RTE_ETH_ALL means unregister the event for all port ids.
4239  * @param event
4240  *  The event of interest.
4241  * @param cb_fn
4242  *  User supplied callback function to be called.
4243  * @param cb_arg
4244  *  Pointer to the parameters for the registered callback. (void *)-1 means to
4245  *  remove all callbacks registered with the same callback address and event.
4246  *
4247  * @return
4248  *  - On success, zero.
4249  *  - On failure, a negative value.
4250  */
4251 int rte_eth_dev_callback_unregister(uint16_t port_id,
4252 			enum rte_eth_event_type event,
4253 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4254 
4255 /**
4256  * When no Rx packets arrive on an Rx queue for a long time, the lcore
4257  * handling that queue can sleep for power saving, with an Rx interrupt
4258  * enabled to wake it up when a packet arrives.
4259  *
4260  * The rte_eth_dev_rx_intr_enable() function enables Rx queue
4261  * interrupt on specific Rx queue of a port.
4262  *
4263  * @param port_id
4264  *   The port identifier of the Ethernet device.
4265  * @param queue_id
4266  *   The index of the receive queue from which to retrieve input packets.
4267  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4268  *   to rte_eth_dev_configure().
4269  * @return
4270  *   - (0) if successful.
4271  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4272  *     that operation.
4273  *   - (-ENODEV) if *port_id* invalid.
4274  *   - (-EIO) if device is removed.
4275  */
4276 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4277 
4278 /**
4279  * When an lcore wakes up from an Rx interrupt indicating packet arrival,
4280  * it disables the Rx interrupt and returns to polling mode.
4281  *
4282  * The rte_eth_dev_rx_intr_disable() function disables Rx queue
4283  * interrupt on specific Rx queue of a port.
4284  *
4285  * @param port_id
4286  *   The port identifier of the Ethernet device.
4287  * @param queue_id
4288  *   The index of the receive queue from which to retrieve input packets.
4289  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4290  *   to rte_eth_dev_configure().
4291  * @return
4292  *   - (0) if successful.
4293  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4294  *     that operation.
4295  *   - (-ENODEV) if *port_id* invalid.
4296  *   - (-EIO) if device is removed.
4297  */
4298 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4299 
4300 /**
4301  * Rx Interrupt control per port.
4302  *
4303  * @param port_id
4304  *   The port identifier of the Ethernet device.
4305  * @param epfd
4306  *   Epoll instance fd to which the interrupt vector is associated.
4307  *   Using RTE_EPOLL_PER_THREAD allows the use of a per-thread epoll instance.
4308  * @param op
4309  *   The operation to be performed on the vector.
4310  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4311  * @param data
4312  *   User raw data.
4313  * @return
4314  *   - On success, zero.
4315  *   - On failure, a negative value.
4316  */
4317 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4318 
4319 /**
4320  * Rx Interrupt control per queue.
4321  *
4322  * @param port_id
4323  *   The port identifier of the Ethernet device.
4324  * @param queue_id
4325  *   The index of the receive queue from which to retrieve input packets.
4326  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4327  *   to rte_eth_dev_configure().
4328  * @param epfd
4329  *   Epoll instance fd to which the interrupt vector is associated.
4330  *   Using RTE_EPOLL_PER_THREAD allows the use of a per-thread epoll instance.
4331  * @param op
4332  *   The operation to be performed on the vector.
4333  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4334  * @param data
4335  *   User raw data.
4336  * @return
4337  *   - On success, zero.
4338  *   - On failure, a negative value.
4339  */
4340 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4341 			      int epfd, int op, void *data);
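
/*
 * Example (illustrative sketch, assuming the EAL interrupt API from
 * rte_interrupts.h): mapping an Rx queue interrupt into a per-thread epoll
 * instance, then sleeping until traffic arrives before resuming polling.
 *
 *   struct rte_epoll_event ev;
 *
 *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);  // block until Rx interrupt
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *   // resume polling with rte_eth_rx_burst()
 */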
4342 
4343 /**
4344  * Get interrupt fd per Rx queue.
4345  *
4346  * @param port_id
4347  *   The port identifier of the Ethernet device.
4348  * @param queue_id
4349  *   The index of the receive queue from which to retrieve input packets.
4350  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4351  *   to rte_eth_dev_configure().
4352  * @return
4353  *   - (>=0) the interrupt fd associated to the requested Rx queue if
4354  *           successful.
4355  *   - (-1) on error.
4356  */
4357 int
4358 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4359 
4360 /**
4361  * Turn on the LED on the Ethernet device.
4363  *
4364  * @param port_id
4365  *   The port identifier of the Ethernet device.
4366  * @return
4367  *   - (0) if successful.
4368  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4369  *     that operation.
4370  *   - (-ENODEV) if *port_id* invalid.
4371  *   - (-EIO) if device is removed.
4372  */
4373 int  rte_eth_led_on(uint16_t port_id);
4374 
4375 /**
4376  * Turn off the LED on the Ethernet device.
4378  *
4379  * @param port_id
4380  *   The port identifier of the Ethernet device.
4381  * @return
4382  *   - (0) if successful.
4383  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4384  *     that operation.
4385  *   - (-ENODEV) if *port_id* invalid.
4386  *   - (-EIO) if device is removed.
4387  */
4388 int  rte_eth_led_off(uint16_t port_id);
4389 
4390 /**
4391  * @warning
4392  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4393  *
4394  * Get Forward Error Correction (FEC) capability.
4395  *
4396  * @param port_id
4397  *   The port identifier of the Ethernet device.
4398  * @param speed_fec_capa
4399  *   Output-only array of per-speed FEC capabilities.
4400  *   If set to NULL, the function returns the required number
4401  *   of array entries.
4402  * @param num
4403  *   Number of elements in the speed_fec_capa array.
4404  *
4405  * @return
4406  *   - A non-negative value lower or equal to num: success. The return value
4407  *     is the number of entries filled in the fec capa array.
4408  *   - A non-negative value higher than num: error, the given fec capa array
4409  *     is too small. The return value corresponds to the num that should
4410  *     be given to succeed. The entries in fec capa array are not valid and
4411  *     shall not be used by the caller.
4412  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4413  *     that operation.
4414  *   - (-EIO) if device is removed.
4415  *   - (-ENODEV)  if *port_id* invalid.
4416  *   - (-EINVAL)  if *num* or *speed_fec_capa* invalid
4417  */
4418 __rte_experimental
4419 int rte_eth_fec_get_capability(uint16_t port_id,
4420 			       struct rte_eth_fec_capa *speed_fec_capa,
4421 			       unsigned int num);
4422 
4423 /**
4424  * @warning
4425  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4426  *
4427  * Get current Forward Error Correction (FEC) mode.
4428  * If the link is down and AUTO is enabled, AUTO is returned; otherwise,
4429  * the configured FEC mode is returned.
4430  * If the link is up, the current FEC mode is returned.
4431  *
4432  * @param port_id
4433  *   The port identifier of the Ethernet device.
4434  * @param fec_capa
4435  *   Pointer to a bitmask filled with the current FEC mode.
4436  * @return
4437  *   - (0) if successful.
4438  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4439  *     that operation.
4440  *   - (-EIO) if device is removed.
4441  *   - (-ENODEV)  if *port_id* invalid.
4442  */
4443 __rte_experimental
4444 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4445 
4446 /**
4447  * @warning
4448  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4449  *
4450  * Set Forward Error Correction (FEC) mode.
4451  *
4452  * @param port_id
4453  *   The port identifier of the Ethernet device.
4454  * @param fec_capa
4455  *   A bitmask of allowed FEC modes.
4456  *   If only the AUTO bit is set, the decision on which FEC
4457  *   mode to use will be made by HW/FW or driver.
4458  *   If the AUTO bit is set with some FEC modes, only specified
4459  *   FEC modes can be set.
4460  *   If AUTO bit is clear, specify FEC mode to be used
4461  *   (only one valid mode per speed may be set).
4462  * @return
4463  *   - (0) if successful.
4464  *   - (-EINVAL) if the FEC mode is not valid.
4465  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support.
4466  *   - (-EIO) if device is removed.
4467  *   - (-ENODEV)  if *port_id* invalid.
4468  */
4469 __rte_experimental
4470 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
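
/*
 * Example (illustrative sketch): querying the per-speed FEC capabilities and
 * then letting the hardware/firmware choose the mode by requesting AUTO.
 *
 *   int n = rte_eth_fec_get_capability(port_id, NULL, 0);
 *
 *   if (n > 0) {
 *       struct rte_eth_fec_capa *capa = calloc(n, sizeof(*capa));
 *
 *       if (capa != NULL && rte_eth_fec_get_capability(port_id, capa, n) == n)
 *           rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(AUTO));
 *       free(capa);
 *   }
 */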
4471 
4472 /**
4473  * Get current status of the Ethernet link flow control for Ethernet device
4474  *
4475  * @param port_id
4476  *   The port identifier of the Ethernet device.
4477  * @param fc_conf
4478  *   The pointer to the structure where to store the flow control parameters.
4479  * @return
4480  *   - (0) if successful.
4481  *   - (-ENOTSUP) if hardware doesn't support flow control.
4482  *   - (-ENODEV)  if *port_id* invalid.
4483  *   - (-EIO)  if device is removed.
4484  *   - (-EINVAL) if bad parameter.
4485  */
4486 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4487 			      struct rte_eth_fc_conf *fc_conf);
4488 
4489 /**
4490  * Configure the Ethernet link flow control for Ethernet device
4491  *
4492  * @param port_id
4493  *   The port identifier of the Ethernet device.
4494  * @param fc_conf
4495  *   The pointer to the structure of the flow control parameters.
4496  * @return
4497  *   - (0) if successful.
4498  *   - (-ENOTSUP) if hardware doesn't support flow control mode.
4499  *   - (-ENODEV)  if *port_id* invalid.
4500  *   - (-EINVAL)  if bad parameter
4501  *   - (-EIO)     if flow control setup failure or device is removed.
4502  */
4503 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4504 			      struct rte_eth_fc_conf *fc_conf);
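
/*
 * Example (illustrative sketch): a read-modify-write sequence enabling full
 * (Rx and Tx) link-level flow control while keeping the other parameters.
 *
 *   struct rte_eth_fc_conf fc_conf;
 *
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *       fc_conf.mode = RTE_ETH_FC_FULL;
 *       rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *   }
 */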
4505 
4506 /**
4507  * Configure the Ethernet priority flow control under DCB environment
4508  * for Ethernet device.
4509  *
4510  * @param port_id
4511  * The port identifier of the Ethernet device.
4512  * @param pfc_conf
4513  * The pointer to the structure of the priority flow control parameters.
4514  * @return
4515  *   - (0) if successful.
4516  *   - (-ENOTSUP) if hardware doesn't support priority flow control mode.
4517  *   - (-ENODEV)  if *port_id* invalid.
4518  *   - (-EINVAL)  if bad parameter
4519  *   - (-EIO)     if flow control setup failure or device is removed.
4520  */
4521 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4522 				struct rte_eth_pfc_conf *pfc_conf);
4523 
4524 /**
4525  * Add a MAC address to the set used for filtering incoming packets.
4526  *
4527  * @param port_id
4528  *   The port identifier of the Ethernet device.
4529  * @param mac_addr
4530  *   The MAC address to add.
4531  * @param pool
4532  *   VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
4533  *   not enabled, this should be set to 0.
4534  * @return
4535  *   - (0) if successfully added or *mac_addr* was already added.
4536  *   - (-ENOTSUP) if hardware doesn't support this feature.
4537  *   - (-ENODEV) if *port* is invalid.
4538  *   - (-EIO) if device is removed.
4539  *   - (-ENOSPC) if no more MAC addresses can be added.
4540  *   - (-EINVAL) if MAC address is invalid.
4541  */
4542 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4543 				uint32_t pool);
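
/*
 * Example (illustrative sketch): adding a locally administered MAC address to
 * the default pool (pool 0, VMDq not enabled).
 *
 *   struct rte_ether_addr addr = {
 *       .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *   };
 *
 *   rte_eth_dev_mac_addr_add(port_id, &addr, 0);
 */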
4544 
4545 /**
4546  * @warning
4547  * @b EXPERIMENTAL: this API may change without prior notice.
4548  *
4549  * Retrieve the information for queue based PFC.
4550  *
4551  * @param port_id
4552  *   The port identifier of the Ethernet device.
4553  * @param pfc_queue_info
4554  *   A pointer to a structure of type *rte_eth_pfc_queue_info* to be filled with
4555  *   the information about queue based PFC.
4556  * @return
4557  *   - (0) if successful.
4558  *   - (-ENOTSUP) if support for priority_flow_ctrl_queue_info_get does not exist.
4559  *   - (-ENODEV) if *port_id* invalid.
4560  *   - (-EINVAL) if bad parameter.
4561  */
4562 __rte_experimental
4563 int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4564 		struct rte_eth_pfc_queue_info *pfc_queue_info);
4565 
4566 /**
4567  * @warning
4568  * @b EXPERIMENTAL: this API may change without prior notice.
4569  *
4570  * Configure the queue based priority flow control for a given queue
4571  * for Ethernet device.
4572  *
4573  * @note When an ethdev port switches to queue based PFC mode, the
4574  * unconfigured queues shall be configured by the driver with
4575  * default values, such as a lower priority value for the TC.
4576  *
4577  * @param port_id
4578  *   The port identifier of the Ethernet device.
4579  * @param pfc_queue_conf
4580  *   The pointer to the structure of the priority flow control parameters
4581  *   for the queue.
4582  * @return
4583  *   - (0) if successful.
4584  *   - (-ENOTSUP) if hardware doesn't support queue based PFC mode.
4585  *   - (-ENODEV)  if *port_id* invalid.
4586  *   - (-EINVAL)  if bad parameter
4587  *   - (-EIO)     if flow control setup queue failure
4588  */
4589 __rte_experimental
4590 int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4591 		struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4592 
4593 /**
4594  * Remove a MAC address from the internal array of addresses.
4595  *
4596  * @param port_id
4597  *   The port identifier of the Ethernet device.
4598  * @param mac_addr
4599  *   MAC address to remove.
4600  * @return
4601  *   - (0) if successful, or *mac_addr* didn't exist.
4602  *   - (-ENOTSUP) if hardware doesn't support.
4603  *   - (-ENODEV) if *port* invalid.
4604  *   - (-EADDRINUSE) if attempting to remove the default MAC address.
4605  *   - (-EINVAL) if MAC address is invalid.
4606  */
4607 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4608 				struct rte_ether_addr *mac_addr);
4609 
4610 /**
4611  * Set the default MAC address.
4612  * It replaces the address at index 0 of the MAC address list.
4613  * If the address was already in the MAC address list,
4614  * please remove it first.
4615  *
4616  * @param port_id
4617  *   The port identifier of the Ethernet device.
4618  * @param mac_addr
4619  *   New default MAC address.
4620  * @return
4621  *   - (0) if successful, or *mac_addr* didn't exist.
4622  *   - (-ENOTSUP) if hardware doesn't support.
4623  *   - (-ENODEV) if *port* invalid.
4624  *   - (-EINVAL) if MAC address is invalid.
4625  *   - (-EEXIST) if MAC address was already in the address list.
4626  */
4627 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4628 		struct rte_ether_addr *mac_addr);
4629 
4630 /**
4631  * Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
4632  *
4633  * @param port_id
4634  *   The port identifier of the Ethernet device.
4635  * @param reta_conf
4636  *   RETA to update.
4637  * @param reta_size
4638  *   Redirection table size. The table size can be queried by
4639  *   rte_eth_dev_info_get().
4640  * @return
4641  *   - (0) if successful.
4642  *   - (-ENODEV) if *port_id* is invalid.
4643  *   - (-ENOTSUP) if hardware doesn't support.
4644  *   - (-EINVAL) if bad parameter.
4645  *   - (-EIO) if device is removed.
4646  */
4647 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4648 				struct rte_eth_rss_reta_entry64 *reta_conf,
4649 				uint16_t reta_size);
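
/*
 * Example (illustrative sketch): spreading the whole redirection table evenly
 * across nb_rx_queues queues (nb_rx_queues is assumed to be set by the
 * application, and the device reta_size is assumed not to exceed 512 entries).
 * The table is described in groups of RTE_ETH_RETA_GROUP_SIZE entries; each
 * group carries a mask selecting which of its entries are updated.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
 *                                             RTE_ETH_RETA_GROUP_SIZE] = {0};
 *   struct rte_eth_dev_info dev_info;
 *   uint16_t i;
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   for (i = 0; i < dev_info.reta_size; i++) {
 *       uint16_t grp = i / RTE_ETH_RETA_GROUP_SIZE;
 *       uint16_t pos = i % RTE_ETH_RETA_GROUP_SIZE;
 *
 *       reta_conf[grp].mask |= RTE_BIT64(pos);
 *       reta_conf[grp].reta[pos] = i % nb_rx_queues;
 *   }
 *   rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */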
4650 
4651 /**
4652  * Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
4653  *
4654  * @param port_id
4655  *   The port identifier of the Ethernet device.
4656  * @param reta_conf
4657  *   RETA to query. For each requested reta entry, corresponding bit
4658  *   in mask must be set.
4659  * @param reta_size
4660  *   Redirection table size. The table size can be queried by
4661  *   rte_eth_dev_info_get().
4662  * @return
4663  *   - (0) if successful.
4664  *   - (-ENODEV) if *port_id* is invalid.
4665  *   - (-ENOTSUP) if hardware doesn't support.
4666  *   - (-EINVAL) if bad parameter.
4667  *   - (-EIO) if device is removed.
4668  */
4669 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4670 			       struct rte_eth_rss_reta_entry64 *reta_conf,
4671 			       uint16_t reta_size);
4672 
4673 /**
4674  * Update the unicast hash table for receiving packets with the given destination
4675  * MAC address. Such packets are routed to all VFs whose Rx mode accepts
4676  * packets matching the unicast hash table.
4677  *
4678  * @param port_id
4679  *   The port identifier of the Ethernet device.
4680  * @param addr
4681  *   Unicast MAC address.
4682  * @param on
4683  *    1 - Set a unicast hash bit for receiving packets with the MAC address.
4684  *    0 - Clear an unicast hash bit.
4685  * @return
4686  *   - (0) if successful.
4687  *   - (-ENOTSUP) if hardware doesn't support.
4688  *   - (-ENODEV) if *port_id* invalid.
4689  *   - (-EIO) if device is removed.
4690  *   - (-EINVAL) if bad parameter.
4691  */
4692 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4693 				  uint8_t on);
4694 
4695 /**
4696  * Update all unicast hash bitmaps for receiving packets with any unicast
4697  * Ethernet MAC address. Such packets are routed to all VFs whose Rx
4698  * mode accepts packets matching the unicast hash table.
4699  *
4700  * @param port_id
4701  *   The port identifier of the Ethernet device.
4702  * @param on
4703  *    1 - Set all unicast hash bitmaps for receiving all the Ethernet
4704  *         MAC addresses
4705  *    0 - Clear all unicast hash bitmaps
4706  * @return
4707  *   - (0) if successful.
4708  *   - (-ENOTSUP) if hardware doesn't support.
4709  *   - (-ENODEV) if *port_id* invalid.
4710  *   - (-EIO) if device is removed.
4711  *   - (-EINVAL) if bad parameter.
4712  */
4713 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4714 
4715 /**
4716  * Set the rate limitation for a queue on an Ethernet device.
4717  *
4718  * @param port_id
4719  *   The port identifier of the Ethernet device.
4720  * @param queue_idx
4721  *   The queue ID.
4722  * @param tx_rate
4723  *   The Tx rate in Mbps. Allocated from the total port link speed.
4724  * @return
4725  *   - (0) if successful.
4726  *   - (-ENOTSUP) if hardware doesn't support this feature.
4727  *   - (-ENODEV) if *port_id* invalid.
4728  *   - (-EIO) if device is removed.
4729  *   - (-EINVAL) if bad parameter.
4730  */
4731 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4732 			uint32_t tx_rate);
4733 
4734 /**
4735  * Configuration of Receive Side Scaling hash computation of Ethernet device.
4736  *
4737  * @param port_id
4738  *   The port identifier of the Ethernet device.
4739  * @param rss_conf
4740  *   The new configuration to use for RSS hash computation on the port.
4741  * @return
4742  *   - (0) if successful.
4743  *   - (-ENODEV) if port identifier is invalid.
4744  *   - (-EIO) if device is removed.
4745  *   - (-ENOTSUP) if hardware doesn't support.
4746  *   - (-EINVAL) if bad parameter.
4747  */
4748 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4749 				struct rte_eth_rss_conf *rss_conf);
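
/*
 * Example (illustrative sketch): keeping the current RSS key but restricting
 * the hash input to IP and TCP fields.
 *
 *   struct rte_eth_rss_conf rss_conf = {
 *       .rss_key = NULL,  // keep the current/default key
 *       .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *   };
 *
 *   rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */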
4750 
4751 /**
4752  * Retrieve current configuration of Receive Side Scaling hash computation
4753  * of Ethernet device.
4754  *
4755  * @param port_id
4756  *   The port identifier of the Ethernet device.
4757  * @param rss_conf
4758  *   Where to store the current RSS hash configuration of the Ethernet device.
4759  * @return
4760  *   - (0) if successful.
4761  *   - (-ENODEV) if port identifier is invalid.
4762  *   - (-EIO) if device is removed.
4763  *   - (-ENOTSUP) if hardware doesn't support RSS.
4764  *   - (-EINVAL) if bad parameter.
4765  */
4766 int
4767 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4768 			      struct rte_eth_rss_conf *rss_conf);
4769 
4770 /**
4771  * @warning
4772  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
4773  *
4774  *  Get the name of RSS hash algorithm.
4775  *
4776  * @param rss_algo
4777  *   Hash algorithm.
4778  *
4779  * @return
4780  *   Hash algorithm name or 'UNKNOWN' if the rss_algo cannot be recognized.
4781  */
4782 __rte_experimental
4783 const char *
4784 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo);
4785 
4786 /**
4787  * @warning
4788  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
4789  *
4790  * Get RSS hash algorithm by its name.
4791  *
4792  * @param name
4793  *   Name of the RSS hash algorithm.
4794  *
4795  * @param algo
4796  *   Return the RSS hash algorithm found, @see rte_eth_hash_function.
4797  *
4798  * @return
4799  *   - (0) if successful.
4800  *   - (-EINVAL) if not found.
4801  */
4802 __rte_experimental
4803 int
4804 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4805 
4806 /**
4807  * Add UDP tunneling port for a type of tunnel.
4808  *
4809  * Some NICs may require such configuration to properly parse a tunnel
4810  * with any standard or custom UDP port.
4811  * The packets with this UDP port will be parsed for this type of tunnel.
4812  * The device parser will also check the rest of the tunnel headers
4813  * before classifying the packet.
4814  *
4815  * With some devices, this API will affect packet classification, i.e.:
4816  *     - mbuf.packet_type reported on Rx
4817  *     - rte_flow rules with tunnel items
4818  *
4819  * @param port_id
4820  *   The port identifier of the Ethernet device.
4821  * @param tunnel_udp
4822  *   UDP tunneling configuration.
4823  *
4824  * @return
4825  *   - (0) if successful.
4826  *   - (-ENODEV) if port identifier is invalid.
4827  *   - (-EIO) if device is removed.
4828  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4829  */
4830 int
4831 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4832 				struct rte_eth_udp_tunnel *tunnel_udp);
4833 
4834 /**
4835  * Delete UDP tunneling port for a type of tunnel.
4836  *
4837  * The packets with this UDP port will not be classified as this type of tunnel
4838  * anymore if the device uses such a mapping for tunnel packet classification.
4839  *
4840  * @see rte_eth_dev_udp_tunnel_port_add
4841  *
4842  * @param port_id
4843  *   The port identifier of the Ethernet device.
4844  * @param tunnel_udp
4845  *   UDP tunneling configuration.
4846  *
4847  * @return
4848  *   - (0) if successful.
4849  *   - (-ENODEV) if port identifier is invalid.
4850  *   - (-EIO) if device is removed.
4851  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4852  */
4853 int
4854 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4855 				   struct rte_eth_udp_tunnel *tunnel_udp);
4856 
4857 /**
4858  * Get DCB information on an Ethernet device.
4859  *
4860  * @param port_id
4861  *   The port identifier of the Ethernet device.
4862  * @param dcb_info
4863  *   DCB information.
4864  * @return
4865  *   - (0) if successful.
4866  *   - (-ENODEV) if port identifier is invalid.
4867  *   - (-EIO) if device is removed.
4868  *   - (-ENOTSUP) if hardware doesn't support.
4869  *   - (-EINVAL) if bad parameter.
4870  */
4871 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4872 			     struct rte_eth_dcb_info *dcb_info);
4873 
4874 struct rte_eth_rxtx_callback;
4875 
4876 /**
4877  * Add a callback to be called on packet Rx on a given port and queue.
4878  *
4879  * This API configures a function to be called for each burst of
4880  * packets received on a given NIC port queue. The return value is a pointer
4881  * that can be used to later remove the callback using
4882  * rte_eth_remove_rx_callback().
4883  *
4884  * Multiple functions are called in the order that they are added.
4885  *
4886  * @param port_id
4887  *   The port identifier of the Ethernet device.
4888  * @param queue_id
4889  *   The queue on the Ethernet device on which the callback is to be added.
4890  * @param fn
4891  *   The callback function
4892  * @param user_param
4893  *   A generic pointer parameter which will be passed to each invocation of the
4894  *   callback function on this port and queue. Inter-thread synchronization
4895  *   of any user data changes is the responsibility of the user.
4896  *
4897  * @return
4898  *   NULL on error.
4899  *   On success, a pointer value which can later be used to remove the callback.
4900  */
4901 const struct rte_eth_rxtx_callback *
4902 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4903 		rte_rx_callback_fn fn, void *user_param);
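
/*
 * Example (illustrative sketch): a callback counting packets received on a
 * queue. user_param is assumed to point to a uint64_t counter owned by the
 * application and only accessed from the polling thread.
 *
 *   static uint16_t
 *   count_rx_pkts(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *                 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *   {
 *       uint64_t *counter = user_param;
 *
 *       RTE_SET_USED(port_id);
 *       RTE_SET_USED(queue_id);
 *       RTE_SET_USED(pkts);
 *       RTE_SET_USED(max_pkts);
 *       *counter += nb_pkts;
 *       return nb_pkts;
 *   }
 *
 *   const struct rte_eth_rxtx_callback *cb =
 *       rte_eth_add_rx_callback(port_id, queue_id, count_rx_pkts, &rx_count);
 */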
4904 
4905 /**
4906  * Add a callback that must be called first on packet Rx on a given port
4907  * and queue.
4908  *
4909  * This API configures a first function to be called for each burst of
4910  * packets received on a given NIC port queue. The return value is a pointer
4911  * that can be used to later remove the callback using
4912  * rte_eth_remove_rx_callback().
4913  *
4914  * Multiple functions are called in the order that they are added.
4915  *
4916  * @param port_id
4917  *   The port identifier of the Ethernet device.
4918  * @param queue_id
4919  *   The queue on the Ethernet device on which the callback is to be added.
4920  * @param fn
4921  *   The callback function
4922  * @param user_param
4923  *   A generic pointer parameter which will be passed to each invocation of the
4924  *   callback function on this port and queue. Inter-thread synchronization
4925  *   of any user data changes is the responsibility of the user.
4926  *
4927  * @return
4928  *   NULL on error.
4929  *   On success, a pointer value which can later be used to remove the callback.
4930  */
4931 const struct rte_eth_rxtx_callback *
4932 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4933 		rte_rx_callback_fn fn, void *user_param);
4934 
4935 /**
4936  * Add a callback to be called on packet Tx on a given port and queue.
4937  *
4938  * This API configures a function to be called for each burst of
4939  * packets sent on a given NIC port queue. The return value is a pointer
4940  * that can be used to later remove the callback using
4941  * rte_eth_remove_tx_callback().
4942  *
4943  * Multiple functions are called in the order that they are added.
4944  *
4945  * @param port_id
4946  *   The port identifier of the Ethernet device.
4947  * @param queue_id
4948  *   The queue on the Ethernet device on which the callback is to be added.
4949  * @param fn
4950  *   The callback function
4951  * @param user_param
4952  *   A generic pointer parameter which will be passed to each invocation of the
4953  *   callback function on this port and queue. Inter-thread synchronization
4954  *   of any user data changes is the responsibility of the user.
4955  *
4956  * @return
4957  *   NULL on error.
4958  *   On success, a pointer value which can later be used to remove the callback.
4959  */
4960 const struct rte_eth_rxtx_callback *
4961 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4962 		rte_tx_callback_fn fn, void *user_param);
4963 
4964 /**
4965  * Remove an Rx packet callback from a given port and queue.
4966  *
4967  * This function is used to remove callbacks that were added to a NIC port
4968  * queue using rte_eth_add_rx_callback().
4969  *
4970  * Note: the callback is removed from the callback list but it isn't freed
4971  * since it may still be in use. The memory for the callback can be
4972  * subsequently freed back by the application by calling rte_free():
4973  *
4974  * - Immediately - if the port is stopped, or the user knows that no
4975  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
4976  *   on that queue.
4977  *
4978  * - After a short delay - where the delay is sufficient to allow any
4979  *   in-flight callbacks to complete. Alternatively, the RCU mechanism can be
4980  *   used to detect when data plane threads have ceased referencing the
4981  *   callback memory.
4982  *
4983  * @param port_id
4984  *   The port identifier of the Ethernet device.
4985  * @param queue_id
4986  *   The queue on the Ethernet device from which the callback is to be removed.
4987  * @param user_cb
4988  *   User supplied callback created via rte_eth_add_rx_callback().
4989  *
4990  * @return
4991  *   - 0: Success. Callback was removed.
4992  *   - -ENODEV:  If *port_id* is invalid.
4993  *   - -ENOTSUP: Callback support is not available.
4994  *   - -EINVAL:  The queue_id is out of range, or the callback
4995  *               is NULL or not found for the port/queue.
4996  */
4997 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4998 		const struct rte_eth_rxtx_callback *user_cb);
4999 
5000 /**
5001  * Remove a Tx packet callback from a given port and queue.
5002  *
5003  * This function is used to remove callbacks that were added to a NIC port
5004  * queue using rte_eth_add_tx_callback().
5005  *
5006  * Note: the callback is removed from the callback list but it isn't freed
5007  * since it may still be in use. The memory for the callback can be
5008  * subsequently freed back by the application by calling rte_free():
5009  *
5010  * - Immediately - if the port is stopped, or the user knows that no
5011  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
5012  *   on that queue.
5013  *
5014  * - After a short delay - where the delay is sufficient to allow any
5015  *   in-flight callbacks to complete. Alternatively, the RCU mechanism can be
5016  *   used to detect when data plane threads have ceased referencing the
5017  *   callback memory.
5018  *
5019  * @param port_id
5020  *   The port identifier of the Ethernet device.
5021  * @param queue_id
5022  *   The queue on the Ethernet device from which the callback is to be removed.
5023  * @param user_cb
5024  *   User supplied callback created via rte_eth_add_tx_callback().
5025  *
5026  * @return
5027  *   - 0: Success. Callback was removed.
5028  *   - -ENODEV:  If *port_id* is invalid.
5029  *   - -ENOTSUP: Callback support is not available.
5030  *   - -EINVAL:  The queue_id is out of range, or the callback
5031  *               is NULL or not found for the port/queue.
5032  */
5033 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
5034 		const struct rte_eth_rxtx_callback *user_cb);
5035 
5036 /**
5037  * Retrieve information about given port's Rx queue.
5038  *
5039  * @param port_id
5040  *   The port identifier of the Ethernet device.
5041  * @param queue_id
5042  *   The Rx queue on the Ethernet device for which information
5043  *   will be retrieved.
5044  * @param qinfo
5045  *   A pointer to a structure of type *rte_eth_rxq_info* to be filled with
5046  *   the information of the Ethernet device.
5047  *
5048  * @return
5049  *   - 0: Success
5050  *   - -ENODEV:  If *port_id* is invalid.
5051  *   - -ENOTSUP: routine is not supported by the device PMD.
5052  *   - -EINVAL:  The queue_id is out of range, or the queue
5053  *               is a hairpin queue.
5054  */
5055 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5056 	struct rte_eth_rxq_info *qinfo);
5057 
5058 /**
5059  * Retrieve information about given port's Tx queue.
5060  *
5061  * @param port_id
5062  *   The port identifier of the Ethernet device.
5063  * @param queue_id
5064  *   The Tx queue on the Ethernet device for which information
5065  *   will be retrieved.
5066  * @param qinfo
5067  *   A pointer to a structure of type *rte_eth_txq_info* to be filled with
5068  *   the information of the Ethernet device.
5069  *
5070  * @return
5071  *   - 0: Success
5072  *   - -ENODEV:  If *port_id* is invalid.
5073  *   - -ENOTSUP: routine is not supported by the device PMD.
5074  *   - -EINVAL:  The queue_id is out of range, or the queue
5075  *               is a hairpin queue.
5076  */
5077 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
5078 	struct rte_eth_txq_info *qinfo);
5079 
5080 /**
5081  * @warning
5082  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5083  *
5084  * Retrieve information about a given port's Rx queue for recycling mbufs.
5085  *
5086  * @param port_id
5087  *   The port identifier of the Ethernet device.
5088  * @param queue_id
5089  *   The Rx queue on the Ethernet device for which information
5090  *   will be retrieved.
5091  * @param recycle_rxq_info
5092  *   A pointer to a structure of type *rte_eth_recycle_rxq_info* to be filled.
5093  *
5094  * @return
5095  *   - 0: Success
5096  *   - -ENODEV:  If *port_id* is invalid.
5097  *   - -ENOTSUP: routine is not supported by the device PMD.
5098  *   - -EINVAL:  The queue_id is out of range.
5099  */
5100 __rte_experimental
5101 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5102 		uint16_t queue_id,
5103 		struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5104 
5105 /**
5106  * Retrieve information about the Rx packet burst mode.
5107  *
5108  * @param port_id
5109  *   The port identifier of the Ethernet device.
5110  * @param queue_id
5111  *   The Rx queue on the Ethernet device for which information
5112  *   will be retrieved.
5113  * @param mode
5114  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
5115  *   with the information of the packet burst mode.
5116  *
5117  * @return
5118  *   - 0: Success
5119  *   - -ENODEV:  If *port_id* is invalid.
5120  *   - -ENOTSUP: routine is not supported by the device PMD.
5121  *   - -EINVAL:  The queue_id is out of range.
5122  */
5123 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5124 	struct rte_eth_burst_mode *mode);
5125 
5126 /**
5127  * Retrieve information about the Tx packet burst mode.
5128  *
5129  * @param port_id
5130  *   The port identifier of the Ethernet device.
5131  * @param queue_id
5132  *   The Tx queue on the Ethernet device for which information
5133  *   will be retrieved.
5134  * @param mode
5135  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
5136  *   with the information of the packet burst mode.
5137  *
5138  * @return
5139  *   - 0: Success
5140  *   - -ENODEV:  If *port_id* is invalid.
5141  *   - -ENOTSUP: routine is not supported by the device PMD.
5142  *   - -EINVAL:  The queue_id is out of range.
5143  */
5144 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5145 	struct rte_eth_burst_mode *mode);
5146 
5147 /**
5148  * @warning
5149  * @b EXPERIMENTAL: this API may change without prior notice.
5150  *
5151  * Retrieve the monitor condition for a given receive queue.
5152  *
5153  * @param port_id
5154  *   The port identifier of the Ethernet device.
5155  * @param queue_id
5156  *   The Rx queue on the Ethernet device for which information
5157  *   will be retrieved.
5158  * @param pmc
5159  *   The pointer to power-optimized monitoring condition structure.
5160  *
5161  * @return
5162  *   - 0: Success.
5163  *   - -ENOTSUP: Operation not supported.
5164  *   - -EINVAL: Invalid parameters.
5165  *   - -ENODEV: Invalid port ID.
5166  */
5167 __rte_experimental
5168 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5169 		struct rte_power_monitor_cond *pmc);
5170 
5171 /**
5172  * Retrieve the filtered device registers (values and names) and
5173  * register attributes (number of registers and register size)
5174  *
5175  * @param port_id
5176  *   The port identifier of the Ethernet device.
5177  * @param info
5178  *   Pointer to rte_dev_reg_info structure to fill in.
5179  *   - If info->filter is NULL, info for all registers is returned (treated
5180  *     as no filter).
5181  *   - If info->filter is not NULL, an error is returned if the driver does not
5182  *     support filtering. The length field is filled with the number of filtered registers.
5183  *   - If info->data is NULL, the function fills in the width and length fields.
5184  *   - If info->data is not NULL, ethdev assumes there is enough space to
5185  *     store the registers, and the values of registers with the filter string
5186  *     as the module name are put into the buffer pointed at by info->data.
5187  *   - If info->names is not NULL, drivers should fill it, or else the ethdev
5188  *     fills it with default names.
5189  * @return
5190  *   - (0) if successful.
5191  *   - (-ENOTSUP) if hardware doesn't support.
5192  *   - (-EINVAL) if bad parameter.
5193  *   - (-ENODEV) if *port_id* invalid.
5194  *   - (-EIO) if device is removed.
5195  *   - others depends on the specific operations implementation.
5196  */
5197 __rte_experimental
5198 int rte_eth_dev_get_reg_info_ext(uint16_t port_id, struct rte_dev_reg_info *info);
5199 
5200 /**
5201  * Retrieve device registers and register attributes (number of registers and
5202  * register size)
5203  *
5204  * @param port_id
5205  *   The port identifier of the Ethernet device.
5206  * @param info
5207  *   Pointer to rte_dev_reg_info structure to fill in. If info->data is
5208  *   NULL the function fills in the width and length fields. If non-NULL
5209  *   the registers are put into the buffer pointed at by the data field.
5210  * @return
5211  *   - (0) if successful.
5212  *   - (-ENOTSUP) if hardware doesn't support.
5213  *   - (-EINVAL) if bad parameter.
5214  *   - (-ENODEV) if *port_id* invalid.
5215  *   - (-EIO) if device is removed.
5216  *   - others depends on the specific operations implementation.
5217  */
5218 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
5219 	__rte_warn_unused_result;
5220 
5221 /**
5222  * Retrieve size of device EEPROM
5223  *
5224  * @param port_id
5225  *   The port identifier of the Ethernet device.
5226  * @return
5227  *   - (>=0) EEPROM size if successful.
5228  *   - (-ENOTSUP) if hardware doesn't support.
5229  *   - (-ENODEV) if *port_id* invalid.
5230  *   - (-EIO) if device is removed.
5231  *   - others depends on the specific operations implementation.
5232  */
5233 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5234 
5235 /**
5236  * Retrieve EEPROM and EEPROM attribute
5237  *
5238  * @param port_id
5239  *   The port identifier of the Ethernet device.
5240  * @param info
5241  *   The template includes a buffer for the returned EEPROM data and
5242  *   the EEPROM attributes to be filled.
5243  * @return
5244  *   - (0) if successful.
5245  *   - (-ENOTSUP) if hardware doesn't support.
5246  *   - (-EINVAL) if bad parameter.
5247  *   - (-ENODEV) if *port_id* invalid.
5248  *   - (-EIO) if device is removed.
5249  *   - others depends on the specific operations implementation.
5250  */
5251 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5252 
5253 /**
5254  * Program EEPROM with provided data
5255  *
5256  * @param port_id
5257  *   The port identifier of the Ethernet device.
5258  * @param info
5259  *   The template includes the EEPROM data to program and
5260  *   the EEPROM attributes to be filled.
5261  * @return
5262  *   - (0) if successful.
5263  *   - (-ENOTSUP) if hardware doesn't support.
5264  *   - (-ENODEV) if *port_id* invalid.
5265  *   - (-EINVAL) if bad parameter.
5266  *   - (-EIO) if device is removed.
5267  *   - others depends on the specific operations implementation.
5268  */
5269 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5270 
5271 /**
5272  * @warning
5273  * @b EXPERIMENTAL: this API may change without prior notice.
5274  *
5275  * Retrieve the type and size of plugin module EEPROM
5276  *
5277  * @param port_id
5278  *   The port identifier of the Ethernet device.
5279  * @param modinfo
5280  *   The type and size of plugin module EEPROM.
5281  * @return
5282  *   - (0) if successful.
5283  *   - (-ENOTSUP) if hardware doesn't support.
5284  *   - (-ENODEV) if *port_id* invalid.
5285  *   - (-EINVAL) if bad parameter.
5286  *   - (-EIO) if device is removed.
5287  *   - others depends on the specific operations implementation.
5288  */
5289 __rte_experimental
5290 int
5291 rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo)
5292 	__rte_warn_unused_result;
5293 
5294 /**
5295  * @warning
5296  * @b EXPERIMENTAL: this API may change without prior notice.
5297  *
5298  * Retrieve the data of plugin module EEPROM
5299  *
5300  * @param port_id
5301  *   The port identifier of the Ethernet device.
5302  * @param info
5303  *   The template includes the plugin module EEPROM attributes, and the
5304  *   buffer for the returned plugin module EEPROM data.
5305  * @return
5306  *   - (0) if successful.
5307  *   - (-ENOTSUP) if hardware doesn't support.
5308  *   - (-EINVAL) if bad parameter.
5309  *   - (-ENODEV) if *port_id* invalid.
5310  *   - (-EIO) if device is removed.
5311  *   - others depends on the specific operations implementation.
5312  */
5313 __rte_experimental
5314 int
5315 rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
5316 	__rte_warn_unused_result;
5317 
5318 /**
5319  * Set the list of multicast addresses to filter on an Ethernet device.
5320  *
5321  * @param port_id
5322  *   The port identifier of the Ethernet device.
5323  * @param mc_addr_set
5324  *   The array of multicast addresses to set. Equal to NULL when the function
5325  *   is invoked to flush the set of filtered addresses.
5326  * @param nb_mc_addr
5327  *   The number of multicast addresses in the *mc_addr_set* array. Equal to 0
5328  *   when the function is invoked to flush the set of filtered addresses.
5329  * @return
5330  *   - (0) if successful.
5331  *   - (-ENODEV) if *port_id* invalid.
5332  *   - (-EIO) if device is removed.
5333  *   - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
5334  *   - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
5335  *   - (-EINVAL) if bad parameter.
5336  */
5337 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5338 				 struct rte_ether_addr *mc_addr_set,
5339 				 uint32_t nb_mc_addr);
5340 
5341 /**
5342  * Enable IEEE1588/802.1AS timestamping for an Ethernet device.
5343  *
5344  * @param port_id
5345  *   The port identifier of the Ethernet device.
5346  *
5347  * @return
5348  *   - 0: Success.
5349  *   - -ENODEV: The port ID is invalid.
5350  *   - -EIO: if device is removed.
5351  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5352  */
5353 int rte_eth_timesync_enable(uint16_t port_id);
5354 
5355 /**
5356  * Disable IEEE1588/802.1AS timestamping for an Ethernet device.
5357  *
5358  * @param port_id
5359  *   The port identifier of the Ethernet device.
5360  *
5361  * @return
5362  *   - 0: Success.
5363  *   - -ENODEV: The port ID is invalid.
5364  *   - -EIO: if device is removed.
5365  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5366  */
5367 int rte_eth_timesync_disable(uint16_t port_id);
5368 
5369 /**
5370  * Read an IEEE1588/802.1AS Rx timestamp from an Ethernet device.
5371  *
5372  * @param port_id
5373  *   The port identifier of the Ethernet device.
5374  * @param timestamp
5375  *   Pointer to the timestamp struct.
5376  * @param flags
5377  *   Device specific flags. Used to pass the Rx timesync register index to
5378  *   i40e. Unused in igb/ixgbe, pass 0 instead.
5379  *
5380  * @return
5381  *   - 0: Success.
5382  *   - -EINVAL: No timestamp is available.
5383  *   - -ENODEV: The port ID is invalid.
5384  *   - -EIO: if device is removed.
5385  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5386  */
5387 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5388 		struct timespec *timestamp, uint32_t flags);
5389 
5390 /**
5391  * Read an IEEE1588/802.1AS Tx timestamp from an Ethernet device.
5392  *
5393  * @param port_id
5394  *   The port identifier of the Ethernet device.
5395  * @param timestamp
5396  *   Pointer to the timestamp struct.
5397  *
5398  * @return
5399  *   - 0: Success.
5400  *   - -EINVAL: No timestamp is available.
5401  *   - -ENODEV: The port ID is invalid.
5402  *   - -EIO: if device is removed.
5403  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5404  */
5405 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5406 		struct timespec *timestamp);
5407 
5408 /**
5409  * Adjust the timesync clock on an Ethernet device.
5410  *
5411  * This is usually used in conjunction with other Ethdev timesync functions to
5412  * synchronize the device time using the IEEE1588/802.1AS protocol.
5413  *
5414  * @param port_id
5415  *   The port identifier of the Ethernet device.
5416  * @param delta
5417  *   The adjustment in nanoseconds.
5418  *
5419  * @return
5420  *   - 0: Success.
5421  *   - -ENODEV: The port ID is invalid.
5422  *   - -EIO: if device is removed.
5423  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5424  */
5425 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5426 
5427 /**
5428  * Adjust the clock frequency on an Ethernet device.
5429  *
5430  * Adjusts the base frequency by a specified percentage of ppm (parts per
5431  * million). This is usually used in conjunction with other Ethdev timesync
5432  * functions to synchronize the device time using the IEEE1588/802.1AS
5433  * protocol.
5434  *
5435  * The clock is subject to frequency deviation and rate of change drift due to
5436  * the environment. The upper-layer application calculates the frequency compensation
5437  * value of the slave clock relative to the master clock via a servo algorithm
5438  * and adjusts the device clock frequency via "rte_eth_timesync_adjust_freq()".
5439  * Commonly used servo algorithms are pi/linreg/ntpshm, for implementation
5440  * see: https://github.com/nxp-archive/openil_linuxptp.git.
5441  *
5442  * The adjustment value obtained by the servo algorithm is usually in
5443  * ppb (parts per billion). For consistency with the kernel driver .adjfine,
5444  * the tuning values here are in scaled ppm. Note that 1 ppb is approximately
5445  * 65.536 scaled ppm, see Linux kernel upstream commit 1060707e3809 ('ptp:
5446  * introduce helpers to adjust by scaled parts per million').
5447  *
5448  * In addition, the device reference frequency is usually also the stepping
5449  * threshold for the servo algorithm, and the frequency up and down adjustment
5450  * range is limited by the device. The device clock frequency should be
5451  * adjusted with "rte_eth_timesync_adjust_freq()" every time the clock is
5452  * synchronized. Also use 'rte_eth_timesync_adjust_time()' to update the device
5453  * clock only if the absolute value of the master/slave clock offset is greater than
5454  * or equal to the step threshold.
5455  *
5456  * @param port_id
5457  *  The port identifier of the Ethernet device.
5458  * @param ppm
5459  *  Parts per million with 16-bit fractional field
5460  *
5461  * @return
5462  *   - 0: Success.
5463  *   - -ENODEV: The port ID is invalid.
5464  *   - -EIO: if device is removed.
5465  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5466  */
5467 __rte_experimental
5468 int rte_eth_timesync_adjust_freq(uint16_t port_id, int64_t ppm);
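
/*
 * Example (illustrative sketch): converting a servo output expressed in ppb
 * into the scaled-ppm value expected here (ppm with a 16-bit fractional field,
 * i.e. scaled_ppm = ppm * 2^16, so 1 ppb ~= 65.536 scaled ppm). The variable
 * servo_output_ppb is hypothetical.
 *
 *   int64_t ppb = servo_output_ppb;
 *   int64_t scaled_ppm = ppb * 65536 / 1000;
 *
 *   rte_eth_timesync_adjust_freq(port_id, scaled_ppm);
 */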
5469 
5470 /**
5471  * Read the time from the timesync clock on an Ethernet device.
5472  *
5473  * This is usually used in conjunction with other Ethdev timesync functions to
5474  * synchronize the device time using the IEEE1588/802.1AS protocol.
5475  *
5476  * @param port_id
5477  *   The port identifier of the Ethernet device.
5478  * @param time
5479  *   Pointer to the timespec struct that holds the time.
5480  *
5481  * @return
5482  *   - 0: Success.
5483  *   - -EINVAL: Bad parameter.
5484  */
5485 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5486 
5487 /**
5488  * Set the time of the timesync clock on an Ethernet device.
5489  *
5490  * This is usually used in conjunction with other Ethdev timesync functions to
5491  * synchronize the device time using the IEEE1588/802.1AS protocol.
5492  *
5493  * @param port_id
5494  *   The port identifier of the Ethernet device.
5495  * @param time
5496  *   Pointer to the timespec struct that holds the time.
5497  *
5498  * @return
5499  *   - 0: Success.
5500  *   - -EINVAL: No timestamp is available.
5501  *   - -ENODEV: The port ID is invalid.
5502  *   - -EIO: if device is removed.
5503  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5504  */
5505 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5506 
5507 /**
5508  * @warning
5509  * @b EXPERIMENTAL: this API may change without prior notice.
5510  *
5511  * Read the current clock counter of an Ethernet device
5512  *
5513  * This returns the current raw clock value of an Ethernet device. It is
5514  * a raw amount of ticks, with no given time reference.
5515  * The value returned here is from the same clock as the one
5516  * filling the timestamp field of Rx packets when using hardware timestamp
5517  * offload. Therefore it can be used to compute a precise conversion of
5518  * the device clock to the real time.
5519  *
5520  * E.g., a simple heuristic to derive the frequency would be:
5521  * uint64_t start, end;
5522  * rte_eth_read_clock(port, &start);
5523  * rte_delay_ms(100);
5524  * rte_eth_read_clock(port, &end);
5525  * double freq = (end - start) * 10;
5526  *
5527  * Compute a common reference with:
5528  * uint64_t base_time_sec = current_time();
5529  * uint64_t base_clock;
5530  * rte_eth_read_clock(port, &base_clock);
5531  *
5532  * Then, convert the raw mbuf timestamp with:
5533  * base_time_sec + (double)(*timestamp_dynfield(mbuf) - base_clock) / freq;
5534  *
5535  * This simple example will not provide very good accuracy. One must
5536  * at least measure the frequency multiple times and do a regression.
5537  * To avoid deviation from the system time, the common reference can
5538  * be repeated from time to time. The integer division can also be
5539  * replaced with a multiplication and a shift for better performance.
5540  *
5541  * @param port_id
5542  *   The port identifier of the Ethernet device.
5543  * @param clock
5544  *   Pointer to the uint64_t that holds the raw clock value.
5545  *
5546  * @return
5547  *   - 0: Success.
5548  *   - -ENODEV: The port ID is invalid.
5549  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5550  *   - -EINVAL: if bad parameter.
5551  */
5552 __rte_experimental
5553 int
5554 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5555 
5556 /**
5557  * Get the port ID from device name.
5558  * The device name should be specified as below:
5559  * - PCIe address (Domain:Bus:Device.Function), for example: 0000:02:00.0
5560  * - SoC device name, for example: fsl-gmac0
5561  * - vdev dpdk name, for example: net_[pcap0|null0|tap0]
5562  *
5563  * @param name
5564  *   PCI address or name of the device.
5565  * @param port_id
5566  *   Pointer to port identifier of the device.
5567  * @return
5568  *   - (0) if successful and port_id is filled.
5569  *   - (-ENODEV or -EINVAL) on failure.
5570  */
5571 int
5572 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
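
/*
 * Example (illustrative sketch): resolving a PCI address to a port identifier.
 *
 *   uint16_t port_id;
 *
 *   if (rte_eth_dev_get_port_by_name("0000:02:00.0", &port_id) == 0)
 *       printf("device is port %u\n", port_id);
 */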
5573 
5574 /**
5575  * Get the device name from port ID.
5576  * The device name is specified as below:
5577  * - PCIe address (Domain:Bus:Device.Function), for example: 0000:02:00.0
5578  * - SoC device name, for example: fsl-gmac0
5579  * - vdev dpdk name, for example: net_[pcap0|null0|tun0|tap0]
5580  *
5581  * @param port_id
5582  *   Port identifier of the device.
5583  * @param name
5584  *   Buffer of size RTE_ETH_NAME_MAX_LEN to store the name.
5585  * @return
5586  *   - (0) if successful.
5587  *   - (-ENODEV) if *port_id* is invalid.
5588  *   - (-EINVAL) on failure.
5589  */
5590 int
5591 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5592 
5593 /**
5594  * Check that the numbers of Rx and Tx descriptors satisfy the descriptor limits
5595  * from the Ethernet device information; otherwise adjust them to the boundaries.
5596  *
5597  * @param port_id
5598  *   The port identifier of the Ethernet device.
5599  * @param nb_rx_desc
5600  *   A pointer to a uint16_t where the number of receive
5601  *   descriptors is stored.
5602  * @param nb_tx_desc
5603  *   A pointer to a uint16_t where the number of transmit
5604  *   descriptors is stored.
5605  * @return
5606  *   - (0) if successful.
5607  *   - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
5608  */
5609 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5610 				     uint16_t *nb_rx_desc,
5611 				     uint16_t *nb_tx_desc);
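
/*
 * Example (illustrative sketch): clamping the requested ring sizes to the
 * device limits before setting up one Rx and one Tx queue (mb_pool is an
 * already-created mempool).
 *
 *   uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *   rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *   rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_socket_id(), NULL, mb_pool);
 *   rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_socket_id(), NULL);
 */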
5612 
5613 /**
5614  * Test if a port supports specific mempool ops.
5615  *
5616  * @param port_id
5617  *   Port identifier of the Ethernet device.
5618  * @param [in] pool
5619  *   The name of the pool operations to test.
5620  * @return
5621  *   - 0: best mempool ops choice for this port.
5622  *   - 1: mempool ops are supported for this port.
5623  *   - -ENOTSUP: mempool ops not supported for this port.
5624  *   - -ENODEV: Invalid port Identifier.
5625  *   - -EINVAL: Pool param is null.
5626  */
5627 int
5628 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5629 
5630 /**
5631  * Get the security context for the Ethernet device.
5632  *
5633  * @param port_id
5634  *   Port identifier of the Ethernet device
5635  * @return
5636  *   - NULL on error.
5637  *   - pointer to security context on success.
5638  */
5639 void *
5640 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5641 
5642 /**
5643  * @warning
5644  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5645  *
5646  * Query the device hairpin capabilities.
5647  *
5648  * @param port_id
5649  *   The port identifier of the Ethernet device.
5650  * @param cap
5651  *   Pointer to a structure that will hold the hairpin capabilities.
5652  * @return
5653  *   - (0) if successful.
5654  *   - (-ENOTSUP) if hardware doesn't support.
5655  *   - (-EINVAL) if bad parameter.
5656  */
5657 __rte_experimental
5658 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5659 				       struct rte_eth_hairpin_cap *cap);
5660 
5661 /**
5662  * @warning
5663  * @b EXPERIMENTAL: this structure may change without prior notice.
5664  *
5665  * Ethernet device representor ID range entry
5666  */
5667 struct rte_eth_representor_range {
5668 	enum rte_eth_representor_type type; /**< Representor type */
5669 	int controller; /**< Controller index */
5670 	int pf; /**< Physical function index */
5671 	__extension__
5672 	union {
5673 		int vf; /**< VF start index */
5674 		int sf; /**< SF start index */
5675 	};
5676 	uint32_t id_base; /**< Representor ID start index */
5677 	uint32_t id_end;  /**< Representor ID end index */
5678 	char name[RTE_DEV_NAME_MAX_LEN]; /**< Representor name */
5679 };
5680 
5681 /**
5682  * @warning
5683  * @b EXPERIMENTAL: this structure may change without prior notice.
5684  *
5685  * Ethernet device representor information
5686  */
5687 struct rte_eth_representor_info {
5688 	uint16_t controller; /**< Controller ID of caller device. */
5689 	uint16_t pf; /**< Physical function ID of caller device. */
5690 	uint32_t nb_ranges_alloc; /**< Size of the ranges array. */
5691 	uint32_t nb_ranges; /**< Number of initialized ranges. */
5692 	struct rte_eth_representor_range ranges[];/**< Representor ID range. */
5693 };
5694 
5695 /**
5696  * Retrieve the representor info of the device.
5697  *
5698  * Get device representor info to be able to calculate a unique
5699  * representor ID. @see rte_eth_representor_id_get helper.
5700  *
5701  * @param port_id
5702  *   The port identifier of the device.
5703  * @param info
5704  *   A pointer to a representor info structure.
5705  *   Pass NULL to get the number of range entries, so that memory can be
5706  *   allocated for a subsequent call that stores the details.
5707  *   The number of ranges that were written into this structure
5708  *   will be placed into its nb_ranges field. This number cannot be
5709  *   larger than the nb_ranges_alloc value set by the user before calling
5710  *   this function. It can be smaller than the value returned by the
5711  *   function, however.
5712  * @return
5713  *   - (-ENOTSUP) if operation is not supported.
5714  *   - (-ENODEV) if *port_id* invalid.
5715  *   - (-EIO) if device is removed.
5716  *   - (>=0) number of available representor range entries.
5717  */
5718 __rte_experimental
5719 int rte_eth_representor_info_get(uint16_t port_id,
5720 				 struct rte_eth_representor_info *info);
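
/*
 * Usage sketch (illustrative only) of the two-call pattern described above:
 * first query the number of ranges, then allocate a buffer large enough for
 * them and retrieve the details. Error handling is abbreviated.
 *
 *	struct rte_eth_representor_info *info;
 *	int num;
 *
 *	num = rte_eth_representor_info_get(port_id, NULL);
 *	if (num <= 0)
 *		return;
 *	info = calloc(1, sizeof(*info) + num * sizeof(info->ranges[0]));
 *	if (info == NULL)
 *		return;
 *	info->nb_ranges_alloc = num;
 *	if (rte_eth_representor_info_get(port_id, info) >= 0) {
 *		// info->nb_ranges entries of info->ranges[] are now valid
 *	}
 *	free(info);
 */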
5721 
5722 /** The NIC is able to deliver flag (if set) with packets to the PMD. */
5723 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5724 
5725 /** The NIC is able to deliver mark ID with packets to the PMD. */
5726 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5727 
5728 /** The NIC is able to deliver tunnel ID with packets to the PMD. */
5729 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5730 
5731 /**
5732  * Negotiate the NIC's ability to deliver specific kinds of metadata to the PMD.
5733  *
5734  * Invoke this API before the first rte_eth_dev_configure() invocation
5735  * to let the PMD make preparations that are inconvenient to do later.
5736  *
5737  * The negotiation process is as follows:
5738  *
5739  * - the application requests features intending to use at least some of them;
5740  * - the PMD responds with the guaranteed subset of the requested feature set;
5741  * - the application can retry negotiation with another set of features;
5742  * - the application can pass zero to clear the negotiation result;
5743  * - the last negotiated result takes effect upon
5744  *   the ethdev configure and start.
5745  *
5746  * @note
5747  *   The PMD is supposed to first consider enabling the requested feature set
5748  *   in its entirety. Only if it fails to do so does it have the right to
5749  *   respond with a smaller set of the originally requested features.
5750  *
5751  * @note
5752  *   Return code (-ENOTSUP) does not necessarily mean that the requested
5753  *   features are unsupported. In this case, the application should just
5754  *   assume that these features can be used without prior negotiations.
5755  *
5756  * @param port_id
5757  *   Port (ethdev) identifier
5758  *
5759  * @param[inout] features
5760  *   Feature selection buffer
5761  *
5762  * @return
5763  *   - (-EBUSY) if the port can't handle this in its current state;
5764  *   - (-ENOTSUP) if the method itself is not supported by the PMD;
5765  *   - (-ENODEV) if *port_id* is invalid;
5766  *   - (-EINVAL) if *features* is NULL;
5767  *   - (-EIO) if the device is removed;
5768  *   - (0) on success
5769  */
5770 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
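
/*
 * Usage sketch (illustrative only): negotiate flow flag and mark delivery
 * before the first rte_eth_dev_configure() call, falling back gracefully when
 * the method is not implemented (see the -ENOTSUP note above).
 *
 *	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *			    RTE_ETH_RX_METADATA_USER_MARK;
 *	int ret;
 *
 *	ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *	if (ret == 0) {
 *		// 'features' now holds the subset guaranteed by the PMD
 *	} else if (ret != -ENOTSUP) {
 *		return;				// genuine error
 *	}
 *	// on -ENOTSUP, assume the features work without prior negotiation
 */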
5771 
5772 /** Flag to offload IP reassembly for IPv4 packets. */
5773 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5774 /** Flag to offload IP reassembly for IPv6 packets. */
5775 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5776 
5777 /**
5778  * A structure used to get/set IP reassembly configuration. It is also used
5779  * to get the maximum capability values that a PMD can support.
5780  *
5781  * If rte_eth_ip_reassembly_capability_get() returns 0, IP reassembly can be
5782  * enabled using rte_eth_ip_reassembly_conf_set(), and parameter values lower
5783  * than the capability values can be set in the PMD.
5784  */
5785 struct rte_eth_ip_reassembly_params {
5786 	/** Maximum time in ms which PMD can wait for other fragments. */
5787 	uint32_t timeout_ms;
5788 	/** Maximum number of fragments that can be reassembled. */
5789 	uint16_t max_frags;
5790 	/**
5791 	 * Flags to enable reassembly of packet types -
5792 	 * RTE_ETH_DEV_REASSEMBLY_F_xxx.
5793 	 */
5794 	uint16_t flags;
5795 };
5796 
5797 /**
5798  * @warning
5799  * @b EXPERIMENTAL: this API may change without prior notice
5800  *
5801  * Get IP reassembly capabilities supported by the PMD. This is the first API
5802  * to be called for enabling the IP reassembly offload feature. PMD will return
5803  * the maximum values of parameters that PMD can support and user can call
5804  * rte_eth_ip_reassembly_conf_set() with param values lower than capability.
5805  *
5806  * @param port_id
5807  *   The port identifier of the device.
5808  * @param capa
5809  *   A pointer to rte_eth_ip_reassembly_params structure.
5810  * @return
5811  *   - (-ENOTSUP) if offload configuration is not supported by device.
5812  *   - (-ENODEV) if *port_id* invalid.
5813  *   - (-EIO) if device is removed.
5814  *   - (-EINVAL) if device is not configured or *capa* passed is NULL.
5815  *   - (0) on success.
5816  */
5817 __rte_experimental
5818 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5819 		struct rte_eth_ip_reassembly_params *capa);
5820 
5821 /**
5822  * @warning
5823  * @b EXPERIMENTAL: this API may change without prior notice
5824  *
5825  * Get IP reassembly configuration parameters currently set in PMD.
5826  * The API will return error if the configuration is not already
5827  * set using rte_eth_ip_reassembly_conf_set() before calling this API or if
5828  * the device is not configured.
5829  *
5830  * @param port_id
5831  *   The port identifier of the device.
5832  * @param conf
5833  *   A pointer to rte_eth_ip_reassembly_params structure.
5834  * @return
5835  *   - (-ENOTSUP) if offload configuration is not supported by device.
5836  *   - (-ENODEV) if *port_id* invalid.
5837  *   - (-EIO) if device is removed.
5838  *   - (-EINVAL) if device is not configured or if *conf* passed is NULL or if
5839  *              configuration is not set using rte_eth_ip_reassembly_conf_set().
5840  *   - (0) on success.
5841  */
5842 __rte_experimental
5843 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5844 		struct rte_eth_ip_reassembly_params *conf);
5845 
5846 /**
5847  * @warning
5848  * @b EXPERIMENTAL: this API may change without prior notice
5849  *
5850  * Set IP reassembly configuration parameters if the PMD supports IP reassembly
5851  * offload. User should first call rte_eth_ip_reassembly_capability_get() to
5852  * check the maximum values supported by the PMD before setting the
5853  * configuration. The use of this API is mandatory to enable this feature and
5854  * should be called before rte_eth_dev_start().
5855  *
5856  * In datapath, PMD cannot guarantee that IP reassembly is always successful.
5857  * Hence, PMD shall register mbuf dynamic field and dynamic flag using
5858  * rte_eth_ip_reassembly_dynfield_register() to denote incomplete IP reassembly.
5859  * If dynfield is not successfully registered, error will be returned and
5860  * IP reassembly offload cannot be used.
5861  *
5862  * @param port_id
5863  *   The port identifier of the device.
5864  * @param conf
5865  *   A pointer to rte_eth_ip_reassembly_params structure.
5866  * @return
5867  *   - (-ENOTSUP) if offload configuration is not supported by device.
5868  *   - (-ENODEV) if *port_id* invalid.
5869  *   - (-EIO) if device is removed.
5870  *   - (-EINVAL) if device is not configured or if device is already started or
5871  *               if *conf* passed is NULL or if mbuf dynfield is not registered
5872  *               successfully by the PMD.
5873  *   - (0) on success.
5874  */
5875 __rte_experimental
5876 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5877 		const struct rte_eth_ip_reassembly_params *conf);
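
/*
 * Usage sketch (illustrative only): query the reassembly capabilities, then
 * enable the offload with values not exceeding them, before starting the
 * port. The chosen timeout and fragment count are arbitrary example values.
 *
 *	struct rte_eth_ip_reassembly_params capa, conf;
 *
 *	if (rte_eth_ip_reassembly_capability_get(port_id, &capa) != 0)
 *		return;				// offload not available
 *	conf.timeout_ms = RTE_MIN(capa.timeout_ms, 500);
 *	conf.max_frags = RTE_MIN(capa.max_frags, (uint16_t)4);
 *	conf.flags = capa.flags & RTE_ETH_DEV_REASSEMBLY_F_IPV4;
 *	if (rte_eth_ip_reassembly_conf_set(port_id, &conf) != 0)
 *		return;				// e.g. dynfield registration failed
 *	// the port can now be started with rte_eth_dev_start()
 */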
5878 
5879 /**
5880  * In case of IP reassembly offload failure, the packet is updated with the
5881  * dynamic flag RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME and the packets
5882  * are returned without alteration.
5883  * The application can retrieve the attached fragments using mbuf dynamic field
5884  * RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME.
5885  */
5886 typedef struct {
5887 	/**
5888 	 * Next fragment packet. Application should fetch dynamic field of
5889 	 * Next fragment packet. The application should fetch the dynamic field of
5890 	 * each fragment until next_frag is NULL and nb_frags is 0.
5891 	struct rte_mbuf *next_frag;
5892 	/** Time spent (in ms) by HW in waiting for further fragments. */
5893 	uint16_t time_spent;
5894 	/** Number of more fragments attached in mbuf dynamic fields. */
5895 	uint16_t nb_frags;
5896 } rte_eth_ip_reassembly_dynfield_t;
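
/*
 * Usage sketch (illustrative only, assuming the generic mbuf dynamic field
 * helpers from rte_mbuf_dyn.h): detect an incomplete reassembly on a received
 * mbuf and locate the attached fragments. The lookups would normally be done
 * once at initialization and their results cached.
 *
 *	int field_off = rte_mbuf_dynfield_lookup(
 *			RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
 *	int flag_bit = rte_mbuf_dynflag_lookup(
 *			RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL);
 *
 *	if (field_off < 0 || flag_bit < 0)
 *		return;				// offload not enabled
 *	if (mbuf->ol_flags & RTE_BIT64(flag_bit)) {
 *		rte_eth_ip_reassembly_dynfield_t *dyn = RTE_MBUF_DYNFIELD(mbuf,
 *				field_off, rte_eth_ip_reassembly_dynfield_t *);
 *		struct rte_mbuf *frag = dyn->next_frag;
 *		// walk dyn->nb_frags fragments via their own dynamic fields
 *	}
 */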
5897 
5898 /**
5899  * @warning
5900  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5901  *
5902  * Dump private info from the device to a file. The data provided and its
5903  * order depend on the PMD.
5904  *
5905  * @param port_id
5906  *   The port identifier of the Ethernet device.
5907  * @param file
5908  *   A pointer to a file for output.
5909  * @return
5910  *   - (0) on success.
5911  *   - (-ENODEV) if *port_id* is invalid.
5912  *   - (-EINVAL) if null file.
5913  *   - (-ENOTSUP) if the device does not support this function.
5914  *   - (-EIO) if device is removed.
5915  */
5916 __rte_experimental
5917 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5918 
5919 /**
5920  * @warning
5921  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5922  *
5923  * Dump ethdev Rx descriptor info to a file.
5924  *
5925  * This API is used for debugging, not a dataplane API.
5926  *
5927  * @param port_id
5928  *   The port identifier of the Ethernet device.
5929  * @param queue_id
5930  *   A Rx queue identifier on this port.
5931  * @param offset
5932  *  The offset of the descriptor starting from tail. (0 is the next
5933  *  packet to be received by the driver).
5934  * @param num
5935  *   The number of descriptors to dump.
5936  * @param file
5937  *   A pointer to a file for output.
5938  * @return
5939  *   - On success, zero.
5940  *   - On failure, a negative value.
5941  */
5942 __rte_experimental
5943 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5944 			       uint16_t offset, uint16_t num, FILE *file);
5945 
5946 /**
5947  * @warning
5948  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5949  *
5950  * Dump ethdev Tx descriptor info to a file.
5951  *
5952  * This API is used for debugging, not a dataplane API.
5953  *
5954  * @param port_id
5955  *   The port identifier of the Ethernet device.
5956  * @param queue_id
5957  *   A Tx queue identifier on this port.
5958  * @param offset
5959  *  The offset of the descriptor starting from tail. (0 is the place where
5960  *  the next packet will be sent).
5961  * @param num
5962  *   The number of descriptors to dump.
5963  * @param file
5964  *   A pointer to a file for output.
5965  * @return
5966  *   - On success, zero.
5967  *   - On failure, a negative value.
5968  */
5969 __rte_experimental
5970 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5971 			       uint16_t offset, uint16_t num, FILE *file);
5972 
5973 
5974 /* Congestion management */
5975 
5976 /** Enumerate list of ethdev congestion management objects */
5977 enum rte_eth_cman_obj {
5978 	/** Congestion management based on Rx queue depth */
5979 	RTE_ETH_CMAN_OBJ_RX_QUEUE = RTE_BIT32(0),
5980 	/**
5981 	 * Congestion management based on mempool depth associated with Rx queue
5982 	 * @see rte_eth_rx_queue_setup()
5983 	 */
5984 	RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL = RTE_BIT32(1),
5985 };
5986 
5987 /**
5988  * @warning
5989  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
5990  *
5991  * A structure used to retrieve information of ethdev congestion management.
5992  */
5993 struct rte_eth_cman_info {
5994 	/**
5995 	 * Set of supported congestion management modes
5996 	 * @see enum rte_cman_mode
5997 	 */
5998 	uint64_t modes_supported;
5999 	/**
6000 	 * Set of supported congestion management objects
6001 	 * @see enum rte_eth_cman_obj
6002 	 */
6003 	uint64_t objs_supported;
6004 	/**
6005 	 * Reserved for future fields. Always returned as 0 when
6006 	 * rte_eth_cman_info_get() is invoked
6007 	 */
6008 	uint8_t rsvd[8];
6009 };
6010 
6011 /**
6012  * @warning
6013  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
6014  *
6015  * A structure used to configure the ethdev congestion management.
6016  */
6017 struct rte_eth_cman_config {
6018 	/** Congestion management object */
6019 	enum rte_eth_cman_obj obj;
6020 	/** Congestion management mode */
6021 	enum rte_cman_mode mode;
6022 	union {
6023 		/**
6024 		 * Rx queue to configure congestion management.
6025 		 *
6026 		 * Valid when object is RTE_ETH_CMAN_OBJ_RX_QUEUE or
6027 		 * RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL.
6028 		 */
6029 		uint16_t rx_queue;
6030 		/**
6031 		 * Reserved for future fields.
6032 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
6033 		 * and will be returned as 0 when rte_eth_cman_config_get() is
6034 		 * invoked.
6035 		 */
6036 		uint8_t rsvd_obj_params[4];
6037 	} obj_param;
6038 	union {
6039 		/**
6040 		 * RED configuration parameters.
6041 		 *
6042 		 * Valid when mode is RTE_CMAN_RED.
6043 		 */
6044 		struct rte_cman_red_params red;
6045 		/**
6046 		 * Reserved for future fields.
6047 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
6048 		 * and will be returned as 0 when rte_eth_cman_config_get() is
6049 		 * invoked.
6050 		 */
6051 		uint8_t rsvd_mode_params[4];
6052 	} mode_param;
6053 };
6054 
6055 /**
6056  * @warning
6057  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6058  *
6059  * Retrieve the information for ethdev congestion management
6060  *
6061  * @param port_id
6062  *   The port identifier of the Ethernet device.
6063  * @param info
6064  *   A pointer to a structure of type *rte_eth_cman_info* to be filled with
6065  *   the information about congestion management.
6066  * @return
6067  *   - (0) if successful.
6068  *   - (-ENOTSUP) if support for cman_info_get does not exist.
6069  *   - (-ENODEV) if *port_id* invalid.
6070  *   - (-EINVAL) if bad parameter.
6071  */
6072 __rte_experimental
6073 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
6074 
6075 /**
6076  * @warning
6077  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6078  *
6079  * Initialize the ethdev congestion management configuration structure with default values.
6080  *
6081  * @param port_id
6082  *   The port identifier of the Ethernet device.
6083  * @param config
6084  *   A pointer to a structure of type *rte_eth_cman_config* to be initialized
6085  *   with default value.
6086  * @return
6087  *   - (0) if successful.
6088  *   - (-ENOTSUP) if support for cman_config_init does not exist.
6089  *   - (-ENODEV) if *port_id* invalid.
6090  *   - (-EINVAL) if bad parameter.
6091  */
6092 __rte_experimental
6093 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
6094 
6095 /**
6096  * @warning
6097  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6098  *
6099  * Configure ethdev congestion management
6100  *
6101  * @param port_id
6102  *   The port identifier of the Ethernet device.
6103  * @param config
6104  *   A pointer to a structure of type *rte_eth_cman_config* to be configured.
6105  * @return
6106  *   - (0) if successful.
6107  *   - (-ENOTSUP) if support for cman_config_set does not exist.
6108  *   - (-ENODEV) if *port_id* invalid.
6109  *   - (-EINVAL) if bad parameter.
6110  */
6111 __rte_experimental
6112 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
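
/*
 * Usage sketch (illustrative only, assuming the mode/object values are bit
 * flags as the *_supported sets above suggest): check whether RED on a Rx
 * queue is supported, initialize a configuration with driver defaults and
 * apply it. The RED thresholds in mode_param.red (see rte_cman.h) are left
 * at the defaults filled in by rte_eth_cman_config_init() in this sketch.
 *
 *	struct rte_eth_cman_info info;
 *	struct rte_eth_cman_config cfg;
 *
 *	if (rte_eth_cman_info_get(port_id, &info) != 0)
 *		return;
 *	if (!(info.modes_supported & RTE_CMAN_RED) ||
 *	    !(info.objs_supported & RTE_ETH_CMAN_OBJ_RX_QUEUE))
 *		return;				// required mode/object missing
 *	if (rte_eth_cman_config_init(port_id, &cfg) != 0)
 *		return;
 *	cfg.obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
 *	cfg.mode = RTE_CMAN_RED;
 *	cfg.obj_param.rx_queue = 0;
 *	rte_eth_cman_config_set(port_id, &cfg);
 */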
6113 
6114 /**
6115  * @warning
6116  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6117  *
6118  * Retrieve the applied ethdev congestion management parameters for the given port.
6119  *
6120  * @param port_id
6121  *   The port identifier of the Ethernet device.
6122  * @param config
6123  *   A pointer to a structure of type *rte_eth_cman_config* to retrieve
6124  *   congestion management parameters for the given object.
6125  *   Application must fill all parameters except mode_param parameter in
6126  *   struct rte_eth_cman_config.
6127  *
6128  * @return
6129  *   - (0) if successful.
6130  *   - (-ENOTSUP) if support for cman_config_get does not exist.
6131  *   - (-ENODEV) if *port_id* invalid.
6132  *   - (-EINVAL) if bad parameter.
6133  */
6134 __rte_experimental
6135 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
6136 
6137 #ifdef __cplusplus
6138 }
6139 #endif
6140 
6141 #include <rte_ethdev_core.h>
6142 
6143 #ifdef __cplusplus
6144 extern "C" {
6145 #endif
6146 
6147 /**
6148  * @internal
6149  * Helper routine for rte_eth_rx_burst().
6150  * Should be called at exit from PMD's rte_eth_rx_bulk implementation.
6151  * Does necessary post-processing - invokes Rx callbacks if any, etc.
6152  *
6153  * @param port_id
6154  *  The port identifier of the Ethernet device.
6155  * @param queue_id
6156  *  The index of the receive queue from which to retrieve input packets.
6157  * @param rx_pkts
6158  *   The address of an array of pointers to *rte_mbuf* structures that
6159  *   have been retrieved from the device.
6160  * @param nb_rx
6161  *   The number of packets that were retrieved from the device.
6162  * @param nb_pkts
6163  *   The number of elements in @p rx_pkts array.
6164  * @param opaque
6165  *   Opaque pointer of Rx queue callback related data.
6166  *
6167  * @return
6168  *  The number of packets effectively supplied to the @p rx_pkts array.
6169  */
6170 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
6171 		struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
6172 		void *opaque);
6173 
6174 /**
6175  *
6176  * Retrieve a burst of input packets from a receive queue of an Ethernet
6177  * device. The retrieved packets are stored in *rte_mbuf* structures whose
6178  * pointers are supplied in the *rx_pkts* array.
6179  *
6180  * The rte_eth_rx_burst() function loops, parsing the Rx ring of the
6181  * receive queue, up to *nb_pkts* packets, and for each completed Rx
6182  * descriptor in the ring, it performs the following operations:
6183  *
6184  * - Initialize the *rte_mbuf* data structure associated with the
6185  *   Rx descriptor according to the information provided by the NIC into
6186  *   that Rx descriptor.
6187  *
6188  * - Store the *rte_mbuf* data structure into the next entry of the
6189  *   *rx_pkts* array.
6190  *
6191  * - Replenish the Rx descriptor with a new *rte_mbuf* buffer
6192  *   allocated from the memory pool associated with the receive queue at
6193  *   initialization time.
6194  *
6195  * When retrieving an input packet that was scattered by the controller
6196  * into multiple receive descriptors, the rte_eth_rx_burst() function
6197  * appends the associated *rte_mbuf* buffers to the first buffer of the
6198  * packet.
6199  *
6200  * The rte_eth_rx_burst() function returns the number of packets
6201  * actually retrieved, which is the number of *rte_mbuf* data structures
6202  * effectively supplied into the *rx_pkts* array.
6203  * A return value equal to *nb_pkts* indicates that the Rx queue contained
6204  * at least *rx_pkts* packets, and this is likely to signify that other
6205  * received packets remain in the input queue. Applications implementing
6206  * a "retrieve as much received packets as possible" policy can check this
6207  * specific case and keep invoking the rte_eth_rx_burst() function until
6208  * a value less than *nb_pkts* is returned.
6209  *
6210  * This receive method has the following advantages:
6211  *
6212  * - It allows a run-to-completion network stack engine to retrieve and
6213  *   to immediately process received packets in a fast burst-oriented
6214  *   approach, avoiding the overhead of unnecessary intermediate packet
6215  *   queue/dequeue operations.
6216  *
6217  * - Conversely, it also allows an asynchronous-oriented processing
6218  *   method to retrieve bursts of received packets and to immediately
6219  *   queue them for further parallel processing by another logical core,
6220  *   for instance. However, instead of having received packets being
6221  *   individually queued by the driver, this approach allows the caller
6222  *   of the rte_eth_rx_burst() function to queue a burst of retrieved
6223  *   packets at a time and therefore dramatically reduce the cost of
6224  *   enqueue/dequeue operations per packet.
6225  *
6226  * - It allows the rte_eth_rx_burst() function of the driver to take
6227  *   advantage of burst-oriented hardware features (CPU cache,
6228  *   prefetch instructions, and so on) to minimize the number of CPU
6229  *   cycles per packet.
6230  *
6231  * To summarize, the proposed receive API enables many
6232  * burst-oriented optimizations in both synchronous and asynchronous
6233  * packet processing environments with no overhead in both cases.
6234  *
6235  * @note
6236  *   Some drivers using vector instructions require that *nb_pkts* is
6237  *   divisible by 4 or 8, depending on the driver implementation.
6238  *
6239  * The rte_eth_rx_burst() function does not provide any error
6240  * notification to avoid the corresponding overhead. As a hint, the
6241  * upper-level application might check the status of the device link once
6242  * being systematically returned a 0 value for a given number of tries.
6243  *
6244  * @param port_id
6245  *   The port identifier of the Ethernet device.
6246  * @param queue_id
6247  *   The index of the receive queue from which to retrieve input packets.
6248  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
6249  *   to rte_eth_dev_configure().
6250  * @param rx_pkts
6251  *   The address of an array of pointers to *rte_mbuf* structures that
6252  *   must be large enough to store *nb_pkts* pointers in it.
6253  * @param nb_pkts
6254  *   The maximum number of packets to retrieve.
6255  *   The value must be divisible by 8 in order to work with any driver.
6256  * @return
6257  *   The number of packets actually retrieved, which is the number
6258  *   of pointers to *rte_mbuf* structures effectively supplied to the
6259  *   *rx_pkts* array.
6260  */
6261 static inline uint16_t
6262 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6263 		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6264 {
6265 	uint16_t nb_rx;
6266 	struct rte_eth_fp_ops *p;
6267 	void *qd;
6268 
6269 #ifdef RTE_ETHDEV_DEBUG_RX
6270 	if (port_id >= RTE_MAX_ETHPORTS ||
6271 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6272 		RTE_ETHDEV_LOG_LINE(ERR,
6273 			"Invalid port_id=%u or queue_id=%u",
6274 			port_id, queue_id);
6275 		return 0;
6276 	}
6277 #endif
6278 
6279 	/* fetch pointer to queue data */
6280 	p = &rte_eth_fp_ops[port_id];
6281 	qd = p->rxq.data[queue_id];
6282 
6283 #ifdef RTE_ETHDEV_DEBUG_RX
6284 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6285 
6286 	if (qd == NULL) {
6287 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6288 			queue_id, port_id);
6289 		return 0;
6290 	}
6291 #endif
6292 
6293 	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6294 
6295 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6296 	{
6297 		void *cb;
6298 
6299 		/* rte_memory_order_release memory order was used when the
6300 		 * callback was inserted into the list.
6301 		 * Since there is a clear dependency between loading
6302 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6303 		 * not required.
6304 		 */
6305 		cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6306 				rte_memory_order_relaxed);
6307 		if (unlikely(cb != NULL))
6308 			nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6309 					rx_pkts, nb_rx, nb_pkts, cb);
6310 	}
6311 #endif
6312 
6313 	if (unlikely(nb_rx))
6314 		rte_ethdev_trace_rx_burst_nonempty(port_id, queue_id, (void **)rx_pkts, nb_rx);
6315 	else
6316 		rte_ethdev_trace_rx_burst_empty(port_id, queue_id, (void **)rx_pkts);
6317 	return nb_rx;
6318 }
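
/*
 * Usage sketch (illustrative only): a typical polling loop on one Rx queue,
 * freeing each packet after processing. The burst size of 32 and queue 0 are
 * arbitrary example values; process_packet() is a hypothetical application
 * function.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb_rx;
 *
 *	for (;;) {
 *		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *		for (i = 0; i < nb_rx; i++) {
 *			process_packet(pkts[i]);
 *			rte_pktmbuf_free(pkts[i]);
 *		}
 *	}
 */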
6319 
6320 /**
6321  * Get the number of used descriptors of a Rx queue
6322  *
6323  * Since it's a dataplane function, no check is performed on port_id and
6324  * queue_id. The caller must therefore ensure that the port is enabled
6325  * and the queue is configured and running.
6326  *
6327  * @param port_id
6328  *  The port identifier of the Ethernet device.
6329  * @param queue_id
6330  *  The queue ID on the specific port.
6331  * @return
6332  *  The number of used descriptors in the specific queue, or:
6333  *   - (-ENODEV) if *port_id* is invalid.
6334  *   - (-EINVAL) if *queue_id* is invalid
6335  *   - (-ENOTSUP) if the device does not support this function
6336  */
6337 static inline int
6338 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6339 {
6340 	struct rte_eth_fp_ops *p;
6341 	void *qd;
6342 
6343 #ifdef RTE_ETHDEV_DEBUG_RX
6344 	if (port_id >= RTE_MAX_ETHPORTS ||
6345 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6346 		RTE_ETHDEV_LOG_LINE(ERR,
6347 			"Invalid port_id=%u or queue_id=%u",
6348 			port_id, queue_id);
6349 		return -EINVAL;
6350 	}
6351 #endif
6352 
6353 	/* fetch pointer to queue data */
6354 	p = &rte_eth_fp_ops[port_id];
6355 	qd = p->rxq.data[queue_id];
6356 
6357 #ifdef RTE_ETHDEV_DEBUG_RX
6358 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6359 	if (qd == NULL)
6360 		return -EINVAL;
6361 #endif
6362 
6363 	if (*p->rx_queue_count == NULL)
6364 		return -ENOTSUP;
6365 	return (int)(*p->rx_queue_count)(qd);
6366 }
6367 
6368 /**@{@name Rx hardware descriptor states
6369  * @see rte_eth_rx_descriptor_status
6370  */
6371 #define RTE_ETH_RX_DESC_AVAIL    0 /**< Desc available for hw. */
6372 #define RTE_ETH_RX_DESC_DONE     1 /**< Desc done, filled by hw. */
6373 #define RTE_ETH_RX_DESC_UNAVAIL  2 /**< Desc used by driver or hw. */
6374 /**@}*/
6375 
6376 /**
6377  * Check the status of a Rx descriptor in the queue
6378  *
6379  * It should be called in a similar context to the Rx function:
6380  * - on a dataplane core
6381  * - not concurrently on the same queue
6382  *
6383  * Since it's a dataplane function, no check is performed on port_id and
6384  * queue_id. The caller must therefore ensure that the port is enabled
6385  * and the queue is configured and running.
6386  *
6387  * Note: accessing a random descriptor in the ring may trigger cache
6388  * misses and have a performance impact.
6389  *
6390  * @param port_id
6391  *  A valid port identifier of the Ethernet device.
6392  * @param queue_id
6393  *  A valid Rx queue identifier on this port.
6394  * @param offset
6395  *  The offset of the descriptor starting from tail (0 is the next
6396  *  packet to be received by the driver).
6397  *
6398  * @return
6399  *  - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
6400  *    receive a packet.
6401  *  - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
6402  *    not yet processed by the driver (i.e. in the receive queue).
6403  *  - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either held by
6404  *    the driver and not yet returned to hw, or reserved by the hw.
6405  *  - (-EINVAL) bad descriptor offset.
6406  *  - (-ENOTSUP) if the device does not support this function.
6407  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6408  */
6409 static inline int
6410 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6411 	uint16_t offset)
6412 {
6413 	struct rte_eth_fp_ops *p;
6414 	void *qd;
6415 
6416 #ifdef RTE_ETHDEV_DEBUG_RX
6417 	if (port_id >= RTE_MAX_ETHPORTS ||
6418 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6419 		RTE_ETHDEV_LOG_LINE(ERR,
6420 			"Invalid port_id=%u or queue_id=%u",
6421 			port_id, queue_id);
6422 		return -EINVAL;
6423 	}
6424 #endif
6425 
6426 	/* fetch pointer to queue data */
6427 	p = &rte_eth_fp_ops[port_id];
6428 	qd = p->rxq.data[queue_id];
6429 
6430 #ifdef RTE_ETHDEV_DEBUG_RX
6431 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6432 	if (qd == NULL)
6433 		return -ENODEV;
6434 #endif
6435 	if (*p->rx_descriptor_status == NULL)
6436 		return -ENOTSUP;
6437 	return (*p->rx_descriptor_status)(qd, offset);
6438 }
6439 
6440 /**@{@name Tx hardware descriptor states
6441  * @see rte_eth_tx_descriptor_status
6442  */
6443 #define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
6444 #define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
6445 #define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
6446 /**@}*/
6447 
6448 /**
6449  * Check the status of a Tx descriptor in the queue.
6450  *
6451  * It should be called in a similar context to the Tx function:
6452  * - on a dataplane core
6453  * - not concurrently on the same queue
6454  *
6455  * Since it's a dataplane function, no check is performed on port_id and
6456  * queue_id. The caller must therefore ensure that the port is enabled
6457  * and the queue is configured and running.
6458  *
6459  * Note: accessing a random descriptor in the ring may trigger cache
6460  * misses and have a performance impact.
6461  *
6462  * @param port_id
6463  *  A valid port identifier of the Ethernet device.
6464  * @param queue_id
6465  *  A valid Tx queue identifier on this port.
6466  * @param offset
6467  *  The offset of the descriptor starting from tail (0 is the place where
6468  *  the next packet will be sent).
6469  *
6470  * @return
6471  *  - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
6472  *    in the transmit queue.
6473  *  - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
6474  *    be reused by the driver.
6475  *  - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
6476  *    driver or the hardware.
6477  *  - (-EINVAL) bad descriptor offset.
6478  *  - (-ENOTSUP) if the device does not support this function.
6479  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6480  */
6481 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6482 	uint16_t queue_id, uint16_t offset)
6483 {
6484 	struct rte_eth_fp_ops *p;
6485 	void *qd;
6486 
6487 #ifdef RTE_ETHDEV_DEBUG_TX
6488 	if (port_id >= RTE_MAX_ETHPORTS ||
6489 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6490 		RTE_ETHDEV_LOG_LINE(ERR,
6491 			"Invalid port_id=%u or queue_id=%u",
6492 			port_id, queue_id);
6493 		return -EINVAL;
6494 	}
6495 #endif
6496 
6497 	/* fetch pointer to queue data */
6498 	p = &rte_eth_fp_ops[port_id];
6499 	qd = p->txq.data[queue_id];
6500 
6501 #ifdef RTE_ETHDEV_DEBUG_TX
6502 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6503 	if (qd == NULL)
6504 		return -ENODEV;
6505 #endif
6506 	if (*p->tx_descriptor_status == NULL)
6507 		return -ENOTSUP;
6508 	return (*p->tx_descriptor_status)(qd, offset);
6509 }
6510 
6511 /**
6512  * @internal
6513  * Helper routine for rte_eth_tx_burst().
6514  * Should be called before entry to the PMD's rte_eth_tx_bulk implementation.
6515  * Does necessary pre-processing - invokes Tx callbacks if any, etc.
6516  *
6517  * @param port_id
6518  *   The port identifier of the Ethernet device.
6519  * @param queue_id
6520  *   The index of the transmit queue through which output packets must be
6521  *   sent.
6522  * @param tx_pkts
6523  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6524  *   which contain the output packets.
6525  * @param nb_pkts
6526  *   The maximum number of packets to transmit.
6527  * @return
6528  *   The number of output packets to transmit.
6529  */
6530 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6531 	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6532 
6533 /**
6534  * Send a burst of output packets on a transmit queue of an Ethernet device.
6535  *
6536  * The rte_eth_tx_burst() function is invoked to transmit output packets
6537  * on the output queue *queue_id* of the Ethernet device designated by its
6538  * *port_id*.
6539  * The *nb_pkts* parameter is the number of packets to send which are
6540  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6541  * allocated from a pool created with rte_pktmbuf_pool_create().
6542  * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
6543  * up to the number of transmit descriptors available in the Tx ring of the
6544  * transmit queue.
6545  * For each packet to send, the rte_eth_tx_burst() function performs
6546  * the following operations:
6547  *
6548  * - Pick up the next available descriptor in the transmit ring.
6549  *
6550  * - Free the network buffer previously sent with that descriptor, if any.
6551  *
6552  * - Initialize the transmit descriptor with the information provided
6553  *   in the *rte_mbuf* data structure.
6554  *
6555  * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
6556  * the rte_eth_tx_burst() function uses several transmit descriptors
6557  * of the ring.
6558  *
6559  * The rte_eth_tx_burst() function returns the number of packets it
6560  * actually sent. A return value equal to *nb_pkts* means that all packets
6561  * have been sent, and this is likely to signify that other output packets
6562  * could be immediately transmitted again. Applications that implement a
6563  * "send as many packets to transmit as possible" policy can check this
6564  * specific case and keep invoking the rte_eth_tx_burst() function until
6565  * a value less than *nb_pkts* is returned.
6566  *
6567  * It is the responsibility of the rte_eth_tx_burst() function to
6568  * transparently free the memory buffers of packets previously sent.
6569  * This feature is driven by the *tx_free_thresh* value supplied to the
6570  * rte_eth_dev_configure() function at device configuration time.
6571  * When the number of free Tx descriptors drops below this threshold, the
6572  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
6573  * of those packets whose transmission was effectively completed.
6574  *
6575  * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
6576  * invoke this function concurrently on the same Tx queue without SW lock.
6577  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
6578  *
6579  * @see rte_eth_tx_prepare to perform some prior checks or adjustments
6580  * for offloads.
6581  *
6582  * @note This function must not modify mbufs (including packet data)
6583  * unless the refcnt is 1.
6584  * An exception is the bonding PMD, which does not have "Tx prepare" support;
6585  * in this case, mbufs may be modified.
6586  *
6587  * @param port_id
6588  *   The port identifier of the Ethernet device.
6589  * @param queue_id
6590  *   The index of the transmit queue through which output packets must be
6591  *   sent.
6592  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6593  *   to rte_eth_dev_configure().
6594  * @param tx_pkts
6595  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6596  *   which contain the output packets.
6597  * @param nb_pkts
6598  *   The maximum number of packets to transmit.
6599  * @return
6600  *   The number of output packets actually stored in transmit descriptors of
6601  *   the transmit ring. The return value can be less than the value of the
6602  *   *nb_pkts* parameter when the transmit ring is full or has been filled up.
6603  */
6604 static inline uint16_t
6605 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6606 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6607 {
6608 	struct rte_eth_fp_ops *p;
6609 	void *qd;
6610 
6611 #ifdef RTE_ETHDEV_DEBUG_TX
6612 	if (port_id >= RTE_MAX_ETHPORTS ||
6613 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6614 		RTE_ETHDEV_LOG_LINE(ERR,
6615 			"Invalid port_id=%u or queue_id=%u",
6616 			port_id, queue_id);
6617 		return 0;
6618 	}
6619 #endif
6620 
6621 	/* fetch pointer to queue data */
6622 	p = &rte_eth_fp_ops[port_id];
6623 	qd = p->txq.data[queue_id];
6624 
6625 #ifdef RTE_ETHDEV_DEBUG_TX
6626 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6627 
6628 	if (qd == NULL) {
6629 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6630 			queue_id, port_id);
6631 		return 0;
6632 	}
6633 #endif
6634 
6635 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6636 	{
6637 		void *cb;
6638 
6639 		/* rte_memory_order_release memory order was used when the
6640 		 * callback was inserted into the list.
6641 		 * Since there is a clear dependency between loading
6642 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6643 		 * not required.
6644 		 */
6645 		cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6646 				rte_memory_order_relaxed);
6647 		if (unlikely(cb != NULL))
6648 			nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6649 					tx_pkts, nb_pkts, cb);
6650 	}
6651 #endif
6652 
6653 	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6654 
6655 	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6656 	return nb_pkts;
6657 }
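
/*
 * Usage sketch (illustrative only): transmit a burst and free any packets the
 * driver could not accept, so they are not leaked. Retrying the remainder in
 * a loop is an equally valid policy.
 *
 *	uint16_t i, sent;
 *
 *	sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *	for (i = sent; i < nb_pkts; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */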
6658 
6659 /**
6660  * Process a burst of output packets on a transmit queue of an Ethernet device.
6661  *
6662  * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
6663  * transmitted on the output queue *queue_id* of the Ethernet device designated
6664  * by its *port_id*.
6665  * The *nb_pkts* parameter is the number of packets to be prepared which are
6666  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6667  * allocated from a pool created with rte_pktmbuf_pool_create().
6668  * For each packet to send, the rte_eth_tx_prepare() function performs
6669  * the following operations:
6670  *
6671  * - Check if the packet meets the device's requirements for Tx offloads.
6672  *
6673  * - Check limitations on the number of segments.
6674  *
6675  * - Check additional requirements when debug is enabled.
6676  *
6677  * - Update and/or reset required checksums when Tx offload is set for packet.
6678  *
6679  * Since this function can modify packet data, provided mbufs must be safely
6680  * writable (e.g. modified data cannot be in shared segment).
6681  *
6682  * The rte_eth_tx_prepare() function returns the number of packets ready to be
6683  * sent. A return value equal to *nb_pkts* means that all packets are valid and
6684  * ready to be sent, otherwise stops processing on the first invalid packet and
6685  * leaves the rest of the packets untouched.
6686  *
6687  * When this functionality is not implemented in the driver, all packets are
6688  * returned untouched.
6689  *
6690  * @param port_id
6691  *   The port identifier of the Ethernet device.
6692  *   The value must be a valid port ID.
6693  * @param queue_id
6694  *   The index of the transmit queue through which output packets must be
6695  *   sent.
6696  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6697  *   to rte_eth_dev_configure().
6698  * @param tx_pkts
6699  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6700  *   which contain the output packets.
6701  * @param nb_pkts
6702  *   The maximum number of packets to process.
6703  * @return
6704  *   The number of packets correct and ready to be sent. The return value can be
6705  *   less than the value of the *nb_pkts* parameter when a packet doesn't
6706  *   meet the device's requirements, with rte_errno set appropriately:
6707  *   - EINVAL: offload flags are not correctly set
6708  *   - ENOTSUP: the offload feature is not supported by the hardware
6709  *   - ENODEV: if *port_id* is invalid (with debug enabled only)
6710  */
6711 
6712 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6713 
6714 static inline uint16_t
6715 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6716 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6717 {
6718 	struct rte_eth_fp_ops *p;
6719 	void *qd;
6720 
6721 #ifdef RTE_ETHDEV_DEBUG_TX
6722 	if (port_id >= RTE_MAX_ETHPORTS ||
6723 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6724 		RTE_ETHDEV_LOG_LINE(ERR,
6725 			"Invalid port_id=%u or queue_id=%u",
6726 			port_id, queue_id);
6727 		rte_errno = ENODEV;
6728 		return 0;
6729 	}
6730 #endif
6731 
6732 	/* fetch pointer to queue data */
6733 	p = &rte_eth_fp_ops[port_id];
6734 	qd = p->txq.data[queue_id];
6735 
6736 #ifdef RTE_ETHDEV_DEBUG_TX
6737 	if (!rte_eth_dev_is_valid_port(port_id)) {
6738 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6739 		rte_errno = ENODEV;
6740 		return 0;
6741 	}
6742 	if (qd == NULL) {
6743 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6744 			queue_id, port_id);
6745 		rte_errno = EINVAL;
6746 		return 0;
6747 	}
6748 #endif
6749 
6750 	if (!p->tx_pkt_prepare)
6751 		return nb_pkts;
6752 
6753 	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6754 }
6755 
6756 #else
6757 
6758 /*
6759  * Native no-op operation for compilation targets which do not require any
6760  * preparation steps, and where a functional no-op would introduce an
6761  * unnecessary performance drop.
6762  *
6763  * It is generally not a good idea to turn this on globally, and it should not
6764  * be used if the behavior of Tx preparation can change.
6765  */
6766 
6767 static inline uint16_t
6768 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6769 		__rte_unused uint16_t queue_id,
6770 		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6771 {
6772 	return nb_pkts;
6773 }
6774 
6775 #endif
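
/*
 * Usage sketch (illustrative only): validate and fix up Tx offload metadata
 * with rte_eth_tx_prepare() and hand only the packets that passed on to
 * rte_eth_tx_burst(). handle_invalid_packets() is a hypothetical application
 * helper; rte_errno indicates why the first rejected packet failed.
 *
 *	uint16_t nb_prep, nb_sent;
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep != nb_pkts)
 *		handle_invalid_packets(&pkts[nb_prep], nb_pkts - nb_prep);
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */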
6776 
6777 /**
6778  * Send any packets queued up for transmission on a port and HW queue
6779  *
6780  * This causes an explicit flush of packets previously buffered via the
6781  * rte_eth_tx_buffer() function. It returns the number of packets successfully
6782  * sent to the NIC, and calls the error callback for any unsent packets. Unless
6783  * explicitly set up otherwise, the default callback simply frees the unsent
6784  * packets back to the owning mempool.
6785  *
6786  * @param port_id
6787  *   The port identifier of the Ethernet device.
6788  * @param queue_id
6789  *   The index of the transmit queue through which output packets must be
6790  *   sent.
6791  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6792  *   to rte_eth_dev_configure().
6793  * @param buffer
6794  *   Buffer of packets to be transmitted.
6795  * @return
6796  *   The number of packets successfully sent to the Ethernet device. The error
6797  *   callback is called for any packets which could not be sent.
6798  */
6799 static inline uint16_t
6800 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6801 		struct rte_eth_dev_tx_buffer *buffer)
6802 {
6803 	uint16_t sent;
6804 	uint16_t to_send = buffer->length;
6805 
6806 	if (to_send == 0)
6807 		return 0;
6808 
6809 	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6810 
6811 	buffer->length = 0;
6812 
6813 	/* All packets sent, or to be dealt with by callback below */
6814 	if (unlikely(sent != to_send))
6815 		buffer->error_callback(&buffer->pkts[sent],
6816 				       (uint16_t)(to_send - sent),
6817 				       buffer->error_userdata);
6818 
6819 	return sent;
6820 }
6821 
6822 /**
6823  * Buffer a single packet for future transmission on a port and queue
6824  *
6825  * This function takes a single mbuf/packet and buffers it for later
6826  * transmission on the particular port and queue specified. Once the buffer is
6827  * full of packets, an attempt will be made to transmit all the buffered
6828  * packets. In case of error, where not all packets can be transmitted, a
6829  * callback is called with the unsent packets as a parameter. If no callback
6830  * is explicitly set up, the unsent packets are just freed back to the owning
6831  * mempool. The function returns the number of packets actually sent, i.e.
6832  * 0 if no buffer flush occurred, otherwise the number of packets successfully
6833  * flushed.
6834  *
6835  * @param port_id
6836  *   The port identifier of the Ethernet device.
6837  * @param queue_id
6838  *   The index of the transmit queue through which output packets must be
6839  *   sent.
6840  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6841  *   to rte_eth_dev_configure().
6842  * @param buffer
6843  *   Buffer used to collect packets to be sent.
6844  * @param tx_pkt
6845  *   Pointer to the packet mbuf to be sent.
6846  * @return
6847  *   0 = packet has been buffered for later transmission
6848  *   N > 0 = packet has been buffered, and the buffer was subsequently flushed,
6849  *     causing N packets to be sent, and the error callback to be called for
6850  *     the rest.
6851  */
6852 static __rte_always_inline uint16_t
6853 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6854 		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6855 {
6856 	buffer->pkts[buffer->length++] = tx_pkt;
6857 	if (buffer->length < buffer->size)
6858 		return 0;
6859 
6860 	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6861 }
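
/*
 * Usage sketch (illustrative only): allocate and initialize a Tx buffer for
 * up to 32 packets, buffer packets as they are produced and flush what is
 * left over periodically. The buffer size and the rte_zmalloc_socket()
 * allocation are example choices.
 *
 *	struct rte_eth_dev_tx_buffer *txb;
 *
 *	txb = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *				 rte_eth_dev_socket_id(port_id));
 *	if (txb == NULL || rte_eth_tx_buffer_init(txb, 32) != 0)
 *		return;
 *
 *	rte_eth_tx_buffer(port_id, queue_id, txb, pkt);	// per packet
 *	...
 *	rte_eth_tx_buffer_flush(port_id, queue_id, txb);	// e.g. on a timer
 */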
6862 
6863 /**
6864  * @warning
6865  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6866  *
6867  * Recycle used mbufs from a transmit queue of an Ethernet device, and move
6868  * these mbufs into a mbuf ring for a receive queue of an Ethernet device.
6869  * This can bypass mempool path to save CPU cycles.
6870  *
6871  * The rte_eth_recycle_mbufs() function loops, with rte_eth_rx_burst() and
6872  * rte_eth_tx_burst() functions, freeing Tx used mbufs and replenishing Rx
6873  * descriptors. The number of recycled mbufs depends on how many the Rx mbuf ring
6874  * requests, constrained by the number of used mbufs available in the Tx mbuf ring.
6875  *
6876  * For each recycled mbuf, the rte_eth_recycle_mbufs() function performs the
6877  * following operations:
6878  *
6879  * - Copy used *rte_mbuf* buffer pointers from Tx mbuf ring into Rx mbuf ring.
6880  *
6881  * - Replenish the Rx descriptors with the recycling *rte_mbuf* mbufs freed
6882  *   from the Tx mbuf ring.
6883  *
6884  * This function splits the Rx and Tx paths with different callback functions. The
6885  * callback function recycle_tx_mbufs_reuse is for the Tx driver. The callback
6886  * function recycle_rx_descriptors_refill is for the Rx driver. rte_eth_recycle_mbufs()
6887  * can support the case that Rx Ethernet device is different from Tx Ethernet device.
6888  *
6889  * It is the responsibility of users to select the Rx/Tx queue pair to recycle
6890  * mbufs. Before calling this function, users must call the rte_eth_recycle_rxq_info_get
6891  * function to retrieve the selected Rx queue information.
6892  * @see rte_eth_recycle_rxq_info_get, struct rte_eth_recycle_rxq_info
6893  *
6894  * Currently, the rte_eth_recycle_mbufs() function can feed 1 Rx queue from
6895  * 2 Tx queues in the same thread. Do not pair the Rx queue and Tx queue across
6896  * different threads, in order to avoid memory corruption caused by concurrent rewriting.
6897  *
6898  * @param rx_port_id
6899  *   Port identifying the receive side.
6900  * @param rx_queue_id
6901  *   The index of the receive queue identifying the receive side.
6902  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
6903  *   to rte_eth_dev_configure().
6904  * @param tx_port_id
6905  *   Port identifying the transmit side.
6906  * @param tx_queue_id
6907  *   The index of the transmit queue identifying the transmit side.
6908  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6909  *   to rte_eth_dev_configure().
6910  * @param recycle_rxq_info
6911  *   A pointer to a structure of type *rte_eth_recycle_rxq_info* which contains
6912  *   the information of the Rx queue mbuf ring.
6913  * @return
6914  *   The number of recycled mbufs.
6915  */
6916 __rte_experimental
6917 static inline uint16_t
6918 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6919 		uint16_t tx_port_id, uint16_t tx_queue_id,
6920 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6921 {
6922 	struct rte_eth_fp_ops *p1, *p2;
6923 	void *qd1, *qd2;
6924 	uint16_t nb_mbufs;
6925 
6926 #ifdef RTE_ETHDEV_DEBUG_TX
6927 	if (tx_port_id >= RTE_MAX_ETHPORTS ||
6928 			tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6929 		RTE_ETHDEV_LOG_LINE(ERR,
6930 				"Invalid tx_port_id=%u or tx_queue_id=%u",
6931 				tx_port_id, tx_queue_id);
6932 		return 0;
6933 	}
6934 #endif
6935 
6936 	/* fetch pointer to Tx queue data */
6937 	p1 = &rte_eth_fp_ops[tx_port_id];
6938 	qd1 = p1->txq.data[tx_queue_id];
6939 
6940 #ifdef RTE_ETHDEV_DEBUG_TX
6941 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6942 
6943 	if (qd1 == NULL) {
6944 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6945 				tx_queue_id, tx_port_id);
6946 		return 0;
6947 	}
6948 #endif
6949 	if (p1->recycle_tx_mbufs_reuse == NULL)
6950 		return 0;
6951 
6952 #ifdef RTE_ETHDEV_DEBUG_RX
6953 	if (rx_port_id >= RTE_MAX_ETHPORTS ||
6954 			rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6955 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6956 				rx_port_id, rx_queue_id);
6957 		return 0;
6958 	}
6959 #endif
6960 
6961 	/* fetch pointer to Rx queue data */
6962 	p2 = &rte_eth_fp_ops[rx_port_id];
6963 	qd2 = p2->rxq.data[rx_queue_id];
6964 
6965 #ifdef RTE_ETHDEV_DEBUG_RX
6966 	RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6967 
6968 	if (qd2 == NULL) {
6969 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6970 				rx_queue_id, rx_port_id);
6971 		return 0;
6972 	}
6973 #endif
6974 	if (p2->recycle_rx_descriptors_refill == NULL)
6975 		return 0;
6976 
6977 	/* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6978 	 * into Rx mbuf ring.
6979 	 */
6980 	nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6981 
6982 	/* If no recycling mbufs, return 0. */
6983 	if (nb_mbufs == 0)
6984 		return 0;
6985 
6986 	/* Replenish the Rx descriptors with the recycled mbufs
6987 	 * placed into the Rx mbuf ring.
6988 	 */
6989 	p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6990 
6991 	return nb_mbufs;
6992 }
6993 
6994 /**
6995  * @warning
6996  * @b EXPERIMENTAL: this API may change without prior notice
6997  *
6998  * Get supported header protocols to split on Rx.
6999  *
7000  * When a packet type is announced to be split,
7001  * it *must* be supported by the PMD.
7002  * For instance, if eth-ipv4 and eth-ipv4-udp are announced,
7003  * the PMD must return the following packet types for these packets:
7004  * - Ether/IPv4             -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
7005  * - Ether/IPv4/UDP         -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP
7006  *
7007  * @param port_id
7008  *   The port identifier of the device.
7009  * @param[out] ptypes
7010  *   An array pointer to store supported protocol headers, allocated by caller.
7011  *   These ptypes are composed with RTE_PTYPE_*.
7012  * @param num
7013  *   Size of the array pointed to by param ptypes.
7014  * @return
7015  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
7016  *           only num entries will be filled into the ptypes array,
7017  *           but the full count of supported ptypes will be returned.
7018  *   - (-ENOTSUP) if header protocol is not supported by device.
7019  *   - (-ENODEV) if *port_id* invalid.
7020  *   - (-EINVAL) if bad parameter.
7021  */
7022 __rte_experimental
7023 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num)
7024 	__rte_warn_unused_result;
7025 
7026 /**
7027  * @warning
7028  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
7029  *
7030  * Get the number of used descriptors of a Tx queue.
7031  *
7032  * This function retrieves the number of used descriptors of a transmit queue.
7033  * Applications can use this API in the fast path to inspect Tx queue occupancy
7034  * and take appropriate actions based on the available free descriptors.
7035  * An example action could be implementing Random Early Discard (RED).
7036  *
7037  * Since it's a fast-path function, no check is performed on port_id and queue_id.
7038  * The caller must therefore ensure that the port is enabled
7039  * and the queue is configured and running.
7040  *
7041  * @param port_id
7042  *   The port identifier of the device.
7043  * @param queue_id
7044  *   The index of the transmit queue.
7045  *   The value must be in the range [0, nb_tx_queue - 1]
7046  *   previously supplied to rte_eth_dev_configure().
7047  * @return
7048  *   The number of used descriptors in the specific queue, or:
7049  *   - (-ENODEV) if *port_id* is invalid. Enabled only when RTE_ETHDEV_DEBUG_TX is enabled.
7050  *   - (-EINVAL) if *queue_id* is invalid. Enabled only when RTE_ETHDEV_DEBUG_TX is enabled.
7051  *   - (-ENOTSUP) if the device does not support this function.
7052  *
7053  * @note This function is designed for fast-path use.
7054  * @note There is no requirement to call this function before rte_eth_tx_burst() invocation.
7055  * @note Utilize this function exclusively when the caller needs to determine
7056  * the used queue count across all descriptors of a Tx queue.
7057  * If the use case only involves checking the status of a specific descriptor slot,
7058  * opt for rte_eth_tx_descriptor_status() instead.
7059  */
7060 __rte_experimental
7061 static inline int
7062 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
7063 {
7064 	struct rte_eth_fp_ops *fops;
7065 	void *qd;
7066 	int rc;
7067 
7068 #ifdef RTE_ETHDEV_DEBUG_TX
7069 	if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
7070 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
7071 		rc = -ENODEV;
7072 		goto out;
7073 	}
7074 
7075 	if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
7076 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7077 				    queue_id, port_id);
7078 		rc = -EINVAL;
7079 		goto out;
7080 	}
7081 #endif
7082 
7083 	/* Fetch pointer to Tx queue data */
7084 	fops = &rte_eth_fp_ops[port_id];
7085 	qd = fops->txq.data[queue_id];
7086 
7087 #ifdef RTE_ETHDEV_DEBUG_TX
7088 	if (qd == NULL) {
7089 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
7090 				    queue_id, port_id);
7091 		rc = -EINVAL;
7092 		goto out;
7093 	}
7094 #endif
7095 	if (fops->tx_queue_count == NULL) {
7096 		rc = -ENOTSUP;
7097 		goto out;
7098 	}
7099 
7100 	rc = fops->tx_queue_count(qd);
7101 
7102 out:
7103 	rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
7104 	return rc;
7105 }
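
/*
 * Usage sketch (illustrative only): use the Tx queue occupancy to apply a
 * simple early-drop policy before enqueueing another packet. The
 * tx_high_watermark threshold is a hypothetical application parameter.
 *
 *	int used = rte_eth_tx_queue_count(port_id, queue_id);
 *
 *	if (used >= 0 && (uint16_t)used > tx_high_watermark) {
 *		rte_pktmbuf_free(pkt);		// queue too full, drop early
 *		return;
 *	}
 *	rte_eth_tx_burst(port_id, queue_id, &pkt, 1);
 */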
7106 
7107 #ifdef __cplusplus
7108 }
7109 #endif
7110 
7111 #endif /* _RTE_ETHDEV_H_ */
7112