1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
8 /**
9  * @file
10  *
11  * RTE Ethernet Device API
12  *
13  * The Ethernet Device API is composed of two parts:
14  *
15  * - The application-oriented Ethernet API that includes functions to setup
16  *   an Ethernet device (configure it, setup its Rx and Tx queues and start it),
17  *   to get its MAC address, the speed and the status of its physical link,
18  *   to receive and to transmit packets, and so on.
19  *
20  * - The driver-oriented Ethernet API that exports functions allowing
21  *   an Ethernet Poll Mode Driver (PMD) to allocate an Ethernet device instance,
22  *   create memzone for HW rings and process registered callbacks, and so on.
23  *   PMDs should include ethdev_driver.h instead of this header.
24  *
25  * By default, all the functions of the Ethernet Device API exported by a PMD
26  * are lock-free functions that are assumed not to be invoked in parallel from
27  * different logical cores to work on the same target object.  For instance,
28  * the receive function of a PMD cannot be invoked in parallel on two logical
29  * cores to poll the same Rx queue [of the same port]. Of course, this function
30  * can be invoked in parallel by different logical cores on different Rx queues.
31  * It is the responsibility of the upper level application to enforce this rule.
32  *
33  * If needed, parallel accesses by multiple logical cores to shared queues
34  * shall be explicitly protected by dedicated inline lock-aware functions
35  * built on top of their corresponding lock-free functions of the PMD API.
36  *
37  * In all functions of the Ethernet API, the Ethernet device is
38  * designated by an integer >= 0 named the device port identifier.
39  *
40  * At the Ethernet driver level, Ethernet devices are represented by a generic
41  * data structure of type *rte_eth_dev*.
42  *
43  * Ethernet devices are dynamically registered during the PCI probing phase
44  * performed at EAL initialization time.
45  * When an Ethernet device is being probed, an *rte_eth_dev* structure and
46  * a new port identifier are allocated for that device. Then, the eth_dev_init()
47  * function supplied by the Ethernet driver matching the probed PCI
48  * device is invoked to properly initialize the device.
49  *
50  * The role of the device init function consists of resetting the hardware,
51  * checking access to Non-volatile Memory (NVM), reading the MAC address
52  * from NVM etc.
53  *
54  * If the device init operation is successful, the correspondence between
55  * the port identifier assigned to the new device and its associated
56  * *rte_eth_dev* structure is effectively registered.
57  * Otherwise, both the *rte_eth_dev* structure and the port identifier are
58  * freed.
59  *
60  * The functions exported by the application Ethernet API to setup a device
61  * designated by its port identifier must be invoked in the following order:
62  *     - rte_eth_dev_configure()
63  *     - rte_eth_tx_queue_setup()
64  *     - rte_eth_rx_queue_setup()
65  *     - rte_eth_dev_start()
66  *
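 * For illustration, a minimal sketch of this sequence is shown below
 * (the port ID, queue counts, descriptor numbers and the mempool *mb_pool*
 * are assumptions of the example, not requirements of the API;
 * error handling is omitted):
 *
 * @code{.c}
 * struct rte_eth_conf port_conf = {0};
 * uint16_t port_id = 0;
 *
 * rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 * rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id), NULL);
 * rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                        NULL, mb_pool);
 * rte_eth_dev_start(port_id);
 * @endcode
 *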
67  * Then, the network application can invoke, in any order, the functions
68  * exported by the Ethernet API to get the MAC address of a given device, to
69  * get the speed and the status of a device physical link, to receive/transmit
70  * [burst of] packets, and so on.
71  *
72  * If the application wants to change the configuration (i.e. call
73  * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
74  * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
75  * device and then do the reconfiguration before calling rte_eth_dev_start()
76  * again. The transmit and receive functions should not be invoked when the
77  * device or the queue is stopped.
78  *
79  * Please note that some configuration is not stored between calls to
80  * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
81  * be retained:
82  *
83  *     - MTU
84  *     - flow control settings
85  *     - receive mode configuration (promiscuous mode, all-multicast mode,
86  *       hardware checksum mode, RSS/VMDq settings etc.)
87  *     - VLAN filtering configuration
88  *     - default MAC address
89  *     - MAC addresses supplied to MAC address array
90  *     - flow director filtering mode (but not filtering rules)
91  *     - NIC queue statistics mappings
92  *
93  * The following configuration may be retained or not
94  * depending on the device capabilities:
95  *
96  *     - flow rules
97  *     - flow-related shared objects, e.g. indirect actions
98  *
99  * Any other configuration will not be stored and will need to be re-entered
100  * before a call to rte_eth_dev_start().
101  *
102  * Finally, a network application can close an Ethernet device by invoking the
103  * rte_eth_dev_close() function.
104  *
105  * Each function of the application Ethernet API invokes a specific function
106  * of the PMD that controls the target device designated by its port
107  * identifier.
108  * For this purpose, all device-specific functions of an Ethernet driver are
109  * supplied through a set of pointers contained in a generic structure of type
110  * *eth_dev_ops*.
111  * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev*
112  * structure by the device init function of the Ethernet driver, which is
113  * invoked during the PCI probing phase, as explained earlier.
114  *
115  * In other words, each function of the Ethernet API simply retrieves the
116  * *rte_eth_dev* structure associated with the device port identifier and
117  * performs an indirect invocation of the corresponding driver function
118  * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure.
119  *
120  * For performance reasons, the addresses of the burst-oriented Rx and Tx
121  * functions of the Ethernet driver are not contained in the *eth_dev_ops*
122  * structure. Instead, they are directly stored at the beginning of the
123  * *rte_eth_dev* structure to avoid an extra indirect memory access during
124  * their invocation.
125  *
126  * RTE Ethernet device drivers do not use interrupts for transmitting or
127  * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit
128  * functions to applications.
129  * Both receive and transmit functions are packet-burst oriented to minimize
130  * their cost per packet through the following optimizations:
131  *
132  * - Sharing among multiple packets the incompressible cost of the
133  *   invocation of receive/transmit functions.
134  *
135  * - Enabling receive/transmit functions to take advantage of burst-oriented
136  *   hardware features (L1 cache, prefetch instructions, NIC head/tail
137  *   registers) to minimize the number of CPU cycles per packet, for instance,
138  *   by avoiding useless read memory accesses to ring descriptors, or by
139  *   systematically using arrays of pointers that exactly fit L1 cache line
140  *   boundaries and sizes.
141  *
142  * The burst-oriented receive function does not provide any error notification,
143  * to avoid the corresponding overhead. As a hint, the upper-level application
144  * might check the status of the device link once the receive function of the
145  * driver has systematically returned 0 for a given number of tries.
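 *
 * A sketch of that hint (the queue ID, burst size and retry threshold are
 * illustrative assumptions of the example):
 *
 * @code{.c}
 * struct rte_mbuf *pkts[32];
 * struct rte_eth_link link;
 * unsigned int idle = 0;
 * uint16_t nb;
 *
 * for (;;) {
 *         nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *         if (nb > 0) {
 *                 idle = 0;
 *                 // process nb packets from pkts[]
 *         } else if (++idle == 1000000) {
 *                 rte_eth_link_get_nowait(port_id, &link);
 *                 // react if link.link_status == RTE_ETH_LINK_DOWN
 *                 idle = 0;
 *         }
 * }
 * @endcode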
146  */
147 
148 #include <stdint.h>
149 
150 /* Use this macro to check if LRO API is supported */
151 #define RTE_ETHDEV_HAS_LRO_SUPPORT
152 
153 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
154 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
155 #define RTE_ETHDEV_DEBUG_RX
156 #define RTE_ETHDEV_DEBUG_TX
157 #endif
158 
159 #include <rte_cman.h>
160 #include <rte_compat.h>
161 #include <rte_log.h>
162 #include <rte_interrupts.h>
163 #include <rte_dev.h>
164 #include <rte_devargs.h>
165 #include <rte_bitops.h>
166 #include <rte_errno.h>
167 #include <rte_common.h>
168 #include <rte_config.h>
169 #include <rte_power_intrinsics.h>
170 
171 #include "rte_ethdev_trace_fp.h"
172 #include "rte_dev_info.h"
173 
174 extern int rte_eth_dev_logtype;
175 #define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
176 
177 #define RTE_ETHDEV_LOG_LINE(level, ...) \
178 	RTE_LOG_LINE(level, ETHDEV, "" __VA_ARGS__)
179 
180 struct rte_mbuf;
181 
182 /**
183  * Initializes a device iterator.
184  *
185  * This iterator allows accessing a list of devices matching some devargs.
186  *
187  * @param iter
188  *   Device iterator handle initialized by the function.
189  *   The fields bus_str and cls_str might be dynamically allocated,
190  *   and could be freed by calling rte_eth_iterator_cleanup().
191  *
192  * @param devargs
193  *   Device description string.
194  *
195  * @return
196  *   0 on successful initialization, negative otherwise.
197  */
198 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
199 
200 /**
201  * Iterates on devices with devargs filter.
202  * The ownership is not checked.
203  *
204  * The next port ID is returned, and the iterator is updated.
205  *
206  * @param iter
207  *   Device iterator handle initialized by rte_eth_iterator_init().
208  *   The fields bus_str and cls_str might be freed, via rte_eth_iterator_cleanup(),
209  *   when no more port is found.
210  *
211  * @return
212  *   A port ID if found, RTE_MAX_ETHPORTS otherwise.
213  */
214 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
215 
216 /**
217  * Free some allocated fields of the iterator.
218  *
219  * This function is automatically called by rte_eth_iterator_next()
220  * on the last iteration (i.e. when no more matching port is found).
221  *
222  * It is safe to call this function twice; it will do nothing more.
223  *
224  * @param iter
225  *   Device iterator handle initialized by rte_eth_iterator_init().
226  *   The fields bus_str and cls_str are freed if needed.
227  */
228 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
229 
230 /**
231  * Macro to iterate over all ethdev ports matching some devargs.
232  *
233  * If a break is done before the end of the loop,
234  * the function rte_eth_iterator_cleanup() must be called.
235  *
236  * @param id
237  *   Iterated port ID of type uint16_t.
238  * @param devargs
239  *   Device parameters input as string of type char*.
240  * @param iter
241  *   Iterator handle of type struct rte_dev_iterator, used internally.
242  */
243 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
244 	for (rte_eth_iterator_init(iter, devargs), \
245 	     id = rte_eth_iterator_next(iter); \
246 	     id != RTE_MAX_ETHPORTS; \
247 	     id = rte_eth_iterator_next(iter))
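
/*
 * Usage sketch for RTE_ETH_FOREACH_MATCHING_DEV; the devargs string and the
 * some_condition() helper are illustrative assumptions, and
 * rte_eth_iterator_cleanup() is needed only because the loop may exit early:
 *
 *	struct rte_dev_iterator iter;
 *	uint16_t port_id;
 *
 *	RTE_ETH_FOREACH_MATCHING_DEV(port_id, "class=eth", &iter) {
 *		if (some_condition(port_id)) {
 *			rte_eth_iterator_cleanup(&iter);
 *			break;
 *		}
 *	}
 */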
248 
249 /**
250  * A structure used to retrieve statistics for an Ethernet port.
251  * Not all statistics fields in struct rte_eth_stats are supported
252  * by every type of network interface card (NIC). If a statistics
253  * field is not supported, its value is 0.
254  * All byte-related statistics do not include Ethernet FCS regardless
255  * of whether these bytes have been delivered to the application
256  * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
257  */
258 struct rte_eth_stats {
259 	uint64_t ipackets;  /**< Total number of successfully received packets. */
260 	uint64_t opackets;  /**< Total number of successfully transmitted packets.*/
261 	uint64_t ibytes;    /**< Total number of successfully received bytes. */
262 	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
263 	/**
264 	 * Total number of Rx packets dropped by the HW
265 	 * because there are no available buffers (i.e. Rx queues are full).
266 	 */
267 	uint64_t imissed;
268 	uint64_t ierrors;   /**< Total number of erroneous received packets. */
269 	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
270 	uint64_t rx_nombuf; /**< Total number of Rx mbuf allocation failures. */
271 	/* Queue stats are limited to max 256 queues */
272 	/** Total number of queue Rx packets. */
273 	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
274 	/** Total number of queue Tx packets. */
275 	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
276 	/** Total number of successfully received queue bytes. */
277 	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
278 	/** Total number of successfully transmitted queue bytes. */
279 	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
280 	/** Total number of queue packets received that are dropped. */
281 	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
282 };
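
/*
 * These counters are typically read with rte_eth_stats_get(), e.g.
 * (port 0 is an assumption of the example):
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(0, &stats) == 0)
 *		printf("rx=%" PRIu64 " missed=%" PRIu64 "\n",
 *		       stats.ipackets, stats.imissed);
 */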
283 
284 /**@{@name Link speed capabilities
285  * Device supported speeds bitmap flags
286  */
287 #define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
288 #define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
289 #define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
290 #define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
291 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
292 #define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
293 #define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
294 #define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
295 #define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
296 #define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
297 #define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
298 #define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
299 #define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
300 #define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
301 #define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
302 #define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
303 #define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
304 #define RTE_ETH_LINK_SPEED_400G    RTE_BIT32(16) /**< 400 Gbps */
305 /**@}*/
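
/*
 * These flags populate the link_speeds bitmap of struct rte_eth_conf.
 * A sketch of requesting a fixed 10 Gbps link instead of autonegotiation
 * (whether a fixed speed is honoured depends on the driver):
 *
 *	struct rte_eth_conf conf = {0};
 *
 *	conf.link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
 */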
306 
307 /**@{@name Link speed
308  * Ethernet numeric link speeds in Mbps
309  */
310 #define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
311 #define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
312 #define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
313 #define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
314 #define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
315 #define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
316 #define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
317 #define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
318 #define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
319 #define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
320 #define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
321 #define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
322 #define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
323 #define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
324 #define RTE_ETH_SPEED_NUM_400G    400000 /**< 400 Gbps */
325 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
326 /**@}*/
327 
328 /**
329  * A structure used to retrieve link-level information of an Ethernet port.
330  */
331 struct rte_eth_link {
332 	union {
333 		RTE_ATOMIC(uint64_t) val64; /**< used for atomic64 read/write */
334 		__extension__
335 		struct {
336 			uint32_t link_speed;	    /**< RTE_ETH_SPEED_NUM_ */
337 			uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
338 			uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
339 			uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
340 		};
341 	};
342 };
343 
344 /**@{@name Link negotiation
345  * Constants used in link management.
346  */
347 #define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
348 #define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
349 #define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
350 #define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
351 #define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
352 #define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
353 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
354 /**@}*/
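
/*
 * A sketch of reading the link status through this structure and formatting
 * it with rte_eth_link_to_str() (port 0 is an assumption of the example):
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *
 *	if (rte_eth_link_get_nowait(0, &link) == 0) {
 *		rte_eth_link_to_str(text, sizeof(text), &link);
 *		puts(text);
 *	}
 */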
355 
356 /**
357  * A structure used to configure the ring threshold registers of an Rx/Tx
358  * queue for an Ethernet port.
359  */
360 struct rte_eth_thresh {
361 	uint8_t pthresh; /**< Ring prefetch threshold. */
362 	uint8_t hthresh; /**< Ring host threshold. */
363 	uint8_t wthresh; /**< Ring writeback threshold. */
364 };
365 
366 /**@{@name Multi-queue mode
367  * @see rte_eth_conf.rxmode.mq_mode.
368  */
369 #define RTE_ETH_MQ_RX_RSS_FLAG  RTE_BIT32(0) /**< Enable RSS. @see rte_eth_rss_conf */
370 #define RTE_ETH_MQ_RX_DCB_FLAG  RTE_BIT32(1) /**< Enable DCB. */
371 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2) /**< Enable VMDq. */
372 /**@}*/
373 
374 /**
375  *  A set of values to identify what method is to be used to route
376  *  packets to multiple queues.
377  */
378 enum rte_eth_rx_mq_mode {
379 	/** None of DCB, RSS or VMDq mode */
380 	RTE_ETH_MQ_RX_NONE = 0,
381 
382 	/** For Rx side, only RSS is on */
383 	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
384 	/** For Rx side, only DCB is on. */
385 	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
386 	/** Both DCB and RSS enabled */
387 	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
388 
389 	/** Only VMDq, no RSS nor DCB */
390 	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
391 	/** RSS mode with VMDq */
392 	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
393 	/** Use VMDq+DCB to route traffic to queues */
394 	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
395 	/** RSS, DCB and VMDq all enabled */
396 	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
397 				 RTE_ETH_MQ_RX_VMDQ_FLAG,
398 };
399 
400 /**
401  * A set of values to identify what method is to be used to transmit
402  * packets using multi-TCs.
403  */
404 enum rte_eth_tx_mq_mode {
405 	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
406 	RTE_ETH_MQ_TX_DCB,          /**< For Tx side, only DCB is on. */
407 	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For Tx side, both DCB and VT are on. */
408 	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
409 };
410 
411 /**
412  * A structure used to configure the Rx features of an Ethernet port.
413  */
414 struct rte_eth_rxmode {
415 	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
416 	enum rte_eth_rx_mq_mode mq_mode;
417 	uint32_t mtu;  /**< Requested MTU. */
418 	/** Maximum allowed size of LRO aggregated packet. */
419 	uint32_t max_lro_pkt_size;
420 	/**
421 	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
422 	 * Only offloads reported in the rx_offload_capa field of the
423 	 * rte_eth_dev_info structure are allowed to be set.
424 	 */
425 	uint64_t offloads;
426 
427 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
428 	void *reserved_ptrs[2];   /**< Reserved for future fields */
429 };
430 
431 /**
432  * VLAN types to indicate if it is for single VLAN, inner VLAN or outer VLAN.
433  * Note that single VLAN is treated the same as inner VLAN.
434  */
435 enum rte_vlan_type {
436 	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
437 	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
438 	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
439 	RTE_ETH_VLAN_TYPE_MAX,
440 };
441 
442 /**
443  * A structure used to describe a VLAN filter.
444  * If the bit corresponding to a VID is set, such VID is on.
445  */
446 struct rte_vlan_filter_conf {
447 	uint64_t ids[64];
448 };
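
/*
 * Each of the 64 words covers 64 consecutive VLAN IDs, so the bit for a
 * given VID can be set as in this sketch (VID 100 is an arbitrary example):
 *
 *	struct rte_vlan_filter_conf vfc = {0};
 *	uint16_t vid = 100;
 *
 *	vfc.ids[vid / 64] |= UINT64_C(1) << (vid % 64);
 */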
449 
450 /**
451  * Hash function types.
452  */
453 enum rte_eth_hash_function {
454 	/** DEFAULT means driver decides which hash algorithm to pick. */
455 	RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
456 	RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
457 	RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
458 	/**
459 	 * Symmetric Toeplitz: src and dst are replaced by
460 	 * xor(src, dst). For the case with src/dst only,
461 	 * the src or dst address is XORed with a zero pair.
462 	 */
463 	RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
464 	/**
465 	 * Symmetric Toeplitz: L3 and L4 fields are sorted prior to
466 	 * the hash function.
467 	 *  If src_ip > dst_ip, swap src_ip and dst_ip.
468 	 *  If src_port > dst_port, swap src_port and dst_port.
469 	 */
470 	RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT,
471 	RTE_ETH_HASH_FUNCTION_MAX,
472 };
473 
474 #define RTE_ETH_HASH_ALGO_TO_CAPA(x) RTE_BIT32(x)
475 #define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
476 
477 /**
478  * A structure used to configure the Receive Side Scaling (RSS) feature
479  * of an Ethernet port.
480  */
481 struct rte_eth_rss_conf {
482 	/**
483 	 * In rte_eth_dev_rss_hash_conf_get(), the *rss_key_len* should be
484 	 * greater than or equal to the *hash_key_size* obtained from the
485 	 * rte_eth_dev_info_get() API, and the *rss_key* should contain at least
486 	 * *hash_key_size* bytes. If these requirements are not met, the query
487 	 * result is unreliable even if the operation returns success.
488 	 *
489 	 * In rte_eth_dev_rss_hash_update() or rte_eth_dev_configure(), if
490 	 * *rss_key* is not NULL, the *rss_key_len* indicates the length of the
491 	 * *rss_key* in bytes and it should be equal to *hash_key_size*.
492 	 * If *rss_key* is NULL, drivers are free to use a random or a default key.
493 	 */
494 	uint8_t *rss_key;
495 	uint8_t rss_key_len; /**< hash key length in bytes. */
496 	/**
497 	 * Indicates the type of packets or the specific part of packets to
498 	 * which RSS hashing is to be applied.
499 	 */
500 	uint64_t rss_hf;
501 	enum rte_eth_hash_function algorithm;	/**< Hash algorithm. */
502 };
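
/*
 * A runtime update sketch using rte_eth_dev_rss_hash_update(); port 0 and
 * the NULL key (letting the driver keep or choose a key) are assumptions
 * of the example:
 *
 *	struct rte_eth_rss_conf rss_conf = {
 *		.rss_key = NULL,
 *		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *		.algorithm = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *	};
 *
 *	rte_eth_dev_rss_hash_update(0, &rss_conf);
 */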
503 
504 /*
505  * A packet can be identified by hardware as different flow types. Different
506  * NIC hardware may support different flow types.
507  * Basically, the NIC hardware identifies the flow type as deep in the protocol
508  * stack as possible, and exclusively. For example, if a packet is identified as
509  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be reported as any other flow type,
510  * even though it is also an actual IPv4 packet.
511  */
512 #define RTE_ETH_FLOW_UNKNOWN             0
513 #define RTE_ETH_FLOW_RAW                 1
514 #define RTE_ETH_FLOW_IPV4                2
515 #define RTE_ETH_FLOW_FRAG_IPV4           3
516 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
517 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
518 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
519 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
520 #define RTE_ETH_FLOW_IPV6                8
521 #define RTE_ETH_FLOW_FRAG_IPV6           9
522 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
523 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
524 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
525 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
526 #define RTE_ETH_FLOW_L2_PAYLOAD         14
527 #define RTE_ETH_FLOW_IPV6_EX            15
528 #define RTE_ETH_FLOW_IPV6_TCP_EX        16
529 #define RTE_ETH_FLOW_IPV6_UDP_EX        17
530 /** Consider device port number as a flow differentiator */
531 #define RTE_ETH_FLOW_PORT               18
532 #define RTE_ETH_FLOW_VXLAN              19 /**< VXLAN protocol based flow */
533 #define RTE_ETH_FLOW_GENEVE             20 /**< GENEVE protocol based flow */
534 #define RTE_ETH_FLOW_NVGRE              21 /**< NVGRE protocol based flow */
535 #define RTE_ETH_FLOW_VXLAN_GPE          22 /**< VXLAN-GPE protocol based flow */
536 #define RTE_ETH_FLOW_GTPU               23 /**< GTPU protocol based flow */
537 #define RTE_ETH_FLOW_MAX                24
538 
539 /*
540  * The macros below are defined for RSS offload types; they can be used to
541  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
542  */
543 #define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
544 #define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
545 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
546 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
547 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
548 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
549 #define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
550 #define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
551 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
552 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
553 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
554 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
555 #define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
556 #define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
557 #define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
558 #define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
559 #define RTE_ETH_RSS_PORT               RTE_BIT64(18)
560 #define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
561 #define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
562 #define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
563 #define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
564 #define RTE_ETH_RSS_ETH                RTE_BIT64(24)
565 #define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
566 #define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
567 #define RTE_ETH_RSS_ESP                RTE_BIT64(27)
568 #define RTE_ETH_RSS_AH                 RTE_BIT64(28)
569 #define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
570 #define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
571 #define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
572 #define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
573 #define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
574 #define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
575 
576 /**
577  * RTE_ETH_RSS_L4_CHKSUM works on the checksum field of any L4 header.
578  * It is similar to RTE_ETH_RSS_PORT in that neither specifies a particular type of
579  * L4 header. This macro is defined to replace some specific L4 (TCP/UDP/SCTP)
580  * checksum type for constructing the use of RSS offload bits.
581  *
582  * For the above reason, some old APIs (and configuration) don't support
583  * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
584  *
585  * If the checksum is not used in a UDP header,
586  * the reserved value 0 is taken as input for the hash function.
587  */
588 #define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
589 
590 #define RTE_ETH_RSS_L2TPV2             RTE_BIT64(36)
591 #define RTE_ETH_RSS_IPV6_FLOW_LABEL    RTE_BIT64(37)
592 
593 /*
594  * We use the following macros to combine with above RTE_ETH_RSS_* for
595  * more specific input set selection. These bits are defined starting
596  * from the high end of the 64 bits.
597  * Note: if the RTE_ETH_RSS_* above are used without SRC/DST_ONLY, both SRC
598  * and DST are taken into account. If SRC_ONLY and DST_ONLY of the same
599  * level are used simultaneously, it is the same as if neither of them
600  * had been added.
601  */
602 #define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
603 #define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
604 #define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
605 #define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
606 #define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
607 #define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
608 
609 /*
610  * Select only an IPv6 address prefix as the RSS input set, according to
611  * RFC 6052 (https://tools.ietf.org/html/rfc6052).
612  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
613  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
614  */
615 #define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
616 #define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
617 #define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
618 #define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
619 #define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
620 #define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)
621 
622 /*
623  * Use the following macros to combine with the above layers
624  * to choose inner and outer layers or both for RSS computation.
625  * Bits 50 and 51 are reserved for this.
626  */
627 
628 /**
629  * level 0, requests the default behavior.
630  * Depending on the packet type, it can mean outermost, innermost,
631  * anything in between or even no RSS.
632  * It basically stands for the innermost encapsulation level RSS
633  * can be performed on according to PMD and device capabilities.
634  */
635 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (UINT64_C(0) << 50)
636 
637 /**
638  * level 1, requests RSS to be performed on the outermost packet
639  * encapsulation level.
640  */
641 #define RTE_ETH_RSS_LEVEL_OUTERMOST    (UINT64_C(1) << 50)
642 
643 /**
644  * level 2, requests RSS to be performed on the specified inner packet
645  * encapsulation level, from outermost to innermost (lower to higher values).
646  */
647 #define RTE_ETH_RSS_LEVEL_INNERMOST    (UINT64_C(2) << 50)
648 #define RTE_ETH_RSS_LEVEL_MASK         (UINT64_C(3) << 50)
649 
650 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
651 
652 /**
653  * For an input set change of the hash filter, if SRC_ONLY and DST_ONLY of
654  * the same level are used simultaneously, it is the same as if
655  * neither of them had been added.
656  *
657  * @param rss_hf
658  *   RSS types with SRC/DST_ONLY.
659  * @return
660  *   RSS types.
661  */
662 static inline uint64_t
663 rte_eth_rss_hf_refine(uint64_t rss_hf)
664 {
665 	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
666 		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
667 
668 	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
669 		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
670 
671 	return rss_hf;
672 }
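
/*
 * For example, per the rule above, requesting both L3_SRC_ONLY and
 * L3_DST_ONLY collapses back to hashing on both addresses:
 *
 *	uint64_t hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
 *		      RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY;
 *
 *	hf = rte_eth_rss_hf_refine(hf);
 *	// hf is now RTE_ETH_RSS_NONFRAG_IPV4_TCP only
 */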
673 
674 #define RTE_ETH_RSS_IPV6_PRE32 ( \
675 		RTE_ETH_RSS_IPV6 | \
676 		RTE_ETH_RSS_L3_PRE32)
677 
678 #define RTE_ETH_RSS_IPV6_PRE40 ( \
679 		RTE_ETH_RSS_IPV6 | \
680 		RTE_ETH_RSS_L3_PRE40)
681 
682 #define RTE_ETH_RSS_IPV6_PRE48 ( \
683 		RTE_ETH_RSS_IPV6 | \
684 		RTE_ETH_RSS_L3_PRE48)
685 
686 #define RTE_ETH_RSS_IPV6_PRE56 ( \
687 		RTE_ETH_RSS_IPV6 | \
688 		RTE_ETH_RSS_L3_PRE56)
689 
690 #define RTE_ETH_RSS_IPV6_PRE64 ( \
691 		RTE_ETH_RSS_IPV6 | \
692 		RTE_ETH_RSS_L3_PRE64)
693 
694 #define RTE_ETH_RSS_IPV6_PRE96 ( \
695 		RTE_ETH_RSS_IPV6 | \
696 		RTE_ETH_RSS_L3_PRE96)
697 
698 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
699 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
700 		RTE_ETH_RSS_L3_PRE32)
701 
702 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
703 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
704 		RTE_ETH_RSS_L3_PRE40)
705 
706 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
707 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
708 		RTE_ETH_RSS_L3_PRE48)
709 
710 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
711 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
712 		RTE_ETH_RSS_L3_PRE56)
713 
714 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
715 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
716 		RTE_ETH_RSS_L3_PRE64)
717 
718 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
719 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
720 		RTE_ETH_RSS_L3_PRE96)
721 
722 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
723 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
724 		RTE_ETH_RSS_L3_PRE32)
725 
726 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
727 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
728 		RTE_ETH_RSS_L3_PRE40)
729 
730 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
731 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
732 		RTE_ETH_RSS_L3_PRE48)
733 
734 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
735 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
736 		RTE_ETH_RSS_L3_PRE56)
737 
738 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
739 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
740 		RTE_ETH_RSS_L3_PRE64)
741 
742 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
743 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
744 		RTE_ETH_RSS_L3_PRE96)
745 
746 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
747 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
748 		RTE_ETH_RSS_L3_PRE32)
749 
750 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
751 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
752 		RTE_ETH_RSS_L3_PRE40)
753 
754 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
755 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
756 		RTE_ETH_RSS_L3_PRE48)
757 
758 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
759 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
760 		RTE_ETH_RSS_L3_PRE56)
761 
762 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
763 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
764 		RTE_ETH_RSS_L3_PRE64)
765 
766 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
767 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
768 		RTE_ETH_RSS_L3_PRE96)
769 
770 #define RTE_ETH_RSS_IP ( \
771 	RTE_ETH_RSS_IPV4 | \
772 	RTE_ETH_RSS_FRAG_IPV4 | \
773 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
774 	RTE_ETH_RSS_IPV6 | \
775 	RTE_ETH_RSS_FRAG_IPV6 | \
776 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
777 	RTE_ETH_RSS_IPV6_EX)
778 
779 #define RTE_ETH_RSS_UDP ( \
780 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
781 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
782 	RTE_ETH_RSS_IPV6_UDP_EX)
783 
784 #define RTE_ETH_RSS_TCP ( \
785 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
786 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
787 	RTE_ETH_RSS_IPV6_TCP_EX)
788 
789 #define RTE_ETH_RSS_SCTP ( \
790 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
791 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
792 
793 #define RTE_ETH_RSS_TUNNEL ( \
794 	RTE_ETH_RSS_VXLAN  | \
795 	RTE_ETH_RSS_GENEVE | \
796 	RTE_ETH_RSS_NVGRE)
797 
798 #define RTE_ETH_RSS_VLAN ( \
799 	RTE_ETH_RSS_S_VLAN  | \
800 	RTE_ETH_RSS_C_VLAN)
801 
802 /** Mask of valid RSS hash protocols */
803 #define RTE_ETH_RSS_PROTO_MASK ( \
804 	RTE_ETH_RSS_IPV4 | \
805 	RTE_ETH_RSS_FRAG_IPV4 | \
806 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
807 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
808 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
809 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
810 	RTE_ETH_RSS_IPV6 | \
811 	RTE_ETH_RSS_FRAG_IPV6 | \
812 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
813 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
814 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
815 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
816 	RTE_ETH_RSS_L2_PAYLOAD | \
817 	RTE_ETH_RSS_IPV6_EX | \
818 	RTE_ETH_RSS_IPV6_TCP_EX | \
819 	RTE_ETH_RSS_IPV6_UDP_EX | \
820 	RTE_ETH_RSS_PORT  | \
821 	RTE_ETH_RSS_VXLAN | \
822 	RTE_ETH_RSS_GENEVE | \
823 	RTE_ETH_RSS_NVGRE | \
824 	RTE_ETH_RSS_MPLS)
825 
826 /*
827  * Definitions used for redirection table entry size.
828  * Some RSS RETA sizes may not be supported by some drivers; check the
829  * documentation or the description of the relevant functions for more details.
830  */
831 #define RTE_ETH_RSS_RETA_SIZE_64  64
832 #define RTE_ETH_RSS_RETA_SIZE_128 128
833 #define RTE_ETH_RSS_RETA_SIZE_256 256
834 #define RTE_ETH_RSS_RETA_SIZE_512 512
835 #define RTE_ETH_RETA_GROUP_SIZE   64
836 
837 /**@{@name VMDq and DCB maximums */
838 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
839 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
840 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
841 #define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
842 /**@}*/
843 
844 /**@{@name DCB capabilities */
845 #define RTE_ETH_DCB_PG_SUPPORT      RTE_BIT32(0) /**< Priority Group(ETS) support. */
846 #define RTE_ETH_DCB_PFC_SUPPORT     RTE_BIT32(1) /**< Priority Flow Control support. */
847 /**@}*/
848 
849 /**@{@name VLAN offload bits */
850 #define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
851 #define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
852 #define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
853 #define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
854 
855 #define RTE_ETH_VLAN_STRIP_MASK      0x0001 /**< VLAN Strip  setting mask */
856 #define RTE_ETH_VLAN_FILTER_MASK     0x0002 /**< VLAN Filter  setting mask*/
857 #define RTE_ETH_VLAN_EXTEND_MASK     0x0004 /**< VLAN Extend  setting mask*/
858 #define RTE_ETH_QINQ_STRIP_MASK      0x0008 /**< QINQ Strip  setting mask */
859 #define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits*/
860 /**@}*/
861 
862 /* Definitions used for receive MAC address */
863 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
864 
865 /* Definitions used for unicast hash */
866 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
867 
868 /**@{@name VMDq Rx mode
869  * @see rte_eth_vmdq_rx_conf.rx_mode
870  */
871 /** Accept untagged packets. */
872 #define RTE_ETH_VMDQ_ACCEPT_UNTAG      RTE_BIT32(0)
873 /** Accept packets in multicast table. */
874 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC    RTE_BIT32(1)
875 /** Accept packets in unicast table. */
876 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC    RTE_BIT32(2)
877 /** Accept broadcast packets. */
878 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST  RTE_BIT32(3)
879 /** Multicast promiscuous. */
880 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST  RTE_BIT32(4)
881 /**@}*/
882 
883 /**
884  * A structure used to configure 64 entries of Redirection Table of the
885  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
886  * more than 64 entries when supported by the hardware, an array of this
887  * structure is needed.
888  */
889 struct rte_eth_rss_reta_entry64 {
890 	/** Mask bits indicate which entries need to be updated/queried. */
891 	uint64_t mask;
892 	/** Group of 64 redirection table entries. */
893 	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
894 };
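
/*
 * A sketch of spreading a 128-entry redirection table over two Rx queues
 * with rte_eth_dev_rss_reta_update(); the port ID, RETA size and queue
 * count are assumptions of the example (the real size comes from
 * rte_eth_dev_info_get()):
 *
 *	struct rte_eth_rss_reta_entry64 reta[2] = {0};
 *	unsigned int i;
 *
 *	for (i = 0; i < 128; i++) {
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			UINT64_C(1) << (i % RTE_ETH_RETA_GROUP_SIZE);
 *		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] = i % 2;
 *	}
 *	rte_eth_dev_rss_reta_update(0, reta, 128);
 */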
895 
896 /**
897  * This enum indicates the possible number of traffic classes
898  * in DCB configurations
899  */
900 enum rte_eth_nb_tcs {
901 	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
902 	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
903 };
904 
905 /**
906  * This enum indicates the possible number of queue pools
907  * in VMDq configurations.
908  */
909 enum rte_eth_nb_pools {
910 	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
911 	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
912 	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
913 	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
914 };
915 
916 /* This structure may be extended in future. */
917 struct rte_eth_dcb_rx_conf {
918 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
919 	/** Traffic class each UP mapped to. */
920 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
921 };
922 
923 struct rte_eth_vmdq_dcb_tx_conf {
924 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
925 	/** Traffic class each UP mapped to. */
926 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
927 };
928 
929 struct rte_eth_dcb_tx_conf {
930 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
931 	/** Traffic class each UP mapped to. */
932 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
933 };
934 
935 struct rte_eth_vmdq_tx_conf {
936 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
937 };
938 
939 /**
940  * A structure used to configure the VMDq+DCB feature
941  * of an Ethernet port.
942  *
943  * Using this feature, packets are routed to a pool of queues, based
944  * on the VLAN ID in the VLAN tag, and then to a specific queue within
945  * that pool, using the user priority VLAN tag field.
946  *
947  * A default pool may be used, if desired, to route all traffic which
948  * does not match the VLAN filter rules.
949  */
950 struct rte_eth_vmdq_dcb_conf {
951 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
952 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
953 	uint8_t default_pool; /**< The default pool, if applicable */
954 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
955 	struct {
956 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
957 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
958 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
959 	/** Selects a queue in a pool */
960 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
961 };
962 
963 /**
964  * A structure used to configure the VMDq feature of an Ethernet port when
965  * not combined with the DCB feature.
966  *
967  * Using this feature, packets are routed to a pool of queues. By default,
968  * the pool selection is based on the MAC address and the VLAN ID in the
969  * VLAN tag, as specified in the pool_map array.
970  * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
971  * selection using only the MAC address. MAC address to pool mapping is done
972  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
973  * corresponding to the pool ID.
974  *
975  * Queue selection within the selected pool will be done using RSS when
976  * it is enabled, or will revert to the first queue of the pool if not.
977  *
978  * A default pool may be used, if desired, to route all traffic which
979  * does not match the VLAN filter rules or any pool MAC address.
980  */
981 struct rte_eth_vmdq_rx_conf {
982 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
983 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
984 	uint8_t default_pool; /**< The default pool, if applicable */
985 	uint8_t enable_loop_back; /**< Enable VT loop back */
986 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
987 	uint32_t rx_mode; /**< Flags from RTE_ETH_VMDQ_ACCEPT_* */
988 	struct {
989 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
990 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
991 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
992 };
993 
994 /**
995  * A structure used to configure the Tx features of an Ethernet port.
996  */
997 struct rte_eth_txmode {
998 	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
999 	/**
1000 	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
1001 	 * Only offloads reported in the tx_offload_capa field of the
1002 	 * rte_eth_dev_info structure are allowed to be set.
1003 	 */
1004 	uint64_t offloads;
1005 
1006 	uint16_t pvid;
1007 	__extension__
1008 	uint8_t /** If set, reject sending out tagged pkts */
1009 		hw_vlan_reject_tagged : 1,
1010 		/** If set, reject sending out untagged pkts */
1011 		hw_vlan_reject_untagged : 1,
1012 		/** If set, enable port based VLAN insertion */
1013 		hw_vlan_insert_pvid : 1;
1014 
1015 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1016 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1017 };
1018 
1019 /**
1020  * @warning
1021  * @b EXPERIMENTAL: this structure may change without prior notice.
1022  *
1023  * A structure used to configure an Rx packet segment to split.
1024  *
1025  * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field,
1026  * the PMD will split the received packets into multiple segments
1027  * according to the specification in the description array:
1028  *
1029  * - The first network buffer will be allocated from the memory pool,
1030  *   specified in the first array element, the second buffer, from the
1031  *   pool in the second element, and so on.
1032  *
1033  * - The proto_hdrs in the elements define the split position of
1034  *   received packets.
1035  *
1036  * - The offsets from the segment description elements specify
1037  *   the data offset from the beginning of the buffer. For the first mbuf,
1038  *   RTE_PKTMBUF_HEADROOM is added to the segment offset.
1039  *
1040  * - The lengths in the elements define the maximal amount of data
1041  *   received into each segment. Receiving starts by filling up the
1042  *   first mbuf data buffer up to the specified length. If there is
1043  *   data remaining (the packet is longer than the buffer in the first
1044  *   mbuf), the following data will be pushed to the next segment
1045  *   up to its own length, and so on.
1046  *
1047  * - If the length in the segment description element is zero
1048  *   the actual buffer size will be deduced from the appropriate
1049  *   memory pool properties.
1050  *
1051  * - If there are not enough elements to describe the buffer for the entire
1052  *   packet of maximal length, the following parameters will be used
1053  *   for all the remaining segments:
1054  *     - pool from the last valid element
1055  *     - the buffer size from this pool
1056  *     - zero offset
1057  *
1058  * - Length based buffer split:
1059  *     - mp, length, offset should be configured.
1060  *     - The proto_hdr field must be 0.
1061  *
1062  * - Protocol header based buffer split:
1063  *     - mp, offset, proto_hdr should be configured.
1064  *     - The length field must be 0.
1065  *     - The proto_hdr field in the last segment should be 0.
1066  *
1067  * - When protocol header split is enabled, the NIC may receive packets
1068  *   which do not match all the protocol headers within the Rx segments.
1069  *   In this case, the NIC has two possible split behaviors according to the
1070  *   matching result: exact match and longest match.
1071  *   The split result of the NIC must belong to one of them.
1072  *   Exact match means the NIC only splits when the packet exactly matches all
1073  *   the protocol headers in the segments.
1074  *   Otherwise, the whole packet is put into the last valid mempool.
1075  *   Longest match means the NIC splits until the packet mismatches a
1076  *   protocol header in the segments.
1077  *   The rest is put into the last valid pool.
1078  */
1079 struct rte_eth_rxseg_split {
1080 	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
1081 	uint16_t length; /**< Segment data length, configures split point. */
1082 	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
1083 	/**
1084 	 * proto_hdr defines a bit mask of the protocol sequence as RTE_PTYPE_*.
1085 	 * The last RTE_PTYPE* in the mask indicates the split position.
1086 	 *
1087 	 * If one protocol header is defined to split packets into two segments,
1088 	 * for non-tunneling packets, the complete protocol sequence should be defined.
1089 	 * For tunneling packets, for simplicity, only the tunnel and inner part of
1090 	 * complete protocol sequence is required.
1091 	 * If several protocol headers are defined to split packets into multi-segments,
1092 	 * the repeated parts of adjacent segments should be omitted.
1093 	 */
1094 	uint32_t proto_hdr;
1095 };
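
/*
 * A length-based buffer split sketch, used through the rx_seg/rx_nseg fields
 * of struct rte_eth_rxconf (below): the first up-to-128 bytes of each packet
 * go to *hdr_pool* and the remainder to *data_pool*. The pools, sizes and
 * queue parameters are assumptions of the example, and the
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT offload must be supported by the device:
 *
 *	union rte_eth_rxseg segs[2] = {
 *		{ .split = { .mp = hdr_pool,  .length = 128 } },
 *		{ .split = { .mp = data_pool, .length = 0 } },
 *	};
 *	struct rte_eth_rxconf rxconf = {0};
 *
 *	rxconf.rx_seg = segs;
 *	rxconf.rx_nseg = 2;
 *	rxconf.offloads = RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *			       &rxconf, NULL);
 */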
1096 
1097 /**
1098  * @warning
1099  * @b EXPERIMENTAL: this structure may change without prior notice.
1100  *
1101  * A common structure used to describe Rx packet segment properties.
1102  */
1103 union rte_eth_rxseg {
1104 	/* The settings for buffer split offload. */
1105 	struct rte_eth_rxseg_split split;
1106 	/* The other features settings should be added here. */
1107 };
1108 
1109 /**
1110  * A structure used to configure an Rx ring of an Ethernet port.
1111  */
1112 struct rte_eth_rxconf {
1113 	struct rte_eth_thresh rx_thresh; /**< Rx ring threshold registers. */
1114 	uint16_t rx_free_thresh; /**< Drives the freeing of Rx descriptors. */
1115 	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
1116 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1117 	uint16_t rx_nseg; /**< Number of segment descriptions in the rx_seg array. */
1118 	/**
1119 	 * Share group index in Rx domain and switch domain.
1120 	 * A non-zero value enables Rx queue sharing, zero disables it.
1121 	 * The PMD is responsible for Rx queue consistency checks to avoid member
1122 	 * ports' configurations contradicting each other.
1123 	 */
1124 	uint16_t share_group;
1125 	uint16_t share_qid; /**< Shared Rx queue ID in group */
1126 	/**
1127 	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
1128 	 * Only offloads reported in the rx_queue_offload_capa or rx_offload_capa
1129 	 * fields of the rte_eth_dev_info structure are allowed to be set.
1130 	 */
1131 	uint64_t offloads;
1132 	/**
1133 	 * Points to the array of segment descriptions for an entire packet.
1134 	 * Array elements are properties for consecutive Rx segments.
1135 	 *
1136 	 * The supported capabilities of receiving segmentation are reported
1137 	 * in the rte_eth_dev_info.rx_seg_capa field.
1138 	 */
1139 	union rte_eth_rxseg *rx_seg;
1140 
1141 	/**
1142 	 * Array of mempools to allocate Rx buffers from.
1143 	 *
1144 	 * This provides support for multiple mbuf pools per Rx queue.
1145 	 * The capability is reported in device info via positive
1146 	 * max_rx_mempools.
1147 	 *
1148 	 * It can be useful for more efficient memory usage when an
1149 	 * application creates different mempools to receive packets of
1150 	 * specific sizes.
1151 	 *
1152 	 * If many mempools are specified, packets received using Rx
1153 	 * burst may belong to any provided mempool. From the ethdev user's point
1154 	 * of view, it is undefined how the PMD/NIC chooses a mempool for a packet.
1155 	 *
1156 	 * If Rx scatter is enabled, a packet may be delivered using a chain
1157 	 * of mbufs obtained from single mempool or multiple mempools based
1158 	 * on the NIC implementation.
1159 	 */
1160 	struct rte_mempool **rx_mempools;
1161 	uint16_t rx_nmempool; /**< Number of Rx mempools */
1162 
1163 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1164 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1165 };
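
/*
 * A sketch of the multi-mempool capability described above (only meaningful
 * when the device reports a positive max_rx_mempools; the pools, sizes and
 * NULL mb_pool argument are assumptions of the example):
 *
 *	struct rte_mempool *pools[2] = { small_pool, large_pool };
 *	struct rte_eth_rxconf rxconf = {0};
 *
 *	rxconf.rx_mempools = pools;
 *	rxconf.rx_nmempool = 2;
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *			       &rxconf, NULL);
 */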
1166 
1167 /**
1168  * A structure used to configure a Tx ring of an Ethernet port.
1169  */
1170 struct rte_eth_txconf {
1171 	struct rte_eth_thresh tx_thresh; /**< Tx ring threshold registers. */
1172 	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
1173 	uint16_t tx_free_thresh; /**< Start freeing Tx buffers if there are
1174 				      fewer free descriptors than this value. */
1175 
1176 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1177 	/**
1178 	 * Per-queue Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
1179 	 * Only offloads reported in the tx_queue_offload_capa or tx_offload_capa
1180 	 * fields of the rte_eth_dev_info structure are allowed to be set.
1181 	 */
1182 	uint64_t offloads;
1183 
1184 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1185 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1186 };
1187 
1188 /**
1189  * @warning
1190  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1191  *
1192  * A structure used to return the Tx or Rx hairpin queue capabilities.
1193  */
1194 struct rte_eth_hairpin_queue_cap {
1195 	/**
1196 	 * When set, PMD supports placing descriptors and/or data buffers
1197 	 * in dedicated device memory.
1198 	 */
1199 	uint32_t locked_device_memory:1;
1200 
1201 	/**
1202 	 * When set, PMD supports placing descriptors and/or data buffers
1203 	 * in host memory managed by DPDK.
1204 	 */
1205 	uint32_t rte_memory:1;
1206 
1207 	uint32_t reserved:30; /**< Reserved for future fields */
1208 };
1209 
1210 /**
1211  * @warning
1212  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1213  *
1214  * A structure used to return the hairpin capabilities that are supported.
1215  */
1216 struct rte_eth_hairpin_cap {
1217 	/** The max number of hairpin queues (different bindings). */
1218 	uint16_t max_nb_queues;
1219 	/** Max number of Rx queues to be connected to one Tx queue. */
1220 	uint16_t max_rx_2_tx;
1221 	/** Max number of Tx queues to be connected to one Rx queue. */
1222 	uint16_t max_tx_2_rx;
1223 	uint16_t max_nb_desc; /**< The max num of descriptors. */
1224 	struct rte_eth_hairpin_queue_cap rx_cap; /**< Rx hairpin queue capabilities. */
1225 	struct rte_eth_hairpin_queue_cap tx_cap; /**< Tx hairpin queue capabilities. */
1226 };
1227 
1228 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1229 
1230 /**
1231  * @warning
1232  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1233  *
1234  * A structure used to hold hairpin peer data.
1235  */
1236 struct rte_eth_hairpin_peer {
1237 	uint16_t port; /**< Peer port. */
1238 	uint16_t queue; /**< Peer queue. */
1239 };
1240 
1241 /**
1242  * @warning
1243  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1244  *
1245  * A structure used to configure hairpin binding.
1246  */
1247 struct rte_eth_hairpin_conf {
1248 	uint32_t peer_count:16; /**< The number of peers. */
1249 
1250 	/**
1251 	 * Explicit Tx flow rule mode.
1252 	 * One hairpin pair of queues should have the same attribute.
1253 	 *
1254 	 * - When set, the user should be responsible for inserting the hairpin
1255 	 *   Tx part flows and removing them.
1256 	 * - When clear, the PMD will try to handle the Tx part of the flows,
1257 	 *   e.g., by splitting one flow into two parts.
1258 	 */
1259 	uint32_t tx_explicit:1;
1260 
1261 	/**
1262 	 * Manually bind hairpin queues.
1263 	 * One hairpin pair of queues should have the same attribute.
1264 	 *
1265 	 * - When set, to enable hairpin, the user should call the hairpin bind
1266 	 *   function after all the queues are set up properly and the ports are
1267 	 *   started. Also, the hairpin unbind function should be called
1268 	 *   accordingly before stopping a port that has hairpin configured.
1269 	 * - When cleared, the PMD will try to enable the hairpin with the queues
1270 	 *   configured automatically during port start.
1271 	 */
1272 	uint32_t manual_bind:1;
1273 
1274 	/**
1275 	 * Use locked device memory as a backing storage.
1276 	 *
1277 	 * - When set, PMD will attempt to place descriptors and/or data buffers
1278 	 *   in dedicated device memory.
1279 	 * - When cleared, PMD will use default memory type as a backing storage.
1280 	 *   Please refer to PMD documentation for details.
1281 	 *
1282 	 * API user should check if PMD supports this configuration flag using
1283 	 * @see rte_eth_dev_hairpin_capability_get.
1284 	 */
1285 	uint32_t use_locked_device_memory:1;
1286 
1287 	/**
1288 	 * Use DPDK memory as backing storage.
1289 	 *
1290 	 * - When set, PMD will attempt to place descriptors and/or data buffers
1291 	 *   in host memory managed by DPDK.
1292 	 * - When cleared, PMD will use default memory type as a backing storage.
1293 	 *   Please refer to PMD documentation for details.
1294 	 *
1295 	 * API user should check if PMD supports this configuration flag using
1296 	 * @see rte_eth_dev_hairpin_capability_get.
1297 	 */
1298 	uint32_t use_rte_memory:1;
1299 
1300 	/**
1301 	 * Force usage of hairpin memory configuration.
1302 	 *
1303 	 * - When set, PMD will attempt to use specified memory settings.
1304 	 *   If resource allocation fails, then hairpin queue allocation
1305 	 *   will result in an error.
1306 	 * - When clear, PMD will attempt to use specified memory settings.
1307 	 *   If resource allocation fails, then PMD will retry
1308 	 *   allocation with default configuration.
1309 	 */
1310 	uint32_t force_memory:1;
1311 
1312 	uint32_t reserved:11; /**< Reserved bits. */
1313 
1314 	struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1315 };
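
/*
 * A single-port hairpin sketch binding Rx queue 1 to Tx queue 1 through this
 * structure; the queue IDs, descriptor count and implicit (non-manual)
 * binding are assumptions of the example, and device support should be
 * checked first with rte_eth_dev_hairpin_capability_get():
 *
 *	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
 *
 *	conf.peers[0].port = port_id;
 *	conf.peers[0].queue = 1;
 *	rte_eth_rx_hairpin_queue_setup(port_id, 1, 512, &conf);
 *	rte_eth_tx_hairpin_queue_setup(port_id, 1, 512, &conf);
 */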
1316 
1317 /**
1318  * A structure containing information about HW descriptor ring limitations.
1319  */
1320 struct rte_eth_desc_lim {
1321 	uint16_t nb_max;   /**< Max allowed number of descriptors. */
1322 	uint16_t nb_min;   /**< Min allowed number of descriptors. */
1323 	uint16_t nb_align; /**< Number of descriptors should be aligned to. */
1324 
1325 	/**
1326 	 * Max allowed number of segments per whole packet.
1327 	 *
1328 	 * - For TSO packet this is the total number of data descriptors allowed
1329 	 *   by device.
1330 	 *
1331 	 * @see nb_mtu_seg_max
1332 	 */
1333 	uint16_t nb_seg_max;
1334 
1335 	/**
1336 	 * Max number of segments per one MTU.
1337 	 *
1338 	 * - For non-TSO packet, this is the maximum allowed number of segments
1339 	 *   in a single transmit packet.
1340 	 *
1341 	 * - For TSO packet each segment within the TSO may span up to this
1342 	 *   value.
1343 	 *
1344 	 * @see nb_seg_max
1345 	 */
1346 	uint16_t nb_mtu_seg_max;
1347 };
1348 
1349 /**
1350  * This enum indicates the flow control mode
1351  */
1352 enum rte_eth_fc_mode {
1353 	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
1354 	RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
1355 	RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
1356 	RTE_ETH_FC_FULL      /**< Enable flow control on both sides. */
1357 };
1358 
1359 /**
1360  * A structure used to configure Ethernet flow control parameters.
1361  * These parameters will be configured into the registers of the NIC.
1362  * Please refer to the corresponding data sheet for proper values.
1363  */
1364 struct rte_eth_fc_conf {
1365 	uint32_t high_water;  /**< High threshold value to trigger XOFF */
1366 	uint32_t low_water;   /**< Low threshold value to trigger XON */
1367 	uint16_t pause_time;  /**< Pause quota in the Pause frame */
1368 	uint16_t send_xon;    /**< Whether an XON frame needs to be sent. */
1369 	enum rte_eth_fc_mode mode;  /**< Link flow control mode */
1370 	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames */
1371 	uint8_t autoneg;      /**< Use Pause autoneg */
1372 };
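
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch of
 * enabling full link-level flow control with rte_eth_dev_flow_ctrl_set()
 * (declared elsewhere in this file). The threshold and pause-time values
 * below are arbitrary examples; proper values are NIC specific and should be
 * taken from the corresponding data sheet.
 *
 *   #include <string.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   enable_link_flow_control(uint16_t port_id)
 *   {
 *       struct rte_eth_fc_conf fc_conf;
 *
 *       memset(&fc_conf, 0, sizeof(fc_conf));
 *       fc_conf.mode = RTE_ETH_FC_FULL;  // pause frames on both sides
 *       fc_conf.high_water = 80 * 1024;  // XOFF threshold (example value)
 *       fc_conf.low_water = 60 * 1024;   // XON threshold (example value)
 *       fc_conf.pause_time = 0x680;      // pause quota (example value)
 *       fc_conf.send_xon = 1;
 *       fc_conf.autoneg = 1;             // negotiate pause capability
 *       return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *   }
 */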
1373 
1374 /**
1375  * A structure used to configure Ethernet priority flow control parameters.
1376  * These parameters will be configured into the registers of the NIC.
1377  * Please refer to the corresponding data sheet for proper values.
1378  */
1379 struct rte_eth_pfc_conf {
1380 	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
1381 	uint8_t priority;          /**< VLAN User Priority. */
1382 };
1383 
1384 /**
1385  * @warning
1386  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1387  *
1388  * A structure used to retrieve information of queue based PFC.
1389  */
1390 struct rte_eth_pfc_queue_info {
1391 	/**
1392 	 * Maximum supported traffic class as per PFC (802.1Qbb) specification.
1393 	 */
1394 	uint8_t tc_max;
1395 	/** PFC queue mode capabilities. */
1396 	enum rte_eth_fc_mode mode_capa;
1397 };
1398 
1399 /**
1400  * @warning
1401  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1402  *
1403  * A structure used to configure Ethernet priority flow control parameters for
1404  * ethdev queues.
1405  *
1406  * The rte_eth_pfc_queue_conf::rx_pause structure shall be used to configure a
1407  * given tx_qid with the corresponding tc. When the ethdev device receives a PFC
1408  * frame with rte_eth_pfc_queue_conf::rx_pause::tc, traffic will be paused on
1409  * rte_eth_pfc_queue_conf::rx_pause::tx_qid for that tc.
1410  *
1411  * The rte_eth_pfc_queue_conf::tx_pause structure shall be used to configure a
1412  * given rx_qid. When rx_qid is congested, PFC frames are generated with
1413  * rte_eth_pfc_queue_conf::tx_pause::tc and
1414  * rte_eth_pfc_queue_conf::tx_pause::pause_time to the peer.
1415  */
1416 struct rte_eth_pfc_queue_conf {
1417 	enum rte_eth_fc_mode mode; /**< Link flow control mode */
1418 
1419 	struct {
1420 		uint16_t tx_qid; /**< Tx queue ID */
1421 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1422 		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1]
1423 		 */
1424 		uint8_t tc;
1425 	} rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1426 
1427 	struct {
1428 		uint16_t pause_time; /**< Pause quota in the Pause frame */
1429 		uint16_t rx_qid;     /**< Rx queue ID */
1430 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1431 		 * in the range [0, rte_eth_pfc_queue_info::tc_max - 1]
1432 		 */
1433 		uint8_t tc;
1434 	} tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1435 };
1436 
1437 /**
1438  * Tunnel type for device-specific classifier configuration.
1439  * @see rte_eth_udp_tunnel
1440  */
1441 enum rte_eth_tunnel_type {
1442 	RTE_ETH_TUNNEL_TYPE_NONE = 0,
1443 	RTE_ETH_TUNNEL_TYPE_VXLAN,
1444 	RTE_ETH_TUNNEL_TYPE_GENEVE,
1445 	RTE_ETH_TUNNEL_TYPE_TEREDO,
1446 	RTE_ETH_TUNNEL_TYPE_NVGRE,
1447 	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1448 	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1449 	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1450 	RTE_ETH_TUNNEL_TYPE_ECPRI,
1451 	RTE_ETH_TUNNEL_TYPE_MAX,
1452 };
1453 
1454 /* Deprecated API file for rte_eth_dev_filter_* functions */
1455 #include "rte_eth_ctrl.h"
1456 
1457 /**
1458  * UDP tunneling configuration.
1459  *
1460  * Used to configure the classifier of a device,
1461  * associating a UDP port with a type of tunnel.
1462  *
1463  * Some NICs may need such configuration to properly parse a tunnel
1464  * with any standard or custom UDP port.
1465  */
1466 struct rte_eth_udp_tunnel {
1467 	uint16_t udp_port; /**< UDP port used for the tunnel. */
1468 	uint8_t prot_type; /**< Tunnel type. @see rte_eth_tunnel_type */
1469 };
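
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch telling
 * the device classifier that UDP destination port 4789 (the IANA-assigned
 * VXLAN port) carries VXLAN, using rte_eth_dev_udp_tunnel_port_add()
 * (declared elsewhere in this file).
 *
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   add_vxlan_udp_port(uint16_t port_id)
 *   {
 *       struct rte_eth_udp_tunnel tunnel = {
 *           .udp_port = 4789,
 *           .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
 *       };
 *
 *       return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *   }
 */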
1470 
1471 /**
1472  * A structure used to enable/disable specific device interrupts.
1473  */
1474 struct rte_eth_intr_conf {
1475 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
1476 	uint32_t lsc:1;
1477 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
1478 	uint32_t rxq:1;
1479 	/** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
1480 	uint32_t rmv:1;
1481 };
1482 
1483 #define rte_intr_conf rte_eth_intr_conf
1484 
1485 /**
1486  * A structure used to configure an Ethernet port.
1487  * Depending upon the Rx multi-queue mode, extra advanced
1488  * configuration settings may be needed.
1489  */
1490 struct rte_eth_conf {
1491 	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
1492 				used. RTE_ETH_LINK_SPEED_FIXED disables link
1493 				autonegotiation, and a unique speed shall be
1494 				set. Otherwise, the bitmap defines the set of
1495 				speeds to be advertised. If the special value
1496 				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
1497 				supported are advertised. */
1498 	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
1499 	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
1500 	uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
1501 			         is 0, meaning the loopback mode is disabled.
1502 				 Read the datasheet of given Ethernet controller
1503 				 for details. The possible values of this field
1504 				 are defined in implementation of each driver. */
1505 	struct {
1506 		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
1507 		/** Port VMDq+DCB configuration. */
1508 		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1509 		/** Port DCB Rx configuration. */
1510 		struct rte_eth_dcb_rx_conf dcb_rx_conf;
1511 		/** Port VMDq Rx configuration. */
1512 		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1513 	} rx_adv_conf; /**< Port Rx filtering configuration. */
1514 	union {
1515 		/** Port VMDq+DCB Tx configuration. */
1516 		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1517 		/** Port DCB Tx configuration. */
1518 		struct rte_eth_dcb_tx_conf dcb_tx_conf;
1519 		/** Port VMDq Tx configuration. */
1520 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1521 	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
1522 	/** Currently, Priority Flow Control (PFC) is supported. If DCB with PFC
1523 	    is needed, this field must be set to RTE_ETH_DCB_PFC_SUPPORT. */
1524 	uint32_t dcb_capability_en;
1525 	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
1526 };
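
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch of an
 * rte_eth_conf leaving link speed autonegotiation enabled and enabling RSS
 * over two Rx queues. Offloads and RSS hash types requested here must be a
 * subset of the capabilities reported by rte_eth_dev_info_get(); the queue
 * counts are arbitrary examples.
 *
 *   #include <string.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   configure_port(uint16_t port_id)
 *   {
 *       struct rte_eth_conf conf;
 *
 *       memset(&conf, 0, sizeof(conf));
 *       conf.link_speeds = RTE_ETH_LINK_SPEED_AUTONEG;
 *       conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
 *       conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM;
 *       conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
 *       return rte_eth_dev_configure(port_id, 2, 2, &conf);
 *   }
 */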
1527 
1528 /**
1529  * Rx offload capabilities of a device.
1530  */
1531 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       RTE_BIT64(0)
1532 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1533 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1534 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1535 #define RTE_ETH_RX_OFFLOAD_TCP_LRO          RTE_BIT64(4)
1536 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       RTE_BIT64(5)
1537 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1538 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     RTE_BIT64(7)
1539 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      RTE_BIT64(9)
1540 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      RTE_BIT64(10)
1541 #define RTE_ETH_RX_OFFLOAD_SCATTER          RTE_BIT64(13)
1542 /**
1543  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1544  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
1545  * The mbuf field and flag are registered when the offload is configured.
1546  */
1547 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP        RTE_BIT64(14)
1548 #define RTE_ETH_RX_OFFLOAD_SECURITY         RTE_BIT64(15)
1549 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC         RTE_BIT64(16)
1550 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(17)
1551 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(18)
1552 #define RTE_ETH_RX_OFFLOAD_RSS_HASH         RTE_BIT64(19)
1553 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     RTE_BIT64(20)
1554 
1555 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1556 				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1557 				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1558 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1559 			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1560 			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1561 			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1562 
1563 /*
1564  * If new Rx offload capabilities are defined, they also must be
1565  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1566  */
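
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * requests Rx checksum offloads and the RSS hash offload only when the
 * device reports support for them in rte_eth_dev_info.rx_offload_capa.
 *
 *   #include <rte_ethdev.h>
 *
 *   static uint64_t
 *   select_rx_offloads(uint16_t port_id)
 *   {
 *       struct rte_eth_dev_info dev_info;
 *       uint64_t wanted = RTE_ETH_RX_OFFLOAD_CHECKSUM |
 *                         RTE_ETH_RX_OFFLOAD_RSS_HASH;
 *
 *       if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
 *           return 0;
 *       // Keep only the offloads the device actually supports.
 *       return wanted & dev_info.rx_offload_capa;
 *   }
 */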
1567 
1568 /**
1569  * Tx offload capabilities of a device.
1570  */
1571 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      RTE_BIT64(0)
1572 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1573 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1574 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1575 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)
1576 #define RTE_ETH_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)
1577 #define RTE_ETH_TX_OFFLOAD_UDP_TSO          RTE_BIT64(6)
1578 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)  /**< Used for tunneling packet. */
1579 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      RTE_BIT64(8)
1580 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    RTE_BIT64(9)  /**< Used for tunneling packet. */
1581 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      RTE_BIT64(10) /**< Used for tunneling packet. */
1582 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     RTE_BIT64(11) /**< Used for tunneling packet. */
1583 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   RTE_BIT64(12) /**< Used for tunneling packet. */
1584 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    RTE_BIT64(13)
1585 /**
1586  * Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
1587  * Tx queue without SW lock.
1588  */
1589 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      RTE_BIT64(14)
1590 /** Device supports multi segment send. */
1591 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)
1592 /**
1593  * Device supports optimization for fast release of mbufs.
1594  * When set, the application must guarantee that, per queue, all mbufs come
1595  * from the same mempool and have refcnt = 1.
1596  */
1597 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)
1598 #define RTE_ETH_TX_OFFLOAD_SECURITY         RTE_BIT64(17)
1599 /**
1600  * Device supports generic UDP tunneled packet TSO.
1601  * Application must set RTE_MBUF_F_TX_TUNNEL_UDP and other mbuf fields required
1602  * for tunnel TSO.
1603  */
1604 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      RTE_BIT64(18)
1605 /**
1606  * Device supports generic IP tunneled packet TSO.
1607  * Application must set RTE_MBUF_F_TX_TUNNEL_IP and other mbuf fields required
1608  * for tunnel TSO.
1609  */
1610 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       RTE_BIT64(19)
1611 /** Device supports outer UDP checksum */
1612 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(20)
1613 /**
1614  * Device sends the packet at the time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1615  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
1616  * The mbuf field and flag are registered when the offload is configured.
1617  */
1618 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1619 /*
1620  * If new Tx offload capabilities are defined, they also must be
1621  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1622  */
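
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * enables RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE only when the device supports it
 * and the application can guarantee the per-queue single-mempool and
 * refcnt = 1 constraints described above.
 *
 *   #include <stdbool.h>
 *   #include <rte_ethdev.h>
 *
 *   static uint64_t
 *   select_tx_offloads(uint16_t port_id, bool single_pool_per_queue)
 *   {
 *       struct rte_eth_dev_info dev_info;
 *       uint64_t offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 *
 *       if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
 *           return 0;
 *       if (single_pool_per_queue &&
 *           (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE))
 *           offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 *       // Keep only the offloads the device actually supports.
 *       return offloads & dev_info.tx_offload_capa;
 *   }
 */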
1623 
1624 /**@{@name Device capabilities
1625  * Non-offload capabilities reported in rte_eth_dev_info.dev_capa.
1626  */
1627 /** Device supports Rx queue setup after device started. */
1628 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1629 /** Device supports Tx queue setup after device started. */
1630 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1631 /**
1632  * Device supports shared Rx queue among ports within Rx domain and
1633  * switch domain. Mbufs are consumed by the shared Rx queue instead of
1634  * each individual queue. Multiple groups are supported via the share_group
1635  * field of the Rx queue configuration. A shared Rx queue is identified by
1636  * the PMD using the share_qid field of the Rx queue configuration.
1637  * Polling any port in the group receives packets from all member ports;
1638  * the source port is identified by the mbuf->port field.
1639  */
1640 #define RTE_ETH_DEV_CAPA_RXQ_SHARE              RTE_BIT64(2)
1641 /** Device supports keeping flow rules across restart. */
1642 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP         RTE_BIT64(3)
1643 /** Device supports keeping shared flow objects across restart. */
1644 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1645 /**@}*/
1646 
1647 /*
1648  * Fallback default preferred Rx/Tx port parameters.
1649  * These are used if an application requests default parameters
1650  * but the PMD does not provide preferred values.
1651  */
1652 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1653 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1654 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1655 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1656 
1657 /**
1658  * Preferred Rx/Tx port parameters.
1659  * There are separate instances of this structure for transmission
1660  * and reception respectively.
1661  */
1662 struct rte_eth_dev_portconf {
1663 	uint16_t burst_size; /**< Device-preferred burst size */
1664 	uint16_t ring_size; /**< Device-preferred size of queue rings */
1665 	uint16_t nb_queues; /**< Device-preferred number of queues */
1666 };
1667 
1668 /**
1669  * Default value for switch domain ID when ethdev does not support switch
1670  * domain definitions.
1671  */
1672 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID	(UINT16_MAX)
1673 
1674 /**
1675  * Ethernet device associated switch information
1676  */
1677 struct rte_eth_switch_info {
1678 	const char *name;	/**< switch name */
1679 	uint16_t domain_id;	/**< switch domain ID */
1680 	/**
1681 	 * Mapping to the device's physical switch port as enumerated from the
1682 	 * perspective of the embedded interconnect/switch. For an SR-IOV enabled
1683 	 * device this may correspond to the VF_ID of each virtual function,
1684 	 * but each driver should explicitly define the mapping of the switch
1685 	 * port identifier to that physical interconnect/switch.
1686 	 */
1687 	uint16_t port_id;
1688 	/**
1689 	 * Shared Rx queue sub-domain boundary. Only ports in the same Rx domain
1690 	 * and switch domain can share an Rx queue. Valid only if the device
1691 	 * advertises the RTE_ETH_DEV_CAPA_RXQ_SHARE capability.
1692 	 */
1693 	uint16_t rx_domain;
1694 };
1695 
1696 /**
1697  * @warning
1698  * @b EXPERIMENTAL: this structure may change without prior notice.
1699  *
1700  * Ethernet device Rx buffer segmentation capabilities.
1701  */
1702 struct rte_eth_rxseg_capa {
1703 	__extension__
1704 	uint32_t multi_pools:1; /**< Supports receiving to multiple pools.*/
1705 	uint32_t offset_allowed:1; /**< Supports buffer offsets. */
1706 	uint32_t offset_align_log2:4; /**< Required offset alignment. */
1707 	uint16_t max_nseg; /**< Maximum number of segments to split into. */
1708 	uint16_t reserved; /**< Reserved field. */
1709 };
1710 
1711 /**
1712  * Ethernet device information
1713  */
1714 
1715 /**
1716  * Ethernet device representor port type.
1717  */
1718 enum rte_eth_representor_type {
1719 	RTE_ETH_REPRESENTOR_NONE, /**< not a representor. */
1720 	RTE_ETH_REPRESENTOR_VF,   /**< representor of Virtual Function. */
1721 	RTE_ETH_REPRESENTOR_SF,   /**< representor of Sub Function. */
1722 	RTE_ETH_REPRESENTOR_PF,   /**< representor of Physical Function. */
1723 };
1724 
1725 /**
1726  * @warning
1727  * @b EXPERIMENTAL: this enumeration may change without prior notice.
1728  *
1729  * Ethernet device error handling mode.
1730  */
1731 enum rte_eth_err_handle_mode {
1732 	/** No error handling modes are supported. */
1733 	RTE_ETH_ERROR_HANDLE_MODE_NONE,
1734 	/** Passive error handling: after the PMD detects that a reset is required,
1735 	 * the PMD reports the @see RTE_ETH_EVENT_INTR_RESET event,
1736 	 * and the application invokes @see rte_eth_dev_reset to recover the port.
1737 	 */
1738 	RTE_ETH_ERROR_HANDLE_MODE_PASSIVE,
1739 	/** Proactive error handling: after the PMD detects that a reset is required,
1740 	 * the PMD reports the @see RTE_ETH_EVENT_ERR_RECOVERING event,
1741 	 * performs the recovery internally, and finally reports the recovery result
1742 	 * event (@see RTE_ETH_EVENT_RECOVERY_*).
1743 	 */
1744 	RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE,
1745 };
1746 
1747 /**
1748  * A structure used to retrieve the contextual information of
1749  * an Ethernet device, such as the controlling driver of the
1750  * device, etc...
1751  */
1752 struct rte_eth_dev_info {
1753 	struct rte_device *device; /**< Generic device information */
1754 	const char *driver_name; /**< Device Driver name. */
1755 	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
1756 		Use if_indextoname() to translate into an interface name. */
1757 	uint16_t min_mtu;	/**< Minimum MTU allowed */
1758 	uint16_t max_mtu;	/**< Maximum MTU allowed */
1759 	const uint32_t *dev_flags; /**< Device flags */
1760 	/** Minimum Rx buffer size per descriptor supported by HW. */
1761 	uint32_t min_rx_bufsize;
1762 	/**
1763 	 * Maximum Rx buffer size per descriptor supported by HW.
1764 	 * The value is not enforced; it is informational only, allowing the
1765 	 * application to optimize mbuf size.
1766 	 * Its value is UINT32_MAX when not specified by the driver.
1767 	 */
1768 	uint32_t max_rx_bufsize;
1769 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
1770 	/** Maximum configurable size of LRO aggregated packet. */
1771 	uint32_t max_lro_pkt_size;
1772 	uint16_t max_rx_queues; /**< Maximum number of Rx queues. */
1773 	uint16_t max_tx_queues; /**< Maximum number of Tx queues. */
1774 	uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
1775 	/** Maximum number of hash MAC addresses for MTA and UTA. */
1776 	uint32_t max_hash_mac_addrs;
1777 	uint16_t max_vfs; /**< Maximum number of VFs. */
1778 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
1779 	struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/
1780 	/** All Rx offload capabilities including all per-queue ones */
1781 	uint64_t rx_offload_capa;
1782 	/** All Tx offload capabilities including all per-queue ones */
1783 	uint64_t tx_offload_capa;
1784 	/** Device per-queue Rx offload capabilities. */
1785 	uint64_t rx_queue_offload_capa;
1786 	/** Device per-queue Tx offload capabilities. */
1787 	uint64_t tx_queue_offload_capa;
1788 	/** Device redirection table size, the total number of entries. */
1789 	uint16_t reta_size;
1790 	uint8_t hash_key_size; /**< Hash key size in bytes */
1791 	uint32_t rss_algo_capa; /**< RSS hash algorithms capabilities */
1792 	/** Bit mask of RSS offloads, the bit offset also means flow type */
1793 	uint64_t flow_type_rss_offloads;
1794 	struct rte_eth_rxconf default_rxconf; /**< Default Rx configuration */
1795 	struct rte_eth_txconf default_txconf; /**< Default Tx configuration */
1796 	uint16_t vmdq_queue_base; /**< First queue ID for VMDq pools. */
1797 	uint16_t vmdq_queue_num;  /**< Queue number for VMDq pools. */
1798 	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
1799 	struct rte_eth_desc_lim rx_desc_lim;  /**< Rx descriptors limits */
1800 	struct rte_eth_desc_lim tx_desc_lim;  /**< Tx descriptors limits */
1801 	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
1802 	/** Configured number of Rx/Tx queues */
1803 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
1804 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
1805 	/**
1806 	 * Maximum number of Rx mempools supported per Rx queue.
1807 	 *
1808 	 * Value greater than 0 means that the driver supports Rx queue
1809 	 * mempools specification via rx_conf->rx_mempools.
1810 	 */
1811 	uint16_t max_rx_mempools;
1812 	/** Rx parameter recommendations */
1813 	struct rte_eth_dev_portconf default_rxportconf;
1814 	/** Tx parameter recommendations */
1815 	struct rte_eth_dev_portconf default_txportconf;
1816 	/** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
1817 	uint64_t dev_capa;
1818 	/**
1819 	 * Switching information for ports on a device with an
1820 	 * embedded managed interconnect/switch.
1821 	 */
1822 	struct rte_eth_switch_info switch_info;
1823 	/** Supported error handling mode. */
1824 	enum rte_eth_err_handle_mode err_handle_mode;
1825 
1826 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1827 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1828 };
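
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * queries the device limits before deciding how many queues to configure.
 * The requested count of 4 queues is an arbitrary example.
 *
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   pick_queue_counts(uint16_t port_id, uint16_t *nb_rxq, uint16_t *nb_txq)
 *   {
 *       struct rte_eth_dev_info dev_info;
 *       int ret = rte_eth_dev_info_get(port_id, &dev_info);
 *
 *       if (ret != 0)
 *           return ret;
 *       *nb_rxq = dev_info.max_rx_queues < 4 ? dev_info.max_rx_queues : 4;
 *       *nb_txq = dev_info.max_tx_queues < 4 ? dev_info.max_tx_queues : 4;
 *       return 0;
 *   }
 */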
1829 
1830 /**@{@name Rx/Tx queue states */
1831 #define RTE_ETH_QUEUE_STATE_STOPPED 0 /**< Queue stopped. */
1832 #define RTE_ETH_QUEUE_STATE_STARTED 1 /**< Queue started. */
1833 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2 /**< Queue used for hairpin. */
1834 /**@}*/
1835 
1836 /**
1837  * Ethernet device Rx queue information structure.
1838  * Used to retrieve information about configured queue.
1839  */
1840 struct __rte_cache_min_aligned rte_eth_rxq_info {
1841 	struct rte_mempool *mp;     /**< mempool used by that queue. */
1842 	struct rte_eth_rxconf conf; /**< queue config parameters. */
1843 	uint8_t scattered_rx;       /**< scattered packets Rx supported. */
1844 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1845 	uint16_t nb_desc;           /**< configured number of RXDs. */
1846 	uint16_t rx_buf_size;       /**< hardware receive buffer size. */
1847 	/**
1848 	 * Available Rx descriptors threshold defined as a percentage
1849 	 * of the Rx queue size. If the number of available descriptors is lower,
1850 	 * the event RTE_ETH_EVENT_RX_AVAIL_THRESH is generated.
1851 	 * Value 0 means that the threshold monitoring is disabled.
1852 	 */
1853 	uint8_t avail_thresh;
1854 };
1855 
1856 /**
1857  * Ethernet device Tx queue information structure.
1858  * Used to retrieve information about configured queue.
1859  */
1860 struct __rte_cache_min_aligned rte_eth_txq_info {
1861 	struct rte_eth_txconf conf; /**< queue config parameters. */
1862 	uint16_t nb_desc;           /**< configured number of TXDs. */
1863 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1864 };
1865 
1866 /**
1867  * @warning
1868  * @b EXPERIMENTAL: this structure may change without prior notice.
1869  *
1870  * Ethernet device Rx queue information structure for recycling mbufs.
1871  * Used to retrieve Rx queue information when the Tx queue is reusing mbufs
1872  * and moving them into the Rx mbuf ring.
1873  */
1874 struct __rte_cache_min_aligned rte_eth_recycle_rxq_info {
1875 	struct rte_mbuf **mbuf_ring; /**< mbuf ring of Rx queue. */
1876 	struct rte_mempool *mp;     /**< mempool of Rx queue. */
1877 	uint16_t *refill_head;      /**< head of Rx queue refilling mbufs. */
1878 	uint16_t *receive_tail;     /**< tail of Rx queue receiving pkts. */
1879 	uint16_t mbuf_ring_size;     /**< configured size of the mbuf ring. */
1880 	/**
1881 	 * Requirement on the mbuf refilling batch size of the Rx mbuf ring.
1882 	 * For some PMDs, the number of mbufs used to refill the Rx mbuf ring
1883 	 * should be aligned with the mbuf ring size, in order to simplify
1884 	 * ring wrap-around handling.
1885 	 * Value 0 means that the PMD has no requirement for this.
1886 	 */
1887 	uint16_t refill_requirement;
1888 };
1889 
1890 /* Generic Burst mode flag definition, values can be ORed. */
1891 
1892 /**
1893  * If the queues have different burst mode descriptions, this bit will be set
1894  * by the PMD; the application can then iterate to retrieve the burst
1895  * description of all other queues.
1896  */
1897 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1898 
1899 /**
1900  * Ethernet device Rx/Tx queue packet burst mode information structure.
1901  * Used to retrieve information about packet burst mode setting.
1902  */
1903 struct rte_eth_burst_mode {
1904 	uint64_t flags; /**< The ORed values of RTE_ETH_BURST_FLAG_xxx */
1905 
1906 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024 /**< Maximum size for information */
1907 	char info[RTE_ETH_BURST_MODE_INFO_SIZE]; /**< burst mode information */
1908 };
1909 
1910 /** Maximum name length for extended statistics counters */
1911 #define RTE_ETH_XSTATS_NAME_SIZE 64
1912 
1913 /**
1914  * An Ethernet device extended statistic structure
1915  *
1916  * This structure is used by rte_eth_xstats_get() to provide
1917  * statistics that are not provided in the generic *rte_eth_stats*
1918  * structure.
1919  * It maps a name ID, corresponding to an index in the array returned
1920  * by rte_eth_xstats_get_names(), to a statistic value.
1921  */
1922 struct rte_eth_xstat {
1923 	uint64_t id;        /**< The index in xstats name array. */
1924 	uint64_t value;     /**< The statistic counter value. */
1925 };
1926 
1927 /**
1928  * A name element for extended statistics.
1929  *
1930  * An array of this structure is returned by rte_eth_xstats_get_names().
1931  * It lists the names of extended statistics for a PMD. The *rte_eth_xstat*
1932  * structure references these names by their array index.
1933  *
1934  * The xstats should follow a common naming scheme.
1935  * Some names are standardized in rte_stats_strings.
1936  * Examples:
1937  *     - rx_missed_errors
1938  *     - tx_q3_bytes
1939  *     - tx_size_128_to_255_packets
1940  */
1941 struct rte_eth_xstat_name {
1942 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
1943 };
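
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * dumps all extended statistics of a port by pairing
 * rte_eth_xstats_get_names() with rte_eth_xstats_get() through the shared
 * array index carried in rte_eth_xstat::id.
 *
 *   #include <inttypes.h>
 *   #include <stdio.h>
 *   #include <stdlib.h>
 *   #include <rte_ethdev.h>
 *
 *   static void
 *   dump_xstats(uint16_t port_id)
 *   {
 *       int nb = rte_eth_xstats_get_names(port_id, NULL, 0);
 *       struct rte_eth_xstat_name *names;
 *       struct rte_eth_xstat *values;
 *
 *       if (nb <= 0)
 *           return;
 *       names = calloc(nb, sizeof(*names));
 *       values = calloc(nb, sizeof(*values));
 *       if (names != NULL && values != NULL &&
 *           rte_eth_xstats_get_names(port_id, names, nb) == nb &&
 *           rte_eth_xstats_get(port_id, values, nb) == nb) {
 *           for (int i = 0; i < nb; i++)
 *               printf("%s: %" PRIu64 "\n",
 *                      names[values[i].id].name, values[i].value);
 *       }
 *       free(names);
 *       free(values);
 *   }
 */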
1944 
1945 #define RTE_ETH_DCB_NUM_TCS    8
1946 #define RTE_ETH_MAX_VMDQ_POOL  64
1947 
1948 /**
1949  * A structure used to get the information of queue and
1950  * TC mapping on both Tx and Rx paths.
1951  */
1952 struct rte_eth_dcb_tc_queue_mapping {
1953 	/** Rx queues assigned to tc per Pool */
1954 	struct {
1955 		uint16_t base;
1956 		uint16_t nb_queue;
1957 	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1958 	/** Tx queues assigned to tc per Pool */
1959 	struct {
1960 		uint16_t base;
1961 		uint16_t nb_queue;
1962 	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1963 };
1964 
1965 /**
1966  * A structure used to get the information of DCB.
1967  * It includes TC UP mapping and queue TC mapping.
1968  */
1969 struct rte_eth_dcb_info {
1970 	uint8_t nb_tcs;        /**< number of TCs */
1971 	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
1972 	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
1973 	/** Rx and Tx queues assigned to tc */
1974 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
1975 };
1976 
1977 /**
1978  * This enum indicates the possible Forward Error Correction (FEC) modes
1979  * of an ethdev port.
1980  */
1981 enum rte_eth_fec_mode {
1982 	RTE_ETH_FEC_NOFEC = 0,      /**< FEC is off */
1983 	RTE_ETH_FEC_AUTO,	    /**< FEC autonegotiation modes */
1984 	RTE_ETH_FEC_BASER,          /**< FEC using common algorithm */
1985 	RTE_ETH_FEC_RS,             /**< FEC using RS algorithm */
1986 	RTE_ETH_FEC_LLRS,           /**< FEC using LLRS algorithm */
1987 };
1988 
1989 /* Translate from FEC mode to FEC capa */
1990 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1991 
1992 /* This macro indicates FEC capa mask */
1993 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1994 
1995 /* A structure used to get capabilities per link speed */
1996 struct rte_eth_fec_capa {
1997 	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
1998 	uint32_t capa;  /**< FEC capabilities bitmask */
1999 };
2000 
2001 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
2002 
2003 /* Macros to check for valid port */
2004 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
2005 	if (!rte_eth_dev_is_valid_port(port_id)) { \
2006 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2007 		return retval; \
2008 	} \
2009 } while (0)
2010 
2011 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
2012 	if (!rte_eth_dev_is_valid_port(port_id)) { \
2013 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
2014 		return; \
2015 	} \
2016 } while (0)
2017 
2018 /**
2019  * Function type used for Rx packet processing packet callbacks.
2020  *
2021  * The callback function is called on Rx with a burst of packets that have
2022  * been received on the given port and queue.
2023  *
2024  * @param port_id
2025  *   The Ethernet port on which Rx is being performed.
2026  * @param queue
2027  *   The queue on the Ethernet port which is being used to receive the packets.
2028  * @param pkts
2029  *   The burst of packets that have just been received.
2030  * @param nb_pkts
2031  *   The number of packets in the burst pointed to by "pkts".
2032  * @param max_pkts
2033  *   The max number of packets that can be stored in the "pkts" array.
2034  * @param user_param
2035  *   The arbitrary user parameter passed in by the application when the callback
2036  *   was originally configured.
2037  * @return
2038  *   The number of packets returned to the user.
2039  */
2040 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
2041 	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
2042 	void *user_param);
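
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch of an
 * Rx callback matching rte_rx_callback_fn that counts received packets.
 * It would be installed with rte_eth_add_rx_callback() (declared elsewhere
 * in this file); the counter passed through user_param is a hypothetical
 * application variable.
 *
 *   #include <rte_ethdev.h>
 *
 *   static uint16_t
 *   count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *   {
 *       uint64_t *counter = user_param;
 *
 *       (void)port_id;
 *       (void)queue;
 *       (void)pkts;
 *       (void)max_pkts;
 *       *counter += nb_pkts;
 *       return nb_pkts;  // pass all packets through to the application
 *   }
 *
 *   // Registration after queue setup, e.g.:
 *   //   static uint64_t rx_count;
 *   //   rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */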
2043 
2044 /**
2045  * Function type used for Tx packet processing packet callbacks.
2046  *
2047  * The callback function is called on Tx with a burst of packets immediately
2048  * before the packets are put onto the hardware queue for transmission.
2049  *
2050  * @param port_id
2051  *   The Ethernet port on which Tx is being performed.
2052  * @param queue
2053  *   The queue on the Ethernet port which is being used to transmit the packets.
2054  * @param pkts
2055  *   The burst of packets that are about to be transmitted.
2056  * @param nb_pkts
2057  *   The number of packets in the burst pointed to by "pkts".
2058  * @param user_param
2059  *   The arbitrary user parameter passed in by the application when the callback
2060  *   was originally configured.
2061  * @return
2062  *   The number of packets to be written to the NIC.
2063  */
2064 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
2065 	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
2066 
2067 /**
2068  * Possible states of an ethdev port.
2069  */
2070 enum rte_eth_dev_state {
2071 	/** Device is unused before being probed. */
2072 	RTE_ETH_DEV_UNUSED = 0,
2073 	/** Device is attached when allocated in probing. */
2074 	RTE_ETH_DEV_ATTACHED,
2075 	/** Device is in removed state when plug-out is detected. */
2076 	RTE_ETH_DEV_REMOVED,
2077 };
2078 
2079 struct rte_eth_dev_sriov {
2080 	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
2081 	uint8_t nb_q_per_pool;        /**< Rx queue number per pool */
2082 	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
2083 	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
2084 };
2085 #define RTE_ETH_DEV_SRIOV(dev)         ((dev)->data->sriov)
2086 
2087 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2088 
2089 #define RTE_ETH_DEV_NO_OWNER 0
2090 
2091 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2092 
2093 struct rte_eth_dev_owner {
2094 	uint64_t id; /**< The owner unique identifier. */
2095 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
2096 };
2097 
2098 /**@{@name Device flags
2099  * Flags internally saved in rte_eth_dev_data.dev_flags
2100  * and reported in rte_eth_dev_info.dev_flags.
2101  */
2102 /** PMD supports thread-safe flow operations */
2103 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  RTE_BIT32(0)
2104 /** Device supports link state interrupt */
2105 #define RTE_ETH_DEV_INTR_LSC              RTE_BIT32(1)
2106 /** Device is a bonding member */
2107 #define RTE_ETH_DEV_BONDING_MEMBER        RTE_BIT32(2)
2108 /** Device supports device removal interrupt */
2109 #define RTE_ETH_DEV_INTR_RMV              RTE_BIT32(3)
2110 /** Device is port representor */
2111 #define RTE_ETH_DEV_REPRESENTOR           RTE_BIT32(4)
2112 /** Device does not support MAC change after started */
2113 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR       RTE_BIT32(5)
2114 /**
2115  * Queue xstats filled automatically by ethdev layer.
2116  * PMDs filling the queue xstats themselves should not set this flag
2117  */
2118 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2119 /**@}*/
2120 
2121 /**
2122  * Iterates over valid ethdev ports owned by a specific owner.
2123  *
2124  * @param port_id
2125  *   The ID of the next possible valid owned port.
2126  * @param	owner_id
2127  *  The owner identifier.
2128  *  RTE_ETH_DEV_NO_OWNER means iterate over all valid ownerless ports.
2129  * @return
2130  *   Next valid port ID owned by owner_id, RTE_MAX_ETHPORTS if there is none.
2131  */
2132 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2133 		const uint64_t owner_id);
2134 
2135 /**
2136  * Macro to iterate over all enabled ethdev ports owned by a specific owner.
2137  */
2138 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2139 	for (p = rte_eth_find_next_owned_by(0, o); \
2140 	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2141 	     p = rte_eth_find_next_owned_by(p + 1, o))
2142 
2143 /**
2144  * Iterates over valid ethdev ports.
2145  *
2146  * @param port_id
2147  *   The ID of the next possible valid port.
2148  * @return
2149  *   Next valid port ID, RTE_MAX_ETHPORTS if there is none.
2150  */
2151 uint16_t rte_eth_find_next(uint16_t port_id);
2152 
2153 /**
2154  * Macro to iterate over all enabled and ownerless ethdev ports.
2155  */
2156 #define RTE_ETH_FOREACH_DEV(p) \
2157 	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
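
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * iterates over all usable ports with RTE_ETH_FOREACH_DEV, which skips
 * unused ports and ports owned by another entity.
 *
 *   #include <stdio.h>
 *   #include <rte_ethdev.h>
 *
 *   static void
 *   list_available_ports(void)
 *   {
 *       uint16_t port_id;
 *
 *       RTE_ETH_FOREACH_DEV(port_id)
 *           printf("port %u is available\n", port_id);
 *   }
 */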
2158 
2159 /**
2160  * Iterates over ethdev ports of a specified device.
2161  *
2162  * @param port_id_start
2163  *   The ID of the next possible valid port.
2164  * @param parent
2165  *   The generic device behind the ports to iterate.
2166  * @return
2167  *   Next port ID of the device, possibly port_id_start,
2168  *   RTE_MAX_ETHPORTS if there is none.
2169  */
2170 uint16_t
2171 rte_eth_find_next_of(uint16_t port_id_start,
2172 		const struct rte_device *parent);
2173 
2174 /**
2175  * Macro to iterate over all ethdev ports of a specified device.
2176  *
2177  * @param port_id
2178  *   The ID of the matching port being iterated.
2179  * @param parent
2180  *   The rte_device pointer matching the iterated ports.
2181  */
2182 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2183 	for (port_id = rte_eth_find_next_of(0, parent); \
2184 		port_id < RTE_MAX_ETHPORTS; \
2185 		port_id = rte_eth_find_next_of(port_id + 1, parent))
2186 
2187 /**
2188  * Iterates over sibling ethdev ports (i.e. sharing the same rte_device).
2189  *
2190  * @param port_id_start
2191  *   The ID of the next possible valid sibling port.
2192  * @param ref_port_id
2193  *   The ID of a reference port to compare rte_device with.
2194  * @return
2195  *   Next sibling port ID, possibly port_id_start or ref_port_id itself,
2196  *   RTE_MAX_ETHPORTS if there is none.
2197  */
2198 uint16_t
2199 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2200 
2201 /**
2202  * Macro to iterate over all ethdev ports sharing the same rte_device
2203  * as the specified port.
2204  * Note: the specified reference port is part of the loop iterations.
2205  *
2206  * @param port_id
2207  *   The ID of the matching port being iterated.
2208  * @param ref_port_id
2209  *   The ID of the port being compared.
2210  */
2211 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2212 	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2213 		port_id < RTE_MAX_ETHPORTS; \
2214 		port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2215 
2216 /**
2217  * Get a new unique owner identifier.
2218  * An owner identifier is used so that Ethernet devices are owned by only one
2219  * DPDK entity, avoiding management of a device by multiple entities.
2220  *
2221  * @param	owner_id
2222  *   Owner identifier pointer.
2223  * @return
2224  *   Negative errno value on error, 0 on success.
2225  */
2226 int rte_eth_dev_owner_new(uint64_t *owner_id);
2227 
2228 /**
2229  * Set an Ethernet device owner.
2230  *
2231  * @param	port_id
2232  *  The identifier of the port to own.
2233  * @param	owner
2234  *  The owner pointer.
2235  * @return
2236  *  Negative errno value on error, 0 on success.
2237  */
2238 int rte_eth_dev_owner_set(const uint16_t port_id,
2239 		const struct rte_eth_dev_owner *owner);
2240 
2241 /**
2242  * Unset Ethernet device owner to make the device ownerless.
2243  *
2244  * @param	port_id
2245  *  The identifier of the port to make ownerless.
2246  * @param	owner_id
2247  *  The owner identifier.
2248  * @return
2249  *  0 on success, negative errno value on error.
2250  */
2251 int rte_eth_dev_owner_unset(const uint16_t port_id,
2252 		const uint64_t owner_id);
2253 
2254 /**
2255  * Remove owner from all Ethernet devices owned by a specific owner.
2256  *
2257  * @param	owner_id
2258  *  The owner identifier.
2259  * @return
2260  *  0 on success, negative errno value on error.
2261  */
2262 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2263 
2264 /**
2265  * Get the owner of an Ethernet device.
2266  *
2267  * @param	port_id
2268  *  The port identifier.
2269  * @param	owner
2270  *  The owner structure pointer to fill.
2271  * @return
2272  *  0 on success, negative errno value on error.
2273  */
2274 int rte_eth_dev_owner_get(const uint16_t port_id,
2275 		struct rte_eth_dev_owner *owner);
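
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch of the
 * ownership workflow. An entity obtains a new owner identifier and then
 * claims a port so that no other DPDK entity manages it; the owner name
 * "my-app" is a hypothetical example.
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   claim_port(uint16_t port_id, uint64_t *owner_id)
 *   {
 *       struct rte_eth_dev_owner owner;
 *       int ret = rte_eth_dev_owner_new(owner_id);
 *
 *       if (ret != 0)
 *           return ret;
 *       memset(&owner, 0, sizeof(owner));
 *       owner.id = *owner_id;
 *       snprintf(owner.name, sizeof(owner.name), "my-app");
 *       return rte_eth_dev_owner_set(port_id, &owner);
 *   }
 */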
2276 
2277 /**
2278  * Get the number of ports which are usable for the application.
2279  *
2280  * These devices must be iterated by using the macro
2281  * ``RTE_ETH_FOREACH_DEV`` or ``RTE_ETH_FOREACH_DEV_OWNED_BY``
2282  * to deal with non-contiguous ranges of devices.
2283  *
2284  * @return
2285  *   The count of available Ethernet devices.
2286  */
2287 uint16_t rte_eth_dev_count_avail(void);
2288 
2289 /**
2290  * Get the total number of ports which are allocated.
2291  *
2292  * Some devices may not be available for the application.
2293  *
2294  * @return
2295  *   The total count of Ethernet devices.
2296  */
2297 uint16_t rte_eth_dev_count_total(void);
2298 
2299 /**
2300  * Convert a numerical speed in Mbps to a bitmap flag that can be used in
2301  * the bitmap link_speeds of the struct rte_eth_conf.
2302  *
2303  * @param speed
2304  *   Numerical speed value in Mbps
2305  * @param duplex
2306  *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
2307  * @return
2308  *   The RTE_ETH_LINK_SPEED_* flag for the speed, or 0 if the speed cannot be mapped
2309  */
2310 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
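
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch that
 * builds a link_speeds bitmap forcing a fixed 10G link, falling back to
 * autonegotiation when the speed cannot be mapped.
 *
 *   #include <rte_ethdev.h>
 *
 *   static uint32_t
 *   fixed_10g_link_speeds(void)
 *   {
 *       uint32_t flag = rte_eth_speed_bitflag(RTE_ETH_SPEED_NUM_10G,
 *                                             RTE_ETH_LINK_FULL_DUPLEX);
 *
 *       if (flag == 0)
 *           return RTE_ETH_LINK_SPEED_AUTONEG;  // speed not mappable
 *       return flag | RTE_ETH_LINK_SPEED_FIXED; // disable autonegotiation
 *   }
 */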
2311 
2312 /**
2313  * Get RTE_ETH_RX_OFFLOAD_* flag name.
2314  *
2315  * @param offload
2316  *   Offload flag.
2317  * @return
2318  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2319  */
2320 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2321 
2322 /**
2323  * Get RTE_ETH_TX_OFFLOAD_* flag name.
2324  *
2325  * @param offload
2326  *   Offload flag.
2327  * @return
2328  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2329  */
2330 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2331 
2332 /**
2333  * @warning
2334  * @b EXPERIMENTAL: this API may change without prior notice.
2335  *
2336  * Get RTE_ETH_DEV_CAPA_* flag name.
2337  *
2338  * @param capability
2339  *   Capability flag.
2340  * @return
2341  *   Capability name or 'UNKNOWN' if the flag cannot be recognized.
2342  */
2343 __rte_experimental
2344 const char *rte_eth_dev_capability_name(uint64_t capability);
2345 
2346 /**
2347  * Configure an Ethernet device.
2348  * This function must be invoked before any other function in the
2349  * Ethernet API. It can also be re-invoked when a device is in the
2350  * stopped state.
2351  *
2352  * @param port_id
2353  *   The port identifier of the Ethernet device to configure.
2354  * @param nb_rx_queue
2355  *   The number of receive queues to set up for the Ethernet device.
2356  * @param nb_tx_queue
2357  *   The number of transmit queues to set up for the Ethernet device.
2358  * @param eth_conf
2359  *   The pointer to the configuration data to be used for the Ethernet device.
2360  *   The *rte_eth_conf* structure includes:
2361  *     -  the hardware offload features to activate, with dedicated fields for
2362  *        each statically configurable offload hardware feature provided by
2363  *        Ethernet devices, such as IP checksum or VLAN tag stripping for
2364  *        example.
2365  *        The Rx offload bitfield API is obsolete and will be deprecated.
2366  *        Applications should set the ignore_bitfield_offloads bit on *rxmode*
2367  *        structure and use offloads field to set per-port offloads instead.
2368  *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
2369  *        the [rt]x_offload_capa returned from rte_eth_dev_info_get().
2370  *        Any device-supported offload set in the input argument
2371  *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
2372  *        on all queues and cannot be disabled in rte_eth_[rt]x_queue_setup().
2373  *     -  the Receive Side Scaling (RSS) configuration when using multiple Rx
2374  *        queues per port. Any RSS hash function set in eth_conf->rss_conf.rss_hf
2375  *        must be within the flow_type_rss_offloads provided by drivers via
2376  *        rte_eth_dev_info_get() API.
2377  *
2378  *   Embedding all configuration information in a single data structure
2379  *   is a flexible method that allows the addition of new features
2380  *   without changing the syntax of the API.
2381  * @return
2382  *   - 0: Success, device configured.
2383  *   - <0: Error code returned by the driver configuration function.
2384  */
2385 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2386 		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
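
/*
 * Illustrative example (not part of the ethdev API): a minimal sketch of the
 * canonical bring-up sequence around rte_eth_dev_configure() with one Rx and
 * one Tx queue. The mempool "mb_pool" is assumed to have been created
 * beforehand (e.g. with rte_pktmbuf_pool_create()); the ring size of 1024
 * descriptors is an arbitrary example.
 *
 *   #include <string.h>
 *   #include <rte_ethdev.h>
 *
 *   static int
 *   port_init(uint16_t port_id, struct rte_mempool *mb_pool)
 *   {
 *       struct rte_eth_conf conf;
 *       int socket_id = rte_eth_dev_socket_id(port_id);
 *       int ret;
 *
 *       memset(&conf, 0, sizeof(conf));
 *       ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *       if (ret != 0)
 *           return ret;
 *       ret = rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id,
 *                                    NULL, mb_pool);
 *       if (ret != 0)
 *           return ret;
 *       ret = rte_eth_tx_queue_setup(port_id, 0, 1024, socket_id, NULL);
 *       if (ret != 0)
 *           return ret;
 *       return rte_eth_dev_start(port_id);
 *   }
 */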
2387 
2388 /**
2389  * Check if an Ethernet device was physically removed.
2390  *
2391  * @param port_id
2392  *   The port identifier of the Ethernet device.
2393  * @return
2394  *   1 when the Ethernet device is removed, otherwise 0.
2395  */
2396 int
2397 rte_eth_dev_is_removed(uint16_t port_id);
2398 
2399 /**
2400  * Allocate and set up a receive queue for an Ethernet device.
2401  *
2402  * The function allocates a contiguous block of memory for *nb_rx_desc*
2403  * receive descriptors from a memory zone associated with *socket_id*
2404  * and initializes each receive descriptor with a network buffer allocated
2405  * from the memory pool *mb_pool*.
2406  *
2407  * @param port_id
2408  *   The port identifier of the Ethernet device.
2409  * @param rx_queue_id
2410  *   The index of the receive queue to set up.
2411  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2412  *   to rte_eth_dev_configure().
2413  * @param nb_rx_desc
2414  *   The number of receive descriptors to allocate for the receive ring.
2415  * @param socket_id
2416  *   The *socket_id* argument is the socket identifier in case of NUMA.
2417  *   The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2418  *   the DMA memory allocated for the receive descriptors of the ring.
2419  * @param rx_conf
2420  *   The pointer to the configuration data to be used for the receive queue.
2421  *   NULL value is allowed, in which case default Rx configuration
2422  *   will be used.
2423  *   The *rx_conf* structure contains an *rx_thresh* structure with the values
2424  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
2425  *   ring.
2426  *   In addition it contains the hardware offloads features to activate using
2427  *   the RTE_ETH_RX_OFFLOAD_* flags.
2428  *   If an offload set in rx_conf->offloads
2429  *   hasn't been set in the input argument eth_conf->rxmode.offloads
2430  *   to rte_eth_dev_configure(), it is a newly added offload; it must be
2431  *   a per-queue offload and it is enabled only for this queue.
2432  *   There is no need to repeat any bit in rx_conf->offloads which has already
2433  *   been enabled in rte_eth_dev_configure() at port level. An offload enabled
2434  *   at port level can't be disabled at queue level.
2435  *   The configuration structure also contains a pointer to the array
2436  *   of receiving buffer segment descriptions (see the rx_seg and rx_nseg
2437  *   fields); this extended configuration may be used by split offloads like
2438  *   RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mb_pool is not NULL,
2439  *   the extended configuration fields must be set to NULL and zero.
2440  * @param mb_pool
2441  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
2442  *   memory buffers to populate each descriptor of the receive ring. There are
2443  *   two options to provide Rx buffer configuration:
2444  *   - single pool:
2445  *     mb_pool is not NULL, rx_conf.rx_nseg is 0.
2446  *   - multiple segments description:
2447  *     mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0.
2448  *     Taken only if flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads.
2449  *
2450  * @return
2451  *   - 0: Success, receive queue correctly set up.
2452  *   - -EIO: if device is removed.
2453  *   - -ENODEV: if *port_id* is invalid.
2454  *   - -EINVAL: The memory pool pointer is null or the size of network buffers
2455  *      which can be allocated from this memory pool does not fit the various
2456  *      buffer sizes allowed by the device controller.
2457  *   - -ENOMEM: Unable to allocate the receive ring descriptors or to
2458  *      allocate network memory buffers from the memory pool when
2459  *      initializing receive descriptors.
2460  */
2461 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2462 		uint16_t nb_rx_desc, unsigned int socket_id,
2463 		const struct rte_eth_rxconf *rx_conf,
2464 		struct rte_mempool *mb_pool);
2465 
2466 /**
2467  * @warning
2468  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2469  *
2470  * Allocate and set up a hairpin receive queue for an Ethernet device.
2471  *
2472  * The function sets up the selected queue to be used in hairpin mode.
2473  *
2474  * @param port_id
2475  *   The port identifier of the Ethernet device.
2476  * @param rx_queue_id
2477  *   The index of the receive queue to set up.
2478  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2479  *   to rte_eth_dev_configure().
2480  * @param nb_rx_desc
2481  *   The number of receive descriptors to allocate for the receive ring.
2482  *   0 means the PMD will use default value.
2483  * @param conf
2484  *   The pointer to the hairpin configuration.
2485  *
2486  * @return
2487  *   - (0) if successful.
2488  *   - (-ENODEV) if *port_id* is invalid.
2489  *   - (-ENOTSUP) if hardware doesn't support.
2490  *   - (-EINVAL) if bad parameter.
2491  *   - (-ENOMEM) if unable to allocate the resources.
2492  */
2493 __rte_experimental
2494 int rte_eth_rx_hairpin_queue_setup
2495 	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2496 	 const struct rte_eth_hairpin_conf *conf);
2497 
2498 /**
2499  * Allocate and set up a transmit queue for an Ethernet device.
2500  *
2501  * @param port_id
2502  *   The port identifier of the Ethernet device.
2503  * @param tx_queue_id
2504  *   The index of the transmit queue to set up.
2505  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2506  *   to rte_eth_dev_configure().
2507  * @param nb_tx_desc
2508  *   The number of transmit descriptors to allocate for the transmit ring.
2509  * @param socket_id
2510  *   The *socket_id* argument is the socket identifier in case of NUMA.
2511  *   Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2512  *   the DMA memory allocated for the transmit descriptors of the ring.
2513  * @param tx_conf
2514  *   The pointer to the configuration data to be used for the transmit queue.
2515  *   NULL value is allowed, in which case default Tx configuration
2516  *   will be used.
2517  *   The *tx_conf* structure contains the following data:
2518  *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
2519  *     Write-Back threshold registers of the transmit ring.
2520  *     When setting the Write-Back threshold to a value greater than zero,
2521  *     the *tx_rs_thresh* value should be explicitly set to one.
2522  *   - The *tx_free_thresh* value indicates the [minimum] number of network
2523  *     buffers that must be pending in the transmit ring to trigger their
2524  *     [implicit] freeing by the driver transmit function.
2525  *   - The *tx_rs_thresh* value indicates the [minimum] number of transmit
2526  *     descriptors that must be pending in the transmit ring before setting the
2527  *     RS bit on a descriptor by the driver transmit function.
2528  *     The *tx_rs_thresh* value should be less than or equal to the
2529  *     *tx_free_thresh* value, and both of them should be less than
2530  *     *nb_tx_desc* - 3.
2531  *   - The *offloads* member contains Tx offloads to be enabled.
2532  *     If an offload set in tx_conf->offloads
2533  *     hasn't been set in the input argument eth_conf->txmode.offloads
2534  *     to rte_eth_dev_configure(), it is a newly added offload; it must be
2535  *     a per-queue offload and it is enabled only for this queue.
2536  *     There is no need to repeat any bit in tx_conf->offloads which has already
2537  *     been enabled in rte_eth_dev_configure() at port level. An offload enabled
2538  *     at port level can't be disabled at queue level.
2539  *
2540  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
2541  *     the transmit function to use default values.
2542  * @return
2543  *   - 0: Success, the transmit queue is correctly set up.
2544  *   - -ENOMEM: Unable to allocate the transmit ring descriptors.
2545  */
2546 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2547 		uint16_t nb_tx_desc, unsigned int socket_id,
2548 		const struct rte_eth_txconf *tx_conf);
2549 
2550 /**
2551  * @warning
2552  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2553  *
2554  * Allocate and set up a transmit hairpin queue for an Ethernet device.
2555  *
2556  * @param port_id
2557  *   The port identifier of the Ethernet device.
2558  * @param tx_queue_id
2559  *   The index of the transmit queue to set up.
2560  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2561  *   to rte_eth_dev_configure().
2562  * @param nb_tx_desc
2563  *   The number of transmit descriptors to allocate for the transmit ring.
2564  *   0 to set default PMD value.
2565  * @param conf
2566  *   The hairpin configuration.
2567  *
2568  * @return
2569  *   - (0) if successful.
2570  *   - (-ENODEV) if *port_id* is invalid.
2571  *   - (-ENOTSUP) if hardware doesn't support.
2572  *   - (-EINVAL) if bad parameter.
2573  *   - (-ENOMEM) if unable to allocate the resources.
2574  */
2575 __rte_experimental
2576 int rte_eth_tx_hairpin_queue_setup
2577 	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2578 	 const struct rte_eth_hairpin_conf *conf);
2579 
2580 /**
2581  * @warning
2582  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2583  *
2584  * Get all the hairpin peer Rx / Tx ports of the current port.
2585  * The caller should ensure that the array is large enough to save the ports
2586  * list.
2587  *
2588  * @param port_id
2589  *   The port identifier of the Ethernet device.
2590  * @param peer_ports
2591  *   Pointer to the array to store the peer ports list.
2592  * @param len
2593  *   Length of the array to store the port identifiers.
2594  * @param direction
2595  *   Current port to peer port direction:
2596  *   positive - current port is used as Tx to get all peer Rx ports.
2597  *   zero - current port is used as Rx to get all peer Tx ports.
2598  *
2599  * @return
2600  *   - (0 or positive) actual peer ports number.
2601  *   - (-EINVAL) if bad parameter.
2602  *   - (-ENODEV) if *port_id* invalid
2603  *   - (-ENOTSUP) if hardware doesn't support.
2604  *   - Others detailed errors from PMDs.
2605  */
2606 __rte_experimental
2607 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2608 				   size_t len, uint32_t direction);
2609 
2610 /**
2611  * @warning
2612  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2613  *
2614  * Bind all hairpin Tx queues of one port to the Rx queues of the peer port.
2615  * It is only allowed to call this function after all hairpin queues are
2616  * configured properly and the devices are in started state.
2617  *
2618  * @param tx_port
2619  *   The identifier of the Tx port.
2620  * @param rx_port
2621  *   The identifier of peer Rx port.
2622  *   RTE_MAX_ETHPORTS is allowed for the traversal of all devices.
2623  *   Rx port ID could have the same value as Tx port ID.
2624  *
2625  * @return
2626  *   - (0) if successful.
2627  *   - (-ENODEV) if Tx port ID is invalid.
2628  *   - (-EBUSY) if device is not in started state.
2629  *   - (-ENOTSUP) if hardware doesn't support.
2630  *   - Others detailed errors from PMDs.
2631  */
2632 __rte_experimental
2633 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2634 
2635 /**
2636  * @warning
2637  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2638  *
2639  * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port.
2640  * This should be called before closing the Tx or Rx devices, if the bind
2641  * function was called before.
2642  * After unbinding the hairpin port pair, it is allowed to bind them again.
2643  * Changing the queue configuration should be done after stopping the device(s).
2644  *
2645  * @param tx_port
2646  *   The identifier of the Tx port.
2647  * @param rx_port
2648  *   The identifier of peer Rx port.
2649  *   RTE_MAX_ETHPORTS is allowed for traversal of all devices.
2650  *   Rx port ID could have the same value as Tx port ID.
2651  *
2652  * @return
2653  *   - (0) if successful.
2654  *   - (-ENODEV) if Tx port ID is invalid.
2655  *   - (-EBUSY) if device is in stopped state.
2656  *   - (-ENOTSUP) if hardware doesn't support.
2657  *   - Others detailed errors from PMDs.
2658  */
2659 __rte_experimental
2660 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2661 
2662 /**
2663  * @warning
2664  * @b EXPERIMENTAL: this API may change without prior notice.
2665  *
2666  *  Get the number of aggregated ports of the DPDK port (specified with port_id).
2667  *  It is used when multiple ports are aggregated into a single one.
2668  *
2669  *  For a regular physical port that doesn't have aggregated ports,
2670  *  the number of aggregated ports is reported as 0.
2671  *
2672  * @param port_id
2673  *   The port identifier of the Ethernet device.
2674  * @return
2675  *   - (>=0) the number of aggregated ports on success.
2676  */
2677 __rte_experimental
2678 int rte_eth_dev_count_aggr_ports(uint16_t port_id);
2679 
2680 /**
2681  * @warning
2682  * @b EXPERIMENTAL: this API may change without prior notice.
2683  *
2684  *  Map a Tx queue with an aggregated port of the DPDK port (specified with port_id).
2685  *  When multiple ports are aggregated into a single one,
2686  *  it allows choosing which port to use for Tx via a queue.
2687  *
2688  *  The application should use rte_eth_dev_map_aggr_tx_affinity()
2689  *  after rte_eth_dev_configure(), rte_eth_tx_queue_setup(), and
2690  *  before rte_eth_dev_start().
2691  *
2692  * @param port_id
2693  *   The identifier of the port used in rte_eth_tx_burst().
2694  * @param tx_queue_id
2695  *   The index of the transmit queue used in rte_eth_tx_burst().
2696  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2697  *   to rte_eth_dev_configure().
2698  * @param affinity
2699  *   The number of the aggregated port.
2700  *   Value 0 means no affinity and traffic could be routed to any aggregated port.
2701  *   The first aggregated port is number 1 and so on.
2702  *   The maximum number is given by rte_eth_dev_count_aggr_ports().
2703  *
2704  * @return
2705  *   Zero if successful. Non-zero otherwise.
2706  */
2707 __rte_experimental
2708 int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
2709 				     uint8_t affinity);
2710 
2711 /**
2712  * Return the NUMA socket to which an Ethernet device is connected
2713  *
2714  * @param port_id
2715  *   The port identifier of the Ethernet device
2716  * @return
2717  *   - The NUMA socket ID which the Ethernet device is connected to.
2718  *   - -1 (which translates to SOCKET_ID_ANY) if the socket could not be
2719  *     determined. rte_errno is then set to:
2720  *     - EINVAL if the port_id is invalid,
2721  *     - 0 if the socket could not be determined.
2722  */
2723 int rte_eth_dev_socket_id(uint16_t port_id);
2724 
2725 /**
2726  * Check if port_id of device is attached
2727  *
2728  * @param port_id
2729  *   The port identifier of the Ethernet device
2730  * @return
2731  *   - 0 if port is out of range or not attached
2732  *   - 1 if device is attached
2733  */
2734 int rte_eth_dev_is_valid_port(uint16_t port_id);
2735 
2736 /**
2737  * @warning
2738  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
2739  *
2740  * Check if Rx queue is valid.
2741  * If the queue has been setup, it is considered valid.
2742  *
2743  * @param port_id
2744  *   The port identifier of the Ethernet device.
2745  * @param queue_id
2746  *   The index of the receive queue.
2747  * @return
2748  *   - -ENODEV: if port_id is invalid.
2749  *   - -EINVAL: if queue_id is out of range or queue has not been setup.
2750  *   - 0 if Rx queue is valid.
2751  */
2752 __rte_experimental
2753 int rte_eth_rx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2754 
2755 /**
2756  * @warning
2757  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
2758  *
2759  * Check if Tx queue is valid.
2760  * If the queue has been setup, it is considered valid.
2761  *
2762  * @param port_id
2763  *   The port identifier of the Ethernet device.
2764  * @param queue_id
2765  *   The index of the transmit queue.
2766  * @return
2767  *   - -ENODEV: if port_id is invalid.
2768  *   - -EINVAL: if queue_id is out of range or queue has not been setup.
2769  *   - 0 if Tx queue is valid.
2770  */
2771 __rte_experimental
2772 int rte_eth_tx_queue_is_valid(uint16_t port_id, uint16_t queue_id);
2773 
2774 /**
2775  * Start the specified Rx queue of a port. It is used when the rx_deferred_start
2776  * flag of the specified queue is true.
2777  *
2778  * @param port_id
2779  *   The port identifier of the Ethernet device
2780  * @param rx_queue_id
2781  *   The index of the Rx queue to start.
2782  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2783  *   to rte_eth_dev_configure().
2784  * @return
2785  *   - 0: Success, the receive queue is started.
2786  *   - -ENODEV: if *port_id* is invalid.
2787  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2788  *   - -EIO: if device is removed.
2789  *   - -ENOTSUP: The function is not supported by the PMD.
2790  */
2791 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2792 
2793 /**
2794  * Stop the specified Rx queue of a port.
2795  *
2796  * @param port_id
2797  *   The port identifier of the Ethernet device
2798  * @param rx_queue_id
2799  *   The index of the Rx queue to stop.
2800  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2801  *   to rte_eth_dev_configure().
2802  * @return
2803  *   - 0: Success, the receive queue is stopped.
2804  *   - -ENODEV: if *port_id* is invalid.
2805  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2806  *   - -EIO: if device is removed.
2807  *   - -ENOTSUP: The function is not supported by the PMD.
2808  */
2809 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2810 
2811 /**
2812  * Start the specified Tx queue of a port. It is used when the tx_deferred_start
2813  * flag of the specified queue is true.
2814  *
2815  * @param port_id
2816  *   The port identifier of the Ethernet device
2817  * @param tx_queue_id
2818  *   The index of the Tx queue to start.
2819  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2820  *   to rte_eth_dev_configure().
2821  * @return
2822  *   - 0: Success, the transmit queue is started.
2823  *   - -ENODEV: if *port_id* is invalid.
2824  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2825  *   - -EIO: if device is removed.
2826  *   - -ENOTSUP: The function is not supported by the PMD.
2827  */
2828 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2829 
2830 /**
2831  * Stop the specified Tx queue of a port.
2832  *
2833  * @param port_id
2834  *   The port identifier of the Ethernet device
2835  * @param tx_queue_id
2836  *   The index of the Tx queue to stop.
2837  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2838  *   to rte_eth_dev_configure().
2839  * @return
2840  *   - 0: Success, the transmit queue is stopped.
2841  *   - -ENODEV: if *port_id* is invalid.
2842  *   - -EINVAL: The queue_id is out of range or the queue is a hairpin queue.
2843  *   - -EIO: if device is removed.
2844  *   - -ENOTSUP: The function is not supported by the PMD.
2845  */
2846 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2847 
2848 /**
2849  * Start an Ethernet device.
2850  *
2851  * The device start step is the last one and consists of setting the configured
2852  * offload features and starting the transmit and the receive units of the
2853  * device.
2854  *
2855  * The device RTE_ETH_DEV_NOLIVE_MAC_ADDR flag causes the MAC address to be set
2856  * before the PMD port start callback function is invoked.
2857  *
2858  * After start, the status of all device queues (except deferred-start queues)
2859  * should be `RTE_ETH_QUEUE_STATE_STARTED`.
2860  *
2861  * On success, all basic functions exported by the Ethernet API (link status,
2862  * receive/transmit, and so on) can be invoked.
2863  *
2864  * @param port_id
2865  *   The port identifier of the Ethernet device.
2866  * @return
2867  *   - 0: Success, Ethernet device started.
2868  *   - -EAGAIN: If start operation must be retried.
2869  *   - <0: Error code of the driver device start function.
2870  */
2871 int rte_eth_dev_start(uint16_t port_id);
2872 
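/*
 * Usage sketch (illustrative only): start a port, retrying a bounded number of
 * times when the driver asks for a retry with -EAGAIN. The retry count is an
 * arbitrary choice for the example; a real application may also add a delay.
 *
 *	int ret, retries = 3;
 *
 *	do {
 *		ret = rte_eth_dev_start(port_id);
 *	} while (ret == -EAGAIN && retries-- > 0);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "cannot start port %u: %s\n",
 *			 port_id, rte_strerror(-ret));
 */
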
2873 /**
2874  * Stop an Ethernet device. The device can be restarted with a call to
2875  * rte_eth_dev_start()
2876  *
2877  * After stop, the status of all device queues should be `RTE_ETH_QUEUE_STATE_STOPPED`.
2878  *
2879  * @param port_id
2880  *   The port identifier of the Ethernet device.
2881  * @return
2882  *   - 0: Success, Ethernet device stopped.
2883  *   - -EBUSY: If stopping the port is not allowed in current state.
2884  *   - <0: Error code of the driver device stop function.
2885  */
2886 int rte_eth_dev_stop(uint16_t port_id);
2887 
2888 /**
2889  * Link up an Ethernet device.
2890  *
2891  * Setting the device link up will re-enable the device Rx/Tx
2892  * functionality after it was previously set link down.
2893  *
2894  * @param port_id
2895  *   The port identifier of the Ethernet device.
2896  * @return
2897  *   - 0: Success, Ethernet device linked up.
2898  *   - <0: Error code of the driver device link up function.
2899  */
2900 int rte_eth_dev_set_link_up(uint16_t port_id);
2901 
2902 /**
2903  * Link down an Ethernet device.
2904  * The device Rx/Tx functionality will be disabled on success,
2905  * and it can be re-enabled with a call to
2906  * rte_eth_dev_set_link_up().
2907  *
2908  * @param port_id
2909  *   The port identifier of the Ethernet device.
2910  */
2911 int rte_eth_dev_set_link_down(uint16_t port_id);
2912 
2913 /**
2914  * Close a stopped Ethernet device. The device cannot be restarted!
2915  * The function frees all port resources.
2916  *
2917  * @param port_id
2918  *   The port identifier of the Ethernet device.
2919  * @return
2920  *   - Zero if the port is closed successfully.
2921  *   - Negative if something went wrong.
2922  */
2923 int rte_eth_dev_close(uint16_t port_id);
2924 
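/*
 * Usage sketch (illustrative only): orderly shutdown of a port. The port must
 * be stopped before it can be closed; after close the port cannot be restarted
 * and its resources are freed.
 *
 *	int ret;
 *
 *	ret = rte_eth_dev_stop(port_id);
 *	if (ret != 0)
 *		printf("stop failed for port %u: %s\n",
 *		       port_id, rte_strerror(-ret));
 *	ret = rte_eth_dev_close(port_id);
 *	if (ret != 0)
 *		printf("close failed for port %u: %s\n",
 *		       port_id, rte_strerror(-ret));
 */
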
2925 /**
2926  * Reset an Ethernet device and keep its port ID.
2927  *
2928  * When a port has to be reset passively, the DPDK application can invoke
2929  * this function. For example when a PF is reset, all its VFs should also
2930  * be reset. Normally a DPDK application can invoke this function when
2931  * RTE_ETH_EVENT_INTR_RESET event is detected, but can also use it to start
2932  * a port reset in other circumstances.
2933  *
2934  * When this function is called, it first stops the port and then calls the
2935  * PMD specific dev_uninit( ) and dev_init( ) to return the port to initial
2936  * state, in which no Tx and Rx queues are setup, as if the port has been
2937  * reset and not started. The port keeps the port ID it had before the
2938  * function call.
2939  *
2940  * After calling rte_eth_dev_reset( ), the application should use
2941  * rte_eth_dev_configure( ), rte_eth_rx_queue_setup( ),
2942  * rte_eth_tx_queue_setup( ), and rte_eth_dev_start( )
2943  * to reconfigure the device as appropriate.
2944  *
2945  * Note: To avoid unexpected behavior, the application should stop calling
2946  * Tx and Rx functions before calling rte_eth_dev_reset( ). For thread
2947  * safety, all these controlling functions should be called from the same
2948  * thread.
2949  *
2950  * @param port_id
2951  *   The port identifier of the Ethernet device.
2952  *
2953  * @return
2954  *   - (0) if successful.
2955  *   - (-ENODEV) if *port_id* is invalid.
2956  *   - (-ENOTSUP) if hardware doesn't support this function.
2957  *   - (-EPERM) if not run from the primary process.
2958  *   - (-EIO) if re-initialisation failed or device is removed.
2959  *   - (-ENOMEM) if the reset failed due to OOM.
2960  *   - (-EAGAIN) if the reset temporarily failed and should be retried later.
2961  */
2962 int rte_eth_dev_reset(uint16_t port_id);
2963 
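/*
 * Usage sketch (illustrative only): minimal reaction to a reset request, as
 * typically done from an RTE_ETH_EVENT_INTR_RESET handler. The function name
 * is hypothetical and the queue/port reconfiguration details (queue counts,
 * Rx/Tx queue setup) are application specific and only hinted at here.
 *
 *	static int
 *	example_reset_port(uint16_t port_id, const struct rte_eth_conf *conf)
 *	{
 *		int ret = rte_eth_dev_reset(port_id);
 *
 *		if (ret != 0)
 *			return ret;
 *		// Port is back to its initial state: reconfigure and restart.
 *		ret = rte_eth_dev_configure(port_id, 1, 1, conf);
 *		if (ret != 0)
 *			return ret;
 *		// ... rte_eth_rx_queue_setup() / rte_eth_tx_queue_setup() ...
 *		return rte_eth_dev_start(port_id);
 *	}
 */
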
2964 /**
2965  * Enable receipt in promiscuous mode for an Ethernet device.
2966  *
2967  * @param port_id
2968  *   The port identifier of the Ethernet device.
2969  * @return
2970  *   - (0) if successful.
2971  *   - (-ENOTSUP) if support for promiscuous_enable() does not exist
2972  *     for the device.
2973  *   - (-ENODEV) if *port_id* invalid.
2974  */
2975 int rte_eth_promiscuous_enable(uint16_t port_id);
2976 
2977 /**
2978  * Disable receipt in promiscuous mode for an Ethernet device.
2979  *
2980  * @param port_id
2981  *   The port identifier of the Ethernet device.
2982  * @return
2983  *   - (0) if successful.
2984  *   - (-ENOTSUP) if support for promiscuous_disable() does not exist
2985  *     for the device.
2986  *   - (-ENODEV) if *port_id* invalid.
2987  */
2988 int rte_eth_promiscuous_disable(uint16_t port_id);
2989 
2990 /**
2991  * Return the value of promiscuous mode for an Ethernet device.
2992  *
2993  * @param port_id
2994  *   The port identifier of the Ethernet device.
2995  * @return
2996  *   - (1) if promiscuous is enabled
2997  *   - (0) if promiscuous is disabled.
2998  *   - (-1) on error
2999  */
3000 int rte_eth_promiscuous_get(uint16_t port_id);
3001 
3002 /**
3003  * Enable the receipt of any multicast frame by an Ethernet device.
3004  *
3005  * @param port_id
3006  *   The port identifier of the Ethernet device.
3007  * @return
3008  *   - (0) if successful.
3009  *   - (-ENOTSUP) if support for allmulticast_enable() does not exist
3010  *     for the device.
3011  *   - (-ENODEV) if *port_id* invalid.
3012  */
3013 int rte_eth_allmulticast_enable(uint16_t port_id);
3014 
3015 /**
3016  * Disable the receipt of all multicast frames by an Ethernet device.
3017  *
3018  * @param port_id
3019  *   The port identifier of the Ethernet device.
3020  * @return
3021  *   - (0) if successful.
3022  *   - (-ENOTSUP) if support for allmulticast_disable() does not exist
3023  *     for the device.
3024  *   - (-ENODEV) if *port_id* invalid.
3025  */
3026 int rte_eth_allmulticast_disable(uint16_t port_id);
3027 
3028 /**
3029  * Return the value of allmulticast mode for an Ethernet device.
3030  *
3031  * @param port_id
3032  *   The port identifier of the Ethernet device.
3033  * @return
3034  *   - (1) if allmulticast is enabled
3035  *   - (0) if allmulticast is disabled.
3036  *   - (-1) on error
3037  */
3038 int rte_eth_allmulticast_get(uint16_t port_id);
3039 
3040 /**
3041  * Retrieve the link status (up/down), the duplex mode (half/full),
3042  * the negotiation (auto/fixed), and if available, the speed (Mbps).
3043  *
3044  * It might need to wait up to 9 seconds.
3045  * @see rte_eth_link_get_nowait.
3046  *
3047  * @param port_id
3048  *   The port identifier of the Ethernet device.
3049  * @param link
3050  *   Link information written back.
3051  * @return
3052  *   - (0) if successful.
3053  *   - (-ENOTSUP) if the function is not supported in PMD.
3054  *   - (-ENODEV) if *port_id* invalid.
3055  *   - (-EINVAL) if bad parameter.
3056  */
3057 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
3058 
3059 /**
3060  * Retrieve the link status (up/down), the duplex mode (half/full),
3061  * the negotiation (auto/fixed), and if available, the speed (Mbps).
3062  *
3063  * @param port_id
3064  *   The port identifier of the Ethernet device.
3065  * @param link
3066  *   Link information written back.
3067  * @return
3068  *   - (0) if successful.
3069  *   - (-ENOTSUP) if the function is not supported in PMD.
3070  *   - (-ENODEV) if *port_id* invalid.
3071  *   - (-EINVAL) if bad parameter.
3072  */
3073 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
3074 
3075 /**
3076  * @warning
3077  * @b EXPERIMENTAL: this API may change without prior notice.
3078  *
3079  * The function converts a link_speed to a string. It handles all special
3080  * values like unknown or none speed.
3081  *
3082  * @param link_speed
3083  *   link_speed of rte_eth_link struct
3084  * @return
3085  *   Link speed in textual format. It is a pointer to immutable memory.
3086  *   No free is required.
3087  */
3088 __rte_experimental
3089 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
3090 
3091 /**
3092  * @warning
3093  * @b EXPERIMENTAL: this API may change without prior notice.
3094  *
3095  * The function converts a rte_eth_link struct representing a link status to
3096  * a string.
3097  *
3098  * @param str
3099  *   A pointer to a string to be filled with textual representation of
3100  *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
3101  *   store default link status text.
3102  * @param len
3103  *   Length of available memory at 'str' string.
3104  * @param eth_link
3105  *   Link status returned by rte_eth_link_get function
3106  * @return
3107  *   Number of bytes written to str array or -EINVAL if bad parameter.
3108  */
3109 __rte_experimental
3110 int rte_eth_link_to_str(char *str, size_t len,
3111 			const struct rte_eth_link *eth_link);
3112 
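/*
 * Usage sketch (illustrative only): query the link without waiting and print
 * it in textual form using the helper above.
 *
 *	struct rte_eth_link link;
 *	char text[RTE_ETH_LINK_MAX_STR_LEN];
 *	int ret;
 *
 *	ret = rte_eth_link_get_nowait(port_id, &link);
 *	if (ret == 0 && rte_eth_link_to_str(text, sizeof(text), &link) > 0)
 *		printf("port %u: %s\n", port_id, text);
 */
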
3113 /**
3114  * Retrieve the general I/O statistics of an Ethernet device.
3115  *
3116  * @param port_id
3117  *   The port identifier of the Ethernet device.
3118  * @param stats
3119  *   A pointer to a structure of type *rte_eth_stats* to be filled with
3120  *   the values of device counters for the following set of statistics:
3121  *   - *ipackets* with the total of successfully received packets.
3122  *   - *opackets* with the total of successfully transmitted packets.
3123  *   - *ibytes*   with the total of successfully received bytes.
3124  *   - *obytes*   with the total of successfully transmitted bytes.
3125  *   - *ierrors*  with the total of erroneous received packets.
3126  *   - *oerrors*  with the total of failed transmitted packets.
3127  * @return
3128  *   Zero if successful. Non-zero otherwise.
3129  */
3130 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
3131 
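/*
 * Usage sketch (illustrative only): dump the basic counters of a port.
 * Requires inttypes.h for the PRIu64 format macros.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("port %u: rx %"PRIu64" pkts (%"PRIu64" errors), "
 *		       "tx %"PRIu64" pkts (%"PRIu64" errors)\n",
 *		       port_id, stats.ipackets, stats.ierrors,
 *		       stats.opackets, stats.oerrors);
 */
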
3132 /**
3133  * Reset the general I/O statistics of an Ethernet device.
3134  *
3135  * @param port_id
3136  *   The port identifier of the Ethernet device.
3137  * @return
3138  *   - (0) if device notified to reset stats.
3139  *   - (-ENOTSUP) if hardware doesn't support.
3140  *   - (-ENODEV) if *port_id* invalid.
3141  *   - (<0): Error code of the driver stats reset function.
3142  */
3143 int rte_eth_stats_reset(uint16_t port_id);
3144 
3145 /**
3146  * Retrieve names of extended statistics of an Ethernet device.
3147  *
3148  * There is an assumption that 'xstats_names' and 'xstats' arrays are matched
3149  * by array index:
3150  *  xstats_names[i].name => xstats[i].value
3151  *
3152  * And the array index is the same as the id field of 'struct rte_eth_xstat':
3153  *  xstats[i].id == i
3154  *
3155  * This assumption makes key-value pair matching less flexible but simpler.
3156  *
3157  * @param port_id
3158  *   The port identifier of the Ethernet device.
3159  * @param xstats_names
3160  *   An rte_eth_xstat_name array of at least *size* elements to
3161  *   be filled. If set to NULL, the function returns the required number
3162  *   of elements.
3163  * @param size
3164  *   The size of the xstats_names array (number of elements).
3165  * @return
3166  *   - A positive value lower or equal to size: success. The return value
3167  *     is the number of entries filled in the stats table.
3168  *   - A positive value higher than size: error, the given statistics table
3169  *     is too small. The return value corresponds to the size that should
3170  *     be given to succeed. The entries in the table are not valid and
3171  *     shall not be used by the caller.
3172  *   - A negative value on error (invalid port ID).
3173  */
3174 int rte_eth_xstats_get_names(uint16_t port_id,
3175 		struct rte_eth_xstat_name *xstats_names,
3176 		unsigned int size);
3177 
3178 /**
3179  * Retrieve extended statistics of an Ethernet device.
3180  *
3181  * There is an assumption that 'xstats_names' and 'xstats' arrays are matched
3182  * by array index:
3183  *  xstats_names[i].name => xstats[i].value
3184  *
3185  * And the array index is the same as the id field of 'struct rte_eth_xstat':
3186  *  xstats[i].id == i
3187  *
3188  * This assumption makes key-value pair matching less flexible but simpler.
3189  *
3190  * @param port_id
3191  *   The port identifier of the Ethernet device.
3192  * @param xstats
3193  *   A pointer to a table of structure of type *rte_eth_xstat*
3194  *   to be filled with device statistics ids and values.
3195  *   This parameter can be set to NULL if and only if n is 0.
3196  * @param n
3197  *   The size of the xstats array (number of elements).
3198  *   If lower than the required number of elements, the function returns
3199  *   the required number of elements.
3200  *   If equal to zero, the xstats must be NULL, the function returns the
3201  *   required number of elements.
3202  * @return
3203  *   - A positive value lower or equal to n: success. The return value
3204  *     is the number of entries filled in the stats table.
3205  *   - A positive value higher than n: error, the given statistics table
3206  *     is too small. The return value corresponds to the size that should
3207  *     be given to succeed. The entries in the table are not valid and
3208  *     shall not be used by the caller.
3209  *   - A negative value on error (invalid port ID).
3210  */
3211 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3212 		unsigned int n);
3213 
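/*
 * Usage sketch (illustrative only): the usual two-call pattern for xstats.
 * The first call with NULL/0 returns the required array size, the second pair
 * of calls fills names and values matched by array index. Error handling is
 * trimmed; requires stdlib.h and inttypes.h.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xstats = malloc(n * sizeof(*xstats));
 *	struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *	if (xstats != NULL && names != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xstats, n) == n) {
 *		for (i = 0; i < n; i++)
 *			printf("%s: %"PRIu64"\n",
 *			       names[i].name, xstats[i].value);
 *	}
 *	free(xstats);
 *	free(names);
 */
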
3214 /**
3215  * Retrieve names of extended statistics of an Ethernet device.
3216  *
3217  * @param port_id
3218  *   The port identifier of the Ethernet device.
3219  * @param xstats_names
3220  *   Array to be filled in with names of requested device statistics.
3221  *   Must not be NULL if @p ids are specified (not NULL).
3222  * @param size
3223  *   Number of elements in @p xstats_names array (if not NULL) and in
3224  *   @p ids array (if not NULL). Must be 0 if both array pointers are NULL.
3225  * @param ids
3226  *   IDs array given by app to retrieve specific statistics. May be NULL to
3227  *   retrieve names of all available statistics or, if @p xstats_names is
3228  *   NULL as well, just the number of available statistics.
3229  * @return
3230  *   - A positive value lower or equal to size: success. The return value
3231  *     is the number of entries filled in the stats table.
3232  *   - A positive value higher than size: success. The given statistics table
3233  *     is too small. The return value corresponds to the size that should
3234  *     be given to succeed. The entries in the table are not valid and
3235  *     shall not be used by the caller.
3236  *   - A negative value on error.
3237  */
3238 int
3239 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3240 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3241 	uint64_t *ids);
3242 
3243 /**
3244  * Retrieve extended statistics of an Ethernet device.
3245  *
3246  * @param port_id
3247  *   The port identifier of the Ethernet device.
3248  * @param ids
3249  *   IDs array given by app to retrieve specific statistics. May be NULL to
3250  *   retrieve all available statistics or, if @p values is NULL as well,
3251  *   just the number of available statistics.
3252  * @param values
3253  *   Array to be filled in with requested device statistics.
3254  *   Must not be NULL if ids are specified (not NULL).
3255  * @param size
3256  *   Number of elements in @p values array (if not NULL) and in @p ids
3257  *   array (if not NULL). Must be 0 if both array pointers are NULL.
3258  * @return
3259  *   - A positive value lower or equal to size: success. The return value
3260  *     is the number of entries filled in the stats table.
3261  *   - A positive value higher than size: success. The given statistics table
3262  *     is too small. The return value corresponds to the size that should
3263  *     be given to succeed. The entries in the table are not valid and
3264  *     shall not be used by the caller.
3265  *   - A negative value on error.
3266  */
3267 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3268 			     uint64_t *values, unsigned int size);
3269 
3270 /**
3271  * Gets the ID of a statistic from its name.
3272  *
3273  * This function searches for the statistics using string compares, and
3274  * as such should not be used on the fast-path. For fast-path retrieval of
3275  * specific statistics, store the ID as provided in *id* from this function,
3276  * and pass the ID to rte_eth_xstats_get()
3277  *
3278  * @param port_id The port to look up statistics from
3279  * @param xstat_name The name of the statistic to return
3280  * @param[out] id A pointer to an app-supplied uint64_t which should be
3281  *                set to the ID of the stat if the stat exists.
3282  * @return
3283  *    0 on success
3284  *    -ENODEV for invalid port_id,
3285  *    -EIO if device is removed,
3286  *    -EINVAL if the xstat_name doesn't exist in port_id
3287  *    -ENOMEM if bad parameter.
3288  */
3289 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3290 		uint64_t *id);
3291 
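/*
 * Usage sketch (illustrative only): resolve the ID of one statistic once
 * (slow path) and then read it by ID (fast path). The statistic name
 * "rx_good_packets" is only an example and may not exist on every PMD.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %"PRIu64"\n", value);
 */
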
3292 /**
3293  * Reset extended statistics of an Ethernet device.
3294  *
3295  * @param port_id
3296  *   The port identifier of the Ethernet device.
3297  * @return
3298  *   - (0) if device notified to reset extended stats.
3299  *   - (-ENOTSUP) if pmd doesn't support both
3300  *     extended stats and basic stats reset.
3301  *   - (-ENODEV) if *port_id* invalid.
3302  *   - (<0): Error code of the driver xstats reset function.
3303  */
3304 int rte_eth_xstats_reset(uint16_t port_id);
3305 
3306 /**
3307  *  Set a mapping for the specified transmit queue to the specified per-queue
3308  *  statistics counter.
3309  *
3310  * @param port_id
3311  *   The port identifier of the Ethernet device.
3312  * @param tx_queue_id
3313  *   The index of the transmit queue for which a queue stats mapping is required.
3314  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3315  *   to rte_eth_dev_configure().
3316  * @param stat_idx
3317  *   The index of the per-queue packet statistics counter to which the transmit
3318  *   queue is to be assigned.
3319  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3320  *   The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
3321  * @return
3322  *   Zero if successful. Non-zero otherwise.
3323  */
3324 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3325 		uint16_t tx_queue_id, uint8_t stat_idx);
3326 
3327 /**
3328  *  Set a mapping for the specified receive queue to the specified per-queue
3329  *  statistics counter.
3330  *
3331  * @param port_id
3332  *   The port identifier of the Ethernet device.
3333  * @param rx_queue_id
3334  *   The index of the receive queue for which a queue stats mapping is required.
3335  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3336  *   to rte_eth_dev_configure().
3337  * @param stat_idx
3338  *   The index of the per-queue packet statistics counter to which the receive
3339  *   queue is to be assigned.
3340  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3341  *   The maximum value of RTE_ETHDEV_QUEUE_STAT_CNTRS is 256.
3342  * @return
3343  *   Zero if successful. Non-zero otherwise.
3344  */
3345 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3346 					   uint16_t rx_queue_id,
3347 					   uint8_t stat_idx);
3348 
3349 /**
3350  * Retrieve the Ethernet address of an Ethernet device.
3351  *
3352  * @param port_id
3353  *   The port identifier of the Ethernet device.
3354  * @param mac_addr
3355  *   A pointer to a structure of type *ether_addr* to be filled with
3356  *   the Ethernet address of the Ethernet device.
3357  * @return
3358  *   - (0) if successful
3359  *   - (-ENODEV) if *port_id* invalid.
3360  *   - (-EINVAL) if bad parameter.
3361  */
3362 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3363 
3364 /**
3365  * @warning
3366  * @b EXPERIMENTAL: this API may change without prior notice
3367  *
3368  * Retrieve the Ethernet addresses of an Ethernet device.
3369  *
3370  * @param port_id
3371  *   The port identifier of the Ethernet device.
3372  * @param ma
3373  *   A pointer to an array of structures of type *ether_addr* to be filled with
3374  *   the Ethernet addresses of the Ethernet device.
3375  * @param num
3376  *   Number of elements in the @p ma array.
3377  *   Note that  rte_eth_dev_info::max_mac_addrs can be used to retrieve
3378  *   max number of Ethernet addresses for given port.
3379  * @return
3380  *   - number of retrieved addresses if successful
3381  *   - (-ENODEV) if *port_id* invalid.
3382  *   - (-EINVAL) if bad parameter.
3383  */
3384 __rte_experimental
3385 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3386 	unsigned int num);
3387 
3388 /**
3389  * Retrieve the contextual information of an Ethernet device.
3390  *
3391  * This function returns the Ethernet device information based
3392  * on the values stored internally in the device specific data.
3393  * For example: number of queues, descriptor limits, device
3394  * capabilities and offload flags.
3395  *
3396  * @param port_id
3397  *   The port identifier of the Ethernet device.
3398  * @param dev_info
3399  *   A pointer to a structure of type *rte_eth_dev_info* to be filled with
3400  *   the contextual information of the Ethernet device.
3401  * @return
3402  *   - (0) if successful.
3403  *   - (-ENOTSUP) if support for dev_infos_get() does not exist for the device.
3404  *   - (-ENODEV) if *port_id* invalid.
3405  *   - (-EINVAL) if bad parameter.
3406  */
3407 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3408 
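/*
 * Usage sketch (illustrative only): use the device capabilities to clamp the
 * number of queues requested by the application before configuring the port.
 * app_rxq and app_txq are hypothetical application parameters.
 *
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rxq = app_rxq, nb_txq = app_txq;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
 *		if (nb_rxq > dev_info.max_rx_queues)
 *			nb_rxq = dev_info.max_rx_queues;
 *		if (nb_txq > dev_info.max_tx_queues)
 *			nb_txq = dev_info.max_tx_queues;
 *	}
 */
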
3409 /**
3410  * @warning
3411  * @b EXPERIMENTAL: this API may change without prior notice.
3412  *
3413  * Retrieve the configuration of an Ethernet device.
3414  *
3415  * @param port_id
3416  *   The port identifier of the Ethernet device.
3417  * @param dev_conf
3418  *   Location for Ethernet device configuration to be filled in.
3419  * @return
3420  *   - (0) if successful.
3421  *   - (-ENODEV) if *port_id* invalid.
3422  *   - (-EINVAL) if bad parameter.
3423  */
3424 __rte_experimental
3425 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3426 
3427 /**
3428  * Retrieve the firmware version of a device.
3429  *
3430  * @param port_id
3431  *   The port identifier of the device.
3432  * @param fw_version
3433  *   A pointer to a character array in which to store the firmware version of
3434  *   the device; the string includes the terminating null. The buffer is allocated by the caller.
3435  * @param fw_size
3436  *   The size of the character array pointed to by fw_version, which should be
3437  *   large enough to store the firmware version of the device.
3438  * @return
3439  *   - (0) if successful.
3440  *   - (-ENOTSUP) if operation is not supported.
3441  *   - (-ENODEV) if *port_id* invalid.
3442  *   - (-EIO) if device is removed.
3443  *   - (-EINVAL) if bad parameter.
3444  *   - (>0) if *fw_size* is not large enough to store the firmware version; returns
3445  *          the size of the non-truncated string.
3446  */
3447 int rte_eth_dev_fw_version_get(uint16_t port_id,
3448 			       char *fw_version, size_t fw_size);
3449 
3450 /**
3451  * Retrieve the supported packet types of an Ethernet device.
3452  *
3453  * When a packet type is announced as supported, it *must* be recognized by
3454  * the PMD. For instance, if RTE_PTYPE_L2_ETHER, RTE_PTYPE_L2_ETHER_VLAN
3455  * and RTE_PTYPE_L3_IPV4 are announced, the PMD must return the following
3456  * packet types for these packets:
3457  * - Ether/IPv4              -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
3458  * - Ether/VLAN/IPv4         -> RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4
3459  * - Ether/[anything else]   -> RTE_PTYPE_L2_ETHER
3460  * - Ether/VLAN/[anything else] -> RTE_PTYPE_L2_ETHER_VLAN
3461  *
3462  * When a packet is received by a PMD, the most precise type must be
3463  * returned among the ones supported. However a PMD is allowed to set
3464  * packet type that is not in the supported list, at the condition that it
3465  * is more precise. Therefore, a PMD announcing no supported packet types
3466  * can still set a matching packet type in a received packet.
3467  *
3468  * @note
3469  *   It is better to invoke this API after the device is started or the Rx burst
3470  *   function has been chosen, to obtain the correct supported ptypes.
3471  * @note
3472  *   If a given PMD does not report what ptypes it supports, then the supported
3473  *   ptype count is reported as 0.
3474  * @param port_id
3475  *   The port identifier of the Ethernet device.
3476  * @param ptype_mask
3477  *   A hint of what kind of packet types the caller is interested in.
3478  * @param ptypes
3479  *   An array pointer in which to store the supported packet types, allocated by the caller.
3480  * @param num
3481  *  Size of the array pointed to by param ptypes.
3482  * @return
3483  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
3484  *           only num entries will be filled into the ptypes array, but the full
3485  *           count of supported ptypes will be returned.
3486  *   - (-ENODEV) if *port_id* invalid.
3487  *   - (-EINVAL) if bad parameter.
3488  */
3489 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3490 				     uint32_t *ptypes, int num);
3491 /**
3492  * Inform Ethernet device about reduced range of packet types to handle.
3493  *
3494  * The application can use this function to set only the specific ptypes that it
3495  * is interested in. This information can be used by the PMD to optimize its Rx path.
3496  *
3497  * The function accepts an array `set_ptypes` allocated by the caller to
3498  * store the packet types set by the driver, the last element of the array
3499  * is set to RTE_PTYPE_UNKNOWN. The size of the `set_ptypes` array should be
3500  * `rte_eth_dev_get_supported_ptypes() + 1`, otherwise it might only be filled
3501  * partially.
3502  *
3503  * @param port_id
3504  *   The port identifier of the Ethernet device.
3505  * @param ptype_mask
3506  *   The ptype family that application is interested in should be bitwise OR of
3507  *   RTE_PTYPE_*_MASK or 0.
3508  * @param set_ptypes
3509  *   An array pointer to store set packet types, allocated by caller. The
3510  *   function marks the end of array with RTE_PTYPE_UNKNOWN.
3511  * @param num
3512  *   Size of the array pointed to by param set_ptypes.
3513  *   Should be rte_eth_dev_get_supported_ptypes() + 1 to accommodate the
3514  *   set ptypes.
3515  * @return
3516  *   - (0) if successful.
3517  *   - (-ENODEV) if *port_id* invalid.
3518  *   - (-EINVAL) if *ptype_mask* is invalid (or) set_ptypes is NULL and
3519  *     num > 0.
3520  */
3521 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3522 			   uint32_t *set_ptypes, unsigned int num);
3523 
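/*
 * Usage sketch (illustrative only): tell the PMD that only L3/L4 packet type
 * information is needed so it can skip filling the rest. The array is sized as
 * recommended above: supported count plus one for the RTE_PTYPE_UNKNOWN marker.
 * Requires stdlib.h; the ptype masks come from rte_mbuf_ptype.h.
 *
 *	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
 *						 NULL, 0);
 *	if (n > 0) {
 *		uint32_t *set_ptypes = malloc((n + 1) * sizeof(*set_ptypes));
 *
 *		if (set_ptypes != NULL)
 *			rte_eth_dev_set_ptypes(port_id,
 *					RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
 *					set_ptypes, n + 1);
 *		free(set_ptypes);
 *	}
 */
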
3524 /**
3525  * Retrieve the MTU of an Ethernet device.
3526  *
3527  * @param port_id
3528  *   The port identifier of the Ethernet device.
3529  * @param mtu
3530  *   A pointer to a uint16_t where the retrieved MTU is to be stored.
3531  * @return
3532  *   - (0) if successful.
3533  *   - (-ENODEV) if *port_id* invalid.
3534  *   - (-EINVAL) if bad parameter.
3535  */
3536 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3537 
3538 /**
3539  * Change the MTU of an Ethernet device.
3540  *
3541  * @param port_id
3542  *   The port identifier of the Ethernet device.
3543  * @param mtu
3544  *   A uint16_t for the MTU to be applied.
3545  * @return
3546  *   - (0) if successful.
3547  *   - (-ENOTSUP) if operation is not supported.
3548  *   - (-ENODEV) if *port_id* invalid.
3549  *   - (-EIO) if device is removed.
3550  *   - (-EINVAL) if *mtu* is invalid; validation of the MTU can occur within
3551  *     rte_eth_dev_set_mtu() if dev_infos_get is supported by the device or
3552  *     when the MTU is set using dev->dev_ops->mtu_set.
3553  *   - (-EBUSY) if operation is not allowed when the port is running
3554  */
3555 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3556 
3557 /**
3558  * Enable/Disable hardware filtering by an Ethernet device of received
3559  * VLAN packets tagged with a given VLAN Tag Identifier.
3560  *
3561  * @param port_id
3562  *   The port identifier of the Ethernet device.
3563  * @param vlan_id
3564  *   The VLAN Tag Identifier whose filtering must be enabled or disabled.
3565  * @param on
3566  *   If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*.
3567  *   Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
3568  * @return
3569  *   - (0) if successful.
3570  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3571  *   - (-ENODEV) if *port_id* invalid.
3572  *   - (-EIO) if device is removed.
3573  *   - (-ENOSYS) if VLAN filtering on *port_id* disabled.
3574  *   - (-EINVAL) if *vlan_id* > 4095.
3575  */
3576 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3577 
3578 /**
3579  * Enable/Disable hardware VLAN Strip by a Rx queue of an Ethernet device.
3580  *
3581  * @param port_id
3582  *   The port identifier of the Ethernet device.
3583  * @param rx_queue_id
3584  *   The index of the receive queue on which VLAN stripping is to be enabled or disabled.
3585  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3586  *   to rte_eth_dev_configure().
3587  * @param on
3588  *   If 1, Enable VLAN Stripping of the receive queue of the Ethernet port.
3589  *   If 0, Disable VLAN Stripping of the receive queue of the Ethernet port.
3590  * @return
3591  *   - (0) if successful.
3592  *   - (-ENOTSUP) if hardware-assisted VLAN stripping not configured.
3593  *   - (-ENODEV) if *port_id* invalid.
3594  *   - (-EINVAL) if *rx_queue_id* invalid.
3595  */
3596 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3597 		int on);
3598 
3599 /**
3600  * Set the outer VLAN Ether Type for an Ethernet device; it can be inserted into
3601  * the VLAN header.
3602  *
3603  * @param port_id
3604  *   The port identifier of the Ethernet device.
3605  * @param vlan_type
3606  *   The VLAN type.
3607  * @param tag_type
3608  *   The Tag Protocol ID
3609  * @return
3610  *   - (0) if successful.
3611  *   - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
3612  *   - (-ENODEV) if *port_id* invalid.
3613  *   - (-EIO) if device is removed.
3614  */
3615 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3616 				    enum rte_vlan_type vlan_type,
3617 				    uint16_t tag_type);
3618 
3619 /**
3620  * Set VLAN offload configuration on an Ethernet device.
3621  *
3622  * @param port_id
3623  *   The port identifier of the Ethernet device.
3624  * @param offload_mask
3625  *   The VLAN offload bit mask; the following values can be combined with bitwise OR:
3626  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3627  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3628  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3629  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3630  * @return
3631  *   - (0) if successful.
3632  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3633  *   - (-ENODEV) if *port_id* invalid.
3634  *   - (-EIO) if device is removed.
3635  */
3636 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3637 
3638 /**
3639  * Read VLAN Offload configuration from an Ethernet device
3640  *
3641  * @param port_id
3642  *   The port identifier of the Ethernet device.
3643  * @return
3644  *   - (>0) if successful. Bit mask to indicate
3645  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3646  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3647  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3648  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3649  *   - (-ENODEV) if *port_id* invalid.
3650  */
3651 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3652 
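/*
 * Usage sketch (illustrative only): read the current VLAN offload mask, enable
 * VLAN stripping on top of whatever is already set, and write it back.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *		if (rte_eth_dev_set_vlan_offload(port_id, mask) != 0)
 *			printf("cannot update VLAN offloads on port %u\n",
 *			       port_id);
 *	}
 */
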
3653 /**
3654  * Set port based Tx VLAN insertion on or off.
3655  *
3656  * @param port_id
3657  *  The port identifier of the Ethernet device.
3658  * @param pvid
3659  *  Port based Tx VLAN identifier together with user priority.
3660  * @param on
3661  *  Turn on or off the port based Tx VLAN insertion.
3662  *
3663  * @return
3664  *   - (0) if successful.
3665  *   - negative if failed.
3666  */
3667 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3668 
3669 /**
3670  * @warning
3671  * @b EXPERIMENTAL: this API may change without prior notice.
3672  *
3673  * Set Rx queue available descriptors threshold.
3674  *
3675  * @param port_id
3676  *  The port identifier of the Ethernet device.
3677  * @param queue_id
3678  *  The index of the receive queue.
3679  * @param avail_thresh
3680  *  The available descriptors threshold is percentage of Rx queue size
3681  *  which describes the availability of Rx queue for hardware.
3682  *  If the Rx queue availability is below it,
3683  *  the event RTE_ETH_EVENT_RX_AVAIL_THRESH is triggered.
3684  *  [1-99] to set a new available descriptors threshold.
3685  *  0 to disable threshold monitoring.
3686  *
3687  * @return
3688  *   - 0 if successful.
3689  *   - (-ENODEV) if @p port_id is invalid.
3690  *   - (-EINVAL) if bad parameter.
3691  *   - (-ENOTSUP) if available Rx descriptors threshold is not supported.
3692  *   - (-EIO) if device is removed.
3693  */
3694 __rte_experimental
3695 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3696 			       uint8_t avail_thresh);
3697 
3698 /**
3699  * @warning
3700  * @b EXPERIMENTAL: this API may change without prior notice.
3701  *
3702  * Find Rx queue with RTE_ETH_EVENT_RX_AVAIL_THRESH event pending.
3703  *
3704  * @param port_id
3705  *  The port identifier of the Ethernet device.
3706  * @param[inout] queue_id
3707  *  On input starting Rx queue index to search from.
3708  *  If the queue_id is bigger than the maximum queue ID of the port,
3709  *  the search starts from 0, so that the application can keep calling
3710  *  this function to handle all pending events with a simple increment
3711  *  of queue_id on the next call.
3712  *  On output if return value is 1, Rx queue index with the event pending.
3713  * @param[out] avail_thresh
3714  *  Location for available descriptors threshold of the found Rx queue.
3715  *
3716  * @return
3717  *   - 1 if an Rx queue with pending event is found.
3718  *   - 0 if no Rx queue with pending event is found.
3719  *   - (-ENODEV) if @p port_id is invalid.
3720  *   - (-EINVAL) if bad parameter (e.g. @p queue_id is NULL).
3721  *   - (-ENOTSUP) if operation is not supported.
3722  *   - (-EIO) if device is removed.
3723  */
3724 __rte_experimental
3725 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3726 				 uint8_t *avail_thresh);
3727 
3728 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3729 		void *userdata);
3730 
3731 /**
3732  * Structure used to buffer packets for future Tx
3733  * Used by APIs rte_eth_tx_buffer and rte_eth_tx_buffer_flush
3734  */
3735 struct rte_eth_dev_tx_buffer {
3736 	buffer_tx_error_fn error_callback;
3737 	void *error_userdata;
3738 	uint16_t size;           /**< Size of buffer for buffered Tx */
3739 	uint16_t length;         /**< Number of packets in the array */
3740 	/** Pending packets to be sent on explicit flush or when full */
3741 	struct rte_mbuf *pkts[];
3742 };
3743 
3744 /**
3745  * Calculate the size of the Tx buffer.
3746  *
3747  * @param sz
3748  *   Number of stored packets.
3749  */
3750 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3751 	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3752 
3753 /**
3754  * Initialize default values for buffered transmission.
3755  *
3756  * @param buffer
3757  *   Tx buffer to be initialized.
3758  * @param size
3759  *   Buffer size
3760  * @return
3761  *   0 if no error
3762  */
3763 int
3764 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3765 
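/*
 * Usage sketch (illustrative only): allocate a Tx buffer able to hold up to 32
 * packets on the device socket and initialize it. BURST_SIZE is an example
 * choice; rte_zmalloc_socket() comes from rte_malloc.h.
 *
 *	#define BURST_SIZE 32
 *	struct rte_eth_dev_tx_buffer *tx_buffer;
 *
 *	tx_buffer = rte_zmalloc_socket("tx_buffer",
 *			RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	if (tx_buffer == NULL ||
 *	    rte_eth_tx_buffer_init(tx_buffer, BURST_SIZE) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set up Tx buffer\n");
 */
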
3766 /**
3767  * Configure a callback for buffered packets which cannot be sent
3768  *
3769  * Register a specific callback to be called when an attempt is made to send
3770  * all packets buffered on an Ethernet port, but not all packets can
3771  * successfully be sent. The callback registered here will be called only
3772  * from calls to rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() APIs.
3773  * The default callback configured for each queue simply frees the
3774  * packets back to their owning mempool. If additional behaviour is required,
3775  * for example, to count dropped packets, or to retry transmission of packets
3776  * which cannot be sent, this function should be used to register a suitable
3777  * callback function to implement the desired behaviour.
3778  * The example callback "rte_eth_tx_buffer_count_callback()" is also
3779  * provided as reference.
3780  *
3781  * @param buffer
3782  *   The Tx buffer on which the callback is to be registered.
3783  * @param callback
3784  *   The function to be used as the callback.
3785  * @param userdata
3786  *   Arbitrary parameter to be passed to the callback function
3787  * @return
3788  *   0 on success, or -EINVAL if bad parameter
3789  */
3790 int
3791 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
3792 		buffer_tx_error_fn callback, void *userdata);
3793 
3794 /**
3795  * Callback function for silently dropping unsent buffered packets.
3796  *
3797  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3798  * adjust the default behavior when buffered packets cannot be sent. This
3799  * function drops any unsent packets silently and is used by Tx buffered
3800  * operations as default behavior.
3801  *
3802  * NOTE: this function should not be called directly, instead it should be used
3803  *       as a callback for packet buffering.
3804  *
3805  * NOTE: when configuring this function as a callback with
3806  *       rte_eth_tx_buffer_set_err_callback(), the final userdata parameter
3807  *       should point to a uint64_t value.
3808  *
3809  * @param pkts
3810  *   The previously buffered packets which could not be sent
3811  * @param unsent
3812  *   The number of unsent packets in the pkts array
3813  * @param userdata
3814  *   Not used
3815  */
3816 void
3817 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3818 		void *userdata);
3819 
3820 /**
3821  * Callback function for tracking unsent buffered packets.
3822  *
3823  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3824  * adjust the default behavior when buffered packets cannot be sent. This
3825  * function drops any unsent packets, but also updates a user-supplied counter
3826  * to track the overall number of packets dropped. The counter should be an
3827  * uint64_t variable.
3828  *
3829  * NOTE: this function should not be called directly, instead it should be used
3830  *       as a callback for packet buffering.
3831  *
3832  * NOTE: when configuring this function as a callback with
3833  *       rte_eth_tx_buffer_set_err_callback(), the final userdata parameter
3834  *       should point to a uint64_t value.
3835  *
3836  * @param pkts
3837  *   The previously buffered packets which could not be sent
3838  * @param unsent
3839  *   The number of unsent packets in the pkts array
3840  * @param userdata
3841  *   Pointer to an uint64_t value, which will be incremented by unsent
3842  */
3843 void
3844 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3845 		void *userdata);
3846 
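/*
 * Usage sketch (illustrative only): count packets that could not be sent by
 * the buffered Tx helpers instead of dropping them silently. The tx_dropped
 * counter is hypothetical and must outlive the Tx buffer.
 *
 *	static uint64_t tx_dropped;
 *
 *	if (rte_eth_tx_buffer_set_err_callback(tx_buffer,
 *			rte_eth_tx_buffer_count_callback, &tx_dropped) != 0)
 *		rte_exit(EXIT_FAILURE, "cannot set Tx buffer error callback\n");
 */
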
3847 /**
3848  * Request the driver to free mbufs currently cached by the driver. The
3849  * driver will only free the mbuf if it is no longer in use. It is the
3850  * application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
3851  * called if needed.
3852  *
3853  * @param port_id
3854  *   The port identifier of the Ethernet device.
3855  * @param queue_id
3856  *   The index of the transmit queue through which output packets must be
3857  *   sent.
3858  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3859  *   to rte_eth_dev_configure().
3860  * @param free_cnt
3861  *   Maximum number of packets to free. Use 0 to indicate all possible packets
3862  *   should be freed. Note that a packet may be using multiple mbufs.
3863  * @return
3864  *   Failure: < 0
3865  *     -ENODEV: Invalid interface
3866  *     -EIO: device is removed
3867  *     -ENOTSUP: Driver does not support function
3868  *   Success: >= 0
3869  *     0-n: Number of packets freed. More packets may still remain in ring that
3870  *     are in use.
3871  */
3872 int
3873 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3874 
3875 /**
3876  * Subtypes for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
3877  * raised by Ethernet device.
3878  */
3879 enum rte_eth_event_macsec_subtype {
3880 	/** Notifies unknown MACsec subevent. */
3881 	RTE_ETH_SUBEVENT_MACSEC_UNKNOWN,
3882 	/**
3883 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3884 	 *	Validation check: SecTag.TCI.V = 1
3885 	 */
3886 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1,
3887 	/**
3888 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3889 	 *	Validation check: SecTag.TCI.E = 0 && SecTag.TCI.C = 1
3890 	 */
3891 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1,
3892 	/**
3893 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3894 	 *	Validation check: SecTag.SL >= 'd48
3895 	 */
3896 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48,
3897 	/**
3898 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3899 	 *	Validation check: SecTag.TCI.ES = 1 && SecTag.TCI.SC = 1
3900 	 */
3901 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1,
3902 	/**
3903 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3904 	 *	Validation check: SecTag.TCI.SC = 1 && SecTag.TCI.SCB = 1
3905 	 */
3906 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1,
3907 };
3908 
3909 /**
3910  * Event types for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
3911  * raised by eth device.
3912  */
3913 enum rte_eth_event_macsec_type {
3914 	/** Notifies unknown MACsec event. */
3915 	RTE_ETH_EVENT_MACSEC_UNKNOWN,
3916 	/** Notifies Sectag validation failure events. */
3917 	RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR,
3918 	/** Notifies Rx SA hard expiry events. */
3919 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP,
3920 	/** Notifies Rx SA soft expiry events. */
3921 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP,
3922 	/** Notifies Tx SA hard expiry events. */
3923 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP,
3924 	/** Notifies Tx SA soft events. */
3925 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP,
3926 	/** Notifies Invalid SA event. */
3927 	RTE_ETH_EVENT_MACSEC_SA_NOT_VALID,
3928 };
3929 
3930 /**
3931  * Descriptor for @ref RTE_ETH_EVENT_MACSEC event.
3932  * Used by ethdev to send extra information of the MACsec offload event.
3933  */
3934 struct rte_eth_event_macsec_desc {
3935 	/** Type of RTE_ETH_EVENT_MACSEC_* event. */
3936 	enum rte_eth_event_macsec_type type;
3937 	/** Type of RTE_ETH_SUBEVENT_MACSEC_* subevent. */
3938 	enum rte_eth_event_macsec_subtype subtype;
3939 	/**
3940 	 * Event specific metadata.
3941 	 *
3942 	 * For the following events, *userdata* registered
3943 	 * with the *rte_security_session* would be returned
3944 	 * as metadata.
3945 	 *
3946 	 * @see struct rte_security_session_conf
3947 	 */
3948 	uint64_t metadata;
3949 };
3950 
3951 /**
3952  * Subtypes for IPsec offload event(@ref RTE_ETH_EVENT_IPSEC) raised by
3953  * eth device.
3954  */
3955 enum rte_eth_event_ipsec_subtype {
3956 	/**  PMD specific error start */
3957 	RTE_ETH_EVENT_IPSEC_PMD_ERROR_START = -256,
3958 	/**  PMD specific error end */
3959 	RTE_ETH_EVENT_IPSEC_PMD_ERROR_END = -1,
3960 	/** Unknown event type */
3961 	RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,
3962 	/** Sequence number overflow */
3963 	RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,
3964 	/** Soft time expiry of SA */
3965 	RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY,
3966 	/**
3967 	 * Soft byte expiry of SA determined by
3968 	 * @ref rte_security_ipsec_lifetime::bytes_soft_limit
3969 	 */
3970 	RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY,
3971 	/**
3972 	 * Soft packet expiry of SA determined by
3973 	 * @ref rte_security_ipsec_lifetime::packets_soft_limit
3974 	 */
3975 	RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY,
3976 	/**
3977 	 * Hard byte expiry of SA determined by
3978 	 * @ref rte_security_ipsec_lifetime::bytes_hard_limit
3979 	 */
3980 	RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY,
3981 	/**
3982 	 * Hard packet expiry of SA determined by
3983 	 * @ref rte_security_ipsec_lifetime::packets_hard_limit
3984 	 */
3985 	RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY,
3986 	/** Max value of this enum */
3987 	RTE_ETH_EVENT_IPSEC_MAX
3988 };
3989 
3990 /**
3991  * Descriptor for @ref RTE_ETH_EVENT_IPSEC event. Used by eth dev to send extra
3992  * information of the IPsec offload event.
3993  */
3994 struct rte_eth_event_ipsec_desc {
3995 	/** Type of RTE_ETH_EVENT_IPSEC_* event */
3996 	enum rte_eth_event_ipsec_subtype subtype;
3997 	/**
3998 	 * Event specific metadata.
3999 	 *
4000 	 * For the following events, *userdata* registered
4001 	 * with the *rte_security_session* would be returned
4002 	 * as metadata,
4003 	 *
4004 	 * - @ref RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
4005 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
4006 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
4007 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
4008 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
4009 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
4010 	 *
4011 	 * @see struct rte_security_session_conf
4012 	 *
4013 	 */
4014 	uint64_t metadata;
4015 };
4016 
4017 /**
4018  * The Ethernet device event types, used for interrupts and possibly other notifications in the future.
4019  */
4020 enum rte_eth_event_type {
4021 	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
4022 	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
4023 	/** queue state event (enabled/disabled) */
4024 	RTE_ETH_EVENT_QUEUE_STATE,
4025 	/** reset interrupt event, sent to VF on PF reset */
4026 	RTE_ETH_EVENT_INTR_RESET,
4027 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
4028 	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
4029 	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
4030 	RTE_ETH_EVENT_NEW,      /**< port is probed */
4031 	RTE_ETH_EVENT_DESTROY,  /**< port is released */
4032 	RTE_ETH_EVENT_IPSEC,    /**< IPsec offload related event */
4033 	RTE_ETH_EVENT_FLOW_AGED, /**< New aged-out flows are detected */
4034 	/**
4035 	 * Number of available Rx descriptors is smaller than the threshold.
4036 	 * @see rte_eth_rx_avail_thresh_set()
4037 	 */
4038 	RTE_ETH_EVENT_RX_AVAIL_THRESH,
4039 	/** Port recovering from a hardware or firmware error.
4040 	 * If PMD supports proactive error recovery,
4041 	 * it should trigger this event to notify application
4042 	 * that it detected an error and the recovery is being started.
4043 	 * Upon receiving the event, the application should not invoke any control path API
4044 	 * (such as rte_eth_dev_configure/rte_eth_dev_stop...) until receiving
4045 	 * RTE_ETH_EVENT_RECOVERY_SUCCESS or RTE_ETH_EVENT_RECOVERY_FAILED event.
4046 	 * The PMD will set the data path pointers to dummy functions,
4047 	 * and re-set the data path pointers to non-dummy functions
4048 	 * before reporting RTE_ETH_EVENT_RECOVERY_SUCCESS event.
4049 	 * It means that the application cannot send or receive any packets
4050 	 * during this period.
4051 	 * @note Before the PMD reports the recovery result,
4052 	 * the PMD may report the RTE_ETH_EVENT_ERR_RECOVERING event again,
4053 	 * because a larger error may occur during the recovery.
4054 	 */
4055 	RTE_ETH_EVENT_ERR_RECOVERING,
4056 	/** Port recovers successfully from the error.
4057 	 * The PMD already re-configured the port,
4058 	 * and the effect is the same as a restart operation.
4059 	 * a) The following configuration will be retained (alphabetically):
4060 	 *    - DCB configuration
4061 	 *    - FEC configuration
4062 	 *    - Flow control configuration
4063 	 *    - LRO configuration
4064 	 *    - LSC configuration
4065 	 *    - MTU
4066 	 *    - MAC address (default and those supplied by MAC address array)
4067 	 *    - Promiscuous and allmulticast mode
4068 	 *    - PTP configuration
4069 	 *    - Queue (Rx/Tx) settings
4070 	 *    - Queue statistics mappings
4071 	 *    - RSS configuration by rte_eth_dev_rss_xxx() family
4072 	 *    - Rx checksum configuration
4073 	 *    - Rx interrupt settings
4074 	 *    - Traffic management configuration
4075 	 *    - VLAN configuration (including filtering, tpid, strip, pvid)
4076 	 *    - VMDq configuration
4077 	 * b) The following configuration may be retained
4078 	 *    or not, depending on the device capabilities:
4079 	 *    - flow rules
4080 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP)
4081 	 *    - shared flow objects
4082 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP)
4083 	 * c) Any other configuration will not be stored
4084 	 *    and will need to be re-configured.
4085 	 */
4086 	RTE_ETH_EVENT_RECOVERY_SUCCESS,
4087 	/** Port recovery failed.
4088 	 * It means that the port is no longer usable.
4089 	 * The application should close the port.
4090 	 */
4091 	RTE_ETH_EVENT_RECOVERY_FAILED,
4092 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
4093 };
4094 
4095 /**
4096  * User application callback to be registered for interrupts.
4097  *
4098  * Note: there is no guarantee in the DPDK drivers that a callback won't be
4099  *       called in the middle of other parts of the ethdev API. For example,
4100  *       imagine that thread A calls rte_eth_dev_start() and as part of this
4101  *       call, a RTE_ETH_EVENT_INTR_RESET event gets generated and the
4102  *       associated callback is run on thread A. In that example, if the
4103  *       application protects its internal data using locks before calling
4104  *       rte_eth_dev_start(), and the callback takes the same lock, a deadlock
4105  *       occurs. Because of this, it is highly recommended NOT to take locks in
4106  *       those callbacks.
4107  */
4108 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
4109 		enum rte_eth_event_type event, void *cb_arg, void *ret_param);
4110 
4111 /**
4112  * Register a callback function for port event.
4113  *
4114  * @param port_id
4115  *  Port ID.
4116  *  RTE_ETH_ALL means register the event for all port ids.
4117  * @param event
4118  *  The event of interest.
4119  * @param cb_fn
4120  *  User supplied callback function to be called.
4121  * @param cb_arg
4122  *  Pointer to the parameters for the registered callback.
4123  *
4124  * @return
4125  *  - On success, zero.
4126  *  - On failure, a negative value.
4127  */
4128 int rte_eth_dev_callback_register(uint16_t port_id,
4129 			enum rte_eth_event_type event,
4130 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4131 
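/*
 * Usage sketch (illustrative only): log link state changes via the event
 * callback mechanism. The callback name is hypothetical; it runs in the
 * interrupt thread, so it only prints and avoids taking application locks
 * (see the note above).
 *
 *	static int
 *	example_link_event_cb(uint16_t port_id, enum rte_eth_event_type type,
 *			      void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		if (type == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 *	// During initialization:
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      example_link_event_cb, NULL);
 */
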
4132 /**
4133  * Unregister a callback function for port event.
4134  *
4135  * @param port_id
4136  *  Port ID.
4137  *  RTE_ETH_ALL means unregister the event for all port ids.
4138  * @param event
4139  *  The event of interest.
4140  * @param cb_fn
4141  *  User supplied callback function to be called.
4142  * @param cb_arg
4143  *  Pointer to the parameters for the registered callback. -1 means to
4144  *  remove all callbacks registered with the same callback address and the same event.
4145  *
4146  * @return
4147  *  - On success, zero.
4148  *  - On failure, a negative value.
4149  */
4150 int rte_eth_dev_callback_unregister(uint16_t port_id,
4151 			enum rte_eth_event_type event,
4152 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
4153 
4154 /**
4155  * When no Rx packets arrive on an Rx queue for a long time, the lcore
4156  * polling that queue can sleep to save power, and an Rx interrupt can be
4157  * enabled to be triggered when an Rx packet arrives.
4158  *
4159  * The rte_eth_dev_rx_intr_enable() function enables Rx queue
4160  * interrupt on specific Rx queue of a port.
4161  *
4162  * @param port_id
4163  *   The port identifier of the Ethernet device.
4164  * @param queue_id
4165  *   The index of the receive queue from which to retrieve input packets.
4166  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4167  *   to rte_eth_dev_configure().
4168  * @return
4169  *   - (0) if successful.
4170  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4171  *     that operation.
4172  *   - (-ENODEV) if *port_id* invalid.
4173  *   - (-EIO) if device is removed.
4174  */
4175 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4176 
4177 /**
4178  * When an lcore wakes up from an Rx interrupt indicating packet arrival, it
4179  * should disable the Rx interrupt and return to polling mode.
4180  *
4181  * The rte_eth_dev_rx_intr_disable() function disables Rx queue
4182  * interrupt on specific Rx queue of a port.
4183  *
4184  * @param port_id
4185  *   The port identifier of the Ethernet device.
4186  * @param queue_id
4187  *   The index of the receive queue from which to retrieve input packets.
4188  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4189  *   to rte_eth_dev_configure().
4190  * @return
4191  *   - (0) if successful.
4192  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4193  *     that operation.
4194  *   - (-ENODEV) if *port_id* invalid.
4195  *   - (-EIO) if device is removed.
4196  */
4197 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4198 
4199 /**
4200  * Rx Interrupt control per port.
4201  *
4202  * @param port_id
4203  *   The port identifier of the Ethernet device.
4204  * @param epfd
4205  *   Epoll instance fd to which the interrupt vector is associated.
4206  *   Using RTE_EPOLL_PER_THREAD allows using a per-thread epoll instance.
4207  * @param op
4208  *   The operation to be performed on the vector.
4209  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4210  * @param data
4211  *   User raw data.
4212  * @return
4213  *   - On success, zero.
4214  *   - On failure, a negative value.
4215  */
4216 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4217 
4218 /**
4219  * Rx Interrupt control per queue.
4220  *
4221  * @param port_id
4222  *   The port identifier of the Ethernet device.
4223  * @param queue_id
4224  *   The index of the receive queue from which to retrieve input packets.
4225  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4226  *   to rte_eth_dev_configure().
4227  * @param epfd
4228  *   Epoll instance fd to which the interrupt vector is associated.
4229  *   Using RTE_EPOLL_PER_THREAD allows using a per-thread epoll instance.
4230  * @param op
4231  *   The operation to be performed on the vector.
4232  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4233  * @param data
4234  *   User raw data.
4235  * @return
4236  *   - On success, zero.
4237  *   - On failure, a negative value.
4238  */
4239 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4240 			      int epfd, int op, void *data);
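
/*
 * Usage sketch (illustrative): map an Rx queue interrupt to the per-thread
 * epoll instance, enable the interrupt while the queue is idle, wait, then
 * disable it and resume polling. rte_epoll_wait() and struct rte_epoll_event
 * are assumed to be the EAL epoll helpers; check their declarations before use.
 *
 *   struct rte_epoll_event event;
 *   rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *                             RTE_INTR_EVENT_ADD, NULL);
 *   rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
 *   rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *   ... resume rte_eth_rx_burst() polling on this queue ...
 */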
4241 
4242 /**
4243  * Get interrupt fd per Rx queue.
4244  *
4245  * @param port_id
4246  *   The port identifier of the Ethernet device.
4247  * @param queue_id
4248  *   The index of the receive queue from which to retrieve input packets.
4249  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4250  *   to rte_eth_dev_configure().
4251  * @return
4252  *   - (>=0) the interrupt fd associated to the requested Rx queue if
4253  *           successful.
4254  *   - (-1) on error.
4255  */
4256 int
4257 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4258 
4259 /**
4260  * Turn on the LED on the Ethernet device.
4261  * This function turns on the LED on the Ethernet device.
4262  *
4263  * @param port_id
4264  *   The port identifier of the Ethernet device.
4265  * @return
4266  *   - (0) if successful.
4267  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4268  *     that operation.
4269  *   - (-ENODEV) if *port_id* invalid.
4270  *   - (-EIO) if device is removed.
4271  */
4272 int  rte_eth_led_on(uint16_t port_id);
4273 
4274 /**
4275  * Turn off the LED on the Ethernet device.
4276  * This function turns off the LED on the Ethernet device.
4277  *
4278  * @param port_id
4279  *   The port identifier of the Ethernet device.
4280  * @return
4281  *   - (0) if successful.
4282  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4283  *     that operation.
4284  *   - (-ENODEV) if *port_id* invalid.
4285  *   - (-EIO) if device is removed.
4286  */
4287 int  rte_eth_led_off(uint16_t port_id);
4288 
4289 /**
4290  * @warning
4291  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4292  *
4293  * Get Forward Error Correction (FEC) capability.
4294  *
4295  * @param port_id
4296  *   The port identifier of the Ethernet device.
4297  * @param speed_fec_capa
4298  *   speed_fec_capa is an out-only array filled with per-speed capabilities.
4299  *   If set to NULL, the function returns the required number
4300  *   of array entries.
4301  * @param num
4302  *   Number of elements in the speed_fec_capa array.
4303  *
4304  * @return
4305  *   - A non-negative value lower or equal to num: success. The return value
4306  *     is the number of entries filled in the fec capa array.
4307  *   - A non-negative value higher than num: error, the given fec capa array
4308  *     is too small. The return value corresponds to the num that should
4309  *     be given to succeed. The entries in fec capa array are not valid and
4310  *     shall not be used by the caller.
4311  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4312  *     that operation.
4313  *   - (-EIO) if device is removed.
4314  *   - (-ENODEV)  if *port_id* invalid.
4315  *   - (-EINVAL)  if *num* or *speed_fec_capa* invalid
4316  */
4317 __rte_experimental
4318 int rte_eth_fec_get_capability(uint16_t port_id,
4319 			       struct rte_eth_fec_capa *speed_fec_capa,
4320 			       unsigned int num);
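
/*
 * Usage sketch (illustrative): the usual two-step pattern, first querying the
 * number of entries with a NULL array, then fetching the per-speed FEC
 * capabilities. Field access on struct rte_eth_fec_capa is an assumption here
 * and should be checked against its definition.
 *
 *   int num = rte_eth_fec_get_capability(port_id, NULL, 0);
 *   if (num > 0) {
 *       struct rte_eth_fec_capa *capa = malloc(num * sizeof(*capa));
 *       if (capa != NULL &&
 *           rte_eth_fec_get_capability(port_id, capa, num) == num) {
 *           ... inspect each entry's speed and FEC capability mask ...
 *       }
 *       free(capa);
 *   }
 */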
4321 
4322 /**
4323  * @warning
4324  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4325  *
4326  * Get the current Forward Error Correction (FEC) mode.
4327  * If the link is down and AUTO is enabled, AUTO is returned; otherwise,
4328  * the configured FEC mode is returned.
4329  * If the link is up, the current FEC mode is returned.
4330  *
4331  * @param port_id
4332  *   The port identifier of the Ethernet device.
4333  * @param fec_capa
4334  *   A bitmask with the current FEC mode.
4335  * @return
4336  *   - (0) if successful.
4337  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4338  *     that operation.
4339  *   - (-EIO) if device is removed.
4340  *   - (-ENODEV)  if *port_id* invalid.
4341  */
4342 __rte_experimental
4343 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4344 
4345 /**
4346  * @warning
4347  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4348  *
4349  * Set Forward Error Correction (FEC) mode.
4350  *
4351  * @param port_id
4352  *   The port identifier of the Ethernet device.
4353  * @param fec_capa
4354  *   A bitmask of allowed FEC modes.
4355  *   If only the AUTO bit is set, the decision on which FEC
4356  *   mode to use will be made by HW/FW or driver.
4357  *   If the AUTO bit is set with some FEC modes, only specified
4358  *   FEC modes can be set.
4359  *   If AUTO bit is clear, specify FEC mode to be used
4360  *   (only one valid mode per speed may be set).
4361  * @return
4362  *   - (0) if successful.
4363  *   - (-EINVAL) if the FEC mode is not valid.
4364  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support the operation.
4365  *   - (-EIO) if device is removed.
4366  *   - (-ENODEV)  if *port_id* invalid.
4367  */
4368 __rte_experimental
4369 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4370 
4371 /**
4372  * Get current status of the Ethernet link flow control for Ethernet device
4373  *
4374  * @param port_id
4375  *   The port identifier of the Ethernet device.
4376  * @param fc_conf
4377  *   The pointer to the structure where to store the flow control parameters.
4378  * @return
4379  *   - (0) if successful.
4380  *   - (-ENOTSUP) if hardware doesn't support flow control.
4381  *   - (-ENODEV)  if *port_id* invalid.
4382  *   - (-EIO)  if device is removed.
4383  *   - (-EINVAL) if bad parameter.
4384  */
4385 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4386 			      struct rte_eth_fc_conf *fc_conf);
4387 
4388 /**
4389  * Configure the Ethernet link flow control for Ethernet device
4390  *
4391  * @param port_id
4392  *   The port identifier of the Ethernet device.
4393  * @param fc_conf
4394  *   The pointer to the structure of the flow control parameters.
4395  * @return
4396  *   - (0) if successful.
4397  *   - (-ENOTSUP) if hardware doesn't support flow control mode.
4398  *   - (-ENODEV)  if *port_id* invalid.
4399  *   - (-EINVAL)  if bad parameter
4400  *   - (-EIO)     if flow control setup failure or device is removed.
4401  */
4402 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4403 			      struct rte_eth_fc_conf *fc_conf);
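
/*
 * Usage sketch (illustrative): read the current flow control settings, change
 * only the mode, and write the configuration back.
 *
 *   struct rte_eth_fc_conf fc_conf;
 *   if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *       fc_conf.mode = RTE_ETH_FC_FULL;
 *       ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *   }
 */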
4404 
4405 /**
4406  * Configure the Ethernet priority flow control under DCB environment
4407  * for Ethernet device.
4408  *
4409  * @param port_id
4410  * The port identifier of the Ethernet device.
4411  * @param pfc_conf
4412  * The pointer to the structure of the priority flow control parameters.
4413  * @return
4414  *   - (0) if successful.
4415  *   - (-ENOTSUP) if hardware doesn't support priority flow control mode.
4416  *   - (-ENODEV)  if *port_id* invalid.
4417  *   - (-EINVAL)  if bad parameter
4418  *   - (-EIO)     if flow control setup failure or device is removed.
4419  */
4420 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4421 				struct rte_eth_pfc_conf *pfc_conf);
4422 
4423 /**
4424  * Add a MAC address to the set used for filtering incoming packets.
4425  *
4426  * @param port_id
4427  *   The port identifier of the Ethernet device.
4428  * @param mac_addr
4429  *   The MAC address to add.
4430  * @param pool
4431  *   VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
4432  *   not enabled, this should be set to 0.
4433  * @return
4434  *   - (0) if successfully added or *mac_addr* was already added.
4435  *   - (-ENOTSUP) if hardware doesn't support this feature.
4436  *   - (-ENODEV) if *port* is invalid.
4437  *   - (-EIO) if device is removed.
4438  *   - (-ENOSPC) if no more MAC addresses can be added.
4439  *   - (-EINVAL) if MAC address is invalid.
4440  */
4441 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4442 				uint32_t pool);
4443 
4444 /**
4445  * @warning
4446  * @b EXPERIMENTAL: this API may change without prior notice.
4447  *
4448  * Retrieve the information for queue based PFC.
4449  *
4450  * @param port_id
4451  *   The port identifier of the Ethernet device.
4452  * @param pfc_queue_info
4453  *   A pointer to a structure of type *rte_eth_pfc_queue_info* to be filled with
4454  *   the information about queue based PFC.
4455  * @return
4456  *   - (0) if successful.
4457  *   - (-ENOTSUP) if support for priority_flow_ctrl_queue_info_get does not exist.
4458  *   - (-ENODEV) if *port_id* invalid.
4459  *   - (-EINVAL) if bad parameter.
4460  */
4461 __rte_experimental
4462 int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4463 		struct rte_eth_pfc_queue_info *pfc_queue_info);
4464 
4465 /**
4466  * @warning
4467  * @b EXPERIMENTAL: this API may change without prior notice.
4468  *
4469  * Configure the queue based priority flow control for a given queue
4470  * for Ethernet device.
4471  *
4472  * @note When an ethdev port switches to queue based PFC mode, the
4473  * unconfigured queues shall be configured by the driver with
4474  * default values such as lower priority value for TC etc.
4475  *
4476  * @param port_id
4477  *   The port identifier of the Ethernet device.
4478  * @param pfc_queue_conf
4479  *   The pointer to the structure of the priority flow control parameters
4480  *   for the queue.
4481  * @return
4482  *   - (0) if successful.
4483  *   - (-ENOTSUP) if hardware doesn't support queue based PFC mode.
4484  *   - (-ENODEV)  if *port_id* invalid.
4485  *   - (-EINVAL)  if bad parameter
4486  *   - (-EIO)     if flow control setup queue failure
4487  */
4488 __rte_experimental
4489 int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4490 		struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4491 
4492 /**
4493  * Remove a MAC address from the internal array of addresses.
4494  *
4495  * @param port_id
4496  *   The port identifier of the Ethernet device.
4497  * @param mac_addr
4498  *   MAC address to remove.
4499  * @return
4500  *   - (0) if successful, or *mac_addr* didn't exist.
4501  *   - (-ENOTSUP) if hardware doesn't support.
4502  *   - (-ENODEV) if *port* invalid.
4503  *   - (-EADDRINUSE) if attempting to remove the default MAC address.
4504  *   - (-EINVAL) if MAC address is invalid.
4505  */
4506 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4507 				struct rte_ether_addr *mac_addr);
4508 
4509 /**
4510  * Set the default MAC address.
4511  * It replaces the address at index 0 of the MAC address list.
4512  * If the address was already in the MAC address list,
4513  * please remove it first.
4514  *
4515  * @param port_id
4516  *   The port identifier of the Ethernet device.
4517  * @param mac_addr
4518  *   New default MAC address.
4519  * @return
4520  *   - (0) if successful, or *mac_addr* didn't exist.
4521  *   - (-ENOTSUP) if hardware doesn't support.
4522  *   - (-ENODEV) if *port* invalid.
4523  *   - (-EINVAL) if MAC address is invalid.
4524  *   - (-EEXIST) if MAC address was already in the address list.
4525  */
4526 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4527 		struct rte_ether_addr *mac_addr);
4528 
4529 /**
4530  * Update Redirection Table (RETA) of Receive Side Scaling of Ethernet device.
4531  *
4532  * @param port_id
4533  *   The port identifier of the Ethernet device.
4534  * @param reta_conf
4535  *   RETA to update.
4536  * @param reta_size
4537  *   Redirection table size. The table size can be queried by
4538  *   rte_eth_dev_info_get().
4539  * @return
4540  *   - (0) if successful.
4541  *   - (-ENODEV) if *port_id* is invalid.
4542  *   - (-ENOTSUP) if hardware doesn't support.
4543  *   - (-EINVAL) if bad parameter.
4544  *   - (-EIO) if device is removed.
4545  */
4546 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4547 				struct rte_eth_rss_reta_entry64 *reta_conf,
4548 				uint16_t reta_size);
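
/*
 * Usage sketch (illustrative): spread the redirection table evenly over
 * nb_rx_queues queues. The variables i, nb_rx_queues and reta_size are assumed
 * to be defined, with reta_size taken from rte_eth_dev_info_get() and assumed
 * to be a multiple of RTE_ETH_RETA_GROUP_SIZE.
 *
 *   struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_ETH_RETA_GROUP_SIZE];
 *   memset(reta_conf, 0, sizeof(reta_conf));
 *   for (i = 0; i < reta_size; i++) {
 *       reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *           RTE_BIT64(i % RTE_ETH_RETA_GROUP_SIZE);
 *       reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
 *           i % nb_rx_queues;
 *   }
 *   ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 */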
4549 
4550 /**
4551  * Query Redirection Table (RETA) of Receive Side Scaling of Ethernet device.
4552  *
4553  * @param port_id
4554  *   The port identifier of the Ethernet device.
4555  * @param reta_conf
4556  *   RETA to query. For each requested reta entry, corresponding bit
4557  *   in mask must be set.
4558  * @param reta_size
4559  *   Redirection table size. The table size can be queried by
4560  *   rte_eth_dev_info_get().
4561  * @return
4562  *   - (0) if successful.
4563  *   - (-ENODEV) if *port_id* is invalid.
4564  *   - (-ENOTSUP) if hardware doesn't support.
4565  *   - (-EINVAL) if bad parameter.
4566  *   - (-EIO) if device is removed.
4567  */
4568 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4569 			       struct rte_eth_rss_reta_entry64 *reta_conf,
4570 			       uint16_t reta_size);
4571 
4572 /**
4573  * Update the unicast hash table for receiving packets with the given destination
4574  * MAC address. Such packets are routed to all VFs whose Rx mode accepts
4575  * packets matching the unicast hash table.
4576  *
4577  * @param port_id
4578  *   The port identifier of the Ethernet device.
4579  * @param addr
4580  *   Unicast MAC address.
4581  * @param on
4582  *    1 - Set a unicast hash bit for receiving packets with the MAC address.
4583  *    0 - Clear a unicast hash bit.
4584  * @return
4585  *   - (0) if successful.
4586  *   - (-ENOTSUP) if hardware doesn't support.
4587  *   - (-ENODEV) if *port_id* invalid.
4588  *   - (-EIO) if device is removed.
4589  *   - (-EINVAL) if bad parameter.
4590  */
4591 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4592 				  uint8_t on);
4593 
4594 /**
4595  * Update all unicast hash bitmaps for receiving packets with any unicast
4596  * Ethernet MAC address. Such packets are routed to all VFs whose Rx mode
4597  * accepts packets matching the unicast hash table.
4598  *
4599  * @param port_id
4600  *   The port identifier of the Ethernet device.
4601  * @param on
4602  *    1 - Set all unicast hash bitmaps for receiving all the Ethernet
4603  *         MAC addresses
4604  *    0 - Clear all unicast hash bitmaps
4605  * @return
4606  *   - (0) if successful.
4607  *   - (-ENOTSUP) if hardware doesn't support.
4608  *   - (-ENODEV) if *port_id* invalid.
4609  *   - (-EIO) if device is removed.
4610  *   - (-EINVAL) if bad parameter.
4611  */
4612 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4613 
4614 /**
4615  * Set the rate limitation for a queue on an Ethernet device.
4616  *
4617  * @param port_id
4618  *   The port identifier of the Ethernet device.
4619  * @param queue_idx
4620  *   The queue ID.
4621  * @param tx_rate
4622  *   The Tx rate in Mbps. Allocated from the total port link speed.
4623  * @return
4624  *   - (0) if successful.
4625  *   - (-ENOTSUP) if hardware doesn't support this feature.
4626  *   - (-ENODEV) if *port_id* invalid.
4627  *   - (-EIO) if device is removed.
4628  *   - (-EINVAL) if bad parameter.
4629  */
4630 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4631 			uint32_t tx_rate);
4632 
4633 /**
4634  * Configuration of Receive Side Scaling hash computation of Ethernet device.
4635  *
4636  * @param port_id
4637  *   The port identifier of the Ethernet device.
4638  * @param rss_conf
4639  *   The new configuration to use for RSS hash computation on the port.
4640  * @return
4641  *   - (0) if successful.
4642  *   - (-ENODEV) if port identifier is invalid.
4643  *   - (-EIO) if device is removed.
4644  *   - (-ENOTSUP) if hardware doesn't support.
4645  *   - (-EINVAL) if bad parameter.
4646  */
4647 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4648 				struct rte_eth_rss_conf *rss_conf);
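
/*
 * Usage sketch (illustrative): enable RSS hashing on IP and TCP fields while
 * keeping the default (driver-chosen) hash key.
 *
 *   struct rte_eth_rss_conf rss_conf = {
 *       .rss_key = NULL,
 *       .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *   };
 *   ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
 */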
4649 
4650 /**
4651  * Retrieve current configuration of Receive Side Scaling hash computation
4652  * of Ethernet device.
4653  *
4654  * @param port_id
4655  *   The port identifier of the Ethernet device.
4656  * @param rss_conf
4657  *   Where to store the current RSS hash configuration of the Ethernet device.
4658  * @return
4659  *   - (0) if successful.
4660  *   - (-ENODEV) if port identifier is invalid.
4661  *   - (-EIO) if device is removed.
4662  *   - (-ENOTSUP) if hardware doesn't support RSS.
4663  *   - (-EINVAL) if bad parameter.
4664  */
4665 int
4666 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4667 			      struct rte_eth_rss_conf *rss_conf);
4668 
4669 /**
4670  * @warning
4671  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
4672  *
4673  *  Get the name of RSS hash algorithm.
4674  *
4675  * @param rss_algo
4676  *   Hash algorithm.
4677  *
4678  * @return
4679  *   Hash algorithm name or 'UNKNOWN' if the rss_algo cannot be recognized.
4680  */
4681 __rte_experimental
4682 const char *
4683 rte_eth_dev_rss_algo_name(enum rte_eth_hash_function rss_algo);
4684 
4685 /**
4686  * @warning
4687  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
4688  *
4689  * Get RSS hash algorithm by its name.
4690  *
4691  * @param name
4692  *   Name of the RSS hash algorithm.
4693  *
4694  * @param algo
4695  *   Return the RSS hash algorithm found, @see rte_eth_hash_function.
4696  *
4697  * @return
4698  *   - (0) if successful.
4699  *   - (-EINVAL) if not found.
4700  */
4701 __rte_experimental
4702 int
4703 rte_eth_find_rss_algo(const char *name, uint32_t *algo);
4704 
4705 /**
4706  * Add UDP tunneling port for a type of tunnel.
4707  *
4708  * Some NICs may require such configuration to properly parse a tunnel
4709  * with any standard or custom UDP port.
4710  * The packets with this UDP port will be parsed for this type of tunnel.
4711  * The device parser will also check the rest of the tunnel headers
4712  * before classifying the packet.
4713  *
4714  * With some devices, this API will affect packet classification, i.e.:
4715  *     - mbuf.packet_type reported on Rx
4716  *     - rte_flow rules with tunnel items
4717  *
4718  * @param port_id
4719  *   The port identifier of the Ethernet device.
4720  * @param tunnel_udp
4721  *   UDP tunneling configuration.
4722  *
4723  * @return
4724  *   - (0) if successful.
4725  *   - (-ENODEV) if port identifier is invalid.
4726  *   - (-EIO) if device is removed.
4727  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4728  */
4729 int
4730 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4731 				struct rte_eth_udp_tunnel *tunnel_udp);
4732 
4733 /**
4734  * Delete UDP tunneling port for a type of tunnel.
4735  *
4736  * The packets with this UDP port will not be classified as this type of tunnel
4737  * anymore if the device uses such mapping for tunnel packet classification.
4738  *
4739  * @see rte_eth_dev_udp_tunnel_port_add
4740  *
4741  * @param port_id
4742  *   The port identifier of the Ethernet device.
4743  * @param tunnel_udp
4744  *   UDP tunneling configuration.
4745  *
4746  * @return
4747  *   - (0) if successful.
4748  *   - (-ENODEV) if port identifier is invalid.
4749  *   - (-EIO) if device is removed.
4750  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4751  */
4752 int
4753 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4754 				   struct rte_eth_udp_tunnel *tunnel_udp);
4755 
4756 /**
4757  * Get DCB information on an Ethernet device.
4758  *
4759  * @param port_id
4760  *   The port identifier of the Ethernet device.
4761  * @param dcb_info
4762  *   DCB information.
4763  * @return
4764  *   - (0) if successful.
4765  *   - (-ENODEV) if port identifier is invalid.
4766  *   - (-EIO) if device is removed.
4767  *   - (-ENOTSUP) if hardware doesn't support.
4768  *   - (-EINVAL) if bad parameter.
4769  */
4770 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4771 			     struct rte_eth_dcb_info *dcb_info);
4772 
4773 struct rte_eth_rxtx_callback;
4774 
4775 /**
4776  * Add a callback to be called on packet Rx on a given port and queue.
4777  *
4778  * This API configures a function to be called for each burst of
4779  * packets received on a given NIC port queue. The return value is a pointer
4780  * that can be used to later remove the callback using
4781  * rte_eth_remove_rx_callback().
4782  *
4783  * Multiple functions are called in the order that they are added.
4784  *
4785  * @param port_id
4786  *   The port identifier of the Ethernet device.
4787  * @param queue_id
4788  *   The queue on the Ethernet device on which the callback is to be added.
4789  * @param fn
4790  *   The callback function
4791  * @param user_param
4792  *   A generic pointer parameter which will be passed to each invocation of the
4793  *   callback function on this port and queue. Inter-thread synchronization
4794  *   of any user data changes is the responsibility of the user.
4795  *
4796  * @return
4797  *   NULL on error.
4798  *   On success, a pointer value which can later be used to remove the callback.
4799  */
4800 const struct rte_eth_rxtx_callback *
4801 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4802 		rte_rx_callback_fn fn, void *user_param);
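
/*
 * Usage sketch (illustrative): a pass-through Rx callback that counts received
 * packets. The callback signature is assumed to match the rte_rx_callback_fn
 * prototype declared earlier in this file; "count_rx_cb" and "rx_count" are
 * hypothetical names.
 *
 *   static uint16_t
 *   count_rx_cb(uint16_t port_id, uint16_t queue_id, struct rte_mbuf *pkts[],
 *               uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *   {
 *       uint64_t *counter = user_param;
 *
 *       RTE_SET_USED(port_id);
 *       RTE_SET_USED(queue_id);
 *       RTE_SET_USED(pkts);
 *       RTE_SET_USED(max_pkts);
 *       *counter += nb_pkts;
 *       return nb_pkts;
 *   }
 *
 *   cb = rte_eth_add_rx_callback(port_id, queue_id, count_rx_cb, &rx_count);
 */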
4803 
4804 /**
4805  * Add a callback that must be called first on packet Rx on a given port
4806  * and queue.
4807  *
4808  * This API configures a first function to be called for each burst of
4809  * packets received on a given NIC port queue. The return value is a pointer
4810  * that can be used to later remove the callback using
4811  * rte_eth_remove_rx_callback().
4812  *
4813  * Multiple functions are called in the order that they are added.
4814  *
4815  * @param port_id
4816  *   The port identifier of the Ethernet device.
4817  * @param queue_id
4818  *   The queue on the Ethernet device on which the callback is to be added.
4819  * @param fn
4820  *   The callback function
4821  * @param user_param
4822  *   A generic pointer parameter which will be passed to each invocation of the
4823  *   callback function on this port and queue. Inter-thread synchronization
4824  *   of any user data changes is the responsibility of the user.
4825  *
4826  * @return
4827  *   NULL on error.
4828  *   On success, a pointer value which can later be used to remove the callback.
4829  */
4830 const struct rte_eth_rxtx_callback *
4831 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4832 		rte_rx_callback_fn fn, void *user_param);
4833 
4834 /**
4835  * Add a callback to be called on packet Tx on a given port and queue.
4836  *
4837  * This API configures a function to be called for each burst of
4838  * packets sent on a given NIC port queue. The return value is a pointer
4839  * that can be used to later remove the callback using
4840  * rte_eth_remove_tx_callback().
4841  *
4842  * Multiple functions are called in the order that they are added.
4843  *
4844  * @param port_id
4845  *   The port identifier of the Ethernet device.
4846  * @param queue_id
4847  *   The queue on the Ethernet device on which the callback is to be added.
4848  * @param fn
4849  *   The callback function
4850  * @param user_param
4851  *   A generic pointer parameter which will be passed to each invocation of the
4852  *   callback function on this port and queue. Inter-thread synchronization
4853  *   of any user data changes is the responsibility of the user.
4854  *
4855  * @return
4856  *   NULL on error.
4857  *   On success, a pointer value which can later be used to remove the callback.
4858  */
4859 const struct rte_eth_rxtx_callback *
4860 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4861 		rte_tx_callback_fn fn, void *user_param);
4862 
4863 /**
4864  * Remove an Rx packet callback from a given port and queue.
4865  *
4866  * This function is used to remove callbacks that were added to a NIC port
4867  * queue using rte_eth_add_rx_callback().
4868  *
4869  * Note: the callback is removed from the callback list but it isn't freed
4870  * since it may still be in use. The memory for the callback can be
4871  * subsequently freed by the application by calling rte_free():
4872  *
4873  * - Immediately - if the port is stopped, or the user knows that no
4874  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
4875  *   on that queue.
4876  *
4877  * - After a short delay - where the delay is sufficient to allow any
4878  *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
4879  *   used to detect when data plane threads have ceased referencing the
4880  *   callback memory.
4881  *
4882  * @param port_id
4883  *   The port identifier of the Ethernet device.
4884  * @param queue_id
4885  *   The queue on the Ethernet device from which the callback is to be removed.
4886  * @param user_cb
4887  *   User supplied callback created via rte_eth_add_rx_callback().
4888  *
4889  * @return
4890  *   - 0: Success. Callback was removed.
4891  *   - -ENODEV:  If *port_id* is invalid.
4892  *   - -ENOTSUP: Callback support is not available.
4893  *   - -EINVAL:  The queue_id is out of range, or the callback
4894  *               is NULL or not found for the port/queue.
4895  */
4896 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4897 		const struct rte_eth_rxtx_callback *user_cb);
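
/*
 * Usage sketch (illustrative): remove a previously added Rx callback and free
 * it only once no data-plane thread can still be executing it, following the
 * guidance above. The delay value is an arbitrary assumption for this sketch.
 *
 *   rte_eth_remove_rx_callback(port_id, queue_id, cb);
 *   rte_delay_ms(10);
 *   rte_free((void *)(uintptr_t)cb);
 */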
4898 
4899 /**
4900  * Remove a Tx packet callback from a given port and queue.
4901  *
4902  * This function is used to remove callbacks that were added to a NIC port
4903  * queue using rte_eth_add_tx_callback().
4904  *
4905  * Note: the callback is removed from the callback list but it isn't freed
4906  * since it may still be in use. The memory for the callback can be
4907  * subsequently freed by the application by calling rte_free():
4908  *
4909  * - Immediately - if the port is stopped, or the user knows that no
4910  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
4911  *   on that queue.
4912  *
4913  * - After a short delay - where the delay is sufficient to allow any
4914  *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
4915  *   used to detect when data plane threads have ceased referencing the
4916  *   callback memory.
4917  *
4918  * @param port_id
4919  *   The port identifier of the Ethernet device.
4920  * @param queue_id
4921  *   The queue on the Ethernet device from which the callback is to be removed.
4922  * @param user_cb
4923  *   User supplied callback created via rte_eth_add_tx_callback().
4924  *
4925  * @return
4926  *   - 0: Success. Callback was removed.
4927  *   - -ENODEV:  If *port_id* is invalid.
4928  *   - -ENOTSUP: Callback support is not available.
4929  *   - -EINVAL:  The queue_id is out of range, or the callback
4930  *               is NULL or not found for the port/queue.
4931  */
4932 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4933 		const struct rte_eth_rxtx_callback *user_cb);
4934 
4935 /**
4936  * Retrieve information about given port's Rx queue.
4937  *
4938  * @param port_id
4939  *   The port identifier of the Ethernet device.
4940  * @param queue_id
4941  *   The Rx queue on the Ethernet device for which information
4942  *   will be retrieved.
4943  * @param qinfo
4944  *   A pointer to a structure of type *rte_eth_rxq_info* to be filled with
4945  *   the information of the Ethernet device.
4946  *
4947  * @return
4948  *   - 0: Success
4949  *   - -ENODEV:  If *port_id* is invalid.
4950  *   - -ENOTSUP: routine is not supported by the device PMD.
4951  *   - -EINVAL:  The queue_id is out of range, or the queue
4952  *               is a hairpin queue.
4953  */
4954 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4955 	struct rte_eth_rxq_info *qinfo);
4956 
4957 /**
4958  * Retrieve information about given port's Tx queue.
4959  *
4960  * @param port_id
4961  *   The port identifier of the Ethernet device.
4962  * @param queue_id
4963  *   The Tx queue on the Ethernet device for which information
4964  *   will be retrieved.
4965  * @param qinfo
4966  *   A pointer to a structure of type *rte_eth_txq_info* to be filled with
4967  *   the information of the Ethernet device.
4968  *
4969  * @return
4970  *   - 0: Success
4971  *   - -ENODEV:  If *port_id* is invalid.
4972  *   - -ENOTSUP: routine is not supported by the device PMD.
4973  *   - -EINVAL:  The queue_id is out of range, or the queue
4974  *               is a hairpin queue.
4975  */
4976 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4977 	struct rte_eth_txq_info *qinfo);
4978 
4979 /**
4980  * @warning
4981  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4982  *
4983  * Retrieve information about a given port's Rx queue for recycling mbufs.
4984  *
4985  * @param port_id
4986  *   The port identifier of the Ethernet device.
4987  * @param queue_id
4988  *   The Rx queue on the Ethernet device for which information
4989  *   will be retrieved.
4990  * @param recycle_rxq_info
4991  *   A pointer to a structure of type *rte_eth_recycle_rxq_info* to be filled.
4992  *
4993  * @return
4994  *   - 0: Success
4995  *   - -ENODEV:  If *port_id* is invalid.
4996  *   - -ENOTSUP: routine is not supported by the device PMD.
4997  *   - -EINVAL:  The queue_id is out of range.
4998  */
4999 __rte_experimental
5000 int rte_eth_recycle_rx_queue_info_get(uint16_t port_id,
5001 		uint16_t queue_id,
5002 		struct rte_eth_recycle_rxq_info *recycle_rxq_info);
5003 
5004 /**
5005  * Retrieve information about the Rx packet burst mode.
5006  *
5007  * @param port_id
5008  *   The port identifier of the Ethernet device.
5009  * @param queue_id
5010  *   The Rx queue on the Ethernet device for which information
5011  *   will be retrieved.
5012  * @param mode
5013  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
5014  *   with the information of the packet burst mode.
5015  *
5016  * @return
5017  *   - 0: Success
5018  *   - -ENODEV:  If *port_id* is invalid.
5019  *   - -ENOTSUP: routine is not supported by the device PMD.
5020  *   - -EINVAL:  The queue_id is out of range.
5021  */
5022 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5023 	struct rte_eth_burst_mode *mode);
5024 
5025 /**
5026  * Retrieve information about the Tx packet burst mode.
5027  *
5028  * @param port_id
5029  *   The port identifier of the Ethernet device.
5030  * @param queue_id
5031  *   The Tx queue on the Ethernet device for which information
5032  *   will be retrieved.
5033  * @param mode
5034  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
5035  *   with the information of the packet burst mode.
5036  *
5037  * @return
5038  *   - 0: Success
5039  *   - -ENODEV:  If *port_id* is invalid.
5040  *   - -ENOTSUP: routine is not supported by the device PMD.
5041  *   - -EINVAL:  The queue_id is out of range.
5042  */
5043 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
5044 	struct rte_eth_burst_mode *mode);
5045 
5046 /**
5047  * @warning
5048  * @b EXPERIMENTAL: this API may change without prior notice.
5049  *
5050  * Retrieve the monitor condition for a given receive queue.
5051  *
5052  * @param port_id
5053  *   The port identifier of the Ethernet device.
5054  * @param queue_id
5055  *   The Rx queue on the Ethernet device for which information
5056  *   will be retrieved.
5057  * @param pmc
5058  *   The pointer to power-optimized monitoring condition structure.
5059  *
5060  * @return
5061  *   - 0: Success.
5062  *   - -ENOTSUP: Operation not supported.
5063  *   - -EINVAL: Invalid parameters.
5064  *   - -ENODEV: Invalid port ID.
5065  */
5066 __rte_experimental
5067 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
5068 		struct rte_power_monitor_cond *pmc);
5069 
5070 /**
5071  * Retrieve device registers and register attributes (number of registers and
5072  * register size)
5073  *
5074  * @param port_id
5075  *   The port identifier of the Ethernet device.
5076  * @param info
5077  *   Pointer to rte_dev_reg_info structure to fill in. If info->data is
5078  *   NULL the function fills in the width and length fields. If non-NULL
5079  *   the registers are put into the buffer pointed at by the data field.
5080  * @return
5081  *   - (0) if successful.
5082  *   - (-ENOTSUP) if hardware doesn't support.
5083  *   - (-EINVAL) if bad parameter.
5084  *   - (-ENODEV) if *port_id* invalid.
5085  *   - (-EIO) if device is removed.
5086  *   - others depend on the specific operation's implementation.
5087  */
5088 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
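
/*
 * Usage sketch (illustrative): query the register count and width first, then
 * fetch the register values. The struct rte_dev_reg_info field names used here
 * are assumptions and should be checked against rte_dev_info.h.
 *
 *   struct rte_dev_reg_info reg_info = { .data = NULL };
 *   if (rte_eth_dev_get_reg_info(port_id, &reg_info) == 0) {
 *       reg_info.data = malloc((size_t)reg_info.length * reg_info.width);
 *       if (reg_info.data != NULL)
 *           ret = rte_eth_dev_get_reg_info(port_id, &reg_info);
 *   }
 */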
5089 
5090 /**
5091  * Retrieve size of device EEPROM
5092  *
5093  * @param port_id
5094  *   The port identifier of the Ethernet device.
5095  * @return
5096  *   - (>=0) EEPROM size if successful.
5097  *   - (-ENOTSUP) if hardware doesn't support.
5098  *   - (-ENODEV) if *port_id* invalid.
5099  *   - (-EIO) if device is removed.
5100  *   - others depend on the specific operation's implementation.
5101  */
5102 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
5103 
5104 /**
5105  * Retrieve EEPROM and EEPROM attribute
5106  *
5107  * @param port_id
5108  *   The port identifier of the Ethernet device.
5109  * @param info
5110  *   The template includes a buffer for the returned EEPROM data and
5111  *   the EEPROM attributes to be filled.
5112  * @return
5113  *   - (0) if successful.
5114  *   - (-ENOTSUP) if hardware doesn't support.
5115  *   - (-EINVAL) if bad parameter.
5116  *   - (-ENODEV) if *port_id* invalid.
5117  *   - (-EIO) if device is removed.
5118  *   - others depend on the specific operation's implementation.
5119  */
5120 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5121 
5122 /**
5123  * Program EEPROM with provided data
5124  *
5125  * @param port_id
5126  *   The port identifier of the Ethernet device.
5127  * @param info
5128  *   The template includes the EEPROM data for programming and
5129  *   the EEPROM attributes to be filled.
5130  * @return
5131  *   - (0) if successful.
5132  *   - (-ENOTSUP) if hardware doesn't support.
5133  *   - (-ENODEV) if *port_id* invalid.
5134  *   - (-EINVAL) if bad parameter.
5135  *   - (-EIO) if device is removed.
5136  *   - others depend on the specific operation's implementation.
5137  */
5138 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
5139 
5140 /**
5141  * @warning
5142  * @b EXPERIMENTAL: this API may change without prior notice.
5143  *
5144  * Retrieve the type and size of plugin module EEPROM
5145  *
5146  * @param port_id
5147  *   The port identifier of the Ethernet device.
5148  * @param modinfo
5149  *   The type and size of plugin module EEPROM.
5150  * @return
5151  *   - (0) if successful.
5152  *   - (-ENOTSUP) if hardware doesn't support.
5153  *   - (-ENODEV) if *port_id* invalid.
5154  *   - (-EINVAL) if bad parameter.
5155  *   - (-EIO) if device is removed.
5156  *   - others depend on the specific operation's implementation.
5157  */
5158 __rte_experimental
5159 int
5160 rte_eth_dev_get_module_info(uint16_t port_id,
5161 			    struct rte_eth_dev_module_info *modinfo);
5162 
5163 /**
5164  * @warning
5165  * @b EXPERIMENTAL: this API may change without prior notice.
5166  *
5167  * Retrieve the data of plugin module EEPROM
5168  *
5169  * @param port_id
5170  *   The port identifier of the Ethernet device.
5171  * @param info
5172  *   The template includes the plugin module EEPROM attributes, and the
5173  *   buffer for the returned plugin module EEPROM data.
5174  * @return
5175  *   - (0) if successful.
5176  *   - (-ENOTSUP) if hardware doesn't support.
5177  *   - (-EINVAL) if bad parameter.
5178  *   - (-ENODEV) if *port_id* invalid.
5179  *   - (-EIO) if device is removed.
5180  *   - others depend on the specific operation's implementation.
5181  */
5182 __rte_experimental
5183 int
5184 rte_eth_dev_get_module_eeprom(uint16_t port_id,
5185 			      struct rte_dev_eeprom_info *info);
5186 
5187 /**
5188  * Set the list of multicast addresses to filter on an Ethernet device.
5189  *
5190  * @param port_id
5191  *   The port identifier of the Ethernet device.
5192  * @param mc_addr_set
5193  *   The array of multicast addresses to set. Equal to NULL when the function
5194  *   is invoked to flush the set of filtered addresses.
5195  * @param nb_mc_addr
5196  *   The number of multicast addresses in the *mc_addr_set* array. Equal to 0
5197  *   when the function is invoked to flush the set of filtered addresses.
5198  * @return
5199  *   - (0) if successful.
5200  *   - (-ENODEV) if *port_id* invalid.
5201  *   - (-EIO) if device is removed.
5202  *   - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
5203  *   - (-ENOSPC) if *port_id* does not have enough multicast filtering resources.
5204  *   - (-EINVAL) if bad parameter.
5205  */
5206 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
5207 				 struct rte_ether_addr *mc_addr_set,
5208 				 uint32_t nb_mc_addr);
5209 
5210 /**
5211  * Enable IEEE1588/802.1AS timestamping for an Ethernet device.
5212  *
5213  * @param port_id
5214  *   The port identifier of the Ethernet device.
5215  *
5216  * @return
5217  *   - 0: Success.
5218  *   - -ENODEV: The port ID is invalid.
5219  *   - -EIO: if device is removed.
5220  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5221  */
5222 int rte_eth_timesync_enable(uint16_t port_id);
5223 
5224 /**
5225  * Disable IEEE1588/802.1AS timestamping for an Ethernet device.
5226  *
5227  * @param port_id
5228  *   The port identifier of the Ethernet device.
5229  *
5230  * @return
5231  *   - 0: Success.
5232  *   - -ENODEV: The port ID is invalid.
5233  *   - -EIO: if device is removed.
5234  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5235  */
5236 int rte_eth_timesync_disable(uint16_t port_id);
5237 
5238 /**
5239  * Read an IEEE1588/802.1AS Rx timestamp from an Ethernet device.
5240  *
5241  * @param port_id
5242  *   The port identifier of the Ethernet device.
5243  * @param timestamp
5244  *   Pointer to the timestamp struct.
5245  * @param flags
5246  *   Device specific flags. Used to pass the Rx timesync register index to
5247  *   i40e. Unused in igb/ixgbe, pass 0 instead.
5248  *
5249  * @return
5250  *   - 0: Success.
5251  *   - -EINVAL: No timestamp is available.
5252  *   - -ENODEV: The port ID is invalid.
5253  *   - -EIO: if device is removed.
5254  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5255  */
5256 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5257 		struct timespec *timestamp, uint32_t flags);
5258 
5259 /**
5260  * Read an IEEE1588/802.1AS Tx timestamp from an Ethernet device.
5261  *
5262  * @param port_id
5263  *   The port identifier of the Ethernet device.
5264  * @param timestamp
5265  *   Pointer to the timestamp struct.
5266  *
5267  * @return
5268  *   - 0: Success.
5269  *   - -EINVAL: No timestamp is available.
5270  *   - -ENODEV: The port ID is invalid.
5271  *   - -EIO: if device is removed.
5272  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5273  */
5274 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5275 		struct timespec *timestamp);
5276 
5277 /**
5278  * Adjust the timesync clock on an Ethernet device.
5279  *
5280  * This is usually used in conjunction with other Ethdev timesync functions to
5281  * synchronize the device time using the IEEE1588/802.1AS protocol.
5282  *
5283  * @param port_id
5284  *   The port identifier of the Ethernet device.
5285  * @param delta
5286  *   The adjustment in nanoseconds.
5287  *
5288  * @return
5289  *   - 0: Success.
5290  *   - -ENODEV: The port ID is invalid.
5291  *   - -EIO: if device is removed.
5292  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5293  */
5294 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5295 
5296 /**
5297  * Read the time from the timesync clock on an Ethernet device.
5298  *
5299  * This is usually used in conjunction with other Ethdev timesync functions to
5300  * synchronize the device time using the IEEE1588/802.1AS protocol.
5301  *
5302  * @param port_id
5303  *   The port identifier of the Ethernet device.
5304  * @param time
5305  *   Pointer to the timespec struct that holds the time.
5306  *
5307  * @return
5308  *   - 0: Success.
5309  *   - -EINVAL: Bad parameter.
5310  */
5311 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5312 
5313 /**
5314  * Set the time of the timesync clock on an Ethernet device.
5315  *
5316  * This is usually used in conjunction with other Ethdev timesync functions to
5317  * synchronize the device time using the IEEE1588/802.1AS protocol.
5318  *
5319  * @param port_id
5320  *   The port identifier of the Ethernet device.
5321  * @param time
5322  *   Pointer to the timespec struct that holds the time.
5323  *
5324  * @return
5325  *   - 0: Success.
5326  *   - -EINVAL: No timestamp is available.
5327  *   - -ENODEV: The port ID is invalid.
5328  *   - -EIO: if device is removed.
5329  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5330  */
5331 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
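
/*
 * Usage sketch (illustrative): enable timesync and initialise the device clock
 * from the system clock, then apply a small correction later. The variable
 * delta_ns is hypothetical.
 *
 *   struct timespec ts;
 *   rte_eth_timesync_enable(port_id);
 *   clock_gettime(CLOCK_REALTIME, &ts);
 *   rte_eth_timesync_write_time(port_id, &ts);
 *   ...
 *   rte_eth_timesync_adjust_time(port_id, delta_ns);
 */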
5332 
5333 /**
5334  * @warning
5335  * @b EXPERIMENTAL: this API may change without prior notice.
5336  *
5337  * Read the current clock counter of an Ethernet device
5338  *
5339  * This returns the current raw clock value of an Ethernet device. It is
5340  * a raw number of ticks, with no given time reference.
5341  * The value returned here comes from the same clock as the one
5342  * filling the timestamp field of Rx packets when using the hardware timestamp
5343  * offload. Therefore it can be used to compute a precise conversion of
5344  * the device clock to the real time.
5345  *
5346  * E.g., a simple heuristic to derive the frequency would be:
5347  * uint64_t start, end;
5348  * rte_eth_read_clock(port, &start);
5349  * rte_delay_ms(100);
5350  * rte_eth_read_clock(port, &end);
5351  * double freq = (end - start) * 10;
5352  *
5353  * Compute a common reference with:
5354  * uint64_t base_time_sec = current_time();
5355  * uint64_t base_clock;
5356  * rte_eth_read_clock(port, &base_clock);
5357  *
5358  * Then, convert the raw mbuf timestamp with:
5359  * base_time_sec + (double)(*timestamp_dynfield(mbuf) - base_clock) / freq;
5360  *
5361  * This simple example will not provide very good accuracy. One must
5362  * at least measure the frequency multiple times and do a regression.
5363  * To avoid deviation from the system time, the common reference can
5364  * be repeated from time to time. The integer division can also be
5365  * converted by a multiplication and a shift for better performance.
5366  *
5367  * @param port_id
5368  *   The port identifier of the Ethernet device.
5369  * @param clock
5370  *   Pointer to the uint64_t that holds the raw clock value.
5371  *
5372  * @return
5373  *   - 0: Success.
5374  *   - -ENODEV: The port ID is invalid.
5375  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5376  *   - -EINVAL: if bad parameter.
5377  */
5378 __rte_experimental
5379 int
5380 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5381 
5382 /**
5383  * Get the port ID from device name.
5384  * The device name should be specified as below:
5385  * - PCIe address (Domain:Bus:Device.Function), for example- 0000:2:00.0
5386  * - SoC device name, for example- fsl-gmac0
5387  * - vdev dpdk name, for example- net_[pcap0|null0|tap0]
5388  *
5389  * @param name
5390  *   PCI address or name of the device.
5391  * @param port_id
5392  *   Pointer to port identifier of the device.
5393  * @return
5394  *   - (0) if successful and port_id is filled.
5395  *   - (-ENODEV or -EINVAL) on failure.
5396  */
5397 int
5398 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5399 
5400 /**
5401  * Get the device name from port ID.
5402  * The device name is specified as below:
5403  * - PCIe address (Domain:Bus:Device.Function), for example- 0000:02:00.0
5404  * - SoC device name, for example- fsl-gmac0
5405  * - vdev dpdk name, for example- net_[pcap0|null0|tun0|tap0]
5406  *
5407  * @param port_id
5408  *   Port identifier of the device.
5409  * @param name
5410  *   Buffer of size RTE_ETH_NAME_MAX_LEN to store the name.
5411  * @return
5412  *   - (0) if successful.
5413  *   - (-ENODEV) if *port_id* is invalid.
5414  *   - (-EINVAL) on failure.
5415  */
5416 int
5417 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5418 
5419 /**
5420  * Check that numbers of Rx and Tx descriptors satisfy descriptors limits from
5421  * the Ethernet device information, otherwise adjust them to boundaries.
5422  *
5423  * @param port_id
5424  *   The port identifier of the Ethernet device.
5425  * @param nb_rx_desc
5426  *   A pointer to a uint16_t where the number of receive
5427  *   descriptors is stored.
5428  * @param nb_tx_desc
5429  *   A pointer to a uint16_t where the number of transmit
5430  *   descriptors is stored.
5431  * @return
5432  *   - (0) if successful.
5433  *   - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
5434  */
5435 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5436 				     uint16_t *nb_rx_desc,
5437 				     uint16_t *nb_tx_desc);
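
/*
 * Usage sketch (illustrative): clamp the requested ring sizes to the device
 * limits before setting up the queues.
 *
 *   uint16_t nb_rxd = 1024, nb_txd = 1024;
 *   ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
 *   if (ret == 0) {
 *       ... pass nb_rxd to rte_eth_rx_queue_setup() and
 *           nb_txd to rte_eth_tx_queue_setup() ...
 *   }
 */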
5438 
5439 /**
5440  * Test if a port supports specific mempool ops.
5441  *
5442  * @param port_id
5443  *   Port identifier of the Ethernet device.
5444  * @param [in] pool
5445  *   The name of the pool operations to test.
5446  * @return
5447  *   - 0: best mempool ops choice for this port.
5448  *   - 1: mempool ops are supported for this port.
5449  *   - -ENOTSUP: mempool ops not supported for this port.
5450  *   - -ENODEV: Invalid port Identifier.
5451  *   - -EINVAL: Pool param is null.
5452  */
5453 int
5454 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5455 
5456 /**
5457  * Get the security context for the Ethernet device.
5458  *
5459  * @param port_id
5460  *   Port identifier of the Ethernet device
5461  * @return
5462  *   - NULL on error.
5463  *   - pointer to security context on success.
5464  */
5465 void *
5466 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5467 
5468 /**
5469  * @warning
5470  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5471  *
5472  * Query the device hairpin capabilities.
5473  *
5474  * @param port_id
5475  *   The port identifier of the Ethernet device.
5476  * @param cap
5477  *   Pointer to a structure that will hold the hairpin capabilities.
5478  * @return
5479  *   - (0) if successful.
5480  *   - (-ENOTSUP) if hardware doesn't support.
5481  *   - (-EINVAL) if bad parameter.
5482  */
5483 __rte_experimental
5484 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5485 				       struct rte_eth_hairpin_cap *cap);
5486 
5487 /**
5488  * @warning
5489  * @b EXPERIMENTAL: this structure may change without prior notice.
5490  *
5491  * Ethernet device representor ID range entry
5492  */
5493 struct rte_eth_representor_range {
5494 	enum rte_eth_representor_type type; /**< Representor type */
5495 	int controller; /**< Controller index */
5496 	int pf; /**< Physical function index */
5497 	__extension__
5498 	union {
5499 		int vf; /**< VF start index */
5500 		int sf; /**< SF start index */
5501 	};
5502 	uint32_t id_base; /**< Representor ID start index */
5503 	uint32_t id_end;  /**< Representor ID end index */
5504 	char name[RTE_DEV_NAME_MAX_LEN]; /**< Representor name */
5505 };
5506 
5507 /**
5508  * @warning
5509  * @b EXPERIMENTAL: this structure may change without prior notice.
5510  *
5511  * Ethernet device representor information
5512  */
5513 struct rte_eth_representor_info {
5514 	uint16_t controller; /**< Controller ID of caller device. */
5515 	uint16_t pf; /**< Physical function ID of caller device. */
5516 	uint32_t nb_ranges_alloc; /**< Size of the ranges array. */
5517 	uint32_t nb_ranges; /**< Number of initialized ranges. */
5518 	struct rte_eth_representor_range ranges[];/**< Representor ID range. */
5519 };
5520 
5521 /**
5522  * Retrieve the representor info of the device.
5523  *
5524  * Get device representor info to be able to calculate a unique
5525  * representor ID. @see rte_eth_representor_id_get helper.
5526  *
5527  * @param port_id
5528  *   The port identifier of the device.
5529  * @param info
5530  *   A pointer to a representor info structure.
5531  *   NULL to return only the number of range entries, so that memory
5532  *   can be allocated for a subsequent call that stores the details.
5533  *   The number of ranges that were written into this structure
5534  *   will be placed into its nb_ranges field. This number cannot be
5535  *   larger than the nb_ranges_alloc set by the user before calling
5536  *   this function. It can be smaller than the value returned by the
5537  *   function, however.
5538  * @return
5539  *   - (-ENOTSUP) if operation is not supported.
5540  *   - (-ENODEV) if *port_id* invalid.
5541  *   - (-EIO) if device is removed.
5542  *   - (>=0) number of available representor range entries.
5543  */
5544 __rte_experimental
5545 int rte_eth_representor_info_get(uint16_t port_id,
5546 				 struct rte_eth_representor_info *info);
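
/*
 * Usage sketch (illustrative): query the number of ranges first, then allocate
 * the flexible-array structure and fetch the details.
 *
 *   int n = rte_eth_representor_info_get(port_id, NULL);
 *   if (n > 0) {
 *       struct rte_eth_representor_info *info =
 *           calloc(1, sizeof(*info) + n * sizeof(info->ranges[0]));
 *       if (info != NULL) {
 *           info->nb_ranges_alloc = n;
 *           if (rte_eth_representor_info_get(port_id, info) >= 0)
 *               ... iterate over info->ranges[0 .. info->nb_ranges - 1] ...
 *           free(info);
 *       }
 *   }
 */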
5547 
5548 /** The NIC is able to deliver flag (if set) with packets to the PMD. */
5549 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5550 
5551 /** The NIC is able to deliver mark ID with packets to the PMD. */
5552 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5553 
5554 /** The NIC is able to deliver tunnel ID with packets to the PMD. */
5555 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5556 
5557 /**
5558  * Negotiate the NIC's ability to deliver specific kinds of metadata to the PMD.
5559  *
5560  * Invoke this API before the first rte_eth_dev_configure() invocation
5561  * to let the PMD make preparations that are inconvenient to do later.
5562  *
5563  * The negotiation process is as follows:
5564  *
5565  * - the application requests features intending to use at least some of them;
5566  * - the PMD responds with the guaranteed subset of the requested feature set;
5567  * - the application can retry negotiation with another set of features;
5568  * - the application can pass zero to clear the negotiation result;
5569  * - the last negotiated result takes effect upon
5570  *   the ethdev configure and start.
5571  *
5572  * @note
5573  *   The PMD is supposed to first consider enabling the requested feature set
5574  *   in its entirety. Only if it fails to do so, does it have the right to
5575  *   respond with a smaller set of the originally requested features.
5576  *
5577  * @note
5578  *   Return code (-ENOTSUP) does not necessarily mean that the requested
5579  *   features are unsupported. In this case, the application should just
5580  *   assume that these features can be used without prior negotiations.
5581  *
5582  * @param port_id
5583  *   Port (ethdev) identifier
5584  *
5585  * @param[inout] features
5586  *   Feature selection buffer
5587  *
5588  * @return
5589  *   - (-EBUSY) if the port can't handle this in its current state;
5590  *   - (-ENOTSUP) if the method itself is not supported by the PMD;
5591  *   - (-ENODEV) if *port_id* is invalid;
5592  *   - (-EINVAL) if *features* is NULL;
5593  *   - (-EIO) if the device is removed;
5594  *   - (0) on success
5595  */
5596 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
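
/*
 * Usage sketch (illustrative): negotiate Rx metadata delivery before the first
 * rte_eth_dev_configure() call and check which features were granted.
 *
 *   uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
 *                       RTE_ETH_RX_METADATA_USER_MARK;
 *   ret = rte_eth_rx_metadata_negotiate(port_id, &features);
 *   if (ret == 0 && (features & RTE_ETH_RX_METADATA_USER_MARK) == 0) {
 *       ... the PMD cannot deliver the mark ID; adapt the flow rules ...
 *   }
 */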
5597 
5598 /** Flag to offload IP reassembly for IPv4 packets. */
5599 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5600 /** Flag to offload IP reassembly for IPv6 packets. */
5601 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5602 
5603 /**
5604  * A structure used to get/set IP reassembly configuration. It is also used
5605  * to get the maximum capability values that a PMD can support.
5606  *
5607  * If rte_eth_ip_reassembly_capability_get() returns 0, IP reassembly can be
5608  * enabled using rte_eth_ip_reassembly_conf_set() and params values lower than
5609  * capability params can be set in the PMD.
5610  */
5611 struct rte_eth_ip_reassembly_params {
5612 	/** Maximum time in ms which PMD can wait for other fragments. */
5613 	uint32_t timeout_ms;
5614 	/** Maximum number of fragments that can be reassembled. */
5615 	uint16_t max_frags;
5616 	/**
5617 	 * Flags to enable reassembly of packet types -
5618 	 * RTE_ETH_DEV_REASSEMBLY_F_xxx.
5619 	 */
5620 	uint16_t flags;
5621 };
5622 
5623 /**
5624  * @warning
5625  * @b EXPERIMENTAL: this API may change without prior notice
5626  *
5627  * Get IP reassembly capabilities supported by the PMD. This is the first API
5628  * to be called for enabling the IP reassembly offload feature. PMD will return
5629  * to be called for enabling the IP reassembly offload feature. The PMD will return
5630  * the maximum values of the parameters that it can support, and the user can call
5631  *
5632  * @param port_id
5633  *   The port identifier of the device.
5634  * @param capa
5635  *   A pointer to rte_eth_ip_reassembly_params structure.
5636  * @return
5637  *   - (-ENOTSUP) if offload configuration is not supported by device.
5638  *   - (-ENODEV) if *port_id* invalid.
5639  *   - (-EIO) if device is removed.
5640  *   - (-EINVAL) if device is not configured or *capa* passed is NULL.
5641  *   - (0) on success.
5642  */
5643 __rte_experimental
5644 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5645 		struct rte_eth_ip_reassembly_params *capa);
5646 
5647 /**
5648  * @warning
5649  * @b EXPERIMENTAL: this API may change without prior notice
5650  *
5651  * Get IP reassembly configuration parameters currently set in PMD.
5652  * The API will return error if the configuration is not already
5653  * set using rte_eth_ip_reassembly_conf_set() before calling this API or if
5654  * the device is not configured.
5655  *
5656  * @param port_id
5657  *   The port identifier of the device.
5658  * @param conf
5659  *   A pointer to rte_eth_ip_reassembly_params structure.
5660  * @return
5661  *   - (-ENOTSUP) if offload configuration is not supported by device.
5662  *   - (-ENODEV) if *port_id* invalid.
5663  *   - (-EIO) if device is removed.
5664  *   - (-EINVAL) if device is not configured or if *conf* passed is NULL or if
5665  *              configuration is not set using rte_eth_ip_reassembly_conf_set().
5666  *   - (0) on success.
5667  */
5668 __rte_experimental
5669 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5670 		struct rte_eth_ip_reassembly_params *conf);
5671 
5672 /**
5673  * @warning
5674  * @b EXPERIMENTAL: this API may change without prior notice
5675  *
5676  * Set IP reassembly configuration parameters if the PMD supports IP reassembly
5677  * offload. User should first call rte_eth_ip_reassembly_capability_get() to
5678  * check the maximum values supported by the PMD before setting the
5679  * configuration. The use of this API is mandatory to enable this feature, and it
5680  * must be called before rte_eth_dev_start().
5681  *
5682  * In the datapath, the PMD cannot guarantee that IP reassembly is always successful.
5683  * Hence, the PMD shall register an mbuf dynamic field and dynamic flag using
5684  * rte_eth_ip_reassembly_dynfield_register() to denote incomplete IP reassembly.
5685  * If the dynfield is not successfully registered, an error will be returned and
5686  * IP reassembly offload cannot be used.
5687  *
5688  * @param port_id
5689  *   The port identifier of the device.
5690  * @param conf
5691  *   A pointer to rte_eth_ip_reassembly_params structure.
5692  * @return
5693  *   - (-ENOTSUP) if offload configuration is not supported by device.
5694  *   - (-ENODEV) if *port_id* invalid.
5695  *   - (-EIO) if device is removed.
5696  *   - (-EINVAL) if device is not configured or if device is already started or
5697  *               if *conf* passed is NULL or if mbuf dynfield is not registered
5698  *               successfully by the PMD.
5699  *   - (0) on success.
5700  */
5701 __rte_experimental
5702 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5703 		const struct rte_eth_ip_reassembly_params *conf);
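/*
 * Example (illustrative sketch): enable IPv4 reassembly on a configured but not
 * yet started port, clamping the requested values to the advertised capability.
 * The timeout and fragment counts below are arbitrary application choices.
 *
 *   struct rte_eth_ip_reassembly_params capa, conf;
 *
 *   if (rte_eth_ip_reassembly_capability_get(port_id, &capa) == 0) {
 *       conf.timeout_ms = RTE_MIN(capa.timeout_ms, (uint32_t)1000);
 *       conf.max_frags = RTE_MIN(capa.max_frags, (uint16_t)4);
 *       conf.flags = capa.flags & RTE_ETH_DEV_REASSEMBLY_F_IPV4;
 *       if (rte_eth_ip_reassembly_conf_set(port_id, &conf) != 0) {
 *           // Offload could not be enabled (e.g. dynfield registration failed).
 *       }
 *   }
 */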
5704 
5705 /**
5706  * In case of IP reassembly offload failure, the packet will be updated with the
5707  * dynamic flag RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME and the packets
5708  * will be returned without alteration.
5709  * The application can retrieve the attached fragments using mbuf dynamic field
5710  * RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME.
5711  */
5712 typedef struct {
5713 	/**
5714 	 * Next fragment packet. The application should fetch the dynamic field of
5715 	 * each fragment until NULL is returned and nb_frags is 0.
5716 	 */
5717 	struct rte_mbuf *next_frag;
5718 	/** Time spent (in ms) by HW waiting for further fragments. */
5719 	uint16_t time_spent;
5720 	/** Number of more fragments attached in mbuf dynamic fields. */
5721 	uint16_t nb_frags;
5722 } rte_eth_ip_reassembly_dynfield_t;
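/*
 * Example (illustrative sketch): detect incomplete reassembly on a received mbuf
 * and walk the attached fragments. It assumes the dynamic field offset and flag
 * were looked up once at init time with rte_mbuf_dynfield_lookup() and
 * rte_mbuf_dynflag_lookup() (see rte_mbuf_dyn.h) using the names quoted above;
 * lookup error checks are omitted.
 *
 *   int dyn_offset = rte_mbuf_dynfield_lookup(RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME, NULL);
 *   uint64_t incomplete_flag =
 *       RTE_BIT64(rte_mbuf_dynflag_lookup(RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME, NULL));
 *
 *   // In the Rx path, for each received mbuf 'm':
 *   if (m->ol_flags & incomplete_flag) {
 *       rte_eth_ip_reassembly_dynfield_t *df =
 *           RTE_MBUF_DYNFIELD(m, dyn_offset, rte_eth_ip_reassembly_dynfield_t *);
 *       while (df->next_frag != NULL) {
 *           struct rte_mbuf *frag = df->next_frag;
 *           // process 'frag' ...
 *           df = RTE_MBUF_DYNFIELD(frag, dyn_offset, rte_eth_ip_reassembly_dynfield_t *);
 *       }
 *   }
 */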
5723 
5724 /**
5725  * @warning
5726  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5727  *
5728  * Dump private info from the device to a file. The data provided and its order
5729  * depend on the PMD.
5730  *
5731  * @param port_id
5732  *   The port identifier of the Ethernet device.
5733  * @param file
5734  *   A pointer to a file for output.
5735  * @return
5736  *   - (0) on success.
5737  *   - (-ENODEV) if *port_id* is invalid.
5738  *   - (-EINVAL) if null file.
5739  *   - (-ENOTSUP) if the device does not support this function.
5740  *   - (-EIO) if device is removed.
5741  */
5742 __rte_experimental
5743 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5744 
5745 /**
5746  * @warning
5747  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5748  *
5749  * Dump ethdev Rx descriptor info to a file.
5750  *
5751  * This API is used for debugging, not a dataplane API.
5752  *
5753  * @param port_id
5754  *   The port identifier of the Ethernet device.
5755  * @param queue_id
5756  *   A Rx queue identifier on this port.
5757  * @param offset
5758  *  The offset of the descriptor starting from tail. (0 is the next
5759  *  packet to be received by the driver).
5760  * @param num
5761  *   The number of the descriptors to dump.
5762  * @param file
5763  *   A pointer to a file for output.
5764  * @return
5765  *   - On success, zero.
5766  *   - On failure, a negative value.
5767  */
5768 __rte_experimental
5769 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5770 			       uint16_t offset, uint16_t num, FILE *file);
5771 
5772 /**
5773  * @warning
5774  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5775  *
5776  * Dump ethdev Tx descriptor info to a file.
5777  *
5778  * This API is used for debugging, not a dataplane API.
5779  *
5780  * @param port_id
5781  *   The port identifier of the Ethernet device.
5782  * @param queue_id
5783  *   A Tx queue identifier on this port.
5784  * @param offset
5785  *  The offset of the descriptor starting from tail. (0 is the place where
5786  *  the next packet will be sent).
5787  * @param num
5788  *   The number of the descriptors to dump.
5789  * @param file
5790  *   A pointer to a file for output.
5791  * @return
5792  *   - On success, zero.
5793  *   - On failure, a negative value.
5794  */
5795 __rte_experimental
5796 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5797 			       uint16_t offset, uint16_t num, FILE *file);
5798 
5799 
5800 /* Congestion management */
5801 
5802 /** Enumerate list of ethdev congestion management objects */
5803 enum rte_eth_cman_obj {
5804 	/** Congestion management based on Rx queue depth */
5805 	RTE_ETH_CMAN_OBJ_RX_QUEUE = RTE_BIT32(0),
5806 	/**
5807 	 * Congestion management based on mempool depth associated with Rx queue
5808 	 * @see rte_eth_rx_queue_setup()
5809 	 */
5810 	RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL = RTE_BIT32(1),
5811 };
5812 
5813 /**
5814  * @warning
5815  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
5816  *
5817  * A structure used to retrieve information of ethdev congestion management.
5818  */
5819 struct rte_eth_cman_info {
5820 	/**
5821 	 * Set of supported congestion management modes
5822 	 * @see enum rte_cman_mode
5823 	 */
5824 	uint64_t modes_supported;
5825 	/**
5826 	 * Set of supported congestion management objects
5827 	 * @see enum rte_eth_cman_obj
5828 	 */
5829 	uint64_t objs_supported;
5830 	/**
5831 	 * Reserved for future fields. Always returned as 0 when
5832 	 * rte_eth_cman_info_get() is invoked
5833 	 */
5834 	uint8_t rsvd[8];
5835 };
5836 
5837 /**
5838  * @warning
5839  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
5840  *
5841  * A structure used to configure the ethdev congestion management.
5842  */
5843 struct rte_eth_cman_config {
5844 	/** Congestion management object */
5845 	enum rte_eth_cman_obj obj;
5846 	/** Congestion management mode */
5847 	enum rte_cman_mode mode;
5848 	union {
5849 		/**
5850 		 * Rx queue to configure congestion management.
5851 		 *
5852 		 * Valid when object is RTE_ETH_CMAN_OBJ_RX_QUEUE or
5853 		 * RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL.
5854 		 */
5855 		uint16_t rx_queue;
5856 		/**
5857 		 * Reserved for future fields.
5858 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
5859 		 * and will be returned as 0 when rte_eth_cman_config_get() is
5860 		 * invoked.
5861 		 */
5862 		uint8_t rsvd_obj_params[4];
5863 	} obj_param;
5864 	union {
5865 		/**
5866 		 * RED configuration parameters.
5867 		 *
5868 		 * Valid when mode is RTE_CMAN_RED.
5869 		 */
5870 		struct rte_cman_red_params red;
5871 		/**
5872 		 * Reserved for future fields.
5873 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
5874 		 * and will be returned as 0 when rte_eth_cman_config_get() is
5875 		 * invoked.
5876 		 */
5877 		uint8_t rsvd_mode_params[4];
5878 	} mode_param;
5879 };
5880 
5881 /**
5882  * @warning
5883  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5884  *
5885  * Retrieve the information for ethdev congestion management
5886  *
5887  * @param port_id
5888  *   The port identifier of the Ethernet device.
5889  * @param info
5890  *   A pointer to a structure of type *rte_eth_cman_info* to be filled with
5891  *   the information about congestion management.
5892  * @return
5893  *   - (0) if successful.
5894  *   - (-ENOTSUP) if support for cman_info_get does not exist.
5895  *   - (-ENODEV) if *port_id* invalid.
5896  *   - (-EINVAL) if bad parameter.
5897  */
5898 __rte_experimental
5899 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5900 
5901 /**
5902  * @warning
5903  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5904  *
5905  * Initialize the ethdev congestion management configuration structure with default values.
5906  *
5907  * @param port_id
5908  *   The port identifier of the Ethernet device.
5909  * @param config
5910  *   A pointer to a structure of type *rte_eth_cman_config* to be initialized
5911  *   with default value.
5912  * @return
5913  *   - (0) if successful.
5914  *   - (-ENOTSUP) if support for cman_config_init does not exist.
5915  *   - (-ENODEV) if *port_id* invalid.
5916  *   - (-EINVAL) if bad parameter.
5917  */
5918 __rte_experimental
5919 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5920 
5921 /**
5922  * @warning
5923  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5924  *
5925  * Configure ethdev congestion management
5926  *
5927  * @param port_id
5928  *   The port identifier of the Ethernet device.
5929  * @param config
5930  *   A pointer to a structure of type *rte_eth_cman_config* to be configured.
5931  * @return
5932  *   - (0) if successful.
5933  *   - (-ENOTSUP) if support for cman_config_set does not exist.
5934  *   - (-ENODEV) if *port_id* invalid.
5935  *   - (-EINVAL) if bad parameter.
5936  */
5937 __rte_experimental
5938 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
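/*
 * Example (illustrative sketch): configure RED-based congestion management on
 * Rx queue 0 when both the mode and the object are advertised as supported.
 * It assumes the enum rte_cman_mode values can be tested as bit flags in
 * modes_supported (as RTE_ETH_CMAN_OBJ_* can in objs_supported); the RED
 * thresholds are device specific and left as a placeholder.
 *
 *   struct rte_eth_cman_info info;
 *   struct rte_eth_cman_config cfg;
 *
 *   if (rte_eth_cman_info_get(port_id, &info) == 0 &&
 *       (info.modes_supported & RTE_CMAN_RED) &&
 *       (info.objs_supported & RTE_ETH_CMAN_OBJ_RX_QUEUE)) {
 *       rte_eth_cman_config_init(port_id, &cfg);
 *       cfg.obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
 *       cfg.mode = RTE_CMAN_RED;
 *       cfg.obj_param.rx_queue = 0;
 *       // fill cfg.mode_param.red with the desired RED parameters here
 *       rte_eth_cman_config_set(port_id, &cfg);
 *   }
 */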
5939 
5940 /**
5941  * @warning
5942  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5943  *
5944  * Retrieve the applied ethdev congestion management parameters for the given port.
5945  *
5946  * @param port_id
5947  *   The port identifier of the Ethernet device.
5948  * @param config
5949  *   A pointer to a structure of type *rte_eth_cman_config* to retrieve
5950  *   congestion management parameters for the given object.
5951  *   Application must fill all parameters except mode_param parameter in
5952  *   struct rte_eth_cman_config.
5953  *
5954  * @return
5955  *   - (0) if successful.
5956  *   - (-ENOTSUP) if support for cman_config_get does not exist.
5957  *   - (-ENODEV) if *port_id* invalid.
5958  *   - (-EINVAL) if bad parameter.
5959  */
5960 __rte_experimental
5961 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5962 
5963 #include <rte_ethdev_core.h>
5964 
5965 #ifdef __cplusplus
5966 extern "C" {
5967 #endif
5968 
5969 /**
5970  * @internal
5971  * Helper routine for rte_eth_rx_burst().
5972  * Should be called at exit from the PMD's rte_eth_rx_bulk implementation.
5973  * Does necessary post-processing - invokes Rx callbacks if any, etc.
5974  *
5975  * @param port_id
5976  *  The port identifier of the Ethernet device.
5977  * @param queue_id
5978  *  The index of the receive queue from which to retrieve input packets.
5979  * @param rx_pkts
5980  *   The address of an array of pointers to *rte_mbuf* structures that
5981  *   have been retrieved from the device.
5982  * @param nb_rx
5983  *   The number of packets that were retrieved from the device.
5984  * @param nb_pkts
5985  *   The number of elements in @p rx_pkts array.
5986  * @param opaque
5987  *   Opaque pointer of Rx queue callback related data.
5988  *
5989  * @return
5990  *  The number of packets effectively supplied to the @p rx_pkts array.
5991  */
5992 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5993 		struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5994 		void *opaque);
5995 
5996 /**
5997  *
5998  * Retrieve a burst of input packets from a receive queue of an Ethernet
5999  * device. The retrieved packets are stored in *rte_mbuf* structures whose
6000  * pointers are supplied in the *rx_pkts* array.
6001  *
6002  * The rte_eth_rx_burst() function loops, parsing the Rx ring of the
6003  * receive queue, up to *nb_pkts* packets, and for each completed Rx
6004  * descriptor in the ring, it performs the following operations:
6005  *
6006  * - Initialize the *rte_mbuf* data structure associated with the
6007  *   Rx descriptor according to the information provided by the NIC into
6008  *   that Rx descriptor.
6009  *
6010  * - Store the *rte_mbuf* data structure into the next entry of the
6011  *   *rx_pkts* array.
6012  *
6013  * - Replenish the Rx descriptor with a new *rte_mbuf* buffer
6014  *   allocated from the memory pool associated with the receive queue at
6015  *   initialization time.
6016  *
6017  * When retrieving an input packet that was scattered by the controller
6018  * into multiple receive descriptors, the rte_eth_rx_burst() function
6019  * appends the associated *rte_mbuf* buffers to the first buffer of the
6020  * packet.
6021  *
6022  * The rte_eth_rx_burst() function returns the number of packets
6023  * actually retrieved, which is the number of *rte_mbuf* data structures
6024  * effectively supplied into the *rx_pkts* array.
6025  * A return value equal to *nb_pkts* indicates that the Rx queue contained
6026  * at least *rx_pkts* packets, and this is likely to signify that other
6027  * received packets remain in the input queue. Applications implementing
6028  * a "retrieve as much received packets as possible" policy can check this
6029  * specific case and keep invoking the rte_eth_rx_burst() function until
6030  * a value less than *nb_pkts* is returned.
6031  *
6032  * This receive method has the following advantages:
6033  *
6034  * - It allows a run-to-completion network stack engine to retrieve and
6035  *   to immediately process received packets in a fast burst-oriented
6036  *   approach, avoiding the overhead of unnecessary intermediate packet
6037  *   queue/dequeue operations.
6038  *
6039  * - Conversely, it also allows an asynchronous-oriented processing
6040  *   method to retrieve bursts of received packets and to immediately
6041  *   queue them for further parallel processing by another logical core,
6042  *   for instance. However, instead of having received packets being
6043  *   individually queued by the driver, this approach allows the caller
6044  *   of the rte_eth_rx_burst() function to queue a burst of retrieved
6045  *   packets at a time and therefore dramatically reduce the cost of
6046  *   enqueue/dequeue operations per packet.
6047  *
6048  * - It allows the rte_eth_rx_burst() function of the driver to take
6049  *   advantage of burst-oriented hardware features (CPU cache,
6050  *   prefetch instructions, and so on) to minimize the number of CPU
6051  *   cycles per packet.
6052  *
6053  * To summarize, the proposed receive API enables many
6054  * burst-oriented optimizations in both synchronous and asynchronous
6055  * packet processing environments with no overhead in both cases.
6056  *
6057  * @note
6058  *   Some drivers using vector instructions require that *nb_pkts* is
6059  *   divisible by 4 or 8, depending on the driver implementation.
6060  *
6061  * The rte_eth_rx_burst() function does not provide any error
6062  * notification to avoid the corresponding overhead. As a hint, the
6063  * upper-level application might check the status of the device link if
6064  * rte_eth_rx_burst() systematically returns 0 for a given number of tries.
6065  *
6066  * @param port_id
6067  *   The port identifier of the Ethernet device.
6068  * @param queue_id
6069  *   The index of the receive queue from which to retrieve input packets.
6070  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
6071  *   to rte_eth_dev_configure().
6072  * @param rx_pkts
6073  *   The address of an array of pointers to *rte_mbuf* structures that
6074  *   must be large enough to store *nb_pkts* pointers in it.
6075  * @param nb_pkts
6076  *   The maximum number of packets to retrieve.
6077  *   The value must be divisible by 8 in order to work with any driver.
6078  * @return
6079  *   The number of packets actually retrieved, which is the number
6080  *   of pointers to *rte_mbuf* structures effectively supplied to the
6081  *   *rx_pkts* array.
6082  */
6083 static inline uint16_t
6084 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
6085 		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
6086 {
6087 	uint16_t nb_rx;
6088 	struct rte_eth_fp_ops *p;
6089 	void *qd;
6090 
6091 #ifdef RTE_ETHDEV_DEBUG_RX
6092 	if (port_id >= RTE_MAX_ETHPORTS ||
6093 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6094 		RTE_ETHDEV_LOG_LINE(ERR,
6095 			"Invalid port_id=%u or queue_id=%u",
6096 			port_id, queue_id);
6097 		return 0;
6098 	}
6099 #endif
6100 
6101 	/* fetch pointer to queue data */
6102 	p = &rte_eth_fp_ops[port_id];
6103 	qd = p->rxq.data[queue_id];
6104 
6105 #ifdef RTE_ETHDEV_DEBUG_RX
6106 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6107 
6108 	if (qd == NULL) {
6109 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6110 			queue_id, port_id);
6111 		return 0;
6112 	}
6113 #endif
6114 
6115 	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
6116 
6117 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6118 	{
6119 		void *cb;
6120 
6121 		/* rte_memory_order_release memory order was used when the
6122 		 * call back was inserted into the list.
6123 		 * Since there is a clear dependency between loading
6124 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6125 		 * not required.
6126 		 */
6127 		cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
6128 				rte_memory_order_relaxed);
6129 		if (unlikely(cb != NULL))
6130 			nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
6131 					rx_pkts, nb_rx, nb_pkts, cb);
6132 	}
6133 #endif
6134 
6135 	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
6136 	return nb_rx;
6137 }
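/*
 * Example (illustrative sketch): a minimal Rx poll loop draining one queue.
 * BURST_SIZE is an arbitrary application-defined constant.
 *
 *   struct rte_mbuf *pkts[BURST_SIZE];
 *
 *   for (;;) {
 *       uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
 *       for (uint16_t i = 0; i < nb_rx; i++) {
 *           // process pkts[i] ...
 *           rte_pktmbuf_free(pkts[i]);
 *       }
 *   }
 */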
6138 
6139 /**
6140  * Get the number of used descriptors of a Rx queue
6141  *
6142  * Since it's a dataplane function, no check is performed on port_id and
6143  * queue_id. The caller must therefore ensure that the port is enabled
6144  * and the queue is configured and running.
6145  *
6146  * @param port_id
6147  *  The port identifier of the Ethernet device.
6148  * @param queue_id
6149  *  The queue ID on the specific port.
6150  * @return
6151  *  The number of used descriptors in the specific queue, or:
6152  *   - (-ENODEV) if *port_id* is invalid.
6153  *   - (-EINVAL) if *queue_id* is invalid
6154  *   - (-ENOTSUP) if the device does not support this function
6155  */
6156 static inline int
6157 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
6158 {
6159 	struct rte_eth_fp_ops *p;
6160 	void *qd;
6161 
6162 #ifdef RTE_ETHDEV_DEBUG_RX
6163 	if (port_id >= RTE_MAX_ETHPORTS ||
6164 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6165 		RTE_ETHDEV_LOG_LINE(ERR,
6166 			"Invalid port_id=%u or queue_id=%u",
6167 			port_id, queue_id);
6168 		return -EINVAL;
6169 	}
6170 #endif
6171 
6172 	/* fetch pointer to queue data */
6173 	p = &rte_eth_fp_ops[port_id];
6174 	qd = p->rxq.data[queue_id];
6175 
6176 #ifdef RTE_ETHDEV_DEBUG_RX
6177 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6178 	if (qd == NULL)
6179 		return -EINVAL;
6180 #endif
6181 
6182 	if (*p->rx_queue_count == NULL)
6183 		return -ENOTSUP;
6184 	return (int)(*p->rx_queue_count)(qd);
6185 }
6186 
6187 /**@{@name Rx hardware descriptor states
6188  * @see rte_eth_rx_descriptor_status
6189  */
6190 #define RTE_ETH_RX_DESC_AVAIL    0 /**< Desc available for hw. */
6191 #define RTE_ETH_RX_DESC_DONE     1 /**< Desc done, filled by hw. */
6192 #define RTE_ETH_RX_DESC_UNAVAIL  2 /**< Desc used by driver or hw. */
6193 /**@}*/
6194 
6195 /**
6196  * Check the status of a Rx descriptor in the queue
6197  *
6198  * It should be called in a similar context to the Rx function:
6199  * - on a dataplane core
6200  * - not concurrently on the same queue
6201  *
6202  * Since it's a dataplane function, no check is performed on port_id and
6203  * queue_id. The caller must therefore ensure that the port is enabled
6204  * and the queue is configured and running.
6205  *
6206  * Note: accessing a random descriptor in the ring may trigger cache
6207  * misses and have a performance impact.
6208  *
6209  * @param port_id
6210  *  A valid port identifier of the Ethernet device.
6211  * @param queue_id
6212  *  A valid Rx queue identifier on this port.
6213  * @param offset
6214  *  The offset of the descriptor starting from tail (0 is the next
6215  *  packet to be received by the driver).
6216  *
6217  * @return
6218  *  - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
6219  *    receive a packet.
6220  *  - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
6221  *    not yet processed by the driver (i.e. in the receive queue).
6222  *  - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either held by
6223  *    the driver and not yet returned to hw, or reserved by the hw.
6224  *  - (-EINVAL) bad descriptor offset.
6225  *  - (-ENOTSUP) if the device does not support this function.
6226  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6227  */
6228 static inline int
6229 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6230 	uint16_t offset)
6231 {
6232 	struct rte_eth_fp_ops *p;
6233 	void *qd;
6234 
6235 #ifdef RTE_ETHDEV_DEBUG_RX
6236 	if (port_id >= RTE_MAX_ETHPORTS ||
6237 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6238 		RTE_ETHDEV_LOG_LINE(ERR,
6239 			"Invalid port_id=%u or queue_id=%u",
6240 			port_id, queue_id);
6241 		return -EINVAL;
6242 	}
6243 #endif
6244 
6245 	/* fetch pointer to queue data */
6246 	p = &rte_eth_fp_ops[port_id];
6247 	qd = p->rxq.data[queue_id];
6248 
6249 #ifdef RTE_ETHDEV_DEBUG_RX
6250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6251 	if (qd == NULL)
6252 		return -ENODEV;
6253 #endif
6254 	if (*p->rx_descriptor_status == NULL)
6255 		return -ENOTSUP;
6256 	return (*p->rx_descriptor_status)(qd, offset);
6257 }
6258 
6259 /**@{@name Tx hardware descriptor states
6260  * @see rte_eth_tx_descriptor_status
6261  */
6262 #define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
6263 #define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
6264 #define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
6265 /**@}*/
6266 
6267 /**
6268  * Check the status of a Tx descriptor in the queue.
6269  *
6270  * It should be called in a similar context to the Tx function:
6271  * - on a dataplane core
6272  * - not concurrently on the same queue
6273  *
6274  * Since it's a dataplane function, no check is performed on port_id and
6275  * queue_id. The caller must therefore ensure that the port is enabled
6276  * and the queue is configured and running.
6277  *
6278  * Note: accessing a random descriptor in the ring may trigger cache
6279  * misses and have a performance impact.
6280  *
6281  * @param port_id
6282  *  A valid port identifier of the Ethernet device.
6283  * @param queue_id
6284  *  A valid Tx queue identifier on this port.
6285  * @param offset
6286  *  The offset of the descriptor starting from tail (0 is the place where
6287  *  the next packet will be sent).
6288  *
6289  * @return
6290  *  - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
6291  *    in the transmit queue.
6292  *  - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
6293  *    be reused by the driver.
6294  *  - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
6295  *    driver or the hardware.
6296  *  - (-EINVAL) bad descriptor offset.
6297  *  - (-ENOTSUP) if the device does not support this function.
6298  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6299  */
6300 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6301 	uint16_t queue_id, uint16_t offset)
6302 {
6303 	struct rte_eth_fp_ops *p;
6304 	void *qd;
6305 
6306 #ifdef RTE_ETHDEV_DEBUG_TX
6307 	if (port_id >= RTE_MAX_ETHPORTS ||
6308 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6309 		RTE_ETHDEV_LOG_LINE(ERR,
6310 			"Invalid port_id=%u or queue_id=%u",
6311 			port_id, queue_id);
6312 		return -EINVAL;
6313 	}
6314 #endif
6315 
6316 	/* fetch pointer to queue data */
6317 	p = &rte_eth_fp_ops[port_id];
6318 	qd = p->txq.data[queue_id];
6319 
6320 #ifdef RTE_ETHDEV_DEBUG_TX
6321 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6322 	if (qd == NULL)
6323 		return -ENODEV;
6324 #endif
6325 	if (*p->tx_descriptor_status == NULL)
6326 		return -ENOTSUP;
6327 	return (*p->tx_descriptor_status)(qd, offset);
6328 }
6329 
6330 /**
6331  * @internal
6332  * Helper routine for rte_eth_tx_burst().
6333  * Should be called before entering the PMD's rte_eth_tx_bulk implementation.
6334  * Does necessary pre-processing - invokes Tx callbacks if any, etc.
6335  *
6336  * @param port_id
6337  *   The port identifier of the Ethernet device.
6338  * @param queue_id
6339  *   The index of the transmit queue through which output packets must be
6340  *   sent.
6341  * @param tx_pkts
6342  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6343  *   which contain the output packets.
6344  * @param nb_pkts
6345  *   The maximum number of packets to transmit.
6346  * @return
6347  *   The number of output packets to transmit.
6348  */
6349 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6350 	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6351 
6352 /**
6353  * Send a burst of output packets on a transmit queue of an Ethernet device.
6354  *
6355  * The rte_eth_tx_burst() function is invoked to transmit output packets
6356  * on the output queue *queue_id* of the Ethernet device designated by its
6357  * *port_id*.
6358  * The *nb_pkts* parameter is the number of packets to send which are
6359  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6360  * allocated from a pool created with rte_pktmbuf_pool_create().
6361  * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
6362  * up to the number of transmit descriptors available in the Tx ring of the
6363  * transmit queue.
6364  * For each packet to send, the rte_eth_tx_burst() function performs
6365  * the following operations:
6366  *
6367  * - Pick up the next available descriptor in the transmit ring.
6368  *
6369  * - Free the network buffer previously sent with that descriptor, if any.
6370  *
6371  * - Initialize the transmit descriptor with the information provided
6372  *   in the *rte_mbuf* data structure.
6373  *
6374  * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
6375  * the rte_eth_tx_burst() function uses several transmit descriptors
6376  * of the ring.
6377  *
6378  * The rte_eth_tx_burst() function returns the number of packets it
6379  * actually sent. A return value equal to *nb_pkts* means that all packets
6380  * have been sent, and this is likely to signify that other output packets
6381  * could be immediately transmitted again. Applications that implement a
6382  * "send as many packets to transmit as possible" policy can check this
6383  * specific case and keep invoking the rte_eth_tx_burst() function until
6384  * a value less than *nb_pkts* is returned.
6385  *
6386  * It is the responsibility of the rte_eth_tx_burst() function to
6387  * transparently free the memory buffers of packets previously sent.
6388  * This feature is driven by the *tx_free_thresh* value supplied to the
6389  * rte_eth_dev_configure() function at device configuration time.
6390  * When the number of free Tx descriptors drops below this threshold, the
6391  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
6392  * of those packets whose transmission was effectively completed.
6393  *
6394  * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
6395  * invoke this function concurrently on the same Tx queue without SW lock.
6396  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
6397  *
6398  * @see rte_eth_tx_prepare to perform some prior checks or adjustments
6399  * for offloads.
6400  *
6401  * @note This function must not modify mbufs (including packet data)
6402  * unless the refcnt is 1.
6403  * An exception is the bonding PMD, which does not have "Tx prepare" support;
6404  * in this case, mbufs may be modified.
6405  *
6406  * @param port_id
6407  *   The port identifier of the Ethernet device.
6408  * @param queue_id
6409  *   The index of the transmit queue through which output packets must be
6410  *   sent.
6411  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6412  *   to rte_eth_dev_configure().
6413  * @param tx_pkts
6414  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6415  *   which contain the output packets.
6416  * @param nb_pkts
6417  *   The maximum number of packets to transmit.
6418  * @return
6419  *   The number of output packets actually stored in transmit descriptors of
6420  *   the transmit ring. The return value can be less than the value of the
6421  *   *nb_pkts* parameter when the transmit ring is full or has been filled up.
6422  */
6423 static inline uint16_t
6424 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6425 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6426 {
6427 	struct rte_eth_fp_ops *p;
6428 	void *qd;
6429 
6430 #ifdef RTE_ETHDEV_DEBUG_TX
6431 	if (port_id >= RTE_MAX_ETHPORTS ||
6432 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6433 		RTE_ETHDEV_LOG_LINE(ERR,
6434 			"Invalid port_id=%u or queue_id=%u",
6435 			port_id, queue_id);
6436 		return 0;
6437 	}
6438 #endif
6439 
6440 	/* fetch pointer to queue data */
6441 	p = &rte_eth_fp_ops[port_id];
6442 	qd = p->txq.data[queue_id];
6443 
6444 #ifdef RTE_ETHDEV_DEBUG_TX
6445 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6446 
6447 	if (qd == NULL) {
6448 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6449 			queue_id, port_id);
6450 		return 0;
6451 	}
6452 #endif
6453 
6454 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6455 	{
6456 		void *cb;
6457 
6458 		/* rte_memory_order_release memory order was used when the
6459 		 * call back was inserted into the list.
6460 		 * Since there is a clear dependency between loading
6461 		 * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
6462 		 * not required.
6463 		 */
6464 		cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
6465 				rte_memory_order_relaxed);
6466 		if (unlikely(cb != NULL))
6467 			nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6468 					tx_pkts, nb_pkts, cb);
6469 	}
6470 #endif
6471 
6472 	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6473 
6474 	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6475 	return nb_pkts;
6476 }
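/*
 * Example (illustrative sketch): transmit a burst and free whatever the driver
 * did not accept rather than retrying.
 *
 *   uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *
 *   for (uint16_t i = nb_tx; i < nb_pkts; i++)
 *       rte_pktmbuf_free(pkts[i]);
 */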
6477 
6478 /**
6479  * Process a burst of output packets on a transmit queue of an Ethernet device.
6480  *
6481  * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
6482  * transmitted on the output queue *queue_id* of the Ethernet device designated
6483  * by its *port_id*.
6484  * The *nb_pkts* parameter is the number of packets to be prepared which are
6485  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6486  * allocated from a pool created with rte_pktmbuf_pool_create().
6487  * For each packet to send, the rte_eth_tx_prepare() function performs
6488  * the following operations:
6489  *
6490  * - Check if the packet meets the device's requirements for Tx offloads.
6491  *
6492  * - Check limitations on the number of segments.
6493  *
6494  * - Check additional requirements when debug is enabled.
6495  *
6496  * - Update and/or reset required checksums when Tx offload is set for packet.
6497  *
6498  * Since this function can modify packet data, provided mbufs must be safely
6499  * writable (e.g. modified data cannot be in a shared segment).
6500  *
6501  * The rte_eth_tx_prepare() function returns the number of packets ready to be
6502  * sent. A return value equal to *nb_pkts* means that all packets are valid and
6503  * ready to be sent, otherwise stops processing on the first invalid packet and
6504  * leaves the remaining packets untouched.
6505  *
6506  * When this functionality is not implemented in the driver, all packets
6507  * are returned untouched.
6508  *
6509  * @param port_id
6510  *   The port identifier of the Ethernet device.
6511  *   The value must be a valid port ID.
6512  * @param queue_id
6513  *   The index of the transmit queue through which output packets must be
6514  *   sent.
6515  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6516  *   to rte_eth_dev_configure().
6517  * @param tx_pkts
6518  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6519  *   which contain the output packets.
6520  * @param nb_pkts
6521  *   The maximum number of packets to process.
6522  * @return
6523  *   The number of packets correct and ready to be sent. The return value can be
6524  *   less than the value of the *nb_pkts* parameter when some packet doesn't
6525  *   meet the device's requirements, with rte_errno set appropriately:
6526  *   - EINVAL: offload flags are not correctly set
6527  *   - ENOTSUP: the offload feature is not supported by the hardware
6528  *   - ENODEV: if *port_id* is invalid (with debug enabled only)
6529  */
6530 
6531 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6532 
6533 static inline uint16_t
6534 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6535 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6536 {
6537 	struct rte_eth_fp_ops *p;
6538 	void *qd;
6539 
6540 #ifdef RTE_ETHDEV_DEBUG_TX
6541 	if (port_id >= RTE_MAX_ETHPORTS ||
6542 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6543 		RTE_ETHDEV_LOG_LINE(ERR,
6544 			"Invalid port_id=%u or queue_id=%u",
6545 			port_id, queue_id);
6546 		rte_errno = ENODEV;
6547 		return 0;
6548 	}
6549 #endif
6550 
6551 	/* fetch pointer to queue data */
6552 	p = &rte_eth_fp_ops[port_id];
6553 	qd = p->txq.data[queue_id];
6554 
6555 #ifdef RTE_ETHDEV_DEBUG_TX
6556 	if (!rte_eth_dev_is_valid_port(port_id)) {
6557 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
6558 		rte_errno = ENODEV;
6559 		return 0;
6560 	}
6561 	if (qd == NULL) {
6562 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6563 			queue_id, port_id);
6564 		rte_errno = EINVAL;
6565 		return 0;
6566 	}
6567 #endif
6568 
6569 	if (!p->tx_pkt_prepare)
6570 		return nb_pkts;
6571 
6572 	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6573 }
6574 
6575 #else
6576 
6577 /*
6578  * Native NOOP operation for compilation targets which do not require any
6579  * preparation steps, and for which a functional NOOP may introduce an
6580  * unnecessary performance drop.
6581  *
6582  * Generally, it is not a good idea to enable this globally, and it should not
6583  * be used if the behavior of tx_preparation can change.
6584  */
6585 
6586 static inline uint16_t
6587 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6588 		__rte_unused uint16_t queue_id,
6589 		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6590 {
6591 	return nb_pkts;
6592 }
6593 
6594 #endif
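/*
 * Example (illustrative sketch): validate offload requests before transmitting.
 * Packets rejected by rte_eth_tx_prepare() are simply not sent here; a real
 * application would free or fix them.
 *
 *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *
 *   if (nb_prep != nb_pkts) {
 *       // pkts[nb_prep] does not meet device requirements; inspect rte_errno.
 *   }
 *   uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */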
6595 
6596 /**
6597  * Send any packets queued up for transmission on a port and HW queue
6598  *
6599  * This causes an explicit flush of packets previously buffered via the
6600  * rte_eth_tx_buffer() function. It returns the number of packets successfully
6601  * sent to the NIC, and calls the error callback for any unsent packets. Unless
6602  * explicitly set up otherwise, the default callback simply frees the unsent
6603  * packets back to the owning mempool.
6604  *
6605  * @param port_id
6606  *   The port identifier of the Ethernet device.
6607  * @param queue_id
6608  *   The index of the transmit queue through which output packets must be
6609  *   sent.
6610  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6611  *   to rte_eth_dev_configure().
6612  * @param buffer
6613  *   Buffer of packets to be transmitted.
6614  * @return
6615  *   The number of packets successfully sent to the Ethernet device. The error
6616  *   callback is called for any packets which could not be sent.
6617  */
6618 static inline uint16_t
6619 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6620 		struct rte_eth_dev_tx_buffer *buffer)
6621 {
6622 	uint16_t sent;
6623 	uint16_t to_send = buffer->length;
6624 
6625 	if (to_send == 0)
6626 		return 0;
6627 
6628 	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6629 
6630 	buffer->length = 0;
6631 
6632 	/* All packets sent, or to be dealt with by callback below */
6633 	if (unlikely(sent != to_send))
6634 		buffer->error_callback(&buffer->pkts[sent],
6635 				       (uint16_t)(to_send - sent),
6636 				       buffer->error_userdata);
6637 
6638 	return sent;
6639 }
6640 
6641 /**
6642  * Buffer a single packet for future transmission on a port and queue
6643  *
6644  * This function takes a single mbuf/packet and buffers it for later
6645  * transmission on the particular port and queue specified. Once the buffer is
6646  * full of packets, an attempt will be made to transmit all the buffered
6647  * packets. In case of error, where not all packets can be transmitted, a
6648  * callback is called with the unsent packets as a parameter. If no callback
6649  * is explicitly set up, the unsent packets are just freed back to the owning
6650  * mempool. The function returns the number of packets actually sent, i.e.
6651  * 0 if no buffer flush occurred, otherwise the number of packets successfully
6652  * flushed.
6653  *
6654  * @param port_id
6655  *   The port identifier of the Ethernet device.
6656  * @param queue_id
6657  *   The index of the transmit queue through which output packets must be
6658  *   sent.
6659  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6660  *   to rte_eth_dev_configure().
6661  * @param buffer
6662  *   Buffer used to collect packets to be sent.
6663  * @param tx_pkt
6664  *   Pointer to the packet mbuf to be sent.
6665  * @return
6666  *   0 = packet has been buffered for later transmission
6667  *   N > 0 = packet has been buffered, and the buffer was subsequently flushed,
6668  *     causing N packets to be sent, and the error callback to be called for
6669  *     the rest.
6670  */
6671 static __rte_always_inline uint16_t
6672 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6673 		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6674 {
6675 	buffer->pkts[buffer->length++] = tx_pkt;
6676 	if (buffer->length < buffer->size)
6677 		return 0;
6678 
6679 	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6680 }
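/*
 * Example (illustrative sketch): allocate a Tx buffer for up to 32 packets,
 * buffer packets as they are produced and flush periodically. Allocation and
 * error handling are abbreviated.
 *
 *   struct rte_eth_dev_tx_buffer *buf =
 *       rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *   rte_eth_tx_buffer_init(buf, 32);
 *
 *   // Per packet:
 *   rte_eth_tx_buffer(port_id, queue_id, buf, m);
 *
 *   // Periodically, e.g. when the poll loop found nothing else to do:
 *   rte_eth_tx_buffer_flush(port_id, queue_id, buf);
 */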
6681 
6682 /**
6683  * @warning
6684  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
6685  *
6686  * Recycle used mbufs from a transmit queue of an Ethernet device, and move
6687  * these mbufs into a mbuf ring for a receive queue of an Ethernet device.
6688  * This can bypass mempool path to save CPU cycles.
6689  *
6690  * The rte_eth_recycle_mbufs() function loops, with rte_eth_rx_burst() and
6691  * rte_eth_tx_burst() functions, freeing Tx used mbufs and replenishing Rx
6692  * descriptors. The number of recycled mbufs depends on how many mbufs the Rx
6693  * mbuf ring requests, constrained by the used mbufs available in the Tx mbuf ring.
6694  *
6695  * For the recycled mbufs, the rte_eth_recycle_mbufs() function performs the
6696  * following operations:
6697  *
6698  * - Copy used *rte_mbuf* buffer pointers from Tx mbuf ring into Rx mbuf ring.
6699  *
6700  * - Replenish the Rx descriptors with the recycling *rte_mbuf* mbufs freed
6701  *   from the Tx mbuf ring.
6702  *
6703  * This function splits the Rx and Tx paths with different callback functions: the
6704  * recycle_tx_mbufs_reuse callback belongs to the Tx driver, and the
6705  * recycle_rx_descriptors_refill callback belongs to the Rx driver. rte_eth_recycle_mbufs()
6706  * supports the case where the Rx Ethernet device is different from the Tx Ethernet device.
6707  *
6708  * It is the responsibility of users to select the Rx/Tx queue pair to recycle
6709  * mbufs. Before calling this function, users must call the rte_eth_recycle_rxq_info_get
6710  * function to retrieve the selected Rx queue information.
6711  * @see rte_eth_recycle_rxq_info_get, struct rte_eth_recycle_rxq_info
6712  *
6713  * Currently, the rte_eth_recycle_mbufs() function can feed 1 Rx queue from
6714  * 2 Tx queues in the same thread. Do not pair the Rx queue and Tx queue across different
6715  * threads, in order to avoid memory corruption.
6716  *
6717  * @param rx_port_id
6718  *   Port identifying the receive side.
6719  * @param rx_queue_id
6720  *   The index of the receive queue identifying the receive side.
6721  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
6722  *   to rte_eth_dev_configure().
6723  * @param tx_port_id
6724  *   Port identifying the transmit side.
6725  * @param tx_queue_id
6726  *   The index of the transmit queue identifying the transmit side.
6727  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6728  *   to rte_eth_dev_configure().
6729  * @param recycle_rxq_info
6730  *   A pointer to a structure of type *rte_eth_recycle_rxq_info* which contains
6731  *   the information of the Rx queue mbuf ring.
6732  * @return
6733  *   The number of recycling mbufs.
6734  */
6735 __rte_experimental
6736 static inline uint16_t
6737 rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
6738 		uint16_t tx_port_id, uint16_t tx_queue_id,
6739 		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
6740 {
6741 	struct rte_eth_fp_ops *p1, *p2;
6742 	void *qd1, *qd2;
6743 	uint16_t nb_mbufs;
6744 
6745 #ifdef RTE_ETHDEV_DEBUG_TX
6746 	if (tx_port_id >= RTE_MAX_ETHPORTS ||
6747 			tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6748 		RTE_ETHDEV_LOG_LINE(ERR,
6749 				"Invalid tx_port_id=%u or tx_queue_id=%u",
6750 				tx_port_id, tx_queue_id);
6751 		return 0;
6752 	}
6753 #endif
6754 
6755 	/* fetch pointer to Tx queue data */
6756 	p1 = &rte_eth_fp_ops[tx_port_id];
6757 	qd1 = p1->txq.data[tx_queue_id];
6758 
6759 #ifdef RTE_ETHDEV_DEBUG_TX
6760 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
6761 
6762 	if (qd1 == NULL) {
6763 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
6764 				tx_queue_id, tx_port_id);
6765 		return 0;
6766 	}
6767 #endif
6768 	if (p1->recycle_tx_mbufs_reuse == NULL)
6769 		return 0;
6770 
6771 #ifdef RTE_ETHDEV_DEBUG_RX
6772 	if (rx_port_id >= RTE_MAX_ETHPORTS ||
6773 			rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6774 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
6775 				rx_port_id, rx_queue_id);
6776 		return 0;
6777 	}
6778 #endif
6779 
6780 	/* fetch pointer to Rx queue data */
6781 	p2 = &rte_eth_fp_ops[rx_port_id];
6782 	qd2 = p2->rxq.data[rx_queue_id];
6783 
6784 #ifdef RTE_ETHDEV_DEBUG_RX
6785 	RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
6786 
6787 	if (qd2 == NULL) {
6788 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
6789 				rx_queue_id, rx_port_id);
6790 		return 0;
6791 	}
6792 #endif
6793 	if (p2->recycle_rx_descriptors_refill == NULL)
6794 		return 0;
6795 
6796 	/* Copy used *rte_mbuf* buffer pointers from Tx mbuf ring
6797 	 * into Rx mbuf ring.
6798 	 */
6799 	nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);
6800 
6801 	/* If no recycling mbufs, return 0. */
6802 	if (nb_mbufs == 0)
6803 		return 0;
6804 
6805 	/* Replenish the Rx descriptors with the recycling
6806 	 * into Rx mbuf ring.
6807 	 */
6808 	p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
6809 
6810 	return nb_mbufs;
6811 }
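/*
 * Example (illustrative sketch): forwarding loop of a single thread owning both
 * queues, recycling Tx mbufs into the Rx mbuf ring before polling. 'rxq_info'
 * is assumed to have been filled once at setup time via the Rx queue
 * information API referenced above.
 *
 *   rte_eth_recycle_mbufs(rx_port, rx_queue, tx_port, tx_queue, &rxq_info);
 *   uint16_t nb_rx = rte_eth_rx_burst(rx_port, rx_queue, pkts, BURST_SIZE);
 *   // process pkts[0..nb_rx-1] ...
 *   uint16_t nb_tx = rte_eth_tx_burst(tx_port, tx_queue, pkts, nb_rx);
 */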
6812 
6813 /**
6814  * @warning
6815  * @b EXPERIMENTAL: this API may change without prior notice
6816  *
6817  * Get supported header protocols to split on Rx.
6818  *
6819  * When a packet type is announced to be split,
6820  * it *must* be supported by the PMD.
6821  * For instance, if eth-ipv4, eth-ipv4-udp is announced,
6822  * the PMD must return the following packet types for these packets:
6823  * - Ether/IPv4             -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
6824  * - Ether/IPv4/UDP         -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP
6825  *
6826  * @param port_id
6827  *   The port identifier of the device.
6828  * @param[out] ptypes
6829  *   An array pointer to store supported protocol headers, allocated by caller.
6830  *   These ptypes are composed with RTE_PTYPE_*.
6831  * @param num
6832  *   Size of the array pointed to by param ptypes.
6833  * @return
6834  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
6835  *           only num entries will be filled into the ptypes array,
6836  *           but the full count of supported ptypes will be returned.
6837  *   - (-ENOTSUP) if header protocol is not supported by device.
6838  *   - (-ENODEV) if *port_id* invalid.
6839  *   - (-EINVAL) if bad parameter.
6840  */
6841 __rte_experimental
6842 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
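/*
 * Example (illustrative sketch): query the supported buffer-split header
 * protocols with a fixed-size array, detecting when the full list is larger.
 *
 *   uint32_t ptypes[16];
 *   int ret = rte_eth_buffer_split_get_supported_hdr_ptypes(port_id, ptypes,
 *                                                           RTE_DIM(ptypes));
 *
 *   if (ret > (int)RTE_DIM(ptypes)) {
 *       // Only RTE_DIM(ptypes) entries were filled; allocate 'ret' entries
 *       // and call again to obtain the full list.
 *   }
 */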
6843 
6844 /**
6845  * @warning
6846  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
6847  *
6848  * Get the number of used descriptors of a Tx queue.
6849  *
6850  * This function retrieves the number of used descriptors of a transmit queue.
6851  * Applications can use this API in the fast path to inspect Tx queue occupancy
6852  * and take appropriate actions based on the available free descriptors.
6853  * An example action could be implementing Random Early Discard (RED).
6854  *
6855  * Since it's a fast-path function, no check is performed on port_id and queue_id.
6856  * The caller must therefore ensure that the port is enabled
6857  * and the queue is configured and running.
6858  *
6859  * @param port_id
6860  *   The port identifier of the device.
6861  * @param queue_id
6862  *   The index of the transmit queue.
6863  *   The value must be in the range [0, nb_tx_queue - 1]
6864  *   previously supplied to rte_eth_dev_configure().
6865  * @return
6866  *   The number of used descriptors in the specific queue, or:
6867  *   - (-ENODEV) if *port_id* is invalid. Enabled only when RTE_ETHDEV_DEBUG_TX is enabled.
6868  *   - (-EINVAL) if *queue_id* is invalid. Enabled only when RTE_ETHDEV_DEBUG_TX is enabled.
6869  *   - (-ENOTSUP) if the device does not support this function.
6870  *
6871  * @note This function is designed for fast-path use.
6872  * @note There is no requirement to call this function before rte_eth_tx_burst() invocation.
6873  * @note Utilize this function exclusively when the caller needs to determine
6874  * the used queue count across all descriptors of a Tx queue.
6875  * If the use case only involves checking the status of a specific descriptor slot,
6876  * opt for rte_eth_tx_descriptor_status() instead.
6877  */
6878 __rte_experimental
6879 static inline int
6880 rte_eth_tx_queue_count(uint16_t port_id, uint16_t queue_id)
6881 {
6882 	struct rte_eth_fp_ops *fops;
6883 	void *qd;
6884 	int rc;
6885 
6886 #ifdef RTE_ETHDEV_DEBUG_TX
6887 	if (port_id >= RTE_MAX_ETHPORTS || !rte_eth_dev_is_valid_port(port_id)) {
6888 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id);
6889 		rc = -ENODEV;
6890 		goto out;
6891 	}
6892 
6893 	if (queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6894 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6895 				    queue_id, port_id);
6896 		rc = -EINVAL;
6897 		goto out;
6898 	}
6899 #endif
6900 
6901 	/* Fetch pointer to Tx queue data */
6902 	fops = &rte_eth_fp_ops[port_id];
6903 	qd = fops->txq.data[queue_id];
6904 
6905 #ifdef RTE_ETHDEV_DEBUG_TX
6906 	if (qd == NULL) {
6907 		RTE_ETHDEV_LOG_LINE(ERR, "Invalid queue_id=%u for port_id=%u",
6908 				    queue_id, port_id);
6909 		rc = -EINVAL;
6910 		goto out;
6911 	}
6912 #endif
6913 	if (fops->tx_queue_count == NULL) {
6914 		rc = -ENOTSUP;
6915 		goto out;
6916 	}
6917 
6918 	rc = fops->tx_queue_count(qd);
6919 
6920 out:
6921 	rte_eth_trace_tx_queue_count(port_id, queue_id, rc);
6922 	return rc;
6923 }
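/*
 * Example (illustrative sketch): shed load when the Tx queue is nearly full.
 * 'nb_txd' is the ring size the application passed to rte_eth_tx_queue_setup().
 *
 *   int used = rte_eth_tx_queue_count(port_id, queue_id);
 *
 *   if (used >= 0 && (uint16_t)used > (uint16_t)(nb_txd * 3 / 4)) {
 *       // Queue is more than 75% full: drop low-priority packets
 *       // instead of enqueueing them.
 *   }
 */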
6924 
6925 #ifdef __cplusplus
6926 }
6927 #endif
6928 
6929 #endif /* _RTE_ETHDEV_H_ */
6930