xref: /dpdk/lib/ethdev/rte_ethdev.h (revision f9dfb59edbccae50e7c5508348aa2b4b84413048)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4 
5 #ifndef _RTE_ETHDEV_H_
6 #define _RTE_ETHDEV_H_
7 
8 /**
9  * @file
10  *
11  * RTE Ethernet Device API
12  *
13  * The Ethernet Device API is composed of two parts:
14  *
15  * - The application-oriented Ethernet API that includes functions to setup
16  *   an Ethernet device (configure it, setup its Rx and Tx queues and start it),
17  *   to get its MAC address, the speed and the status of its physical link,
18  *   to receive and to transmit packets, and so on.
19  *
20  * - The driver-oriented Ethernet API that exports functions allowing
21  *   an Ethernet Poll Mode Driver (PMD) to allocate an Ethernet device instance,
22  *   create memzone for HW rings and process registered callbacks, and so on.
23  *   PMDs should include ethdev_driver.h instead of this header.
24  *
25  * By default, all the functions of the Ethernet Device API exported by a PMD
26  * are lock-free functions which assume to not be invoked in parallel on
27  * different logical cores to work on the same target object.  For instance,
28  * the receive function of a PMD cannot be invoked in parallel on two logical
29  * cores to poll the same Rx queue [of the same port]. Of course, this function
30  * can be invoked in parallel by different logical cores on different Rx queues.
31  * It is the responsibility of the upper level application to enforce this rule.
32  *
33  * If needed, parallel accesses by multiple logical cores to shared queues
34  * shall be explicitly protected by dedicated inline lock-aware functions
35  * built on top of their corresponding lock-free functions of the PMD API.
36  *
37  * In all functions of the Ethernet API, the Ethernet device is
38  * designated by an integer >= 0 named the device port identifier.
39  *
40  * At the Ethernet driver level, Ethernet devices are represented by a generic
41  * data structure of type *rte_eth_dev*.
42  *
43  * Ethernet devices are dynamically registered during the PCI probing phase
44  * performed at EAL initialization time.
45  * When an Ethernet device is being probed, an *rte_eth_dev* structure and
46  * a new port identifier are allocated for that device. Then, the eth_dev_init()
47  * function supplied by the Ethernet driver matching the probed PCI
48  * device is invoked to properly initialize the device.
49  *
50  * The role of the device init function consists of resetting the hardware,
51  * checking access to Non-volatile Memory (NVM), reading the MAC address
52  * from NVM etc.
53  *
54  * If the device init operation is successful, the correspondence between
55  * the port identifier assigned to the new device and its associated
56  * *rte_eth_dev* structure is effectively registered.
57  * Otherwise, both the *rte_eth_dev* structure and the port identifier are
58  * freed.
59  *
60  * The functions exported by the application Ethernet API to setup a device
61  * designated by its port identifier must be invoked in the following order:
62  *     - rte_eth_dev_configure()
63  *     - rte_eth_tx_queue_setup()
64  *     - rte_eth_rx_queue_setup()
65  *     - rte_eth_dev_start()
66  *
67  * Then, the network application can invoke, in any order, the functions
68  * exported by the Ethernet API to get the MAC address of a given device, to
69  * get the speed and the status of a device physical link, to receive/transmit
70  * [burst of] packets, and so on.
71  *
72  * If the application wants to change the configuration (i.e. call
73  * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
74  * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
75  * device and then do the reconfiguration before calling rte_eth_dev_start()
76  * again. The transmit and receive functions should not be invoked when the
77  * device or the queue is stopped.
78  *
79  * Please note that some configuration is not stored between calls to
80  * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
81  * be retained:
82  *
83  *     - MTU
84  *     - flow control settings
85  *     - receive mode configuration (promiscuous mode, all-multicast mode,
86  *       hardware checksum mode, RSS/VMDq settings etc.)
87  *     - VLAN filtering configuration
88  *     - default MAC address
89  *     - MAC addresses supplied to MAC address array
90  *     - flow director filtering mode (but not filtering rules)
91  *     - NIC queue statistics mappings
92  *
93  * The following configuration may be retained or not
94  * depending on the device capabilities:
95  *
96  *     - flow rules
97  *     - flow-related shared objects, e.g. indirect actions
98  *
99  * Any other configuration will not be stored and will need to be re-entered
100  * before a call to rte_eth_dev_start().
101  *
102  * Finally, a network application can close an Ethernet device by invoking the
103  * rte_eth_dev_close() function.
104  *
105  * Each function of the application Ethernet API invokes a specific function
106  * of the PMD that controls the target device designated by its port
107  * identifier.
108  * For this purpose, all device-specific functions of an Ethernet driver are
109  * supplied through a set of pointers contained in a generic structure of type
110  * *eth_dev_ops*.
111  * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev*
112  * structure by the device init function of the Ethernet driver, which is
113  * invoked during the PCI probing phase, as explained earlier.
114  *
115  * In other words, each function of the Ethernet API simply retrieves the
116  * *rte_eth_dev* structure associated with the device port identifier and
117  * performs an indirect invocation of the corresponding driver function
118  * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure.
119  *
120  * For performance reasons, the address of the burst-oriented Rx and Tx
121  * functions of the Ethernet driver are not contained in the *eth_dev_ops*
122  * structure. Instead, they are directly stored at the beginning of the
123  * *rte_eth_dev* structure to avoid an extra indirect memory access during
124  * their invocation.
125  *
126  * RTE Ethernet device drivers do not use interrupts for transmitting or
127  * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit
128  * functions to applications.
129  * Both receive and transmit functions are packet-burst oriented to minimize
130  * their cost per packet through the following optimizations:
131  *
132  * - Sharing among multiple packets the incompressible cost of the
133  *   invocation of receive/transmit functions.
134  *
135  * - Enabling receive/transmit functions to take advantage of burst-oriented
136  *   hardware features (L1 cache, prefetch instructions, NIC head/tail
137  *   registers) to minimize the number of CPU cycles per packet, for instance,
138  *   by avoiding useless read memory accesses to ring descriptors, or by
139  *   systematically using arrays of pointers that exactly fit L1 cache line
140  *   boundaries and sizes.
141  *
142  * The burst-oriented receive function does not provide any error notification,
143  * to avoid the corresponding overhead. As a hint, the upper-level application
144  * might check the status of the device link once being systematically returned
145  * a 0 value by the receive function of the driver for a given number of tries.
146  */
147 
148 #ifdef __cplusplus
149 extern "C" {
150 #endif
151 
152 #include <stdint.h>
153 
154 /* Use this macro to check if LRO API is supported */
155 #define RTE_ETHDEV_HAS_LRO_SUPPORT
156 
157 /* Alias RTE_LIBRTE_ETHDEV_DEBUG for backward compatibility. */
158 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
159 #define RTE_ETHDEV_DEBUG_RX
160 #define RTE_ETHDEV_DEBUG_TX
161 #endif
162 
163 #include <rte_cman.h>
164 #include <rte_compat.h>
165 #include <rte_log.h>
166 #include <rte_interrupts.h>
167 #include <rte_dev.h>
168 #include <rte_devargs.h>
169 #include <rte_bitops.h>
170 #include <rte_errno.h>
171 #include <rte_common.h>
172 #include <rte_config.h>
173 #include <rte_power_intrinsics.h>
174 
175 #include "rte_ethdev_trace_fp.h"
176 #include "rte_dev_info.h"
177 
178 extern int rte_eth_dev_logtype;
179 
180 #define RTE_ETHDEV_LOG(level, ...) \
181 	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
182 
183 struct rte_mbuf;
184 
185 /**
186  * Initializes a device iterator.
187  *
188  * This iterator allows accessing a list of devices matching some devargs.
189  *
190  * @param iter
191  *   Device iterator handle initialized by the function.
192  *   The fields bus_str and cls_str might be dynamically allocated,
193  *   and could be freed by calling rte_eth_iterator_cleanup().
194  *
195  * @param devargs
196  *   Device description string.
197  *
198  * @return
199  *   0 on successful initialization, negative otherwise.
200  */
201 int rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs);
202 
203 /**
204  * Iterates on devices with devargs filter.
205  * The ownership is not checked.
206  *
207  * The next port ID is returned, and the iterator is updated.
208  *
209  * @param iter
210  *   Device iterator handle initialized by rte_eth_iterator_init().
211  *   Some fields bus_str and cls_str might be freed when no more port is found,
212  *   by calling rte_eth_iterator_cleanup().
213  *
214  * @return
215  *   A port ID if found, RTE_MAX_ETHPORTS otherwise.
216  */
217 uint16_t rte_eth_iterator_next(struct rte_dev_iterator *iter);
218 
219 /**
220  * Free some allocated fields of the iterator.
221  *
222  * This function is automatically called by rte_eth_iterator_next()
223  * on the last iteration (i.e. when no more matching port is found).
224  *
225  * It is safe to call this function twice; it will do nothing more.
226  *
227  * @param iter
228  *   Device iterator handle initialized by rte_eth_iterator_init().
229  *   The fields bus_str and cls_str are freed if needed.
230  */
231 void rte_eth_iterator_cleanup(struct rte_dev_iterator *iter);
232 
233 /**
234  * Macro to iterate over all ethdev ports matching some devargs.
235  *
236  * If a break is done before the end of the loop,
237  * the function rte_eth_iterator_cleanup() must be called.
238  *
239  * @param id
240  *   Iterated port ID of type uint16_t.
241  * @param devargs
242  *   Device parameters input as string of type char*.
243  * @param iter
244  *   Iterator handle of type struct rte_dev_iterator, used internally.
245  */
246 #define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
247 	for (rte_eth_iterator_init(iter, devargs), \
248 	     id = rte_eth_iterator_next(iter); \
249 	     id != RTE_MAX_ETHPORTS; \
250 	     id = rte_eth_iterator_next(iter))
251 
252 /**
253  * A structure used to retrieve statistics for an Ethernet port.
254  * Not all statistics fields in struct rte_eth_stats are supported
255  * by any type of network interface card (NIC). If any statistics
256  * field is not supported, its value is 0.
257  * All byte-related statistics do not include Ethernet FCS regardless
258  * of whether these bytes have been delivered to the application
259  * (see RTE_ETH_RX_OFFLOAD_KEEP_CRC).
260  */
261 struct rte_eth_stats {
262 	uint64_t ipackets;  /**< Total number of successfully received packets. */
263 	uint64_t opackets;  /**< Total number of successfully transmitted packets.*/
264 	uint64_t ibytes;    /**< Total number of successfully received bytes. */
265 	uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
266 	/**
267 	 * Total of Rx packets dropped by the HW,
268 	 * because there are no available buffer (i.e. Rx queues are full).
269 	 */
270 	uint64_t imissed;
271 	uint64_t ierrors;   /**< Total number of erroneous received packets. */
272 	uint64_t oerrors;   /**< Total number of failed transmitted packets. */
273 	uint64_t rx_nombuf; /**< Total number of Rx mbuf allocation failures. */
274 	/* Queue stats are limited to max 256 queues */
275 	/** Total number of queue Rx packets. */
276 	uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
277 	/** Total number of queue Tx packets. */
278 	uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
279 	/** Total number of successfully received queue bytes. */
280 	uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
281 	/** Total number of successfully transmitted queue bytes. */
282 	uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
283 	/** Total number of queue packets received that are dropped. */
284 	uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
285 };
286 
287 /**@{@name Link speed capabilities
288  * Device supported speeds bitmap flags
289  */
290 #define RTE_ETH_LINK_SPEED_AUTONEG 0             /**< Autonegotiate (all speeds) */
291 #define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)  /**< Disable autoneg (fixed speed) */
292 #define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)  /**<  10 Mbps half-duplex */
293 #define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)  /**<  10 Mbps full-duplex */
294 #define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)  /**< 100 Mbps half-duplex */
295 #define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)  /**< 100 Mbps full-duplex */
296 #define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)  /**<   1 Gbps */
297 #define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)  /**< 2.5 Gbps */
298 #define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)  /**<   5 Gbps */
299 #define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)  /**<  10 Gbps */
300 #define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)  /**<  20 Gbps */
301 #define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10) /**<  25 Gbps */
302 #define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11) /**<  40 Gbps */
303 #define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12) /**<  50 Gbps */
304 #define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13) /**<  56 Gbps */
305 #define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14) /**< 100 Gbps */
306 #define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15) /**< 200 Gbps */
307 /**@}*/
308 
309 /**@{@name Link speed
310  * Ethernet numeric link speeds in Mbps
311  */
312 #define RTE_ETH_SPEED_NUM_NONE         0 /**< Not defined */
313 #define RTE_ETH_SPEED_NUM_10M         10 /**<  10 Mbps */
314 #define RTE_ETH_SPEED_NUM_100M       100 /**< 100 Mbps */
315 #define RTE_ETH_SPEED_NUM_1G        1000 /**<   1 Gbps */
316 #define RTE_ETH_SPEED_NUM_2_5G      2500 /**< 2.5 Gbps */
317 #define RTE_ETH_SPEED_NUM_5G        5000 /**<   5 Gbps */
318 #define RTE_ETH_SPEED_NUM_10G      10000 /**<  10 Gbps */
319 #define RTE_ETH_SPEED_NUM_20G      20000 /**<  20 Gbps */
320 #define RTE_ETH_SPEED_NUM_25G      25000 /**<  25 Gbps */
321 #define RTE_ETH_SPEED_NUM_40G      40000 /**<  40 Gbps */
322 #define RTE_ETH_SPEED_NUM_50G      50000 /**<  50 Gbps */
323 #define RTE_ETH_SPEED_NUM_56G      56000 /**<  56 Gbps */
324 #define RTE_ETH_SPEED_NUM_100G    100000 /**< 100 Gbps */
325 #define RTE_ETH_SPEED_NUM_200G    200000 /**< 200 Gbps */
326 #define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX /**< Unknown */
327 /**@}*/
328 
329 /**
330  * A structure used to retrieve link-level information of an Ethernet port.
331  */
332 __extension__
333 struct rte_eth_link {
334 	uint32_t link_speed;        /**< RTE_ETH_SPEED_NUM_ */
335 	uint16_t link_duplex  : 1;  /**< RTE_ETH_LINK_[HALF/FULL]_DUPLEX */
336 	uint16_t link_autoneg : 1;  /**< RTE_ETH_LINK_[AUTONEG/FIXED] */
337 	uint16_t link_status  : 1;  /**< RTE_ETH_LINK_[DOWN/UP] */
338 } __rte_aligned(8);      /**< aligned for atomic64 read/write */
339 
340 /**@{@name Link negotiation
341  * Constants used in link management.
342  */
343 #define RTE_ETH_LINK_HALF_DUPLEX 0 /**< Half-duplex connection (see link_duplex). */
344 #define RTE_ETH_LINK_FULL_DUPLEX 1 /**< Full-duplex connection (see link_duplex). */
345 #define RTE_ETH_LINK_DOWN        0 /**< Link is down (see link_status). */
346 #define RTE_ETH_LINK_UP          1 /**< Link is up (see link_status). */
347 #define RTE_ETH_LINK_FIXED       0 /**< No autonegotiation (see link_autoneg). */
348 #define RTE_ETH_LINK_AUTONEG     1 /**< Autonegotiated (see link_autoneg). */
349 #define RTE_ETH_LINK_MAX_STR_LEN 40 /**< Max length of default link string. */
350 /**@}*/
351 
352 /**
353  * A structure used to configure the ring threshold registers of an Rx/Tx
354  * queue for an Ethernet port.
355  */
356 struct rte_eth_thresh {
357 	uint8_t pthresh; /**< Ring prefetch threshold. */
358 	uint8_t hthresh; /**< Ring host threshold. */
359 	uint8_t wthresh; /**< Ring writeback threshold. */
360 };
361 
362 /**@{@name Multi-queue mode
363  * @see rte_eth_conf.rxmode.mq_mode.
364  */
365 #define RTE_ETH_MQ_RX_RSS_FLAG  RTE_BIT32(0) /**< Enable RSS. @see rte_eth_rss_conf */
366 #define RTE_ETH_MQ_RX_DCB_FLAG  RTE_BIT32(1) /**< Enable DCB. */
367 #define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2) /**< Enable VMDq. */
368 /**@}*/
369 
370 /**
371  *  A set of values to identify what method is to be used to route
372  *  packets to multiple queues.
373  */
374 enum rte_eth_rx_mq_mode {
375 	/** None of DCB, RSS or VMDq mode */
376 	RTE_ETH_MQ_RX_NONE = 0,
377 
378 	/** For Rx side, only RSS is on */
379 	RTE_ETH_MQ_RX_RSS = RTE_ETH_MQ_RX_RSS_FLAG,
380 	/** For Rx side,only DCB is on. */
381 	RTE_ETH_MQ_RX_DCB = RTE_ETH_MQ_RX_DCB_FLAG,
382 	/** Both DCB and RSS enable */
383 	RTE_ETH_MQ_RX_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
384 
385 	/** Only VMDq, no RSS nor DCB */
386 	RTE_ETH_MQ_RX_VMDQ_ONLY = RTE_ETH_MQ_RX_VMDQ_FLAG,
387 	/** RSS mode with VMDq */
388 	RTE_ETH_MQ_RX_VMDQ_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_VMDQ_FLAG,
389 	/** Use VMDq+DCB to route traffic to queues */
390 	RTE_ETH_MQ_RX_VMDQ_DCB = RTE_ETH_MQ_RX_VMDQ_FLAG | RTE_ETH_MQ_RX_DCB_FLAG,
391 	/** Enable both VMDq and DCB in VMDq */
392 	RTE_ETH_MQ_RX_VMDQ_DCB_RSS = RTE_ETH_MQ_RX_RSS_FLAG | RTE_ETH_MQ_RX_DCB_FLAG |
393 				 RTE_ETH_MQ_RX_VMDQ_FLAG,
394 };
395 
396 /**
397  * A set of values to identify what method is to be used to transmit
398  * packets using multi-TCs.
399  */
400 enum rte_eth_tx_mq_mode {
401 	RTE_ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
402 	RTE_ETH_MQ_TX_DCB,          /**< For Tx side,only DCB is on. */
403 	RTE_ETH_MQ_TX_VMDQ_DCB,     /**< For Tx side,both DCB and VT is on. */
404 	RTE_ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
405 };
406 
407 /**
408  * A structure used to configure the Rx features of an Ethernet port.
409  */
410 struct rte_eth_rxmode {
411 	/** The multi-queue packet distribution mode to be used, e.g. RSS. */
412 	enum rte_eth_rx_mq_mode mq_mode;
413 	uint32_t mtu;  /**< Requested MTU. */
414 	/** Maximum allowed size of LRO aggregated packet. */
415 	uint32_t max_lro_pkt_size;
416 	/**
417 	 * Per-port Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
418 	 * Only offloads set on rx_offload_capa field on rte_eth_dev_info
419 	 * structure are allowed to be set.
420 	 */
421 	uint64_t offloads;
422 
423 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
424 	void *reserved_ptrs[2];   /**< Reserved for future fields */
425 };
426 
427 /**
428  * VLAN types to indicate if it is for single VLAN, inner VLAN or outer VLAN.
429  * Note that single VLAN is treated the same as inner VLAN.
430  */
431 enum rte_vlan_type {
432 	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
433 	RTE_ETH_VLAN_TYPE_INNER, /**< Inner VLAN. */
434 	RTE_ETH_VLAN_TYPE_OUTER, /**< Single VLAN, or outer VLAN. */
435 	RTE_ETH_VLAN_TYPE_MAX,
436 };
437 
438 /**
439  * A structure used to describe a VLAN filter.
440  * If the bit corresponding to a VID is set, such VID is on.
441  */
442 struct rte_vlan_filter_conf {
443 	uint64_t ids[64];
444 };
445 
446 /**
447  * A structure used to configure the Receive Side Scaling (RSS) feature
448  * of an Ethernet port.
449  * If not NULL, the *rss_key* pointer of the *rss_conf* structure points
450  * to an array holding the RSS key to use for hashing specific header
451  * fields of received packets. The length of this array should be indicated
452  * by *rss_key_len* below. Otherwise, a default random hash key is used by
453  * the device driver.
454  *
455  * The *rss_key_len* field of the *rss_conf* structure indicates the length
456  * in bytes of the array pointed by *rss_key*. To be compatible, this length
457  * will be checked in i40e only. Others assume 40 bytes to be used as before.
458  *
459  * The *rss_hf* field of the *rss_conf* structure indicates the different
460  * types of IPv4/IPv6 packets to which the RSS hashing must be applied.
461  * Supplying an *rss_hf* equal to zero disables the RSS feature.
462  */
463 struct rte_eth_rss_conf {
464 	uint8_t *rss_key;    /**< If not NULL, 40-byte hash key. */
465 	uint8_t rss_key_len; /**< hash key length in bytes. */
466 	uint64_t rss_hf;     /**< Hash functions to apply - see below. */
467 };
468 
469 /*
470  * A packet can be identified by hardware as different flow types. Different
471  * NIC hardware may support different flow types.
472  * Basically, the NIC hardware identifies the flow type as deep protocol as
473  * possible, and exclusively. For example, if a packet is identified as
474  * 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
475  * though it is an actual IPV4 packet.
476  */
477 #define RTE_ETH_FLOW_UNKNOWN             0
478 #define RTE_ETH_FLOW_RAW                 1
479 #define RTE_ETH_FLOW_IPV4                2
480 #define RTE_ETH_FLOW_FRAG_IPV4           3
481 #define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
482 #define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
483 #define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
484 #define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
485 #define RTE_ETH_FLOW_IPV6                8
486 #define RTE_ETH_FLOW_FRAG_IPV6           9
487 #define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
488 #define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
489 #define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
490 #define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
491 #define RTE_ETH_FLOW_L2_PAYLOAD         14
492 #define RTE_ETH_FLOW_IPV6_EX            15
493 #define RTE_ETH_FLOW_IPV6_TCP_EX        16
494 #define RTE_ETH_FLOW_IPV6_UDP_EX        17
495 /** Consider device port number as a flow differentiator */
496 #define RTE_ETH_FLOW_PORT               18
497 #define RTE_ETH_FLOW_VXLAN              19 /**< VXLAN protocol based flow */
498 #define RTE_ETH_FLOW_GENEVE             20 /**< GENEVE protocol based flow */
499 #define RTE_ETH_FLOW_NVGRE              21 /**< NVGRE protocol based flow */
500 #define RTE_ETH_FLOW_VXLAN_GPE          22 /**< VXLAN-GPE protocol based flow */
501 #define RTE_ETH_FLOW_GTPU               23 /**< GTPU protocol based flow */
502 #define RTE_ETH_FLOW_MAX                24
503 
504 /*
505  * Below macros are defined for RSS offload types, they can be used to
506  * fill rte_eth_rss_conf.rss_hf or rte_flow_action_rss.types.
507  */
508 #define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
509 #define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
510 #define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
511 #define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
512 #define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
513 #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
514 #define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
515 #define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
516 #define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
517 #define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
518 #define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
519 #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
520 #define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
521 #define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
522 #define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
523 #define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
524 #define RTE_ETH_RSS_PORT               RTE_BIT64(18)
525 #define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
526 #define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
527 #define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
528 #define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
529 #define RTE_ETH_RSS_ETH                RTE_BIT64(24)
530 #define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
531 #define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
532 #define RTE_ETH_RSS_ESP                RTE_BIT64(27)
533 #define RTE_ETH_RSS_AH                 RTE_BIT64(28)
534 #define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
535 #define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
536 #define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
537 #define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
538 #define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
539 #define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
540 
541 /**
542  * The RTE_ETH_RSS_L4_CHKSUM works on checksum field of any L4 header.
543  * It is similar to RTE_ETH_RSS_PORT that they don't specify the specific type of
544  * L4 header. This macro is defined to replace some specific L4 (TCP/UDP/SCTP)
545  * checksum type for constructing the use of RSS offload bits.
546  *
547  * Due to above reason, some old APIs (and configuration) don't support
548  * RTE_ETH_RSS_L4_CHKSUM. The rte_flow RSS API supports it.
549  *
550  * For the case that checksum is not used in an UDP header,
551  * it takes the reserved value 0 as input for the hash function.
552  */
553 #define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
554 
555 #define RTE_ETH_RSS_L2TPV2             RTE_BIT64(36)
556 
557 /*
558  * We use the following macros to combine with above RTE_ETH_RSS_* for
559  * more specific input set selection. These bits are defined starting
560  * from the high end of the 64 bits.
561  * Note: If we use above RTE_ETH_RSS_* without SRC/DST_ONLY, it represents
562  * both SRC and DST are taken into account. If SRC_ONLY and DST_ONLY of
563  * the same level are used simultaneously, it is the same case as none of
564  * them are added.
565  */
566 #define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
567 #define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
568 #define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
569 #define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
570 #define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
571 #define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)
572 
573 /*
574  * Only select IPV6 address prefix as RSS input set according to
575  * https://tools.ietf.org/html/rfc6052
576  * Must be combined with RTE_ETH_RSS_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_UDP,
577  * RTE_ETH_RSS_NONFRAG_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP.
578  */
579 #define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
580 #define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
581 #define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
582 #define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
583 #define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
584 #define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)
585 
586 /*
587  * Use the following macros to combine with the above layers
588  * to choose inner and outer layers or both for RSS computation.
589  * Bits 50 and 51 are reserved for this.
590  */
591 
592 /**
593  * level 0, requests the default behavior.
594  * Depending on the packet type, it can mean outermost, innermost,
595  * anything in between or even no RSS.
596  * It basically stands for the innermost encapsulation level RSS
597  * can be performed on according to PMD and device capabilities.
598  */
599 #define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (UINT64_C(0) << 50)
600 
601 /**
602  * level 1, requests RSS to be performed on the outermost packet
603  * encapsulation level.
604  */
605 #define RTE_ETH_RSS_LEVEL_OUTERMOST    (UINT64_C(1) << 50)
606 
607 /**
608  * level 2, requests RSS to be performed on the specified inner packet
609  * encapsulation level, from outermost to innermost (lower to higher values).
610  */
611 #define RTE_ETH_RSS_LEVEL_INNERMOST    (UINT64_C(2) << 50)
612 #define RTE_ETH_RSS_LEVEL_MASK         (UINT64_C(3) << 50)
613 
614 #define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
615 
616 /**
617  * For input set change of hash filter, if SRC_ONLY and DST_ONLY of
618  * the same level are used simultaneously, it is the same case as
619  * none of them are added.
620  *
621  * @param rss_hf
622  *   RSS types with SRC/DST_ONLY.
623  * @return
624  *   RSS types.
625  */
626 static inline uint64_t
627 rte_eth_rss_hf_refine(uint64_t rss_hf)
628 {
629 	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
630 		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
631 
632 	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
633 		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);
634 
635 	return rss_hf;
636 }
637 
638 #define RTE_ETH_RSS_IPV6_PRE32 ( \
639 		RTE_ETH_RSS_IPV6 | \
640 		RTE_ETH_RSS_L3_PRE32)
641 
642 #define RTE_ETH_RSS_IPV6_PRE40 ( \
643 		RTE_ETH_RSS_IPV6 | \
644 		RTE_ETH_RSS_L3_PRE40)
645 
646 #define RTE_ETH_RSS_IPV6_PRE48 ( \
647 		RTE_ETH_RSS_IPV6 | \
648 		RTE_ETH_RSS_L3_PRE48)
649 
650 #define RTE_ETH_RSS_IPV6_PRE56 ( \
651 		RTE_ETH_RSS_IPV6 | \
652 		RTE_ETH_RSS_L3_PRE56)
653 
654 #define RTE_ETH_RSS_IPV6_PRE64 ( \
655 		RTE_ETH_RSS_IPV6 | \
656 		RTE_ETH_RSS_L3_PRE64)
657 
658 #define RTE_ETH_RSS_IPV6_PRE96 ( \
659 		RTE_ETH_RSS_IPV6 | \
660 		RTE_ETH_RSS_L3_PRE96)
661 
662 #define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
663 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
664 		RTE_ETH_RSS_L3_PRE32)
665 
666 #define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
667 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
668 		RTE_ETH_RSS_L3_PRE40)
669 
670 #define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
671 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
672 		RTE_ETH_RSS_L3_PRE48)
673 
674 #define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
675 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
676 		RTE_ETH_RSS_L3_PRE56)
677 
678 #define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
679 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
680 		RTE_ETH_RSS_L3_PRE64)
681 
682 #define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
683 		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
684 		RTE_ETH_RSS_L3_PRE96)
685 
686 #define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
687 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
688 		RTE_ETH_RSS_L3_PRE32)
689 
690 #define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
691 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
692 		RTE_ETH_RSS_L3_PRE40)
693 
694 #define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
695 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
696 		RTE_ETH_RSS_L3_PRE48)
697 
698 #define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
699 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
700 		RTE_ETH_RSS_L3_PRE56)
701 
702 #define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
703 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
704 		RTE_ETH_RSS_L3_PRE64)
705 
706 #define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
707 		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
708 		RTE_ETH_RSS_L3_PRE96)
709 
710 #define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
711 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
712 		RTE_ETH_RSS_L3_PRE32)
713 
714 #define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
715 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
716 		RTE_ETH_RSS_L3_PRE40)
717 
718 #define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
719 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
720 		RTE_ETH_RSS_L3_PRE48)
721 
722 #define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
723 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
724 		RTE_ETH_RSS_L3_PRE56)
725 
726 #define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
727 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
728 		RTE_ETH_RSS_L3_PRE64)
729 
730 #define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
731 		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
732 		RTE_ETH_RSS_L3_PRE96)
733 
734 #define RTE_ETH_RSS_IP ( \
735 	RTE_ETH_RSS_IPV4 | \
736 	RTE_ETH_RSS_FRAG_IPV4 | \
737 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
738 	RTE_ETH_RSS_IPV6 | \
739 	RTE_ETH_RSS_FRAG_IPV6 | \
740 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
741 	RTE_ETH_RSS_IPV6_EX)
742 
743 #define RTE_ETH_RSS_UDP ( \
744 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
745 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
746 	RTE_ETH_RSS_IPV6_UDP_EX)
747 
748 #define RTE_ETH_RSS_TCP ( \
749 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
750 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
751 	RTE_ETH_RSS_IPV6_TCP_EX)
752 
753 #define RTE_ETH_RSS_SCTP ( \
754 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
755 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
756 
757 #define RTE_ETH_RSS_TUNNEL ( \
758 	RTE_ETH_RSS_VXLAN  | \
759 	RTE_ETH_RSS_GENEVE | \
760 	RTE_ETH_RSS_NVGRE)
761 
762 #define RTE_ETH_RSS_VLAN ( \
763 	RTE_ETH_RSS_S_VLAN  | \
764 	RTE_ETH_RSS_C_VLAN)
765 
766 /** Mask of valid RSS hash protocols */
767 #define RTE_ETH_RSS_PROTO_MASK ( \
768 	RTE_ETH_RSS_IPV4 | \
769 	RTE_ETH_RSS_FRAG_IPV4 | \
770 	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
771 	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
772 	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
773 	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
774 	RTE_ETH_RSS_IPV6 | \
775 	RTE_ETH_RSS_FRAG_IPV6 | \
776 	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
777 	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
778 	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
779 	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
780 	RTE_ETH_RSS_L2_PAYLOAD | \
781 	RTE_ETH_RSS_IPV6_EX | \
782 	RTE_ETH_RSS_IPV6_TCP_EX | \
783 	RTE_ETH_RSS_IPV6_UDP_EX | \
784 	RTE_ETH_RSS_PORT  | \
785 	RTE_ETH_RSS_VXLAN | \
786 	RTE_ETH_RSS_GENEVE | \
787 	RTE_ETH_RSS_NVGRE | \
788 	RTE_ETH_RSS_MPLS)
789 
790 /*
791  * Definitions used for redirection table entry size.
792  * Some RSS RETA sizes may not be supported by some drivers, check the
793  * documentation or the description of relevant functions for more details.
794  */
795 #define RTE_ETH_RSS_RETA_SIZE_64  64
796 #define RTE_ETH_RSS_RETA_SIZE_128 128
797 #define RTE_ETH_RSS_RETA_SIZE_256 256
798 #define RTE_ETH_RSS_RETA_SIZE_512 512
799 #define RTE_ETH_RETA_GROUP_SIZE   64
800 
801 /**@{@name VMDq and DCB maximums */
802 #define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDq VLAN filters. */
803 #define RTE_ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
804 #define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDq DCB queues. */
805 #define RTE_ETH_DCB_NUM_QUEUES          128 /**< Maximum nb. of DCB queues. */
806 /**@}*/
807 
808 /**@{@name DCB capabilities */
809 #define RTE_ETH_DCB_PG_SUPPORT      RTE_BIT32(0) /**< Priority Group(ETS) support. */
810 #define RTE_ETH_DCB_PFC_SUPPORT     RTE_BIT32(1) /**< Priority Flow Control support. */
811 /**@}*/
812 
813 /**@{@name VLAN offload bits */
814 #define RTE_ETH_VLAN_STRIP_OFFLOAD   0x0001 /**< VLAN Strip  On/Off */
815 #define RTE_ETH_VLAN_FILTER_OFFLOAD  0x0002 /**< VLAN Filter On/Off */
816 #define RTE_ETH_VLAN_EXTEND_OFFLOAD  0x0004 /**< VLAN Extend On/Off */
817 #define RTE_ETH_QINQ_STRIP_OFFLOAD   0x0008 /**< QINQ Strip On/Off */
818 
819 #define RTE_ETH_VLAN_STRIP_MASK      0x0001 /**< VLAN Strip  setting mask */
820 #define RTE_ETH_VLAN_FILTER_MASK     0x0002 /**< VLAN Filter  setting mask*/
821 #define RTE_ETH_VLAN_EXTEND_MASK     0x0004 /**< VLAN Extend  setting mask*/
822 #define RTE_ETH_QINQ_STRIP_MASK      0x0008 /**< QINQ Strip  setting mask */
823 #define RTE_ETH_VLAN_ID_MAX          0x0FFF /**< VLAN ID is in lower 12 bits*/
824 /**@}*/
825 
826 /* Definitions used for receive MAC address   */
827 #define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128 /**< Maximum nb. of receive mac addr. */
828 
829 /* Definitions used for unicast hash  */
830 #define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128 /**< Maximum nb. of UC hash array. */
831 
832 /**@{@name VMDq Rx mode
833  * @see rte_eth_vmdq_rx_conf.rx_mode
834  */
835 /** Accept untagged packets. */
836 #define RTE_ETH_VMDQ_ACCEPT_UNTAG      RTE_BIT32(0)
837 /** Accept packets in multicast table. */
838 #define RTE_ETH_VMDQ_ACCEPT_HASH_MC    RTE_BIT32(1)
839 /** Accept packets in unicast table. */
840 #define RTE_ETH_VMDQ_ACCEPT_HASH_UC    RTE_BIT32(2)
841 /** Accept broadcast packets. */
842 #define RTE_ETH_VMDQ_ACCEPT_BROADCAST  RTE_BIT32(3)
843 /** Multicast promiscuous. */
844 #define RTE_ETH_VMDQ_ACCEPT_MULTICAST  RTE_BIT32(4)
845 /**@}*/
846 
847 /**
848  * A structure used to configure 64 entries of Redirection Table of the
849  * Receive Side Scaling (RSS) feature of an Ethernet port. To configure
850  * more than 64 entries supported by hardware, an array of this structure
851  * is needed.
852  */
853 struct rte_eth_rss_reta_entry64 {
854 	/** Mask bits indicate which entries need to be updated/queried. */
855 	uint64_t mask;
856 	/** Group of 64 redirection table entries. */
857 	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];
858 };
859 
860 /**
861  * This enum indicates the possible number of traffic classes
862  * in DCB configurations
863  */
864 enum rte_eth_nb_tcs {
865 	RTE_ETH_4_TCS = 4, /**< 4 TCs with DCB. */
866 	RTE_ETH_8_TCS = 8  /**< 8 TCs with DCB. */
867 };
868 
869 /**
870  * This enum indicates the possible number of queue pools
871  * in VMDq configurations.
872  */
873 enum rte_eth_nb_pools {
874 	RTE_ETH_8_POOLS = 8,    /**< 8 VMDq pools. */
875 	RTE_ETH_16_POOLS = 16,  /**< 16 VMDq pools. */
876 	RTE_ETH_32_POOLS = 32,  /**< 32 VMDq pools. */
877 	RTE_ETH_64_POOLS = 64   /**< 64 VMDq pools. */
878 };
879 
880 /* This structure may be extended in future. */
881 struct rte_eth_dcb_rx_conf {
882 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
883 	/** Traffic class each UP mapped to. */
884 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
885 };
886 
887 struct rte_eth_vmdq_dcb_tx_conf {
888 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
889 	/** Traffic class each UP mapped to. */
890 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
891 };
892 
893 struct rte_eth_dcb_tx_conf {
894 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
895 	/** Traffic class each UP mapped to. */
896 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
897 };
898 
899 struct rte_eth_vmdq_tx_conf {
900 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq mode, 64 pools. */
901 };
902 
903 /**
904  * A structure used to configure the VMDq+DCB feature
905  * of an Ethernet port.
906  *
907  * Using this feature, packets are routed to a pool of queues, based
908  * on the VLAN ID in the VLAN tag, and then to a specific queue within
909  * that pool, using the user priority VLAN tag field.
910  *
911  * A default pool may be used, if desired, to route all traffic which
912  * does not match the VLAN filter rules.
913  */
914 struct rte_eth_vmdq_dcb_conf {
915 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
916 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
917 	uint8_t default_pool; /**< The default pool, if applicable */
918 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
919 	struct {
920 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
921 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
922 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
923 	/** Selects a queue in a pool */
924 	uint8_t dcb_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES];
925 };
926 
927 /**
928  * A structure used to configure the VMDq feature of an Ethernet port when
929  * not combined with the DCB feature.
930  *
931  * Using this feature, packets are routed to a pool of queues. By default,
932  * the pool selection is based on the MAC address, the VLAN ID in the
933  * VLAN tag as specified in the pool_map array.
934  * Passing the RTE_ETH_VMDQ_ACCEPT_UNTAG in the rx_mode field allows pool
935  * selection using only the MAC address. MAC address to pool mapping is done
936  * using the rte_eth_dev_mac_addr_add function, with the pool parameter
937  * corresponding to the pool ID.
938  *
939  * Queue selection within the selected pool will be done using RSS when
940  * it is enabled or revert to the first queue of the pool if not.
941  *
942  * A default pool may be used, if desired, to route all traffic which
943  * does not match the VLAN filter rules or any pool MAC address.
944  */
945 struct rte_eth_vmdq_rx_conf {
946 	enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
947 	uint8_t enable_default_pool; /**< If non-zero, use a default pool */
948 	uint8_t default_pool; /**< The default pool, if applicable */
949 	uint8_t enable_loop_back; /**< Enable VT loop back */
950 	uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
951 	uint32_t rx_mode; /**< Flags from RTE_ETH_VMDQ_ACCEPT_* */
952 	struct {
953 		uint16_t vlan_id; /**< The VLAN ID of the received frame */
954 		uint64_t pools;   /**< Bitmask of pools for packet Rx */
955 	} pool_map[RTE_ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq VLAN pool maps. */
956 };
957 
958 /**
959  * A structure used to configure the Tx features of an Ethernet port.
960  */
961 struct rte_eth_txmode {
962 	enum rte_eth_tx_mq_mode mq_mode; /**< Tx multi-queues mode. */
963 	/**
964 	 * Per-port Tx offloads to be set using RTE_ETH_TX_OFFLOAD_* flags.
965 	 * Only offloads set on tx_offload_capa field on rte_eth_dev_info
966 	 * structure are allowed to be set.
967 	 */
968 	uint64_t offloads;
969 
970 	uint16_t pvid;
971 	__extension__
972 	uint8_t /** If set, reject sending out tagged pkts */
973 		hw_vlan_reject_tagged : 1,
974 		/** If set, reject sending out untagged pkts */
975 		hw_vlan_reject_untagged : 1,
976 		/** If set, enable port based VLAN insertion */
977 		hw_vlan_insert_pvid : 1;
978 
979 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
980 	void *reserved_ptrs[2];   /**< Reserved for future fields */
981 };
982 
983 /**
984  * @warning
985  * @b EXPERIMENTAL: this structure may change without prior notice.
986  *
987  * A structure used to configure an Rx packet segment to split.
988  *
989  * If RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT flag is set in offloads field,
990  * the PMD will split the received packets into multiple segments
991  * according to the specification in the description array:
992  *
993  * - The first network buffer will be allocated from the memory pool,
994  *   specified in the first array element, the second buffer, from the
995  *   pool in the second element, and so on.
996  *
997  * - The proto_hdrs in the elements define the split position of
998  *   received packets.
999  *
1000  * - The offsets from the segment description elements specify
1001  *   the data offset from the buffer beginning except the first mbuf.
1002  *   The first segment offset is added with RTE_PKTMBUF_HEADROOM.
1003  *
1004  * - The lengths in the elements define the maximal data amount
1005  *   being received to each segment. The receiving starts with filling
1006  *   up the first mbuf data buffer up to specified length. If the
1007  *   there are data remaining (packet is longer than buffer in the first
1008  *   mbuf) the following data will be pushed to the next segment
1009  *   up to its own length, and so on.
1010  *
1011  * - If the length in the segment description element is zero
1012  *   the actual buffer size will be deduced from the appropriate
1013  *   memory pool properties.
1014  *
1015  * - If there is not enough elements to describe the buffer for entire
1016  *   packet of maximal length the following parameters will be used
1017  *   for the all remaining segments:
1018  *     - pool from the last valid element
1019  *     - the buffer size from this pool
1020  *     - zero offset
1021  *
1022  * - Length based buffer split:
1023  *     - mp, length, offset should be configured.
1024  *     - The proto_hdr field must be 0.
1025  *
1026  * - Protocol header based buffer split:
1027  *     - mp, offset, proto_hdr should be configured.
1028  *     - The length field must be 0.
1029  *     - The proto_hdr field in the last segment should be 0.
1030  *
1031  * - When protocol header split is enabled, NIC may receive packets
1032  *   which do not match all the protocol headers within the Rx segments.
1033  *   At this point, NIC will have two possible split behaviors according to
1034  *   matching results, one is exact match, another is longest match.
1035  *   The split result of NIC must belong to one of them.
1036  *   The exact match means NIC only do split when the packets exactly match all
1037  *   the protocol headers in the segments.
1038  *   Otherwise, the whole packet will be put into the last valid mempool.
1039  *   The longest match means NIC will do split until packets mismatch
1040  *   the protocol header in the segments.
1041  *   The rest will be put into the last valid pool.
1042  */
1043 struct rte_eth_rxseg_split {
1044 	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
1045 	uint16_t length; /**< Segment data length, configures split point. */
1046 	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
1047 	/**
1048 	 * proto_hdr defines a bit mask of the protocol sequence as RTE_PTYPE_*.
1049 	 * The last RTE_PTYPE* in the mask indicates the split position.
1050 	 *
1051 	 * If one protocol header is defined to split packets into two segments,
1052 	 * for non-tunneling packets, the complete protocol sequence should be defined.
1053 	 * For tunneling packets, for simplicity, only the tunnel and inner part of
1054 	 * complete protocol sequence is required.
1055 	 * If several protocol headers are defined to split packets into multi-segments,
1056 	 * the repeated parts of adjacent segments should be omitted.
1057 	 */
1058 	uint32_t proto_hdr;
1059 };
1060 
1061 /**
1062  * @warning
1063  * @b EXPERIMENTAL: this structure may change without prior notice.
1064  *
1065  * A common structure used to describe Rx packet segment properties.
1066  */
1067 union rte_eth_rxseg {
1068 	/* The settings for buffer split offload. */
1069 	struct rte_eth_rxseg_split split;
1070 	/* The other features settings should be added here. */
1071 };
1072 
1073 /**
1074  * A structure used to configure an Rx ring of an Ethernet port.
1075  */
1076 struct rte_eth_rxconf {
1077 	struct rte_eth_thresh rx_thresh; /**< Rx ring threshold registers. */
1078 	uint16_t rx_free_thresh; /**< Drives the freeing of Rx descriptors. */
1079 	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
1080 	uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1081 	uint16_t rx_nseg; /**< Number of descriptions in rx_seg array. */
1082 	/**
1083 	 * Share group index in Rx domain and switch domain.
1084 	 * Non-zero value to enable Rx queue share, zero value disable share.
1085 	 * PMD is responsible for Rx queue consistency checks to avoid member
1086 	 * port's configuration contradict to each other.
1087 	 */
1088 	uint16_t share_group;
1089 	uint16_t share_qid; /**< Shared Rx queue ID in group */
1090 	/**
1091 	 * Per-queue Rx offloads to be set using RTE_ETH_RX_OFFLOAD_* flags.
1092 	 * Only offloads set on rx_queue_offload_capa or rx_offload_capa
1093 	 * fields on rte_eth_dev_info structure are allowed to be set.
1094 	 */
1095 	uint64_t offloads;
1096 	/**
1097 	 * Points to the array of segment descriptions for an entire packet.
1098 	 * Array elements are properties for consecutive Rx segments.
1099 	 *
1100 	 * The supported capabilities of receiving segmentation is reported
1101 	 * in rte_eth_dev_info.rx_seg_capa field.
1102 	 */
1103 	union rte_eth_rxseg *rx_seg;
1104 
1105 	/**
1106 	 * Array of mempools to allocate Rx buffers from.
1107 	 *
1108 	 * This provides support for multiple mbuf pools per Rx queue.
1109 	 * The capability is reported in device info via positive
1110 	 * max_rx_mempools.
1111 	 *
1112 	 * It could be useful for more efficient usage of memory when an
1113 	 * application creates different mempools to steer the specific
1114 	 * size of the packet.
1115 	 *
1116 	 * If many mempools are specified, packets received using Rx
1117 	 * burst may belong to any provided mempool. From ethdev user point
1118 	 * of view it is undefined how PMD/NIC chooses mempool for a packet.
1119 	 *
1120 	 * If Rx scatter is enabled, a packet may be delivered using a chain
1121 	 * of mbufs obtained from single mempool or multiple mempools based
1122 	 * on the NIC implementation.
1123 	 */
1124 	struct rte_mempool **rx_mempools;
1125 	uint16_t rx_nmempool; /** < Number of Rx mempools */
1126 
1127 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1128 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1129 };
1130 
1131 /**
1132  * A structure used to configure a Tx ring of an Ethernet port.
1133  */
1134 struct rte_eth_txconf {
1135 	struct rte_eth_thresh tx_thresh; /**< Tx ring threshold registers. */
1136 	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
1137 	uint16_t tx_free_thresh; /**< Start freeing Tx buffers if there are
1138 				      less free descriptors than this value. */
1139 
1140 	uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
1141 	/**
1142 	 * Per-queue Tx offloads to be set  using RTE_ETH_TX_OFFLOAD_* flags.
1143 	 * Only offloads set on tx_queue_offload_capa or tx_offload_capa
1144 	 * fields on rte_eth_dev_info structure are allowed to be set.
1145 	 */
1146 	uint64_t offloads;
1147 
1148 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1149 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1150 };
1151 
1152 /**
1153  * @warning
1154  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1155  *
1156  * A structure used to return the Tx or Rx hairpin queue capabilities.
1157  */
1158 struct rte_eth_hairpin_queue_cap {
1159 	/**
1160 	 * When set, PMD supports placing descriptors and/or data buffers
1161 	 * in dedicated device memory.
1162 	 */
1163 	uint32_t locked_device_memory:1;
1164 
1165 	/**
1166 	 * When set, PMD supports placing descriptors and/or data buffers
1167 	 * in host memory managed by DPDK.
1168 	 */
1169 	uint32_t rte_memory:1;
1170 
1171 	uint32_t reserved:30; /**< Reserved for future fields */
1172 };
1173 
1174 /**
1175  * @warning
1176  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1177  *
1178  * A structure used to return the hairpin capabilities that are supported.
1179  */
1180 struct rte_eth_hairpin_cap {
1181 	/** The max number of hairpin queues (different bindings). */
1182 	uint16_t max_nb_queues;
1183 	/** Max number of Rx queues to be connected to one Tx queue. */
1184 	uint16_t max_rx_2_tx;
1185 	/** Max number of Tx queues to be connected to one Rx queue. */
1186 	uint16_t max_tx_2_rx;
1187 	uint16_t max_nb_desc; /**< The max num of descriptors. */
1188 	struct rte_eth_hairpin_queue_cap rx_cap; /**< Rx hairpin queue capabilities. */
1189 	struct rte_eth_hairpin_queue_cap tx_cap; /**< Tx hairpin queue capabilities. */
1190 };
1191 
1192 #define RTE_ETH_MAX_HAIRPIN_PEERS 32
1193 
1194 /**
1195  * @warning
1196  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1197  *
1198  * A structure used to hold hairpin peer data.
1199  */
1200 struct rte_eth_hairpin_peer {
1201 	uint16_t port; /**< Peer port. */
1202 	uint16_t queue; /**< Peer queue. */
1203 };
1204 
1205 /**
1206  * @warning
1207  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1208  *
1209  * A structure used to configure hairpin binding.
1210  */
1211 struct rte_eth_hairpin_conf {
1212 	uint32_t peer_count:16; /**< The number of peers. */
1213 
1214 	/**
1215 	 * Explicit Tx flow rule mode.
1216 	 * One hairpin pair of queues should have the same attribute.
1217 	 *
1218 	 * - When set, the user should be responsible for inserting the hairpin
1219 	 *   Tx part flows and removing them.
1220 	 * - When clear, the PMD will try to handle the Tx part of the flows,
1221 	 *   e.g., by splitting one flow into two parts.
1222 	 */
1223 	uint32_t tx_explicit:1;
1224 
1225 	/**
1226 	 * Manually bind hairpin queues.
1227 	 * One hairpin pair of queues should have the same attribute.
1228 	 *
1229 	 * - When set, to enable hairpin, the user should call the hairpin bind
1230 	 *   function after all the queues are set up properly and the ports are
1231 	 *   started. Also, the hairpin unbind function should be called
1232 	 *   accordingly before stopping a port that with hairpin configured.
1233 	 * - When cleared, the PMD will try to enable the hairpin with the queues
1234 	 *   configured automatically during port start.
1235 	 */
1236 	uint32_t manual_bind:1;
1237 
1238 	/**
1239 	 * Use locked device memory as a backing storage.
1240 	 *
1241 	 * - When set, PMD will attempt place descriptors and/or data buffers
1242 	 *   in dedicated device memory.
1243 	 * - When cleared, PMD will use default memory type as a backing storage.
1244 	 *   Please refer to PMD documentation for details.
1245 	 *
1246 	 * API user should check if PMD supports this configuration flag using
1247 	 * @see rte_eth_dev_hairpin_capability_get.
1248 	 */
1249 	uint32_t use_locked_device_memory:1;
1250 
1251 	/**
1252 	 * Use DPDK memory as backing storage.
1253 	 *
1254 	 * - When set, PMD will attempt place descriptors and/or data buffers
1255 	 *   in host memory managed by DPDK.
1256 	 * - When cleared, PMD will use default memory type as a backing storage.
1257 	 *   Please refer to PMD documentation for details.
1258 	 *
1259 	 * API user should check if PMD supports this configuration flag using
1260 	 * @see rte_eth_dev_hairpin_capability_get.
1261 	 */
1262 	uint32_t use_rte_memory:1;
1263 
1264 	/**
1265 	 * Force usage of hairpin memory configuration.
1266 	 *
1267 	 * - When set, PMD will attempt to use specified memory settings.
1268 	 *   If resource allocation fails, then hairpin queue allocation
1269 	 *   will result in an error.
1270 	 * - When clear, PMD will attempt to use specified memory settings.
1271 	 *   If resource allocation fails, then PMD will retry
1272 	 *   allocation with default configuration.
1273 	 */
1274 	uint32_t force_memory:1;
1275 
1276 	uint32_t reserved:11; /**< Reserved bits. */
1277 
1278 	struct rte_eth_hairpin_peer peers[RTE_ETH_MAX_HAIRPIN_PEERS];
1279 };
1280 
1281 /**
1282  * A structure contains information about HW descriptor ring limitations.
1283  */
1284 struct rte_eth_desc_lim {
1285 	uint16_t nb_max;   /**< Max allowed number of descriptors. */
1286 	uint16_t nb_min;   /**< Min allowed number of descriptors. */
1287 	uint16_t nb_align; /**< Number of descriptors should be aligned to. */
1288 
1289 	/**
1290 	 * Max allowed number of segments per whole packet.
1291 	 *
1292 	 * - For TSO packet this is the total number of data descriptors allowed
1293 	 *   by device.
1294 	 *
1295 	 * @see nb_mtu_seg_max
1296 	 */
1297 	uint16_t nb_seg_max;
1298 
1299 	/**
1300 	 * Max number of segments per one MTU.
1301 	 *
1302 	 * - For non-TSO packet, this is the maximum allowed number of segments
1303 	 *   in a single transmit packet.
1304 	 *
1305 	 * - For TSO packet each segment within the TSO may span up to this
1306 	 *   value.
1307 	 *
1308 	 * @see nb_seg_max
1309 	 */
1310 	uint16_t nb_mtu_seg_max;
1311 };
1312 
1313 /**
1314  * This enum indicates the flow control mode
1315  */
1316 enum rte_eth_fc_mode {
1317 	RTE_ETH_FC_NONE = 0, /**< Disable flow control. */
1318 	RTE_ETH_FC_RX_PAUSE, /**< Rx pause frame, enable flowctrl on Tx side. */
1319 	RTE_ETH_FC_TX_PAUSE, /**< Tx pause frame, enable flowctrl on Rx side. */
1320 	RTE_ETH_FC_FULL      /**< Enable flow control on both side. */
1321 };
1322 
1323 /**
1324  * A structure used to configure Ethernet flow control parameter.
1325  * These parameters will be configured into the register of the NIC.
1326  * Please refer to the corresponding data sheet for proper value.
1327  */
1328 struct rte_eth_fc_conf {
1329 	uint32_t high_water;  /**< High threshold value to trigger XOFF */
1330 	uint32_t low_water;   /**< Low threshold value to trigger XON */
1331 	uint16_t pause_time;  /**< Pause quota in the Pause frame */
1332 	uint16_t send_xon;    /**< Is XON frame need be sent */
1333 	enum rte_eth_fc_mode mode;  /**< Link flow control mode */
1334 	uint8_t mac_ctrl_frame_fwd; /**< Forward MAC control frames */
1335 	uint8_t autoneg;      /**< Use Pause autoneg */
1336 };
1337 
1338 /**
1339  * A structure used to configure Ethernet priority flow control parameter.
1340  * These parameters will be configured into the register of the NIC.
1341  * Please refer to the corresponding data sheet for proper value.
1342  */
1343 struct rte_eth_pfc_conf {
1344 	struct rte_eth_fc_conf fc; /**< General flow control parameter. */
1345 	uint8_t priority;          /**< VLAN User Priority. */
1346 };
1347 
1348 /**
1349  * @warning
1350  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1351  *
1352  * A structure used to retrieve information of queue based PFC.
1353  */
1354 struct rte_eth_pfc_queue_info {
1355 	/**
1356 	 * Maximum supported traffic class as per PFC (802.1Qbb) specification.
1357 	 */
1358 	uint8_t tc_max;
1359 	/** PFC queue mode capabilities. */
1360 	enum rte_eth_fc_mode mode_capa;
1361 };
1362 
1363 /**
1364  * @warning
1365  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
1366  *
1367  * A structure used to configure Ethernet priority flow control parameters for
1368  * ethdev queues.
1369  *
1370  * rte_eth_pfc_queue_conf::rx_pause structure shall be used to configure given
1371  * tx_qid with corresponding tc. When ethdev device receives PFC frame with
1372  * rte_eth_pfc_queue_conf::rx_pause::tc, traffic will be paused on
1373  * rte_eth_pfc_queue_conf::rx_pause::tx_qid for that tc.
1374  *
1375  * rte_eth_pfc_queue_conf::tx_pause structure shall be used to configure given
1376  * rx_qid. When rx_qid is congested, PFC frames are generated with
1377  * rte_eth_pfc_queue_conf::rx_pause::tc and
1378  * rte_eth_pfc_queue_conf::rx_pause::pause_time to the peer.
1379  */
1380 struct rte_eth_pfc_queue_conf {
1381 	enum rte_eth_fc_mode mode; /**< Link flow control mode */
1382 
1383 	struct {
1384 		uint16_t tx_qid; /**< Tx queue ID */
1385 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1386 		 * in the range [0, rte_eth_pfc_queue_info::tx_max - 1]
1387 		 */
1388 		uint8_t tc;
1389 	} rx_pause; /* Valid when (mode == FC_RX_PAUSE || mode == FC_FULL) */
1390 
1391 	struct {
1392 		uint16_t pause_time; /**< Pause quota in the Pause frame */
1393 		uint16_t rx_qid;     /**< Rx queue ID */
1394 		/** Traffic class as per PFC (802.1Qbb) spec. The value must be
1395 		 * in the range [0, rte_eth_pfc_queue_info::tx_max - 1]
1396 		 */
1397 		uint8_t tc;
1398 	} tx_pause; /* Valid when (mode == FC_TX_PAUSE || mode == FC_FULL) */
1399 };
1400 
1401 /**
1402  * Tunnel type for device-specific classifier configuration.
1403  * @see rte_eth_udp_tunnel
1404  */
1405 enum rte_eth_tunnel_type {
1406 	RTE_ETH_TUNNEL_TYPE_NONE = 0,
1407 	RTE_ETH_TUNNEL_TYPE_VXLAN,
1408 	RTE_ETH_TUNNEL_TYPE_GENEVE,
1409 	RTE_ETH_TUNNEL_TYPE_TEREDO,
1410 	RTE_ETH_TUNNEL_TYPE_NVGRE,
1411 	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
1412 	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
1413 	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
1414 	RTE_ETH_TUNNEL_TYPE_ECPRI,
1415 	RTE_ETH_TUNNEL_TYPE_MAX,
1416 };
1417 
1418 /* Deprecated API file for rte_eth_dev_filter_* functions */
1419 #include "rte_eth_ctrl.h"
1420 
1421 /**
1422  * UDP tunneling configuration.
1423  *
1424  * Used to configure the classifier of a device,
1425  * associating an UDP port with a type of tunnel.
1426  *
1427  * Some NICs may need such configuration to properly parse a tunnel
1428  * with any standard or custom UDP port.
1429  */
1430 struct rte_eth_udp_tunnel {
1431 	uint16_t udp_port; /**< UDP port used for the tunnel. */
1432 	uint8_t prot_type; /**< Tunnel type. @see rte_eth_tunnel_type */
1433 };
1434 
1435 /**
1436  * A structure used to enable/disable specific device interrupts.
1437  */
1438 struct rte_eth_intr_conf {
1439 	/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
1440 	uint32_t lsc:1;
1441 	/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
1442 	uint32_t rxq:1;
1443 	/** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
1444 	uint32_t rmv:1;
1445 };
1446 
1447 #define rte_intr_conf rte_eth_intr_conf
1448 
1449 /**
1450  * A structure used to configure an Ethernet port.
1451  * Depending upon the Rx multi-queue mode, extra advanced
1452  * configuration settings may be needed.
1453  */
1454 struct rte_eth_conf {
1455 	uint32_t link_speeds; /**< bitmap of RTE_ETH_LINK_SPEED_XXX of speeds to be
1456 				used. RTE_ETH_LINK_SPEED_FIXED disables link
1457 				autonegotiation, and a unique speed shall be
1458 				set. Otherwise, the bitmap defines the set of
1459 				speeds to be advertised. If the special value
1460 				RTE_ETH_LINK_SPEED_AUTONEG (0) is used, all speeds
1461 				supported are advertised. */
1462 	struct rte_eth_rxmode rxmode; /**< Port Rx configuration. */
1463 	struct rte_eth_txmode txmode; /**< Port Tx configuration. */
1464 	uint32_t lpbk_mode; /**< Loopback operation mode. By default the value
1465 			         is 0, meaning the loopback mode is disabled.
1466 				 Read the datasheet of given Ethernet controller
1467 				 for details. The possible values of this field
1468 				 are defined in implementation of each driver. */
1469 	struct {
1470 		struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
1471 		/** Port VMDq+DCB configuration. */
1472 		struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
1473 		/** Port DCB Rx configuration. */
1474 		struct rte_eth_dcb_rx_conf dcb_rx_conf;
1475 		/** Port VMDq Rx configuration. */
1476 		struct rte_eth_vmdq_rx_conf vmdq_rx_conf;
1477 	} rx_adv_conf; /**< Port Rx filtering configuration. */
1478 	union {
1479 		/** Port VMDq+DCB Tx configuration. */
1480 		struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
1481 		/** Port DCB Tx configuration. */
1482 		struct rte_eth_dcb_tx_conf dcb_tx_conf;
1483 		/** Port VMDq Tx configuration. */
1484 		struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
1485 	} tx_adv_conf; /**< Port Tx DCB configuration (union). */
1486 	/** Currently,Priority Flow Control(PFC) are supported,if DCB with PFC
1487 	    is needed,and the variable must be set RTE_ETH_DCB_PFC_SUPPORT. */
1488 	uint32_t dcb_capability_en;
1489 	struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */
1490 };
1491 
1492 /**
1493  * Rx offload capabilities of a device.
1494  */
1495 #define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       RTE_BIT64(0)
1496 #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1497 #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1498 #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1499 #define RTE_ETH_RX_OFFLOAD_TCP_LRO          RTE_BIT64(4)
1500 #define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       RTE_BIT64(5)
1501 #define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
1502 #define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     RTE_BIT64(7)
1503 #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      RTE_BIT64(9)
1504 #define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      RTE_BIT64(10)
1505 #define RTE_ETH_RX_OFFLOAD_SCATTER          RTE_BIT64(13)
1506 /**
1507  * Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1508  * and RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME is set in ol_flags.
1509  * The mbuf field and flag are registered when the offload is configured.
1510  */
1511 #define RTE_ETH_RX_OFFLOAD_TIMESTAMP        RTE_BIT64(14)
1512 #define RTE_ETH_RX_OFFLOAD_SECURITY         RTE_BIT64(15)
1513 #define RTE_ETH_RX_OFFLOAD_KEEP_CRC         RTE_BIT64(16)
1514 #define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(17)
1515 #define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(18)
1516 #define RTE_ETH_RX_OFFLOAD_RSS_HASH         RTE_BIT64(19)
1517 #define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     RTE_BIT64(20)
1518 
1519 #define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
1520 				 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
1521 				 RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
1522 #define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
1523 			     RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
1524 			     RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
1525 			     RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
1526 
1527 /*
1528  * If new Rx offload capabilities are defined, they also must be
1529  * mentioned in rte_rx_offload_names in rte_ethdev.c file.
1530  */
1531 
1532 /**
1533  * Tx offload capabilities of a device.
1534  */
1535 #define RTE_ETH_TX_OFFLOAD_VLAN_INSERT      RTE_BIT64(0)
1536 #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
1537 #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
1538 #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
1539 #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)
1540 #define RTE_ETH_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)
1541 #define RTE_ETH_TX_OFFLOAD_UDP_TSO          RTE_BIT64(6)
1542 #define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(7)  /**< Used for tunneling packet. */
1543 #define RTE_ETH_TX_OFFLOAD_QINQ_INSERT      RTE_BIT64(8)
1544 #define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO    RTE_BIT64(9)  /**< Used for tunneling packet. */
1545 #define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO      RTE_BIT64(10) /**< Used for tunneling packet. */
1546 #define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO     RTE_BIT64(11) /**< Used for tunneling packet. */
1547 #define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO   RTE_BIT64(12) /**< Used for tunneling packet. */
1548 #define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT    RTE_BIT64(13)
1549 /**
1550  * Multiple threads can invoke rte_eth_tx_burst() concurrently on the same
1551  * Tx queue without SW lock.
1552  */
1553 #define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE      RTE_BIT64(14)
1554 /** Device supports multi segment send. */
1555 #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)
1556 /**
1557  * Device supports optimization for fast release of mbufs.
1558  * When set application must guarantee that per-queue all mbufs comes from
1559  * the same mempool and has refcnt = 1.
1560  */
1561 #define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)
1562 #define RTE_ETH_TX_OFFLOAD_SECURITY         RTE_BIT64(17)
1563 /**
1564  * Device supports generic UDP tunneled packet TSO.
1565  * Application must set RTE_MBUF_F_TX_TUNNEL_UDP and other mbuf fields required
1566  * for tunnel TSO.
1567  */
1568 #define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO      RTE_BIT64(18)
1569 /**
1570  * Device supports generic IP tunneled packet TSO.
1571  * Application must set RTE_MBUF_F_TX_TUNNEL_IP and other mbuf fields required
1572  * for tunnel TSO.
1573  */
1574 #define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO       RTE_BIT64(19)
1575 /** Device supports outer UDP checksum */
1576 #define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(20)
1577 /**
1578  * Device sends on time read from RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
1579  * if RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME is set in ol_flags.
1580  * The mbuf field and flag are registered when the offload is configured.
1581  */
1582 #define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
1583 /*
1584  * If new Tx offload capabilities are defined, they also must be
1585  * mentioned in rte_tx_offload_names in rte_ethdev.c file.
1586  */
1587 
1588 /**@{@name Device capabilities
1589  * Non-offload capabilities reported in rte_eth_dev_info.dev_capa.
1590  */
1591 /** Device supports Rx queue setup after device started. */
1592 #define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP RTE_BIT64(0)
1593 /** Device supports Tx queue setup after device started. */
1594 #define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP RTE_BIT64(1)
1595 /**
1596  * Device supports shared Rx queue among ports within Rx domain and
1597  * switch domain. Mbufs are consumed by shared Rx queue instead of
1598  * each queue. Multiple groups are supported by share_group of Rx
1599  * queue configuration. Shared Rx queue is identified by PMD using
1600  * share_qid of Rx queue configuration. Polling any port in the group
1601  * receive packets of all member ports, source port identified by
1602  * mbuf->port field.
1603  */
1604 #define RTE_ETH_DEV_CAPA_RXQ_SHARE              RTE_BIT64(2)
1605 /** Device supports keeping flow rules across restart. */
1606 #define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP         RTE_BIT64(3)
1607 /** Device supports keeping shared flow objects across restart. */
1608 #define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)
1609 /**@}*/
1610 
1611 /*
1612  * Fallback default preferred Rx/Tx port parameters.
1613  * These are used if an application requests default parameters
1614  * but the PMD does not provide preferred values.
1615  */
1616 #define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
1617 #define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
1618 #define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
1619 #define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
1620 
1621 /**
1622  * Preferred Rx/Tx port parameters.
1623  * There are separate instances of this structure for transmission
1624  * and reception respectively.
1625  */
1626 struct rte_eth_dev_portconf {
1627 	uint16_t burst_size; /**< Device-preferred burst size */
1628 	uint16_t ring_size; /**< Device-preferred size of queue rings */
1629 	uint16_t nb_queues; /**< Device-preferred number of queues */
1630 };
1631 
1632 /**
1633  * Default values for switch domain ID when ethdev does not support switch
1634  * domain definitions.
1635  */
1636 #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID	(UINT16_MAX)
1637 
1638 /**
1639  * Ethernet device associated switch information
1640  */
1641 struct rte_eth_switch_info {
1642 	const char *name;	/**< switch name */
1643 	uint16_t domain_id;	/**< switch domain ID */
1644 	/**
1645 	 * Mapping to the devices physical switch port as enumerated from the
1646 	 * perspective of the embedded interconnect/switch. For SR-IOV enabled
1647 	 * device this may correspond to the VF_ID of each virtual function,
1648 	 * but each driver should explicitly define the mapping of switch
1649 	 * port identifier to that physical interconnect/switch
1650 	 */
1651 	uint16_t port_id;
1652 	/**
1653 	 * Shared Rx queue sub-domain boundary. Only ports in same Rx domain
1654 	 * and switch domain can share Rx queue. Valid only if device advertised
1655 	 * RTE_ETH_DEV_CAPA_RXQ_SHARE capability.
1656 	 */
1657 	uint16_t rx_domain;
1658 };
1659 
1660 /**
1661  * @warning
1662  * @b EXPERIMENTAL: this structure may change without prior notice.
1663  *
1664  * Ethernet device Rx buffer segmentation capabilities.
1665  */
1666 struct rte_eth_rxseg_capa {
1667 	__extension__
1668 	uint32_t multi_pools:1; /**< Supports receiving to multiple pools.*/
1669 	uint32_t offset_allowed:1; /**< Supports buffer offsets. */
1670 	uint32_t offset_align_log2:4; /**< Required offset alignment. */
1671 	uint16_t max_nseg; /**< Maximum amount of segments to split. */
1672 	uint16_t reserved; /**< Reserved field. */
1673 };
1674 
1675 /**
1676  * Ethernet device information
1677  */
1678 
1679 /**
1680  * Ethernet device representor port type.
1681  */
1682 enum rte_eth_representor_type {
1683 	RTE_ETH_REPRESENTOR_NONE, /**< not a representor. */
1684 	RTE_ETH_REPRESENTOR_VF,   /**< representor of Virtual Function. */
1685 	RTE_ETH_REPRESENTOR_SF,   /**< representor of Sub Function. */
1686 	RTE_ETH_REPRESENTOR_PF,   /**< representor of Physical Function. */
1687 };
1688 
1689 /**
1690  * @warning
1691  * @b EXPERIMENTAL: this enumeration may change without prior notice.
1692  *
1693  * Ethernet device error handling mode.
1694  */
1695 enum rte_eth_err_handle_mode {
1696 	/** No error handling modes are supported. */
1697 	RTE_ETH_ERROR_HANDLE_MODE_NONE,
1698 	/** Passive error handling, after the PMD detects that a reset is required,
1699 	 * the PMD reports @see RTE_ETH_EVENT_INTR_RESET event,
1700 	 * and the application invokes @see rte_eth_dev_reset to recover the port.
1701 	 */
1702 	RTE_ETH_ERROR_HANDLE_MODE_PASSIVE,
1703 	/** Proactive error handling, after the PMD detects that a reset is required,
1704 	 * the PMD reports @see RTE_ETH_EVENT_ERR_RECOVERING event,
1705 	 * do recovery internally, and finally reports the recovery result event
1706 	 * (@see RTE_ETH_EVENT_RECOVERY_*).
1707 	 */
1708 	RTE_ETH_ERROR_HANDLE_MODE_PROACTIVE,
1709 };
1710 
1711 /**
1712  * A structure used to retrieve the contextual information of
1713  * an Ethernet device, such as the controlling driver of the
1714  * device, etc...
1715  */
1716 struct rte_eth_dev_info {
1717 	struct rte_device *device; /**< Generic device information */
1718 	const char *driver_name; /**< Device Driver name. */
1719 	unsigned int if_index; /**< Index to bound host interface, or 0 if none.
1720 		Use if_indextoname() to translate into an interface name. */
1721 	uint16_t min_mtu;	/**< Minimum MTU allowed */
1722 	uint16_t max_mtu;	/**< Maximum MTU allowed */
1723 	const uint32_t *dev_flags; /**< Device flags */
1724 	uint32_t min_rx_bufsize; /**< Minimum size of Rx buffer. */
1725 	uint32_t max_rx_pktlen; /**< Maximum configurable length of Rx pkt. */
1726 	/** Maximum configurable size of LRO aggregated packet. */
1727 	uint32_t max_lro_pkt_size;
1728 	uint16_t max_rx_queues; /**< Maximum number of Rx queues. */
1729 	uint16_t max_tx_queues; /**< Maximum number of Tx queues. */
1730 	uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
1731 	/** Maximum number of hash MAC addresses for MTA and UTA. */
1732 	uint32_t max_hash_mac_addrs;
1733 	uint16_t max_vfs; /**< Maximum number of VFs. */
1734 	uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
1735 	struct rte_eth_rxseg_capa rx_seg_capa; /**< Segmentation capability.*/
1736 	/** All Rx offload capabilities including all per-queue ones */
1737 	uint64_t rx_offload_capa;
1738 	/** All Tx offload capabilities including all per-queue ones */
1739 	uint64_t tx_offload_capa;
1740 	/** Device per-queue Rx offload capabilities. */
1741 	uint64_t rx_queue_offload_capa;
1742 	/** Device per-queue Tx offload capabilities. */
1743 	uint64_t tx_queue_offload_capa;
1744 	/** Device redirection table size, the total number of entries. */
1745 	uint16_t reta_size;
1746 	uint8_t hash_key_size; /**< Hash key size in bytes */
1747 	/** Bit mask of RSS offloads, the bit offset also means flow type */
1748 	uint64_t flow_type_rss_offloads;
1749 	struct rte_eth_rxconf default_rxconf; /**< Default Rx configuration */
1750 	struct rte_eth_txconf default_txconf; /**< Default Tx configuration */
1751 	uint16_t vmdq_queue_base; /**< First queue ID for VMDq pools. */
1752 	uint16_t vmdq_queue_num;  /**< Queue number for VMDq pools. */
1753 	uint16_t vmdq_pool_base;  /**< First ID of VMDq pools. */
1754 	struct rte_eth_desc_lim rx_desc_lim;  /**< Rx descriptors limits */
1755 	struct rte_eth_desc_lim tx_desc_lim;  /**< Tx descriptors limits */
1756 	uint32_t speed_capa;  /**< Supported speeds bitmap (RTE_ETH_LINK_SPEED_). */
1757 	/** Configured number of Rx/Tx queues */
1758 	uint16_t nb_rx_queues; /**< Number of Rx queues. */
1759 	uint16_t nb_tx_queues; /**< Number of Tx queues. */
1760 	/**
1761 	 * Maximum number of Rx mempools supported per Rx queue.
1762 	 *
1763 	 * Value greater than 0 means that the driver supports Rx queue
1764 	 * mempools specification via rx_conf->rx_mempools.
1765 	 */
1766 	uint16_t max_rx_mempools;
1767 	/** Rx parameter recommendations */
1768 	struct rte_eth_dev_portconf default_rxportconf;
1769 	/** Tx parameter recommendations */
1770 	struct rte_eth_dev_portconf default_txportconf;
1771 	/** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
1772 	uint64_t dev_capa;
1773 	/**
1774 	 * Switching information for ports on a device with a
1775 	 * embedded managed interconnect/switch.
1776 	 */
1777 	struct rte_eth_switch_info switch_info;
1778 	/** Supported error handling mode. */
1779 	enum rte_eth_err_handle_mode err_handle_mode;
1780 
1781 	uint64_t reserved_64s[2]; /**< Reserved for future fields */
1782 	void *reserved_ptrs[2];   /**< Reserved for future fields */
1783 };
1784 
1785 /**@{@name Rx/Tx queue states */
1786 #define RTE_ETH_QUEUE_STATE_STOPPED 0 /**< Queue stopped. */
1787 #define RTE_ETH_QUEUE_STATE_STARTED 1 /**< Queue started. */
1788 #define RTE_ETH_QUEUE_STATE_HAIRPIN 2 /**< Queue used for hairpin. */
1789 /**@}*/
1790 
1791 /**
1792  * Ethernet device Rx queue information structure.
1793  * Used to retrieve information about configured queue.
1794  */
1795 struct rte_eth_rxq_info {
1796 	struct rte_mempool *mp;     /**< mempool used by that queue. */
1797 	struct rte_eth_rxconf conf; /**< queue config parameters. */
1798 	uint8_t scattered_rx;       /**< scattered packets Rx supported. */
1799 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1800 	uint16_t nb_desc;           /**< configured number of RXDs. */
1801 	uint16_t rx_buf_size;       /**< hardware receive buffer size. */
1802 	/**
1803 	 * Available Rx descriptors threshold defined as percentage
1804 	 * of Rx queue size. If number of available descriptors is lower,
1805 	 * the event RTE_ETH_EVENT_RX_AVAIL_THESH is generated.
1806 	 * Value 0 means that the threshold monitoring is disabled.
1807 	 */
1808 	uint8_t avail_thresh;
1809 } __rte_cache_min_aligned;
1810 
1811 /**
1812  * Ethernet device Tx queue information structure.
1813  * Used to retrieve information about configured queue.
1814  */
1815 struct rte_eth_txq_info {
1816 	struct rte_eth_txconf conf; /**< queue config parameters. */
1817 	uint16_t nb_desc;           /**< configured number of TXDs. */
1818 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
1819 } __rte_cache_min_aligned;
1820 
1821 /* Generic Burst mode flag definition, values can be ORed. */
1822 
1823 /**
1824  * If the queues have different burst mode description, this bit will be set
1825  * by PMD, then the application can iterate to retrieve burst description for
1826  * all other queues.
1827  */
1828 #define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)
1829 
1830 /**
1831  * Ethernet device Rx/Tx queue packet burst mode information structure.
1832  * Used to retrieve information about packet burst mode setting.
1833  */
1834 struct rte_eth_burst_mode {
1835 	uint64_t flags; /**< The ORed values of RTE_ETH_BURST_FLAG_xxx */
1836 
1837 #define RTE_ETH_BURST_MODE_INFO_SIZE 1024 /**< Maximum size for information */
1838 	char info[RTE_ETH_BURST_MODE_INFO_SIZE]; /**< burst mode information */
1839 };
1840 
1841 /** Maximum name length for extended statistics counters */
1842 #define RTE_ETH_XSTATS_NAME_SIZE 64
1843 
1844 /**
1845  * An Ethernet device extended statistic structure
1846  *
1847  * This structure is used by rte_eth_xstats_get() to provide
1848  * statistics that are not provided in the generic *rte_eth_stats*
1849  * structure.
1850  * It maps a name ID, corresponding to an index in the array returned
1851  * by rte_eth_xstats_get_names(), to a statistic value.
1852  */
1853 struct rte_eth_xstat {
1854 	uint64_t id;        /**< The index in xstats name array. */
1855 	uint64_t value;     /**< The statistic counter value. */
1856 };
1857 
1858 /**
1859  * A name element for extended statistics.
1860  *
1861  * An array of this structure is returned by rte_eth_xstats_get_names().
1862  * It lists the names of extended statistics for a PMD. The *rte_eth_xstat*
1863  * structure references these names by their array index.
1864  *
1865  * The xstats should follow a common naming scheme.
1866  * Some names are standardized in rte_stats_strings.
1867  * Examples:
1868  *     - rx_missed_errors
1869  *     - tx_q3_bytes
1870  *     - tx_size_128_to_255_packets
1871  */
1872 struct rte_eth_xstat_name {
1873 	char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
1874 };
1875 
1876 #define RTE_ETH_DCB_NUM_TCS    8
1877 #define RTE_ETH_MAX_VMDQ_POOL  64
1878 
1879 /**
1880  * A structure used to get the information of queue and
1881  * TC mapping on both Tx and Rx paths.
1882  */
1883 struct rte_eth_dcb_tc_queue_mapping {
1884 	/** Rx queues assigned to tc per Pool */
1885 	struct {
1886 		uint16_t base;
1887 		uint16_t nb_queue;
1888 	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1889 	/** Rx queues assigned to tc per Pool */
1890 	struct {
1891 		uint16_t base;
1892 		uint16_t nb_queue;
1893 	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
1894 };
1895 
1896 /**
1897  * A structure used to get the information of DCB.
1898  * It includes TC UP mapping and queue TC mapping.
1899  */
1900 struct rte_eth_dcb_info {
1901 	uint8_t nb_tcs;        /**< number of TCs */
1902 	uint8_t prio_tc[RTE_ETH_DCB_NUM_USER_PRIORITIES]; /**< Priority to tc */
1903 	uint8_t tc_bws[RTE_ETH_DCB_NUM_TCS]; /**< Tx BW percentage for each TC */
1904 	/** Rx queues assigned to tc */
1905 	struct rte_eth_dcb_tc_queue_mapping tc_queue;
1906 };
1907 
1908 /**
1909  * This enum indicates the possible Forward Error Correction (FEC) modes
1910  * of an ethdev port.
1911  */
1912 enum rte_eth_fec_mode {
1913 	RTE_ETH_FEC_NOFEC = 0,      /**< FEC is off */
1914 	RTE_ETH_FEC_AUTO,	    /**< FEC autonegotiation modes */
1915 	RTE_ETH_FEC_BASER,          /**< FEC using common algorithm */
1916 	RTE_ETH_FEC_RS,             /**< FEC using RS algorithm */
1917 };
1918 
1919 /* Translate from FEC mode to FEC capa */
1920 #define RTE_ETH_FEC_MODE_TO_CAPA(x) RTE_BIT32(x)
1921 
1922 /* This macro indicates FEC capa mask */
1923 #define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)
1924 
1925 /* A structure used to get capabilities per link speed */
1926 struct rte_eth_fec_capa {
1927 	uint32_t speed; /**< Link speed (see RTE_ETH_SPEED_NUM_*) */
1928 	uint32_t capa;  /**< FEC capabilities bitmask */
1929 };
1930 
1931 #define RTE_ETH_ALL RTE_MAX_ETHPORTS
1932 
1933 /* Macros to check for valid port */
1934 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
1935 	if (!rte_eth_dev_is_valid_port(port_id)) { \
1936 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1937 		return retval; \
1938 	} \
1939 } while (0)
1940 
1941 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
1942 	if (!rte_eth_dev_is_valid_port(port_id)) { \
1943 		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
1944 		return; \
1945 	} \
1946 } while (0)
1947 
1948 /**
1949  * Function type used for Rx packet processing packet callbacks.
1950  *
1951  * The callback function is called on Rx with a burst of packets that have
1952  * been received on the given port and queue.
1953  *
1954  * @param port_id
1955  *   The Ethernet port on which Rx is being performed.
1956  * @param queue
1957  *   The queue on the Ethernet port which is being used to receive the packets.
1958  * @param pkts
1959  *   The burst of packets that have just been received.
1960  * @param nb_pkts
1961  *   The number of packets in the burst pointed to by "pkts".
1962  * @param max_pkts
1963  *   The max number of packets that can be stored in the "pkts" array.
1964  * @param user_param
1965  *   The arbitrary user parameter passed in by the application when the callback
1966  *   was originally configured.
1967  * @return
1968  *   The number of packets returned to the user.
1969  */
1970 typedef uint16_t (*rte_rx_callback_fn)(uint16_t port_id, uint16_t queue,
1971 	struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
1972 	void *user_param);
1973 
1974 /**
1975  * Function type used for Tx packet processing packet callbacks.
1976  *
1977  * The callback function is called on Tx with a burst of packets immediately
1978  * before the packets are put onto the hardware queue for transmission.
1979  *
1980  * @param port_id
1981  *   The Ethernet port on which Tx is being performed.
1982  * @param queue
1983  *   The queue on the Ethernet port which is being used to transmit the packets.
1984  * @param pkts
1985  *   The burst of packets that are about to be transmitted.
1986  * @param nb_pkts
1987  *   The number of packets in the burst pointed to by "pkts".
1988  * @param user_param
1989  *   The arbitrary user parameter passed in by the application when the callback
1990  *   was originally configured.
1991  * @return
1992  *   The number of packets to be written to the NIC.
1993  */
1994 typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
1995 	struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
1996 
1997 /**
1998  * Possible states of an ethdev port.
1999  */
2000 enum rte_eth_dev_state {
2001 	/** Device is unused before being probed. */
2002 	RTE_ETH_DEV_UNUSED = 0,
2003 	/** Device is attached when allocated in probing. */
2004 	RTE_ETH_DEV_ATTACHED,
2005 	/** Device is in removed state when plug-out is detected. */
2006 	RTE_ETH_DEV_REMOVED,
2007 };
2008 
2009 struct rte_eth_dev_sriov {
2010 	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
2011 	uint8_t nb_q_per_pool;        /**< Rx queue number per pool */
2012 	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
2013 	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
2014 };
2015 #define RTE_ETH_DEV_SRIOV(dev)         ((dev)->data->sriov)
2016 
2017 #define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
2018 
2019 #define RTE_ETH_DEV_NO_OWNER 0
2020 
2021 #define RTE_ETH_MAX_OWNER_NAME_LEN 64
2022 
2023 struct rte_eth_dev_owner {
2024 	uint64_t id; /**< The owner unique identifier. */
2025 	char name[RTE_ETH_MAX_OWNER_NAME_LEN]; /**< The owner name. */
2026 };
2027 
2028 /**@{@name Device flags
2029  * Flags internally saved in rte_eth_dev_data.dev_flags
2030  * and reported in rte_eth_dev_info.dev_flags.
2031  */
2032 /** PMD supports thread-safe flow operations */
2033 #define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  RTE_BIT32(0)
2034 /** Device supports link state interrupt */
2035 #define RTE_ETH_DEV_INTR_LSC              RTE_BIT32(1)
2036 /** Device is a bonded slave */
2037 #define RTE_ETH_DEV_BONDED_SLAVE          RTE_BIT32(2)
2038 /** Device supports device removal interrupt */
2039 #define RTE_ETH_DEV_INTR_RMV              RTE_BIT32(3)
2040 /** Device is port representor */
2041 #define RTE_ETH_DEV_REPRESENTOR           RTE_BIT32(4)
2042 /** Device does not support MAC change after started */
2043 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR       RTE_BIT32(5)
2044 /**
2045  * Queue xstats filled automatically by ethdev layer.
2046  * PMDs filling the queue xstats themselves should not set this flag
2047  */
2048 #define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)
2049 /**@}*/
2050 
2051 /**
2052  * Iterates over valid ethdev ports owned by a specific owner.
2053  *
2054  * @param port_id
2055  *   The ID of the next possible valid owned port.
2056  * @param	owner_id
2057  *  The owner identifier.
2058  *  RTE_ETH_DEV_NO_OWNER means iterate over all valid ownerless ports.
2059  * @return
2060  *   Next valid port ID owned by owner_id, RTE_MAX_ETHPORTS if there is none.
2061  */
2062 uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
2063 		const uint64_t owner_id);
2064 
2065 /**
2066  * Macro to iterate over all enabled ethdev ports owned by a specific owner.
2067  */
2068 #define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
2069 	for (p = rte_eth_find_next_owned_by(0, o); \
2070 	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
2071 	     p = rte_eth_find_next_owned_by(p + 1, o))
2072 
2073 /**
2074  * Iterates over valid ethdev ports.
2075  *
2076  * @param port_id
2077  *   The ID of the next possible valid port.
2078  * @return
2079  *   Next valid port ID, RTE_MAX_ETHPORTS if there is none.
2080  */
2081 uint16_t rte_eth_find_next(uint16_t port_id);
2082 
2083 /**
2084  * Macro to iterate over all enabled and ownerless ethdev ports.
2085  */
2086 #define RTE_ETH_FOREACH_DEV(p) \
2087 	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
2088 
2089 /**
2090  * Iterates over ethdev ports of a specified device.
2091  *
2092  * @param port_id_start
2093  *   The ID of the next possible valid port.
2094  * @param parent
2095  *   The generic device behind the ports to iterate.
2096  * @return
2097  *   Next port ID of the device, possibly port_id_start,
2098  *   RTE_MAX_ETHPORTS if there is none.
2099  */
2100 uint16_t
2101 rte_eth_find_next_of(uint16_t port_id_start,
2102 		const struct rte_device *parent);
2103 
2104 /**
2105  * Macro to iterate over all ethdev ports of a specified device.
2106  *
2107  * @param port_id
2108  *   The ID of the matching port being iterated.
2109  * @param parent
2110  *   The rte_device pointer matching the iterated ports.
2111  */
2112 #define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
2113 	for (port_id = rte_eth_find_next_of(0, parent); \
2114 		port_id < RTE_MAX_ETHPORTS; \
2115 		port_id = rte_eth_find_next_of(port_id + 1, parent))
2116 
2117 /**
2118  * Iterates over sibling ethdev ports (i.e. sharing the same rte_device).
2119  *
2120  * @param port_id_start
2121  *   The ID of the next possible valid sibling port.
2122  * @param ref_port_id
2123  *   The ID of a reference port to compare rte_device with.
2124  * @return
2125  *   Next sibling port ID, possibly port_id_start or ref_port_id itself,
2126  *   RTE_MAX_ETHPORTS if there is none.
2127  */
2128 uint16_t
2129 rte_eth_find_next_sibling(uint16_t port_id_start, uint16_t ref_port_id);
2130 
2131 /**
2132  * Macro to iterate over all ethdev ports sharing the same rte_device
2133  * as the specified port.
2134  * Note: the specified reference port is part of the loop iterations.
2135  *
2136  * @param port_id
2137  *   The ID of the matching port being iterated.
2138  * @param ref_port_id
2139  *   The ID of the port being compared.
2140  */
2141 #define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
2142 	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
2143 		port_id < RTE_MAX_ETHPORTS; \
2144 		port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))
2145 
2146 /**
2147  * Get a new unique owner identifier.
2148  * An owner identifier is used to owns Ethernet devices by only one DPDK entity
2149  * to avoid multiple management of device by different entities.
2150  *
2151  * @param	owner_id
2152  *   Owner identifier pointer.
2153  * @return
2154  *   Negative errno value on error, 0 on success.
2155  */
2156 int rte_eth_dev_owner_new(uint64_t *owner_id);
2157 
2158 /**
2159  * Set an Ethernet device owner.
2160  *
2161  * @param	port_id
2162  *  The identifier of the port to own.
2163  * @param	owner
2164  *  The owner pointer.
2165  * @return
2166  *  Negative errno value on error, 0 on success.
2167  */
2168 int rte_eth_dev_owner_set(const uint16_t port_id,
2169 		const struct rte_eth_dev_owner *owner);
2170 
2171 /**
2172  * Unset Ethernet device owner to make the device ownerless.
2173  *
2174  * @param	port_id
2175  *  The identifier of port to make ownerless.
2176  * @param	owner_id
2177  *  The owner identifier.
2178  * @return
2179  *  0 on success, negative errno value on error.
2180  */
2181 int rte_eth_dev_owner_unset(const uint16_t port_id,
2182 		const uint64_t owner_id);
2183 
2184 /**
2185  * Remove owner from all Ethernet devices owned by a specific owner.
2186  *
2187  * @param	owner_id
2188  *  The owner identifier.
2189  * @return
2190  *  0 on success, negative errno value on error.
2191  */
2192 int rte_eth_dev_owner_delete(const uint64_t owner_id);
2193 
2194 /**
2195  * Get the owner of an Ethernet device.
2196  *
2197  * @param	port_id
2198  *  The port identifier.
2199  * @param	owner
2200  *  The owner structure pointer to fill.
2201  * @return
2202  *  0 on success, negative errno value on error..
2203  */
2204 int rte_eth_dev_owner_get(const uint16_t port_id,
2205 		struct rte_eth_dev_owner *owner);
2206 
2207 /**
2208  * Get the number of ports which are usable for the application.
2209  *
2210  * These devices must be iterated by using the macro
2211  * ``RTE_ETH_FOREACH_DEV`` or ``RTE_ETH_FOREACH_DEV_OWNED_BY``
2212  * to deal with non-contiguous ranges of devices.
2213  *
2214  * @return
2215  *   The count of available Ethernet devices.
2216  */
2217 uint16_t rte_eth_dev_count_avail(void);
2218 
2219 /**
2220  * Get the total number of ports which are allocated.
2221  *
2222  * Some devices may not be available for the application.
2223  *
2224  * @return
2225  *   The total count of Ethernet devices.
2226  */
2227 uint16_t rte_eth_dev_count_total(void);
2228 
2229 /**
2230  * Convert a numerical speed in Mbps to a bitmap flag that can be used in
2231  * the bitmap link_speeds of the struct rte_eth_conf
2232  *
2233  * @param speed
2234  *   Numerical speed value in Mbps
2235  * @param duplex
2236  *   RTE_ETH_LINK_[HALF/FULL]_DUPLEX (only for 10/100M speeds)
2237  * @return
2238  *   0 if the speed cannot be mapped
2239  */
2240 uint32_t rte_eth_speed_bitflag(uint32_t speed, int duplex);
2241 
2242 /**
2243  * Get RTE_ETH_RX_OFFLOAD_* flag name.
2244  *
2245  * @param offload
2246  *   Offload flag.
2247  * @return
2248  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2249  */
2250 const char *rte_eth_dev_rx_offload_name(uint64_t offload);
2251 
2252 /**
2253  * Get RTE_ETH_TX_OFFLOAD_* flag name.
2254  *
2255  * @param offload
2256  *   Offload flag.
2257  * @return
2258  *   Offload name or 'UNKNOWN' if the flag cannot be recognised.
2259  */
2260 const char *rte_eth_dev_tx_offload_name(uint64_t offload);
2261 
2262 /**
2263  * @warning
2264  * @b EXPERIMENTAL: this API may change without prior notice.
2265  *
2266  * Get RTE_ETH_DEV_CAPA_* flag name.
2267  *
2268  * @param capability
2269  *   Capability flag.
2270  * @return
2271  *   Capability name or 'UNKNOWN' if the flag cannot be recognized.
2272  */
2273 __rte_experimental
2274 const char *rte_eth_dev_capability_name(uint64_t capability);
2275 
2276 /**
2277  * Configure an Ethernet device.
2278  * This function must be invoked first before any other function in the
2279  * Ethernet API. This function can also be re-invoked when a device is in the
2280  * stopped state.
2281  *
2282  * @param port_id
2283  *   The port identifier of the Ethernet device to configure.
2284  * @param nb_rx_queue
2285  *   The number of receive queues to set up for the Ethernet device.
2286  * @param nb_tx_queue
2287  *   The number of transmit queues to set up for the Ethernet device.
2288  * @param eth_conf
2289  *   The pointer to the configuration data to be used for the Ethernet device.
2290  *   The *rte_eth_conf* structure includes:
2291  *     -  the hardware offload features to activate, with dedicated fields for
2292  *        each statically configurable offload hardware feature provided by
2293  *        Ethernet devices, such as IP checksum or VLAN tag stripping for
2294  *        example.
2295  *        The Rx offload bitfield API is obsolete and will be deprecated.
2296  *        Applications should set the ignore_bitfield_offloads bit on *rxmode*
2297  *        structure and use offloads field to set per-port offloads instead.
2298  *     -  Any offloading set in eth_conf->[rt]xmode.offloads must be within
2299  *        the [rt]x_offload_capa returned from rte_eth_dev_info_get().
2300  *        Any type of device supported offloading set in the input argument
2301  *        eth_conf->[rt]xmode.offloads to rte_eth_dev_configure() is enabled
2302  *        on all queues and it can't be disabled in rte_eth_[rt]x_queue_setup()
2303  *     -  the Receive Side Scaling (RSS) configuration when using multiple Rx
2304  *        queues per port. Any RSS hash function set in eth_conf->rss_conf.rss_hf
2305  *        must be within the flow_type_rss_offloads provided by drivers via
2306  *        rte_eth_dev_info_get() API.
2307  *
2308  *   Embedding all configuration information in a single data structure
2309  *   is the more flexible method that allows the addition of new features
2310  *   without changing the syntax of the API.
2311  * @return
2312  *   - 0: Success, device configured.
2313  *   - <0: Error code returned by the driver configuration function.
2314  */
2315 int rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_queue,
2316 		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
2317 
2318 /**
2319  * Check if an Ethernet device was physically removed.
2320  *
2321  * @param port_id
2322  *   The port identifier of the Ethernet device.
2323  * @return
2324  *   1 when the Ethernet device is removed, otherwise 0.
2325  */
2326 int
2327 rte_eth_dev_is_removed(uint16_t port_id);
2328 
2329 /**
2330  * Allocate and set up a receive queue for an Ethernet device.
2331  *
2332  * The function allocates a contiguous block of memory for *nb_rx_desc*
2333  * receive descriptors from a memory zone associated with *socket_id*
2334  * and initializes each receive descriptor with a network buffer allocated
2335  * from the memory pool *mb_pool*.
2336  *
2337  * @param port_id
2338  *   The port identifier of the Ethernet device.
2339  * @param rx_queue_id
2340  *   The index of the receive queue to set up.
2341  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2342  *   to rte_eth_dev_configure().
2343  * @param nb_rx_desc
2344  *   The number of receive descriptors to allocate for the receive ring.
2345  * @param socket_id
2346  *   The *socket_id* argument is the socket identifier in case of NUMA.
2347  *   The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2348  *   the DMA memory allocated for the receive descriptors of the ring.
2349  * @param rx_conf
2350  *   The pointer to the configuration data to be used for the receive queue.
2351  *   NULL value is allowed, in which case default Rx configuration
2352  *   will be used.
2353  *   The *rx_conf* structure contains an *rx_thresh* structure with the values
2354  *   of the Prefetch, Host, and Write-Back threshold registers of the receive
2355  *   ring.
2356  *   In addition it contains the hardware offloads features to activate using
2357  *   the RTE_ETH_RX_OFFLOAD_* flags.
2358  *   If an offloading set in rx_conf->offloads
2359  *   hasn't been set in the input argument eth_conf->rxmode.offloads
2360  *   to rte_eth_dev_configure(), it is a new added offloading, it must be
2361  *   per-queue type and it is enabled for the queue.
2362  *   No need to repeat any bit in rx_conf->offloads which has already been
2363  *   enabled in rte_eth_dev_configure() at port level. An offloading enabled
2364  *   at port level can't be disabled at queue level.
2365  *   The configuration structure also contains the pointer to the array
2366  *   of the receiving buffer segment descriptions, see rx_seg and rx_nseg
2367  *   fields, this extended configuration might be used by split offloads like
2368  *   RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. If mb_pool is not NULL,
2369  *   the extended configuration fields must be set to NULL and zero.
2370  * @param mb_pool
2371  *   The pointer to the memory pool from which to allocate *rte_mbuf* network
2372  *   memory buffers to populate each descriptor of the receive ring. There are
2373  *   two options to provide Rx buffer configuration:
2374  *   - single pool:
2375  *     mb_pool is not NULL, rx_conf.rx_nseg is 0.
2376  *   - multiple segments description:
2377  *     mb_pool is NULL, rx_conf.rx_seg is not NULL, rx_conf.rx_nseg is not 0.
2378  *     Taken only if flag RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set in offloads.
2379  *
2380  * @return
2381  *   - 0: Success, receive queue correctly set up.
2382  *   - -EIO: if device is removed.
2383  *   - -ENODEV: if *port_id* is invalid.
2384  *   - -EINVAL: The memory pool pointer is null or the size of network buffers
2385  *      which can be allocated from this memory pool does not fit the various
2386  *      buffer sizes allowed by the device controller.
2387  *   - -ENOMEM: Unable to allocate the receive ring descriptors or to
2388  *      allocate network memory buffers from the memory pool when
2389  *      initializing receive descriptors.
2390  */
2391 int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
2392 		uint16_t nb_rx_desc, unsigned int socket_id,
2393 		const struct rte_eth_rxconf *rx_conf,
2394 		struct rte_mempool *mb_pool);
2395 
2396 /**
2397  * @warning
2398  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2399  *
2400  * Allocate and set up a hairpin receive queue for an Ethernet device.
2401  *
2402  * The function set up the selected queue to be used in hairpin.
2403  *
2404  * @param port_id
2405  *   The port identifier of the Ethernet device.
2406  * @param rx_queue_id
2407  *   The index of the receive queue to set up.
2408  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2409  *   to rte_eth_dev_configure().
2410  * @param nb_rx_desc
2411  *   The number of receive descriptors to allocate for the receive ring.
2412  *   0 means the PMD will use default value.
2413  * @param conf
2414  *   The pointer to the hairpin configuration.
2415  *
2416  * @return
2417  *   - (0) if successful.
2418  *   - (-ENODEV) if *port_id* is invalid.
2419  *   - (-ENOTSUP) if hardware doesn't support.
2420  *   - (-EINVAL) if bad parameter.
2421  *   - (-ENOMEM) if unable to allocate the resources.
2422  */
2423 __rte_experimental
2424 int rte_eth_rx_hairpin_queue_setup
2425 	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
2426 	 const struct rte_eth_hairpin_conf *conf);
2427 
2428 /**
2429  * Allocate and set up a transmit queue for an Ethernet device.
2430  *
2431  * @param port_id
2432  *   The port identifier of the Ethernet device.
2433  * @param tx_queue_id
2434  *   The index of the transmit queue to set up.
2435  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2436  *   to rte_eth_dev_configure().
2437  * @param nb_tx_desc
2438  *   The number of transmit descriptors to allocate for the transmit ring.
2439  * @param socket_id
2440  *   The *socket_id* argument is the socket identifier in case of NUMA.
2441  *   Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
2442  *   the DMA memory allocated for the transmit descriptors of the ring.
2443  * @param tx_conf
2444  *   The pointer to the configuration data to be used for the transmit queue.
2445  *   NULL value is allowed, in which case default Tx configuration
2446  *   will be used.
2447  *   The *tx_conf* structure contains the following data:
2448  *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
2449  *     Write-Back threshold registers of the transmit ring.
2450  *     When setting Write-Back threshold to the value greater then zero,
2451  *     *tx_rs_thresh* value should be explicitly set to one.
2452  *   - The *tx_free_thresh* value indicates the [minimum] number of network
2453  *     buffers that must be pending in the transmit ring to trigger their
2454  *     [implicit] freeing by the driver transmit function.
2455  *   - The *tx_rs_thresh* value indicates the [minimum] number of transmit
2456  *     descriptors that must be pending in the transmit ring before setting the
2457  *     RS bit on a descriptor by the driver transmit function.
2458  *     The *tx_rs_thresh* value should be less or equal then
2459  *     *tx_free_thresh* value, and both of them should be less then
2460  *     *nb_tx_desc* - 3.
2461  *   - The *offloads* member contains Tx offloads to be enabled.
2462  *     If an offloading set in tx_conf->offloads
2463  *     hasn't been set in the input argument eth_conf->txmode.offloads
2464  *     to rte_eth_dev_configure(), it is a new added offloading, it must be
2465  *     per-queue type and it is enabled for the queue.
2466  *     No need to repeat any bit in tx_conf->offloads which has already been
2467  *     enabled in rte_eth_dev_configure() at port level. An offloading enabled
2468  *     at port level can't be disabled at queue level.
2469  *
2470  *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
2471  *     the transmit function to use default values.
2472  * @return
2473  *   - 0: Success, the transmit queue is correctly set up.
2474  *   - -ENOMEM: Unable to allocate the transmit ring descriptors.
2475  */
2476 int rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2477 		uint16_t nb_tx_desc, unsigned int socket_id,
2478 		const struct rte_eth_txconf *tx_conf);
2479 
2480 /**
2481  * @warning
2482  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2483  *
2484  * Allocate and set up a transmit hairpin queue for an Ethernet device.
2485  *
2486  * @param port_id
2487  *   The port identifier of the Ethernet device.
2488  * @param tx_queue_id
2489  *   The index of the transmit queue to set up.
2490  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2491  *   to rte_eth_dev_configure().
2492  * @param nb_tx_desc
2493  *   The number of transmit descriptors to allocate for the transmit ring.
2494  *   0 to set default PMD value.
2495  * @param conf
2496  *   The hairpin configuration.
2497  *
2498  * @return
2499  *   - (0) if successful.
2500  *   - (-ENODEV) if *port_id* is invalid.
2501  *   - (-ENOTSUP) if hardware doesn't support.
2502  *   - (-EINVAL) if bad parameter.
2503  *   - (-ENOMEM) if unable to allocate the resources.
2504  */
2505 __rte_experimental
2506 int rte_eth_tx_hairpin_queue_setup
2507 	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
2508 	 const struct rte_eth_hairpin_conf *conf);
2509 
2510 /**
2511  * @warning
2512  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2513  *
2514  * Get all the hairpin peer Rx / Tx ports of the current port.
2515  * The caller should ensure that the array is large enough to save the ports
2516  * list.
2517  *
2518  * @param port_id
2519  *   The port identifier of the Ethernet device.
2520  * @param peer_ports
2521  *   Pointer to the array to store the peer ports list.
2522  * @param len
2523  *   Length of the array to store the port identifiers.
2524  * @param direction
2525  *   Current port to peer port direction
2526  *   positive - current used as Tx to get all peer Rx ports.
2527  *   zero - current used as Rx to get all peer Tx ports.
2528  *
2529  * @return
2530  *   - (0 or positive) actual peer ports number.
2531  *   - (-EINVAL) if bad parameter.
2532  *   - (-ENODEV) if *port_id* invalid
2533  *   - (-ENOTSUP) if hardware doesn't support.
2534  *   - Others detailed errors from PMDs.
2535  */
2536 __rte_experimental
2537 int rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
2538 				   size_t len, uint32_t direction);
2539 
2540 /**
2541  * @warning
2542  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2543  *
2544  * Bind all hairpin Tx queues of one port to the Rx queues of the peer port.
2545  * It is only allowed to call this function after all hairpin queues are
2546  * configured properly and the devices are in started state.
2547  *
2548  * @param tx_port
2549  *   The identifier of the Tx port.
2550  * @param rx_port
2551  *   The identifier of peer Rx port.
2552  *   RTE_MAX_ETHPORTS is allowed for the traversal of all devices.
2553  *   Rx port ID could have the same value as Tx port ID.
2554  *
2555  * @return
2556  *   - (0) if successful.
2557  *   - (-ENODEV) if Tx port ID is invalid.
2558  *   - (-EBUSY) if device is not in started state.
2559  *   - (-ENOTSUP) if hardware doesn't support.
2560  *   - Others detailed errors from PMDs.
2561  */
2562 __rte_experimental
2563 int rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port);
2564 
2565 /**
2566  * @warning
2567  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
2568  *
2569  * Unbind all hairpin Tx queues of one port from the Rx queues of the peer port.
2570  * This should be called before closing the Tx or Rx devices, if the bind
2571  * function is called before.
2572  * After unbinding the hairpin ports pair, it is allowed to bind them again.
2573  * Changing queues configuration should be after stopping the device(s).
2574  *
2575  * @param tx_port
2576  *   The identifier of the Tx port.
2577  * @param rx_port
2578  *   The identifier of peer Rx port.
2579  *   RTE_MAX_ETHPORTS is allowed for traversal of all devices.
2580  *   Rx port ID could have the same value as Tx port ID.
2581  *
2582  * @return
2583  *   - (0) if successful.
2584  *   - (-ENODEV) if Tx port ID is invalid.
2585  *   - (-EBUSY) if device is in stopped state.
2586  *   - (-ENOTSUP) if hardware doesn't support.
2587  *   - Others detailed errors from PMDs.
2588  */
2589 __rte_experimental
2590 int rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port);
2591 
2592 /**
2593  * Return the NUMA socket to which an Ethernet device is connected
2594  *
2595  * @param port_id
2596  *   The port identifier of the Ethernet device
2597  * @return
2598  *   - The NUMA socket ID which the Ethernet device is connected to.
2599  *   - -1 (which translates to SOCKET_ID_ANY) if the socket could not be
2600  *     determined. rte_errno is then set to:
2601  *     - EINVAL is the port_id is invalid,
2602  *     - 0 is the socket could not be determined,
2603  */
2604 int rte_eth_dev_socket_id(uint16_t port_id);
2605 
2606 /**
2607  * Check if port_id of device is attached
2608  *
2609  * @param port_id
2610  *   The port identifier of the Ethernet device
2611  * @return
2612  *   - 0 if port is out of range or not attached
2613  *   - 1 if device is attached
2614  */
2615 int rte_eth_dev_is_valid_port(uint16_t port_id);
2616 
2617 /**
2618  * Start specified Rx queue of a port. It is used when rx_deferred_start
2619  * flag of the specified queue is true.
2620  *
2621  * @param port_id
2622  *   The port identifier of the Ethernet device
2623  * @param rx_queue_id
2624  *   The index of the Rx queue to update the ring.
2625  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2626  *   to rte_eth_dev_configure().
2627  * @return
2628  *   - 0: Success, the receive queue is started.
2629  *   - -ENODEV: if *port_id* is invalid.
2630  *   - -EINVAL: The queue_id out of range or belong to hairpin.
2631  *   - -EIO: if device is removed.
2632  *   - -ENOTSUP: The function not supported in PMD.
2633  */
2634 int rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id);
2635 
2636 /**
2637  * Stop specified Rx queue of a port
2638  *
2639  * @param port_id
2640  *   The port identifier of the Ethernet device
2641  * @param rx_queue_id
2642  *   The index of the Rx queue to update the ring.
2643  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
2644  *   to rte_eth_dev_configure().
2645  * @return
2646  *   - 0: Success, the receive queue is stopped.
2647  *   - -ENODEV: if *port_id* is invalid.
2648  *   - -EINVAL: The queue_id out of range or belong to hairpin.
2649  *   - -EIO: if device is removed.
2650  *   - -ENOTSUP: The function not supported in PMD.
2651  */
2652 int rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id);
2653 
2654 /**
2655  * Start Tx for specified queue of a port. It is used when tx_deferred_start
2656  * flag of the specified queue is true.
2657  *
2658  * @param port_id
2659  *   The port identifier of the Ethernet device
2660  * @param tx_queue_id
2661  *   The index of the Tx queue to update the ring.
2662  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2663  *   to rte_eth_dev_configure().
2664  * @return
2665  *   - 0: Success, the transmit queue is started.
2666  *   - -ENODEV: if *port_id* is invalid.
2667  *   - -EINVAL: The queue_id out of range or belong to hairpin.
2668  *   - -EIO: if device is removed.
2669  *   - -ENOTSUP: The function not supported in PMD.
2670  */
2671 int rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id);
2672 
2673 /**
2674  * Stop specified Tx queue of a port
2675  *
2676  * @param port_id
2677  *   The port identifier of the Ethernet device
2678  * @param tx_queue_id
2679  *   The index of the Tx queue to update the ring.
2680  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
2681  *   to rte_eth_dev_configure().
2682  * @return
2683  *   - 0: Success, the transmit queue is stopped.
2684  *   - -ENODEV: if *port_id* is invalid.
2685  *   - -EINVAL: The queue_id out of range or belong to hairpin.
2686  *   - -EIO: if device is removed.
2687  *   - -ENOTSUP: The function not supported in PMD.
2688  */
2689 int rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id);
2690 
2691 /**
2692  * Start an Ethernet device.
2693  *
2694  * The device start step is the last one and consists of setting the configured
2695  * offload features and in starting the transmit and the receive units of the
2696  * device.
2697  *
2698  * Device RTE_ETH_DEV_NOLIVE_MAC_ADDR flag causes MAC address to be set before
2699  * PMD port start callback function is invoked.
2700  *
2701  * On success, all basic functions exported by the Ethernet API (link status,
2702  * receive/transmit, and so on) can be invoked.
2703  *
2704  * @param port_id
2705  *   The port identifier of the Ethernet device.
2706  * @return
2707  *   - 0: Success, Ethernet device started.
2708  *   - <0: Error code of the driver device start function.
2709  */
2710 int rte_eth_dev_start(uint16_t port_id);
2711 
2712 /**
2713  * Stop an Ethernet device. The device can be restarted with a call to
2714  * rte_eth_dev_start()
2715  *
2716  * @param port_id
2717  *   The port identifier of the Ethernet device.
2718  * @return
2719  *   - 0: Success, Ethernet device stopped.
2720  *   - <0: Error code of the driver device stop function.
2721  */
2722 int rte_eth_dev_stop(uint16_t port_id);
2723 
2724 /**
2725  * Link up an Ethernet device.
2726  *
2727  * Set device link up will re-enable the device Rx/Tx
2728  * functionality after it is previously set device linked down.
2729  *
2730  * @param port_id
2731  *   The port identifier of the Ethernet device.
2732  * @return
2733  *   - 0: Success, Ethernet device linked up.
2734  *   - <0: Error code of the driver device link up function.
2735  */
2736 int rte_eth_dev_set_link_up(uint16_t port_id);
2737 
2738 /**
2739  * Link down an Ethernet device.
2740  * The device Rx/Tx functionality will be disabled if success,
2741  * and it can be re-enabled with a call to
2742  * rte_eth_dev_set_link_up()
2743  *
2744  * @param port_id
2745  *   The port identifier of the Ethernet device.
2746  */
2747 int rte_eth_dev_set_link_down(uint16_t port_id);
2748 
2749 /**
2750  * Close a stopped Ethernet device. The device cannot be restarted!
2751  * The function frees all port resources.
2752  *
2753  * @param port_id
2754  *   The port identifier of the Ethernet device.
2755  * @return
2756  *   - Zero if the port is closed successfully.
2757  *   - Negative if something went wrong.
2758  */
2759 int rte_eth_dev_close(uint16_t port_id);
2760 
2761 /**
2762  * Reset a Ethernet device and keep its port ID.
2763  *
2764  * When a port has to be reset passively, the DPDK application can invoke
2765  * this function. For example when a PF is reset, all its VFs should also
2766  * be reset. Normally a DPDK application can invoke this function when
2767  * RTE_ETH_EVENT_INTR_RESET event is detected, but can also use it to start
2768  * a port reset in other circumstances.
2769  *
2770  * When this function is called, it first stops the port and then calls the
2771  * PMD specific dev_uninit( ) and dev_init( ) to return the port to initial
2772  * state, in which no Tx and Rx queues are setup, as if the port has been
2773  * reset and not started. The port keeps the port ID it had before the
2774  * function call.
2775  *
2776  * After calling rte_eth_dev_reset( ), the application should use
2777  * rte_eth_dev_configure( ), rte_eth_rx_queue_setup( ),
2778  * rte_eth_tx_queue_setup( ), and rte_eth_dev_start( )
2779  * to reconfigure the device as appropriate.
2780  *
2781  * Note: To avoid unexpected behavior, the application should stop calling
2782  * Tx and Rx functions before calling rte_eth_dev_reset( ). For thread
2783  * safety, all these controlling functions should be called from the same
2784  * thread.
2785  *
2786  * @param port_id
2787  *   The port identifier of the Ethernet device.
2788  *
2789  * @return
2790  *   - (0) if successful.
2791  *   - (-ENODEV) if *port_id* is invalid.
2792  *   - (-ENOTSUP) if hardware doesn't support this function.
2793  *   - (-EPERM) if not ran from the primary process.
2794  *   - (-EIO) if re-initialisation failed or device is removed.
2795  *   - (-ENOMEM) if the reset failed due to OOM.
2796  *   - (-EAGAIN) if the reset temporarily failed and should be retried later.
2797  */
2798 int rte_eth_dev_reset(uint16_t port_id);
2799 
2800 /**
2801  * Enable receipt in promiscuous mode for an Ethernet device.
2802  *
2803  * @param port_id
2804  *   The port identifier of the Ethernet device.
2805  * @return
2806  *   - (0) if successful.
2807  *   - (-ENOTSUP) if support for promiscuous_enable() does not exist
2808  *     for the device.
2809  *   - (-ENODEV) if *port_id* invalid.
2810  */
2811 int rte_eth_promiscuous_enable(uint16_t port_id);
2812 
2813 /**
2814  * Disable receipt in promiscuous mode for an Ethernet device.
2815  *
2816  * @param port_id
2817  *   The port identifier of the Ethernet device.
2818  * @return
2819  *   - (0) if successful.
2820  *   - (-ENOTSUP) if support for promiscuous_disable() does not exist
2821  *     for the device.
2822  *   - (-ENODEV) if *port_id* invalid.
2823  */
2824 int rte_eth_promiscuous_disable(uint16_t port_id);
2825 
2826 /**
2827  * Return the value of promiscuous mode for an Ethernet device.
2828  *
2829  * @param port_id
2830  *   The port identifier of the Ethernet device.
2831  * @return
2832  *   - (1) if promiscuous is enabled
2833  *   - (0) if promiscuous is disabled.
2834  *   - (-1) on error
2835  */
2836 int rte_eth_promiscuous_get(uint16_t port_id);
2837 
2838 /**
2839  * Enable the receipt of any multicast frame by an Ethernet device.
2840  *
2841  * @param port_id
2842  *   The port identifier of the Ethernet device.
2843  * @return
2844  *   - (0) if successful.
2845  *   - (-ENOTSUP) if support for allmulticast_enable() does not exist
2846  *     for the device.
2847  *   - (-ENODEV) if *port_id* invalid.
2848  */
2849 int rte_eth_allmulticast_enable(uint16_t port_id);
2850 
2851 /**
2852  * Disable the receipt of all multicast frames by an Ethernet device.
2853  *
2854  * @param port_id
2855  *   The port identifier of the Ethernet device.
2856  * @return
2857  *   - (0) if successful.
2858  *   - (-ENOTSUP) if support for allmulticast_disable() does not exist
2859  *     for the device.
2860  *   - (-ENODEV) if *port_id* invalid.
2861  */
2862 int rte_eth_allmulticast_disable(uint16_t port_id);
2863 
2864 /**
2865  * Return the value of allmulticast mode for an Ethernet device.
2866  *
2867  * @param port_id
2868  *   The port identifier of the Ethernet device.
2869  * @return
2870  *   - (1) if allmulticast is enabled
2871  *   - (0) if allmulticast is disabled.
2872  *   - (-1) on error
2873  */
2874 int rte_eth_allmulticast_get(uint16_t port_id);
2875 
2876 /**
2877  * Retrieve the link status (up/down), the duplex mode (half/full),
2878  * the negotiation (auto/fixed), and if available, the speed (Mbps).
2879  *
2880  * It might need to wait up to 9 seconds.
2881  * @see rte_eth_link_get_nowait.
2882  *
2883  * @param port_id
2884  *   The port identifier of the Ethernet device.
2885  * @param link
2886  *   Link information written back.
2887  * @return
2888  *   - (0) if successful.
2889  *   - (-ENOTSUP) if the function is not supported in PMD.
2890  *   - (-ENODEV) if *port_id* invalid.
2891  *   - (-EINVAL) if bad parameter.
2892  */
2893 int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *link);
2894 
2895 /**
2896  * Retrieve the link status (up/down), the duplex mode (half/full),
2897  * the negotiation (auto/fixed), and if available, the speed (Mbps).
2898  *
2899  * @param port_id
2900  *   The port identifier of the Ethernet device.
2901  * @param link
2902  *   Link information written back.
2903  * @return
2904  *   - (0) if successful.
2905  *   - (-ENOTSUP) if the function is not supported in PMD.
2906  *   - (-ENODEV) if *port_id* invalid.
2907  *   - (-EINVAL) if bad parameter.
2908  */
2909 int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *link);
2910 
2911 /**
2912  * @warning
2913  * @b EXPERIMENTAL: this API may change without prior notice.
2914  *
2915  * The function converts a link_speed to a string. It handles all special
2916  * values like unknown or none speed.
2917  *
2918  * @param link_speed
2919  *   link_speed of rte_eth_link struct
2920  * @return
2921  *   Link speed in textual format. It's pointer to immutable memory.
2922  *   No free is required.
2923  */
2924 __rte_experimental
2925 const char *rte_eth_link_speed_to_str(uint32_t link_speed);
2926 
2927 /**
2928  * @warning
2929  * @b EXPERIMENTAL: this API may change without prior notice.
2930  *
2931  * The function converts a rte_eth_link struct representing a link status to
2932  * a string.
2933  *
2934  * @param str
2935  *   A pointer to a string to be filled with textual representation of
2936  *   device status. At least RTE_ETH_LINK_MAX_STR_LEN bytes should be allocated to
2937  *   store default link status text.
2938  * @param len
2939  *   Length of available memory at 'str' string.
2940  * @param eth_link
2941  *   Link status returned by rte_eth_link_get function
2942  * @return
2943  *   Number of bytes written to str array or -EINVAL if bad parameter.
2944  */
2945 __rte_experimental
2946 int rte_eth_link_to_str(char *str, size_t len,
2947 			const struct rte_eth_link *eth_link);
2948 
2949 /**
2950  * Retrieve the general I/O statistics of an Ethernet device.
2951  *
2952  * @param port_id
2953  *   The port identifier of the Ethernet device.
2954  * @param stats
2955  *   A pointer to a structure of type *rte_eth_stats* to be filled with
2956  *   the values of device counters for the following set of statistics:
2957  *   - *ipackets* with the total of successfully received packets.
2958  *   - *opackets* with the total of successfully transmitted packets.
2959  *   - *ibytes*   with the total of successfully received bytes.
2960  *   - *obytes*   with the total of successfully transmitted bytes.
2961  *   - *ierrors*  with the total of erroneous received packets.
2962  *   - *oerrors*  with the total of failed transmitted packets.
2963  * @return
2964  *   Zero if successful. Non-zero otherwise.
2965  */
2966 int rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats);
2967 
2968 /**
2969  * Reset the general I/O statistics of an Ethernet device.
2970  *
2971  * @param port_id
2972  *   The port identifier of the Ethernet device.
2973  * @return
2974  *   - (0) if device notified to reset stats.
2975  *   - (-ENOTSUP) if hardware doesn't support.
2976  *   - (-ENODEV) if *port_id* invalid.
2977  *   - (<0): Error code of the driver stats reset function.
2978  */
2979 int rte_eth_stats_reset(uint16_t port_id);
2980 
2981 /**
2982  * Retrieve names of extended statistics of an Ethernet device.
2983  *
2984  * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
2985  * by array index:
2986  *  xstats_names[i].name => xstats[i].value
2987  *
2988  * And the array index is same with id field of 'struct rte_eth_xstat':
2989  *  xstats[i].id == i
2990  *
2991  * This assumption makes key-value pair matching less flexible but simpler.
2992  *
2993  * @param port_id
2994  *   The port identifier of the Ethernet device.
2995  * @param xstats_names
2996  *   An rte_eth_xstat_name array of at least *size* elements to
2997  *   be filled. If set to NULL, the function returns the required number
2998  *   of elements.
2999  * @param size
3000  *   The size of the xstats_names array (number of elements).
3001  * @return
3002  *   - A positive value lower or equal to size: success. The return value
3003  *     is the number of entries filled in the stats table.
3004  *   - A positive value higher than size: error, the given statistics table
3005  *     is too small. The return value corresponds to the size that should
3006  *     be given to succeed. The entries in the table are not valid and
3007  *     shall not be used by the caller.
3008  *   - A negative value on error (invalid port ID).
3009  */
3010 int rte_eth_xstats_get_names(uint16_t port_id,
3011 		struct rte_eth_xstat_name *xstats_names,
3012 		unsigned int size);
3013 
3014 /**
3015  * Retrieve extended statistics of an Ethernet device.
3016  *
3017  * There is an assumption that 'xstat_names' and 'xstats' arrays are matched
3018  * by array index:
3019  *  xstats_names[i].name => xstats[i].value
3020  *
3021  * And the array index is same with id field of 'struct rte_eth_xstat':
3022  *  xstats[i].id == i
3023  *
3024  * This assumption makes key-value pair matching less flexible but simpler.
3025  *
3026  * @param port_id
3027  *   The port identifier of the Ethernet device.
3028  * @param xstats
3029  *   A pointer to a table of structure of type *rte_eth_xstat*
3030  *   to be filled with device statistics ids and values.
3031  *   This parameter can be set to NULL if and only if n is 0.
3032  * @param n
3033  *   The size of the xstats array (number of elements).
3034  *   If lower than the required number of elements, the function returns
3035  *   the required number of elements.
3036  *   If equal to zero, the xstats must be NULL, the function returns the
3037  *   required number of elements.
3038  * @return
3039  *   - A positive value lower or equal to n: success. The return value
3040  *     is the number of entries filled in the stats table.
3041  *   - A positive value higher than n: error, the given statistics table
3042  *     is too small. The return value corresponds to the size that should
3043  *     be given to succeed. The entries in the table are not valid and
3044  *     shall not be used by the caller.
3045  *   - A negative value on error (invalid port ID).
3046  */
3047 int rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
3048 		unsigned int n);
3049 
3050 /**
3051  * Retrieve names of extended statistics of an Ethernet device.
3052  *
3053  * @param port_id
3054  *   The port identifier of the Ethernet device.
3055  * @param xstats_names
3056  *   Array to be filled in with names of requested device statistics.
3057  *   Must not be NULL if @p ids are specified (not NULL).
3058  * @param size
3059  *   Number of elements in @p xstats_names array (if not NULL) and in
3060  *   @p ids array (if not NULL). Must be 0 if both array pointers are NULL.
3061  * @param ids
3062  *   IDs array given by app to retrieve specific statistics. May be NULL to
3063  *   retrieve names of all available statistics or, if @p xstats_names is
3064  *   NULL as well, just the number of available statistics.
3065  * @return
3066  *   - A positive value lower or equal to size: success. The return value
3067  *     is the number of entries filled in the stats table.
3068  *   - A positive value higher than size: success. The given statistics table
3069  *     is too small. The return value corresponds to the size that should
3070  *     be given to succeed. The entries in the table are not valid and
3071  *     shall not be used by the caller.
3072  *   - A negative value on error.
3073  */
3074 int
3075 rte_eth_xstats_get_names_by_id(uint16_t port_id,
3076 	struct rte_eth_xstat_name *xstats_names, unsigned int size,
3077 	uint64_t *ids);
3078 
3079 /**
3080  * Retrieve extended statistics of an Ethernet device.
3081  *
3082  * @param port_id
3083  *   The port identifier of the Ethernet device.
3084  * @param ids
3085  *   IDs array given by app to retrieve specific statistics. May be NULL to
3086  *   retrieve all available statistics or, if @p values is NULL as well,
3087  *   just the number of available statistics.
3088  * @param values
3089  *   Array to be filled in with requested device statistics.
3090  *   Must not be NULL if ids are specified (not NULL).
3091  * @param size
3092  *   Number of elements in @p values array (if not NULL) and in @p ids
3093  *   array (if not NULL). Must be 0 if both array pointers are NULL.
3094  * @return
3095  *   - A positive value lower or equal to size: success. The return value
3096  *     is the number of entries filled in the stats table.
3097  *   - A positive value higher than size: success: The given statistics table
3098  *     is too small. The return value corresponds to the size that should
3099  *     be given to succeed. The entries in the table are not valid and
3100  *     shall not be used by the caller.
3101  *   - A negative value on error.
3102  */
3103 int rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
3104 			     uint64_t *values, unsigned int size);
3105 
3106 /**
3107  * Gets the ID of a statistic from its name.
3108  *
3109  * This function searches for the statistics using string compares, and
3110  * as such should not be used on the fast-path. For fast-path retrieval of
3111  * specific statistics, store the ID as provided in *id* from this function,
3112  * and pass the ID to rte_eth_xstats_get()
3113  *
3114  * @param port_id The port to look up statistics from
3115  * @param xstat_name The name of the statistic to return
3116  * @param[out] id A pointer to an app-supplied uint64_t which should be
3117  *                set to the ID of the stat if the stat exists.
3118  * @return
3119  *    0 on success
3120  *    -ENODEV for invalid port_id,
3121  *    -EIO if device is removed,
3122  *    -EINVAL if the xstat_name doesn't exist in port_id
3123  *    -ENOMEM if bad parameter.
3124  */
3125 int rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
3126 		uint64_t *id);
3127 
3128 /**
3129  * Reset extended statistics of an Ethernet device.
3130  *
3131  * @param port_id
3132  *   The port identifier of the Ethernet device.
3133  * @return
3134  *   - (0) if device notified to reset extended stats.
3135  *   - (-ENOTSUP) if pmd doesn't support both
3136  *     extended stats and basic stats reset.
3137  *   - (-ENODEV) if *port_id* invalid.
3138  *   - (<0): Error code of the driver xstats reset function.
3139  */
3140 int rte_eth_xstats_reset(uint16_t port_id);
3141 
3142 /**
3143  *  Set a mapping for the specified transmit queue to the specified per-queue
3144  *  statistics counter.
3145  *
3146  * @param port_id
3147  *   The port identifier of the Ethernet device.
3148  * @param tx_queue_id
3149  *   The index of the transmit queue for which a queue stats mapping is required.
3150  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3151  *   to rte_eth_dev_configure().
3152  * @param stat_idx
3153  *   The per-queue packet statistics functionality number that the transmit
3154  *   queue is to be assigned.
3155  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3156  *   Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256.
3157  * @return
3158  *   Zero if successful. Non-zero otherwise.
3159  */
3160 int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
3161 		uint16_t tx_queue_id, uint8_t stat_idx);
3162 
3163 /**
3164  *  Set a mapping for the specified receive queue to the specified per-queue
3165  *  statistics counter.
3166  *
3167  * @param port_id
3168  *   The port identifier of the Ethernet device.
3169  * @param rx_queue_id
3170  *   The index of the receive queue for which a queue stats mapping is required.
3171  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3172  *   to rte_eth_dev_configure().
3173  * @param stat_idx
3174  *   The per-queue packet statistics functionality number that the receive
3175  *   queue is to be assigned.
3176  *   The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
3177  *   Max RTE_ETHDEV_QUEUE_STAT_CNTRS being 256.
3178  * @return
3179  *   Zero if successful. Non-zero otherwise.
3180  */
3181 int rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id,
3182 					   uint16_t rx_queue_id,
3183 					   uint8_t stat_idx);
3184 
3185 /**
3186  * Retrieve the Ethernet address of an Ethernet device.
3187  *
3188  * @param port_id
3189  *   The port identifier of the Ethernet device.
3190  * @param mac_addr
3191  *   A pointer to a structure of type *ether_addr* to be filled with
3192  *   the Ethernet address of the Ethernet device.
3193  * @return
3194  *   - (0) if successful
3195  *   - (-ENODEV) if *port_id* invalid.
3196  *   - (-EINVAL) if bad parameter.
3197  */
3198 int rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr);
3199 
3200 /**
3201  * @warning
3202  * @b EXPERIMENTAL: this API may change without prior notice
3203  *
3204  * Retrieve the Ethernet addresses of an Ethernet device.
3205  *
3206  * @param port_id
3207  *   The port identifier of the Ethernet device.
3208  * @param ma
3209  *   A pointer to an array of structures of type *ether_addr* to be filled with
3210  *   the Ethernet addresses of the Ethernet device.
3211  * @param num
3212  *   Number of elements in the @p ma array.
3213  *   Note that  rte_eth_dev_info::max_mac_addrs can be used to retrieve
3214  *   max number of Ethernet addresses for given port.
3215  * @return
3216  *   - number of retrieved addresses if successful
3217  *   - (-ENODEV) if *port_id* invalid.
3218  *   - (-EINVAL) if bad parameter.
3219  */
3220 __rte_experimental
3221 int rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
3222 	unsigned int num);
3223 
3224 /**
3225  * Retrieve the contextual information of an Ethernet device.
3226  *
3227  * As part of this function, a number of of fields in dev_info will be
3228  * initialized as follows:
3229  *
3230  * rx_desc_lim = lim
3231  * tx_desc_lim = lim
3232  *
3233  * Where lim is defined within the rte_eth_dev_info_get as
3234  *
3235  *  const struct rte_eth_desc_lim lim = {
3236  *      .nb_max = UINT16_MAX,
3237  *      .nb_min = 0,
3238  *      .nb_align = 1,
3239  *	.nb_seg_max = UINT16_MAX,
3240  *	.nb_mtu_seg_max = UINT16_MAX,
3241  *  };
3242  *
3243  * device = dev->device
3244  * min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN
3245  * max_mtu = UINT16_MAX
3246  *
3247  * The following fields will be populated if support for dev_infos_get()
3248  * exists for the device and the rte_eth_dev 'dev' has been populated
3249  * successfully with a call to it:
3250  *
3251  * driver_name = rte_driver_name(rte_dev_driver(dev->device));
3252  * nb_rx_queues = dev->data->nb_rx_queues
3253  * nb_tx_queues = dev->data->nb_tx_queues
3254  * dev_flags = &dev->data->dev_flags
3255  *
3256  * @param port_id
3257  *   The port identifier of the Ethernet device.
3258  * @param dev_info
3259  *   A pointer to a structure of type *rte_eth_dev_info* to be filled with
3260  *   the contextual information of the Ethernet device.
3261  * @return
3262  *   - (0) if successful.
3263  *   - (-ENOTSUP) if support for dev_infos_get() does not exist for the device.
3264  *   - (-ENODEV) if *port_id* invalid.
3265  *   - (-EINVAL) if bad parameter.
3266  */
3267 int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info);
3268 
3269 /**
3270  * @warning
3271  * @b EXPERIMENTAL: this API may change without prior notice.
3272  *
3273  * Retrieve the configuration of an Ethernet device.
3274  *
3275  * @param port_id
3276  *   The port identifier of the Ethernet device.
3277  * @param dev_conf
3278  *   Location for Ethernet device configuration to be filled in.
3279  * @return
3280  *   - (0) if successful.
3281  *   - (-ENODEV) if *port_id* invalid.
3282  *   - (-EINVAL) if bad parameter.
3283  */
3284 __rte_experimental
3285 int rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf);
3286 
3287 /**
3288  * Retrieve the firmware version of a device.
3289  *
3290  * @param port_id
3291  *   The port identifier of the device.
3292  * @param fw_version
3293  *   A pointer to a string array storing the firmware version of a device,
3294  *   the string includes terminating null. This pointer is allocated by caller.
3295  * @param fw_size
3296  *   The size of the string array pointed by fw_version, which should be
3297  *   large enough to store firmware version of the device.
3298  * @return
3299  *   - (0) if successful.
3300  *   - (-ENOTSUP) if operation is not supported.
3301  *   - (-ENODEV) if *port_id* invalid.
3302  *   - (-EIO) if device is removed.
3303  *   - (-EINVAL) if bad parameter.
3304  *   - (>0) if *fw_size* is not enough to store firmware version, return
3305  *          the size of the non truncated string.
3306  */
3307 int rte_eth_dev_fw_version_get(uint16_t port_id,
3308 			       char *fw_version, size_t fw_size);
3309 
3310 /**
3311  * Retrieve the supported packet types of an Ethernet device.
3312  *
3313  * When a packet type is announced as supported, it *must* be recognized by
3314  * the PMD. For instance, if RTE_PTYPE_L2_ETHER, RTE_PTYPE_L2_ETHER_VLAN
3315  * and RTE_PTYPE_L3_IPV4 are announced, the PMD must return the following
3316  * packet types for these packets:
3317  * - Ether/IPv4              -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
3318  * - Ether/VLAN/IPv4         -> RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4
3319  * - Ether/[anything else]   -> RTE_PTYPE_L2_ETHER
3320  * - Ether/VLAN/[anything else] -> RTE_PTYPE_L2_ETHER_VLAN
3321  *
3322  * When a packet is received by a PMD, the most precise type must be
3323  * returned among the ones supported. However a PMD is allowed to set
3324  * packet type that is not in the supported list, at the condition that it
3325  * is more precise. Therefore, a PMD announcing no supported packet types
3326  * can still set a matching packet type in a received packet.
3327  *
3328  * @note
3329  *   Better to invoke this API after the device is already started or Rx burst
3330  *   function is decided, to obtain correct supported ptypes.
3331  * @note
3332  *   if a given PMD does not report what ptypes it supports, then the supported
3333  *   ptype count is reported as 0.
3334  * @param port_id
3335  *   The port identifier of the Ethernet device.
3336  * @param ptype_mask
3337  *   A hint of what kind of packet type which the caller is interested in.
3338  * @param ptypes
3339  *   An array pointer to store adequate packet types, allocated by caller.
3340  * @param num
3341  *  Size of the array pointed by param ptypes.
3342  * @return
3343  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
3344  *           only num entries will be filled into the ptypes array, but the full
3345  *           count of supported ptypes will be returned.
3346  *   - (-ENODEV) if *port_id* invalid.
3347  *   - (-EINVAL) if bad parameter.
3348  */
3349 int rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3350 				     uint32_t *ptypes, int num);
3351 /**
3352  * Inform Ethernet device about reduced range of packet types to handle.
3353  *
3354  * Application can use this function to set only specific ptypes that it's
3355  * interested. This information can be used by the PMD to optimize Rx path.
3356  *
3357  * The function accepts an array `set_ptypes` allocated by the caller to
3358  * store the packet types set by the driver, the last element of the array
3359  * is set to RTE_PTYPE_UNKNOWN. The size of the `set_ptype` array should be
3360  * `rte_eth_dev_get_supported_ptypes() + 1` else it might only be filled
3361  * partially.
3362  *
3363  * @param port_id
3364  *   The port identifier of the Ethernet device.
3365  * @param ptype_mask
3366  *   The ptype family that application is interested in should be bitwise OR of
3367  *   RTE_PTYPE_*_MASK or 0.
3368  * @param set_ptypes
3369  *   An array pointer to store set packet types, allocated by caller. The
3370  *   function marks the end of array with RTE_PTYPE_UNKNOWN.
3371  * @param num
3372  *   Size of the array pointed by param ptypes.
3373  *   Should be rte_eth_dev_get_supported_ptypes() + 1 to accommodate the
3374  *   set ptypes.
3375  * @return
3376  *   - (0) if Success.
3377  *   - (-ENODEV) if *port_id* invalid.
3378  *   - (-EINVAL) if *ptype_mask* is invalid (or) set_ptypes is NULL and
3379  *     num > 0.
3380  */
3381 int rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3382 			   uint32_t *set_ptypes, unsigned int num);
3383 
3384 /**
3385  * Retrieve the MTU of an Ethernet device.
3386  *
3387  * @param port_id
3388  *   The port identifier of the Ethernet device.
3389  * @param mtu
3390  *   A pointer to a uint16_t where the retrieved MTU is to be stored.
3391  * @return
3392  *   - (0) if successful.
3393  *   - (-ENODEV) if *port_id* invalid.
3394  *   - (-EINVAL) if bad parameter.
3395  */
3396 int rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu);
3397 
3398 /**
3399  * Change the MTU of an Ethernet device.
3400  *
3401  * @param port_id
3402  *   The port identifier of the Ethernet device.
3403  * @param mtu
3404  *   A uint16_t for the MTU to be applied.
3405  * @return
3406  *   - (0) if successful.
3407  *   - (-ENOTSUP) if operation is not supported.
3408  *   - (-ENODEV) if *port_id* invalid.
3409  *   - (-EIO) if device is removed.
3410  *   - (-EINVAL) if *mtu* invalid, validation of mtu can occur within
3411  *     rte_eth_dev_set_mtu if dev_infos_get is supported by the device or
3412  *     when the mtu is set using dev->dev_ops->mtu_set.
3413  *   - (-EBUSY) if operation is not allowed when the port is running
3414  */
3415 int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu);
3416 
3417 /**
3418  * Enable/Disable hardware filtering by an Ethernet device of received
3419  * VLAN packets tagged with a given VLAN Tag Identifier.
3420  *
3421  * @param port_id
3422  *   The port identifier of the Ethernet device.
3423  * @param vlan_id
3424  *   The VLAN Tag Identifier whose filtering must be enabled or disabled.
3425  * @param on
3426  *   If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*.
3427  *   Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
3428  * @return
3429  *   - (0) if successful.
3430  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3431  *   - (-ENODEV) if *port_id* invalid.
3432  *   - (-EIO) if device is removed.
3433  *   - (-ENOSYS) if VLAN filtering on *port_id* disabled.
3434  *   - (-EINVAL) if *vlan_id* > 4095.
3435  */
3436 int rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on);
3437 
3438 /**
3439  * Enable/Disable hardware VLAN Strip by a Rx queue of an Ethernet device.
3440  *
3441  * @param port_id
3442  *   The port identifier of the Ethernet device.
3443  * @param rx_queue_id
3444  *   The index of the receive queue for which a queue stats mapping is required.
3445  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
3446  *   to rte_eth_dev_configure().
3447  * @param on
3448  *   If 1, Enable VLAN Stripping of the receive queue of the Ethernet port.
3449  *   If 0, Disable VLAN Stripping of the receive queue of the Ethernet port.
3450  * @return
3451  *   - (0) if successful.
3452  *   - (-ENOTSUP) if hardware-assisted VLAN stripping not configured.
3453  *   - (-ENODEV) if *port_id* invalid.
3454  *   - (-EINVAL) if *rx_queue_id* invalid.
3455  */
3456 int rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3457 		int on);
3458 
3459 /**
3460  * Set the Outer VLAN Ether Type by an Ethernet device, it can be inserted to
3461  * the VLAN header.
3462  *
3463  * @param port_id
3464  *   The port identifier of the Ethernet device.
3465  * @param vlan_type
3466  *   The VLAN type.
3467  * @param tag_type
3468  *   The Tag Protocol ID
3469  * @return
3470  *   - (0) if successful.
3471  *   - (-ENOTSUP) if hardware-assisted VLAN TPID setup is not supported.
3472  *   - (-ENODEV) if *port_id* invalid.
3473  *   - (-EIO) if device is removed.
3474  */
3475 int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3476 				    enum rte_vlan_type vlan_type,
3477 				    uint16_t tag_type);
3478 
3479 /**
3480  * Set VLAN offload configuration on an Ethernet device.
3481  *
3482  * @param port_id
3483  *   The port identifier of the Ethernet device.
3484  * @param offload_mask
3485  *   The VLAN Offload bit mask can be mixed use with "OR"
3486  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3487  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3488  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3489  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3490  * @return
3491  *   - (0) if successful.
3492  *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
3493  *   - (-ENODEV) if *port_id* invalid.
3494  *   - (-EIO) if device is removed.
3495  */
3496 int rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask);
3497 
3498 /**
3499  * Read VLAN Offload configuration from an Ethernet device
3500  *
3501  * @param port_id
3502  *   The port identifier of the Ethernet device.
3503  * @return
3504  *   - (>0) if successful. Bit mask to indicate
3505  *       RTE_ETH_VLAN_STRIP_OFFLOAD
3506  *       RTE_ETH_VLAN_FILTER_OFFLOAD
3507  *       RTE_ETH_VLAN_EXTEND_OFFLOAD
3508  *       RTE_ETH_QINQ_STRIP_OFFLOAD
3509  *   - (-ENODEV) if *port_id* invalid.
3510  */
3511 int rte_eth_dev_get_vlan_offload(uint16_t port_id);
3512 
3513 /**
3514  * Set port based Tx VLAN insertion on or off.
3515  *
3516  * @param port_id
3517  *  The port identifier of the Ethernet device.
3518  * @param pvid
3519  *  Port based Tx VLAN identifier together with user priority.
3520  * @param on
3521  *  Turn on or off the port based Tx VLAN insertion.
3522  *
3523  * @return
3524  *   - (0) if successful.
3525  *   - negative if failed.
3526  */
3527 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
3528 
3529 /**
3530  * @warning
3531  * @b EXPERIMENTAL: this API may change without prior notice.
3532  *
3533  * Set Rx queue available descriptors threshold.
3534  *
3535  * @param port_id
3536  *  The port identifier of the Ethernet device.
3537  * @param queue_id
3538  *  The index of the receive queue.
3539  * @param avail_thresh
3540  *  The available descriptors threshold is percentage of Rx queue size
3541  *  which describes the availability of Rx queue for hardware.
3542  *  If the Rx queue availability is below it,
3543  *  the event RTE_ETH_EVENT_RX_AVAIL_THRESH is triggered.
3544  *  [1-99] to set a new available descriptors threshold.
3545  *  0 to disable threshold monitoring.
3546  *
3547  * @return
3548  *   - 0 if successful.
3549  *   - (-ENODEV) if @p port_id is invalid.
3550  *   - (-EINVAL) if bad parameter.
3551  *   - (-ENOTSUP) if available Rx descriptors threshold is not supported.
3552  *   - (-EIO) if device is removed.
3553  */
3554 __rte_experimental
3555 int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
3556 			       uint8_t avail_thresh);
3557 
3558 /**
3559  * @warning
3560  * @b EXPERIMENTAL: this API may change without prior notice.
3561  *
3562  * Find Rx queue with RTE_ETH_EVENT_RX_AVAIL_THRESH event pending.
3563  *
3564  * @param port_id
3565  *  The port identifier of the Ethernet device.
3566  * @param[inout] queue_id
3567  *  On input starting Rx queue index to search from.
3568  *  If the queue_id is bigger than maximum queue ID of the port,
3569  *  search is started from 0. So that application can keep calling
3570  *  this function to handle all pending events with a simple increment
3571  *  of queue_id on the next call.
3572  *  On output if return value is 1, Rx queue index with the event pending.
3573  * @param[out] avail_thresh
3574  *  Location for available descriptors threshold of the found Rx queue.
3575  *
3576  * @return
3577  *   - 1 if an Rx queue with pending event is found.
3578  *   - 0 if no Rx queue with pending event is found.
3579  *   - (-ENODEV) if @p port_id is invalid.
3580  *   - (-EINVAL) if bad parameter (e.g. @p queue_id is NULL).
3581  *   - (-ENOTSUP) if operation is not supported.
3582  *   - (-EIO) if device is removed.
3583  */
3584 __rte_experimental
3585 int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
3586 				 uint8_t *avail_thresh);
3587 
3588 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
3589 		void *userdata);
3590 
3591 /**
3592  * Structure used to buffer packets for future Tx
3593  * Used by APIs rte_eth_tx_buffer and rte_eth_tx_buffer_flush
3594  */
3595 struct rte_eth_dev_tx_buffer {
3596 	buffer_tx_error_fn error_callback;
3597 	void *error_userdata;
3598 	uint16_t size;           /**< Size of buffer for buffered Tx */
3599 	uint16_t length;         /**< Number of packets in the array */
3600 	/** Pending packets to be sent on explicit flush or when full */
3601 	struct rte_mbuf *pkts[];
3602 };
3603 
3604 /**
3605  * Calculate the size of the Tx buffer.
3606  *
3607  * @param sz
3608  *   Number of stored packets.
3609  */
3610 #define RTE_ETH_TX_BUFFER_SIZE(sz) \
3611 	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
3612 
3613 /**
3614  * Initialize default values for buffered transmitting
3615  *
3616  * @param buffer
3617  *   Tx buffer to be initialized.
3618  * @param size
3619  *   Buffer size
3620  * @return
3621  *   0 if no error
3622  */
3623 int
3624 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size);
3625 
3626 /**
3627  * Configure a callback for buffered packets which cannot be sent
3628  *
3629  * Register a specific callback to be called when an attempt is made to send
3630  * all packets buffered on an Ethernet port, but not all packets can
3631  * successfully be sent. The callback registered here will be called only
3632  * from calls to rte_eth_tx_buffer() and rte_eth_tx_buffer_flush() APIs.
3633  * The default callback configured for each queue by default just frees the
3634  * packets back to the calling mempool. If additional behaviour is required,
3635  * for example, to count dropped packets, or to retry transmission of packets
3636  * which cannot be sent, this function should be used to register a suitable
3637  * callback function to implement the desired behaviour.
3638  * The example callback "rte_eth_count_unsent_packet_callback()" is also
3639  * provided as reference.
3640  *
3641  * @param buffer
3642  *   The port identifier of the Ethernet device.
3643  * @param callback
3644  *   The function to be used as the callback.
3645  * @param userdata
3646  *   Arbitrary parameter to be passed to the callback function
3647  * @return
3648  *   0 on success, or -EINVAL if bad parameter
3649  */
3650 int
3651 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
3652 		buffer_tx_error_fn callback, void *userdata);
3653 
3654 /**
3655  * Callback function for silently dropping unsent buffered packets.
3656  *
3657  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3658  * adjust the default behavior when buffered packets cannot be sent. This
3659  * function drops any unsent packets silently and is used by Tx buffered
3660  * operations as default behavior.
3661  *
3662  * NOTE: this function should not be called directly, instead it should be used
3663  *       as a callback for packet buffering.
3664  *
3665  * NOTE: when configuring this function as a callback with
3666  *       rte_eth_tx_buffer_set_err_callback(), the final, userdata parameter
3667  *       should point to an uint64_t value.
3668  *
3669  * @param pkts
3670  *   The previously buffered packets which could not be sent
3671  * @param unsent
3672  *   The number of unsent packets in the pkts array
3673  * @param userdata
3674  *   Not used
3675  */
3676 void
3677 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
3678 		void *userdata);
3679 
3680 /**
3681  * Callback function for tracking unsent buffered packets.
3682  *
3683  * This function can be passed to rte_eth_tx_buffer_set_err_callback() to
3684  * adjust the default behavior when buffered packets cannot be sent. This
3685  * function drops any unsent packets, but also updates a user-supplied counter
3686  * to track the overall number of packets dropped. The counter should be an
3687  * uint64_t variable.
3688  *
3689  * NOTE: this function should not be called directly, instead it should be used
3690  *       as a callback for packet buffering.
3691  *
3692  * NOTE: when configuring this function as a callback with
3693  *       rte_eth_tx_buffer_set_err_callback(), the final, userdata parameter
3694  *       should point to an uint64_t value.
3695  *
3696  * @param pkts
3697  *   The previously buffered packets which could not be sent
3698  * @param unsent
3699  *   The number of unsent packets in the pkts array
3700  * @param userdata
3701  *   Pointer to an uint64_t value, which will be incremented by unsent
3702  */
3703 void
3704 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
3705 		void *userdata);
3706 
3707 /**
3708  * Request the driver to free mbufs currently cached by the driver. The
3709  * driver will only free the mbuf if it is no longer in use. It is the
3710  * application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
3711  * called if needed.
3712  *
3713  * @param port_id
3714  *   The port identifier of the Ethernet device.
3715  * @param queue_id
3716  *   The index of the transmit queue through which output packets must be
3717  *   sent.
3718  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
3719  *   to rte_eth_dev_configure().
3720  * @param free_cnt
3721  *   Maximum number of packets to free. Use 0 to indicate all possible packets
3722  *   should be freed. Note that a packet may be using multiple mbufs.
3723  * @return
3724  *   Failure: < 0
3725  *     -ENODEV: Invalid interface
3726  *     -EIO: device is removed
3727  *     -ENOTSUP: Driver does not support function
3728  *   Success: >= 0
3729  *     0-n: Number of packets freed. More packets may still remain in ring that
3730  *     are in use.
3731  */
3732 int
3733 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
3734 
3735 /**
3736  * Subtypes for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
3737  * raised by Ethernet device.
3738  */
3739 enum rte_eth_event_macsec_subtype {
3740 	/** Notifies unknown MACsec subevent. */
3741 	RTE_ETH_SUBEVENT_MACSEC_UNKNOWN,
3742 	/**
3743 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3744 	 *	Validation check: SecTag.TCI.V = 1
3745 	 */
3746 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1,
3747 	/**
3748 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3749 	 *	Validation check: SecTag.TCI.E = 0 && SecTag.TCI.C = 1
3750 	 */
3751 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1,
3752 	/**
3753 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3754 	 *	Validation check: SecTag.SL >= 'd48
3755 	 */
3756 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48,
3757 	/**
3758 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3759 	 *	Validation check: SecTag.TCI.ES = 1 && SecTag.TCI.SC = 1
3760 	 */
3761 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1,
3762 	/**
3763 	 * Subevent of RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR sectag validation events
3764 	 *	Validation check: SecTag.TCI.SC = 1 && SecTag.TCI.SCB = 1
3765 	 */
3766 	RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1,
3767 };
3768 
3769 /**
3770  * Event types for MACsec offload event (@ref RTE_ETH_EVENT_MACSEC)
3771  * raised by eth device.
3772  */
3773 enum rte_eth_event_macsec_type {
3774 	/** Notifies unknown MACsec event. */
3775 	RTE_ETH_EVENT_MACSEC_UNKNOWN,
3776 	/** Notifies Sectag validation failure events. */
3777 	RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR,
3778 	/** Notifies Rx SA hard expiry events. */
3779 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP,
3780 	/** Notifies Rx SA soft expiry events. */
3781 	RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP,
3782 	/** Notifies Tx SA hard expiry events. */
3783 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP,
3784 	/** Notifies Tx SA soft events. */
3785 	RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP,
3786 	/** Notifies Invalid SA event. */
3787 	RTE_ETH_EVENT_MACSEC_SA_NOT_VALID,
3788 };
3789 
3790 /**
3791  * Descriptor for @ref RTE_ETH_EVENT_MACSEC event.
3792  * Used by ethdev to send extra information of the MACsec offload event.
3793  */
3794 struct rte_eth_event_macsec_desc {
3795 	/** Type of RTE_ETH_EVENT_MACSEC_* event. */
3796 	enum rte_eth_event_macsec_type type;
3797 	/** Type of RTE_ETH_SUBEVENT_MACSEC_* subevent. */
3798 	enum rte_eth_event_macsec_subtype subtype;
3799 	/**
3800 	 * Event specific metadata.
3801 	 *
3802 	 * For the following events, *userdata* registered
3803 	 * with the *rte_security_session* would be returned
3804 	 * as metadata.
3805 	 *
3806 	 * @see struct rte_security_session_conf
3807 	 */
3808 	uint64_t metadata;
3809 };
3810 
3811 /**
3812  * Subtypes for IPsec offload event(@ref RTE_ETH_EVENT_IPSEC) raised by
3813  * eth device.
3814  */
3815 enum rte_eth_event_ipsec_subtype {
3816 	/** Unknown event type */
3817 	RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,
3818 	/** Sequence number overflow */
3819 	RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,
3820 	/** Soft time expiry of SA */
3821 	RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY,
3822 	/**
3823 	 * Soft byte expiry of SA determined by
3824 	 * @ref rte_security_ipsec_lifetime::bytes_soft_limit
3825 	 */
3826 	RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY,
3827 	/**
3828 	 * Soft packet expiry of SA determined by
3829 	 * @ref rte_security_ipsec_lifetime::packets_soft_limit
3830 	 */
3831 	RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY,
3832 	/**
3833 	 * Hard byte expiry of SA determined by
3834 	 * @ref rte_security_ipsec_lifetime::bytes_hard_limit
3835 	 */
3836 	RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY,
3837 	/**
3838 	 * Hard packet expiry of SA determined by
3839 	 * @ref rte_security_ipsec_lifetime::packets_hard_limit
3840 	 */
3841 	RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY,
3842 	/** Max value of this enum */
3843 	RTE_ETH_EVENT_IPSEC_MAX
3844 };
3845 
3846 /**
3847  * Descriptor for @ref RTE_ETH_EVENT_IPSEC event. Used by eth dev to send extra
3848  * information of the IPsec offload event.
3849  */
3850 struct rte_eth_event_ipsec_desc {
3851 	/** Type of RTE_ETH_EVENT_IPSEC_* event */
3852 	enum rte_eth_event_ipsec_subtype subtype;
3853 	/**
3854 	 * Event specific metadata.
3855 	 *
3856 	 * For the following events, *userdata* registered
3857 	 * with the *rte_security_session* would be returned
3858 	 * as metadata,
3859 	 *
3860 	 * - @ref RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
3861 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
3862 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
3863 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY
3864 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY
3865 	 * - @ref RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY
3866 	 *
3867 	 * @see struct rte_security_session_conf
3868 	 *
3869 	 */
3870 	uint64_t metadata;
3871 };
3872 
3873 /**
3874  * The eth device event type for interrupt, and maybe others in the future.
3875  */
3876 enum rte_eth_event_type {
3877 	RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
3878 	RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
3879 	/** queue state event (enabled/disabled) */
3880 	RTE_ETH_EVENT_QUEUE_STATE,
3881 	/** reset interrupt event, sent to VF on PF reset */
3882 	RTE_ETH_EVENT_INTR_RESET,
3883 	RTE_ETH_EVENT_VF_MBOX,  /**< message from the VF received by PF */
3884 	RTE_ETH_EVENT_MACSEC,   /**< MACsec offload related event */
3885 	RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
3886 	RTE_ETH_EVENT_NEW,      /**< port is probed */
3887 	RTE_ETH_EVENT_DESTROY,  /**< port is released */
3888 	RTE_ETH_EVENT_IPSEC,    /**< IPsec offload related event */
3889 	RTE_ETH_EVENT_FLOW_AGED,/**< New aged-out flows is detected */
3890 	/**
3891 	 * Number of available Rx descriptors is smaller than the threshold.
3892 	 * @see rte_eth_rx_avail_thresh_set()
3893 	 */
3894 	RTE_ETH_EVENT_RX_AVAIL_THRESH,
3895 	/** Port recovering from a hardware or firmware error.
3896 	 * If PMD supports proactive error recovery,
3897 	 * it should trigger this event to notify application
3898 	 * that it detected an error and the recovery is being started.
3899 	 * Upon receiving the event, the application should not invoke any control path API
3900 	 * (such as rte_eth_dev_configure/rte_eth_dev_stop...) until receiving
3901 	 * RTE_ETH_EVENT_RECOVERY_SUCCESS or RTE_ETH_EVENT_RECOVERY_FAILED event.
3902 	 * The PMD will set the data path pointers to dummy functions,
3903 	 * and re-set the data path pointers to non-dummy functions
3904 	 * before reporting RTE_ETH_EVENT_RECOVERY_SUCCESS event.
3905 	 * It means that the application cannot send or receive any packets
3906 	 * during this period.
3907 	 * @note Before the PMD reports the recovery result,
3908 	 * the PMD may report the RTE_ETH_EVENT_ERR_RECOVERING event again,
3909 	 * because a larger error may occur during the recovery.
3910 	 */
3911 	RTE_ETH_EVENT_ERR_RECOVERING,
3912 	/** Port recovers successfully from the error.
3913 	 * The PMD already re-configured the port,
3914 	 * and the effect is the same as a restart operation.
3915 	 * a) The following operation will be retained: (alphabetically)
3916 	 *    - DCB configuration
3917 	 *    - FEC configuration
3918 	 *    - Flow control configuration
3919 	 *    - LRO configuration
3920 	 *    - LSC configuration
3921 	 *    - MTU
3922 	 *    - MAC address (default and those supplied by MAC address array)
3923 	 *    - Promiscuous and allmulticast mode
3924 	 *    - PTP configuration
3925 	 *    - Queue (Rx/Tx) settings
3926 	 *    - Queue statistics mappings
3927 	 *    - RSS configuration by rte_eth_dev_rss_xxx() family
3928 	 *    - Rx checksum configuration
3929 	 *    - Rx interrupt settings
3930 	 *    - Traffic management configuration
3931 	 *    - VLAN configuration (including filtering, tpid, strip, pvid)
3932 	 *    - VMDq configuration
3933 	 * b) The following configuration maybe retained
3934 	 *    or not depending on the device capabilities:
3935 	 *    - flow rules
3936 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP)
3937 	 *    - shared flow objects
3938 	 *      (@see RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP)
3939 	 * c) Any other configuration will not be stored
3940 	 *    and will need to be re-configured.
3941 	 */
3942 	RTE_ETH_EVENT_RECOVERY_SUCCESS,
3943 	/** Port recovery failed.
3944 	 * It means that the port should not be usable anymore.
3945 	 * The application should close the port.
3946 	 */
3947 	RTE_ETH_EVENT_RECOVERY_FAILED,
3948 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
3949 };
3950 
3951 /** User application callback to be registered for interrupts. */
3952 typedef int (*rte_eth_dev_cb_fn)(uint16_t port_id,
3953 		enum rte_eth_event_type event, void *cb_arg, void *ret_param);
3954 
3955 /**
3956  * Register a callback function for port event.
3957  *
3958  * @param port_id
3959  *  Port ID.
3960  *  RTE_ETH_ALL means register the event for all port ids.
3961  * @param event
3962  *  Event interested.
3963  * @param cb_fn
3964  *  User supplied callback function to be called.
3965  * @param cb_arg
3966  *  Pointer to the parameters for the registered callback.
3967  *
3968  * @return
3969  *  - On success, zero.
3970  *  - On failure, a negative value.
3971  */
3972 int rte_eth_dev_callback_register(uint16_t port_id,
3973 			enum rte_eth_event_type event,
3974 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3975 
3976 /**
3977  * Unregister a callback function for port event.
3978  *
3979  * @param port_id
3980  *  Port ID.
3981  *  RTE_ETH_ALL means unregister the event for all port ids.
3982  * @param event
3983  *  Event interested.
3984  * @param cb_fn
3985  *  User supplied callback function to be called.
3986  * @param cb_arg
3987  *  Pointer to the parameters for the registered callback. -1 means to
3988  *  remove all for the same callback address and same event.
3989  *
3990  * @return
3991  *  - On success, zero.
3992  *  - On failure, a negative value.
3993  */
3994 int rte_eth_dev_callback_unregister(uint16_t port_id,
3995 			enum rte_eth_event_type event,
3996 		rte_eth_dev_cb_fn cb_fn, void *cb_arg);
3997 
3998 /**
3999  * When there is no Rx packet coming in Rx Queue for a long time, we can
4000  * sleep lcore related to Rx Queue for power saving, and enable Rx interrupt
4001  * to be triggered when Rx packet arrives.
4002  *
4003  * The rte_eth_dev_rx_intr_enable() function enables Rx queue
4004  * interrupt on specific Rx queue of a port.
4005  *
4006  * @param port_id
4007  *   The port identifier of the Ethernet device.
4008  * @param queue_id
4009  *   The index of the receive queue from which to retrieve input packets.
4010  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4011  *   to rte_eth_dev_configure().
4012  * @return
4013  *   - (0) if successful.
4014  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4015  *     that operation.
4016  *   - (-ENODEV) if *port_id* invalid.
4017  *   - (-EIO) if device is removed.
4018  */
4019 int rte_eth_dev_rx_intr_enable(uint16_t port_id, uint16_t queue_id);
4020 
4021 /**
4022  * When lcore wakes up from Rx interrupt indicating packet coming, disable Rx
4023  * interrupt and returns to polling mode.
4024  *
4025  * The rte_eth_dev_rx_intr_disable() function disables Rx queue
4026  * interrupt on specific Rx queue of a port.
4027  *
4028  * @param port_id
4029  *   The port identifier of the Ethernet device.
4030  * @param queue_id
4031  *   The index of the receive queue from which to retrieve input packets.
4032  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4033  *   to rte_eth_dev_configure().
4034  * @return
4035  *   - (0) if successful.
4036  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4037  *     that operation.
4038  *   - (-ENODEV) if *port_id* invalid.
4039  *   - (-EIO) if device is removed.
4040  */
4041 int rte_eth_dev_rx_intr_disable(uint16_t port_id, uint16_t queue_id);
4042 
4043 /**
4044  * Rx Interrupt control per port.
4045  *
4046  * @param port_id
4047  *   The port identifier of the Ethernet device.
4048  * @param epfd
4049  *   Epoll instance fd which the intr vector associated to.
4050  *   Using RTE_EPOLL_PER_THREAD allows to use per thread epoll instance.
4051  * @param op
4052  *   The operation be performed for the vector.
4053  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4054  * @param data
4055  *   User raw data.
4056  * @return
4057  *   - On success, zero.
4058  *   - On failure, a negative value.
4059  */
4060 int rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data);
4061 
4062 /**
4063  * Rx Interrupt control per queue.
4064  *
4065  * @param port_id
4066  *   The port identifier of the Ethernet device.
4067  * @param queue_id
4068  *   The index of the receive queue from which to retrieve input packets.
4069  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4070  *   to rte_eth_dev_configure().
4071  * @param epfd
4072  *   Epoll instance fd which the intr vector associated to.
4073  *   Using RTE_EPOLL_PER_THREAD allows to use per thread epoll instance.
4074  * @param op
4075  *   The operation be performed for the vector.
4076  *   Operation type of {RTE_INTR_EVENT_ADD, RTE_INTR_EVENT_DEL}.
4077  * @param data
4078  *   User raw data.
4079  * @return
4080  *   - On success, zero.
4081  *   - On failure, a negative value.
4082  */
4083 int rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4084 			      int epfd, int op, void *data);
4085 
4086 /**
4087  * Get interrupt fd per Rx queue.
4088  *
4089  * @param port_id
4090  *   The port identifier of the Ethernet device.
4091  * @param queue_id
4092  *   The index of the receive queue from which to retrieve input packets.
4093  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
4094  *   to rte_eth_dev_configure().
4095  * @return
4096  *   - (>=0) the interrupt fd associated to the requested Rx queue if
4097  *           successful.
4098  *   - (-1) on error.
4099  */
4100 int
4101 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id);
4102 
4103 /**
4104  * Turn on the LED on the Ethernet device.
4105  * This function turns on the LED on the Ethernet device.
4106  *
4107  * @param port_id
4108  *   The port identifier of the Ethernet device.
4109  * @return
4110  *   - (0) if successful.
4111  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4112  *     that operation.
4113  *   - (-ENODEV) if *port_id* invalid.
4114  *   - (-EIO) if device is removed.
4115  */
4116 int  rte_eth_led_on(uint16_t port_id);
4117 
4118 /**
4119  * Turn off the LED on the Ethernet device.
4120  * This function turns off the LED on the Ethernet device.
4121  *
4122  * @param port_id
4123  *   The port identifier of the Ethernet device.
4124  * @return
4125  *   - (0) if successful.
4126  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
4127  *     that operation.
4128  *   - (-ENODEV) if *port_id* invalid.
4129  *   - (-EIO) if device is removed.
4130  */
4131 int  rte_eth_led_off(uint16_t port_id);
4132 
4133 /**
4134  * @warning
4135  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4136  *
4137  * Get Forward Error Correction(FEC) capability.
4138  *
4139  * @param port_id
4140  *   The port identifier of the Ethernet device.
4141  * @param speed_fec_capa
4142  *   speed_fec_capa is out only with per-speed capabilities.
4143  *   If set to NULL, the function returns the required number
4144  *   of required array entries.
4145  * @param num
4146  *   a number of elements in an speed_fec_capa array.
4147  *
4148  * @return
4149  *   - A non-negative value lower or equal to num: success. The return value
4150  *     is the number of entries filled in the fec capa array.
4151  *   - A non-negative value higher than num: error, the given fec capa array
4152  *     is too small. The return value corresponds to the num that should
4153  *     be given to succeed. The entries in fec capa array are not valid and
4154  *     shall not be used by the caller.
4155  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support.
4156  *     that operation.
4157  *   - (-EIO) if device is removed.
4158  *   - (-ENODEV)  if *port_id* invalid.
4159  *   - (-EINVAL)  if *num* or *speed_fec_capa* invalid
4160  */
4161 __rte_experimental
4162 int rte_eth_fec_get_capability(uint16_t port_id,
4163 			       struct rte_eth_fec_capa *speed_fec_capa,
4164 			       unsigned int num);
4165 
4166 /**
4167  * @warning
4168  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4169  *
4170  * Get current Forward Error Correction(FEC) mode.
4171  * If link is down and AUTO is enabled, AUTO is returned, otherwise,
4172  * configured FEC mode is returned.
4173  * If link is up, current FEC mode is returned.
4174  *
4175  * @param port_id
4176  *   The port identifier of the Ethernet device.
4177  * @param fec_capa
4178  *   A bitmask of enabled FEC modes. If AUTO bit is set, other
4179  *   bits specify FEC modes which may be negotiated. If AUTO
4180  *   bit is clear, specify FEC modes to be used (only one valid
4181  *   mode per speed may be set).
4182  * @return
4183  *   - (0) if successful.
4184  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support.
4185  *     that operation.
4186  *   - (-EIO) if device is removed.
4187  *   - (-ENODEV)  if *port_id* invalid.
4188  */
4189 __rte_experimental
4190 int rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa);
4191 
4192 /**
4193  * @warning
4194  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
4195  *
4196  * Set Forward Error Correction(FEC) mode.
4197  *
4198  * @param port_id
4199  *   The port identifier of the Ethernet device.
4200  * @param fec_capa
4201  *   A bitmask of allowed FEC modes. If AUTO bit is set, other
4202  *   bits specify FEC modes which may be negotiated. If AUTO
4203  *   bit is clear, specify FEC modes to be used (only one valid
4204  *   mode per speed may be set).
4205  * @return
4206  *   - (0) if successful.
4207  *   - (-EINVAL) if the FEC mode is not valid.
4208  *   - (-ENOTSUP) if underlying hardware OR driver doesn't support.
4209  *   - (-EIO) if device is removed.
4210  *   - (-ENODEV)  if *port_id* invalid.
4211  */
4212 __rte_experimental
4213 int rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa);
4214 
4215 /**
4216  * Get current status of the Ethernet link flow control for Ethernet device
4217  *
4218  * @param port_id
4219  *   The port identifier of the Ethernet device.
4220  * @param fc_conf
4221  *   The pointer to the structure where to store the flow control parameters.
4222  * @return
4223  *   - (0) if successful.
4224  *   - (-ENOTSUP) if hardware doesn't support flow control.
4225  *   - (-ENODEV)  if *port_id* invalid.
4226  *   - (-EIO)  if device is removed.
4227  *   - (-EINVAL) if bad parameter.
4228  */
4229 int rte_eth_dev_flow_ctrl_get(uint16_t port_id,
4230 			      struct rte_eth_fc_conf *fc_conf);
4231 
4232 /**
4233  * Configure the Ethernet link flow control for Ethernet device
4234  *
4235  * @param port_id
4236  *   The port identifier of the Ethernet device.
4237  * @param fc_conf
4238  *   The pointer to the structure of the flow control parameters.
4239  * @return
4240  *   - (0) if successful.
4241  *   - (-ENOTSUP) if hardware doesn't support flow control mode.
4242  *   - (-ENODEV)  if *port_id* invalid.
4243  *   - (-EINVAL)  if bad parameter
4244  *   - (-EIO)     if flow control setup failure or device is removed.
4245  */
4246 int rte_eth_dev_flow_ctrl_set(uint16_t port_id,
4247 			      struct rte_eth_fc_conf *fc_conf);
4248 
4249 /**
4250  * Configure the Ethernet priority flow control under DCB environment
4251  * for Ethernet device.
4252  *
4253  * @param port_id
4254  * The port identifier of the Ethernet device.
4255  * @param pfc_conf
4256  * The pointer to the structure of the priority flow control parameters.
4257  * @return
4258  *   - (0) if successful.
4259  *   - (-ENOTSUP) if hardware doesn't support priority flow control mode.
4260  *   - (-ENODEV)  if *port_id* invalid.
4261  *   - (-EINVAL)  if bad parameter
4262  *   - (-EIO)     if flow control setup failure or device is removed.
4263  */
4264 int rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
4265 				struct rte_eth_pfc_conf *pfc_conf);
4266 
4267 /**
4268  * Add a MAC address to the set used for filtering incoming packets.
4269  *
4270  * @param port_id
4271  *   The port identifier of the Ethernet device.
4272  * @param mac_addr
4273  *   The MAC address to add.
4274  * @param pool
4275  *   VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
4276  *   not enabled, this should be set to 0.
4277  * @return
4278  *   - (0) if successfully added or *mac_addr* was already added.
4279  *   - (-ENOTSUP) if hardware doesn't support this feature.
4280  *   - (-ENODEV) if *port* is invalid.
4281  *   - (-EIO) if device is removed.
4282  *   - (-ENOSPC) if no more MAC addresses can be added.
4283  *   - (-EINVAL) if MAC address is invalid.
4284  */
4285 int rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *mac_addr,
4286 				uint32_t pool);
4287 
4288 /**
4289  * @warning
4290  * @b EXPERIMENTAL: this API may change without prior notice.
4291  *
4292  * Retrieve the information for queue based PFC.
4293  *
4294  * @param port_id
4295  *   The port identifier of the Ethernet device.
4296  * @param pfc_queue_info
4297  *   A pointer to a structure of type *rte_eth_pfc_queue_info* to be filled with
4298  *   the information about queue based PFC.
4299  * @return
4300  *   - (0) if successful.
4301  *   - (-ENOTSUP) if support for priority_flow_ctrl_queue_info_get does not exist.
4302  *   - (-ENODEV) if *port_id* invalid.
4303  *   - (-EINVAL) if bad parameter.
4304  */
4305 __rte_experimental
4306 int rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
4307 		struct rte_eth_pfc_queue_info *pfc_queue_info);
4308 
4309 /**
4310  * @warning
4311  * @b EXPERIMENTAL: this API may change without prior notice.
4312  *
4313  * Configure the queue based priority flow control for a given queue
4314  * for Ethernet device.
4315  *
4316  * @note When an ethdev port switches to queue based PFC mode, the
4317  * unconfigured queues shall be configured by the driver with
4318  * default values such as lower priority value for TC etc.
4319  *
4320  * @param port_id
4321  *   The port identifier of the Ethernet device.
4322  * @param pfc_queue_conf
4323  *   The pointer to the structure of the priority flow control parameters
4324  *   for the queue.
4325  * @return
4326  *   - (0) if successful.
4327  *   - (-ENOTSUP) if hardware doesn't support queue based PFC mode.
4328  *   - (-ENODEV)  if *port_id* invalid.
4329  *   - (-EINVAL)  if bad parameter
4330  *   - (-EIO)     if flow control setup queue failure
4331  */
4332 __rte_experimental
4333 int rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
4334 		struct rte_eth_pfc_queue_conf *pfc_queue_conf);
4335 
4336 /**
4337  * Remove a MAC address from the internal array of addresses.
4338  *
4339  * @param port_id
4340  *   The port identifier of the Ethernet device.
4341  * @param mac_addr
4342  *   MAC address to remove.
4343  * @return
4344  *   - (0) if successful, or *mac_addr* didn't exist.
4345  *   - (-ENOTSUP) if hardware doesn't support.
4346  *   - (-ENODEV) if *port* invalid.
4347  *   - (-EADDRINUSE) if attempting to remove the default MAC address.
4348  *   - (-EINVAL) if MAC address is invalid.
4349  */
4350 int rte_eth_dev_mac_addr_remove(uint16_t port_id,
4351 				struct rte_ether_addr *mac_addr);
4352 
4353 /**
4354  * Set the default MAC address.
4355  *
4356  * @param port_id
4357  *   The port identifier of the Ethernet device.
4358  * @param mac_addr
4359  *   New default MAC address.
4360  * @return
4361  *   - (0) if successful, or *mac_addr* didn't exist.
4362  *   - (-ENOTSUP) if hardware doesn't support.
4363  *   - (-ENODEV) if *port* invalid.
4364  *   - (-EINVAL) if MAC address is invalid.
4365  */
4366 int rte_eth_dev_default_mac_addr_set(uint16_t port_id,
4367 		struct rte_ether_addr *mac_addr);
4368 
4369 /**
4370  * Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
4371  *
4372  * @param port_id
4373  *   The port identifier of the Ethernet device.
4374  * @param reta_conf
4375  *   RETA to update.
4376  * @param reta_size
4377  *   Redirection table size. The table size can be queried by
4378  *   rte_eth_dev_info_get().
4379  * @return
4380  *   - (0) if successful.
4381  *   - (-ENODEV) if *port_id* is invalid.
4382  *   - (-ENOTSUP) if hardware doesn't support.
4383  *   - (-EINVAL) if bad parameter.
4384  *   - (-EIO) if device is removed.
4385  */
4386 int rte_eth_dev_rss_reta_update(uint16_t port_id,
4387 				struct rte_eth_rss_reta_entry64 *reta_conf,
4388 				uint16_t reta_size);
4389 
4390 /**
4391  * Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device.
4392  *
4393  * @param port_id
4394  *   The port identifier of the Ethernet device.
4395  * @param reta_conf
4396  *   RETA to query. For each requested reta entry, corresponding bit
4397  *   in mask must be set.
4398  * @param reta_size
4399  *   Redirection table size. The table size can be queried by
4400  *   rte_eth_dev_info_get().
4401  * @return
4402  *   - (0) if successful.
4403  *   - (-ENODEV) if *port_id* is invalid.
4404  *   - (-ENOTSUP) if hardware doesn't support.
4405  *   - (-EINVAL) if bad parameter.
4406  *   - (-EIO) if device is removed.
4407  */
4408 int rte_eth_dev_rss_reta_query(uint16_t port_id,
4409 			       struct rte_eth_rss_reta_entry64 *reta_conf,
4410 			       uint16_t reta_size);
4411 
4412 /**
4413  * Updates unicast hash table for receiving packet with the given destination
4414  * MAC address, and the packet is routed to all VFs for which the Rx mode is
4415  * accept packets that match the unicast hash table.
4416  *
4417  * @param port_id
4418  *   The port identifier of the Ethernet device.
4419  * @param addr
4420  *   Unicast MAC address.
4421  * @param on
4422  *    1 - Set an unicast hash bit for receiving packets with the MAC address.
4423  *    0 - Clear an unicast hash bit.
4424  * @return
4425  *   - (0) if successful.
4426  *   - (-ENOTSUP) if hardware doesn't support.
4427   *  - (-ENODEV) if *port_id* invalid.
4428  *   - (-EIO) if device is removed.
4429  *   - (-EINVAL) if bad parameter.
4430  */
4431 int rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
4432 				  uint8_t on);
4433 
4434 /**
4435  * Updates all unicast hash bitmaps for receiving packet with any Unicast
4436  * Ethernet MAC addresses,the packet is routed to all VFs for which the Rx
4437  * mode is accept packets that match the unicast hash table.
4438  *
4439  * @param port_id
4440  *   The port identifier of the Ethernet device.
4441  * @param on
4442  *    1 - Set all unicast hash bitmaps for receiving all the Ethernet
4443  *         MAC addresses
4444  *    0 - Clear all unicast hash bitmaps
4445  * @return
4446  *   - (0) if successful.
4447  *   - (-ENOTSUP) if hardware doesn't support.
4448   *  - (-ENODEV) if *port_id* invalid.
4449  *   - (-EIO) if device is removed.
4450  *   - (-EINVAL) if bad parameter.
4451  */
4452 int rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on);
4453 
4454 /**
4455  * Set the rate limitation for a queue on an Ethernet device.
4456  *
4457  * @param port_id
4458  *   The port identifier of the Ethernet device.
4459  * @param queue_idx
4460  *   The queue ID.
4461  * @param tx_rate
4462  *   The Tx rate in Mbps. Allocated from the total port link speed.
4463  * @return
4464  *   - (0) if successful.
4465  *   - (-ENOTSUP) if hardware doesn't support this feature.
4466  *   - (-ENODEV) if *port_id* invalid.
4467  *   - (-EIO) if device is removed.
4468  *   - (-EINVAL) if bad parameter.
4469  */
4470 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
4471 			uint32_t tx_rate);
4472 
4473 /**
4474  * Configuration of Receive Side Scaling hash computation of Ethernet device.
4475  *
4476  * @param port_id
4477  *   The port identifier of the Ethernet device.
4478  * @param rss_conf
4479  *   The new configuration to use for RSS hash computation on the port.
4480  * @return
4481  *   - (0) if successful.
4482  *   - (-ENODEV) if port identifier is invalid.
4483  *   - (-EIO) if device is removed.
4484  *   - (-ENOTSUP) if hardware doesn't support.
4485  *   - (-EINVAL) if bad parameter.
4486  */
4487 int rte_eth_dev_rss_hash_update(uint16_t port_id,
4488 				struct rte_eth_rss_conf *rss_conf);
4489 
4490 /**
4491  * Retrieve current configuration of Receive Side Scaling hash computation
4492  * of Ethernet device.
4493  *
4494  * @param port_id
4495  *   The port identifier of the Ethernet device.
4496  * @param rss_conf
4497  *   Where to store the current RSS hash configuration of the Ethernet device.
4498  * @return
4499  *   - (0) if successful.
4500  *   - (-ENODEV) if port identifier is invalid.
4501  *   - (-EIO) if device is removed.
4502  *   - (-ENOTSUP) if hardware doesn't support RSS.
4503  *   - (-EINVAL) if bad parameter.
4504  */
4505 int
4506 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
4507 			      struct rte_eth_rss_conf *rss_conf);
4508 
4509 /**
4510  * Add UDP tunneling port for a type of tunnel.
4511  *
4512  * Some NICs may require such configuration to properly parse a tunnel
4513  * with any standard or custom UDP port.
4514  * The packets with this UDP port will be parsed for this type of tunnel.
4515  * The device parser will also check the rest of the tunnel headers
4516  * before classifying the packet.
4517  *
4518  * With some devices, this API will affect packet classification, i.e.:
4519  *     - mbuf.packet_type reported on Rx
4520  *     - rte_flow rules with tunnel items
4521  *
4522  * @param port_id
4523  *   The port identifier of the Ethernet device.
4524  * @param tunnel_udp
4525  *   UDP tunneling configuration.
4526  *
4527  * @return
4528  *   - (0) if successful.
4529  *   - (-ENODEV) if port identifier is invalid.
4530  *   - (-EIO) if device is removed.
4531  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4532  */
4533 int
4534 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
4535 				struct rte_eth_udp_tunnel *tunnel_udp);
4536 
4537 /**
4538  * Delete UDP tunneling port for a type of tunnel.
4539  *
4540  * The packets with this UDP port will not be classified as this type of tunnel
4541  * anymore if the device use such mapping for tunnel packet classification.
4542  *
4543  * @see rte_eth_dev_udp_tunnel_port_add
4544  *
4545  * @param port_id
4546  *   The port identifier of the Ethernet device.
4547  * @param tunnel_udp
4548  *   UDP tunneling configuration.
4549  *
4550  * @return
4551  *   - (0) if successful.
4552  *   - (-ENODEV) if port identifier is invalid.
4553  *   - (-EIO) if device is removed.
4554  *   - (-ENOTSUP) if hardware doesn't support tunnel type.
4555  */
4556 int
4557 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
4558 				   struct rte_eth_udp_tunnel *tunnel_udp);
4559 
4560 /**
4561  * Get DCB information on an Ethernet device.
4562  *
4563  * @param port_id
4564  *   The port identifier of the Ethernet device.
4565  * @param dcb_info
4566  *   DCB information.
4567  * @return
4568  *   - (0) if successful.
4569  *   - (-ENODEV) if port identifier is invalid.
4570  *   - (-EIO) if device is removed.
4571  *   - (-ENOTSUP) if hardware doesn't support.
4572  *   - (-EINVAL) if bad parameter.
4573  */
4574 int rte_eth_dev_get_dcb_info(uint16_t port_id,
4575 			     struct rte_eth_dcb_info *dcb_info);
4576 
4577 struct rte_eth_rxtx_callback;
4578 
4579 /**
4580  * Add a callback to be called on packet Rx on a given port and queue.
4581  *
4582  * This API configures a function to be called for each burst of
4583  * packets received on a given NIC port queue. The return value is a pointer
4584  * that can be used to later remove the callback using
4585  * rte_eth_remove_rx_callback().
4586  *
4587  * Multiple functions are called in the order that they are added.
4588  *
4589  * @param port_id
4590  *   The port identifier of the Ethernet device.
4591  * @param queue_id
4592  *   The queue on the Ethernet device on which the callback is to be added.
4593  * @param fn
4594  *   The callback function
4595  * @param user_param
4596  *   A generic pointer parameter which will be passed to each invocation of the
4597  *   callback function on this port and queue. Inter-thread synchronization
4598  *   of any user data changes is the responsibility of the user.
4599  *
4600  * @return
4601  *   NULL on error.
4602  *   On success, a pointer value which can later be used to remove the callback.
4603  */
4604 const struct rte_eth_rxtx_callback *
4605 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4606 		rte_rx_callback_fn fn, void *user_param);
4607 
4608 /**
4609  * Add a callback that must be called first on packet Rx on a given port
4610  * and queue.
4611  *
4612  * This API configures a first function to be called for each burst of
4613  * packets received on a given NIC port queue. The return value is a pointer
4614  * that can be used to later remove the callback using
4615  * rte_eth_remove_rx_callback().
4616  *
4617  * Multiple functions are called in the order that they are added.
4618  *
4619  * @param port_id
4620  *   The port identifier of the Ethernet device.
4621  * @param queue_id
4622  *   The queue on the Ethernet device on which the callback is to be added.
4623  * @param fn
4624  *   The callback function
4625  * @param user_param
4626  *   A generic pointer parameter which will be passed to each invocation of the
4627  *   callback function on this port and queue. Inter-thread synchronization
4628  *   of any user data changes is the responsibility of the user.
4629  *
4630  * @return
4631  *   NULL on error.
4632  *   On success, a pointer value which can later be used to remove the callback.
4633  */
4634 const struct rte_eth_rxtx_callback *
4635 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4636 		rte_rx_callback_fn fn, void *user_param);
4637 
4638 /**
4639  * Add a callback to be called on packet Tx on a given port and queue.
4640  *
4641  * This API configures a function to be called for each burst of
4642  * packets sent on a given NIC port queue. The return value is a pointer
4643  * that can be used to later remove the callback using
4644  * rte_eth_remove_tx_callback().
4645  *
4646  * Multiple functions are called in the order that they are added.
4647  *
4648  * @param port_id
4649  *   The port identifier of the Ethernet device.
4650  * @param queue_id
4651  *   The queue on the Ethernet device on which the callback is to be added.
4652  * @param fn
4653  *   The callback function
4654  * @param user_param
4655  *   A generic pointer parameter which will be passed to each invocation of the
4656  *   callback function on this port and queue. Inter-thread synchronization
4657  *   of any user data changes is the responsibility of the user.
4658  *
4659  * @return
4660  *   NULL on error.
4661  *   On success, a pointer value which can later be used to remove the callback.
4662  */
4663 const struct rte_eth_rxtx_callback *
4664 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4665 		rte_tx_callback_fn fn, void *user_param);
4666 
4667 /**
4668  * Remove an Rx packet callback from a given port and queue.
4669  *
4670  * This function is used to removed callbacks that were added to a NIC port
4671  * queue using rte_eth_add_rx_callback().
4672  *
4673  * Note: the callback is removed from the callback list but it isn't freed
4674  * since the it may still be in use. The memory for the callback can be
4675  * subsequently freed back by the application by calling rte_free():
4676  *
4677  * - Immediately - if the port is stopped, or the user knows that no
4678  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
4679  *   on that queue.
4680  *
4681  * - After a short delay - where the delay is sufficient to allow any
4682  *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
4683  *   used to detect when data plane threads have ceased referencing the
4684  *   callback memory.
4685  *
4686  * @param port_id
4687  *   The port identifier of the Ethernet device.
4688  * @param queue_id
4689  *   The queue on the Ethernet device from which the callback is to be removed.
4690  * @param user_cb
4691  *   User supplied callback created via rte_eth_add_rx_callback().
4692  *
4693  * @return
4694  *   - 0: Success. Callback was removed.
4695  *   - -ENODEV:  If *port_id* is invalid.
4696  *   - -ENOTSUP: Callback support is not available.
4697  *   - -EINVAL:  The queue_id is out of range, or the callback
4698  *               is NULL or not found for the port/queue.
4699  */
4700 int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4701 		const struct rte_eth_rxtx_callback *user_cb);
4702 
4703 /**
4704  * Remove a Tx packet callback from a given port and queue.
4705  *
4706  * This function is used to removed callbacks that were added to a NIC port
4707  * queue using rte_eth_add_tx_callback().
4708  *
4709  * Note: the callback is removed from the callback list but it isn't freed
4710  * since the it may still be in use. The memory for the callback can be
4711  * subsequently freed back by the application by calling rte_free():
4712  *
4713  * - Immediately - if the port is stopped, or the user knows that no
4714  *   callbacks are in flight e.g. if called from the thread doing Rx/Tx
4715  *   on that queue.
4716  *
4717  * - After a short delay - where the delay is sufficient to allow any
4718  *   in-flight callbacks to complete. Alternately, the RCU mechanism can be
4719  *   used to detect when data plane threads have ceased referencing the
4720  *   callback memory.
4721  *
4722  * @param port_id
4723  *   The port identifier of the Ethernet device.
4724  * @param queue_id
4725  *   The queue on the Ethernet device from which the callback is to be removed.
4726  * @param user_cb
4727  *   User supplied callback created via rte_eth_add_tx_callback().
4728  *
4729  * @return
4730  *   - 0: Success. Callback was removed.
4731  *   - -ENODEV:  If *port_id* is invalid.
4732  *   - -ENOTSUP: Callback support is not available.
4733  *   - -EINVAL:  The queue_id is out of range, or the callback
4734  *               is NULL or not found for the port/queue.
4735  */
4736 int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4737 		const struct rte_eth_rxtx_callback *user_cb);
4738 
4739 /**
4740  * Retrieve information about given port's Rx queue.
4741  *
4742  * @param port_id
4743  *   The port identifier of the Ethernet device.
4744  * @param queue_id
4745  *   The Rx queue on the Ethernet device for which information
4746  *   will be retrieved.
4747  * @param qinfo
4748  *   A pointer to a structure of type *rte_eth_rxq_info_info* to be filled with
4749  *   the information of the Ethernet device.
4750  *
4751  * @return
4752  *   - 0: Success
4753  *   - -ENODEV:  If *port_id* is invalid.
4754  *   - -ENOTSUP: routine is not supported by the device PMD.
4755  *   - -EINVAL:  The queue_id is out of range, or the queue
4756  *               is hairpin queue.
4757  */
4758 int rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4759 	struct rte_eth_rxq_info *qinfo);
4760 
4761 /**
4762  * Retrieve information about given port's Tx queue.
4763  *
4764  * @param port_id
4765  *   The port identifier of the Ethernet device.
4766  * @param queue_id
4767  *   The Tx queue on the Ethernet device for which information
4768  *   will be retrieved.
4769  * @param qinfo
4770  *   A pointer to a structure of type *rte_eth_txq_info_info* to be filled with
4771  *   the information of the Ethernet device.
4772  *
4773  * @return
4774  *   - 0: Success
4775  *   - -ENODEV:  If *port_id* is invalid.
4776  *   - -ENOTSUP: routine is not supported by the device PMD.
4777  *   - -EINVAL:  The queue_id is out of range, or the queue
4778  *               is hairpin queue.
4779  */
4780 int rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4781 	struct rte_eth_txq_info *qinfo);
4782 
4783 /**
4784  * Retrieve information about the Rx packet burst mode.
4785  *
4786  * @param port_id
4787  *   The port identifier of the Ethernet device.
4788  * @param queue_id
4789  *   The Rx queue on the Ethernet device for which information
4790  *   will be retrieved.
4791  * @param mode
4792  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
4793  *   with the information of the packet burst mode.
4794  *
4795  * @return
4796  *   - 0: Success
4797  *   - -ENODEV:  If *port_id* is invalid.
4798  *   - -ENOTSUP: routine is not supported by the device PMD.
4799  *   - -EINVAL:  The queue_id is out of range.
4800  */
4801 int rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4802 	struct rte_eth_burst_mode *mode);
4803 
4804 /**
4805  * Retrieve information about the Tx packet burst mode.
4806  *
4807  * @param port_id
4808  *   The port identifier of the Ethernet device.
4809  * @param queue_id
4810  *   The Tx queue on the Ethernet device for which information
4811  *   will be retrieved.
4812  * @param mode
4813  *   A pointer to a structure of type *rte_eth_burst_mode* to be filled
4814  *   with the information of the packet burst mode.
4815  *
4816  * @return
4817  *   - 0: Success
4818  *   - -ENODEV:  If *port_id* is invalid.
4819  *   - -ENOTSUP: routine is not supported by the device PMD.
4820  *   - -EINVAL:  The queue_id is out of range.
4821  */
4822 int rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4823 	struct rte_eth_burst_mode *mode);
4824 
4825 /**
4826  * @warning
4827  * @b EXPERIMENTAL: this API may change without prior notice.
4828  *
4829  * Retrieve the monitor condition for a given receive queue.
4830  *
4831  * @param port_id
4832  *   The port identifier of the Ethernet device.
4833  * @param queue_id
4834  *   The Rx queue on the Ethernet device for which information
4835  *   will be retrieved.
4836  * @param pmc
4837  *   The pointer to power-optimized monitoring condition structure.
4838  *
4839  * @return
4840  *   - 0: Success.
4841  *   -ENOTSUP: Operation not supported.
4842  *   -EINVAL: Invalid parameters.
4843  *   -ENODEV: Invalid port ID.
4844  */
4845 __rte_experimental
4846 int rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
4847 		struct rte_power_monitor_cond *pmc);
4848 
4849 /**
4850  * Retrieve device registers and register attributes (number of registers and
4851  * register size)
4852  *
4853  * @param port_id
4854  *   The port identifier of the Ethernet device.
4855  * @param info
4856  *   Pointer to rte_dev_reg_info structure to fill in. If info->data is
4857  *   NULL the function fills in the width and length fields. If non-NULL
4858  *   the registers are put into the buffer pointed at by the data field.
4859  * @return
4860  *   - (0) if successful.
4861  *   - (-ENOTSUP) if hardware doesn't support.
4862  *   - (-EINVAL) if bad parameter.
4863  *   - (-ENODEV) if *port_id* invalid.
4864  *   - (-EIO) if device is removed.
4865  *   - others depends on the specific operations implementation.
4866  */
4867 int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info);
4868 
4869 /**
4870  * Retrieve size of device EEPROM
4871  *
4872  * @param port_id
4873  *   The port identifier of the Ethernet device.
4874  * @return
4875  *   - (>=0) EEPROM size if successful.
4876  *   - (-ENOTSUP) if hardware doesn't support.
4877  *   - (-ENODEV) if *port_id* invalid.
4878  *   - (-EIO) if device is removed.
4879  *   - others depends on the specific operations implementation.
4880  */
4881 int rte_eth_dev_get_eeprom_length(uint16_t port_id);
4882 
4883 /**
4884  * Retrieve EEPROM and EEPROM attribute
4885  *
4886  * @param port_id
4887  *   The port identifier of the Ethernet device.
4888  * @param info
4889  *   The template includes buffer for return EEPROM data and
4890  *   EEPROM attributes to be filled.
4891  * @return
4892  *   - (0) if successful.
4893  *   - (-ENOTSUP) if hardware doesn't support.
4894  *   - (-EINVAL) if bad parameter.
4895  *   - (-ENODEV) if *port_id* invalid.
4896  *   - (-EIO) if device is removed.
4897  *   - others depends on the specific operations implementation.
4898  */
4899 int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4900 
4901 /**
4902  * Program EEPROM with provided data
4903  *
4904  * @param port_id
4905  *   The port identifier of the Ethernet device.
4906  * @param info
4907  *   The template includes EEPROM data for programming and
4908  *   EEPROM attributes to be filled
4909  * @return
4910  *   - (0) if successful.
4911  *   - (-ENOTSUP) if hardware doesn't support.
4912  *   - (-ENODEV) if *port_id* invalid.
4913  *   - (-EINVAL) if bad parameter.
4914  *   - (-EIO) if device is removed.
4915  *   - others depends on the specific operations implementation.
4916  */
4917 int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
4918 
4919 /**
4920  * @warning
4921  * @b EXPERIMENTAL: this API may change without prior notice.
4922  *
4923  * Retrieve the type and size of plugin module EEPROM
4924  *
4925  * @param port_id
4926  *   The port identifier of the Ethernet device.
4927  * @param modinfo
4928  *   The type and size of plugin module EEPROM.
4929  * @return
4930  *   - (0) if successful.
4931  *   - (-ENOTSUP) if hardware doesn't support.
4932  *   - (-ENODEV) if *port_id* invalid.
4933  *   - (-EINVAL) if bad parameter.
4934  *   - (-EIO) if device is removed.
4935  *   - others depends on the specific operations implementation.
4936  */
4937 __rte_experimental
4938 int
4939 rte_eth_dev_get_module_info(uint16_t port_id,
4940 			    struct rte_eth_dev_module_info *modinfo);
4941 
4942 /**
4943  * @warning
4944  * @b EXPERIMENTAL: this API may change without prior notice.
4945  *
4946  * Retrieve the data of plugin module EEPROM
4947  *
4948  * @param port_id
4949  *   The port identifier of the Ethernet device.
4950  * @param info
4951  *   The template includes the plugin module EEPROM attributes, and the
4952  *   buffer for return plugin module EEPROM data.
4953  * @return
4954  *   - (0) if successful.
4955  *   - (-ENOTSUP) if hardware doesn't support.
4956  *   - (-EINVAL) if bad parameter.
4957  *   - (-ENODEV) if *port_id* invalid.
4958  *   - (-EIO) if device is removed.
4959  *   - others depends on the specific operations implementation.
4960  */
4961 __rte_experimental
4962 int
4963 rte_eth_dev_get_module_eeprom(uint16_t port_id,
4964 			      struct rte_dev_eeprom_info *info);
4965 
4966 /**
4967  * Set the list of multicast addresses to filter on an Ethernet device.
4968  *
4969  * @param port_id
4970  *   The port identifier of the Ethernet device.
4971  * @param mc_addr_set
4972  *   The array of multicast addresses to set. Equal to NULL when the function
4973  *   is invoked to flush the set of filtered addresses.
4974  * @param nb_mc_addr
4975  *   The number of multicast addresses in the *mc_addr_set* array. Equal to 0
4976  *   when the function is invoked to flush the set of filtered addresses.
4977  * @return
4978  *   - (0) if successful.
4979  *   - (-ENODEV) if *port_id* invalid.
4980  *   - (-EIO) if device is removed.
4981  *   - (-ENOTSUP) if PMD of *port_id* doesn't support multicast filtering.
4982  *   - (-ENOSPC) if *port_id* has not enough multicast filtering resources.
4983  *   - (-EINVAL) if bad parameter.
4984  */
4985 int rte_eth_dev_set_mc_addr_list(uint16_t port_id,
4986 				 struct rte_ether_addr *mc_addr_set,
4987 				 uint32_t nb_mc_addr);
4988 
4989 /**
4990  * Enable IEEE1588/802.1AS timestamping for an Ethernet device.
4991  *
4992  * @param port_id
4993  *   The port identifier of the Ethernet device.
4994  *
4995  * @return
4996  *   - 0: Success.
4997  *   - -ENODEV: The port ID is invalid.
4998  *   - -EIO: if device is removed.
4999  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5000  */
5001 int rte_eth_timesync_enable(uint16_t port_id);
5002 
5003 /**
5004  * Disable IEEE1588/802.1AS timestamping for an Ethernet device.
5005  *
5006  * @param port_id
5007  *   The port identifier of the Ethernet device.
5008  *
5009  * @return
5010  *   - 0: Success.
5011  *   - -ENODEV: The port ID is invalid.
5012  *   - -EIO: if device is removed.
5013  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5014  */
5015 int rte_eth_timesync_disable(uint16_t port_id);
5016 
5017 /**
5018  * Read an IEEE1588/802.1AS Rx timestamp from an Ethernet device.
5019  *
5020  * @param port_id
5021  *   The port identifier of the Ethernet device.
5022  * @param timestamp
5023  *   Pointer to the timestamp struct.
5024  * @param flags
5025  *   Device specific flags. Used to pass the Rx timesync register index to
5026  *   i40e. Unused in igb/ixgbe, pass 0 instead.
5027  *
5028  * @return
5029  *   - 0: Success.
5030  *   - -EINVAL: No timestamp is available.
5031  *   - -ENODEV: The port ID is invalid.
5032  *   - -EIO: if device is removed.
5033  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5034  */
5035 int rte_eth_timesync_read_rx_timestamp(uint16_t port_id,
5036 		struct timespec *timestamp, uint32_t flags);
5037 
5038 /**
5039  * Read an IEEE1588/802.1AS Tx timestamp from an Ethernet device.
5040  *
5041  * @param port_id
5042  *   The port identifier of the Ethernet device.
5043  * @param timestamp
5044  *   Pointer to the timestamp struct.
5045  *
5046  * @return
5047  *   - 0: Success.
5048  *   - -EINVAL: No timestamp is available.
5049  *   - -ENODEV: The port ID is invalid.
5050  *   - -EIO: if device is removed.
5051  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5052  */
5053 int rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
5054 		struct timespec *timestamp);
5055 
5056 /**
5057  * Adjust the timesync clock on an Ethernet device.
5058  *
5059  * This is usually used in conjunction with other Ethdev timesync functions to
5060  * synchronize the device time using the IEEE1588/802.1AS protocol.
5061  *
5062  * @param port_id
5063  *   The port identifier of the Ethernet device.
5064  * @param delta
5065  *   The adjustment in nanoseconds.
5066  *
5067  * @return
5068  *   - 0: Success.
5069  *   - -ENODEV: The port ID is invalid.
5070  *   - -EIO: if device is removed.
5071  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5072  */
5073 int rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta);
5074 
5075 /**
5076  * Read the time from the timesync clock on an Ethernet device.
5077  *
5078  * This is usually used in conjunction with other Ethdev timesync functions to
5079  * synchronize the device time using the IEEE1588/802.1AS protocol.
5080  *
5081  * @param port_id
5082  *   The port identifier of the Ethernet device.
5083  * @param time
5084  *   Pointer to the timespec struct that holds the time.
5085  *
5086  * @return
5087  *   - 0: Success.
5088  *   - -EINVAL: Bad parameter.
5089  */
5090 int rte_eth_timesync_read_time(uint16_t port_id, struct timespec *time);
5091 
5092 /**
5093  * Set the time of the timesync clock on an Ethernet device.
5094  *
5095  * This is usually used in conjunction with other Ethdev timesync functions to
5096  * synchronize the device time using the IEEE1588/802.1AS protocol.
5097  *
5098  * @param port_id
5099  *   The port identifier of the Ethernet device.
5100  * @param time
5101  *   Pointer to the timespec struct that holds the time.
5102  *
5103  * @return
5104  *   - 0: Success.
5105  *   - -EINVAL: No timestamp is available.
5106  *   - -ENODEV: The port ID is invalid.
5107  *   - -EIO: if device is removed.
5108  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5109  */
5110 int rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *time);
5111 
5112 /**
5113  * @warning
5114  * @b EXPERIMENTAL: this API may change without prior notice.
5115  *
5116  * Read the current clock counter of an Ethernet device
5117  *
5118  * This returns the current raw clock value of an Ethernet device. It is
5119  * a raw amount of ticks, with no given time reference.
5120  * The value returned here is from the same clock than the one
5121  * filling timestamp field of Rx packets when using hardware timestamp
5122  * offload. Therefore it can be used to compute a precise conversion of
5123  * the device clock to the real time.
5124  *
5125  * E.g, a simple heuristic to derivate the frequency would be:
5126  * uint64_t start, end;
5127  * rte_eth_read_clock(port, start);
5128  * rte_delay_ms(100);
5129  * rte_eth_read_clock(port, end);
5130  * double freq = (end - start) * 10;
5131  *
5132  * Compute a common reference with:
5133  * uint64_t base_time_sec = current_time();
5134  * uint64_t base_clock;
5135  * rte_eth_read_clock(port, base_clock);
5136  *
5137  * Then, convert the raw mbuf timestamp with:
5138  * base_time_sec + (double)(*timestamp_dynfield(mbuf) - base_clock) / freq;
5139  *
5140  * This simple example will not provide a very good accuracy. One must
5141  * at least measure multiple times the frequency and do a regression.
5142  * To avoid deviation from the system time, the common reference can
5143  * be repeated from time to time. The integer division can also be
5144  * converted by a multiplication and a shift for better performance.
5145  *
5146  * @param port_id
5147  *   The port identifier of the Ethernet device.
5148  * @param clock
5149  *   Pointer to the uint64_t that holds the raw clock value.
5150  *
5151  * @return
5152  *   - 0: Success.
5153  *   - -ENODEV: The port ID is invalid.
5154  *   - -ENOTSUP: The function is not supported by the Ethernet driver.
5155  *   - -EINVAL: if bad parameter.
5156  */
5157 __rte_experimental
5158 int
5159 rte_eth_read_clock(uint16_t port_id, uint64_t *clock);
5160 
5161 /**
5162 * Get the port ID from device name. The device name should be specified
5163 * as below:
5164 * - PCIe address (Domain:Bus:Device.Function), for example- 0000:2:00.0
5165 * - SoC device name, for example- fsl-gmac0
5166 * - vdev dpdk name, for example- net_[pcap0|null0|tap0]
5167 *
5168 * @param name
5169 *  pci address or name of the device
5170 * @param port_id
5171 *   pointer to port identifier of the device
5172 * @return
5173 *   - (0) if successful and port_id is filled.
5174 *   - (-ENODEV or -EINVAL) on failure.
5175 */
5176 int
5177 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
5178 
5179 /**
5180 * Get the device name from port ID. The device name is specified as below:
5181 * - PCIe address (Domain:Bus:Device.Function), for example- 0000:02:00.0
5182 * - SoC device name, for example- fsl-gmac0
5183 * - vdev dpdk name, for example- net_[pcap0|null0|tun0|tap0]
5184 *
5185 * @param port_id
5186 *   Port identifier of the device.
5187 * @param name
5188 *   Buffer of size RTE_ETH_NAME_MAX_LEN to store the name.
5189 * @return
5190 *   - (0) if successful.
5191 *   - (-ENODEV) if *port_id* is invalid.
5192 *   - (-EINVAL) on failure.
5193 */
5194 int
5195 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name);
5196 
5197 /**
5198  * Check that numbers of Rx and Tx descriptors satisfy descriptors limits from
5199  * the Ethernet device information, otherwise adjust them to boundaries.
5200  *
5201  * @param port_id
5202  *   The port identifier of the Ethernet device.
5203  * @param nb_rx_desc
5204  *   A pointer to a uint16_t where the number of receive
5205  *   descriptors stored.
5206  * @param nb_tx_desc
5207  *   A pointer to a uint16_t where the number of transmit
5208  *   descriptors stored.
5209  * @return
5210  *   - (0) if successful.
5211  *   - (-ENOTSUP, -ENODEV or -EINVAL) on failure.
5212  */
5213 int rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
5214 				     uint16_t *nb_rx_desc,
5215 				     uint16_t *nb_tx_desc);
5216 
5217 /**
5218  * Test if a port supports specific mempool ops.
5219  *
5220  * @param port_id
5221  *   Port identifier of the Ethernet device.
5222  * @param [in] pool
5223  *   The name of the pool operations to test.
5224  * @return
5225  *   - 0: best mempool ops choice for this port.
5226  *   - 1: mempool ops are supported for this port.
5227  *   - -ENOTSUP: mempool ops not supported for this port.
5228  *   - -ENODEV: Invalid port Identifier.
5229  *   - -EINVAL: Pool param is null.
5230  */
5231 int
5232 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
5233 
5234 /**
5235  * Get the security context for the Ethernet device.
5236  *
5237  * @param port_id
5238  *   Port identifier of the Ethernet device
5239  * @return
5240  *   - NULL on error.
5241  *   - pointer to security context on success.
5242  */
5243 void *
5244 rte_eth_dev_get_sec_ctx(uint16_t port_id);
5245 
5246 /**
5247  * @warning
5248  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5249  *
5250  * Query the device hairpin capabilities.
5251  *
5252  * @param port_id
5253  *   The port identifier of the Ethernet device.
5254  * @param cap
5255  *   Pointer to a structure that will hold the hairpin capabilities.
5256  * @return
5257  *   - (0) if successful.
5258  *   - (-ENOTSUP) if hardware doesn't support.
5259  *   - (-EINVAL) if bad parameter.
5260  */
5261 __rte_experimental
5262 int rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5263 				       struct rte_eth_hairpin_cap *cap);
5264 
5265 /**
5266  * @warning
5267  * @b EXPERIMENTAL: this structure may change without prior notice.
5268  *
5269  * Ethernet device representor ID range entry
5270  */
5271 struct rte_eth_representor_range {
5272 	enum rte_eth_representor_type type; /**< Representor type */
5273 	int controller; /**< Controller index */
5274 	int pf; /**< Physical function index */
5275 	__extension__
5276 	union {
5277 		int vf; /**< VF start index */
5278 		int sf; /**< SF start index */
5279 	};
5280 	uint32_t id_base; /**< Representor ID start index */
5281 	uint32_t id_end;  /**< Representor ID end index */
5282 	char name[RTE_DEV_NAME_MAX_LEN]; /**< Representor name */
5283 };
5284 
5285 /**
5286  * @warning
5287  * @b EXPERIMENTAL: this structure may change without prior notice.
5288  *
5289  * Ethernet device representor information
5290  */
5291 struct rte_eth_representor_info {
5292 	uint16_t controller; /**< Controller ID of caller device. */
5293 	uint16_t pf; /**< Physical function ID of caller device. */
5294 	uint32_t nb_ranges_alloc; /**< Size of the ranges array. */
5295 	uint32_t nb_ranges; /**< Number of initialized ranges. */
5296 	struct rte_eth_representor_range ranges[];/**< Representor ID range. */
5297 };
5298 
5299 /**
5300  * Retrieve the representor info of the device.
5301  *
5302  * Get device representor info to be able to calculate a unique
5303  * representor ID. @see rte_eth_representor_id_get helper.
5304  *
5305  * @param port_id
5306  *   The port identifier of the device.
5307  * @param info
5308  *   A pointer to a representor info structure.
5309  *   NULL to return number of range entries and allocate memory
5310  *   for next call to store detail.
5311  *   The number of ranges that were written into this structure
5312  *   will be placed into its nb_ranges field. This number cannot be
5313  *   larger than the nb_ranges_alloc that by the user before calling
5314  *   this function. It can be smaller than the value returned by the
5315  *   function, however.
5316  * @return
5317  *   - (-ENOTSUP) if operation is not supported.
5318  *   - (-ENODEV) if *port_id* invalid.
5319  *   - (-EIO) if device is removed.
5320  *   - (>=0) number of available representor range entries.
5321  */
5322 __rte_experimental
5323 int rte_eth_representor_info_get(uint16_t port_id,
5324 				 struct rte_eth_representor_info *info);
5325 
5326 /** The NIC is able to deliver flag (if set) with packets to the PMD. */
5327 #define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
5328 
5329 /** The NIC is able to deliver mark ID with packets to the PMD. */
5330 #define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
5331 
5332 /** The NIC is able to deliver tunnel ID with packets to the PMD. */
5333 #define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)
5334 
5335 /**
5336  * Negotiate the NIC's ability to deliver specific kinds of metadata to the PMD.
5337  *
5338  * Invoke this API before the first rte_eth_dev_configure() invocation
5339  * to let the PMD make preparations that are inconvenient to do later.
5340  *
5341  * The negotiation process is as follows:
5342  *
5343  * - the application requests features intending to use at least some of them;
5344  * - the PMD responds with the guaranteed subset of the requested feature set;
5345  * - the application can retry negotiation with another set of features;
5346  * - the application can pass zero to clear the negotiation result;
5347  * - the last negotiated result takes effect upon
5348  *   the ethdev configure and start.
5349  *
5350  * @note
5351  *   The PMD is supposed to first consider enabling the requested feature set
5352  *   in its entirety. Only if it fails to do so, does it have the right to
5353  *   respond with a smaller set of the originally requested features.
5354  *
5355  * @note
5356  *   Return code (-ENOTSUP) does not necessarily mean that the requested
5357  *   features are unsupported. In this case, the application should just
5358  *   assume that these features can be used without prior negotiations.
5359  *
5360  * @param port_id
5361  *   Port (ethdev) identifier
5362  *
5363  * @param[inout] features
5364  *   Feature selection buffer
5365  *
5366  * @return
5367  *   - (-EBUSY) if the port can't handle this in its current state;
5368  *   - (-ENOTSUP) if the method itself is not supported by the PMD;
5369  *   - (-ENODEV) if *port_id* is invalid;
5370  *   - (-EINVAL) if *features* is NULL;
5371  *   - (-EIO) if the device is removed;
5372  *   - (0) on success
5373  */
5374 int rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features);
5375 
5376 /** Flag to offload IP reassembly for IPv4 packets. */
5377 #define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
5378 /** Flag to offload IP reassembly for IPv6 packets. */
5379 #define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))
5380 
5381 /**
5382  * A structure used to get/set IP reassembly configuration. It is also used
5383  * to get the maximum capability values that a PMD can support.
5384  *
5385  * If rte_eth_ip_reassembly_capability_get() returns 0, IP reassembly can be
5386  * enabled using rte_eth_ip_reassembly_conf_set() and params values lower than
5387  * capability params can be set in the PMD.
5388  */
5389 struct rte_eth_ip_reassembly_params {
5390 	/** Maximum time in ms which PMD can wait for other fragments. */
5391 	uint32_t timeout_ms;
5392 	/** Maximum number of fragments that can be reassembled. */
5393 	uint16_t max_frags;
5394 	/**
5395 	 * Flags to enable reassembly of packet types -
5396 	 * RTE_ETH_DEV_REASSEMBLY_F_xxx.
5397 	 */
5398 	uint16_t flags;
5399 };
5400 
5401 /**
5402  * @warning
5403  * @b EXPERIMENTAL: this API may change without prior notice
5404  *
5405  * Get IP reassembly capabilities supported by the PMD. This is the first API
5406  * to be called for enabling the IP reassembly offload feature. PMD will return
5407  * the maximum values of parameters that PMD can support and user can call
5408  * rte_eth_ip_reassembly_conf_set() with param values lower than capability.
5409  *
5410  * @param port_id
5411  *   The port identifier of the device.
5412  * @param capa
5413  *   A pointer to rte_eth_ip_reassembly_params structure.
5414  * @return
5415  *   - (-ENOTSUP) if offload configuration is not supported by device.
5416  *   - (-ENODEV) if *port_id* invalid.
5417  *   - (-EIO) if device is removed.
5418  *   - (-EINVAL) if device is not configured or *capa* passed is NULL.
5419  *   - (0) on success.
5420  */
5421 __rte_experimental
5422 int rte_eth_ip_reassembly_capability_get(uint16_t port_id,
5423 		struct rte_eth_ip_reassembly_params *capa);
5424 
5425 /**
5426  * @warning
5427  * @b EXPERIMENTAL: this API may change without prior notice
5428  *
5429  * Get IP reassembly configuration parameters currently set in PMD.
5430  * The API will return error if the configuration is not already
5431  * set using rte_eth_ip_reassembly_conf_set() before calling this API or if
5432  * the device is not configured.
5433  *
5434  * @param port_id
5435  *   The port identifier of the device.
5436  * @param conf
5437  *   A pointer to rte_eth_ip_reassembly_params structure.
5438  * @return
5439  *   - (-ENOTSUP) if offload configuration is not supported by device.
5440  *   - (-ENODEV) if *port_id* invalid.
5441  *   - (-EIO) if device is removed.
5442  *   - (-EINVAL) if device is not configured or if *conf* passed is NULL or if
5443  *              configuration is not set using rte_eth_ip_reassembly_conf_set().
5444  *   - (0) on success.
5445  */
5446 __rte_experimental
5447 int rte_eth_ip_reassembly_conf_get(uint16_t port_id,
5448 		struct rte_eth_ip_reassembly_params *conf);
5449 
5450 /**
5451  * @warning
5452  * @b EXPERIMENTAL: this API may change without prior notice
5453  *
5454  * Set IP reassembly configuration parameters if the PMD supports IP reassembly
5455  * offload. User should first call rte_eth_ip_reassembly_capability_get() to
5456  * check the maximum values supported by the PMD before setting the
5457  * configuration. The use of this API is mandatory to enable this feature and
5458  * should be called before rte_eth_dev_start().
5459  *
5460  * In datapath, PMD cannot guarantee that IP reassembly is always successful.
5461  * Hence, PMD shall register mbuf dynamic field and dynamic flag using
5462  * rte_eth_ip_reassembly_dynfield_register() to denote incomplete IP reassembly.
5463  * If dynfield is not successfully registered, error will be returned and
5464  * IP reassembly offload cannot be used.
5465  *
5466  * @param port_id
5467  *   The port identifier of the device.
5468  * @param conf
5469  *   A pointer to rte_eth_ip_reassembly_params structure.
5470  * @return
5471  *   - (-ENOTSUP) if offload configuration is not supported by device.
5472  *   - (-ENODEV) if *port_id* invalid.
5473  *   - (-EIO) if device is removed.
5474  *   - (-EINVAL) if device is not configured or if device is already started or
5475  *               if *conf* passed is NULL or if mbuf dynfield is not registered
5476  *               successfully by the PMD.
5477  *   - (0) on success.
5478  */
5479 __rte_experimental
5480 int rte_eth_ip_reassembly_conf_set(uint16_t port_id,
5481 		const struct rte_eth_ip_reassembly_params *conf);
5482 
5483 /**
5484  * In case of IP reassembly offload failure, packet will be updated with
5485  * dynamic flag - RTE_MBUF_DYNFLAG_IP_REASSEMBLY_INCOMPLETE_NAME and packets
5486  * will be returned without alteration.
5487  * The application can retrieve the attached fragments using mbuf dynamic field
5488  * RTE_MBUF_DYNFIELD_IP_REASSEMBLY_NAME.
5489  */
5490 typedef struct {
5491 	/**
5492 	 * Next fragment packet. Application should fetch dynamic field of
5493 	 * each fragment until a NULL is received and nb_frags is 0.
5494 	 */
5495 	struct rte_mbuf *next_frag;
5496 	/** Time spent(in ms) by HW in waiting for further fragments. */
5497 	uint16_t time_spent;
5498 	/** Number of more fragments attached in mbuf dynamic fields. */
5499 	uint16_t nb_frags;
5500 } rte_eth_ip_reassembly_dynfield_t;
5501 
5502 /**
5503  * @warning
5504  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5505  *
5506  * Dump private info from device to a file. Provided data and the order depends
5507  * on the PMD.
5508  *
5509  * @param port_id
5510  *   The port identifier of the Ethernet device.
5511  * @param file
5512  *   A pointer to a file for output.
5513  * @return
5514  *   - (0) on success.
5515  *   - (-ENODEV) if *port_id* is invalid.
5516  *   - (-EINVAL) if null file.
5517  *   - (-ENOTSUP) if the device does not support this function.
5518  *   - (-EIO) if device is removed.
5519  */
5520 __rte_experimental
5521 int rte_eth_dev_priv_dump(uint16_t port_id, FILE *file);
5522 
5523 /**
5524  * @warning
5525  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5526  *
5527  * Dump ethdev Rx descriptor info to a file.
5528  *
5529  * This API is used for debugging, not a dataplane API.
5530  *
5531  * @param port_id
5532  *   The port identifier of the Ethernet device.
5533  * @param queue_id
5534  *   A Rx queue identifier on this port.
5535  * @param offset
5536  *  The offset of the descriptor starting from tail. (0 is the next
5537  *  packet to be received by the driver).
5538  * @param num
5539  *   The number of the descriptors to dump.
5540  * @param file
5541  *   A pointer to a file for output.
5542  * @return
5543  *   - On success, zero.
5544  *   - On failure, a negative value.
5545  */
5546 __rte_experimental
5547 int rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5548 			       uint16_t offset, uint16_t num, FILE *file);
5549 
5550 /**
5551  * @warning
5552  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5553  *
5554  * Dump ethdev Tx descriptor info to a file.
5555  *
5556  * This API is used for debugging, not a dataplane API.
5557  *
5558  * @param port_id
5559  *   The port identifier of the Ethernet device.
5560  * @param queue_id
5561  *   A Tx queue identifier on this port.
5562  * @param offset
5563  *  The offset of the descriptor starting from tail. (0 is the place where
5564  *  the next packet will be send).
5565  * @param num
5566  *   The number of the descriptors to dump.
5567  * @param file
5568  *   A pointer to a file for output.
5569  * @return
5570  *   - On success, zero.
5571  *   - On failure, a negative value.
5572  */
5573 __rte_experimental
5574 int rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
5575 			       uint16_t offset, uint16_t num, FILE *file);
5576 
5577 
5578 /* Congestion management */
5579 
5580 /** Enumerate list of ethdev congestion management objects */
5581 enum rte_eth_cman_obj {
5582 	/** Congestion management based on Rx queue depth */
5583 	RTE_ETH_CMAN_OBJ_RX_QUEUE = RTE_BIT32(0),
5584 	/**
5585 	 * Congestion management based on mempool depth associated with Rx queue
5586 	 * @see rte_eth_rx_queue_setup()
5587 	 */
5588 	RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL = RTE_BIT32(1),
5589 };
5590 
5591 /**
5592  * @warning
5593  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
5594  *
5595  * A structure used to retrieve information of ethdev congestion management.
5596  */
5597 struct rte_eth_cman_info {
5598 	/**
5599 	 * Set of supported congestion management modes
5600 	 * @see enum rte_cman_mode
5601 	 */
5602 	uint64_t modes_supported;
5603 	/**
5604 	 * Set of supported congestion management objects
5605 	 * @see enum rte_eth_cman_obj
5606 	 */
5607 	uint64_t objs_supported;
5608 	/**
5609 	 * Reserved for future fields. Always returned as 0 when
5610 	 * rte_eth_cman_info_get() is invoked
5611 	 */
5612 	uint8_t rsvd[8];
5613 };
5614 
5615 /**
5616  * @warning
5617  * @b EXPERIMENTAL: this structure may change, or be removed, without prior notice
5618  *
5619  * A structure used to configure the ethdev congestion management.
5620  */
5621 struct rte_eth_cman_config {
5622 	/** Congestion management object */
5623 	enum rte_eth_cman_obj obj;
5624 	/** Congestion management mode */
5625 	enum rte_cman_mode mode;
5626 	union {
5627 		/**
5628 		 * Rx queue to configure congestion management.
5629 		 *
5630 		 * Valid when object is RTE_ETH_CMAN_OBJ_RX_QUEUE or
5631 		 * RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL.
5632 		 */
5633 		uint16_t rx_queue;
5634 		/**
5635 		 * Reserved for future fields.
5636 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
5637 		 * and will be returned as 0 when rte_eth_cman_config_get() is
5638 		 * invoked.
5639 		 */
5640 		uint8_t rsvd_obj_params[4];
5641 	} obj_param;
5642 	union {
5643 		/**
5644 		 * RED configuration parameters.
5645 		 *
5646 		 * Valid when mode is RTE_CMAN_RED.
5647 		 */
5648 		struct rte_cman_red_params red;
5649 		/**
5650 		 * Reserved for future fields.
5651 		 * It must be set to 0 when rte_eth_cman_config_set() is invoked
5652 		 * and will be returned as 0 when rte_eth_cman_config_get() is
5653 		 * invoked.
5654 		 */
5655 		uint8_t rsvd_mode_params[4];
5656 	} mode_param;
5657 };
5658 
5659 /**
5660  * @warning
5661  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5662  *
5663  * Retrieve the information for ethdev congestion management
5664  *
5665  * @param port_id
5666  *   The port identifier of the Ethernet device.
5667  * @param info
5668  *   A pointer to a structure of type *rte_eth_cman_info* to be filled with
5669  *   the information about congestion management.
5670  * @return
5671  *   - (0) if successful.
5672  *   - (-ENOTSUP) if support for cman_info_get does not exist.
5673  *   - (-ENODEV) if *port_id* invalid.
5674  *   - (-EINVAL) if bad parameter.
5675  */
5676 __rte_experimental
5677 int rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info);
5678 
5679 /**
5680  * @warning
5681  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5682  *
5683  * Initialize the ethdev congestion management configuration structure with default values.
5684  *
5685  * @param port_id
5686  *   The port identifier of the Ethernet device.
5687  * @param config
5688  *   A pointer to a structure of type *rte_eth_cman_config* to be initialized
5689  *   with default value.
5690  * @return
5691  *   - (0) if successful.
5692  *   - (-ENOTSUP) if support for cman_config_init does not exist.
5693  *   - (-ENODEV) if *port_id* invalid.
5694  *   - (-EINVAL) if bad parameter.
5695  */
5696 __rte_experimental
5697 int rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config);
5698 
5699 /**
5700  * @warning
5701  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5702  *
5703  * Configure ethdev congestion management
5704  *
5705  * @param port_id
5706  *   The port identifier of the Ethernet device.
5707  * @param config
5708  *   A pointer to a structure of type *rte_eth_cman_config* to be configured.
5709  * @return
5710  *   - (0) if successful.
5711  *   - (-ENOTSUP) if support for cman_config_set does not exist.
5712  *   - (-ENODEV) if *port_id* invalid.
5713  *   - (-EINVAL) if bad parameter.
5714  */
5715 __rte_experimental
5716 int rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *config);
5717 
5718 /**
5719  * @warning
5720  * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
5721  *
5722  * Retrieve the applied ethdev congestion management parameters for the given port.
5723  *
5724  * @param port_id
5725  *   The port identifier of the Ethernet device.
5726  * @param config
5727  *   A pointer to a structure of type *rte_eth_cman_config* to retrieve
5728  *   congestion management parameters for the given object.
5729  *   Application must fill all parameters except mode_param parameter in
5730  *   struct rte_eth_cman_config.
5731  *
5732  * @return
5733  *   - (0) if successful.
5734  *   - (-ENOTSUP) if support for cman_config_get does not exist.
5735  *   - (-ENODEV) if *port_id* invalid.
5736  *   - (-EINVAL) if bad parameter.
5737  */
5738 __rte_experimental
5739 int rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config);
5740 
5741 #include <rte_ethdev_core.h>
5742 
5743 /**
5744  * @internal
5745  * Helper routine for rte_eth_rx_burst().
5746  * Should be called at exit from PMD's rte_eth_rx_bulk implementation.
5747  * Does necessary post-processing - invokes Rx callbacks if any, etc.
5748  *
5749  * @param port_id
5750  *  The port identifier of the Ethernet device.
5751  * @param queue_id
5752  *  The index of the receive queue from which to retrieve input packets.
5753  * @param rx_pkts
5754  *   The address of an array of pointers to *rte_mbuf* structures that
5755  *   have been retrieved from the device.
5756  * @param nb_rx
5757  *   The number of packets that were retrieved from the device.
5758  * @param nb_pkts
5759  *   The number of elements in @p rx_pkts array.
5760  * @param opaque
5761  *   Opaque pointer of Rx queue callback related data.
5762  *
5763  * @return
5764  *  The number of packets effectively supplied to the @p rx_pkts array.
5765  */
5766 uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
5767 		struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
5768 		void *opaque);
5769 
5770 /**
5771  *
5772  * Retrieve a burst of input packets from a receive queue of an Ethernet
5773  * device. The retrieved packets are stored in *rte_mbuf* structures whose
5774  * pointers are supplied in the *rx_pkts* array.
5775  *
5776  * The rte_eth_rx_burst() function loops, parsing the Rx ring of the
5777  * receive queue, up to *nb_pkts* packets, and for each completed Rx
5778  * descriptor in the ring, it performs the following operations:
5779  *
5780  * - Initialize the *rte_mbuf* data structure associated with the
5781  *   Rx descriptor according to the information provided by the NIC into
5782  *   that Rx descriptor.
5783  *
5784  * - Store the *rte_mbuf* data structure into the next entry of the
5785  *   *rx_pkts* array.
5786  *
5787  * - Replenish the Rx descriptor with a new *rte_mbuf* buffer
5788  *   allocated from the memory pool associated with the receive queue at
5789  *   initialization time.
5790  *
5791  * When retrieving an input packet that was scattered by the controller
5792  * into multiple receive descriptors, the rte_eth_rx_burst() function
5793  * appends the associated *rte_mbuf* buffers to the first buffer of the
5794  * packet.
5795  *
5796  * The rte_eth_rx_burst() function returns the number of packets
5797  * actually retrieved, which is the number of *rte_mbuf* data structures
5798  * effectively supplied into the *rx_pkts* array.
5799  * A return value equal to *nb_pkts* indicates that the Rx queue contained
5800  * at least *rx_pkts* packets, and this is likely to signify that other
5801  * received packets remain in the input queue. Applications implementing
5802  * a "retrieve as much received packets as possible" policy can check this
5803  * specific case and keep invoking the rte_eth_rx_burst() function until
5804  * a value less than *nb_pkts* is returned.
5805  *
5806  * This receive method has the following advantages:
5807  *
5808  * - It allows a run-to-completion network stack engine to retrieve and
5809  *   to immediately process received packets in a fast burst-oriented
5810  *   approach, avoiding the overhead of unnecessary intermediate packet
5811  *   queue/dequeue operations.
5812  *
5813  * - Conversely, it also allows an asynchronous-oriented processing
5814  *   method to retrieve bursts of received packets and to immediately
5815  *   queue them for further parallel processing by another logical core,
5816  *   for instance. However, instead of having received packets being
5817  *   individually queued by the driver, this approach allows the caller
5818  *   of the rte_eth_rx_burst() function to queue a burst of retrieved
5819  *   packets at a time and therefore dramatically reduce the cost of
5820  *   enqueue/dequeue operations per packet.
5821  *
5822  * - It allows the rte_eth_rx_burst() function of the driver to take
5823  *   advantage of burst-oriented hardware features (CPU cache,
5824  *   prefetch instructions, and so on) to minimize the number of CPU
5825  *   cycles per packet.
5826  *
5827  * To summarize, the proposed receive API enables many
5828  * burst-oriented optimizations in both synchronous and asynchronous
5829  * packet processing environments with no overhead in both cases.
5830  *
5831  * @note
5832  *   Some drivers using vector instructions require that *nb_pkts* is
5833  *   divisible by 4 or 8, depending on the driver implementation.
5834  *
5835  * The rte_eth_rx_burst() function does not provide any error
5836  * notification to avoid the corresponding overhead. As a hint, the
5837  * upper-level application might check the status of the device link once
5838  * being systematically returned a 0 value for a given number of tries.
5839  *
5840  * @param port_id
5841  *   The port identifier of the Ethernet device.
5842  * @param queue_id
5843  *   The index of the receive queue from which to retrieve input packets.
5844  *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
5845  *   to rte_eth_dev_configure().
5846  * @param rx_pkts
5847  *   The address of an array of pointers to *rte_mbuf* structures that
5848  *   must be large enough to store *nb_pkts* pointers in it.
5849  * @param nb_pkts
5850  *   The maximum number of packets to retrieve.
5851  *   The value must be divisible by 8 in order to work with any driver.
5852  * @return
5853  *   The number of packets actually retrieved, which is the number
5854  *   of pointers to *rte_mbuf* structures effectively supplied to the
5855  *   *rx_pkts* array.
5856  */
5857 static inline uint16_t
5858 rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
5859 		 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
5860 {
5861 	uint16_t nb_rx;
5862 	struct rte_eth_fp_ops *p;
5863 	void *qd;
5864 
5865 #ifdef RTE_ETHDEV_DEBUG_RX
5866 	if (port_id >= RTE_MAX_ETHPORTS ||
5867 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5868 		RTE_ETHDEV_LOG(ERR,
5869 			"Invalid port_id=%u or queue_id=%u\n",
5870 			port_id, queue_id);
5871 		return 0;
5872 	}
5873 #endif
5874 
5875 	/* fetch pointer to queue data */
5876 	p = &rte_eth_fp_ops[port_id];
5877 	qd = p->rxq.data[queue_id];
5878 
5879 #ifdef RTE_ETHDEV_DEBUG_RX
5880 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
5881 
5882 	if (qd == NULL) {
5883 		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
5884 			queue_id, port_id);
5885 		return 0;
5886 	}
5887 #endif
5888 
5889 	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);
5890 
5891 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
5892 	{
5893 		void *cb;
5894 
5895 		/* __ATOMIC_RELEASE memory order was used when the
5896 		 * call back was inserted into the list.
5897 		 * Since there is a clear dependency between loading
5898 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
5899 		 * not required.
5900 		 */
5901 		cb = __atomic_load_n((void **)&p->rxq.clbk[queue_id],
5902 				__ATOMIC_RELAXED);
5903 		if (unlikely(cb != NULL))
5904 			nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
5905 					rx_pkts, nb_rx, nb_pkts, cb);
5906 	}
5907 #endif
5908 
5909 	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
5910 	return nb_rx;
5911 }
5912 
5913 /**
5914  * Get the number of used descriptors of a Rx queue
5915  *
5916  * Since it's a dataplane function, no check is performed on port_id and
5917  * queue_id. The caller must therefore ensure that the port is enabled
5918  * and the queue is configured and running.
5919  *
5920  * @param port_id
5921  *  The port identifier of the Ethernet device.
5922  * @param queue_id
5923  *  The queue ID on the specific port.
5924  * @return
5925  *  The number of used descriptors in the specific queue, or:
5926  *   - (-ENODEV) if *port_id* is invalid.
5927  *   - (-EINVAL) if *queue_id* is invalid
5928  *   - (-ENOTSUP) if the device does not support this function
5929  */
5930 static inline int
5931 rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
5932 {
5933 	struct rte_eth_fp_ops *p;
5934 	void *qd;
5935 
5936 #ifdef RTE_ETHDEV_DEBUG_RX
5937 	if (port_id >= RTE_MAX_ETHPORTS ||
5938 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
5939 		RTE_ETHDEV_LOG(ERR,
5940 			"Invalid port_id=%u or queue_id=%u\n",
5941 			port_id, queue_id);
5942 		return -EINVAL;
5943 	}
5944 #endif
5945 
5946 	/* fetch pointer to queue data */
5947 	p = &rte_eth_fp_ops[port_id];
5948 	qd = p->rxq.data[queue_id];
5949 
5950 #ifdef RTE_ETHDEV_DEBUG_RX
5951 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5952 	if (qd == NULL)
5953 		return -EINVAL;
5954 #endif
5955 
5956 	if (*p->rx_queue_count == NULL)
5957 		return -ENOTSUP;
5958 	return (int)(*p->rx_queue_count)(qd);
5959 }
5960 
5961 /**@{@name Rx hardware descriptor states
5962  * @see rte_eth_rx_descriptor_status
5963  */
5964 #define RTE_ETH_RX_DESC_AVAIL    0 /**< Desc available for hw. */
5965 #define RTE_ETH_RX_DESC_DONE     1 /**< Desc done, filled by hw. */
5966 #define RTE_ETH_RX_DESC_UNAVAIL  2 /**< Desc used by driver or hw. */
5967 /**@}*/
5968 
5969 /**
5970  * Check the status of a Rx descriptor in the queue
5971  *
5972  * It should be called in a similar context than the Rx function:
5973  * - on a dataplane core
5974  * - not concurrently on the same queue
5975  *
5976  * Since it's a dataplane function, no check is performed on port_id and
5977  * queue_id. The caller must therefore ensure that the port is enabled
5978  * and the queue is configured and running.
5979  *
5980  * Note: accessing to a random descriptor in the ring may trigger cache
5981  * misses and have a performance impact.
5982  *
5983  * @param port_id
5984  *  A valid port identifier of the Ethernet device which.
5985  * @param queue_id
5986  *  A valid Rx queue identifier on this port.
5987  * @param offset
5988  *  The offset of the descriptor starting from tail (0 is the next
5989  *  packet to be received by the driver).
5990  *
5991  * @return
5992  *  - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
5993  *    receive a packet.
5994  *  - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
5995  *    not yet processed by the driver (i.e. in the receive queue).
5996  *  - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either hold by
5997  *    the driver and not yet returned to hw, or reserved by the hw.
5998  *  - (-EINVAL) bad descriptor offset.
5999  *  - (-ENOTSUP) if the device does not support this function.
6000  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6001  */
6002 static inline int
6003 rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
6004 	uint16_t offset)
6005 {
6006 	struct rte_eth_fp_ops *p;
6007 	void *qd;
6008 
6009 #ifdef RTE_ETHDEV_DEBUG_RX
6010 	if (port_id >= RTE_MAX_ETHPORTS ||
6011 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6012 		RTE_ETHDEV_LOG(ERR,
6013 			"Invalid port_id=%u or queue_id=%u\n",
6014 			port_id, queue_id);
6015 		return -EINVAL;
6016 	}
6017 #endif
6018 
6019 	/* fetch pointer to queue data */
6020 	p = &rte_eth_fp_ops[port_id];
6021 	qd = p->rxq.data[queue_id];
6022 
6023 #ifdef RTE_ETHDEV_DEBUG_RX
6024 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6025 	if (qd == NULL)
6026 		return -ENODEV;
6027 #endif
6028 	if (*p->rx_descriptor_status == NULL)
6029 		return -ENOTSUP;
6030 	return (*p->rx_descriptor_status)(qd, offset);
6031 }
6032 
6033 /**@{@name Tx hardware descriptor states
6034  * @see rte_eth_tx_descriptor_status
6035  */
6036 #define RTE_ETH_TX_DESC_FULL    0 /**< Desc filled for hw, waiting xmit. */
6037 #define RTE_ETH_TX_DESC_DONE    1 /**< Desc done, packet is transmitted. */
6038 #define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
6039 /**@}*/
6040 
6041 /**
6042  * Check the status of a Tx descriptor in the queue.
6043  *
6044  * It should be called in a similar context than the Tx function:
6045  * - on a dataplane core
6046  * - not concurrently on the same queue
6047  *
6048  * Since it's a dataplane function, no check is performed on port_id and
6049  * queue_id. The caller must therefore ensure that the port is enabled
6050  * and the queue is configured and running.
6051  *
6052  * Note: accessing to a random descriptor in the ring may trigger cache
6053  * misses and have a performance impact.
6054  *
6055  * @param port_id
6056  *  A valid port identifier of the Ethernet device which.
6057  * @param queue_id
6058  *  A valid Tx queue identifier on this port.
6059  * @param offset
6060  *  The offset of the descriptor starting from tail (0 is the place where
6061  *  the next packet will be send).
6062  *
6063  * @return
6064  *  - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
6065  *    in the transmit queue.
6066  *  - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
6067  *    be reused by the driver.
6068  *  - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
6069  *    driver or the hardware.
6070  *  - (-EINVAL) bad descriptor offset.
6071  *  - (-ENOTSUP) if the device does not support this function.
6072  *  - (-ENODEV) bad port or queue (only if compiled with debug).
6073  */
6074 static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
6075 	uint16_t queue_id, uint16_t offset)
6076 {
6077 	struct rte_eth_fp_ops *p;
6078 	void *qd;
6079 
6080 #ifdef RTE_ETHDEV_DEBUG_TX
6081 	if (port_id >= RTE_MAX_ETHPORTS ||
6082 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6083 		RTE_ETHDEV_LOG(ERR,
6084 			"Invalid port_id=%u or queue_id=%u\n",
6085 			port_id, queue_id);
6086 		return -EINVAL;
6087 	}
6088 #endif
6089 
6090 	/* fetch pointer to queue data */
6091 	p = &rte_eth_fp_ops[port_id];
6092 	qd = p->txq.data[queue_id];
6093 
6094 #ifdef RTE_ETHDEV_DEBUG_TX
6095 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
6096 	if (qd == NULL)
6097 		return -ENODEV;
6098 #endif
6099 	if (*p->tx_descriptor_status == NULL)
6100 		return -ENOTSUP;
6101 	return (*p->tx_descriptor_status)(qd, offset);
6102 }
6103 
6104 /**
6105  * @internal
6106  * Helper routine for rte_eth_tx_burst().
6107  * Should be called before entry PMD's rte_eth_tx_bulk implementation.
6108  * Does necessary pre-processing - invokes Tx callbacks if any, etc.
6109  *
6110  * @param port_id
6111  *   The port identifier of the Ethernet device.
6112  * @param queue_id
6113  *   The index of the transmit queue through which output packets must be
6114  *   sent.
6115  * @param tx_pkts
6116  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6117  *   which contain the output packets.
6118  * @param nb_pkts
6119  *   The maximum number of packets to transmit.
6120  * @return
6121  *   The number of output packets to transmit.
6122  */
6123 uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
6124 	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
6125 
6126 /**
6127  * Send a burst of output packets on a transmit queue of an Ethernet device.
6128  *
6129  * The rte_eth_tx_burst() function is invoked to transmit output packets
6130  * on the output queue *queue_id* of the Ethernet device designated by its
6131  * *port_id*.
6132  * The *nb_pkts* parameter is the number of packets to send which are
6133  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6134  * allocated from a pool created with rte_pktmbuf_pool_create().
6135  * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
6136  * up to the number of transmit descriptors available in the Tx ring of the
6137  * transmit queue.
6138  * For each packet to send, the rte_eth_tx_burst() function performs
6139  * the following operations:
6140  *
6141  * - Pick up the next available descriptor in the transmit ring.
6142  *
6143  * - Free the network buffer previously sent with that descriptor, if any.
6144  *
6145  * - Initialize the transmit descriptor with the information provided
6146  *   in the *rte_mbuf data structure.
6147  *
6148  * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
6149  * the rte_eth_tx_burst() function uses several transmit descriptors
6150  * of the ring.
6151  *
6152  * The rte_eth_tx_burst() function returns the number of packets it
6153  * actually sent. A return value equal to *nb_pkts* means that all packets
6154  * have been sent, and this is likely to signify that other output packets
6155  * could be immediately transmitted again. Applications that implement a
6156  * "send as many packets to transmit as possible" policy can check this
6157  * specific case and keep invoking the rte_eth_tx_burst() function until
6158  * a value less than *nb_pkts* is returned.
6159  *
6160  * It is the responsibility of the rte_eth_tx_burst() function to
6161  * transparently free the memory buffers of packets previously sent.
6162  * This feature is driven by the *tx_free_thresh* value supplied to the
6163  * rte_eth_dev_configure() function at device configuration time.
6164  * When the number of free Tx descriptors drops below this threshold, the
6165  * rte_eth_tx_burst() function must [attempt to] free the *rte_mbuf*  buffers
6166  * of those packets whose transmission was effectively completed.
6167  *
6168  * If the PMD is RTE_ETH_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
6169  * invoke this function concurrently on the same Tx queue without SW lock.
6170  * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
6171  *
6172  * @see rte_eth_tx_prepare to perform some prior checks or adjustments
6173  * for offloads.
6174  *
6175  * @note This function must not modify mbufs (including packets data)
6176  * unless the refcnt is 1.
6177  * An exception is the bonding PMD, which does not have "Tx prepare" support,
6178  * in this case, mbufs may be modified.
6179  *
6180  * @param port_id
6181  *   The port identifier of the Ethernet device.
6182  * @param queue_id
6183  *   The index of the transmit queue through which output packets must be
6184  *   sent.
6185  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6186  *   to rte_eth_dev_configure().
6187  * @param tx_pkts
6188  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6189  *   which contain the output packets.
6190  * @param nb_pkts
6191  *   The maximum number of packets to transmit.
6192  * @return
6193  *   The number of output packets actually stored in transmit descriptors of
6194  *   the transmit ring. The return value can be less than the value of the
6195  *   *tx_pkts* parameter when the transmit ring is full or has been filled up.
6196  */
6197 static inline uint16_t
6198 rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
6199 		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6200 {
6201 	struct rte_eth_fp_ops *p;
6202 	void *qd;
6203 
6204 #ifdef RTE_ETHDEV_DEBUG_TX
6205 	if (port_id >= RTE_MAX_ETHPORTS ||
6206 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6207 		RTE_ETHDEV_LOG(ERR,
6208 			"Invalid port_id=%u or queue_id=%u\n",
6209 			port_id, queue_id);
6210 		return 0;
6211 	}
6212 #endif
6213 
6214 	/* fetch pointer to queue data */
6215 	p = &rte_eth_fp_ops[port_id];
6216 	qd = p->txq.data[queue_id];
6217 
6218 #ifdef RTE_ETHDEV_DEBUG_TX
6219 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
6220 
6221 	if (qd == NULL) {
6222 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6223 			queue_id, port_id);
6224 		return 0;
6225 	}
6226 #endif
6227 
6228 #ifdef RTE_ETHDEV_RXTX_CALLBACKS
6229 	{
6230 		void *cb;
6231 
6232 		/* __ATOMIC_RELEASE memory order was used when the
6233 		 * call back was inserted into the list.
6234 		 * Since there is a clear dependency between loading
6235 		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
6236 		 * not required.
6237 		 */
6238 		cb = __atomic_load_n((void **)&p->txq.clbk[queue_id],
6239 				__ATOMIC_RELAXED);
6240 		if (unlikely(cb != NULL))
6241 			nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
6242 					tx_pkts, nb_pkts, cb);
6243 	}
6244 #endif
6245 
6246 	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);
6247 
6248 	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
6249 	return nb_pkts;
6250 }
6251 
6252 /**
6253  * Process a burst of output packets on a transmit queue of an Ethernet device.
6254  *
6255  * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
6256  * transmitted on the output queue *queue_id* of the Ethernet device designated
6257  * by its *port_id*.
6258  * The *nb_pkts* parameter is the number of packets to be prepared which are
6259  * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
6260  * allocated from a pool created with rte_pktmbuf_pool_create().
6261  * For each packet to send, the rte_eth_tx_prepare() function performs
6262  * the following operations:
6263  *
6264  * - Check if packet meets devices requirements for Tx offloads.
6265  *
6266  * - Check limitations about number of segments.
6267  *
6268  * - Check additional requirements when debug is enabled.
6269  *
6270  * - Update and/or reset required checksums when Tx offload is set for packet.
6271  *
6272  * Since this function can modify packet data, provided mbufs must be safely
6273  * writable (e.g. modified data cannot be in shared segment).
6274  *
6275  * The rte_eth_tx_prepare() function returns the number of packets ready to be
6276  * sent. A return value equal to *nb_pkts* means that all packets are valid and
6277  * ready to be sent, otherwise stops processing on the first invalid packet and
6278  * leaves the rest packets untouched.
6279  *
6280  * When this functionality is not implemented in the driver, all packets are
6281  * are returned untouched.
6282  *
6283  * @param port_id
6284  *   The port identifier of the Ethernet device.
6285  *   The value must be a valid port ID.
6286  * @param queue_id
6287  *   The index of the transmit queue through which output packets must be
6288  *   sent.
6289  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6290  *   to rte_eth_dev_configure().
6291  * @param tx_pkts
6292  *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
6293  *   which contain the output packets.
6294  * @param nb_pkts
6295  *   The maximum number of packets to process.
6296  * @return
6297  *   The number of packets correct and ready to be sent. The return value can be
6298  *   less than the value of the *tx_pkts* parameter when some packet doesn't
6299  *   meet devices requirements with rte_errno set appropriately:
6300  *   - EINVAL: offload flags are not correctly set
6301  *   - ENOTSUP: the offload feature is not supported by the hardware
6302  *   - ENODEV: if *port_id* is invalid (with debug enabled only)
6303  *
6304  */
6305 
6306 #ifndef RTE_ETHDEV_TX_PREPARE_NOOP
6307 
6308 static inline uint16_t
6309 rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
6310 		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6311 {
6312 	struct rte_eth_fp_ops *p;
6313 	void *qd;
6314 
6315 #ifdef RTE_ETHDEV_DEBUG_TX
6316 	if (port_id >= RTE_MAX_ETHPORTS ||
6317 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
6318 		RTE_ETHDEV_LOG(ERR,
6319 			"Invalid port_id=%u or queue_id=%u\n",
6320 			port_id, queue_id);
6321 		rte_errno = ENODEV;
6322 		return 0;
6323 	}
6324 #endif
6325 
6326 	/* fetch pointer to queue data */
6327 	p = &rte_eth_fp_ops[port_id];
6328 	qd = p->txq.data[queue_id];
6329 
6330 #ifdef RTE_ETHDEV_DEBUG_TX
6331 	if (!rte_eth_dev_is_valid_port(port_id)) {
6332 		RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
6333 		rte_errno = ENODEV;
6334 		return 0;
6335 	}
6336 	if (qd == NULL) {
6337 		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
6338 			queue_id, port_id);
6339 		rte_errno = EINVAL;
6340 		return 0;
6341 	}
6342 #endif
6343 
6344 	if (!p->tx_pkt_prepare)
6345 		return nb_pkts;
6346 
6347 	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
6348 }
6349 
6350 #else
6351 
6352 /*
6353  * Native NOOP operation for compilation targets which doesn't require any
6354  * preparations steps, and functional NOOP may introduce unnecessary performance
6355  * drop.
6356  *
6357  * Generally this is not a good idea to turn it on globally and didn't should
6358  * be used if behavior of tx_preparation can change.
6359  */
6360 
6361 static inline uint16_t
6362 rte_eth_tx_prepare(__rte_unused uint16_t port_id,
6363 		__rte_unused uint16_t queue_id,
6364 		__rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
6365 {
6366 	return nb_pkts;
6367 }
6368 
6369 #endif
6370 
6371 /**
6372  * Send any packets queued up for transmission on a port and HW queue
6373  *
6374  * This causes an explicit flush of packets previously buffered via the
6375  * rte_eth_tx_buffer() function. It returns the number of packets successfully
6376  * sent to the NIC, and calls the error callback for any unsent packets. Unless
6377  * explicitly set up otherwise, the default callback simply frees the unsent
6378  * packets back to the owning mempool.
6379  *
6380  * @param port_id
6381  *   The port identifier of the Ethernet device.
6382  * @param queue_id
6383  *   The index of the transmit queue through which output packets must be
6384  *   sent.
6385  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6386  *   to rte_eth_dev_configure().
6387  * @param buffer
6388  *   Buffer of packets to be transmit.
6389  * @return
6390  *   The number of packets successfully sent to the Ethernet device. The error
6391  *   callback is called for any packets which could not be sent.
6392  */
6393 static inline uint16_t
6394 rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
6395 		struct rte_eth_dev_tx_buffer *buffer)
6396 {
6397 	uint16_t sent;
6398 	uint16_t to_send = buffer->length;
6399 
6400 	if (to_send == 0)
6401 		return 0;
6402 
6403 	sent = rte_eth_tx_burst(port_id, queue_id, buffer->pkts, to_send);
6404 
6405 	buffer->length = 0;
6406 
6407 	/* All packets sent, or to be dealt with by callback below */
6408 	if (unlikely(sent != to_send))
6409 		buffer->error_callback(&buffer->pkts[sent],
6410 				       (uint16_t)(to_send - sent),
6411 				       buffer->error_userdata);
6412 
6413 	return sent;
6414 }
6415 
6416 /**
6417  * Buffer a single packet for future transmission on a port and queue
6418  *
6419  * This function takes a single mbuf/packet and buffers it for later
6420  * transmission on the particular port and queue specified. Once the buffer is
6421  * full of packets, an attempt will be made to transmit all the buffered
6422  * packets. In case of error, where not all packets can be transmitted, a
6423  * callback is called with the unsent packets as a parameter. If no callback
6424  * is explicitly set up, the unsent packets are just freed back to the owning
6425  * mempool. The function returns the number of packets actually sent i.e.
6426  * 0 if no buffer flush occurred, otherwise the number of packets successfully
6427  * flushed
6428  *
6429  * @param port_id
6430  *   The port identifier of the Ethernet device.
6431  * @param queue_id
6432  *   The index of the transmit queue through which output packets must be
6433  *   sent.
6434  *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
6435  *   to rte_eth_dev_configure().
6436  * @param buffer
6437  *   Buffer used to collect packets to be sent.
6438  * @param tx_pkt
6439  *   Pointer to the packet mbuf to be sent.
6440  * @return
6441  *   0 = packet has been buffered for later transmission
6442  *   N > 0 = packet has been buffered, and the buffer was subsequently flushed,
6443  *     causing N packets to be sent, and the error callback to be called for
6444  *     the rest.
6445  */
6446 static __rte_always_inline uint16_t
6447 rte_eth_tx_buffer(uint16_t port_id, uint16_t queue_id,
6448 		struct rte_eth_dev_tx_buffer *buffer, struct rte_mbuf *tx_pkt)
6449 {
6450 	buffer->pkts[buffer->length++] = tx_pkt;
6451 	if (buffer->length < buffer->size)
6452 		return 0;
6453 
6454 	return rte_eth_tx_buffer_flush(port_id, queue_id, buffer);
6455 }
6456 
6457 /**
6458  * @warning
6459  * @b EXPERIMENTAL: this API may change without prior notice
6460  *
6461  * Get supported header protocols to split on Rx.
6462  *
6463  * When a packet type is announced to be split,
6464  * it *must* be supported by the PMD.
6465  * For instance, if eth-ipv4, eth-ipv4-udp is announced,
6466  * the PMD must return the following packet types for these packets:
6467  * - Ether/IPv4             -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
6468  * - Ether/IPv4/UDP         -> RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP
6469  *
6470  * @param port_id
6471  *   The port identifier of the device.
6472  * @param[out] ptypes
6473  *   An array pointer to store supported protocol headers, allocated by caller.
6474  *   These ptypes are composed with RTE_PTYPE_*.
6475  * @param num
6476  *   Size of the array pointed by param ptypes.
6477  * @return
6478  *   - (>=0) Number of supported ptypes. If the number of types exceeds num,
6479  *           only num entries will be filled into the ptypes array,
6480  *           but the full count of supported ptypes will be returned.
6481  *   - (-ENOTSUP) if header protocol is not supported by device.
6482  *   - (-ENODEV) if *port_id* invalid.
6483  *   - (-EINVAL) if bad parameter.
6484  */
6485 __rte_experimental
6486 int rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes, int num);
6487 
6488 #ifdef __cplusplus
6489 }
6490 #endif
6491 
6492 #endif /* _RTE_ETHDEV_H_ */
6493