xref: /dpdk/lib/eventdev/rte_event_eth_rx_adapter.c (revision 952b24bd0475450e548d4aafae7d8cf48258402b)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2017 Intel Corporation.
399a2dd95SBruce Richardson  * All rights reserved.
499a2dd95SBruce Richardson  */
572b452c5SDmitry Kozlyuk #include <ctype.h>
672b452c5SDmitry Kozlyuk #include <stdlib.h>
72744cb6eSThomas Monjalon #include <pthread.h>
853b2eaa2SBruce Richardson #if defined(__linux__)
999a2dd95SBruce Richardson #include <sys/epoll.h>
1099a2dd95SBruce Richardson #endif
1199a2dd95SBruce Richardson #include <unistd.h>
1299a2dd95SBruce Richardson 
1399a2dd95SBruce Richardson #include <rte_cycles.h>
141c1abf17SThomas Monjalon #include <rte_thread.h>
1599a2dd95SBruce Richardson #include <rte_common.h>
161acb7f54SDavid Marchand #include <dev_driver.h>
1799a2dd95SBruce Richardson #include <rte_errno.h>
18f9bdee26SKonstantin Ananyev #include <ethdev_driver.h>
1999a2dd95SBruce Richardson #include <rte_log.h>
2099a2dd95SBruce Richardson #include <rte_malloc.h>
2199a2dd95SBruce Richardson #include <rte_service_component.h>
2299a2dd95SBruce Richardson #include <rte_thash.h>
2399a2dd95SBruce Richardson #include <rte_interrupts.h>
2483ab470dSGanapati Kundapura #include <rte_mbuf_dyn.h>
25814d0170SGanapati Kundapura #include <rte_telemetry.h>
2699a2dd95SBruce Richardson 
2799a2dd95SBruce Richardson #include "rte_eventdev.h"
2899a2dd95SBruce Richardson #include "eventdev_pmd.h"
29f26f2ca6SPavan Nikhilesh #include "eventdev_trace.h"
3099a2dd95SBruce Richardson #include "rte_event_eth_rx_adapter.h"
3199a2dd95SBruce Richardson 
/* Rx burst size (presumably packets per rte_eth_rx_burst call) */
#define BATCH_SIZE		32
/* Consecutive blocked-enqueue count threshold — NOTE(review): exact use
 * not visible in this chunk; confirm against the service function.
 */
#define BLOCK_CNT_THRESHOLD	10
/* Default adapter event buffer size, a multiple of the burst size */
#define ETH_EVENT_BUFFER_SIZE	(6*BATCH_SIZE)
/* Bounds on event vector entry count */
#define MAX_VECTOR_SIZE		1024
#define MIN_VECTOR_SIZE		4
/* Bounds on event vector timeout, in nanoseconds (names imply ns) */
#define MAX_VECTOR_NS		1E9
#define MIN_VECTOR_NS		1E5

/* Default max Rx work per service invocation (presumed from name) */
#define RXA_NB_RX_WORK_DEFAULT 128

/* Buffer lengths for the service and memory allocation names */
#define ETH_RX_ADAPTER_SERVICE_NAME_LEN	32
#define ETH_RX_ADAPTER_MEM_NAME_LEN	32

#define RSS_KEY_SIZE	40
/* value written to intr thread pipe to signal thread exit */
#define ETH_BRIDGE_INTR_THREAD_EXIT	1
/* Sentinel value to detect initialized file handle */
#define INIT_FD		-1

/* Name of the shared array holding adapter instance pointers */
#define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
52da781e64SGanapati Kundapura 
/*
 * Used to store port and queue ID of interrupting Rx queue.
 * The anonymous struct overlays a single pointer-sized value (ptr) so the
 * pair can be passed where a void * is expected (e.g. through a ring).
 */
union queue_data {
	void *ptr;
	struct {
		uint16_t port;	/* ethdev port ID */
		uint16_t queue;	/* Rx queue ID on that port */
	};
};
6399a2dd95SBruce Richardson 
/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter: it identifies one (port, queue) pair in the polling schedule.
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint16_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};
7499a2dd95SBruce Richardson 
/* Per-queue state for aggregating mbufs into an rte_event_vector.
 * Cache-line aligned; linked into the adapter's expiry list via 'next'.
 */
struct __rte_cache_aligned eth_rx_vector_data {
	TAILQ_ENTRY(eth_rx_vector_data) next;
	/* Source ethdev port ID */
	uint16_t port;
	/* Source Rx queue ID */
	uint16_t queue;
	/* Max entries accumulated before the vector is flushed */
	uint16_t max_vector_count;
	/* Prototype event word used when enqueuing the vector */
	uint64_t event;
	/* Timestamp used to detect vector timeout expiry */
	uint64_t ts;
	/* Timeout expressed in ticks */
	uint64_t vector_timeout_ticks;
	/* Pool that rte_event_vector objects are allocated from */
	struct rte_mempool *vector_pool;
	/* Vector currently being filled; NULL presumably means none
	 * allocated yet — confirm against the fill path.
	 */
	struct rte_event_vector *vector_ev;
};

/* List head type for the adapter's vector expiry list */
TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
8899a2dd95SBruce Richardson 
/* Instance per adapter (or per Rx queue when use_queue_event_buf is set).
 * Circular staging buffer of events awaiting enqueue to the event device.
 */
struct eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event *events;
	/* size of event buffer */
	uint16_t events_size;
	/* Event enqueue happens from head */
	uint16_t head;
	/* New packets from rte_eth_rx_burst are enqueued from tail */
	uint16_t tail;
	/* last element in the buffer before rollover */
	uint16_t last;
	/* Mask applied at rollover — NOTE(review): exact wrap semantics
	 * depend on the fill/drain code, not visible here.
	 */
	uint16_t last_mask;
};
10599a2dd95SBruce Richardson 
/* Per-adapter instance state; cache-line aligned. */
struct __rte_cache_aligned event_eth_rx_adapter {
	/* RSS key (byte layout implied big-endian by the _be suffix) */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Flag indicating per rxq event buffer */
	bool use_queue_event_buf;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer (adapter-level; unused per queue when
	 * use_queue_event_buf is set)
	 */
	struct eth_event_enqueue_buffer event_enqueue_buffer;
	/* Vector enable flag */
	uint8_t ena_vector;
	/* Timestamp of previous vector expiry list traversal */
	uint64_t prev_expiry_ts;
	/* Minimum ticks to wait before traversing expiry list */
	uint64_t vector_tmo_ticks;
	/* vector list */
	struct eth_rx_vector_data_list vector_list;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start ts */
	uint64_t rx_enq_block_start_ts;
	/* epoll fd used to wait for Rx interrupts */
	int epd;
	/* Num of interrupt driven interrupt queues */
	uint32_t num_rx_intr;
	/* Used to send <dev id, queue id> of interrupting Rx queues from
	 * the interrupt thread to the Rx thread
	 */
	struct rte_ring *intr_ring;
	/* Rx Queue data (dev id, queue id) for the last non-empty
	 * queue polled
	 */
	union queue_data qd;
	/* queue_data is valid */
	int qd_valid;
	/* Interrupt ring lock, synchronizes Rx thread
	 * and interrupt thread
	 */
	rte_spinlock_t intr_ring_lock;
	/* event array passed to rte_poll_wait */
	struct rte_epoll_event *epoll_events;
	/* Count of interrupt vectors in use */
	uint32_t num_intr_vec;
	/* Thread blocked on Rx interrupts */
	rte_thread_t rx_intr_thread;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* Adapter started flag */
	uint8_t rxa_started;
	/* Adapter ID */
	uint8_t id;
};
19299a2dd95SBruce Richardson 
/* Per eth device state tracked by the adapter */
struct eth_device_info {
	struct rte_eth_dev *dev;
	/* Per-queue state array; may be NULL until queues are added */
	struct eth_rx_queue_info *rx_queue;
	/* Rx callback */
	rte_event_eth_rx_adapter_cb_fn cb_fn;
	/* Rx callback argument */
	void *cb_arg;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* Number of queues added for this device */
	uint16_t nb_dev_queues;
	/* Number of poll based queues
	 * If nb_rx_poll > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_rx_poll;
	/* Number of interrupt based queues
	 * If nb_rx_intr > 0, the start callback will
	 * be invoked if not already invoked.
	 */
	uint16_t nb_rx_intr;
	/* Number of queues that use the shared interrupt */
	uint16_t nb_shared_intr;
	/* sum(wrr(q)) for all queues within the device
	 * useful when deleting all device queues
	 */
	uint32_t wrr_len;
	/* Intr based queue index to start polling from, this is used
	 * if the number of shared interrupts is non-zero
	 */
	uint16_t next_q_idx;
	/* Intr based queue indices */
	uint16_t *intr_queue;
	/* device generates per Rx queue interrupt for queue index
	 * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
	 */
	int multi_intr_cap;
	/* shared interrupt enabled */
	int shared_intr_enabled;
};
24299a2dd95SBruce Richardson 
/* Per Rx queue state tracked by the adapter */
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	int intr_enabled;	/* True if queue is in interrupt mode */
	uint8_t ena_vector;	/* Event vectorization enabled for queue */
	uint16_t wt;		/* Polling weight; 0 => interrupt mode */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
	/* Prototype event word for events from this queue */
	uint64_t event;
	/* Vector aggregation state for this queue */
	struct eth_rx_vector_data vector_data;
	/* Per-queue enqueue buffer, used when use_queue_event_buf is set */
	struct eth_event_enqueue_buffer *event_buf;
	/* use adapter stats struct for queue level stats,
	 * as same stats need to be updated for adapter and queue
	 */
	struct rte_event_eth_rx_adapter_stats *stats;
};
25899a2dd95SBruce Richardson 
/* Array of adapter instance pointers (presumably indexed by adapter ID;
 * see RXA_ADAPTER_ARRAY — confirm against the init code).
 */
static struct event_eth_rx_adapter **event_eth_rx_adapter;

/* Enable dynamic timestamp field in mbuf */
static uint64_t event_eth_rx_timestamp_dynflag;
/* Offset of the registered dynamic timestamp field; -1 until registered */
static int event_eth_rx_timestamp_dynfield_offset = -1;
26483ab470dSGanapati Kundapura 
26583ab470dSGanapati Kundapura static inline rte_mbuf_timestamp_t *
26683ab470dSGanapati Kundapura rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
26783ab470dSGanapati Kundapura {
26883ab470dSGanapati Kundapura 	return RTE_MBUF_DYNFIELD(mbuf,
26983ab470dSGanapati Kundapura 		event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
27083ab470dSGanapati Kundapura }
27183ab470dSGanapati Kundapura 
27299a2dd95SBruce Richardson static inline int
27399a2dd95SBruce Richardson rxa_validate_id(uint8_t id)
27499a2dd95SBruce Richardson {
27599a2dd95SBruce Richardson 	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
27699a2dd95SBruce Richardson }
27799a2dd95SBruce Richardson 
278a256a743SPavan Nikhilesh static inline struct eth_event_enqueue_buffer *
279a256a743SPavan Nikhilesh rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
280995b150cSNaga Harish K S V 		  uint16_t rx_queue_id,
281995b150cSNaga Harish K S V 		  struct rte_event_eth_rx_adapter_stats **stats)
282b06bca69SNaga Harish K S V {
283b06bca69SNaga Harish K S V 	if (rx_adapter->use_queue_event_buf) {
284b06bca69SNaga Harish K S V 		struct eth_device_info *dev_info =
285b06bca69SNaga Harish K S V 			&rx_adapter->eth_devices[eth_dev_id];
286995b150cSNaga Harish K S V 		*stats = dev_info->rx_queue[rx_queue_id].stats;
287b06bca69SNaga Harish K S V 		return dev_info->rx_queue[rx_queue_id].event_buf;
288995b150cSNaga Harish K S V 	} else {
289995b150cSNaga Harish K S V 		*stats = &rx_adapter->stats;
290b06bca69SNaga Harish K S V 		return &rx_adapter->event_enqueue_buffer;
291b06bca69SNaga Harish K S V 	}
292995b150cSNaga Harish K S V }
293b06bca69SNaga Harish K S V 
/* Log and return 'retval' from the caller when the adapter ID is invalid */
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
		return retval; \
	} \
} while (0)

/* Log, set 'ret' and jump to the caller's 'error' label when the adapter
 * ID is invalid; requires 'ret' and 'error' in the calling scope.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d", id); \
		ret = retval; \
		goto error; \
	} \
} while (0)

/* Validate a telemetry token: non-NULL, non-empty, starts with a digit;
 * on failure set 'ret' and jump to 'error'.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \
	if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token"); \
		ret = retval; \
		goto error; \
	} \
} while (0)

/* Validate an ethdev port ID; on failure set 'ret' and jump to 'error' */
#define RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_EDEV_LOG_ERR("Invalid port_id=%u", port_id); \
		ret = retval; \
		goto error; \
	} \
} while (0)
32474b034ffSWeiguo Li 
32599a2dd95SBruce Richardson static inline int
326a256a743SPavan Nikhilesh rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
32799a2dd95SBruce Richardson {
32899a2dd95SBruce Richardson 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
32999a2dd95SBruce Richardson }
33099a2dd95SBruce Richardson 
/* Greatest common divisor (iterative Euclidean algorithm) */
static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
	while (b != 0) {
		uint16_t rem = a % b;

		a = b;
		b = rem;
	}
	return a;
}
33899a2dd95SBruce Richardson 
/* Returns the next queue in the polling sequence
 *
 * Classic weighted round-robin: a "current weight" (*cw) threshold is
 * lowered by the GCD of all weights each full pass over the queues, and
 * a queue is selected when its weight reaches the threshold.
 *
 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
 */
static int
rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
	     uint16_t gcd, int prev)
{
	int i = prev;
	uint16_t w;

	while (1) {
		uint16_t q;
		uint16_t d;

		/* Advance circularly through the poll array */
		i = (i + 1) % n;
		if (i == 0) {
			/* Completed a pass: lower the threshold by the GCD;
			 * wrap back to the max weight when exhausted so
			 * lower-weight queues eventually qualify.
			 */
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}

		q = eth_rx_poll[i].eth_rx_qid;
		d = eth_rx_poll[i].eth_dev_id;
		w = rx_adapter->eth_devices[d].rx_queue[q].wt;

		/* Pick the first queue whose weight meets the threshold */
		if ((int)w >= *cw)
			return i;
	}
}
37099a2dd95SBruce Richardson 
37199a2dd95SBruce Richardson static inline int
37299a2dd95SBruce Richardson rxa_shared_intr(struct eth_device_info *dev_info,
37399a2dd95SBruce Richardson 	int rx_queue_id)
37499a2dd95SBruce Richardson {
37599a2dd95SBruce Richardson 	int multi_intr_cap;
37699a2dd95SBruce Richardson 
37799a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle == NULL)
37899a2dd95SBruce Richardson 		return 0;
37999a2dd95SBruce Richardson 
38099a2dd95SBruce Richardson 	multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
38199a2dd95SBruce Richardson 	return !multi_intr_cap ||
38299a2dd95SBruce Richardson 		rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
38399a2dd95SBruce Richardson }
38499a2dd95SBruce Richardson 
38599a2dd95SBruce Richardson static inline int
38699a2dd95SBruce Richardson rxa_intr_queue(struct eth_device_info *dev_info,
38799a2dd95SBruce Richardson 	int rx_queue_id)
38899a2dd95SBruce Richardson {
38999a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
39099a2dd95SBruce Richardson 
39199a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
39299a2dd95SBruce Richardson 	return dev_info->rx_queue &&
39399a2dd95SBruce Richardson 		!dev_info->internal_event_port &&
39499a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt == 0;
39599a2dd95SBruce Richardson }
39699a2dd95SBruce Richardson 
39799a2dd95SBruce Richardson static inline int
39899a2dd95SBruce Richardson rxa_polled_queue(struct eth_device_info *dev_info,
39999a2dd95SBruce Richardson 	int rx_queue_id)
40099a2dd95SBruce Richardson {
40199a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
40299a2dd95SBruce Richardson 
40399a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
40499a2dd95SBruce Richardson 	return !dev_info->internal_event_port &&
40599a2dd95SBruce Richardson 		dev_info->rx_queue &&
40699a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt != 0;
40799a2dd95SBruce Richardson }
40899a2dd95SBruce Richardson 
/* Calculate change in number of vectors after Rx queue ID is add/deleted.
 *
 * Non-shared-interrupt queues each consume their own vector; all
 * shared-interrupt queues of a device together consume a single vector.
 * Returns a positive delta for add, negative for delete.
 */
static int
rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
	uint16_t i;
	int n, s;
	uint16_t nbq;

	nbq = dev_info->dev->data->nb_rx_queues;
	n = 0; /* non shared count */
	s = 0; /* shared count */

	if (rx_queue_id == -1) {
		/* All queues: count those whose intr-mode status would
		 * change (add counts queues not yet in intr mode; delete
		 * counts queues currently in intr mode).
		 */
		for (i = 0; i < nbq; i++) {
			if (!rxa_shared_intr(dev_info, i))
				n += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
			else
				s += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
		}

		/* The shared vector is gained/lost only at the 0 <-> nonzero
		 * transition of the device's shared-intr queue count.
		 */
		if (s > 0) {
			if ((add && dev_info->nb_shared_intr == 0) ||
				(!add && dev_info->nb_shared_intr))
				n += 1;
		}
	} else {
		/* Single queue: its own vector, or the shared vector if this
		 * is the first/last shared-intr queue of the device.
		 */
		if (!rxa_shared_intr(dev_info, rx_queue_id))
			n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
				rxa_intr_queue(dev_info, rx_queue_id);
		else
			n = add ? !dev_info->nb_shared_intr :
				dev_info->nb_shared_intr == 1;
	}

	return add ? n : -n;
}
44799a2dd95SBruce Richardson 
44899a2dd95SBruce Richardson /* Calculate nb_rx_intr after deleting interrupt mode rx queues
44999a2dd95SBruce Richardson  */
45099a2dd95SBruce Richardson static void
451a256a743SPavan Nikhilesh rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
452a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
45399a2dd95SBruce Richardson 			  uint32_t *nb_rx_intr)
45499a2dd95SBruce Richardson {
45599a2dd95SBruce Richardson 	uint32_t intr_diff;
45699a2dd95SBruce Richardson 
45799a2dd95SBruce Richardson 	if (rx_queue_id == -1)
45899a2dd95SBruce Richardson 		intr_diff = dev_info->nb_rx_intr;
45999a2dd95SBruce Richardson 	else
46099a2dd95SBruce Richardson 		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
46199a2dd95SBruce Richardson 
46299a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
46399a2dd95SBruce Richardson }
46499a2dd95SBruce Richardson 
46599a2dd95SBruce Richardson /* Calculate nb_rx_* after adding interrupt mode rx queues, newly added
46699a2dd95SBruce Richardson  * interrupt queues could currently be poll mode Rx queues
46799a2dd95SBruce Richardson  */
46899a2dd95SBruce Richardson static void
469a256a743SPavan Nikhilesh rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
470a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
471a256a743SPavan Nikhilesh 			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
47299a2dd95SBruce Richardson 			  uint32_t *nb_wrr)
47399a2dd95SBruce Richardson {
47499a2dd95SBruce Richardson 	uint32_t intr_diff;
47599a2dd95SBruce Richardson 	uint32_t poll_diff;
47699a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
47799a2dd95SBruce Richardson 
47899a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
47999a2dd95SBruce Richardson 		intr_diff = dev_info->dev->data->nb_rx_queues -
48099a2dd95SBruce Richardson 						dev_info->nb_rx_intr;
48199a2dd95SBruce Richardson 		poll_diff = dev_info->nb_rx_poll;
48299a2dd95SBruce Richardson 		wrr_len_diff = dev_info->wrr_len;
48399a2dd95SBruce Richardson 	} else {
48499a2dd95SBruce Richardson 		intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
48599a2dd95SBruce Richardson 		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
48699a2dd95SBruce Richardson 		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
48799a2dd95SBruce Richardson 					0;
48899a2dd95SBruce Richardson 	}
48999a2dd95SBruce Richardson 
49099a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
49199a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
49299a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
49399a2dd95SBruce Richardson }
49499a2dd95SBruce Richardson 
49599a2dd95SBruce Richardson /* Calculate size of the eth_rx_poll and wrr_sched arrays
49699a2dd95SBruce Richardson  * after deleting poll mode rx queues
49799a2dd95SBruce Richardson  */
49899a2dd95SBruce Richardson static void
499a256a743SPavan Nikhilesh rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
500a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
501a256a743SPavan Nikhilesh 			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
50299a2dd95SBruce Richardson {
50399a2dd95SBruce Richardson 	uint32_t poll_diff;
50499a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
50599a2dd95SBruce Richardson 
50699a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
50799a2dd95SBruce Richardson 		poll_diff = dev_info->nb_rx_poll;
50899a2dd95SBruce Richardson 		wrr_len_diff = dev_info->wrr_len;
50999a2dd95SBruce Richardson 	} else {
51099a2dd95SBruce Richardson 		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
51199a2dd95SBruce Richardson 		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
51299a2dd95SBruce Richardson 					0;
51399a2dd95SBruce Richardson 	}
51499a2dd95SBruce Richardson 
51599a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
51699a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
51799a2dd95SBruce Richardson }
51899a2dd95SBruce Richardson 
/* Calculate nb_rx_* after adding poll mode rx queues with weight 'wt'.
 * Queues moving from interrupt to poll mode reduce the intr count; the
 * WRR length grows by the new total weight minus whatever weight the
 * affected queues already contributed.
 */
static void
rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
			  struct eth_device_info *dev_info, int rx_queue_id,
			  uint16_t wt, uint32_t *nb_rx_poll,
			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		/* All queues: every intr queue converts, every queue not yet
		 * polled is added, and each queue ends up with weight wt.
		 */
		intr_diff = dev_info->nb_rx_intr;
		poll_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_poll;
		wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
				- dev_info->wrr_len;
	} else {
		/* Single queue: converts if in intr mode; WRR grows by wt,
		 * less the queue's previous weight if it was already polled.
		 */
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
				wt - dev_info->rx_queue[rx_queue_id].wt :
				wt;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
	*nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
}
54999a2dd95SBruce Richardson 
55099a2dd95SBruce Richardson /* Calculate nb_rx_* after adding rx_queue_id */
55199a2dd95SBruce Richardson static void
552a256a743SPavan Nikhilesh rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
553a256a743SPavan Nikhilesh 		     struct eth_device_info *dev_info, int rx_queue_id,
554a256a743SPavan Nikhilesh 		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
55599a2dd95SBruce Richardson 		     uint32_t *nb_wrr)
55699a2dd95SBruce Richardson {
55799a2dd95SBruce Richardson 	if (wt != 0)
55899a2dd95SBruce Richardson 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
55999a2dd95SBruce Richardson 					wt, nb_rx_poll, nb_rx_intr, nb_wrr);
56099a2dd95SBruce Richardson 	else
56199a2dd95SBruce Richardson 		rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
56299a2dd95SBruce Richardson 					nb_rx_poll, nb_rx_intr, nb_wrr);
56399a2dd95SBruce Richardson }
56499a2dd95SBruce Richardson 
56599a2dd95SBruce Richardson /* Calculate nb_rx_* after deleting rx_queue_id */
56699a2dd95SBruce Richardson static void
567a256a743SPavan Nikhilesh rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
568a256a743SPavan Nikhilesh 		     struct eth_device_info *dev_info, int rx_queue_id,
569a256a743SPavan Nikhilesh 		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
57099a2dd95SBruce Richardson 		     uint32_t *nb_wrr)
57199a2dd95SBruce Richardson {
57299a2dd95SBruce Richardson 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
57399a2dd95SBruce Richardson 				nb_wrr);
57499a2dd95SBruce Richardson 	rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
57599a2dd95SBruce Richardson 				nb_rx_intr);
57699a2dd95SBruce Richardson }
57799a2dd95SBruce Richardson 
57899a2dd95SBruce Richardson /*
57999a2dd95SBruce Richardson  * Allocate the rx_poll array
58099a2dd95SBruce Richardson  */
58199a2dd95SBruce Richardson static struct eth_rx_poll_entry *
582a256a743SPavan Nikhilesh rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
58399a2dd95SBruce Richardson {
58499a2dd95SBruce Richardson 	size_t len;
58599a2dd95SBruce Richardson 
58699a2dd95SBruce Richardson 	len  = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
58799a2dd95SBruce Richardson 							RTE_CACHE_LINE_SIZE);
58899a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
58999a2dd95SBruce Richardson 				len,
59099a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
59199a2dd95SBruce Richardson 				rx_adapter->socket_id);
59299a2dd95SBruce Richardson }
59399a2dd95SBruce Richardson 
59499a2dd95SBruce Richardson /*
59599a2dd95SBruce Richardson  * Allocate the WRR array
59699a2dd95SBruce Richardson  */
59799a2dd95SBruce Richardson static uint32_t *
598a256a743SPavan Nikhilesh rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
59999a2dd95SBruce Richardson {
60099a2dd95SBruce Richardson 	size_t len;
60199a2dd95SBruce Richardson 
60299a2dd95SBruce Richardson 	len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
60399a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE);
60499a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
60599a2dd95SBruce Richardson 				len,
60699a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
60799a2dd95SBruce Richardson 				rx_adapter->socket_id);
60899a2dd95SBruce Richardson }
60999a2dd95SBruce Richardson 
61099a2dd95SBruce Richardson static int
611a256a743SPavan Nikhilesh rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
612a256a743SPavan Nikhilesh 		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
61399a2dd95SBruce Richardson 		      uint32_t **wrr_sched)
61499a2dd95SBruce Richardson {
61599a2dd95SBruce Richardson 
61699a2dd95SBruce Richardson 	if (nb_poll == 0) {
61799a2dd95SBruce Richardson 		*rx_poll = NULL;
61899a2dd95SBruce Richardson 		*wrr_sched = NULL;
61999a2dd95SBruce Richardson 		return 0;
62099a2dd95SBruce Richardson 	}
62199a2dd95SBruce Richardson 
62299a2dd95SBruce Richardson 	*rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
62399a2dd95SBruce Richardson 	if (*rx_poll == NULL) {
62499a2dd95SBruce Richardson 		*wrr_sched = NULL;
62599a2dd95SBruce Richardson 		return -ENOMEM;
62699a2dd95SBruce Richardson 	}
62799a2dd95SBruce Richardson 
62899a2dd95SBruce Richardson 	*wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
62999a2dd95SBruce Richardson 	if (*wrr_sched == NULL) {
63099a2dd95SBruce Richardson 		rte_free(*rx_poll);
63199a2dd95SBruce Richardson 		return -ENOMEM;
63299a2dd95SBruce Richardson 	}
63399a2dd95SBruce Richardson 	return 0;
63499a2dd95SBruce Richardson }
63599a2dd95SBruce Richardson 
63699a2dd95SBruce Richardson /* Precalculate WRR polling sequence for all queues in rx_adapter */
63799a2dd95SBruce Richardson static void
638a256a743SPavan Nikhilesh rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
639a256a743SPavan Nikhilesh 		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
64099a2dd95SBruce Richardson {
64199a2dd95SBruce Richardson 	uint16_t d;
64299a2dd95SBruce Richardson 	uint16_t q;
64399a2dd95SBruce Richardson 	unsigned int i;
64499a2dd95SBruce Richardson 	int prev = -1;
64599a2dd95SBruce Richardson 	int cw = -1;
64699a2dd95SBruce Richardson 
64799a2dd95SBruce Richardson 	/* Initialize variables for calculation of wrr schedule */
64899a2dd95SBruce Richardson 	uint16_t max_wrr_pos = 0;
64999a2dd95SBruce Richardson 	unsigned int poll_q = 0;
65099a2dd95SBruce Richardson 	uint16_t max_wt = 0;
65199a2dd95SBruce Richardson 	uint16_t gcd = 0;
65299a2dd95SBruce Richardson 
65399a2dd95SBruce Richardson 	if (rx_poll == NULL)
65499a2dd95SBruce Richardson 		return;
65599a2dd95SBruce Richardson 
65699a2dd95SBruce Richardson 	/* Generate array of all queues to poll, the size of this
65799a2dd95SBruce Richardson 	 * array is poll_q
65899a2dd95SBruce Richardson 	 */
65999a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(d) {
66099a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
66199a2dd95SBruce Richardson 		struct eth_device_info *dev_info =
66299a2dd95SBruce Richardson 				&rx_adapter->eth_devices[d];
66399a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
66499a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
66599a2dd95SBruce Richardson 			continue;
66699a2dd95SBruce Richardson 		if (dev_info->internal_event_port)
66799a2dd95SBruce Richardson 			continue;
66899a2dd95SBruce Richardson 		dev_info->wrr_len = 0;
66999a2dd95SBruce Richardson 		for (q = 0; q < nb_rx_queues; q++) {
67099a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info =
67199a2dd95SBruce Richardson 				&dev_info->rx_queue[q];
67299a2dd95SBruce Richardson 			uint16_t wt;
67399a2dd95SBruce Richardson 
67499a2dd95SBruce Richardson 			if (!rxa_polled_queue(dev_info, q))
67599a2dd95SBruce Richardson 				continue;
67699a2dd95SBruce Richardson 			wt = queue_info->wt;
67799a2dd95SBruce Richardson 			rx_poll[poll_q].eth_dev_id = d;
67899a2dd95SBruce Richardson 			rx_poll[poll_q].eth_rx_qid = q;
67999a2dd95SBruce Richardson 			max_wrr_pos += wt;
68099a2dd95SBruce Richardson 			dev_info->wrr_len += wt;
68199a2dd95SBruce Richardson 			max_wt = RTE_MAX(max_wt, wt);
68299a2dd95SBruce Richardson 			gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
68399a2dd95SBruce Richardson 			poll_q++;
68499a2dd95SBruce Richardson 		}
68599a2dd95SBruce Richardson 	}
68699a2dd95SBruce Richardson 
68799a2dd95SBruce Richardson 	/* Generate polling sequence based on weights */
68899a2dd95SBruce Richardson 	prev = -1;
68999a2dd95SBruce Richardson 	cw = -1;
69099a2dd95SBruce Richardson 	for (i = 0; i < max_wrr_pos; i++) {
69199a2dd95SBruce Richardson 		rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
69299a2dd95SBruce Richardson 				     rx_poll, max_wt, gcd, prev);
69399a2dd95SBruce Richardson 		prev = rx_wrr[i];
69499a2dd95SBruce Richardson 	}
69599a2dd95SBruce Richardson }
69699a2dd95SBruce Richardson 
69799a2dd95SBruce Richardson static inline void
69899a2dd95SBruce Richardson rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
69999a2dd95SBruce Richardson 	struct rte_ipv6_hdr **ipv6_hdr)
70099a2dd95SBruce Richardson {
70199a2dd95SBruce Richardson 	struct rte_ether_hdr *eth_hdr =
70299a2dd95SBruce Richardson 		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
70399a2dd95SBruce Richardson 	struct rte_vlan_hdr *vlan_hdr;
70499a2dd95SBruce Richardson 
70599a2dd95SBruce Richardson 	*ipv4_hdr = NULL;
70699a2dd95SBruce Richardson 	*ipv6_hdr = NULL;
70799a2dd95SBruce Richardson 
70899a2dd95SBruce Richardson 	switch (eth_hdr->ether_type) {
70999a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
71099a2dd95SBruce Richardson 		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
71199a2dd95SBruce Richardson 		break;
71299a2dd95SBruce Richardson 
71399a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
71499a2dd95SBruce Richardson 		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
71599a2dd95SBruce Richardson 		break;
71699a2dd95SBruce Richardson 
71799a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
71899a2dd95SBruce Richardson 		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
71999a2dd95SBruce Richardson 		switch (vlan_hdr->eth_proto) {
72099a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
72199a2dd95SBruce Richardson 			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
72299a2dd95SBruce Richardson 			break;
72399a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
72499a2dd95SBruce Richardson 			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
72599a2dd95SBruce Richardson 			break;
72699a2dd95SBruce Richardson 		default:
72799a2dd95SBruce Richardson 			break;
72899a2dd95SBruce Richardson 		}
72999a2dd95SBruce Richardson 		break;
73099a2dd95SBruce Richardson 
73199a2dd95SBruce Richardson 	default:
73299a2dd95SBruce Richardson 		break;
73399a2dd95SBruce Richardson 	}
73499a2dd95SBruce Richardson }
73599a2dd95SBruce Richardson 
73699a2dd95SBruce Richardson /* Calculate RSS hash for IPv4/6 */
73799a2dd95SBruce Richardson static inline uint32_t
73899a2dd95SBruce Richardson rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
73999a2dd95SBruce Richardson {
74099a2dd95SBruce Richardson 	uint32_t input_len;
74199a2dd95SBruce Richardson 	void *tuple;
74299a2dd95SBruce Richardson 	struct rte_ipv4_tuple ipv4_tuple;
74399a2dd95SBruce Richardson 	struct rte_ipv6_tuple ipv6_tuple;
74499a2dd95SBruce Richardson 	struct rte_ipv4_hdr *ipv4_hdr;
74599a2dd95SBruce Richardson 	struct rte_ipv6_hdr *ipv6_hdr;
74699a2dd95SBruce Richardson 
74799a2dd95SBruce Richardson 	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
74899a2dd95SBruce Richardson 
74999a2dd95SBruce Richardson 	if (ipv4_hdr) {
75099a2dd95SBruce Richardson 		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
75199a2dd95SBruce Richardson 		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
75299a2dd95SBruce Richardson 		tuple = &ipv4_tuple;
75399a2dd95SBruce Richardson 		input_len = RTE_THASH_V4_L3_LEN;
75499a2dd95SBruce Richardson 	} else if (ipv6_hdr) {
75599a2dd95SBruce Richardson 		rte_thash_load_v6_addrs(ipv6_hdr,
75699a2dd95SBruce Richardson 					(union rte_thash_tuple *)&ipv6_tuple);
75799a2dd95SBruce Richardson 		tuple = &ipv6_tuple;
75899a2dd95SBruce Richardson 		input_len = RTE_THASH_V6_L3_LEN;
75999a2dd95SBruce Richardson 	} else
76099a2dd95SBruce Richardson 		return 0;
76199a2dd95SBruce Richardson 
76299a2dd95SBruce Richardson 	return rte_softrss_be(tuple, input_len, rss_key_be);
76399a2dd95SBruce Richardson }
76499a2dd95SBruce Richardson 
76599a2dd95SBruce Richardson static inline int
766a256a743SPavan Nikhilesh rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
76799a2dd95SBruce Richardson {
76899a2dd95SBruce Richardson 	return !!rx_adapter->enq_block_count;
76999a2dd95SBruce Richardson }
77099a2dd95SBruce Richardson 
77199a2dd95SBruce Richardson static inline void
772a256a743SPavan Nikhilesh rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
77399a2dd95SBruce Richardson {
77499a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts)
77599a2dd95SBruce Richardson 		return;
77699a2dd95SBruce Richardson 
77799a2dd95SBruce Richardson 	rx_adapter->enq_block_count++;
77899a2dd95SBruce Richardson 	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
77999a2dd95SBruce Richardson 		return;
78099a2dd95SBruce Richardson 
78199a2dd95SBruce Richardson 	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
78299a2dd95SBruce Richardson }
78399a2dd95SBruce Richardson 
78499a2dd95SBruce Richardson static inline void
785a256a743SPavan Nikhilesh rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
78699a2dd95SBruce Richardson 		     struct rte_event_eth_rx_adapter_stats *stats)
78799a2dd95SBruce Richardson {
78899a2dd95SBruce Richardson 	if (unlikely(!stats->rx_enq_start_ts))
78999a2dd95SBruce Richardson 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
79099a2dd95SBruce Richardson 
79199a2dd95SBruce Richardson 	if (likely(!rxa_enq_blocked(rx_adapter)))
79299a2dd95SBruce Richardson 		return;
79399a2dd95SBruce Richardson 
79499a2dd95SBruce Richardson 	rx_adapter->enq_block_count = 0;
79599a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts) {
79699a2dd95SBruce Richardson 		stats->rx_enq_end_ts = rte_get_tsc_cycles();
79799a2dd95SBruce Richardson 		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
79899a2dd95SBruce Richardson 		    rx_adapter->rx_enq_block_start_ts;
79999a2dd95SBruce Richardson 		rx_adapter->rx_enq_block_start_ts = 0;
80099a2dd95SBruce Richardson 	}
80199a2dd95SBruce Richardson }
80299a2dd95SBruce Richardson 
80399a2dd95SBruce Richardson /* Enqueue buffered events to event device */
80499a2dd95SBruce Richardson static inline uint16_t
805a256a743SPavan Nikhilesh rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
806995b150cSNaga Harish K S V 		       struct eth_event_enqueue_buffer *buf,
807995b150cSNaga Harish K S V 		       struct rte_event_eth_rx_adapter_stats *stats)
80899a2dd95SBruce Richardson {
809572dce2bSMattias Rönnblom 	uint16_t count = buf->count;
810572dce2bSMattias Rönnblom 	uint16_t n = 0;
81199a2dd95SBruce Richardson 
8128113fd15SGanapati Kundapura 	if (!count)
81399a2dd95SBruce Richardson 		return 0;
81499a2dd95SBruce Richardson 
815572dce2bSMattias Rönnblom 	if (buf->last)
816572dce2bSMattias Rönnblom 		count = buf->last - buf->head;
817572dce2bSMattias Rönnblom 
818572dce2bSMattias Rönnblom 	if (count) {
819572dce2bSMattias Rönnblom 		n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
82099a2dd95SBruce Richardson 						rx_adapter->event_port_id,
8218113fd15SGanapati Kundapura 						&buf->events[buf->head],
8228113fd15SGanapati Kundapura 						count);
8238113fd15SGanapati Kundapura 		if (n != count)
82499a2dd95SBruce Richardson 			stats->rx_enq_retry++;
8258113fd15SGanapati Kundapura 
8268113fd15SGanapati Kundapura 		buf->head += n;
827572dce2bSMattias Rönnblom 	}
8288113fd15SGanapati Kundapura 
8298113fd15SGanapati Kundapura 	if (buf->last && n == count) {
8308113fd15SGanapati Kundapura 		uint16_t n1;
8318113fd15SGanapati Kundapura 
8328113fd15SGanapati Kundapura 		n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
8338113fd15SGanapati Kundapura 					rx_adapter->event_port_id,
8348113fd15SGanapati Kundapura 					&buf->events[0],
8358113fd15SGanapati Kundapura 					buf->tail);
8368113fd15SGanapati Kundapura 
8378113fd15SGanapati Kundapura 		if (n1 != buf->tail)
8388113fd15SGanapati Kundapura 			stats->rx_enq_retry++;
8398113fd15SGanapati Kundapura 
8408113fd15SGanapati Kundapura 		buf->last = 0;
8418113fd15SGanapati Kundapura 		buf->head = n1;
8428113fd15SGanapati Kundapura 		buf->last_mask = 0;
8438113fd15SGanapati Kundapura 		n += n1;
84499a2dd95SBruce Richardson 	}
84599a2dd95SBruce Richardson 
84699a2dd95SBruce Richardson 	n ? rxa_enq_block_end_ts(rx_adapter, stats) :
84799a2dd95SBruce Richardson 		rxa_enq_block_start_ts(rx_adapter);
84899a2dd95SBruce Richardson 
84999a2dd95SBruce Richardson 	buf->count -= n;
85099a2dd95SBruce Richardson 	stats->rx_enq_count += n;
85199a2dd95SBruce Richardson 
85299a2dd95SBruce Richardson 	return n;
85399a2dd95SBruce Richardson }
85499a2dd95SBruce Richardson 
85599a2dd95SBruce Richardson static inline void
856a256a743SPavan Nikhilesh rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
85799a2dd95SBruce Richardson 		struct eth_rx_vector_data *vec)
85899a2dd95SBruce Richardson {
85999a2dd95SBruce Richardson 	vec->vector_ev->nb_elem = 0;
86099a2dd95SBruce Richardson 	vec->vector_ev->port = vec->port;
86199a2dd95SBruce Richardson 	vec->vector_ev->queue = vec->queue;
86299a2dd95SBruce Richardson 	vec->vector_ev->attr_valid = true;
8630fbb55efSPavan Nikhilesh 	vec->vector_ev->elem_offset = 0;
86499a2dd95SBruce Richardson 	TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
86599a2dd95SBruce Richardson }
86699a2dd95SBruce Richardson 
86799a2dd95SBruce Richardson static inline uint16_t
868a256a743SPavan Nikhilesh rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
86999a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info,
870a256a743SPavan Nikhilesh 			struct eth_event_enqueue_buffer *buf,
87199a2dd95SBruce Richardson 			struct rte_mbuf **mbufs, uint16_t num)
87299a2dd95SBruce Richardson {
87399a2dd95SBruce Richardson 	struct rte_event *ev = &buf->events[buf->count];
87499a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
87599a2dd95SBruce Richardson 	uint16_t filled, space, sz;
87699a2dd95SBruce Richardson 
87799a2dd95SBruce Richardson 	filled = 0;
87899a2dd95SBruce Richardson 	vec = &queue_info->vector_data;
87999a2dd95SBruce Richardson 
88099a2dd95SBruce Richardson 	if (vec->vector_ev == NULL) {
88199a2dd95SBruce Richardson 		if (rte_mempool_get(vec->vector_pool,
88299a2dd95SBruce Richardson 				    (void **)&vec->vector_ev) < 0) {
88399a2dd95SBruce Richardson 			rte_pktmbuf_free_bulk(mbufs, num);
88499a2dd95SBruce Richardson 			return 0;
88599a2dd95SBruce Richardson 		}
88699a2dd95SBruce Richardson 		rxa_init_vector(rx_adapter, vec);
88799a2dd95SBruce Richardson 	}
88899a2dd95SBruce Richardson 	while (num) {
88999a2dd95SBruce Richardson 		if (vec->vector_ev->nb_elem == vec->max_vector_count) {
89099a2dd95SBruce Richardson 			/* Event ready. */
89199a2dd95SBruce Richardson 			ev->event = vec->event;
89299a2dd95SBruce Richardson 			ev->vec = vec->vector_ev;
89399a2dd95SBruce Richardson 			ev++;
89499a2dd95SBruce Richardson 			filled++;
89599a2dd95SBruce Richardson 			vec->vector_ev = NULL;
89699a2dd95SBruce Richardson 			TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
89799a2dd95SBruce Richardson 			if (rte_mempool_get(vec->vector_pool,
89899a2dd95SBruce Richardson 					    (void **)&vec->vector_ev) < 0) {
89999a2dd95SBruce Richardson 				rte_pktmbuf_free_bulk(mbufs, num);
90099a2dd95SBruce Richardson 				return 0;
90199a2dd95SBruce Richardson 			}
90299a2dd95SBruce Richardson 			rxa_init_vector(rx_adapter, vec);
90399a2dd95SBruce Richardson 		}
90499a2dd95SBruce Richardson 
90599a2dd95SBruce Richardson 		space = vec->max_vector_count - vec->vector_ev->nb_elem;
90699a2dd95SBruce Richardson 		sz = num > space ? space : num;
90799a2dd95SBruce Richardson 		memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
90899a2dd95SBruce Richardson 		       sizeof(void *) * sz);
90999a2dd95SBruce Richardson 		vec->vector_ev->nb_elem += sz;
91099a2dd95SBruce Richardson 		num -= sz;
91199a2dd95SBruce Richardson 		mbufs += sz;
91299a2dd95SBruce Richardson 		vec->ts = rte_rdtsc();
91399a2dd95SBruce Richardson 	}
91499a2dd95SBruce Richardson 
91599a2dd95SBruce Richardson 	if (vec->vector_ev->nb_elem == vec->max_vector_count) {
91699a2dd95SBruce Richardson 		ev->event = vec->event;
91799a2dd95SBruce Richardson 		ev->vec = vec->vector_ev;
91899a2dd95SBruce Richardson 		ev++;
91999a2dd95SBruce Richardson 		filled++;
92099a2dd95SBruce Richardson 		vec->vector_ev = NULL;
92199a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
92299a2dd95SBruce Richardson 	}
92399a2dd95SBruce Richardson 
92499a2dd95SBruce Richardson 	return filled;
92599a2dd95SBruce Richardson }
92699a2dd95SBruce Richardson 
92799a2dd95SBruce Richardson static inline void
928a256a743SPavan Nikhilesh rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
929a256a743SPavan Nikhilesh 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
930995b150cSNaga Harish K S V 		 struct eth_event_enqueue_buffer *buf,
931995b150cSNaga Harish K S V 		 struct rte_event_eth_rx_adapter_stats *stats)
93299a2dd95SBruce Richardson {
93399a2dd95SBruce Richardson 	uint32_t i;
93499a2dd95SBruce Richardson 	struct eth_device_info *dev_info =
93599a2dd95SBruce Richardson 					&rx_adapter->eth_devices[eth_dev_id];
93699a2dd95SBruce Richardson 	struct eth_rx_queue_info *eth_rx_queue_info =
93799a2dd95SBruce Richardson 					&dev_info->rx_queue[rx_queue_id];
9388113fd15SGanapati Kundapura 	uint16_t new_tail = buf->tail;
93999a2dd95SBruce Richardson 	uint64_t event = eth_rx_queue_info->event;
94099a2dd95SBruce Richardson 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
94199a2dd95SBruce Richardson 	struct rte_mbuf *m = mbufs[0];
94299a2dd95SBruce Richardson 	uint32_t rss_mask;
94399a2dd95SBruce Richardson 	uint32_t rss;
94499a2dd95SBruce Richardson 	int do_rss;
94599a2dd95SBruce Richardson 	uint16_t nb_cb;
94699a2dd95SBruce Richardson 	uint16_t dropped;
94783ab470dSGanapati Kundapura 	uint64_t ts, ts_mask;
94899a2dd95SBruce Richardson 
94999a2dd95SBruce Richardson 	if (!eth_rx_queue_info->ena_vector) {
95083ab470dSGanapati Kundapura 		ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
95183ab470dSGanapati Kundapura 						0 : rte_get_tsc_cycles();
95283ab470dSGanapati Kundapura 
953daa02b5cSOlivier Matz 		/* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
95483ab470dSGanapati Kundapura 		 * otherwise 0
95583ab470dSGanapati Kundapura 		 */
95683ab470dSGanapati Kundapura 		ts_mask = (uint64_t)(!(m->ol_flags &
95783ab470dSGanapati Kundapura 				       event_eth_rx_timestamp_dynflag)) - 1ULL;
95883ab470dSGanapati Kundapura 
959daa02b5cSOlivier Matz 		/* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
960daa02b5cSOlivier Matz 		rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
96199a2dd95SBruce Richardson 		do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
96299a2dd95SBruce Richardson 		for (i = 0; i < num; i++) {
9638113fd15SGanapati Kundapura 			struct rte_event *ev;
9648113fd15SGanapati Kundapura 
96599a2dd95SBruce Richardson 			m = mbufs[i];
96683ab470dSGanapati Kundapura 			*rxa_timestamp_dynfield(m) = ts |
96783ab470dSGanapati Kundapura 					(*rxa_timestamp_dynfield(m) & ts_mask);
96883ab470dSGanapati Kundapura 
9698113fd15SGanapati Kundapura 			ev = &buf->events[new_tail];
97099a2dd95SBruce Richardson 
97199a2dd95SBruce Richardson 			rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
97299a2dd95SBruce Richardson 				     : m->hash.rss;
97399a2dd95SBruce Richardson 			ev->event = event;
97499a2dd95SBruce Richardson 			ev->flow_id = (rss & ~flow_id_mask) |
97599a2dd95SBruce Richardson 				      (ev->flow_id & flow_id_mask);
97699a2dd95SBruce Richardson 			ev->mbuf = m;
9778113fd15SGanapati Kundapura 			new_tail++;
97899a2dd95SBruce Richardson 		}
97999a2dd95SBruce Richardson 	} else {
98099a2dd95SBruce Richardson 		num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
98199a2dd95SBruce Richardson 					      buf, mbufs, num);
98299a2dd95SBruce Richardson 	}
98399a2dd95SBruce Richardson 
98499a2dd95SBruce Richardson 	if (num && dev_info->cb_fn) {
98599a2dd95SBruce Richardson 
98699a2dd95SBruce Richardson 		dropped = 0;
98799a2dd95SBruce Richardson 		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
9888113fd15SGanapati Kundapura 				       buf->last |
989bc0df25cSNaga Harish K S V 				       (buf->events_size & ~buf->last_mask),
9908113fd15SGanapati Kundapura 				       buf->count >= BATCH_SIZE ?
9918113fd15SGanapati Kundapura 						buf->count - BATCH_SIZE : 0,
9928113fd15SGanapati Kundapura 				       &buf->events[buf->tail],
9938113fd15SGanapati Kundapura 				       num,
9948113fd15SGanapati Kundapura 				       dev_info->cb_arg,
9958113fd15SGanapati Kundapura 				       &dropped);
99699a2dd95SBruce Richardson 		if (unlikely(nb_cb > num))
99799a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
99899a2dd95SBruce Richardson 				nb_cb, num);
99999a2dd95SBruce Richardson 		else
100099a2dd95SBruce Richardson 			num = nb_cb;
100199a2dd95SBruce Richardson 		if (dropped)
1002995b150cSNaga Harish K S V 			stats->rx_dropped += dropped;
100399a2dd95SBruce Richardson 	}
100499a2dd95SBruce Richardson 
100599a2dd95SBruce Richardson 	buf->count += num;
10068113fd15SGanapati Kundapura 	buf->tail += num;
10078113fd15SGanapati Kundapura }
10088113fd15SGanapati Kundapura 
10098113fd15SGanapati Kundapura static inline bool
1010a256a743SPavan Nikhilesh rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
10118113fd15SGanapati Kundapura {
10128113fd15SGanapati Kundapura 	uint32_t nb_req = buf->tail + BATCH_SIZE;
10138113fd15SGanapati Kundapura 
10148113fd15SGanapati Kundapura 	if (!buf->last) {
1015bc0df25cSNaga Harish K S V 		if (nb_req <= buf->events_size)
10168113fd15SGanapati Kundapura 			return true;
10178113fd15SGanapati Kundapura 
10188113fd15SGanapati Kundapura 		if (buf->head >= BATCH_SIZE) {
10198113fd15SGanapati Kundapura 			buf->last_mask = ~0;
10208113fd15SGanapati Kundapura 			buf->last = buf->tail;
10218113fd15SGanapati Kundapura 			buf->tail = 0;
10228113fd15SGanapati Kundapura 			return true;
10238113fd15SGanapati Kundapura 		}
10248113fd15SGanapati Kundapura 	}
10258113fd15SGanapati Kundapura 
10268113fd15SGanapati Kundapura 	return nb_req <= buf->head;
102799a2dd95SBruce Richardson }
102899a2dd95SBruce Richardson 
102999a2dd95SBruce Richardson /* Enqueue packets from  <port, q>  to event buffer */
103099a2dd95SBruce Richardson static inline uint32_t
1031a256a743SPavan Nikhilesh rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
1032a256a743SPavan Nikhilesh 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
1033995b150cSNaga Harish K S V 	   int *rxq_empty, struct eth_event_enqueue_buffer *buf,
1034995b150cSNaga Harish K S V 	   struct rte_event_eth_rx_adapter_stats *stats)
103599a2dd95SBruce Richardson {
103699a2dd95SBruce Richardson 	struct rte_mbuf *mbufs[BATCH_SIZE];
103799a2dd95SBruce Richardson 	uint16_t n;
103899a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
1039578402f2SMattias Rönnblom 	uint32_t nb_flushed = 0;
104099a2dd95SBruce Richardson 
104199a2dd95SBruce Richardson 	if (rxq_empty)
104299a2dd95SBruce Richardson 		*rxq_empty = 0;
104399a2dd95SBruce Richardson 	/* Don't do a batch dequeue from the rx queue if there isn't
104499a2dd95SBruce Richardson 	 * enough space in the enqueue buffer.
104599a2dd95SBruce Richardson 	 */
10468113fd15SGanapati Kundapura 	while (rxa_pkt_buf_available(buf)) {
104799a2dd95SBruce Richardson 		if (buf->count >= BATCH_SIZE)
1048578402f2SMattias Rönnblom 			nb_flushed +=
1049995b150cSNaga Harish K S V 				rxa_flush_event_buffer(rx_adapter, buf, stats);
105099a2dd95SBruce Richardson 
105199a2dd95SBruce Richardson 		stats->rx_poll_count++;
105299a2dd95SBruce Richardson 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
105399a2dd95SBruce Richardson 		if (unlikely(!n)) {
105499a2dd95SBruce Richardson 			if (rxq_empty)
105599a2dd95SBruce Richardson 				*rxq_empty = 1;
105699a2dd95SBruce Richardson 			break;
105799a2dd95SBruce Richardson 		}
1058995b150cSNaga Harish K S V 		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf,
1059995b150cSNaga Harish K S V 				 stats);
106099a2dd95SBruce Richardson 		nb_rx += n;
106199a2dd95SBruce Richardson 		if (rx_count + nb_rx > max_rx)
106299a2dd95SBruce Richardson 			break;
106399a2dd95SBruce Richardson 	}
106499a2dd95SBruce Richardson 
106599a2dd95SBruce Richardson 	if (buf->count > 0)
1066578402f2SMattias Rönnblom 		nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats);
1067995b150cSNaga Harish K S V 
1068995b150cSNaga Harish K S V 	stats->rx_packets += nb_rx;
1069578402f2SMattias Rönnblom 	if (nb_flushed == 0)
1070578402f2SMattias Rönnblom 		rte_event_maintain(rx_adapter->eventdev_id,
1071578402f2SMattias Rönnblom 				   rx_adapter->event_port_id, 0);
107299a2dd95SBruce Richardson 
107399a2dd95SBruce Richardson 	return nb_rx;
107499a2dd95SBruce Richardson }
107599a2dd95SBruce Richardson 
107699a2dd95SBruce Richardson static inline void
1077a256a743SPavan Nikhilesh rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
107899a2dd95SBruce Richardson {
107999a2dd95SBruce Richardson 	uint16_t port_id;
108099a2dd95SBruce Richardson 	uint16_t queue;
108199a2dd95SBruce Richardson 	int err;
108299a2dd95SBruce Richardson 	union queue_data qd;
108399a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
108499a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
108599a2dd95SBruce Richardson 	int *intr_enabled;
108699a2dd95SBruce Richardson 
108799a2dd95SBruce Richardson 	qd.ptr = data;
108899a2dd95SBruce Richardson 	port_id = qd.port;
108999a2dd95SBruce Richardson 	queue = qd.queue;
109099a2dd95SBruce Richardson 
109199a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[port_id];
109299a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[queue];
109399a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
109499a2dd95SBruce Richardson 	if (rxa_shared_intr(dev_info, queue))
109599a2dd95SBruce Richardson 		intr_enabled = &dev_info->shared_intr_enabled;
109699a2dd95SBruce Richardson 	else
109799a2dd95SBruce Richardson 		intr_enabled = &queue_info->intr_enabled;
109899a2dd95SBruce Richardson 
109999a2dd95SBruce Richardson 	if (*intr_enabled) {
110099a2dd95SBruce Richardson 		*intr_enabled = 0;
110199a2dd95SBruce Richardson 		err = rte_ring_enqueue(rx_adapter->intr_ring, data);
110299a2dd95SBruce Richardson 		/* Entry should always be available.
110399a2dd95SBruce Richardson 		 * The ring size equals the maximum number of interrupt
110499a2dd95SBruce Richardson 		 * vectors supported (an interrupt vector is shared in
110599a2dd95SBruce Richardson 		 * case of shared interrupts)
110699a2dd95SBruce Richardson 		 */
110799a2dd95SBruce Richardson 		if (err)
110899a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
110999a2dd95SBruce Richardson 				" to ring: %s", strerror(-err));
111099a2dd95SBruce Richardson 		else
111199a2dd95SBruce Richardson 			rte_eth_dev_rx_intr_disable(port_id, queue);
111299a2dd95SBruce Richardson 	}
111399a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
111499a2dd95SBruce Richardson }
111599a2dd95SBruce Richardson 
111699a2dd95SBruce Richardson static int
1117a256a743SPavan Nikhilesh rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
111899a2dd95SBruce Richardson 			  uint32_t num_intr_vec)
111999a2dd95SBruce Richardson {
112099a2dd95SBruce Richardson 	if (rx_adapter->num_intr_vec + num_intr_vec >
112199a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE) {
112299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
112399a2dd95SBruce Richardson 		" %d needed %d limit %d", rx_adapter->num_intr_vec,
112499a2dd95SBruce Richardson 		num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
112599a2dd95SBruce Richardson 		return -ENOSPC;
112699a2dd95SBruce Richardson 	}
112799a2dd95SBruce Richardson 
112899a2dd95SBruce Richardson 	return 0;
112999a2dd95SBruce Richardson }
113099a2dd95SBruce Richardson 
113199a2dd95SBruce Richardson /* Delete entries for (dev, queue) from the interrupt ring */
113299a2dd95SBruce Richardson static void
1133a256a743SPavan Nikhilesh rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
113499a2dd95SBruce Richardson 			  struct eth_device_info *dev_info,
113599a2dd95SBruce Richardson 			  uint16_t rx_queue_id)
113699a2dd95SBruce Richardson {
113799a2dd95SBruce Richardson 	int i, n;
113899a2dd95SBruce Richardson 	union queue_data qd;
113999a2dd95SBruce Richardson 
114099a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
114199a2dd95SBruce Richardson 
114299a2dd95SBruce Richardson 	n = rte_ring_count(rx_adapter->intr_ring);
114399a2dd95SBruce Richardson 	for (i = 0; i < n; i++) {
114499a2dd95SBruce Richardson 		rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
114599a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id)) {
114699a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id &&
114799a2dd95SBruce Richardson 				qd.queue == rx_queue_id)
114899a2dd95SBruce Richardson 				continue;
114999a2dd95SBruce Richardson 		} else {
115099a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id)
115199a2dd95SBruce Richardson 				continue;
115299a2dd95SBruce Richardson 		}
115399a2dd95SBruce Richardson 		rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
115499a2dd95SBruce Richardson 	}
115599a2dd95SBruce Richardson 
115699a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
115799a2dd95SBruce Richardson }
115899a2dd95SBruce Richardson 
11591c1abf17SThomas Monjalon /* thread callback handling interrupt mode receive queues
116099a2dd95SBruce Richardson  * After receiving an Rx interrupt, it enqueues the port id and queue id of the
116199a2dd95SBruce Richardson  * interrupting queue to the adapter's ring buffer for interrupt events.
116299a2dd95SBruce Richardson  * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
116399a2dd95SBruce Richardson  * the adapter service function.
116499a2dd95SBruce Richardson  */
11651c1abf17SThomas Monjalon static uint32_t
116699a2dd95SBruce Richardson rxa_intr_thread(void *arg)
116799a2dd95SBruce Richardson {
1168a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = arg;
116999a2dd95SBruce Richardson 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
117099a2dd95SBruce Richardson 	int n, i;
117199a2dd95SBruce Richardson 
117299a2dd95SBruce Richardson 	while (1) {
117399a2dd95SBruce Richardson 		n = rte_epoll_wait(rx_adapter->epd, epoll_events,
117499a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE, -1);
117599a2dd95SBruce Richardson 		if (unlikely(n < 0))
117699a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
117799a2dd95SBruce Richardson 					n);
117899a2dd95SBruce Richardson 		for (i = 0; i < n; i++) {
117999a2dd95SBruce Richardson 			rxa_intr_ring_enqueue(rx_adapter,
118099a2dd95SBruce Richardson 					epoll_events[i].epdata.data);
118199a2dd95SBruce Richardson 		}
118299a2dd95SBruce Richardson 	}
118399a2dd95SBruce Richardson 
11841c1abf17SThomas Monjalon 	return 0;
118599a2dd95SBruce Richardson }
118699a2dd95SBruce Richardson 
118799a2dd95SBruce Richardson /* Dequeue <port, q> from interrupt ring and enqueue received
118899a2dd95SBruce Richardson  * mbufs to eventdev
118999a2dd95SBruce Richardson  */
11907f33abd4SMattias Rönnblom static inline bool
1191a256a743SPavan Nikhilesh rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
119299a2dd95SBruce Richardson {
119399a2dd95SBruce Richardson 	uint32_t n;
119499a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
119599a2dd95SBruce Richardson 	int rxq_empty;
1196a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf;
1197995b150cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_stats *stats;
119899a2dd95SBruce Richardson 	rte_spinlock_t *ring_lock;
119999a2dd95SBruce Richardson 	uint8_t max_done = 0;
12007f33abd4SMattias Rönnblom 	bool work = false;
120199a2dd95SBruce Richardson 
120299a2dd95SBruce Richardson 	if (rx_adapter->num_rx_intr == 0)
12037f33abd4SMattias Rönnblom 		return work;
120499a2dd95SBruce Richardson 
120599a2dd95SBruce Richardson 	if (rte_ring_count(rx_adapter->intr_ring) == 0
120699a2dd95SBruce Richardson 		&& !rx_adapter->qd_valid)
12077f33abd4SMattias Rönnblom 		return work;
120899a2dd95SBruce Richardson 
120999a2dd95SBruce Richardson 	buf = &rx_adapter->event_enqueue_buffer;
1210995b150cSNaga Harish K S V 	stats = &rx_adapter->stats;
121199a2dd95SBruce Richardson 	ring_lock = &rx_adapter->intr_ring_lock;
121299a2dd95SBruce Richardson 
12137f33abd4SMattias Rönnblom 	if (buf->count >= BATCH_SIZE) {
12147f33abd4SMattias Rönnblom 		uint16_t n;
12157f33abd4SMattias Rönnblom 
12167f33abd4SMattias Rönnblom 		n = rxa_flush_event_buffer(rx_adapter, buf, stats);
12177f33abd4SMattias Rönnblom 
12187f33abd4SMattias Rönnblom 		if (likely(n > 0))
12197f33abd4SMattias Rönnblom 			work = true;
12207f33abd4SMattias Rönnblom 	}
122199a2dd95SBruce Richardson 
12228113fd15SGanapati Kundapura 	while (rxa_pkt_buf_available(buf)) {
122399a2dd95SBruce Richardson 		struct eth_device_info *dev_info;
122499a2dd95SBruce Richardson 		uint16_t port;
122599a2dd95SBruce Richardson 		uint16_t queue;
122699a2dd95SBruce Richardson 		union queue_data qd  = rx_adapter->qd;
122799a2dd95SBruce Richardson 		int err;
122899a2dd95SBruce Richardson 
122999a2dd95SBruce Richardson 		if (!rx_adapter->qd_valid) {
123099a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info;
123199a2dd95SBruce Richardson 
123299a2dd95SBruce Richardson 			rte_spinlock_lock(ring_lock);
123399a2dd95SBruce Richardson 			err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
123499a2dd95SBruce Richardson 			if (err) {
123599a2dd95SBruce Richardson 				rte_spinlock_unlock(ring_lock);
123699a2dd95SBruce Richardson 				break;
123799a2dd95SBruce Richardson 			}
123899a2dd95SBruce Richardson 
123999a2dd95SBruce Richardson 			port = qd.port;
124099a2dd95SBruce Richardson 			queue = qd.queue;
124199a2dd95SBruce Richardson 			rx_adapter->qd = qd;
124299a2dd95SBruce Richardson 			rx_adapter->qd_valid = 1;
124399a2dd95SBruce Richardson 			dev_info = &rx_adapter->eth_devices[port];
124499a2dd95SBruce Richardson 			if (rxa_shared_intr(dev_info, queue))
124599a2dd95SBruce Richardson 				dev_info->shared_intr_enabled = 1;
124699a2dd95SBruce Richardson 			else {
124799a2dd95SBruce Richardson 				queue_info = &dev_info->rx_queue[queue];
124899a2dd95SBruce Richardson 				queue_info->intr_enabled = 1;
124999a2dd95SBruce Richardson 			}
125099a2dd95SBruce Richardson 			rte_eth_dev_rx_intr_enable(port, queue);
125199a2dd95SBruce Richardson 			rte_spinlock_unlock(ring_lock);
125299a2dd95SBruce Richardson 		} else {
125399a2dd95SBruce Richardson 			port = qd.port;
125499a2dd95SBruce Richardson 			queue = qd.queue;
125599a2dd95SBruce Richardson 
125699a2dd95SBruce Richardson 			dev_info = &rx_adapter->eth_devices[port];
125799a2dd95SBruce Richardson 		}
125899a2dd95SBruce Richardson 
125999a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, queue)) {
126099a2dd95SBruce Richardson 			uint16_t i;
126199a2dd95SBruce Richardson 			uint16_t nb_queues;
126299a2dd95SBruce Richardson 
126399a2dd95SBruce Richardson 			nb_queues = dev_info->dev->data->nb_rx_queues;
126499a2dd95SBruce Richardson 			n = 0;
126599a2dd95SBruce Richardson 			for (i = dev_info->next_q_idx; i < nb_queues; i++) {
126699a2dd95SBruce Richardson 				uint8_t enq_buffer_full;
126799a2dd95SBruce Richardson 
126899a2dd95SBruce Richardson 				if (!rxa_intr_queue(dev_info, i))
126999a2dd95SBruce Richardson 					continue;
127099a2dd95SBruce Richardson 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
127199a2dd95SBruce Richardson 					rx_adapter->max_nb_rx,
1272995b150cSNaga Harish K S V 					&rxq_empty, buf, stats);
127399a2dd95SBruce Richardson 				nb_rx += n;
127499a2dd95SBruce Richardson 
127599a2dd95SBruce Richardson 				enq_buffer_full = !rxq_empty && n == 0;
127699a2dd95SBruce Richardson 				max_done = nb_rx > rx_adapter->max_nb_rx;
127799a2dd95SBruce Richardson 
127899a2dd95SBruce Richardson 				if (enq_buffer_full || max_done) {
127999a2dd95SBruce Richardson 					dev_info->next_q_idx = i;
128099a2dd95SBruce Richardson 					goto done;
128199a2dd95SBruce Richardson 				}
128299a2dd95SBruce Richardson 			}
128399a2dd95SBruce Richardson 
128499a2dd95SBruce Richardson 			rx_adapter->qd_valid = 0;
128599a2dd95SBruce Richardson 
128699a2dd95SBruce Richardson 			/* Reinitialize for next interrupt */
128799a2dd95SBruce Richardson 			dev_info->next_q_idx = dev_info->multi_intr_cap ?
128899a2dd95SBruce Richardson 						RTE_MAX_RXTX_INTR_VEC_ID - 1 :
128999a2dd95SBruce Richardson 						0;
129099a2dd95SBruce Richardson 		} else {
129199a2dd95SBruce Richardson 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
129299a2dd95SBruce Richardson 				rx_adapter->max_nb_rx,
1293995b150cSNaga Harish K S V 				&rxq_empty, buf, stats);
129499a2dd95SBruce Richardson 			rx_adapter->qd_valid = !rxq_empty;
129599a2dd95SBruce Richardson 			nb_rx += n;
129699a2dd95SBruce Richardson 			if (nb_rx > rx_adapter->max_nb_rx)
129799a2dd95SBruce Richardson 				break;
129899a2dd95SBruce Richardson 		}
129999a2dd95SBruce Richardson 	}
130099a2dd95SBruce Richardson 
130199a2dd95SBruce Richardson done:
13027f33abd4SMattias Rönnblom 	if (nb_rx > 0) {
130399a2dd95SBruce Richardson 		rx_adapter->stats.rx_intr_packets += nb_rx;
13047f33abd4SMattias Rönnblom 		work = true;
13057f33abd4SMattias Rönnblom 	}
13067f33abd4SMattias Rönnblom 
13077f33abd4SMattias Rönnblom 	return work;
130899a2dd95SBruce Richardson }
130999a2dd95SBruce Richardson 
131099a2dd95SBruce Richardson /*
131199a2dd95SBruce Richardson  * Polls receive queues added to the event adapter and enqueues received
131299a2dd95SBruce Richardson  * packets to the event device.
131399a2dd95SBruce Richardson  *
131499a2dd95SBruce Richardson  * The receive code enqueues initially to a temporary buffer, the
131599a2dd95SBruce Richardson  * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
131699a2dd95SBruce Richardson  *
131799a2dd95SBruce Richardson  * If there isn't space available in the temporary buffer, packets from the
131899a2dd95SBruce Richardson  * Rx queue aren't dequeued from the eth device, this back pressures the
131999a2dd95SBruce Richardson  * eth device, in virtual device environments this back pressure is relayed to
132099a2dd95SBruce Richardson  * the hypervisor's switching layer where adjustments can be made to deal with
132199a2dd95SBruce Richardson  * it.
132299a2dd95SBruce Richardson  */
13237f33abd4SMattias Rönnblom static inline bool
1324a256a743SPavan Nikhilesh rxa_poll(struct event_eth_rx_adapter *rx_adapter)
132599a2dd95SBruce Richardson {
132699a2dd95SBruce Richardson 	uint32_t num_queue;
132799a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
1328a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf = NULL;
1329995b150cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_stats *stats = NULL;
133099a2dd95SBruce Richardson 	uint32_t wrr_pos;
133199a2dd95SBruce Richardson 	uint32_t max_nb_rx;
13327f33abd4SMattias Rönnblom 	bool work = false;
133399a2dd95SBruce Richardson 
133499a2dd95SBruce Richardson 	wrr_pos = rx_adapter->wrr_pos;
133599a2dd95SBruce Richardson 	max_nb_rx = rx_adapter->max_nb_rx;
133699a2dd95SBruce Richardson 
133799a2dd95SBruce Richardson 	/* Iterate through a WRR sequence */
133899a2dd95SBruce Richardson 	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
133999a2dd95SBruce Richardson 		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
134099a2dd95SBruce Richardson 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
134199a2dd95SBruce Richardson 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
134299a2dd95SBruce Richardson 
1343995b150cSNaga Harish K S V 		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);
1344b06bca69SNaga Harish K S V 
134599a2dd95SBruce Richardson 		/* Don't do a batch dequeue from the rx queue if there isn't
134699a2dd95SBruce Richardson 		 * enough space in the enqueue buffer.
134799a2dd95SBruce Richardson 		 */
13487f33abd4SMattias Rönnblom 		if (buf->count >= BATCH_SIZE) {
13497f33abd4SMattias Rönnblom 			uint16_t n;
13507f33abd4SMattias Rönnblom 
13517f33abd4SMattias Rönnblom 			n = rxa_flush_event_buffer(rx_adapter, buf, stats);
13527f33abd4SMattias Rönnblom 
13537f33abd4SMattias Rönnblom 			if (likely(n > 0))
13547f33abd4SMattias Rönnblom 				work = true;
13557f33abd4SMattias Rönnblom 		}
13568113fd15SGanapati Kundapura 		if (!rxa_pkt_buf_available(buf)) {
1357b06bca69SNaga Harish K S V 			if (rx_adapter->use_queue_event_buf)
1358b06bca69SNaga Harish K S V 				goto poll_next_entry;
1359b06bca69SNaga Harish K S V 			else {
136099a2dd95SBruce Richardson 				rx_adapter->wrr_pos = wrr_pos;
13617f33abd4SMattias Rönnblom 				break;
136299a2dd95SBruce Richardson 			}
1363b06bca69SNaga Harish K S V 		}
136499a2dd95SBruce Richardson 
136599a2dd95SBruce Richardson 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
1366995b150cSNaga Harish K S V 				NULL, buf, stats);
136799a2dd95SBruce Richardson 		if (nb_rx > max_nb_rx) {
136899a2dd95SBruce Richardson 			rx_adapter->wrr_pos =
136999a2dd95SBruce Richardson 				    (wrr_pos + 1) % rx_adapter->wrr_len;
137099a2dd95SBruce Richardson 			break;
137199a2dd95SBruce Richardson 		}
137299a2dd95SBruce Richardson 
1373b06bca69SNaga Harish K S V poll_next_entry:
137499a2dd95SBruce Richardson 		if (++wrr_pos == rx_adapter->wrr_len)
137599a2dd95SBruce Richardson 			wrr_pos = 0;
137699a2dd95SBruce Richardson 	}
13777f33abd4SMattias Rönnblom 
13787f33abd4SMattias Rönnblom 	if (nb_rx > 0)
13797f33abd4SMattias Rönnblom 		work = true;
13807f33abd4SMattias Rönnblom 
13817f33abd4SMattias Rönnblom 	return work;
138299a2dd95SBruce Richardson }
138399a2dd95SBruce Richardson 
138499a2dd95SBruce Richardson static void
138599a2dd95SBruce Richardson rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
138699a2dd95SBruce Richardson {
1387a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = arg;
1388a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf = NULL;
1389995b150cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_stats *stats = NULL;
139099a2dd95SBruce Richardson 	struct rte_event *ev;
139199a2dd95SBruce Richardson 
1392995b150cSNaga Harish K S V 	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue, &stats);
1393b06bca69SNaga Harish K S V 
139499a2dd95SBruce Richardson 	if (buf->count)
1395995b150cSNaga Harish K S V 		rxa_flush_event_buffer(rx_adapter, buf, stats);
139699a2dd95SBruce Richardson 
139799a2dd95SBruce Richardson 	if (vec->vector_ev->nb_elem == 0)
139899a2dd95SBruce Richardson 		return;
139999a2dd95SBruce Richardson 	ev = &buf->events[buf->count];
140099a2dd95SBruce Richardson 
140199a2dd95SBruce Richardson 	/* Event ready. */
140299a2dd95SBruce Richardson 	ev->event = vec->event;
140399a2dd95SBruce Richardson 	ev->vec = vec->vector_ev;
140499a2dd95SBruce Richardson 	buf->count++;
140599a2dd95SBruce Richardson 
140699a2dd95SBruce Richardson 	vec->vector_ev = NULL;
140799a2dd95SBruce Richardson 	vec->ts = 0;
140899a2dd95SBruce Richardson }
140999a2dd95SBruce Richardson 
141099a2dd95SBruce Richardson static int
141199a2dd95SBruce Richardson rxa_service_func(void *args)
141299a2dd95SBruce Richardson {
1413a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = args;
14147f33abd4SMattias Rönnblom 	bool intr_work;
14157f33abd4SMattias Rönnblom 	bool poll_work;
141699a2dd95SBruce Richardson 
141799a2dd95SBruce Richardson 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
14187f33abd4SMattias Rönnblom 		return -EAGAIN;
141999a2dd95SBruce Richardson 	if (!rx_adapter->rxa_started) {
142099a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
14217f33abd4SMattias Rönnblom 		return -EAGAIN;
142299a2dd95SBruce Richardson 	}
142399a2dd95SBruce Richardson 
142499a2dd95SBruce Richardson 	if (rx_adapter->ena_vector) {
142599a2dd95SBruce Richardson 		if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
142699a2dd95SBruce Richardson 		    rx_adapter->vector_tmo_ticks) {
142799a2dd95SBruce Richardson 			struct eth_rx_vector_data *vec;
142899a2dd95SBruce Richardson 
142999a2dd95SBruce Richardson 			TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
143099a2dd95SBruce Richardson 				uint64_t elapsed_time = rte_rdtsc() - vec->ts;
143199a2dd95SBruce Richardson 
143299a2dd95SBruce Richardson 				if (elapsed_time >= vec->vector_timeout_ticks) {
143399a2dd95SBruce Richardson 					rxa_vector_expire(vec, rx_adapter);
143499a2dd95SBruce Richardson 					TAILQ_REMOVE(&rx_adapter->vector_list,
143599a2dd95SBruce Richardson 						     vec, next);
143699a2dd95SBruce Richardson 				}
143799a2dd95SBruce Richardson 			}
143899a2dd95SBruce Richardson 			rx_adapter->prev_expiry_ts = rte_rdtsc();
143999a2dd95SBruce Richardson 		}
144099a2dd95SBruce Richardson 	}
144199a2dd95SBruce Richardson 
14427f33abd4SMattias Rönnblom 	intr_work = rxa_intr_ring_dequeue(rx_adapter);
14437f33abd4SMattias Rönnblom 	poll_work = rxa_poll(rx_adapter);
1444995b150cSNaga Harish K S V 
144599a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->rx_lock);
1446995b150cSNaga Harish K S V 
14477f33abd4SMattias Rönnblom 	return intr_work || poll_work ? 0 : -EAGAIN;
144899a2dd95SBruce Richardson }
144999a2dd95SBruce Richardson 
1450a1793ee8SGanapati Kundapura static void *
1451a1793ee8SGanapati Kundapura rxa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems)
145299a2dd95SBruce Richardson {
145399a2dd95SBruce Richardson 	const struct rte_memzone *mz;
145499a2dd95SBruce Richardson 	unsigned int sz;
145599a2dd95SBruce Richardson 
1456a1793ee8SGanapati Kundapura 	sz = elt_size * nb_elems;
145799a2dd95SBruce Richardson 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
145899a2dd95SBruce Richardson 
145999a2dd95SBruce Richardson 	mz = rte_memzone_lookup(name);
146099a2dd95SBruce Richardson 	if (mz == NULL) {
146199a2dd95SBruce Richardson 		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
146299a2dd95SBruce Richardson 						 RTE_CACHE_LINE_SIZE);
146399a2dd95SBruce Richardson 		if (mz == NULL) {
1464a1793ee8SGanapati Kundapura 			RTE_EDEV_LOG_ERR("failed to reserve memzone"
1465a1793ee8SGanapati Kundapura 					 " name = %s, err = %"
1466a1793ee8SGanapati Kundapura 					 PRId32, name, rte_errno);
1467a1793ee8SGanapati Kundapura 			return NULL;
146899a2dd95SBruce Richardson 		}
146999a2dd95SBruce Richardson 	}
147099a2dd95SBruce Richardson 
1471a1793ee8SGanapati Kundapura 	return mz->addr;
1472a1793ee8SGanapati Kundapura }
1473a1793ee8SGanapati Kundapura 
1474a1793ee8SGanapati Kundapura static int
1475a1793ee8SGanapati Kundapura rte_event_eth_rx_adapter_init(void)
1476a1793ee8SGanapati Kundapura {
1477a1793ee8SGanapati Kundapura 	uint8_t i;
1478a1793ee8SGanapati Kundapura 
1479a1793ee8SGanapati Kundapura 	if (event_eth_rx_adapter == NULL) {
1480a1793ee8SGanapati Kundapura 		event_eth_rx_adapter =
1481a1793ee8SGanapati Kundapura 			rxa_memzone_array_get(RXA_ADAPTER_ARRAY,
1482a1793ee8SGanapati Kundapura 					sizeof(*event_eth_rx_adapter),
1483a1793ee8SGanapati Kundapura 					RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE);
1484a1793ee8SGanapati Kundapura 		if (event_eth_rx_adapter == NULL)
1485a1793ee8SGanapati Kundapura 			return -ENOMEM;
1486a1793ee8SGanapati Kundapura 
1487a1793ee8SGanapati Kundapura 		for (i = 0; i < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; i++)
1488a1793ee8SGanapati Kundapura 			event_eth_rx_adapter[i] = NULL;
1489a1793ee8SGanapati Kundapura 
1490a1793ee8SGanapati Kundapura 	}
1491a1793ee8SGanapati Kundapura 
149299a2dd95SBruce Richardson 	return 0;
149399a2dd95SBruce Richardson }
149499a2dd95SBruce Richardson 
1495da781e64SGanapati Kundapura static int
1496da781e64SGanapati Kundapura rxa_memzone_lookup(void)
1497da781e64SGanapati Kundapura {
1498da781e64SGanapati Kundapura 	const struct rte_memzone *mz;
1499da781e64SGanapati Kundapura 
1500da781e64SGanapati Kundapura 	if (event_eth_rx_adapter == NULL) {
1501da781e64SGanapati Kundapura 		mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1502da781e64SGanapati Kundapura 		if (mz == NULL)
1503da781e64SGanapati Kundapura 			return -ENOMEM;
1504a1793ee8SGanapati Kundapura 
1505da781e64SGanapati Kundapura 		event_eth_rx_adapter = mz->addr;
1506da781e64SGanapati Kundapura 	}
1507da781e64SGanapati Kundapura 
1508da781e64SGanapati Kundapura 	return 0;
1509da781e64SGanapati Kundapura }
1510da781e64SGanapati Kundapura 
1511a256a743SPavan Nikhilesh static inline struct event_eth_rx_adapter *
151299a2dd95SBruce Richardson rxa_id_to_adapter(uint8_t id)
151399a2dd95SBruce Richardson {
151499a2dd95SBruce Richardson 	return event_eth_rx_adapter ?
151599a2dd95SBruce Richardson 		event_eth_rx_adapter[id] : NULL;
151699a2dd95SBruce Richardson }
151799a2dd95SBruce Richardson 
151899a2dd95SBruce Richardson static int
151999a2dd95SBruce Richardson rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
152099a2dd95SBruce Richardson 		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
152199a2dd95SBruce Richardson {
152299a2dd95SBruce Richardson 	int ret;
152399a2dd95SBruce Richardson 	struct rte_eventdev *dev;
152499a2dd95SBruce Richardson 	struct rte_event_dev_config dev_conf;
152599a2dd95SBruce Richardson 	int started;
152699a2dd95SBruce Richardson 	uint8_t port_id;
152799a2dd95SBruce Richardson 	struct rte_event_port_conf *port_conf = arg;
1528a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
152999a2dd95SBruce Richardson 
153099a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
153199a2dd95SBruce Richardson 	dev_conf = dev->data->dev_conf;
153299a2dd95SBruce Richardson 
153399a2dd95SBruce Richardson 	started = dev->data->dev_started;
153499a2dd95SBruce Richardson 	if (started)
153599a2dd95SBruce Richardson 		rte_event_dev_stop(dev_id);
153699a2dd95SBruce Richardson 	port_id = dev_conf.nb_event_ports;
153799a2dd95SBruce Richardson 	dev_conf.nb_event_ports += 1;
153829bd868cSNaga Harish K S V 	if (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_SINGLE_LINK)
153929bd868cSNaga Harish K S V 		dev_conf.nb_single_link_event_port_queues += 1;
154029bd868cSNaga Harish K S V 
154199a2dd95SBruce Richardson 	ret = rte_event_dev_configure(dev_id, &dev_conf);
154299a2dd95SBruce Richardson 	if (ret) {
1543ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("failed to configure event dev %u",
154499a2dd95SBruce Richardson 						dev_id);
154599a2dd95SBruce Richardson 		if (started) {
154699a2dd95SBruce Richardson 			if (rte_event_dev_start(dev_id))
154799a2dd95SBruce Richardson 				return -EIO;
154899a2dd95SBruce Richardson 		}
154999a2dd95SBruce Richardson 		return ret;
155099a2dd95SBruce Richardson 	}
155199a2dd95SBruce Richardson 
155299a2dd95SBruce Richardson 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
155399a2dd95SBruce Richardson 	if (ret) {
1554ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("failed to setup event port %u",
155599a2dd95SBruce Richardson 					port_id);
155699a2dd95SBruce Richardson 		return ret;
155799a2dd95SBruce Richardson 	}
155899a2dd95SBruce Richardson 
155999a2dd95SBruce Richardson 	conf->event_port_id = port_id;
15603716f521SNaga Harish K S V 	conf->max_nb_rx = RXA_NB_RX_WORK_DEFAULT;
156199a2dd95SBruce Richardson 	if (started)
156299a2dd95SBruce Richardson 		ret = rte_event_dev_start(dev_id);
156399a2dd95SBruce Richardson 	rx_adapter->default_cb_arg = 1;
156499a2dd95SBruce Richardson 	return ret;
156599a2dd95SBruce Richardson }
156699a2dd95SBruce Richardson 
156799a2dd95SBruce Richardson static int
156899a2dd95SBruce Richardson rxa_epoll_create1(void)
156999a2dd95SBruce Richardson {
157053b2eaa2SBruce Richardson #if defined(__linux__)
157199a2dd95SBruce Richardson 	int fd;
157299a2dd95SBruce Richardson 	fd = epoll_create1(EPOLL_CLOEXEC);
157399a2dd95SBruce Richardson 	return fd < 0 ? -errno : fd;
157453b2eaa2SBruce Richardson #else
157599a2dd95SBruce Richardson 	return -ENOTSUP;
157699a2dd95SBruce Richardson #endif
157799a2dd95SBruce Richardson }
157899a2dd95SBruce Richardson 
157999a2dd95SBruce Richardson static int
1580a256a743SPavan Nikhilesh rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
158199a2dd95SBruce Richardson {
158299a2dd95SBruce Richardson 	if (rx_adapter->epd != INIT_FD)
158399a2dd95SBruce Richardson 		return 0;
158499a2dd95SBruce Richardson 
158599a2dd95SBruce Richardson 	rx_adapter->epd = rxa_epoll_create1();
158699a2dd95SBruce Richardson 	if (rx_adapter->epd < 0) {
158799a2dd95SBruce Richardson 		int err = rx_adapter->epd;
158899a2dd95SBruce Richardson 		rx_adapter->epd = INIT_FD;
158999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
159099a2dd95SBruce Richardson 		return err;
159199a2dd95SBruce Richardson 	}
159299a2dd95SBruce Richardson 
159399a2dd95SBruce Richardson 	return 0;
159499a2dd95SBruce Richardson }
159599a2dd95SBruce Richardson 
159699a2dd95SBruce Richardson static int
1597a256a743SPavan Nikhilesh rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
159899a2dd95SBruce Richardson {
159999a2dd95SBruce Richardson 	int err;
16001c1abf17SThomas Monjalon 	char thread_name[RTE_THREAD_INTERNAL_NAME_SIZE];
160199a2dd95SBruce Richardson 
160299a2dd95SBruce Richardson 	if (rx_adapter->intr_ring)
160399a2dd95SBruce Richardson 		return 0;
160499a2dd95SBruce Richardson 
160599a2dd95SBruce Richardson 	rx_adapter->intr_ring = rte_ring_create("intr_ring",
160699a2dd95SBruce Richardson 					RTE_EVENT_ETH_INTR_RING_SIZE,
160799a2dd95SBruce Richardson 					rte_socket_id(), 0);
160899a2dd95SBruce Richardson 	if (!rx_adapter->intr_ring)
160999a2dd95SBruce Richardson 		return -ENOMEM;
161099a2dd95SBruce Richardson 
161199a2dd95SBruce Richardson 	rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
161299a2dd95SBruce Richardson 					RTE_EVENT_ETH_INTR_RING_SIZE *
161399a2dd95SBruce Richardson 					sizeof(struct rte_epoll_event),
161499a2dd95SBruce Richardson 					RTE_CACHE_LINE_SIZE,
161599a2dd95SBruce Richardson 					rx_adapter->socket_id);
161699a2dd95SBruce Richardson 	if (!rx_adapter->epoll_events) {
161799a2dd95SBruce Richardson 		err = -ENOMEM;
161899a2dd95SBruce Richardson 		goto error;
161999a2dd95SBruce Richardson 	}
162099a2dd95SBruce Richardson 
162199a2dd95SBruce Richardson 	rte_spinlock_init(&rx_adapter->intr_ring_lock);
162299a2dd95SBruce Richardson 
16231c1abf17SThomas Monjalon 	snprintf(thread_name, sizeof(thread_name),
16241c1abf17SThomas Monjalon 			"evt-rx%d", rx_adapter->id);
162599a2dd95SBruce Richardson 
16261c1abf17SThomas Monjalon 	err = rte_thread_create_internal_control(&rx_adapter->rx_intr_thread,
16271c1abf17SThomas Monjalon 			thread_name, rxa_intr_thread, rx_adapter);
16280bac9fc7SChengwen Feng 	if (!err)
162999a2dd95SBruce Richardson 		return 0;
163099a2dd95SBruce Richardson 
1631ae282b06SDavid Marchand 	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d", err);
1632f6681ab7SChengwen Feng 	rte_free(rx_adapter->epoll_events);
163399a2dd95SBruce Richardson error:
163499a2dd95SBruce Richardson 	rte_ring_free(rx_adapter->intr_ring);
163599a2dd95SBruce Richardson 	rx_adapter->intr_ring = NULL;
163699a2dd95SBruce Richardson 	rx_adapter->epoll_events = NULL;
163799a2dd95SBruce Richardson 	return err;
163899a2dd95SBruce Richardson }
163999a2dd95SBruce Richardson 
164099a2dd95SBruce Richardson static int
1641a256a743SPavan Nikhilesh rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
164299a2dd95SBruce Richardson {
164399a2dd95SBruce Richardson 	int err;
164499a2dd95SBruce Richardson 
16451c1abf17SThomas Monjalon 	err = pthread_cancel((pthread_t)rx_adapter->rx_intr_thread.opaque_id);
164699a2dd95SBruce Richardson 	if (err)
1647ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d",
164899a2dd95SBruce Richardson 				err);
164999a2dd95SBruce Richardson 
16501c1abf17SThomas Monjalon 	err = rte_thread_join(rx_adapter->rx_intr_thread, NULL);
165199a2dd95SBruce Richardson 	if (err)
1652ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d", err);
165399a2dd95SBruce Richardson 
165499a2dd95SBruce Richardson 	rte_free(rx_adapter->epoll_events);
165599a2dd95SBruce Richardson 	rte_ring_free(rx_adapter->intr_ring);
165699a2dd95SBruce Richardson 	rx_adapter->intr_ring = NULL;
165799a2dd95SBruce Richardson 	rx_adapter->epoll_events = NULL;
165899a2dd95SBruce Richardson 	return 0;
165999a2dd95SBruce Richardson }
166099a2dd95SBruce Richardson 
166199a2dd95SBruce Richardson static int
1662a256a743SPavan Nikhilesh rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
166399a2dd95SBruce Richardson {
166499a2dd95SBruce Richardson 	int ret;
166599a2dd95SBruce Richardson 
166699a2dd95SBruce Richardson 	if (rx_adapter->num_rx_intr == 0)
166799a2dd95SBruce Richardson 		return 0;
166899a2dd95SBruce Richardson 
166999a2dd95SBruce Richardson 	ret = rxa_destroy_intr_thread(rx_adapter);
167099a2dd95SBruce Richardson 	if (ret)
167199a2dd95SBruce Richardson 		return ret;
167299a2dd95SBruce Richardson 
167399a2dd95SBruce Richardson 	close(rx_adapter->epd);
167499a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
167599a2dd95SBruce Richardson 
167699a2dd95SBruce Richardson 	return ret;
167799a2dd95SBruce Richardson }
167899a2dd95SBruce Richardson 
167999a2dd95SBruce Richardson static int
1680a256a743SPavan Nikhilesh rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
1681a256a743SPavan Nikhilesh 		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
168299a2dd95SBruce Richardson {
168399a2dd95SBruce Richardson 	int err;
168499a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
168599a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
168699a2dd95SBruce Richardson 
168799a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
168899a2dd95SBruce Richardson 	if (err) {
168999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
169099a2dd95SBruce Richardson 			rx_queue_id);
169199a2dd95SBruce Richardson 		return err;
169299a2dd95SBruce Richardson 	}
169399a2dd95SBruce Richardson 
169499a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
169599a2dd95SBruce Richardson 					rx_adapter->epd,
169699a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
169799a2dd95SBruce Richardson 					0);
169899a2dd95SBruce Richardson 	if (err)
169999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
170099a2dd95SBruce Richardson 
170199a2dd95SBruce Richardson 	if (sintr)
170299a2dd95SBruce Richardson 		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
170399a2dd95SBruce Richardson 	else
170499a2dd95SBruce Richardson 		dev_info->shared_intr_enabled = 0;
170599a2dd95SBruce Richardson 	return err;
170699a2dd95SBruce Richardson }
170799a2dd95SBruce Richardson 
170899a2dd95SBruce Richardson static int
1709a256a743SPavan Nikhilesh rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1710a256a743SPavan Nikhilesh 		   struct eth_device_info *dev_info, int rx_queue_id)
171199a2dd95SBruce Richardson {
171299a2dd95SBruce Richardson 	int err;
171399a2dd95SBruce Richardson 	int i;
171499a2dd95SBruce Richardson 	int s;
171599a2dd95SBruce Richardson 
171699a2dd95SBruce Richardson 	if (dev_info->nb_rx_intr == 0)
171799a2dd95SBruce Richardson 		return 0;
171899a2dd95SBruce Richardson 
171999a2dd95SBruce Richardson 	err = 0;
172099a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
172199a2dd95SBruce Richardson 		s = dev_info->nb_shared_intr;
172299a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
172399a2dd95SBruce Richardson 			int sintr;
172499a2dd95SBruce Richardson 			uint16_t q;
172599a2dd95SBruce Richardson 
172699a2dd95SBruce Richardson 			q = dev_info->intr_queue[i];
172799a2dd95SBruce Richardson 			sintr = rxa_shared_intr(dev_info, q);
172899a2dd95SBruce Richardson 			s -= sintr;
172999a2dd95SBruce Richardson 
173099a2dd95SBruce Richardson 			if (!sintr || s == 0) {
173199a2dd95SBruce Richardson 
173299a2dd95SBruce Richardson 				err = rxa_disable_intr(rx_adapter, dev_info,
173399a2dd95SBruce Richardson 						q);
173499a2dd95SBruce Richardson 				if (err)
173599a2dd95SBruce Richardson 					return err;
173699a2dd95SBruce Richardson 				rxa_intr_ring_del_entries(rx_adapter, dev_info,
173799a2dd95SBruce Richardson 							q);
173899a2dd95SBruce Richardson 			}
173999a2dd95SBruce Richardson 		}
174099a2dd95SBruce Richardson 	} else {
174199a2dd95SBruce Richardson 		if (!rxa_intr_queue(dev_info, rx_queue_id))
174299a2dd95SBruce Richardson 			return 0;
174399a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
174499a2dd95SBruce Richardson 				dev_info->nb_shared_intr == 1) {
174599a2dd95SBruce Richardson 			err = rxa_disable_intr(rx_adapter, dev_info,
174699a2dd95SBruce Richardson 					rx_queue_id);
174799a2dd95SBruce Richardson 			if (err)
174899a2dd95SBruce Richardson 				return err;
174999a2dd95SBruce Richardson 			rxa_intr_ring_del_entries(rx_adapter, dev_info,
175099a2dd95SBruce Richardson 						rx_queue_id);
175199a2dd95SBruce Richardson 		}
175299a2dd95SBruce Richardson 
175399a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
175499a2dd95SBruce Richardson 			if (dev_info->intr_queue[i] == rx_queue_id) {
175599a2dd95SBruce Richardson 				for (; i < dev_info->nb_rx_intr - 1; i++)
175699a2dd95SBruce Richardson 					dev_info->intr_queue[i] =
175799a2dd95SBruce Richardson 						dev_info->intr_queue[i + 1];
175899a2dd95SBruce Richardson 				break;
175999a2dd95SBruce Richardson 			}
176099a2dd95SBruce Richardson 		}
176199a2dd95SBruce Richardson 	}
176299a2dd95SBruce Richardson 
176399a2dd95SBruce Richardson 	return err;
176499a2dd95SBruce Richardson }
176599a2dd95SBruce Richardson 
176699a2dd95SBruce Richardson static int
1767a256a743SPavan Nikhilesh rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
1768a256a743SPavan Nikhilesh 		struct eth_device_info *dev_info, uint16_t rx_queue_id)
176999a2dd95SBruce Richardson {
177099a2dd95SBruce Richardson 	int err, err1;
177199a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
177299a2dd95SBruce Richardson 	union queue_data qd;
177399a2dd95SBruce Richardson 	int init_fd;
177499a2dd95SBruce Richardson 	uint16_t *intr_queue;
177599a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
177699a2dd95SBruce Richardson 
177799a2dd95SBruce Richardson 	if (rxa_intr_queue(dev_info, rx_queue_id))
177899a2dd95SBruce Richardson 		return 0;
177999a2dd95SBruce Richardson 
178099a2dd95SBruce Richardson 	intr_queue = dev_info->intr_queue;
178199a2dd95SBruce Richardson 	if (dev_info->intr_queue == NULL) {
178299a2dd95SBruce Richardson 		size_t len =
178399a2dd95SBruce Richardson 			dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
178499a2dd95SBruce Richardson 		dev_info->intr_queue =
178599a2dd95SBruce Richardson 			rte_zmalloc_socket(
178699a2dd95SBruce Richardson 				rx_adapter->mem_name,
178799a2dd95SBruce Richardson 				len,
178899a2dd95SBruce Richardson 				0,
178999a2dd95SBruce Richardson 				rx_adapter->socket_id);
179099a2dd95SBruce Richardson 		if (dev_info->intr_queue == NULL)
179199a2dd95SBruce Richardson 			return -ENOMEM;
179299a2dd95SBruce Richardson 	}
179399a2dd95SBruce Richardson 
179499a2dd95SBruce Richardson 	init_fd = rx_adapter->epd;
179599a2dd95SBruce Richardson 	err = rxa_init_epd(rx_adapter);
179699a2dd95SBruce Richardson 	if (err)
179799a2dd95SBruce Richardson 		goto err_free_queue;
179899a2dd95SBruce Richardson 
179999a2dd95SBruce Richardson 	qd.port = eth_dev_id;
180099a2dd95SBruce Richardson 	qd.queue = rx_queue_id;
180199a2dd95SBruce Richardson 
180299a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
180399a2dd95SBruce Richardson 					rx_adapter->epd,
180499a2dd95SBruce Richardson 					RTE_INTR_EVENT_ADD,
180599a2dd95SBruce Richardson 					qd.ptr);
180699a2dd95SBruce Richardson 	if (err) {
180799a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
180899a2dd95SBruce Richardson 			" Rx Queue %u err %d", rx_queue_id, err);
180999a2dd95SBruce Richardson 		goto err_del_fd;
181099a2dd95SBruce Richardson 	}
181199a2dd95SBruce Richardson 
181299a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
181399a2dd95SBruce Richardson 	if (err) {
181499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not enable interrupt for"
181599a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
181699a2dd95SBruce Richardson 
181799a2dd95SBruce Richardson 		goto err_del_event;
181899a2dd95SBruce Richardson 	}
181999a2dd95SBruce Richardson 
182099a2dd95SBruce Richardson 	err = rxa_create_intr_thread(rx_adapter);
182199a2dd95SBruce Richardson 	if (!err)  {
182299a2dd95SBruce Richardson 		if (sintr)
182399a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
182499a2dd95SBruce Richardson 		else
182599a2dd95SBruce Richardson 			dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
182699a2dd95SBruce Richardson 		return 0;
182799a2dd95SBruce Richardson 	}
182899a2dd95SBruce Richardson 
182999a2dd95SBruce Richardson 
183099a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
183199a2dd95SBruce Richardson 	if (err)
183299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for"
183399a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
183499a2dd95SBruce Richardson err_del_event:
183599a2dd95SBruce Richardson 	err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
183699a2dd95SBruce Richardson 					rx_adapter->epd,
183799a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
183899a2dd95SBruce Richardson 					0);
183999a2dd95SBruce Richardson 	if (err1) {
184099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not delete event for"
184199a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err1);
184299a2dd95SBruce Richardson 	}
184399a2dd95SBruce Richardson err_del_fd:
184499a2dd95SBruce Richardson 	if (init_fd == INIT_FD) {
184599a2dd95SBruce Richardson 		close(rx_adapter->epd);
184699a2dd95SBruce Richardson 		rx_adapter->epd = -1;
184799a2dd95SBruce Richardson 	}
184899a2dd95SBruce Richardson err_free_queue:
184999a2dd95SBruce Richardson 	if (intr_queue == NULL)
185099a2dd95SBruce Richardson 		rte_free(dev_info->intr_queue);
185199a2dd95SBruce Richardson 
185299a2dd95SBruce Richardson 	return err;
185399a2dd95SBruce Richardson }
185499a2dd95SBruce Richardson 
185599a2dd95SBruce Richardson static int
1856a256a743SPavan Nikhilesh rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1857a256a743SPavan Nikhilesh 		   struct eth_device_info *dev_info, int rx_queue_id)
185899a2dd95SBruce Richardson 
185999a2dd95SBruce Richardson {
186099a2dd95SBruce Richardson 	int i, j, err;
186199a2dd95SBruce Richardson 	int si = -1;
186299a2dd95SBruce Richardson 	int shared_done = (dev_info->nb_shared_intr > 0);
186399a2dd95SBruce Richardson 
186499a2dd95SBruce Richardson 	if (rx_queue_id != -1) {
186599a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
186699a2dd95SBruce Richardson 			return 0;
186799a2dd95SBruce Richardson 		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
186899a2dd95SBruce Richardson 	}
186999a2dd95SBruce Richardson 
187099a2dd95SBruce Richardson 	err = 0;
187199a2dd95SBruce Richardson 	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
187299a2dd95SBruce Richardson 
187399a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, i) && shared_done)
187499a2dd95SBruce Richardson 			continue;
187599a2dd95SBruce Richardson 
187699a2dd95SBruce Richardson 		err = rxa_config_intr(rx_adapter, dev_info, i);
187799a2dd95SBruce Richardson 
187899a2dd95SBruce Richardson 		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
187999a2dd95SBruce Richardson 		if (shared_done) {
188099a2dd95SBruce Richardson 			si = i;
188199a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
188299a2dd95SBruce Richardson 		}
188399a2dd95SBruce Richardson 		if (err)
188499a2dd95SBruce Richardson 			break;
188599a2dd95SBruce Richardson 	}
188699a2dd95SBruce Richardson 
188799a2dd95SBruce Richardson 	if (err == 0)
188899a2dd95SBruce Richardson 		return 0;
188999a2dd95SBruce Richardson 
189099a2dd95SBruce Richardson 	shared_done = (dev_info->nb_shared_intr > 0);
189199a2dd95SBruce Richardson 	for (j = 0; j < i; j++) {
189299a2dd95SBruce Richardson 		if (rxa_intr_queue(dev_info, j))
189399a2dd95SBruce Richardson 			continue;
189499a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, j) && si != j)
189599a2dd95SBruce Richardson 			continue;
189699a2dd95SBruce Richardson 		err = rxa_disable_intr(rx_adapter, dev_info, j);
189799a2dd95SBruce Richardson 		if (err)
189899a2dd95SBruce Richardson 			break;
189999a2dd95SBruce Richardson 
190099a2dd95SBruce Richardson 	}
190199a2dd95SBruce Richardson 
190299a2dd95SBruce Richardson 	return err;
190399a2dd95SBruce Richardson }
190499a2dd95SBruce Richardson 
190599a2dd95SBruce Richardson static int
1906a256a743SPavan Nikhilesh rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
190799a2dd95SBruce Richardson {
190899a2dd95SBruce Richardson 	int ret;
190999a2dd95SBruce Richardson 	struct rte_service_spec service;
191099a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
191199a2dd95SBruce Richardson 
191299a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
191399a2dd95SBruce Richardson 		return 0;
191499a2dd95SBruce Richardson 
19155083736aSRahul Bhansali 	if (rte_mbuf_dyn_rx_timestamp_register(
19165083736aSRahul Bhansali 			&event_eth_rx_timestamp_dynfield_offset,
19175083736aSRahul Bhansali 			&event_eth_rx_timestamp_dynflag) != 0) {
1918ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf");
19195083736aSRahul Bhansali 		return -rte_errno;
19205083736aSRahul Bhansali 	}
19215083736aSRahul Bhansali 
192299a2dd95SBruce Richardson 	memset(&service, 0, sizeof(service));
192399a2dd95SBruce Richardson 	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
192499a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d", id);
192599a2dd95SBruce Richardson 	service.socket_id = rx_adapter->socket_id;
192699a2dd95SBruce Richardson 	service.callback = rxa_service_func;
192799a2dd95SBruce Richardson 	service.callback_userdata = rx_adapter;
192899a2dd95SBruce Richardson 	/* Service function handles locking for queue add/del updates */
192999a2dd95SBruce Richardson 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
193099a2dd95SBruce Richardson 	ret = rte_service_component_register(&service, &rx_adapter->service_id);
193199a2dd95SBruce Richardson 	if (ret) {
193299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
193399a2dd95SBruce Richardson 			service.name, ret);
193499a2dd95SBruce Richardson 		return ret;
193599a2dd95SBruce Richardson 	}
193699a2dd95SBruce Richardson 
193799a2dd95SBruce Richardson 	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
193899a2dd95SBruce Richardson 		&rx_adapter_conf, rx_adapter->conf_arg);
193999a2dd95SBruce Richardson 	if (ret) {
194099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
194199a2dd95SBruce Richardson 			ret);
194299a2dd95SBruce Richardson 		goto err_done;
194399a2dd95SBruce Richardson 	}
194499a2dd95SBruce Richardson 	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
194599a2dd95SBruce Richardson 	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
194699a2dd95SBruce Richardson 	rx_adapter->service_inited = 1;
194799a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
194899a2dd95SBruce Richardson 	return 0;
194999a2dd95SBruce Richardson 
195099a2dd95SBruce Richardson err_done:
195199a2dd95SBruce Richardson 	rte_service_component_unregister(rx_adapter->service_id);
195299a2dd95SBruce Richardson 	return ret;
195399a2dd95SBruce Richardson }
195499a2dd95SBruce Richardson 
195599a2dd95SBruce Richardson static void
1956a256a743SPavan Nikhilesh rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
1957a256a743SPavan Nikhilesh 		 struct eth_device_info *dev_info, int32_t rx_queue_id,
195899a2dd95SBruce Richardson 		 uint8_t add)
195999a2dd95SBruce Richardson {
196099a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
196199a2dd95SBruce Richardson 	int enabled;
196299a2dd95SBruce Richardson 	uint16_t i;
196399a2dd95SBruce Richardson 
196499a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
196599a2dd95SBruce Richardson 		return;
196699a2dd95SBruce Richardson 
196799a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
196899a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
196999a2dd95SBruce Richardson 			rxa_update_queue(rx_adapter, dev_info, i, add);
197099a2dd95SBruce Richardson 	} else {
197199a2dd95SBruce Richardson 		queue_info = &dev_info->rx_queue[rx_queue_id];
197299a2dd95SBruce Richardson 		enabled = queue_info->queue_enabled;
197399a2dd95SBruce Richardson 		if (add) {
197499a2dd95SBruce Richardson 			rx_adapter->nb_queues += !enabled;
197599a2dd95SBruce Richardson 			dev_info->nb_dev_queues += !enabled;
197699a2dd95SBruce Richardson 		} else {
197799a2dd95SBruce Richardson 			rx_adapter->nb_queues -= enabled;
197899a2dd95SBruce Richardson 			dev_info->nb_dev_queues -= enabled;
197999a2dd95SBruce Richardson 		}
198099a2dd95SBruce Richardson 		queue_info->queue_enabled = !!add;
198199a2dd95SBruce Richardson 	}
198299a2dd95SBruce Richardson }
198399a2dd95SBruce Richardson 
198499a2dd95SBruce Richardson static void
198599a2dd95SBruce Richardson rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
198699a2dd95SBruce Richardson 		    uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
198799a2dd95SBruce Richardson 		    uint16_t port_id)
198899a2dd95SBruce Richardson {
198999a2dd95SBruce Richardson #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
199099a2dd95SBruce Richardson 	struct eth_rx_vector_data *vector_data;
199199a2dd95SBruce Richardson 	uint32_t flow_id;
199299a2dd95SBruce Richardson 
199399a2dd95SBruce Richardson 	vector_data = &queue_info->vector_data;
199499a2dd95SBruce Richardson 	vector_data->max_vector_count = vector_count;
199599a2dd95SBruce Richardson 	vector_data->port = port_id;
199699a2dd95SBruce Richardson 	vector_data->queue = qid;
199799a2dd95SBruce Richardson 	vector_data->vector_pool = mp;
199899a2dd95SBruce Richardson 	vector_data->vector_timeout_ticks =
199999a2dd95SBruce Richardson 		NSEC2TICK(vector_ns, rte_get_timer_hz());
200099a2dd95SBruce Richardson 	vector_data->ts = 0;
200199a2dd95SBruce Richardson 	flow_id = queue_info->event & 0xFFFFF;
200299a2dd95SBruce Richardson 	flow_id =
200399a2dd95SBruce Richardson 		flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
200499a2dd95SBruce Richardson 	vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
200599a2dd95SBruce Richardson }
200699a2dd95SBruce Richardson 
200799a2dd95SBruce Richardson static void
2008a256a743SPavan Nikhilesh rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
2009a256a743SPavan Nikhilesh 	   struct eth_device_info *dev_info, int32_t rx_queue_id)
201099a2dd95SBruce Richardson {
201199a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
201299a2dd95SBruce Richardson 	int pollq;
201399a2dd95SBruce Richardson 	int intrq;
201499a2dd95SBruce Richardson 	int sintrq;
201599a2dd95SBruce Richardson 
201699a2dd95SBruce Richardson 	if (rx_adapter->nb_queues == 0)
201799a2dd95SBruce Richardson 		return;
201899a2dd95SBruce Richardson 
201999a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
202099a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
202199a2dd95SBruce Richardson 		uint16_t i;
202299a2dd95SBruce Richardson 
202399a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
202499a2dd95SBruce Richardson 		for (i = 0; i <	nb_rx_queues; i++)
202599a2dd95SBruce Richardson 			rxa_sw_del(rx_adapter, dev_info, i);
202699a2dd95SBruce Richardson 		return;
202799a2dd95SBruce Richardson 	}
202899a2dd95SBruce Richardson 
202999a2dd95SBruce Richardson 	/* Push all the partial event vectors to event device. */
203099a2dd95SBruce Richardson 	TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
203199a2dd95SBruce Richardson 		if (vec->queue != rx_queue_id)
203299a2dd95SBruce Richardson 			continue;
203399a2dd95SBruce Richardson 		rxa_vector_expire(vec, rx_adapter);
203499a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
203599a2dd95SBruce Richardson 	}
203699a2dd95SBruce Richardson 
203799a2dd95SBruce Richardson 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
203899a2dd95SBruce Richardson 	intrq = rxa_intr_queue(dev_info, rx_queue_id);
203999a2dd95SBruce Richardson 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
204099a2dd95SBruce Richardson 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
204199a2dd95SBruce Richardson 	rx_adapter->num_rx_polled -= pollq;
204299a2dd95SBruce Richardson 	dev_info->nb_rx_poll -= pollq;
204399a2dd95SBruce Richardson 	rx_adapter->num_rx_intr -= intrq;
204499a2dd95SBruce Richardson 	dev_info->nb_rx_intr -= intrq;
204599a2dd95SBruce Richardson 	dev_info->nb_shared_intr -= intrq && sintrq;
2046b06bca69SNaga Harish K S V 	if (rx_adapter->use_queue_event_buf) {
2047a256a743SPavan Nikhilesh 		struct eth_event_enqueue_buffer *event_buf =
2048b06bca69SNaga Harish K S V 			dev_info->rx_queue[rx_queue_id].event_buf;
2049995b150cSNaga Harish K S V 		struct rte_event_eth_rx_adapter_stats *stats =
2050995b150cSNaga Harish K S V 			dev_info->rx_queue[rx_queue_id].stats;
2051b06bca69SNaga Harish K S V 		rte_free(event_buf->events);
2052b06bca69SNaga Harish K S V 		rte_free(event_buf);
2053995b150cSNaga Harish K S V 		rte_free(stats);
2054b06bca69SNaga Harish K S V 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
2055995b150cSNaga Harish K S V 		dev_info->rx_queue[rx_queue_id].stats = NULL;
2056b06bca69SNaga Harish K S V 	}
205799a2dd95SBruce Richardson }
205899a2dd95SBruce Richardson 
2059b06bca69SNaga Harish K S V static int
2060a256a743SPavan Nikhilesh rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
2061a256a743SPavan Nikhilesh 	      struct eth_device_info *dev_info, int32_t rx_queue_id,
206299a2dd95SBruce Richardson 	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
206399a2dd95SBruce Richardson {
206499a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
206599a2dd95SBruce Richardson 	const struct rte_event *ev = &conf->ev;
206699a2dd95SBruce Richardson 	int pollq;
206799a2dd95SBruce Richardson 	int intrq;
206899a2dd95SBruce Richardson 	int sintrq;
206999a2dd95SBruce Richardson 	struct rte_event *qi_ev;
2070a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
2071995b150cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_stats *stats = NULL;
2072b06bca69SNaga Harish K S V 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
2073b06bca69SNaga Harish K S V 	int ret;
207499a2dd95SBruce Richardson 
207599a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
207699a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
207799a2dd95SBruce Richardson 		uint16_t i;
207899a2dd95SBruce Richardson 
207999a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
2080b06bca69SNaga Harish K S V 		for (i = 0; i <	nb_rx_queues; i++) {
2081b06bca69SNaga Harish K S V 			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
2082b06bca69SNaga Harish K S V 			if (ret)
2083b06bca69SNaga Harish K S V 				return ret;
2084b06bca69SNaga Harish K S V 		}
2085b06bca69SNaga Harish K S V 		return 0;
208699a2dd95SBruce Richardson 	}
208799a2dd95SBruce Richardson 
208899a2dd95SBruce Richardson 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
208999a2dd95SBruce Richardson 	intrq = rxa_intr_queue(dev_info, rx_queue_id);
209099a2dd95SBruce Richardson 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
209199a2dd95SBruce Richardson 
209299a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
209399a2dd95SBruce Richardson 	queue_info->wt = conf->servicing_weight;
209499a2dd95SBruce Richardson 
209599a2dd95SBruce Richardson 	qi_ev = (struct rte_event *)&queue_info->event;
209699a2dd95SBruce Richardson 	qi_ev->event = ev->event;
209799a2dd95SBruce Richardson 	qi_ev->op = RTE_EVENT_OP_NEW;
209899a2dd95SBruce Richardson 	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
209999a2dd95SBruce Richardson 
210099a2dd95SBruce Richardson 	if (conf->rx_queue_flags &
210199a2dd95SBruce Richardson 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
210299a2dd95SBruce Richardson 		queue_info->flow_id_mask = ~0;
210399a2dd95SBruce Richardson 	} else
210499a2dd95SBruce Richardson 		qi_ev->flow_id = 0;
210599a2dd95SBruce Richardson 
2106929ebdd5SPavan Nikhilesh 	if (conf->rx_queue_flags &
2107929ebdd5SPavan Nikhilesh 	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2108929ebdd5SPavan Nikhilesh 		queue_info->ena_vector = 1;
2109929ebdd5SPavan Nikhilesh 		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
2110929ebdd5SPavan Nikhilesh 		rxa_set_vector_data(queue_info, conf->vector_sz,
2111929ebdd5SPavan Nikhilesh 				    conf->vector_timeout_ns, conf->vector_mp,
2112929ebdd5SPavan Nikhilesh 				    rx_queue_id, dev_info->dev->data->port_id);
2113929ebdd5SPavan Nikhilesh 		rx_adapter->ena_vector = 1;
2114929ebdd5SPavan Nikhilesh 		rx_adapter->vector_tmo_ticks =
2115929ebdd5SPavan Nikhilesh 			rx_adapter->vector_tmo_ticks ?
2116929ebdd5SPavan Nikhilesh 				      RTE_MIN(queue_info->vector_data
2117929ebdd5SPavan Nikhilesh 							.vector_timeout_ticks >>
2118929ebdd5SPavan Nikhilesh 						1,
2119929ebdd5SPavan Nikhilesh 					rx_adapter->vector_tmo_ticks) :
2120929ebdd5SPavan Nikhilesh 				queue_info->vector_data.vector_timeout_ticks >>
2121929ebdd5SPavan Nikhilesh 					1;
2122929ebdd5SPavan Nikhilesh 	}
2123929ebdd5SPavan Nikhilesh 
212499a2dd95SBruce Richardson 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
212599a2dd95SBruce Richardson 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
212699a2dd95SBruce Richardson 		rx_adapter->num_rx_polled += !pollq;
212799a2dd95SBruce Richardson 		dev_info->nb_rx_poll += !pollq;
212899a2dd95SBruce Richardson 		rx_adapter->num_rx_intr -= intrq;
212999a2dd95SBruce Richardson 		dev_info->nb_rx_intr -= intrq;
213099a2dd95SBruce Richardson 		dev_info->nb_shared_intr -= intrq && sintrq;
213199a2dd95SBruce Richardson 	}
213299a2dd95SBruce Richardson 
213399a2dd95SBruce Richardson 	if (rxa_intr_queue(dev_info, rx_queue_id)) {
213499a2dd95SBruce Richardson 		rx_adapter->num_rx_polled -= pollq;
213599a2dd95SBruce Richardson 		dev_info->nb_rx_poll -= pollq;
213699a2dd95SBruce Richardson 		rx_adapter->num_rx_intr += !intrq;
213799a2dd95SBruce Richardson 		dev_info->nb_rx_intr += !intrq;
213899a2dd95SBruce Richardson 		dev_info->nb_shared_intr += !intrq && sintrq;
213999a2dd95SBruce Richardson 		if (dev_info->nb_shared_intr == 1) {
214099a2dd95SBruce Richardson 			if (dev_info->multi_intr_cap)
214199a2dd95SBruce Richardson 				dev_info->next_q_idx =
214299a2dd95SBruce Richardson 					RTE_MAX_RXTX_INTR_VEC_ID - 1;
214399a2dd95SBruce Richardson 			else
214499a2dd95SBruce Richardson 				dev_info->next_q_idx = 0;
214599a2dd95SBruce Richardson 		}
214699a2dd95SBruce Richardson 	}
2147b06bca69SNaga Harish K S V 
2148b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf)
2149b06bca69SNaga Harish K S V 		return 0;
2150b06bca69SNaga Harish K S V 
2151b06bca69SNaga Harish K S V 	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2152b06bca69SNaga Harish K S V 				sizeof(*new_rx_buf), 0,
2153b06bca69SNaga Harish K S V 				rte_eth_dev_socket_id(eth_dev_id));
2154b06bca69SNaga Harish K S V 	if (new_rx_buf == NULL) {
2155b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2156b06bca69SNaga Harish K S V 				 "dev_id: %d queue_id: %d",
2157b06bca69SNaga Harish K S V 				 eth_dev_id, rx_queue_id);
2158b06bca69SNaga Harish K S V 		return -ENOMEM;
2159b06bca69SNaga Harish K S V 	}
2160b06bca69SNaga Harish K S V 
2161b06bca69SNaga Harish K S V 	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2162b06bca69SNaga Harish K S V 	new_rx_buf->events_size += (2 * BATCH_SIZE);
2163b06bca69SNaga Harish K S V 	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2164b06bca69SNaga Harish K S V 				sizeof(struct rte_event) *
2165b06bca69SNaga Harish K S V 				new_rx_buf->events_size, 0,
2166b06bca69SNaga Harish K S V 				rte_eth_dev_socket_id(eth_dev_id));
2167b06bca69SNaga Harish K S V 	if (new_rx_buf->events == NULL) {
2168b06bca69SNaga Harish K S V 		rte_free(new_rx_buf);
2169b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2170b06bca69SNaga Harish K S V 				 "dev_id: %d queue_id: %d",
2171b06bca69SNaga Harish K S V 				 eth_dev_id, rx_queue_id);
2172b06bca69SNaga Harish K S V 		return -ENOMEM;
2173b06bca69SNaga Harish K S V 	}
2174b06bca69SNaga Harish K S V 
2175b06bca69SNaga Harish K S V 	queue_info->event_buf = new_rx_buf;
2176b06bca69SNaga Harish K S V 
2177995b150cSNaga Harish K S V 	/* Allocate storage for adapter queue stats */
2178995b150cSNaga Harish K S V 	stats = rte_zmalloc_socket("rx_queue_stats",
2179995b150cSNaga Harish K S V 				sizeof(*stats), 0,
2180995b150cSNaga Harish K S V 				rte_eth_dev_socket_id(eth_dev_id));
2181995b150cSNaga Harish K S V 	if (stats == NULL) {
2182995b150cSNaga Harish K S V 		rte_free(new_rx_buf->events);
2183995b150cSNaga Harish K S V 		rte_free(new_rx_buf);
2184995b150cSNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate stats storage for"
2185995b150cSNaga Harish K S V 				 " dev_id: %d queue_id: %d",
2186995b150cSNaga Harish K S V 				 eth_dev_id, rx_queue_id);
2187995b150cSNaga Harish K S V 		return -ENOMEM;
2188995b150cSNaga Harish K S V 	}
2189995b150cSNaga Harish K S V 
2190995b150cSNaga Harish K S V 	queue_info->stats = stats;
2191995b150cSNaga Harish K S V 
2192b06bca69SNaga Harish K S V 	return 0;
219399a2dd95SBruce Richardson }
219499a2dd95SBruce Richardson 
2195a256a743SPavan Nikhilesh static int
2196a256a743SPavan Nikhilesh rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
219799a2dd95SBruce Richardson 	   int rx_queue_id,
219899a2dd95SBruce Richardson 	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
219999a2dd95SBruce Richardson {
220099a2dd95SBruce Richardson 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
220199a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
220299a2dd95SBruce Richardson 	int ret;
220399a2dd95SBruce Richardson 	struct eth_rx_poll_entry *rx_poll;
220499a2dd95SBruce Richardson 	struct eth_rx_queue_info *rx_queue;
220599a2dd95SBruce Richardson 	uint32_t *rx_wrr;
220699a2dd95SBruce Richardson 	uint16_t nb_rx_queues;
220799a2dd95SBruce Richardson 	uint32_t nb_rx_poll, nb_wrr;
220899a2dd95SBruce Richardson 	uint32_t nb_rx_intr;
220999a2dd95SBruce Richardson 	int num_intr_vec;
221099a2dd95SBruce Richardson 	uint16_t wt;
221199a2dd95SBruce Richardson 
221299a2dd95SBruce Richardson 	if (queue_conf->servicing_weight == 0) {
221399a2dd95SBruce Richardson 		struct rte_eth_dev_data *data = dev_info->dev->data;
221499a2dd95SBruce Richardson 
221599a2dd95SBruce Richardson 		temp_conf = *queue_conf;
221699a2dd95SBruce Richardson 		if (!data->dev_conf.intr_conf.rxq) {
221799a2dd95SBruce Richardson 			/* If Rx interrupts are disabled set wt = 1 */
221899a2dd95SBruce Richardson 			temp_conf.servicing_weight = 1;
221999a2dd95SBruce Richardson 		}
222099a2dd95SBruce Richardson 		queue_conf = &temp_conf;
2221b06bca69SNaga Harish K S V 
2222b06bca69SNaga Harish K S V 		if (queue_conf->servicing_weight == 0 &&
2223b06bca69SNaga Harish K S V 		    rx_adapter->use_queue_event_buf) {
2224b06bca69SNaga Harish K S V 
2225b06bca69SNaga Harish K S V 			RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2226b06bca69SNaga Harish K S V 					 "not supported for interrupt queues "
2227b06bca69SNaga Harish K S V 					 "dev_id: %d queue_id: %d",
2228b06bca69SNaga Harish K S V 					 eth_dev_id, rx_queue_id);
2229b06bca69SNaga Harish K S V 			return -EINVAL;
2230b06bca69SNaga Harish K S V 		}
223199a2dd95SBruce Richardson 	}
223299a2dd95SBruce Richardson 
223399a2dd95SBruce Richardson 	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
223499a2dd95SBruce Richardson 	rx_queue = dev_info->rx_queue;
223599a2dd95SBruce Richardson 	wt = queue_conf->servicing_weight;
223699a2dd95SBruce Richardson 
223799a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL) {
223899a2dd95SBruce Richardson 		dev_info->rx_queue =
223999a2dd95SBruce Richardson 		    rte_zmalloc_socket(rx_adapter->mem_name,
224099a2dd95SBruce Richardson 				       nb_rx_queues *
224199a2dd95SBruce Richardson 				       sizeof(struct eth_rx_queue_info), 0,
224299a2dd95SBruce Richardson 				       rx_adapter->socket_id);
224399a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
224499a2dd95SBruce Richardson 			return -ENOMEM;
224599a2dd95SBruce Richardson 	}
224699a2dd95SBruce Richardson 	rx_wrr = NULL;
224799a2dd95SBruce Richardson 	rx_poll = NULL;
224899a2dd95SBruce Richardson 
224999a2dd95SBruce Richardson 	rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
225099a2dd95SBruce Richardson 			queue_conf->servicing_weight,
225199a2dd95SBruce Richardson 			&nb_rx_poll, &nb_rx_intr, &nb_wrr);
225299a2dd95SBruce Richardson 
225399a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle)
225499a2dd95SBruce Richardson 		dev_info->multi_intr_cap =
225599a2dd95SBruce Richardson 			rte_intr_cap_multiple(dev_info->dev->intr_handle);
225699a2dd95SBruce Richardson 
225799a2dd95SBruce Richardson 	ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
225899a2dd95SBruce Richardson 				&rx_poll, &rx_wrr);
225999a2dd95SBruce Richardson 	if (ret)
226099a2dd95SBruce Richardson 		goto err_free_rxqueue;
226199a2dd95SBruce Richardson 
226299a2dd95SBruce Richardson 	if (wt == 0) {
226399a2dd95SBruce Richardson 		num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
226499a2dd95SBruce Richardson 
226599a2dd95SBruce Richardson 		ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
226699a2dd95SBruce Richardson 		if (ret)
226799a2dd95SBruce Richardson 			goto err_free_rxqueue;
226899a2dd95SBruce Richardson 
226999a2dd95SBruce Richardson 		ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
227099a2dd95SBruce Richardson 		if (ret)
227199a2dd95SBruce Richardson 			goto err_free_rxqueue;
227299a2dd95SBruce Richardson 	} else {
227399a2dd95SBruce Richardson 
227499a2dd95SBruce Richardson 		num_intr_vec = 0;
227599a2dd95SBruce Richardson 		if (rx_adapter->num_rx_intr > nb_rx_intr) {
227699a2dd95SBruce Richardson 			num_intr_vec = rxa_nb_intr_vect(dev_info,
227799a2dd95SBruce Richardson 						rx_queue_id, 0);
227899a2dd95SBruce Richardson 			/* interrupt based queues are being converted to
227999a2dd95SBruce Richardson 			 * poll mode queues, delete the interrupt configuration
228099a2dd95SBruce Richardson 			 * for those.
228199a2dd95SBruce Richardson 			 */
228299a2dd95SBruce Richardson 			ret = rxa_del_intr_queue(rx_adapter,
228399a2dd95SBruce Richardson 						dev_info, rx_queue_id);
228499a2dd95SBruce Richardson 			if (ret)
228599a2dd95SBruce Richardson 				goto err_free_rxqueue;
228699a2dd95SBruce Richardson 		}
228799a2dd95SBruce Richardson 	}
228899a2dd95SBruce Richardson 
228999a2dd95SBruce Richardson 	if (nb_rx_intr == 0) {
229099a2dd95SBruce Richardson 		ret = rxa_free_intr_resources(rx_adapter);
229199a2dd95SBruce Richardson 		if (ret)
229299a2dd95SBruce Richardson 			goto err_free_rxqueue;
229399a2dd95SBruce Richardson 	}
229499a2dd95SBruce Richardson 
229599a2dd95SBruce Richardson 	if (wt == 0) {
229699a2dd95SBruce Richardson 		uint16_t i;
229799a2dd95SBruce Richardson 
229899a2dd95SBruce Richardson 		if (rx_queue_id  == -1) {
229999a2dd95SBruce Richardson 			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
230099a2dd95SBruce Richardson 				dev_info->intr_queue[i] = i;
230199a2dd95SBruce Richardson 		} else {
2302*952b24bdSBruce Richardson 			if (!rxa_intr_queue(dev_info, rx_queue_id) && nb_rx_intr > 0)
230399a2dd95SBruce Richardson 				dev_info->intr_queue[nb_rx_intr - 1] =
230499a2dd95SBruce Richardson 					rx_queue_id;
230599a2dd95SBruce Richardson 		}
230699a2dd95SBruce Richardson 	}
230799a2dd95SBruce Richardson 
230899a2dd95SBruce Richardson 
230999a2dd95SBruce Richardson 
2310b06bca69SNaga Harish K S V 	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2311b06bca69SNaga Harish K S V 	if (ret)
2312b06bca69SNaga Harish K S V 		goto err_free_rxqueue;
231399a2dd95SBruce Richardson 	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
231499a2dd95SBruce Richardson 
231599a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_rx_poll);
231699a2dd95SBruce Richardson 	rte_free(rx_adapter->wrr_sched);
231799a2dd95SBruce Richardson 
231899a2dd95SBruce Richardson 	rx_adapter->eth_rx_poll = rx_poll;
231999a2dd95SBruce Richardson 	rx_adapter->wrr_sched = rx_wrr;
232099a2dd95SBruce Richardson 	rx_adapter->wrr_len = nb_wrr;
232199a2dd95SBruce Richardson 	rx_adapter->num_intr_vec += num_intr_vec;
232299a2dd95SBruce Richardson 	return 0;
232399a2dd95SBruce Richardson 
232499a2dd95SBruce Richardson err_free_rxqueue:
232599a2dd95SBruce Richardson 	if (rx_queue == NULL) {
232699a2dd95SBruce Richardson 		rte_free(dev_info->rx_queue);
232799a2dd95SBruce Richardson 		dev_info->rx_queue = NULL;
232899a2dd95SBruce Richardson 	}
232999a2dd95SBruce Richardson 
233099a2dd95SBruce Richardson 	rte_free(rx_poll);
233199a2dd95SBruce Richardson 	rte_free(rx_wrr);
233299a2dd95SBruce Richardson 
2333b06bca69SNaga Harish K S V 	return ret;
233499a2dd95SBruce Richardson }
233599a2dd95SBruce Richardson 
233699a2dd95SBruce Richardson static int
233799a2dd95SBruce Richardson rxa_ctrl(uint8_t id, int start)
233899a2dd95SBruce Richardson {
2339a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
234099a2dd95SBruce Richardson 	struct rte_eventdev *dev;
234199a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
234299a2dd95SBruce Richardson 	uint32_t i;
234399a2dd95SBruce Richardson 	int use_service = 0;
234499a2dd95SBruce Richardson 	int stop = !start;
234599a2dd95SBruce Richardson 
234699a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
234799a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
234899a2dd95SBruce Richardson 	if (rx_adapter == NULL)
234999a2dd95SBruce Richardson 		return -EINVAL;
235099a2dd95SBruce Richardson 
235199a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
235299a2dd95SBruce Richardson 
235399a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
235499a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
235599a2dd95SBruce Richardson 		/* if start  check for num dev queues */
235699a2dd95SBruce Richardson 		if (start && !dev_info->nb_dev_queues)
235799a2dd95SBruce Richardson 			continue;
235899a2dd95SBruce Richardson 		/* if stop check if dev has been started */
235999a2dd95SBruce Richardson 		if (stop && !dev_info->dev_rx_started)
236099a2dd95SBruce Richardson 			continue;
236199a2dd95SBruce Richardson 		use_service |= !dev_info->internal_event_port;
236299a2dd95SBruce Richardson 		dev_info->dev_rx_started = start;
236399a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0)
236499a2dd95SBruce Richardson 			continue;
236599a2dd95SBruce Richardson 		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
236699a2dd95SBruce Richardson 						&rte_eth_devices[i]) :
236799a2dd95SBruce Richardson 			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
236899a2dd95SBruce Richardson 						&rte_eth_devices[i]);
236999a2dd95SBruce Richardson 	}
237099a2dd95SBruce Richardson 
237199a2dd95SBruce Richardson 	if (use_service) {
237299a2dd95SBruce Richardson 		rte_spinlock_lock(&rx_adapter->rx_lock);
237399a2dd95SBruce Richardson 		rx_adapter->rxa_started = start;
237499a2dd95SBruce Richardson 		rte_service_runstate_set(rx_adapter->service_id, start);
237599a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
237699a2dd95SBruce Richardson 	}
237799a2dd95SBruce Richardson 
237899a2dd95SBruce Richardson 	return 0;
237999a2dd95SBruce Richardson }
238099a2dd95SBruce Richardson 
2381bc0df25cSNaga Harish K S V static int
2382bc0df25cSNaga Harish K S V rxa_create(uint8_t id, uint8_t dev_id,
2383bc0df25cSNaga Harish K S V 	   struct rte_event_eth_rx_adapter_params *rxa_params,
238499a2dd95SBruce Richardson 	   rte_event_eth_rx_adapter_conf_cb conf_cb,
238599a2dd95SBruce Richardson 	   void *conf_arg)
238699a2dd95SBruce Richardson {
2387a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
2388a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf;
2389bc0df25cSNaga Harish K S V 	struct rte_event *events;
239099a2dd95SBruce Richardson 	int ret;
239199a2dd95SBruce Richardson 	int socket_id;
239299a2dd95SBruce Richardson 	uint16_t i;
239399a2dd95SBruce Richardson 	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
239499a2dd95SBruce Richardson 	const uint8_t default_rss_key[] = {
239599a2dd95SBruce Richardson 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
239699a2dd95SBruce Richardson 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
239799a2dd95SBruce Richardson 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
239899a2dd95SBruce Richardson 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
239999a2dd95SBruce Richardson 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
240099a2dd95SBruce Richardson 	};
240199a2dd95SBruce Richardson 
240299a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
240399a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2404bc0df25cSNaga Harish K S V 
240599a2dd95SBruce Richardson 	if (conf_cb == NULL)
240699a2dd95SBruce Richardson 		return -EINVAL;
240799a2dd95SBruce Richardson 
240899a2dd95SBruce Richardson 	if (event_eth_rx_adapter == NULL) {
240999a2dd95SBruce Richardson 		ret = rte_event_eth_rx_adapter_init();
241099a2dd95SBruce Richardson 		if (ret)
241199a2dd95SBruce Richardson 			return ret;
241299a2dd95SBruce Richardson 	}
241399a2dd95SBruce Richardson 
241499a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
241599a2dd95SBruce Richardson 	if (rx_adapter != NULL) {
241699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
241799a2dd95SBruce Richardson 		return -EEXIST;
241899a2dd95SBruce Richardson 	}
241999a2dd95SBruce Richardson 
242099a2dd95SBruce Richardson 	socket_id = rte_event_dev_socket_id(dev_id);
242199a2dd95SBruce Richardson 	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
242299a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d",
242399a2dd95SBruce Richardson 		id);
242499a2dd95SBruce Richardson 
242599a2dd95SBruce Richardson 	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
242699a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE, socket_id);
242799a2dd95SBruce Richardson 	if (rx_adapter == NULL) {
242899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
242999a2dd95SBruce Richardson 		return -ENOMEM;
243099a2dd95SBruce Richardson 	}
243199a2dd95SBruce Richardson 
243299a2dd95SBruce Richardson 	rx_adapter->eventdev_id = dev_id;
243399a2dd95SBruce Richardson 	rx_adapter->socket_id = socket_id;
243499a2dd95SBruce Richardson 	rx_adapter->conf_cb = conf_cb;
243599a2dd95SBruce Richardson 	rx_adapter->conf_arg = conf_arg;
243699a2dd95SBruce Richardson 	rx_adapter->id = id;
243799a2dd95SBruce Richardson 	TAILQ_INIT(&rx_adapter->vector_list);
243899a2dd95SBruce Richardson 	strcpy(rx_adapter->mem_name, mem_name);
243999a2dd95SBruce Richardson 	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
244099a2dd95SBruce Richardson 					RTE_MAX_ETHPORTS *
244199a2dd95SBruce Richardson 					sizeof(struct eth_device_info), 0,
244299a2dd95SBruce Richardson 					socket_id);
244399a2dd95SBruce Richardson 	rte_convert_rss_key((const uint32_t *)default_rss_key,
244499a2dd95SBruce Richardson 			(uint32_t *)rx_adapter->rss_key_be,
244599a2dd95SBruce Richardson 			    RTE_DIM(default_rss_key));
244699a2dd95SBruce Richardson 
244799a2dd95SBruce Richardson 	if (rx_adapter->eth_devices == NULL) {
2448ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("failed to get mem for eth devices");
244999a2dd95SBruce Richardson 		rte_free(rx_adapter);
245099a2dd95SBruce Richardson 		return -ENOMEM;
245199a2dd95SBruce Richardson 	}
2452bc0df25cSNaga Harish K S V 
245399a2dd95SBruce Richardson 	rte_spinlock_init(&rx_adapter->rx_lock);
2454bc0df25cSNaga Harish K S V 
245599a2dd95SBruce Richardson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
245699a2dd95SBruce Richardson 		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
245799a2dd95SBruce Richardson 
2458bc0df25cSNaga Harish K S V 	/* Rx adapter event buffer allocation */
2459b06bca69SNaga Harish K S V 	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2460b06bca69SNaga Harish K S V 
2461b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf) {
2462bc0df25cSNaga Harish K S V 		buf = &rx_adapter->event_enqueue_buffer;
2463bc0df25cSNaga Harish K S V 		buf->events_size = rxa_params->event_buf_size;
2464bc0df25cSNaga Harish K S V 
2465bc0df25cSNaga Harish K S V 		events = rte_zmalloc_socket(rx_adapter->mem_name,
2466bc0df25cSNaga Harish K S V 					    buf->events_size * sizeof(*events),
2467bc0df25cSNaga Harish K S V 					    0, socket_id);
2468bc0df25cSNaga Harish K S V 		if (events == NULL) {
2469b06bca69SNaga Harish K S V 			RTE_EDEV_LOG_ERR("Failed to allocate memory "
2470b06bca69SNaga Harish K S V 					 "for adapter event buffer");
2471bc0df25cSNaga Harish K S V 			rte_free(rx_adapter->eth_devices);
2472bc0df25cSNaga Harish K S V 			rte_free(rx_adapter);
2473bc0df25cSNaga Harish K S V 			return -ENOMEM;
2474bc0df25cSNaga Harish K S V 		}
2475bc0df25cSNaga Harish K S V 
2476bc0df25cSNaga Harish K S V 		rx_adapter->event_enqueue_buffer.events = events;
2477b06bca69SNaga Harish K S V 	}
2478bc0df25cSNaga Harish K S V 
247999a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = rx_adapter;
2480bc0df25cSNaga Harish K S V 
248199a2dd95SBruce Richardson 	if (conf_cb == rxa_default_conf_cb)
248299a2dd95SBruce Richardson 		rx_adapter->default_cb_arg = 1;
248383ab470dSGanapati Kundapura 
248499a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
248599a2dd95SBruce Richardson 		conf_arg);
248699a2dd95SBruce Richardson 	return 0;
248799a2dd95SBruce Richardson }
248899a2dd95SBruce Richardson 
24891f07a41dSNaga Harish K S V static int
24901f07a41dSNaga Harish K S V rxa_config_params_validate(struct rte_event_eth_rx_adapter_params *rxa_params,
24911f07a41dSNaga Harish K S V 			   struct rte_event_eth_rx_adapter_params *temp_params)
24921f07a41dSNaga Harish K S V {
24931f07a41dSNaga Harish K S V 	if (rxa_params == NULL) {
24941f07a41dSNaga Harish K S V 		/* use default values if rxa_params is NULL */
24951f07a41dSNaga Harish K S V 		temp_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
24961f07a41dSNaga Harish K S V 		temp_params->use_queue_event_buf = false;
24971f07a41dSNaga Harish K S V 		return 0;
24981f07a41dSNaga Harish K S V 	} else if (!rxa_params->use_queue_event_buf &&
24991f07a41dSNaga Harish K S V 		    rxa_params->event_buf_size == 0) {
2500ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("event buffer size can't be zero");
25011f07a41dSNaga Harish K S V 		return -EINVAL;
25021f07a41dSNaga Harish K S V 	} else if (rxa_params->use_queue_event_buf &&
25031f07a41dSNaga Harish K S V 		   rxa_params->event_buf_size != 0) {
25041f07a41dSNaga Harish K S V 		RTE_EDEV_LOG_ERR("event buffer size needs to be configured "
2505ae282b06SDavid Marchand 				 "as part of queue add");
25061f07a41dSNaga Harish K S V 		return -EINVAL;
25071f07a41dSNaga Harish K S V 	}
25081f07a41dSNaga Harish K S V 
25091f07a41dSNaga Harish K S V 	*temp_params = *rxa_params;
25101f07a41dSNaga Harish K S V 	/* adjust event buff size with BATCH_SIZE used for fetching
25111f07a41dSNaga Harish K S V 	 * packets from NIC rx queues to get full buffer utilization
25121f07a41dSNaga Harish K S V 	 * and prevent unnecessary rollovers.
25131f07a41dSNaga Harish K S V 	 */
25141f07a41dSNaga Harish K S V 	if (!temp_params->use_queue_event_buf) {
25151f07a41dSNaga Harish K S V 		temp_params->event_buf_size =
25161f07a41dSNaga Harish K S V 			RTE_ALIGN(temp_params->event_buf_size, BATCH_SIZE);
25171f07a41dSNaga Harish K S V 		temp_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
25181f07a41dSNaga Harish K S V 	}
25191f07a41dSNaga Harish K S V 
25201f07a41dSNaga Harish K S V 	return 0;
25211f07a41dSNaga Harish K S V }
25221f07a41dSNaga Harish K S V 
252399a2dd95SBruce Richardson int
2524bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2525bc0df25cSNaga Harish K S V 				rte_event_eth_rx_adapter_conf_cb conf_cb,
2526bc0df25cSNaga Harish K S V 				void *conf_arg)
2527bc0df25cSNaga Harish K S V {
2528bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params rxa_params = {0};
2529bc0df25cSNaga Harish K S V 
2530bc0df25cSNaga Harish K S V 	/* use default values for adapter params */
2531bc0df25cSNaga Harish K S V 	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2532b06bca69SNaga Harish K S V 	rxa_params.use_queue_event_buf = false;
2533bc0df25cSNaga Harish K S V 
2534bc0df25cSNaga Harish K S V 	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2535bc0df25cSNaga Harish K S V }
2536bc0df25cSNaga Harish K S V 
2537bc0df25cSNaga Harish K S V int
2538bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2539bc0df25cSNaga Harish K S V 			struct rte_event_port_conf *port_config,
2540bc0df25cSNaga Harish K S V 			struct rte_event_eth_rx_adapter_params *rxa_params)
2541bc0df25cSNaga Harish K S V {
2542bc0df25cSNaga Harish K S V 	struct rte_event_port_conf *pc;
2543bc0df25cSNaga Harish K S V 	int ret;
2544bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params temp_params = {0};
2545bc0df25cSNaga Harish K S V 
2546bc0df25cSNaga Harish K S V 	if (port_config == NULL)
2547bc0df25cSNaga Harish K S V 		return -EINVAL;
2548bc0df25cSNaga Harish K S V 
25491f07a41dSNaga Harish K S V 	ret = rxa_config_params_validate(rxa_params, &temp_params);
25501f07a41dSNaga Harish K S V 	if (ret != 0)
25511f07a41dSNaga Harish K S V 		return ret;
2552bc0df25cSNaga Harish K S V 
2553bc0df25cSNaga Harish K S V 	pc = rte_malloc(NULL, sizeof(*pc), 0);
2554bc0df25cSNaga Harish K S V 	if (pc == NULL)
2555bc0df25cSNaga Harish K S V 		return -ENOMEM;
2556bc0df25cSNaga Harish K S V 
2557bc0df25cSNaga Harish K S V 	*pc = *port_config;
2558bc0df25cSNaga Harish K S V 
25591f07a41dSNaga Harish K S V 	ret = rxa_create(id, dev_id, &temp_params, rxa_default_conf_cb, pc);
2560bc0df25cSNaga Harish K S V 	if (ret)
2561bc0df25cSNaga Harish K S V 		rte_free(pc);
2562bc0df25cSNaga Harish K S V 
25637f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_create_with_params(id, dev_id,
25647f2d9df6SAmit Prakash Shukla 		port_config, rxa_params, ret);
25657f2d9df6SAmit Prakash Shukla 
2566bc0df25cSNaga Harish K S V 	return ret;
2567bc0df25cSNaga Harish K S V }
2568bc0df25cSNaga Harish K S V 
2569bc0df25cSNaga Harish K S V int
25701f07a41dSNaga Harish K S V rte_event_eth_rx_adapter_create_ext_with_params(uint8_t id, uint8_t dev_id,
25711f07a41dSNaga Harish K S V 			rte_event_eth_rx_adapter_conf_cb conf_cb,
25721f07a41dSNaga Harish K S V 			void *conf_arg,
25731f07a41dSNaga Harish K S V 			struct rte_event_eth_rx_adapter_params *rxa_params)
25741f07a41dSNaga Harish K S V {
25751f07a41dSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params temp_params = {0};
25761f07a41dSNaga Harish K S V 	int ret;
25771f07a41dSNaga Harish K S V 
25781f07a41dSNaga Harish K S V 	ret = rxa_config_params_validate(rxa_params, &temp_params);
25791f07a41dSNaga Harish K S V 	if (ret != 0)
25801f07a41dSNaga Harish K S V 		return ret;
25811f07a41dSNaga Harish K S V 
25821f07a41dSNaga Harish K S V 	return rxa_create(id, dev_id, &temp_params, conf_cb, conf_arg);
25831f07a41dSNaga Harish K S V }
25841f07a41dSNaga Harish K S V 
25851f07a41dSNaga Harish K S V int
258699a2dd95SBruce Richardson rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
258799a2dd95SBruce Richardson 		struct rte_event_port_conf *port_config)
258899a2dd95SBruce Richardson {
258999a2dd95SBruce Richardson 	struct rte_event_port_conf *pc;
259099a2dd95SBruce Richardson 	int ret;
259199a2dd95SBruce Richardson 
259299a2dd95SBruce Richardson 	if (port_config == NULL)
259399a2dd95SBruce Richardson 		return -EINVAL;
2594bc0df25cSNaga Harish K S V 
259599a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
259699a2dd95SBruce Richardson 
259799a2dd95SBruce Richardson 	pc = rte_malloc(NULL, sizeof(*pc), 0);
259899a2dd95SBruce Richardson 	if (pc == NULL)
259999a2dd95SBruce Richardson 		return -ENOMEM;
260099a2dd95SBruce Richardson 	*pc = *port_config;
2601bc0df25cSNaga Harish K S V 
260299a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
260399a2dd95SBruce Richardson 					rxa_default_conf_cb,
260499a2dd95SBruce Richardson 					pc);
260599a2dd95SBruce Richardson 	if (ret)
260699a2dd95SBruce Richardson 		rte_free(pc);
260799a2dd95SBruce Richardson 	return ret;
260899a2dd95SBruce Richardson }
260999a2dd95SBruce Richardson 
261099a2dd95SBruce Richardson int
261199a2dd95SBruce Richardson rte_event_eth_rx_adapter_free(uint8_t id)
261299a2dd95SBruce Richardson {
2613a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
261499a2dd95SBruce Richardson 
2615a1793ee8SGanapati Kundapura 	if (rxa_memzone_lookup())
2616a1793ee8SGanapati Kundapura 		return -ENOMEM;
2617a1793ee8SGanapati Kundapura 
261899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
261999a2dd95SBruce Richardson 
262099a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
262199a2dd95SBruce Richardson 	if (rx_adapter == NULL)
262299a2dd95SBruce Richardson 		return -EINVAL;
262399a2dd95SBruce Richardson 
262499a2dd95SBruce Richardson 	if (rx_adapter->nb_queues) {
262599a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
262699a2dd95SBruce Richardson 				rx_adapter->nb_queues);
262799a2dd95SBruce Richardson 		return -EBUSY;
262899a2dd95SBruce Richardson 	}
262999a2dd95SBruce Richardson 
263099a2dd95SBruce Richardson 	if (rx_adapter->default_cb_arg)
263199a2dd95SBruce Richardson 		rte_free(rx_adapter->conf_arg);
263299a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_devices);
2633b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf)
2634bc0df25cSNaga Harish K S V 		rte_free(rx_adapter->event_enqueue_buffer.events);
263599a2dd95SBruce Richardson 	rte_free(rx_adapter);
263699a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = NULL;
263799a2dd95SBruce Richardson 
263899a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_free(id);
263999a2dd95SBruce Richardson 	return 0;
264099a2dd95SBruce Richardson }
264199a2dd95SBruce Richardson 
264299a2dd95SBruce Richardson int
264399a2dd95SBruce Richardson rte_event_eth_rx_adapter_queue_add(uint8_t id,
264499a2dd95SBruce Richardson 		uint16_t eth_dev_id,
264599a2dd95SBruce Richardson 		int32_t rx_queue_id,
264699a2dd95SBruce Richardson 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
264799a2dd95SBruce Richardson {
264899a2dd95SBruce Richardson 	int ret;
264999a2dd95SBruce Richardson 	uint32_t cap;
2650a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
265199a2dd95SBruce Richardson 	struct rte_eventdev *dev;
265299a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
2653929ebdd5SPavan Nikhilesh 	struct rte_event_eth_rx_adapter_vector_limits limits;
265499a2dd95SBruce Richardson 
2655a1793ee8SGanapati Kundapura 	if (rxa_memzone_lookup())
2656a1793ee8SGanapati Kundapura 		return -ENOMEM;
2657a1793ee8SGanapati Kundapura 
265899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
265999a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
266099a2dd95SBruce Richardson 
266199a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
266299a2dd95SBruce Richardson 	if ((rx_adapter == NULL) || (queue_conf == NULL))
266399a2dd95SBruce Richardson 		return -EINVAL;
266499a2dd95SBruce Richardson 
266599a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
266699a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
266799a2dd95SBruce Richardson 						eth_dev_id,
266899a2dd95SBruce Richardson 						&cap);
266999a2dd95SBruce Richardson 	if (ret) {
267099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
267199a2dd95SBruce Richardson 			"eth port %" PRIu16, id, eth_dev_id);
267299a2dd95SBruce Richardson 		return ret;
267399a2dd95SBruce Richardson 	}
267499a2dd95SBruce Richardson 
267599a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
267699a2dd95SBruce Richardson 		&& (queue_conf->rx_queue_flags &
267799a2dd95SBruce Richardson 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
267899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
267999a2dd95SBruce Richardson 				" eth port: %" PRIu16 " adapter id: %" PRIu8,
268099a2dd95SBruce Richardson 				eth_dev_id, id);
268199a2dd95SBruce Richardson 		return -EINVAL;
268299a2dd95SBruce Richardson 	}
268399a2dd95SBruce Richardson 
2684929ebdd5SPavan Nikhilesh 	if (queue_conf->rx_queue_flags &
2685929ebdd5SPavan Nikhilesh 	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
2686929ebdd5SPavan Nikhilesh 
2687929ebdd5SPavan Nikhilesh 		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
268899a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
2689929ebdd5SPavan Nikhilesh 					 " eth port: %" PRIu16
2690929ebdd5SPavan Nikhilesh 					 " adapter id: %" PRIu8,
269199a2dd95SBruce Richardson 					 eth_dev_id, id);
269299a2dd95SBruce Richardson 			return -EINVAL;
269399a2dd95SBruce Richardson 		}
269499a2dd95SBruce Richardson 
2695929ebdd5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_vector_limits_get(
2696929ebdd5SPavan Nikhilesh 			rx_adapter->eventdev_id, eth_dev_id, &limits);
2697929ebdd5SPavan Nikhilesh 		if (ret < 0) {
2698929ebdd5SPavan Nikhilesh 			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
2699929ebdd5SPavan Nikhilesh 					 " eth port: %" PRIu16
2700929ebdd5SPavan Nikhilesh 					 " adapter id: %" PRIu8,
2701929ebdd5SPavan Nikhilesh 					 eth_dev_id, id);
2702929ebdd5SPavan Nikhilesh 			return -EINVAL;
2703929ebdd5SPavan Nikhilesh 		}
2704929ebdd5SPavan Nikhilesh 		if (queue_conf->vector_sz < limits.min_sz ||
2705929ebdd5SPavan Nikhilesh 		    queue_conf->vector_sz > limits.max_sz ||
2706929ebdd5SPavan Nikhilesh 		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
2707929ebdd5SPavan Nikhilesh 		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
2708929ebdd5SPavan Nikhilesh 		    queue_conf->vector_mp == NULL) {
2709929ebdd5SPavan Nikhilesh 			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2710929ebdd5SPavan Nikhilesh 					 " eth port: %" PRIu16
2711929ebdd5SPavan Nikhilesh 					 " adapter id: %" PRIu8,
2712929ebdd5SPavan Nikhilesh 					 eth_dev_id, id);
2713929ebdd5SPavan Nikhilesh 			return -EINVAL;
2714929ebdd5SPavan Nikhilesh 		}
2715929ebdd5SPavan Nikhilesh 		if (queue_conf->vector_mp->elt_size <
2716929ebdd5SPavan Nikhilesh 		    (sizeof(struct rte_event_vector) +
2717929ebdd5SPavan Nikhilesh 		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
2718929ebdd5SPavan Nikhilesh 			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
2719929ebdd5SPavan Nikhilesh 					 " eth port: %" PRIu16
2720929ebdd5SPavan Nikhilesh 					 " adapter id: %" PRIu8,
2721929ebdd5SPavan Nikhilesh 					 eth_dev_id, id);
2722929ebdd5SPavan Nikhilesh 			return -EINVAL;
2723929ebdd5SPavan Nikhilesh 		}
2724929ebdd5SPavan Nikhilesh 	}
2725929ebdd5SPavan Nikhilesh 
272699a2dd95SBruce Richardson 	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
272799a2dd95SBruce Richardson 		(rx_queue_id != -1)) {
272899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
272999a2dd95SBruce Richardson 			"event queue, eth port: %" PRIu16 " adapter id: %"
273099a2dd95SBruce Richardson 			PRIu8, eth_dev_id, id);
273199a2dd95SBruce Richardson 		return -EINVAL;
273299a2dd95SBruce Richardson 	}
273399a2dd95SBruce Richardson 
273499a2dd95SBruce Richardson 	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
273599a2dd95SBruce Richardson 			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
273699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
273799a2dd95SBruce Richardson 			 (uint16_t)rx_queue_id);
273899a2dd95SBruce Richardson 		return -EINVAL;
273999a2dd95SBruce Richardson 	}
274099a2dd95SBruce Richardson 
2741b06bca69SNaga Harish K S V 	if ((rx_adapter->use_queue_event_buf &&
2742b06bca69SNaga Harish K S V 	     queue_conf->event_buf_size == 0) ||
2743b06bca69SNaga Harish K S V 	    (!rx_adapter->use_queue_event_buf &&
2744b06bca69SNaga Harish K S V 	     queue_conf->event_buf_size != 0)) {
2745b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
2746b06bca69SNaga Harish K S V 		return -EINVAL;
2747b06bca69SNaga Harish K S V 	}
2748b06bca69SNaga Harish K S V 
274999a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
275099a2dd95SBruce Richardson 
275199a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
27528f1d23ecSDavid Marchand 		if (*dev->dev_ops->eth_rx_adapter_queue_add == NULL)
27538f1d23ecSDavid Marchand 			return -ENOTSUP;
275499a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL) {
275599a2dd95SBruce Richardson 			dev_info->rx_queue =
275699a2dd95SBruce Richardson 			    rte_zmalloc_socket(rx_adapter->mem_name,
275799a2dd95SBruce Richardson 					dev_info->dev->data->nb_rx_queues *
275899a2dd95SBruce Richardson 					sizeof(struct eth_rx_queue_info), 0,
275999a2dd95SBruce Richardson 					rx_adapter->socket_id);
276099a2dd95SBruce Richardson 			if (dev_info->rx_queue == NULL)
276199a2dd95SBruce Richardson 				return -ENOMEM;
276299a2dd95SBruce Richardson 		}
276399a2dd95SBruce Richardson 
276499a2dd95SBruce Richardson 		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
276599a2dd95SBruce Richardson 				&rte_eth_devices[eth_dev_id],
276699a2dd95SBruce Richardson 				rx_queue_id, queue_conf);
276799a2dd95SBruce Richardson 		if (ret == 0) {
276899a2dd95SBruce Richardson 			dev_info->internal_event_port = 1;
276999a2dd95SBruce Richardson 			rxa_update_queue(rx_adapter,
277099a2dd95SBruce Richardson 					&rx_adapter->eth_devices[eth_dev_id],
277199a2dd95SBruce Richardson 					rx_queue_id,
277299a2dd95SBruce Richardson 					1);
277399a2dd95SBruce Richardson 		}
277499a2dd95SBruce Richardson 	} else {
277599a2dd95SBruce Richardson 		rte_spinlock_lock(&rx_adapter->rx_lock);
277699a2dd95SBruce Richardson 		dev_info->internal_event_port = 0;
277799a2dd95SBruce Richardson 		ret = rxa_init_service(rx_adapter, id);
277899a2dd95SBruce Richardson 		if (ret == 0) {
277999a2dd95SBruce Richardson 			uint32_t service_id = rx_adapter->service_id;
278099a2dd95SBruce Richardson 			ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
278199a2dd95SBruce Richardson 					queue_conf);
278299a2dd95SBruce Richardson 			rte_service_component_runstate_set(service_id,
278399a2dd95SBruce Richardson 				rxa_sw_adapter_queue_count(rx_adapter));
278499a2dd95SBruce Richardson 		}
278599a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
278699a2dd95SBruce Richardson 	}
278799a2dd95SBruce Richardson 
278899a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
278999a2dd95SBruce Richardson 		rx_queue_id, queue_conf, ret);
279099a2dd95SBruce Richardson 	if (ret)
279199a2dd95SBruce Richardson 		return ret;
279299a2dd95SBruce Richardson 
279399a2dd95SBruce Richardson 	return 0;
279499a2dd95SBruce Richardson }
279599a2dd95SBruce Richardson 
279699a2dd95SBruce Richardson static int
279799a2dd95SBruce Richardson rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
279899a2dd95SBruce Richardson {
279999a2dd95SBruce Richardson 	limits->max_sz = MAX_VECTOR_SIZE;
280099a2dd95SBruce Richardson 	limits->min_sz = MIN_VECTOR_SIZE;
280199a2dd95SBruce Richardson 	limits->max_timeout_ns = MAX_VECTOR_NS;
280299a2dd95SBruce Richardson 	limits->min_timeout_ns = MIN_VECTOR_NS;
280399a2dd95SBruce Richardson 
280499a2dd95SBruce Richardson 	return 0;
280599a2dd95SBruce Richardson }
280699a2dd95SBruce Richardson 
280799a2dd95SBruce Richardson int
280899a2dd95SBruce Richardson rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
280999a2dd95SBruce Richardson 				int32_t rx_queue_id)
281099a2dd95SBruce Richardson {
281199a2dd95SBruce Richardson 	int ret = 0;
281299a2dd95SBruce Richardson 	struct rte_eventdev *dev;
2813a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
281499a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
281599a2dd95SBruce Richardson 	uint32_t cap;
281699a2dd95SBruce Richardson 	uint32_t nb_rx_poll = 0;
281799a2dd95SBruce Richardson 	uint32_t nb_wrr = 0;
281899a2dd95SBruce Richardson 	uint32_t nb_rx_intr;
281999a2dd95SBruce Richardson 	struct eth_rx_poll_entry *rx_poll = NULL;
282099a2dd95SBruce Richardson 	uint32_t *rx_wrr = NULL;
282199a2dd95SBruce Richardson 	int num_intr_vec;
282299a2dd95SBruce Richardson 
2823a1793ee8SGanapati Kundapura 	if (rxa_memzone_lookup())
2824a1793ee8SGanapati Kundapura 		return -ENOMEM;
2825a1793ee8SGanapati Kundapura 
282699a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
282799a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
282899a2dd95SBruce Richardson 
282999a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
283099a2dd95SBruce Richardson 	if (rx_adapter == NULL)
283199a2dd95SBruce Richardson 		return -EINVAL;
283299a2dd95SBruce Richardson 
283399a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
283499a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
283599a2dd95SBruce Richardson 						eth_dev_id,
283699a2dd95SBruce Richardson 						&cap);
283799a2dd95SBruce Richardson 	if (ret)
283899a2dd95SBruce Richardson 		return ret;
283999a2dd95SBruce Richardson 
284099a2dd95SBruce Richardson 	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
284199a2dd95SBruce Richardson 		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
284299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
284399a2dd95SBruce Richardson 			 (uint16_t)rx_queue_id);
284499a2dd95SBruce Richardson 		return -EINVAL;
284599a2dd95SBruce Richardson 	}
284699a2dd95SBruce Richardson 
284799a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
284899a2dd95SBruce Richardson 
284999a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
28508f1d23ecSDavid Marchand 		if (*dev->dev_ops->eth_rx_adapter_queue_del == NULL)
28518f1d23ecSDavid Marchand 			return -ENOTSUP;
285299a2dd95SBruce Richardson 		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
285399a2dd95SBruce Richardson 						&rte_eth_devices[eth_dev_id],
285499a2dd95SBruce Richardson 						rx_queue_id);
285599a2dd95SBruce Richardson 		if (ret == 0) {
285699a2dd95SBruce Richardson 			rxa_update_queue(rx_adapter,
285799a2dd95SBruce Richardson 					&rx_adapter->eth_devices[eth_dev_id],
285899a2dd95SBruce Richardson 					rx_queue_id,
285999a2dd95SBruce Richardson 					0);
286099a2dd95SBruce Richardson 			if (dev_info->nb_dev_queues == 0) {
286199a2dd95SBruce Richardson 				rte_free(dev_info->rx_queue);
286299a2dd95SBruce Richardson 				dev_info->rx_queue = NULL;
286399a2dd95SBruce Richardson 			}
286499a2dd95SBruce Richardson 		}
286599a2dd95SBruce Richardson 	} else {
286699a2dd95SBruce Richardson 		rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
286799a2dd95SBruce Richardson 			&nb_rx_poll, &nb_rx_intr, &nb_wrr);
286899a2dd95SBruce Richardson 
286999a2dd95SBruce Richardson 		ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
287099a2dd95SBruce Richardson 			&rx_poll, &rx_wrr);
287199a2dd95SBruce Richardson 		if (ret)
287299a2dd95SBruce Richardson 			return ret;
287399a2dd95SBruce Richardson 
287499a2dd95SBruce Richardson 		rte_spinlock_lock(&rx_adapter->rx_lock);
287599a2dd95SBruce Richardson 
287699a2dd95SBruce Richardson 		num_intr_vec = 0;
287799a2dd95SBruce Richardson 		if (rx_adapter->num_rx_intr > nb_rx_intr) {
287899a2dd95SBruce Richardson 
287999a2dd95SBruce Richardson 			num_intr_vec = rxa_nb_intr_vect(dev_info,
288099a2dd95SBruce Richardson 						rx_queue_id, 0);
288199a2dd95SBruce Richardson 			ret = rxa_del_intr_queue(rx_adapter, dev_info,
288299a2dd95SBruce Richardson 					rx_queue_id);
288399a2dd95SBruce Richardson 			if (ret)
288499a2dd95SBruce Richardson 				goto unlock_ret;
288599a2dd95SBruce Richardson 		}
288699a2dd95SBruce Richardson 
288799a2dd95SBruce Richardson 		if (nb_rx_intr == 0) {
288899a2dd95SBruce Richardson 			ret = rxa_free_intr_resources(rx_adapter);
288999a2dd95SBruce Richardson 			if (ret)
289099a2dd95SBruce Richardson 				goto unlock_ret;
289199a2dd95SBruce Richardson 		}
289299a2dd95SBruce Richardson 
289399a2dd95SBruce Richardson 		rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
289499a2dd95SBruce Richardson 		rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
289599a2dd95SBruce Richardson 
289699a2dd95SBruce Richardson 		rte_free(rx_adapter->eth_rx_poll);
289799a2dd95SBruce Richardson 		rte_free(rx_adapter->wrr_sched);
289899a2dd95SBruce Richardson 
289999a2dd95SBruce Richardson 		if (nb_rx_intr == 0) {
290099a2dd95SBruce Richardson 			rte_free(dev_info->intr_queue);
290199a2dd95SBruce Richardson 			dev_info->intr_queue = NULL;
290299a2dd95SBruce Richardson 		}
290399a2dd95SBruce Richardson 
290499a2dd95SBruce Richardson 		rx_adapter->eth_rx_poll = rx_poll;
290599a2dd95SBruce Richardson 		rx_adapter->wrr_sched = rx_wrr;
290699a2dd95SBruce Richardson 		rx_adapter->wrr_len = nb_wrr;
290781da8a5fSNaga Harish K S V 		/*
290881da8a5fSNaga Harish K S V 		 * reset next poll start position (wrr_pos) to avoid buffer
290981da8a5fSNaga Harish K S V 		 * overrun when wrr_len is reduced in case of queue delete
291081da8a5fSNaga Harish K S V 		 */
291181da8a5fSNaga Harish K S V 		rx_adapter->wrr_pos = 0;
291299a2dd95SBruce Richardson 		rx_adapter->num_intr_vec += num_intr_vec;
291399a2dd95SBruce Richardson 
291499a2dd95SBruce Richardson 		if (dev_info->nb_dev_queues == 0) {
291599a2dd95SBruce Richardson 			rte_free(dev_info->rx_queue);
291699a2dd95SBruce Richardson 			dev_info->rx_queue = NULL;
291799a2dd95SBruce Richardson 		}
291899a2dd95SBruce Richardson unlock_ret:
291999a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
292099a2dd95SBruce Richardson 		if (ret) {
292199a2dd95SBruce Richardson 			rte_free(rx_poll);
292299a2dd95SBruce Richardson 			rte_free(rx_wrr);
292399a2dd95SBruce Richardson 			return ret;
292499a2dd95SBruce Richardson 		}
292599a2dd95SBruce Richardson 
292699a2dd95SBruce Richardson 		rte_service_component_runstate_set(rx_adapter->service_id,
292799a2dd95SBruce Richardson 				rxa_sw_adapter_queue_count(rx_adapter));
292899a2dd95SBruce Richardson 	}
292999a2dd95SBruce Richardson 
293099a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
293199a2dd95SBruce Richardson 		rx_queue_id, ret);
2932a1793ee8SGanapati Kundapura 
293399a2dd95SBruce Richardson 	return ret;
293499a2dd95SBruce Richardson }
293599a2dd95SBruce Richardson 
293699a2dd95SBruce Richardson int
293799a2dd95SBruce Richardson rte_event_eth_rx_adapter_vector_limits_get(
293899a2dd95SBruce Richardson 	uint8_t dev_id, uint16_t eth_port_id,
293999a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_vector_limits *limits)
294099a2dd95SBruce Richardson {
294199a2dd95SBruce Richardson 	struct rte_eventdev *dev;
294299a2dd95SBruce Richardson 	uint32_t cap;
294399a2dd95SBruce Richardson 	int ret;
294499a2dd95SBruce Richardson 
294599a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
294699a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
294799a2dd95SBruce Richardson 
294899a2dd95SBruce Richardson 	if (limits == NULL)
294999a2dd95SBruce Richardson 		return -EINVAL;
295099a2dd95SBruce Richardson 
295199a2dd95SBruce Richardson 	dev = &rte_eventdevs[dev_id];
295299a2dd95SBruce Richardson 
295399a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
295499a2dd95SBruce Richardson 	if (ret) {
295599a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
295699a2dd95SBruce Richardson 				 "eth port %" PRIu16,
295799a2dd95SBruce Richardson 				 dev_id, eth_port_id);
295899a2dd95SBruce Richardson 		return ret;
295999a2dd95SBruce Richardson 	}
296099a2dd95SBruce Richardson 
296199a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
29628f1d23ecSDavid Marchand 		if (*dev->dev_ops->eth_rx_adapter_vector_limits_get == NULL)
29638f1d23ecSDavid Marchand 			return -ENOTSUP;
296499a2dd95SBruce Richardson 		ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
296599a2dd95SBruce Richardson 			dev, &rte_eth_devices[eth_port_id], limits);
296699a2dd95SBruce Richardson 	} else {
296799a2dd95SBruce Richardson 		ret = rxa_sw_vector_limits(limits);
296899a2dd95SBruce Richardson 	}
296999a2dd95SBruce Richardson 
29707f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_vector_limits_get(dev_id, eth_port_id,
29717f2d9df6SAmit Prakash Shukla 		limits->min_sz, limits->max_sz, limits->log2_sz,
29727f2d9df6SAmit Prakash Shukla 		limits->min_timeout_ns, limits->max_timeout_ns, ret);
297399a2dd95SBruce Richardson 	return ret;
297499a2dd95SBruce Richardson }
297599a2dd95SBruce Richardson 
297699a2dd95SBruce Richardson int
297799a2dd95SBruce Richardson rte_event_eth_rx_adapter_start(uint8_t id)
297899a2dd95SBruce Richardson {
297999a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_start(id);
298099a2dd95SBruce Richardson 	return rxa_ctrl(id, 1);
298199a2dd95SBruce Richardson }
298299a2dd95SBruce Richardson 
298399a2dd95SBruce Richardson int
298499a2dd95SBruce Richardson rte_event_eth_rx_adapter_stop(uint8_t id)
298599a2dd95SBruce Richardson {
298699a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_stop(id);
298799a2dd95SBruce Richardson 	return rxa_ctrl(id, 0);
298899a2dd95SBruce Richardson }
298999a2dd95SBruce Richardson 
2990995b150cSNaga Harish K S V static inline void
2991995b150cSNaga Harish K S V rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info)
2992995b150cSNaga Harish K S V {
2993995b150cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_stats *q_stats;
2994995b150cSNaga Harish K S V 
2995995b150cSNaga Harish K S V 	q_stats = queue_info->stats;
2996995b150cSNaga Harish K S V 	memset(q_stats, 0, sizeof(*q_stats));
2997995b150cSNaga Harish K S V }
2998995b150cSNaga Harish K S V 
/*
 * Aggregate adapter-wide statistics into *stats.
 *
 * The total is built from up to three sources, in this order:
 *   1. the adapter-level SW counters (when the service path is in use),
 *   2. per-queue SW counters (when per-queue event buffers are enabled),
 *   3. counters reported by PMDs with an internal event port.
 *
 * Returns 0 on success, -ENOMEM if the adapter memzone is missing,
 * -EINVAL on a bad id or NULL stats pointer.
 */
int
rte_event_eth_rx_adapter_stats_get(uint8_t id,
			       struct rte_event_eth_rx_adapter_stats *stats)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_event_enqueue_buffer *buf;
	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_eth_rx_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct rte_event_eth_rx_adapter_stats *q_stats;
	uint32_t i, j;
	int ret;

	rte_eventdev_trace_eth_rx_adapter_stats_get(id, stats);

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter  == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));

	/* Seed the result with the adapter-level SW counters. */
	if (rx_adapter->service_inited)
		*stats = rx_adapter->stats;

	RTE_ETH_FOREACH_DEV(i) {
		dev_info = &rx_adapter->eth_devices[i];

		/* Fold in per-queue SW counters for every enabled queue. */
		if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) {

			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
			     j++) {
				queue_info = &dev_info->rx_queue[j];
				if (!queue_info->queue_enabled)
					continue;
				q_stats = queue_info->stats;

				stats->rx_packets += q_stats->rx_packets;
				stats->rx_poll_count += q_stats->rx_poll_count;
				stats->rx_enq_count += q_stats->rx_enq_count;
				stats->rx_enq_retry += q_stats->rx_enq_retry;
				stats->rx_dropped += q_stats->rx_dropped;
				stats->rx_enq_block_cycles +=
						q_stats->rx_enq_block_cycles;
			}
		}

		/* Only PMDs with an internal event port and a stats_get op
		 * contribute HW counters; a failing op is skipped silently.
		 */
		if (dev_info->internal_event_port == 0 ||
			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
						&rte_eth_devices[i],
						&dev_stats);
		if (ret)
			continue;
		/* Only packet and enqueue counts are summed from PMD stats. */
		dev_stats_sum.rx_packets += dev_stats.rx_packets;
		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
	}

	/* Buffer occupancy is reported from the shared enqueue buffer;
	 * NOTE(review): with use_queue_event_buf this shared buffer is
	 * presumably unused -- confirm these two fields are meaningful then.
	 */
	buf = &rx_adapter->event_enqueue_buffer;
	stats->rx_packets += dev_stats_sum.rx_packets;
	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
	stats->rx_event_buf_count = buf->count;
	stats->rx_event_buf_size = buf->events_size;

	return 0;
}
3073995b150cSNaga Harish K S V 
/*
 * Fetch per-queue statistics for one Rx queue of one ethdev.
 *
 * Only valid when the adapter was created with per-queue event buffers
 * (use_queue_event_buf). SW counters are reported for service-managed
 * queues (internal_event_port == 0); a PMD queue_stats_get op, when
 * present, is given the final word and its return value is propagated.
 *
 * Returns 0 on success, -ENOMEM if the adapter memzone is missing,
 * -EINVAL on any validation failure.
 */
int
rte_event_eth_rx_adapter_queue_stats_get(uint8_t id,
		uint16_t eth_dev_id,
		uint16_t rx_queue_id,
		struct rte_event_eth_rx_adapter_queue_stats *stats)
{
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	struct eth_event_enqueue_buffer *event_buf;
	struct rte_event_eth_rx_adapter_stats *q_stats;
	struct rte_eventdev *dev;

	rte_eventdev_trace_eth_rx_adapter_queue_stats_get(id, eth_dev_id,
							  rx_queue_id, stats);

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);

	if (rx_adapter == NULL || stats == NULL)
		return -EINVAL;

	/* Per-queue stats only exist when per-queue buffers are in use. */
	if (!rx_adapter->use_queue_event_buf)
		return -EINVAL;

	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
		return -EINVAL;
	}

	/* The queue must have been added to this adapter instance. */
	dev_info = &rx_adapter->eth_devices[eth_dev_id];
	if (dev_info->rx_queue == NULL ||
	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
		return -EINVAL;
	}

	/* SW counters apply only to service-managed (non internal-port)
	 * devices.
	 */
	if (dev_info->internal_event_port == 0) {
		queue_info = &dev_info->rx_queue[rx_queue_id];
		event_buf = queue_info->event_buf;
		q_stats = queue_info->stats;

		stats->rx_event_buf_count = event_buf->count;
		stats->rx_event_buf_size = event_buf->events_size;
		stats->rx_packets = q_stats->rx_packets;
		stats->rx_poll_count = q_stats->rx_poll_count;
		stats->rx_dropped = q_stats->rx_dropped;
	}

	/* A PMD-provided op may overwrite/extend the SW values above. */
	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) {
		return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id, stats);
	}

	return 0;
}
313799a2dd95SBruce Richardson 
313899a2dd95SBruce Richardson int
313999a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_reset(uint8_t id)
314099a2dd95SBruce Richardson {
3141a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
314299a2dd95SBruce Richardson 	struct rte_eventdev *dev;
314399a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
3144995b150cSNaga Harish K S V 	struct eth_rx_queue_info *queue_info;
3145995b150cSNaga Harish K S V 	uint32_t i, j;
314699a2dd95SBruce Richardson 
31477f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_stats_reset(id);
31487f2d9df6SAmit Prakash Shukla 
3149da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
3150da781e64SGanapati Kundapura 		return -ENOMEM;
3151da781e64SGanapati Kundapura 
315299a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
315399a2dd95SBruce Richardson 
315499a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
315599a2dd95SBruce Richardson 	if (rx_adapter == NULL)
315699a2dd95SBruce Richardson 		return -EINVAL;
315799a2dd95SBruce Richardson 
315899a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
3159995b150cSNaga Harish K S V 
316099a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
316199a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
3162995b150cSNaga Harish K S V 
3163995b150cSNaga Harish K S V 		if (rx_adapter->use_queue_event_buf  && dev_info->rx_queue) {
3164995b150cSNaga Harish K S V 
3165995b150cSNaga Harish K S V 			for (j = 0; j < dev_info->dev->data->nb_rx_queues;
3166995b150cSNaga Harish K S V 						j++) {
3167995b150cSNaga Harish K S V 				queue_info = &dev_info->rx_queue[j];
3168995b150cSNaga Harish K S V 				if (!queue_info->queue_enabled)
3169995b150cSNaga Harish K S V 					continue;
3170995b150cSNaga Harish K S V 				rxa_queue_stats_reset(queue_info);
3171995b150cSNaga Harish K S V 			}
3172995b150cSNaga Harish K S V 		}
3173995b150cSNaga Harish K S V 
317499a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
317599a2dd95SBruce Richardson 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
317699a2dd95SBruce Richardson 			continue;
317799a2dd95SBruce Richardson 		(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
317899a2dd95SBruce Richardson 							&rte_eth_devices[i]);
317999a2dd95SBruce Richardson 	}
318099a2dd95SBruce Richardson 
318199a2dd95SBruce Richardson 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
3182995b150cSNaga Harish K S V 
3183995b150cSNaga Harish K S V 	return 0;
3184995b150cSNaga Harish K S V }
3185995b150cSNaga Harish K S V 
3186995b150cSNaga Harish K S V int
3187995b150cSNaga Harish K S V rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id,
3188995b150cSNaga Harish K S V 		uint16_t eth_dev_id,
3189995b150cSNaga Harish K S V 		uint16_t rx_queue_id)
3190995b150cSNaga Harish K S V {
3191995b150cSNaga Harish K S V 	struct event_eth_rx_adapter *rx_adapter;
3192995b150cSNaga Harish K S V 	struct eth_device_info *dev_info;
3193995b150cSNaga Harish K S V 	struct eth_rx_queue_info *queue_info;
3194995b150cSNaga Harish K S V 	struct rte_eventdev *dev;
3195995b150cSNaga Harish K S V 
31967f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_queue_stats_reset(id, eth_dev_id,
31977f2d9df6SAmit Prakash Shukla 							    rx_queue_id);
31987f2d9df6SAmit Prakash Shukla 
3199995b150cSNaga Harish K S V 	if (rxa_memzone_lookup())
3200995b150cSNaga Harish K S V 		return -ENOMEM;
3201995b150cSNaga Harish K S V 
3202995b150cSNaga Harish K S V 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
3203995b150cSNaga Harish K S V 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
3204995b150cSNaga Harish K S V 
3205995b150cSNaga Harish K S V 	rx_adapter = rxa_id_to_adapter(id);
3206995b150cSNaga Harish K S V 	if (rx_adapter == NULL)
3207995b150cSNaga Harish K S V 		return -EINVAL;
3208995b150cSNaga Harish K S V 
3209995b150cSNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf)
3210995b150cSNaga Harish K S V 		return -EINVAL;
3211995b150cSNaga Harish K S V 
3212995b150cSNaga Harish K S V 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3213995b150cSNaga Harish K S V 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id);
3214995b150cSNaga Harish K S V 		return -EINVAL;
3215995b150cSNaga Harish K S V 	}
3216995b150cSNaga Harish K S V 
3217995b150cSNaga Harish K S V 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
3218995b150cSNaga Harish K S V 
3219995b150cSNaga Harish K S V 	if (dev_info->rx_queue == NULL ||
3220995b150cSNaga Harish K S V 	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3221995b150cSNaga Harish K S V 		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3222995b150cSNaga Harish K S V 		return -EINVAL;
3223995b150cSNaga Harish K S V 	}
3224995b150cSNaga Harish K S V 
3225de3c3a2fSPavan Nikhilesh 	if (dev_info->internal_event_port == 0) {
3226995b150cSNaga Harish K S V 		queue_info = &dev_info->rx_queue[rx_queue_id];
3227995b150cSNaga Harish K S V 		rxa_queue_stats_reset(queue_info);
3228de3c3a2fSPavan Nikhilesh 	}
3229995b150cSNaga Harish K S V 
3230995b150cSNaga Harish K S V 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
3231995b150cSNaga Harish K S V 	if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) {
3232995b150cSNaga Harish K S V 		return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev,
3233995b150cSNaga Harish K S V 						&rte_eth_devices[eth_dev_id],
3234995b150cSNaga Harish K S V 						rx_queue_id);
3235995b150cSNaga Harish K S V 	}
3236995b150cSNaga Harish K S V 
323799a2dd95SBruce Richardson 	return 0;
323899a2dd95SBruce Richardson }
323999a2dd95SBruce Richardson 
324099a2dd95SBruce Richardson int
324199a2dd95SBruce Richardson rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
324299a2dd95SBruce Richardson {
3243a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
324499a2dd95SBruce Richardson 
3245da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
3246da781e64SGanapati Kundapura 		return -ENOMEM;
3247da781e64SGanapati Kundapura 
324899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
324999a2dd95SBruce Richardson 
325099a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
325199a2dd95SBruce Richardson 	if (rx_adapter == NULL || service_id == NULL)
325299a2dd95SBruce Richardson 		return -EINVAL;
325399a2dd95SBruce Richardson 
325499a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
325599a2dd95SBruce Richardson 		*service_id = rx_adapter->service_id;
325699a2dd95SBruce Richardson 
32577f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_service_id_get(id, *service_id);
32587f2d9df6SAmit Prakash Shukla 
325999a2dd95SBruce Richardson 	return rx_adapter->service_inited ? 0 : -ESRCH;
326099a2dd95SBruce Richardson }
326199a2dd95SBruce Richardson 
326299a2dd95SBruce Richardson int
32636ff23631SNaga Harish K S V rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
32646ff23631SNaga Harish K S V {
32656ff23631SNaga Harish K S V 	struct event_eth_rx_adapter *rx_adapter;
32666ff23631SNaga Harish K S V 
32676ff23631SNaga Harish K S V 	if (rxa_memzone_lookup())
32686ff23631SNaga Harish K S V 		return -ENOMEM;
32696ff23631SNaga Harish K S V 
32706ff23631SNaga Harish K S V 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
32716ff23631SNaga Harish K S V 
32726ff23631SNaga Harish K S V 	rx_adapter = rxa_id_to_adapter(id);
32736ff23631SNaga Harish K S V 	if (rx_adapter == NULL || event_port_id == NULL)
32746ff23631SNaga Harish K S V 		return -EINVAL;
32756ff23631SNaga Harish K S V 
32766ff23631SNaga Harish K S V 	if (rx_adapter->service_inited)
32776ff23631SNaga Harish K S V 		*event_port_id = rx_adapter->event_port_id;
32786ff23631SNaga Harish K S V 
32797f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_event_port_get(id, *event_port_id);
32807f2d9df6SAmit Prakash Shukla 
32816ff23631SNaga Harish K S V 	return rx_adapter->service_inited ? 0 : -ESRCH;
32826ff23631SNaga Harish K S V }
32836ff23631SNaga Harish K S V 
32846ff23631SNaga Harish K S V int
328599a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_register(uint8_t id,
328699a2dd95SBruce Richardson 					uint16_t eth_dev_id,
328799a2dd95SBruce Richardson 					rte_event_eth_rx_adapter_cb_fn cb_fn,
328899a2dd95SBruce Richardson 					void *cb_arg)
328999a2dd95SBruce Richardson {
3290a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
329199a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
329299a2dd95SBruce Richardson 	uint32_t cap;
329399a2dd95SBruce Richardson 	int ret;
329499a2dd95SBruce Richardson 
32957f2d9df6SAmit Prakash Shukla 	rte_eventdev_trace_eth_rx_adapter_cb_register(id, eth_dev_id, cb_fn,
32967f2d9df6SAmit Prakash Shukla 						      cb_arg);
32977f2d9df6SAmit Prakash Shukla 
329899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
329999a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
330099a2dd95SBruce Richardson 
330199a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
330299a2dd95SBruce Richardson 	if (rx_adapter == NULL)
330399a2dd95SBruce Richardson 		return -EINVAL;
330499a2dd95SBruce Richardson 
330599a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
330699a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
330799a2dd95SBruce Richardson 		return -EINVAL;
330899a2dd95SBruce Richardson 
330999a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
331099a2dd95SBruce Richardson 						eth_dev_id,
331199a2dd95SBruce Richardson 						&cap);
331299a2dd95SBruce Richardson 	if (ret) {
331399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
331499a2dd95SBruce Richardson 			"eth port %" PRIu16, id, eth_dev_id);
331599a2dd95SBruce Richardson 		return ret;
331699a2dd95SBruce Richardson 	}
331799a2dd95SBruce Richardson 
331899a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
331999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
332099a2dd95SBruce Richardson 				PRIu16, eth_dev_id);
332199a2dd95SBruce Richardson 		return -EINVAL;
332299a2dd95SBruce Richardson 	}
332399a2dd95SBruce Richardson 
332499a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->rx_lock);
332599a2dd95SBruce Richardson 	dev_info->cb_fn = cb_fn;
332699a2dd95SBruce Richardson 	dev_info->cb_arg = cb_arg;
332799a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->rx_lock);
332899a2dd95SBruce Richardson 
332999a2dd95SBruce Richardson 	return 0;
333099a2dd95SBruce Richardson }
3331da781e64SGanapati Kundapura 
/*
 * Reconstruct the queue configuration that was supplied when rx_queue_id
 * of eth_dev_id was added to adapter id, writing it into *queue_conf.
 *
 * Values are rebuilt from the adapter's internal queue bookkeeping
 * (event, weight, vector settings, buffer size); a PMD queue_conf_get
 * op, when present, is then given the final word and its return value is
 * propagated.
 *
 * Returns 0 on success, -ENOMEM if the adapter memzone is missing,
 * -EINVAL on any validation failure.
 */
int
rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
			uint16_t eth_dev_id,
			uint16_t rx_queue_id,
			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
/* Convert timer ticks back to nanoseconds at the given tick frequency. */
#define TICK2NSEC(_ticks, _freq) (((_ticks) * (1E9)) / (_freq))
	struct rte_eventdev *dev;
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	struct eth_rx_queue_info *queue_info;
	int ret;

	rte_eventdev_trace_eth_rx_adapter_queue_conf_get(id, eth_dev_id,
							 rx_queue_id, queue_conf);

	if (rxa_memzone_lookup())
		return -ENOMEM;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
		return -EINVAL;
	}

	if (queue_conf == NULL) {
		RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
		return -EINVAL;
	}

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	/* The queue must have been added to this adapter instance. */
	dev_info = &rx_adapter->eth_devices[eth_dev_id];
	if (dev_info->rx_queue == NULL ||
	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
		return -EINVAL;
	}

	queue_info = &dev_info->rx_queue[rx_queue_id];

	memset(queue_conf, 0, sizeof(*queue_conf));
	queue_conf->rx_queue_flags = 0;
	/* A non-zero flow-id mask means the caller asked for a fixed
	 * flow id when the queue was added.
	 */
	if (queue_info->flow_id_mask != 0)
		queue_conf->rx_queue_flags |=
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
	queue_conf->servicing_weight = queue_info->wt;

	queue_conf->ev.event = queue_info->event;

	queue_conf->vector_sz = queue_info->vector_data.max_vector_count;
	queue_conf->vector_mp = queue_info->vector_data.vector_pool;
	/* need to be converted from ticks to ns */
	queue_conf->vector_timeout_ns = TICK2NSEC(
		queue_info->vector_data.vector_timeout_ticks, rte_get_timer_hz());

	if (queue_info->event_buf != NULL)
		queue_conf->event_buf_size = queue_info->event_buf->events_size;
	else
		queue_conf->event_buf_size = 0;

	/* A PMD-provided op may overwrite the reconstructed values. */
	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
		ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id,
						queue_conf);
		return ret;
	}

	return 0;
}
3408814d0170SGanapati Kundapura 
3409a1793ee8SGanapati Kundapura static int
3410a1793ee8SGanapati Kundapura rxa_is_queue_added(struct event_eth_rx_adapter *rx_adapter,
3411a1793ee8SGanapati Kundapura 		   uint16_t eth_dev_id,
3412a1793ee8SGanapati Kundapura 		   uint16_t rx_queue_id)
3413a1793ee8SGanapati Kundapura {
3414a1793ee8SGanapati Kundapura 	struct eth_device_info *dev_info;
3415a1793ee8SGanapati Kundapura 	struct eth_rx_queue_info *queue_info;
3416a1793ee8SGanapati Kundapura 
3417a1793ee8SGanapati Kundapura 	if (!rx_adapter->eth_devices)
3418a1793ee8SGanapati Kundapura 		return 0;
3419a1793ee8SGanapati Kundapura 
3420a1793ee8SGanapati Kundapura 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
3421a1793ee8SGanapati Kundapura 	if (!dev_info || !dev_info->rx_queue)
3422a1793ee8SGanapati Kundapura 		return 0;
3423a1793ee8SGanapati Kundapura 
3424a1793ee8SGanapati Kundapura 	queue_info = &dev_info->rx_queue[rx_queue_id];
3425a1793ee8SGanapati Kundapura 
3426a1793ee8SGanapati Kundapura 	return queue_info && queue_info->queue_enabled;
3427a1793ee8SGanapati Kundapura }
3428a1793ee8SGanapati Kundapura 
/* Eventdev instance that the given Rx adapter is attached to. */
#define rxa_evdev(rx_adapter) (&rte_eventdevs[(rx_adapter)->eventdev_id])

/* PMD instance_get op of that eventdev; may evaluate to NULL when the
 * PMD does not implement it.
 */
#define rxa_dev_instance_get(rx_adapter) \
		rxa_evdev((rx_adapter))->dev_ops->eth_rx_adapter_instance_get
3433a1793ee8SGanapati Kundapura 
3434a1793ee8SGanapati Kundapura int
3435a1793ee8SGanapati Kundapura rte_event_eth_rx_adapter_instance_get(uint16_t eth_dev_id,
3436a1793ee8SGanapati Kundapura 				      uint16_t rx_queue_id,
3437a1793ee8SGanapati Kundapura 				      uint8_t *rxa_inst_id)
3438a1793ee8SGanapati Kundapura {
3439a1793ee8SGanapati Kundapura 	uint8_t id;
3440a1793ee8SGanapati Kundapura 	int ret = -EINVAL;
3441a1793ee8SGanapati Kundapura 	uint32_t caps;
3442a1793ee8SGanapati Kundapura 	struct event_eth_rx_adapter *rx_adapter;
3443a1793ee8SGanapati Kundapura 
3444a1793ee8SGanapati Kundapura 	if (rxa_memzone_lookup())
3445a1793ee8SGanapati Kundapura 		return -ENOMEM;
3446a1793ee8SGanapati Kundapura 
3447a1793ee8SGanapati Kundapura 	if (eth_dev_id >= rte_eth_dev_count_avail()) {
3448a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid ethernet port id %u", eth_dev_id);
3449a1793ee8SGanapati Kundapura 		return -EINVAL;
3450a1793ee8SGanapati Kundapura 	}
3451a1793ee8SGanapati Kundapura 
3452a1793ee8SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3453a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid Rx queue %u", rx_queue_id);
3454a1793ee8SGanapati Kundapura 		return -EINVAL;
3455a1793ee8SGanapati Kundapura 	}
3456a1793ee8SGanapati Kundapura 
3457a1793ee8SGanapati Kundapura 	if (rxa_inst_id == NULL) {
3458a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("rxa_inst_id cannot be NULL");
3459a1793ee8SGanapati Kundapura 		return -EINVAL;
3460a1793ee8SGanapati Kundapura 	}
3461a1793ee8SGanapati Kundapura 
3462a1793ee8SGanapati Kundapura 	/* Iterate through all adapter instances */
3463a1793ee8SGanapati Kundapura 	for (id = 0; id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; id++) {
3464a1793ee8SGanapati Kundapura 		rx_adapter = rxa_id_to_adapter(id);
3465a1793ee8SGanapati Kundapura 		if (!rx_adapter)
3466a1793ee8SGanapati Kundapura 			continue;
3467a1793ee8SGanapati Kundapura 
3468a1793ee8SGanapati Kundapura 		if (rxa_is_queue_added(rx_adapter, eth_dev_id, rx_queue_id)) {
3469a1793ee8SGanapati Kundapura 			*rxa_inst_id = rx_adapter->id;
3470a1793ee8SGanapati Kundapura 			ret = 0;
3471a1793ee8SGanapati Kundapura 		}
3472a1793ee8SGanapati Kundapura 
3473a1793ee8SGanapati Kundapura 		/* Rx adapter internally mainatains queue information
3474a1793ee8SGanapati Kundapura 		 * for both internal port and DPDK service port.
3475a1793ee8SGanapati Kundapura 		 * Eventdev PMD callback is called for future proof only and
3476a1793ee8SGanapati Kundapura 		 * overrides the above return value if defined.
3477a1793ee8SGanapati Kundapura 		 */
3478a1793ee8SGanapati Kundapura 		caps = 0;
3479a1793ee8SGanapati Kundapura 		if (!rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
3480a1793ee8SGanapati Kundapura 						      eth_dev_id,
3481a1793ee8SGanapati Kundapura 						      &caps)) {
34820aedd85fSShijith Thotton 			if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT &&
34830aedd85fSShijith Thotton 			    rxa_dev_instance_get(rx_adapter))
34840aedd85fSShijith Thotton 				ret = rxa_dev_instance_get(rx_adapter)(eth_dev_id, rx_queue_id,
34850aedd85fSShijith Thotton 								       rxa_inst_id);
3486a1793ee8SGanapati Kundapura 		}
3487a1793ee8SGanapati Kundapura 
3488a1793ee8SGanapati Kundapura 		/* return if entry found */
34897f2d9df6SAmit Prakash Shukla 		if (ret == 0) {
34907f2d9df6SAmit Prakash Shukla 			rte_eventdev_trace_eth_rx_adapter_instance_get(eth_dev_id, rx_queue_id,
34917f2d9df6SAmit Prakash Shukla 								       *rxa_inst_id);
3492a1793ee8SGanapati Kundapura 			return ret;
3493a1793ee8SGanapati Kundapura 		}
34947f2d9df6SAmit Prakash Shukla 	}
3495a1793ee8SGanapati Kundapura 
3496a1793ee8SGanapati Kundapura 	return -EINVAL;
3497a1793ee8SGanapati Kundapura }
3498a1793ee8SGanapati Kundapura 
34993716f521SNaga Harish K S V static int
35003716f521SNaga Harish K S V rxa_caps_check(struct event_eth_rx_adapter *rxa)
35013716f521SNaga Harish K S V {
35023716f521SNaga Harish K S V 	if (!rxa->nb_queues)
35033716f521SNaga Harish K S V 		return -EINVAL;
35043716f521SNaga Harish K S V 
3505eda6477dSPavan Nikhilesh 	/* Check if there is at least one non-internal ethernet port. */
3506eda6477dSPavan Nikhilesh 	if (rxa->service_inited)
35073716f521SNaga Harish K S V 		return 0;
3508eda6477dSPavan Nikhilesh 
3509eda6477dSPavan Nikhilesh 	return -ENOTSUP;
35103716f521SNaga Harish K S V }
35113716f521SNaga Harish K S V 
35123716f521SNaga Harish K S V int
35133716f521SNaga Harish K S V rte_event_eth_rx_adapter_runtime_params_init(
35143716f521SNaga Harish K S V 		struct rte_event_eth_rx_adapter_runtime_params *params)
35153716f521SNaga Harish K S V {
35163716f521SNaga Harish K S V 	if (params == NULL)
35173716f521SNaga Harish K S V 		return -EINVAL;
35183716f521SNaga Harish K S V 
35193716f521SNaga Harish K S V 	memset(params, 0, sizeof(struct rte_event_eth_rx_adapter_runtime_params));
35203716f521SNaga Harish K S V 	params->max_nb_rx = RXA_NB_RX_WORK_DEFAULT;
35213716f521SNaga Harish K S V 
35223716f521SNaga Harish K S V 	return 0;
35233716f521SNaga Harish K S V }
35243716f521SNaga Harish K S V 
35253716f521SNaga Harish K S V int
35263716f521SNaga Harish K S V rte_event_eth_rx_adapter_runtime_params_set(uint8_t id,
35273716f521SNaga Harish K S V 		struct rte_event_eth_rx_adapter_runtime_params *params)
35283716f521SNaga Harish K S V {
35293716f521SNaga Harish K S V 	struct event_eth_rx_adapter *rxa;
35303716f521SNaga Harish K S V 	int ret;
35313716f521SNaga Harish K S V 
35323716f521SNaga Harish K S V 	if (params == NULL)
35333716f521SNaga Harish K S V 		return -EINVAL;
35343716f521SNaga Harish K S V 
35353716f521SNaga Harish K S V 	if (rxa_memzone_lookup())
35363716f521SNaga Harish K S V 		return -ENOMEM;
35373716f521SNaga Harish K S V 
35383716f521SNaga Harish K S V 	rxa = rxa_id_to_adapter(id);
35393716f521SNaga Harish K S V 	if (rxa == NULL)
35403716f521SNaga Harish K S V 		return -EINVAL;
35413716f521SNaga Harish K S V 
35423716f521SNaga Harish K S V 	ret = rxa_caps_check(rxa);
35433716f521SNaga Harish K S V 	if (ret)
35443716f521SNaga Harish K S V 		return ret;
35453716f521SNaga Harish K S V 
35463716f521SNaga Harish K S V 	rte_spinlock_lock(&rxa->rx_lock);
35473716f521SNaga Harish K S V 	rxa->max_nb_rx = params->max_nb_rx;
35483716f521SNaga Harish K S V 	rte_spinlock_unlock(&rxa->rx_lock);
35493716f521SNaga Harish K S V 
35503716f521SNaga Harish K S V 	return 0;
35513716f521SNaga Harish K S V }
35523716f521SNaga Harish K S V 
35533716f521SNaga Harish K S V int
35543716f521SNaga Harish K S V rte_event_eth_rx_adapter_runtime_params_get(uint8_t id,
35553716f521SNaga Harish K S V 		struct rte_event_eth_rx_adapter_runtime_params *params)
35563716f521SNaga Harish K S V {
35573716f521SNaga Harish K S V 	struct event_eth_rx_adapter *rxa;
35583716f521SNaga Harish K S V 	int ret;
35593716f521SNaga Harish K S V 
35603716f521SNaga Harish K S V 	if (params == NULL)
35613716f521SNaga Harish K S V 		return -EINVAL;
35623716f521SNaga Harish K S V 
35633716f521SNaga Harish K S V 	if (rxa_memzone_lookup())
35643716f521SNaga Harish K S V 		return -ENOMEM;
35653716f521SNaga Harish K S V 
35663716f521SNaga Harish K S V 	rxa = rxa_id_to_adapter(id);
35673716f521SNaga Harish K S V 	if (rxa == NULL)
35683716f521SNaga Harish K S V 		return -EINVAL;
35693716f521SNaga Harish K S V 
35703716f521SNaga Harish K S V 	ret = rxa_caps_check(rxa);
35713716f521SNaga Harish K S V 	if (ret)
35723716f521SNaga Harish K S V 		return ret;
35733716f521SNaga Harish K S V 
35743716f521SNaga Harish K S V 	params->max_nb_rx = rxa->max_nb_rx;
35753716f521SNaga Harish K S V 
35763716f521SNaga Harish K S V 	return 0;
35773716f521SNaga Harish K S V }
35783716f521SNaga Harish K S V 
35793716f521SNaga Harish K S V /* RX-adapter telemetry callbacks */
3580af0785a2SBruce Richardson #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_uint(d, #s, stats.s)
3581814d0170SGanapati Kundapura 
3582814d0170SGanapati Kundapura static int
3583814d0170SGanapati Kundapura handle_rxa_stats(const char *cmd __rte_unused,
3584814d0170SGanapati Kundapura 		 const char *params,
3585814d0170SGanapati Kundapura 		 struct rte_tel_data *d)
3586814d0170SGanapati Kundapura {
3587814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3588814d0170SGanapati Kundapura 	struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3589814d0170SGanapati Kundapura 
3590814d0170SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3591814d0170SGanapati Kundapura 		return -1;
3592814d0170SGanapati Kundapura 
3593814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3594814d0170SGanapati Kundapura 	rx_adapter_id = atoi(params);
3595814d0170SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3596814d0170SGanapati Kundapura 
3597814d0170SGanapati Kundapura 	/* Get Rx adapter stats */
3598814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3599814d0170SGanapati Kundapura 					       &rx_adptr_stats)) {
3600ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats");
3601814d0170SGanapati Kundapura 		return -1;
3602814d0170SGanapati Kundapura 	}
3603814d0170SGanapati Kundapura 
3604814d0170SGanapati Kundapura 	rte_tel_data_start_dict(d);
3605af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_adapter_id", rx_adapter_id);
3606814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3607814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3608814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3609814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3610814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3611814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3612814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3613814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3614814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3615814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3616814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3617186ca6d3SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3618186ca6d3SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3619814d0170SGanapati Kundapura 
3620814d0170SGanapati Kundapura 	return 0;
3621814d0170SGanapati Kundapura }
3622814d0170SGanapati Kundapura 
3623814d0170SGanapati Kundapura static int
3624814d0170SGanapati Kundapura handle_rxa_stats_reset(const char *cmd __rte_unused,
3625814d0170SGanapati Kundapura 		       const char *params,
3626814d0170SGanapati Kundapura 		       struct rte_tel_data *d __rte_unused)
3627814d0170SGanapati Kundapura {
3628814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3629814d0170SGanapati Kundapura 
3630b450a990SDavid Marchand 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3631814d0170SGanapati Kundapura 		return -1;
3632814d0170SGanapati Kundapura 
3633814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3634814d0170SGanapati Kundapura 	rx_adapter_id = atoi(params);
3635814d0170SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3636814d0170SGanapati Kundapura 
3637814d0170SGanapati Kundapura 	/* Reset Rx adapter stats */
3638814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3639ae282b06SDavid Marchand 		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats");
3640814d0170SGanapati Kundapura 		return -1;
3641814d0170SGanapati Kundapura 	}
3642814d0170SGanapati Kundapura 
3643814d0170SGanapati Kundapura 	return 0;
3644814d0170SGanapati Kundapura }
3645814d0170SGanapati Kundapura 
3646814d0170SGanapati Kundapura static int
3647814d0170SGanapati Kundapura handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3648814d0170SGanapati Kundapura 			  const char *params,
3649814d0170SGanapati Kundapura 			  struct rte_tel_data *d)
3650814d0170SGanapati Kundapura {
3651814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3652814d0170SGanapati Kundapura 	uint16_t rx_queue_id;
365374b034ffSWeiguo Li 	int eth_dev_id, ret = -1;
3654814d0170SGanapati Kundapura 	char *token, *l_params;
3655814d0170SGanapati Kundapura 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3656814d0170SGanapati Kundapura 
3657814d0170SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3658814d0170SGanapati Kundapura 		return -1;
3659814d0170SGanapati Kundapura 
3660814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3661814d0170SGanapati Kundapura 	l_params = strdup(params);
366274b034ffSWeiguo Li 	if (l_params == NULL)
366374b034ffSWeiguo Li 		return -ENOMEM;
3664814d0170SGanapati Kundapura 	token = strtok(l_params, ",");
366574b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3666814d0170SGanapati Kundapura 	rx_adapter_id = strtoul(token, NULL, 10);
366774b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
3668814d0170SGanapati Kundapura 
3669814d0170SGanapati Kundapura 	token = strtok(NULL, ",");
367074b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3671814d0170SGanapati Kundapura 
3672814d0170SGanapati Kundapura 	/* Get device ID from parameter string */
3673814d0170SGanapati Kundapura 	eth_dev_id = strtoul(token, NULL, 10);
3674c07da8e3SDavid Marchand 	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
3675814d0170SGanapati Kundapura 
3676814d0170SGanapati Kundapura 	token = strtok(NULL, ",");
367774b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3678814d0170SGanapati Kundapura 
3679814d0170SGanapati Kundapura 	/* Get Rx queue ID from parameter string */
3680814d0170SGanapati Kundapura 	rx_queue_id = strtoul(token, NULL, 10);
3681814d0170SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3682814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
368374b034ffSWeiguo Li 		ret = -EINVAL;
368474b034ffSWeiguo Li 		goto error;
3685814d0170SGanapati Kundapura 	}
3686814d0170SGanapati Kundapura 
3687814d0170SGanapati Kundapura 	token = strtok(NULL, "\0");
3688814d0170SGanapati Kundapura 	if (token != NULL)
3689814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
36907be78d02SJosh Soref 				 " telemetry command, ignoring");
369174b034ffSWeiguo Li 	/* Parsing parameter finished */
369274b034ffSWeiguo Li 	free(l_params);
3693814d0170SGanapati Kundapura 
3694814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3695814d0170SGanapati Kundapura 						    rx_queue_id, &queue_conf)) {
3696814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3697814d0170SGanapati Kundapura 		return -1;
3698814d0170SGanapati Kundapura 	}
3699814d0170SGanapati Kundapura 
3700814d0170SGanapati Kundapura 	rte_tel_data_start_dict(d);
3701af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_adapter_id", rx_adapter_id);
3702af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "eth_dev_id", eth_dev_id);
3703af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_queue_id", rx_queue_id);
3704814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf, rx_queue_flags);
3705814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf, servicing_weight);
3706814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, queue_id);
3707814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, sched_type);
3708814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, priority);
3709814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, flow_id);
3710814d0170SGanapati Kundapura 
3711814d0170SGanapati Kundapura 	return 0;
371274b034ffSWeiguo Li 
371374b034ffSWeiguo Li error:
371474b034ffSWeiguo Li 	free(l_params);
371574b034ffSWeiguo Li 	return ret;
3716814d0170SGanapati Kundapura }
3717814d0170SGanapati Kundapura 
37189e583185SNaga Harish K S V static int
37199e583185SNaga Harish K S V handle_rxa_get_queue_stats(const char *cmd __rte_unused,
37209e583185SNaga Harish K S V 			   const char *params,
37219e583185SNaga Harish K S V 			   struct rte_tel_data *d)
37229e583185SNaga Harish K S V {
37239e583185SNaga Harish K S V 	uint8_t rx_adapter_id;
37249e583185SNaga Harish K S V 	uint16_t rx_queue_id;
372574b034ffSWeiguo Li 	int eth_dev_id, ret = -1;
37269e583185SNaga Harish K S V 	char *token, *l_params;
37279e583185SNaga Harish K S V 	struct rte_event_eth_rx_adapter_queue_stats q_stats;
37289e583185SNaga Harish K S V 
37299e583185SNaga Harish K S V 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
37309e583185SNaga Harish K S V 		return -1;
37319e583185SNaga Harish K S V 
37329e583185SNaga Harish K S V 	/* Get Rx adapter ID from parameter string */
37339e583185SNaga Harish K S V 	l_params = strdup(params);
373474b034ffSWeiguo Li 	if (l_params == NULL)
373574b034ffSWeiguo Li 		return -ENOMEM;
37369e583185SNaga Harish K S V 	token = strtok(l_params, ",");
373774b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
37389e583185SNaga Harish K S V 	rx_adapter_id = strtoul(token, NULL, 10);
373974b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
37409e583185SNaga Harish K S V 
37419e583185SNaga Harish K S V 	token = strtok(NULL, ",");
374274b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
37439e583185SNaga Harish K S V 
37449e583185SNaga Harish K S V 	/* Get device ID from parameter string */
37459e583185SNaga Harish K S V 	eth_dev_id = strtoul(token, NULL, 10);
3746c07da8e3SDavid Marchand 	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
37479e583185SNaga Harish K S V 
37489e583185SNaga Harish K S V 	token = strtok(NULL, ",");
374974b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
37509e583185SNaga Harish K S V 
37519e583185SNaga Harish K S V 	/* Get Rx queue ID from parameter string */
37529e583185SNaga Harish K S V 	rx_queue_id = strtoul(token, NULL, 10);
37539e583185SNaga Harish K S V 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
37549e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
375574b034ffSWeiguo Li 		ret = -EINVAL;
375674b034ffSWeiguo Li 		goto error;
37579e583185SNaga Harish K S V 	}
37589e583185SNaga Harish K S V 
37599e583185SNaga Harish K S V 	token = strtok(NULL, "\0");
37609e583185SNaga Harish K S V 	if (token != NULL)
37619e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
37627be78d02SJosh Soref 				 " telemetry command, ignoring");
376374b034ffSWeiguo Li 	/* Parsing parameter finished */
376474b034ffSWeiguo Li 	free(l_params);
37659e583185SNaga Harish K S V 
37669e583185SNaga Harish K S V 	if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
37679e583185SNaga Harish K S V 						    rx_queue_id, &q_stats)) {
37689e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats");
37699e583185SNaga Harish K S V 		return -1;
37709e583185SNaga Harish K S V 	}
37719e583185SNaga Harish K S V 
37729e583185SNaga Harish K S V 	rte_tel_data_start_dict(d);
3773af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_adapter_id", rx_adapter_id);
3774af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "eth_dev_id", eth_dev_id);
3775af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_queue_id", rx_queue_id);
37769e583185SNaga Harish K S V 	RXA_ADD_DICT(q_stats, rx_event_buf_count);
37779e583185SNaga Harish K S V 	RXA_ADD_DICT(q_stats, rx_event_buf_size);
37789e583185SNaga Harish K S V 	RXA_ADD_DICT(q_stats, rx_poll_count);
37799e583185SNaga Harish K S V 	RXA_ADD_DICT(q_stats, rx_packets);
37809e583185SNaga Harish K S V 	RXA_ADD_DICT(q_stats, rx_dropped);
37819e583185SNaga Harish K S V 
37829e583185SNaga Harish K S V 	return 0;
378374b034ffSWeiguo Li 
378474b034ffSWeiguo Li error:
378574b034ffSWeiguo Li 	free(l_params);
378674b034ffSWeiguo Li 	return ret;
37879e583185SNaga Harish K S V }
37889e583185SNaga Harish K S V 
37899e583185SNaga Harish K S V static int
37909e583185SNaga Harish K S V handle_rxa_queue_stats_reset(const char *cmd __rte_unused,
37919e583185SNaga Harish K S V 			     const char *params,
37929e583185SNaga Harish K S V 			     struct rte_tel_data *d __rte_unused)
37939e583185SNaga Harish K S V {
37949e583185SNaga Harish K S V 	uint8_t rx_adapter_id;
37959e583185SNaga Harish K S V 	uint16_t rx_queue_id;
379674b034ffSWeiguo Li 	int eth_dev_id, ret = -1;
37979e583185SNaga Harish K S V 	char *token, *l_params;
37989e583185SNaga Harish K S V 
37999e583185SNaga Harish K S V 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
38009e583185SNaga Harish K S V 		return -1;
38019e583185SNaga Harish K S V 
38029e583185SNaga Harish K S V 	/* Get Rx adapter ID from parameter string */
38039e583185SNaga Harish K S V 	l_params = strdup(params);
380474b034ffSWeiguo Li 	if (l_params == NULL)
380574b034ffSWeiguo Li 		return -ENOMEM;
38069e583185SNaga Harish K S V 	token = strtok(l_params, ",");
380774b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
38089e583185SNaga Harish K S V 	rx_adapter_id = strtoul(token, NULL, 10);
380974b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL);
38109e583185SNaga Harish K S V 
38119e583185SNaga Harish K S V 	token = strtok(NULL, ",");
381274b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
38139e583185SNaga Harish K S V 
38149e583185SNaga Harish K S V 	/* Get device ID from parameter string */
38159e583185SNaga Harish K S V 	eth_dev_id = strtoul(token, NULL, 10);
3816c07da8e3SDavid Marchand 	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
38179e583185SNaga Harish K S V 
38189e583185SNaga Harish K S V 	token = strtok(NULL, ",");
381974b034ffSWeiguo Li 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
38209e583185SNaga Harish K S V 
38219e583185SNaga Harish K S V 	/* Get Rx queue ID from parameter string */
38229e583185SNaga Harish K S V 	rx_queue_id = strtoul(token, NULL, 10);
38239e583185SNaga Harish K S V 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
38249e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
382574b034ffSWeiguo Li 		ret = -EINVAL;
382674b034ffSWeiguo Li 		goto error;
38279e583185SNaga Harish K S V 	}
38289e583185SNaga Harish K S V 
38299e583185SNaga Harish K S V 	token = strtok(NULL, "\0");
38309e583185SNaga Harish K S V 	if (token != NULL)
38319e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
38327be78d02SJosh Soref 				 " telemetry command, ignoring");
383374b034ffSWeiguo Li 	/* Parsing parameter finished */
383474b034ffSWeiguo Li 	free(l_params);
38359e583185SNaga Harish K S V 
38369e583185SNaga Harish K S V 	if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
38379e583185SNaga Harish K S V 						       eth_dev_id,
38389e583185SNaga Harish K S V 						       rx_queue_id)) {
38399e583185SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats");
38409e583185SNaga Harish K S V 		return -1;
38419e583185SNaga Harish K S V 	}
38429e583185SNaga Harish K S V 
38439e583185SNaga Harish K S V 	return 0;
384474b034ffSWeiguo Li 
384574b034ffSWeiguo Li error:
384674b034ffSWeiguo Li 	free(l_params);
384774b034ffSWeiguo Li 	return ret;
38489e583185SNaga Harish K S V }
38499e583185SNaga Harish K S V 
3850a1793ee8SGanapati Kundapura static int
3851a1793ee8SGanapati Kundapura handle_rxa_instance_get(const char *cmd __rte_unused,
3852a1793ee8SGanapati Kundapura 			const char *params,
3853a1793ee8SGanapati Kundapura 			struct rte_tel_data *d)
3854a1793ee8SGanapati Kundapura {
3855a1793ee8SGanapati Kundapura 	uint8_t instance_id;
3856a1793ee8SGanapati Kundapura 	uint16_t rx_queue_id;
3857a1793ee8SGanapati Kundapura 	int eth_dev_id, ret = -1;
3858a1793ee8SGanapati Kundapura 	char *token, *l_params;
3859a1793ee8SGanapati Kundapura 
3860a1793ee8SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3861a1793ee8SGanapati Kundapura 		return -1;
3862a1793ee8SGanapati Kundapura 
3863a1793ee8SGanapati Kundapura 	l_params = strdup(params);
3864a1793ee8SGanapati Kundapura 	if (l_params == NULL)
3865a1793ee8SGanapati Kundapura 		return -ENOMEM;
3866a1793ee8SGanapati Kundapura 	token = strtok(l_params, ",");
3867a1793ee8SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3868a1793ee8SGanapati Kundapura 
3869a1793ee8SGanapati Kundapura 	/* Get device ID from parameter string */
3870a1793ee8SGanapati Kundapura 	eth_dev_id = strtoul(token, NULL, 10);
3871c07da8e3SDavid Marchand 	RTE_EVENT_ETH_RX_ADAPTER_PORTID_VALID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL);
3872a1793ee8SGanapati Kundapura 
3873a1793ee8SGanapati Kundapura 	token = strtok(NULL, ",");
3874a1793ee8SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1);
3875a1793ee8SGanapati Kundapura 
3876a1793ee8SGanapati Kundapura 	/* Get Rx queue ID from parameter string */
3877a1793ee8SGanapati Kundapura 	rx_queue_id = strtoul(token, NULL, 10);
3878a1793ee8SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3879a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3880a1793ee8SGanapati Kundapura 		ret = -EINVAL;
3881a1793ee8SGanapati Kundapura 		goto error;
3882a1793ee8SGanapati Kundapura 	}
3883a1793ee8SGanapati Kundapura 
3884a1793ee8SGanapati Kundapura 	token = strtok(NULL, "\0");
3885a1793ee8SGanapati Kundapura 	if (token != NULL)
3886a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3887a1793ee8SGanapati Kundapura 				 " telemetry command, ignoring");
3888a1793ee8SGanapati Kundapura 
3889a1793ee8SGanapati Kundapura 	/* Parsing parameter finished */
3890a1793ee8SGanapati Kundapura 	free(l_params);
3891a1793ee8SGanapati Kundapura 
3892a1793ee8SGanapati Kundapura 	if (rte_event_eth_rx_adapter_instance_get(eth_dev_id,
3893a1793ee8SGanapati Kundapura 						  rx_queue_id,
3894a1793ee8SGanapati Kundapura 						  &instance_id)) {
3895a1793ee8SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to get RX adapter instance ID "
3896a1793ee8SGanapati Kundapura 				 " for rx_queue_id = %d", rx_queue_id);
3897a1793ee8SGanapati Kundapura 		return -1;
3898a1793ee8SGanapati Kundapura 	}
3899a1793ee8SGanapati Kundapura 
3900a1793ee8SGanapati Kundapura 	rte_tel_data_start_dict(d);
3901af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "eth_dev_id", eth_dev_id);
3902af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rx_queue_id", rx_queue_id);
3903af0785a2SBruce Richardson 	rte_tel_data_add_dict_uint(d, "rxa_instance_id", instance_id);
3904a1793ee8SGanapati Kundapura 
3905a1793ee8SGanapati Kundapura 	return 0;
3906a1793ee8SGanapati Kundapura 
3907a1793ee8SGanapati Kundapura error:
3908a1793ee8SGanapati Kundapura 	free(l_params);
3909a1793ee8SGanapati Kundapura 	return ret;
3910a1793ee8SGanapati Kundapura }
3911a1793ee8SGanapati Kundapura 
3912814d0170SGanapati Kundapura RTE_INIT(rxa_init_telemetry)
3913814d0170SGanapati Kundapura {
3914814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
3915814d0170SGanapati Kundapura 		handle_rxa_stats,
3916814d0170SGanapati Kundapura 		"Returns Rx adapter stats. Parameter: rxa_id");
3917814d0170SGanapati Kundapura 
3918814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3919814d0170SGanapati Kundapura 		handle_rxa_stats_reset,
3920814d0170SGanapati Kundapura 		"Reset Rx adapter stats. Parameter: rxa_id");
3921814d0170SGanapati Kundapura 
3922814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3923814d0170SGanapati Kundapura 		handle_rxa_get_queue_conf,
3924814d0170SGanapati Kundapura 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
39259e583185SNaga Harish K S V 
39269e583185SNaga Harish K S V 	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats",
39279e583185SNaga Harish K S V 		handle_rxa_get_queue_stats,
39289e583185SNaga Harish K S V 		"Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
39299e583185SNaga Harish K S V 
39309e583185SNaga Harish K S V 	rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset",
39319e583185SNaga Harish K S V 		handle_rxa_queue_stats_reset,
39329e583185SNaga Harish K S V 		"Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id");
3933a1793ee8SGanapati Kundapura 
3934a1793ee8SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_rxq_instance_get",
3935a1793ee8SGanapati Kundapura 		handle_rxa_instance_get,
3936a1793ee8SGanapati Kundapura 		"Returns Rx adapter instance id. Parameter: dev_id, queue_id");
3937814d0170SGanapati Kundapura }
3938