xref: /dpdk/lib/eventdev/rte_event_eth_rx_adapter.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2017 Intel Corporation.
399a2dd95SBruce Richardson  * All rights reserved.
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson #if defined(LINUX)
699a2dd95SBruce Richardson #include <sys/epoll.h>
799a2dd95SBruce Richardson #endif
899a2dd95SBruce Richardson #include <unistd.h>
999a2dd95SBruce Richardson 
1099a2dd95SBruce Richardson #include <rte_cycles.h>
1199a2dd95SBruce Richardson #include <rte_common.h>
1299a2dd95SBruce Richardson #include <rte_dev.h>
1399a2dd95SBruce Richardson #include <rte_errno.h>
14f9bdee26SKonstantin Ananyev #include <ethdev_driver.h>
1599a2dd95SBruce Richardson #include <rte_log.h>
1699a2dd95SBruce Richardson #include <rte_malloc.h>
1799a2dd95SBruce Richardson #include <rte_service_component.h>
1899a2dd95SBruce Richardson #include <rte_thash.h>
1999a2dd95SBruce Richardson #include <rte_interrupts.h>
2083ab470dSGanapati Kundapura #include <rte_mbuf_dyn.h>
21814d0170SGanapati Kundapura #include <rte_telemetry.h>
2299a2dd95SBruce Richardson 
2399a2dd95SBruce Richardson #include "rte_eventdev.h"
2499a2dd95SBruce Richardson #include "eventdev_pmd.h"
25f26f2ca6SPavan Nikhilesh #include "eventdev_trace.h"
2699a2dd95SBruce Richardson #include "rte_event_eth_rx_adapter.h"
2799a2dd95SBruce Richardson 
/* Max mbufs pulled from an Rx queue in one rte_eth_rx_burst() call */
#define BATCH_SIZE		32
/* Consecutive blocked enqueues counted before declaring a block period */
#define BLOCK_CNT_THRESHOLD	10
/* Event enqueue buffer holds six batches of events */
#define ETH_EVENT_BUFFER_SIZE	(6*BATCH_SIZE)
/* Bounds on the number of mbufs aggregated into one event vector */
#define MAX_VECTOR_SIZE		1024
#define MIN_VECTOR_SIZE		4
/* Bounds on the vector flush timeout, in nanoseconds */
#define MAX_VECTOR_NS		1E9
#define MIN_VECTOR_NS		1E5

#define ETH_RX_ADAPTER_SERVICE_NAME_LEN	32
#define ETH_RX_ADAPTER_MEM_NAME_LEN	32

/* Size in bytes of the RSS hash key */
#define RSS_KEY_SIZE	40
/* value written to intr thread pipe to signal thread exit */
#define ETH_BRIDGE_INTR_THREAD_EXIT	1
/* Sentinel value to detect initialized file handle */
#define INIT_FD		-1

/* Name used for the adapter instance pointer array allocation */
#define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
46da781e64SGanapati Kundapura 
/*
 * Used to store port and queue ID of interrupting Rx queue; the pair is
 * packed into a single pointer value so it can travel through the
 * adapter's intr_ring from the interrupt thread to the Rx thread.
 */
union queue_data {
	RTE_STD_C11
	void *ptr;
	struct {
		uint16_t port;	/* Ethernet device id */
		uint16_t queue;	/* Rx queue id */
	};
};
5899a2dd95SBruce Richardson 
/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter; the WRR schedule (wrr_sched) stores indices into an array
 * of these entries.
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint16_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};
6999a2dd95SBruce Richardson 
/* Per Rx queue event-vector aggregation state; instances are linked on
 * the adapter's vector_list while a partially filled vector is pending.
 */
struct eth_rx_vector_data {
	/* Linkage on the adapter's vector_list */
	TAILQ_ENTRY(eth_rx_vector_data) next;
	/* Ethernet device id the mbufs come from */
	uint16_t port;
	/* Rx queue id the mbufs come from */
	uint16_t queue;
	/* Max mbufs aggregated into one vector */
	uint16_t max_vector_count;
	/* Event word template (presumably enqueued with the vector --
	 * verify against the enqueue path)
	 */
	uint64_t event;
	/* Timestamp, in ticks (assumed: when the current vector was
	 * started; used against vector_timeout_ticks -- TODO confirm)
	 */
	uint64_t ts;
	/* Ticks after which a partially filled vector is flushed */
	uint64_t vector_timeout_ticks;
	/* Pool that rte_event_vector objects are allocated from */
	struct rte_mempool *vector_pool;
	/* Vector currently being filled, NULL if none */
	struct rte_event_vector *vector_ev;
} __rte_cache_aligned;

TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
8399a2dd95SBruce Richardson 
/* Event enqueue buffer: one instance per adapter, or one per Rx queue
 * when the adapter uses per-queue event buffers (use_queue_event_buf),
 * see rxa_event_buf_get().
 */
struct eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event *events;
	/* size of event buffer */
	uint16_t events_size;
	/* Event enqueue happens from head */
	uint16_t head;
	/* New packets from rte_eth_rx_burst are enqueued from tail */
	uint16_t tail;
	/* last element in the buffer before rollover */
	uint16_t last;
	/* NOTE(review): presumably a mask related to wrap-around at
	 * 'last' -- confirm in the buffer flush logic
	 */
	uint16_t last_mask;
};
10099a2dd95SBruce Richardson 
/* Adapter instance state; one per adapter id */
struct event_eth_rx_adapter {
	/* RSS key in big-endian byte order, used for flow id computation */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Flag indicating per rxq event buffer */
	bool use_queue_event_buf;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer, shared by all queues unless
	 * use_queue_event_buf is set
	 */
	struct eth_event_enqueue_buffer event_enqueue_buffer;
	/* Vector enable flag */
	uint8_t ena_vector;
	/* Timestamp of previous vector expiry list traversal */
	uint64_t prev_expiry_ts;
	/* Minimum ticks to wait before traversing expiry list */
	uint64_t vector_tmo_ticks;
	/* List of queues with a pending (partially filled) vector */
	struct eth_rx_vector_data_list vector_list;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start ts */
	uint64_t rx_enq_block_start_ts;
	/* epoll fd used to wait for Rx interrupts */
	int epd;
	/* Number of interrupt driven Rx queues */
	uint32_t num_rx_intr;
	/* Used to send <dev id, queue id> of interrupting Rx queues from
	 * the interrupt thread to the Rx thread
	 */
	struct rte_ring *intr_ring;
	/* Rx Queue data (dev id, queue id) for the last non-empty
	 * queue polled
	 */
	union queue_data qd;
	/* queue_data is valid */
	int qd_valid;
	/* Interrupt ring lock, synchronizes Rx thread
	 * and interrupt thread
	 */
	rte_spinlock_t intr_ring_lock;
	/* event array passed to rte_poll_wait */
	struct rte_epoll_event *epoll_events;
	/* Count of interrupt vectors in use */
	uint32_t num_intr_vec;
	/* Thread blocked on Rx interrupts */
	pthread_t rx_intr_thread;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if the default conf callback is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* Adapter started flag */
	uint8_t rxa_started;
	/* Adapter ID */
	uint8_t id;
} __rte_cache_aligned;
18799a2dd95SBruce Richardson 
/* Per eth device */
struct eth_device_info {
	/* Ethernet device */
	struct rte_eth_dev *dev;
	/* Per Rx queue state, indexed by Rx queue id */
	struct eth_rx_queue_info *rx_queue;
	/* Rx callback */
	rte_event_eth_rx_adapter_cb_fn cb_fn;
	/* Rx callback argument */
	void *cb_arg;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* Number of queues added for this device */
	uint16_t nb_dev_queues;
	/* Number of poll based queues
	 * If nb_rx_poll > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_rx_poll;
	/* Number of interrupt based queues
	 * If nb_rx_intr > 0, the start callback will
	 * be invoked if not already invoked.
	 */
	uint16_t nb_rx_intr;
	/* Number of queues that use the shared interrupt */
	uint16_t nb_shared_intr;
	/* sum(wrr(q)) for all queues within the device
	 * useful when deleting all device queues
	 */
	uint32_t wrr_len;
	/* Intr based queue index to start polling from, this is used
	 * if the number of shared interrupts is non-zero
	 */
	uint16_t next_q_idx;
	/* Intr based queue indices */
	uint16_t *intr_queue;
	/* device generates per Rx queue interrupt for queue index
	 * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
	 */
	int multi_intr_cap;
	/* shared interrupt enabled */
	int shared_intr_enabled;
};
23799a2dd95SBruce Richardson 
/* Per Rx queue */
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	int intr_enabled;	/* True if Rx interrupt is enabled */
	uint8_t ena_vector;	/* True if event vectorization is enabled */
	uint16_t wt;		/* Polling weight; 0 => interrupt mode */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
	/* Event word template (presumably merged with flow id when
	 * enqueuing -- verify against the enqueue path)
	 */
	uint64_t event;
	/* Vector aggregation state for this queue */
	struct eth_rx_vector_data vector_data;
	/* Per queue event buffer, used when use_queue_event_buf is set */
	struct eth_event_enqueue_buffer *event_buf;
};
24999a2dd95SBruce Richardson 
/* Array of adapter instance pointers, indexed by adapter id */
static struct event_eth_rx_adapter **event_eth_rx_adapter;

/* Enable dynamic timestamp field in mbuf */
static uint64_t event_eth_rx_timestamp_dynflag;
/* Offset of the mbuf dynamic timestamp field (-1 = not registered) */
static int event_eth_rx_timestamp_dynfield_offset = -1;
25583ab470dSGanapati Kundapura 
25683ab470dSGanapati Kundapura static inline rte_mbuf_timestamp_t *
25783ab470dSGanapati Kundapura rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
25883ab470dSGanapati Kundapura {
25983ab470dSGanapati Kundapura 	return RTE_MBUF_DYNFIELD(mbuf,
26083ab470dSGanapati Kundapura 		event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
26183ab470dSGanapati Kundapura }
26283ab470dSGanapati Kundapura 
26399a2dd95SBruce Richardson static inline int
26499a2dd95SBruce Richardson rxa_validate_id(uint8_t id)
26599a2dd95SBruce Richardson {
26699a2dd95SBruce Richardson 	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
26799a2dd95SBruce Richardson }
26899a2dd95SBruce Richardson 
269a256a743SPavan Nikhilesh static inline struct eth_event_enqueue_buffer *
270a256a743SPavan Nikhilesh rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
271a256a743SPavan Nikhilesh 		  uint16_t rx_queue_id)
272b06bca69SNaga Harish K S V {
273b06bca69SNaga Harish K S V 	if (rx_adapter->use_queue_event_buf) {
274b06bca69SNaga Harish K S V 		struct eth_device_info *dev_info =
275b06bca69SNaga Harish K S V 			&rx_adapter->eth_devices[eth_dev_id];
276b06bca69SNaga Harish K S V 		return dev_info->rx_queue[rx_queue_id].event_buf;
277b06bca69SNaga Harish K S V 	} else
278b06bca69SNaga Harish K S V 		return &rx_adapter->event_enqueue_buffer;
279b06bca69SNaga Harish K S V }
280b06bca69SNaga Harish K S V 
/* Validate the adapter id; on failure log an error and return 'retval'
 * from the enclosing function.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
28799a2dd95SBruce Richardson 
28899a2dd95SBruce Richardson static inline int
289a256a743SPavan Nikhilesh rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter)
29099a2dd95SBruce Richardson {
29199a2dd95SBruce Richardson 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
29299a2dd95SBruce Richardson }
29399a2dd95SBruce Richardson 
/* Greatest common divisor (Euclid's algorithm).
 *
 * Iterative form; unlike the naive 'a % b' recursion it is also defined
 * for b == 0 (returns a), avoiding a division by zero, which is
 * undefined behavior in C.
 */
static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
	while (b != 0) {
		uint16_t r = a % b;

		a = b;
		b = r;
	}

	return a;
}
30199a2dd95SBruce Richardson 
30299a2dd95SBruce Richardson /* Returns the next queue in the polling sequence
30399a2dd95SBruce Richardson  *
30499a2dd95SBruce Richardson  * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
30599a2dd95SBruce Richardson  */
30699a2dd95SBruce Richardson static int
307a256a743SPavan Nikhilesh rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw,
30899a2dd95SBruce Richardson 	     struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
30999a2dd95SBruce Richardson 	     uint16_t gcd, int prev)
31099a2dd95SBruce Richardson {
31199a2dd95SBruce Richardson 	int i = prev;
31299a2dd95SBruce Richardson 	uint16_t w;
31399a2dd95SBruce Richardson 
31499a2dd95SBruce Richardson 	while (1) {
31599a2dd95SBruce Richardson 		uint16_t q;
31699a2dd95SBruce Richardson 		uint16_t d;
31799a2dd95SBruce Richardson 
31899a2dd95SBruce Richardson 		i = (i + 1) % n;
31999a2dd95SBruce Richardson 		if (i == 0) {
32099a2dd95SBruce Richardson 			*cw = *cw - gcd;
32199a2dd95SBruce Richardson 			if (*cw <= 0)
32299a2dd95SBruce Richardson 				*cw = max_wt;
32399a2dd95SBruce Richardson 		}
32499a2dd95SBruce Richardson 
32599a2dd95SBruce Richardson 		q = eth_rx_poll[i].eth_rx_qid;
32699a2dd95SBruce Richardson 		d = eth_rx_poll[i].eth_dev_id;
32799a2dd95SBruce Richardson 		w = rx_adapter->eth_devices[d].rx_queue[q].wt;
32899a2dd95SBruce Richardson 
32999a2dd95SBruce Richardson 		if ((int)w >= *cw)
33099a2dd95SBruce Richardson 			return i;
33199a2dd95SBruce Richardson 	}
33299a2dd95SBruce Richardson }
33399a2dd95SBruce Richardson 
33499a2dd95SBruce Richardson static inline int
33599a2dd95SBruce Richardson rxa_shared_intr(struct eth_device_info *dev_info,
33699a2dd95SBruce Richardson 	int rx_queue_id)
33799a2dd95SBruce Richardson {
33899a2dd95SBruce Richardson 	int multi_intr_cap;
33999a2dd95SBruce Richardson 
34099a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle == NULL)
34199a2dd95SBruce Richardson 		return 0;
34299a2dd95SBruce Richardson 
34399a2dd95SBruce Richardson 	multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
34499a2dd95SBruce Richardson 	return !multi_intr_cap ||
34599a2dd95SBruce Richardson 		rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
34699a2dd95SBruce Richardson }
34799a2dd95SBruce Richardson 
34899a2dd95SBruce Richardson static inline int
34999a2dd95SBruce Richardson rxa_intr_queue(struct eth_device_info *dev_info,
35099a2dd95SBruce Richardson 	int rx_queue_id)
35199a2dd95SBruce Richardson {
35299a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
35399a2dd95SBruce Richardson 
35499a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
35599a2dd95SBruce Richardson 	return dev_info->rx_queue &&
35699a2dd95SBruce Richardson 		!dev_info->internal_event_port &&
35799a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt == 0;
35899a2dd95SBruce Richardson }
35999a2dd95SBruce Richardson 
36099a2dd95SBruce Richardson static inline int
36199a2dd95SBruce Richardson rxa_polled_queue(struct eth_device_info *dev_info,
36299a2dd95SBruce Richardson 	int rx_queue_id)
36399a2dd95SBruce Richardson {
36499a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
36599a2dd95SBruce Richardson 
36699a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
36799a2dd95SBruce Richardson 	return !dev_info->internal_event_port &&
36899a2dd95SBruce Richardson 		dev_info->rx_queue &&
36999a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt != 0;
37099a2dd95SBruce Richardson }
37199a2dd95SBruce Richardson 
/* Calculate change in number of vectors after Rx queue ID is add/deleted */
static int
rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
	uint16_t i;
	int n, s;
	uint16_t nbq;

	nbq = dev_info->dev->data->nb_rx_queues;
	n = 0; /* non shared count */
	s = 0; /* shared count */

	if (rx_queue_id == -1) {
		/* All queues: per queue, count whether a dedicated vector
		 * (n) or the shared interrupt (s) would be gained on add,
		 * or lost on delete.
		 */
		for (i = 0; i < nbq; i++) {
			if (!rxa_shared_intr(dev_info, i))
				n += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
			else
				s += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
		}

		/* All shared-interrupt queues use a single vector: count it
		 * once, when the first shared queue is added or when the
		 * last one is deleted.
		 */
		if (s > 0) {
			if ((add && dev_info->nb_shared_intr == 0) ||
				(!add && dev_info->nb_shared_intr))
				n += 1;
		}
	} else {
		/* Single queue: a dedicated vector changes when the queue's
		 * interrupt mode status changes; the shared vector changes
		 * only on the first add or the last delete of a shared
		 * queue.
		 */
		if (!rxa_shared_intr(dev_info, rx_queue_id))
			n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
				rxa_intr_queue(dev_info, rx_queue_id);
		else
			n = add ? !dev_info->nb_shared_intr :
				dev_info->nb_shared_intr == 1;
	}

	/* Positive delta for add, negative for delete */
	return add ? n : -n;
}
41099a2dd95SBruce Richardson 
41199a2dd95SBruce Richardson /* Calculate nb_rx_intr after deleting interrupt mode rx queues
41299a2dd95SBruce Richardson  */
41399a2dd95SBruce Richardson static void
414a256a743SPavan Nikhilesh rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter,
415a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
41699a2dd95SBruce Richardson 			  uint32_t *nb_rx_intr)
41799a2dd95SBruce Richardson {
41899a2dd95SBruce Richardson 	uint32_t intr_diff;
41999a2dd95SBruce Richardson 
42099a2dd95SBruce Richardson 	if (rx_queue_id == -1)
42199a2dd95SBruce Richardson 		intr_diff = dev_info->nb_rx_intr;
42299a2dd95SBruce Richardson 	else
42399a2dd95SBruce Richardson 		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
42499a2dd95SBruce Richardson 
42599a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
42699a2dd95SBruce Richardson }
42799a2dd95SBruce Richardson 
42899a2dd95SBruce Richardson /* Calculate nb_rx_* after adding interrupt mode rx queues, newly added
42999a2dd95SBruce Richardson  * interrupt queues could currently be poll mode Rx queues
43099a2dd95SBruce Richardson  */
43199a2dd95SBruce Richardson static void
432a256a743SPavan Nikhilesh rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter,
433a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
434a256a743SPavan Nikhilesh 			  uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
43599a2dd95SBruce Richardson 			  uint32_t *nb_wrr)
43699a2dd95SBruce Richardson {
43799a2dd95SBruce Richardson 	uint32_t intr_diff;
43899a2dd95SBruce Richardson 	uint32_t poll_diff;
43999a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
44099a2dd95SBruce Richardson 
44199a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
44299a2dd95SBruce Richardson 		intr_diff = dev_info->dev->data->nb_rx_queues -
44399a2dd95SBruce Richardson 						dev_info->nb_rx_intr;
44499a2dd95SBruce Richardson 		poll_diff = dev_info->nb_rx_poll;
44599a2dd95SBruce Richardson 		wrr_len_diff = dev_info->wrr_len;
44699a2dd95SBruce Richardson 	} else {
44799a2dd95SBruce Richardson 		intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
44899a2dd95SBruce Richardson 		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
44999a2dd95SBruce Richardson 		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
45099a2dd95SBruce Richardson 					0;
45199a2dd95SBruce Richardson 	}
45299a2dd95SBruce Richardson 
45399a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
45499a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
45599a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
45699a2dd95SBruce Richardson }
45799a2dd95SBruce Richardson 
45899a2dd95SBruce Richardson /* Calculate size of the eth_rx_poll and wrr_sched arrays
45999a2dd95SBruce Richardson  * after deleting poll mode rx queues
46099a2dd95SBruce Richardson  */
46199a2dd95SBruce Richardson static void
462a256a743SPavan Nikhilesh rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter,
463a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
464a256a743SPavan Nikhilesh 			  uint32_t *nb_rx_poll, uint32_t *nb_wrr)
46599a2dd95SBruce Richardson {
46699a2dd95SBruce Richardson 	uint32_t poll_diff;
46799a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
46899a2dd95SBruce Richardson 
46999a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
47099a2dd95SBruce Richardson 		poll_diff = dev_info->nb_rx_poll;
47199a2dd95SBruce Richardson 		wrr_len_diff = dev_info->wrr_len;
47299a2dd95SBruce Richardson 	} else {
47399a2dd95SBruce Richardson 		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
47499a2dd95SBruce Richardson 		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
47599a2dd95SBruce Richardson 					0;
47699a2dd95SBruce Richardson 	}
47799a2dd95SBruce Richardson 
47899a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
47999a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
48099a2dd95SBruce Richardson }
48199a2dd95SBruce Richardson 
48299a2dd95SBruce Richardson /* Calculate nb_rx_* after adding poll mode rx queues
48399a2dd95SBruce Richardson  */
48499a2dd95SBruce Richardson static void
485a256a743SPavan Nikhilesh rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter,
486a256a743SPavan Nikhilesh 			  struct eth_device_info *dev_info, int rx_queue_id,
487a256a743SPavan Nikhilesh 			  uint16_t wt, uint32_t *nb_rx_poll,
488a256a743SPavan Nikhilesh 			  uint32_t *nb_rx_intr, uint32_t *nb_wrr)
48999a2dd95SBruce Richardson {
49099a2dd95SBruce Richardson 	uint32_t intr_diff;
49199a2dd95SBruce Richardson 	uint32_t poll_diff;
49299a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
49399a2dd95SBruce Richardson 
49499a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
49599a2dd95SBruce Richardson 		intr_diff = dev_info->nb_rx_intr;
49699a2dd95SBruce Richardson 		poll_diff = dev_info->dev->data->nb_rx_queues -
49799a2dd95SBruce Richardson 						dev_info->nb_rx_poll;
49899a2dd95SBruce Richardson 		wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
49999a2dd95SBruce Richardson 				- dev_info->wrr_len;
50099a2dd95SBruce Richardson 	} else {
50199a2dd95SBruce Richardson 		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
50299a2dd95SBruce Richardson 		poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
50399a2dd95SBruce Richardson 		wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
50499a2dd95SBruce Richardson 				wt - dev_info->rx_queue[rx_queue_id].wt :
50599a2dd95SBruce Richardson 				wt;
50699a2dd95SBruce Richardson 	}
50799a2dd95SBruce Richardson 
50899a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
50999a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
51099a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
51199a2dd95SBruce Richardson }
51299a2dd95SBruce Richardson 
/* Calculate nb_rx_* after adding rx_queue_id */
static void
rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter,
		     struct eth_device_info *dev_info, int rx_queue_id,
		     uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		     uint32_t *nb_wrr)
{
	/* Zero weight selects interrupt mode, non-zero selects poll mode */
	if (wt == 0)
		rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
					nb_rx_poll, nb_rx_intr, nb_wrr);
	else
		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
					wt, nb_rx_poll, nb_rx_intr, nb_wrr);
}
52799a2dd95SBruce Richardson 
/* Calculate nb_rx_* after deleting rx_queue_id */
static void
rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter,
		     struct eth_device_info *dev_info, int rx_queue_id,
		     uint32_t *nb_rx_poll, uint32_t *nb_rx_intr,
		     uint32_t *nb_wrr)
{
	/* Interrupt and poll accounting are independent; update both */
	rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
				nb_rx_intr);
	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id,
				nb_rx_poll, nb_wrr);
}
54099a2dd95SBruce Richardson 
54199a2dd95SBruce Richardson /*
54299a2dd95SBruce Richardson  * Allocate the rx_poll array
54399a2dd95SBruce Richardson  */
54499a2dd95SBruce Richardson static struct eth_rx_poll_entry *
545a256a743SPavan Nikhilesh rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled)
54699a2dd95SBruce Richardson {
54799a2dd95SBruce Richardson 	size_t len;
54899a2dd95SBruce Richardson 
54999a2dd95SBruce Richardson 	len  = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
55099a2dd95SBruce Richardson 							RTE_CACHE_LINE_SIZE);
55199a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
55299a2dd95SBruce Richardson 				len,
55399a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
55499a2dd95SBruce Richardson 				rx_adapter->socket_id);
55599a2dd95SBruce Richardson }
55699a2dd95SBruce Richardson 
55799a2dd95SBruce Richardson /*
55899a2dd95SBruce Richardson  * Allocate the WRR array
55999a2dd95SBruce Richardson  */
56099a2dd95SBruce Richardson static uint32_t *
561a256a743SPavan Nikhilesh rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr)
56299a2dd95SBruce Richardson {
56399a2dd95SBruce Richardson 	size_t len;
56499a2dd95SBruce Richardson 
56599a2dd95SBruce Richardson 	len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
56699a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE);
56799a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
56899a2dd95SBruce Richardson 				len,
56999a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
57099a2dd95SBruce Richardson 				rx_adapter->socket_id);
57199a2dd95SBruce Richardson }
57299a2dd95SBruce Richardson 
57399a2dd95SBruce Richardson static int
574a256a743SPavan Nikhilesh rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll,
575a256a743SPavan Nikhilesh 		      uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll,
57699a2dd95SBruce Richardson 		      uint32_t **wrr_sched)
57799a2dd95SBruce Richardson {
57899a2dd95SBruce Richardson 
57999a2dd95SBruce Richardson 	if (nb_poll == 0) {
58099a2dd95SBruce Richardson 		*rx_poll = NULL;
58199a2dd95SBruce Richardson 		*wrr_sched = NULL;
58299a2dd95SBruce Richardson 		return 0;
58399a2dd95SBruce Richardson 	}
58499a2dd95SBruce Richardson 
58599a2dd95SBruce Richardson 	*rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
58699a2dd95SBruce Richardson 	if (*rx_poll == NULL) {
58799a2dd95SBruce Richardson 		*wrr_sched = NULL;
58899a2dd95SBruce Richardson 		return -ENOMEM;
58999a2dd95SBruce Richardson 	}
59099a2dd95SBruce Richardson 
59199a2dd95SBruce Richardson 	*wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
59299a2dd95SBruce Richardson 	if (*wrr_sched == NULL) {
59399a2dd95SBruce Richardson 		rte_free(*rx_poll);
59499a2dd95SBruce Richardson 		return -ENOMEM;
59599a2dd95SBruce Richardson 	}
59699a2dd95SBruce Richardson 	return 0;
59799a2dd95SBruce Richardson }
59899a2dd95SBruce Richardson 
59999a2dd95SBruce Richardson /* Precalculate WRR polling sequence for all queues in rx_adapter */
60099a2dd95SBruce Richardson static void
601a256a743SPavan Nikhilesh rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter,
602a256a743SPavan Nikhilesh 		      struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr)
60399a2dd95SBruce Richardson {
60499a2dd95SBruce Richardson 	uint16_t d;
60599a2dd95SBruce Richardson 	uint16_t q;
60699a2dd95SBruce Richardson 	unsigned int i;
60799a2dd95SBruce Richardson 	int prev = -1;
60899a2dd95SBruce Richardson 	int cw = -1;
60999a2dd95SBruce Richardson 
61099a2dd95SBruce Richardson 	/* Initialize variables for calculation of wrr schedule */
61199a2dd95SBruce Richardson 	uint16_t max_wrr_pos = 0;
61299a2dd95SBruce Richardson 	unsigned int poll_q = 0;
61399a2dd95SBruce Richardson 	uint16_t max_wt = 0;
61499a2dd95SBruce Richardson 	uint16_t gcd = 0;
61599a2dd95SBruce Richardson 
61699a2dd95SBruce Richardson 	if (rx_poll == NULL)
61799a2dd95SBruce Richardson 		return;
61899a2dd95SBruce Richardson 
61999a2dd95SBruce Richardson 	/* Generate array of all queues to poll, the size of this
62099a2dd95SBruce Richardson 	 * array is poll_q
62199a2dd95SBruce Richardson 	 */
62299a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(d) {
62399a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
62499a2dd95SBruce Richardson 		struct eth_device_info *dev_info =
62599a2dd95SBruce Richardson 				&rx_adapter->eth_devices[d];
62699a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
62799a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
62899a2dd95SBruce Richardson 			continue;
62999a2dd95SBruce Richardson 		if (dev_info->internal_event_port)
63099a2dd95SBruce Richardson 			continue;
63199a2dd95SBruce Richardson 		dev_info->wrr_len = 0;
63299a2dd95SBruce Richardson 		for (q = 0; q < nb_rx_queues; q++) {
63399a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info =
63499a2dd95SBruce Richardson 				&dev_info->rx_queue[q];
63599a2dd95SBruce Richardson 			uint16_t wt;
63699a2dd95SBruce Richardson 
63799a2dd95SBruce Richardson 			if (!rxa_polled_queue(dev_info, q))
63899a2dd95SBruce Richardson 				continue;
63999a2dd95SBruce Richardson 			wt = queue_info->wt;
64099a2dd95SBruce Richardson 			rx_poll[poll_q].eth_dev_id = d;
64199a2dd95SBruce Richardson 			rx_poll[poll_q].eth_rx_qid = q;
64299a2dd95SBruce Richardson 			max_wrr_pos += wt;
64399a2dd95SBruce Richardson 			dev_info->wrr_len += wt;
64499a2dd95SBruce Richardson 			max_wt = RTE_MAX(max_wt, wt);
64599a2dd95SBruce Richardson 			gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
64699a2dd95SBruce Richardson 			poll_q++;
64799a2dd95SBruce Richardson 		}
64899a2dd95SBruce Richardson 	}
64999a2dd95SBruce Richardson 
65099a2dd95SBruce Richardson 	/* Generate polling sequence based on weights */
65199a2dd95SBruce Richardson 	prev = -1;
65299a2dd95SBruce Richardson 	cw = -1;
65399a2dd95SBruce Richardson 	for (i = 0; i < max_wrr_pos; i++) {
65499a2dd95SBruce Richardson 		rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
65599a2dd95SBruce Richardson 				     rx_poll, max_wt, gcd, prev);
65699a2dd95SBruce Richardson 		prev = rx_wrr[i];
65799a2dd95SBruce Richardson 	}
65899a2dd95SBruce Richardson }
65999a2dd95SBruce Richardson 
66099a2dd95SBruce Richardson static inline void
66199a2dd95SBruce Richardson rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
66299a2dd95SBruce Richardson 	struct rte_ipv6_hdr **ipv6_hdr)
66399a2dd95SBruce Richardson {
66499a2dd95SBruce Richardson 	struct rte_ether_hdr *eth_hdr =
66599a2dd95SBruce Richardson 		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
66699a2dd95SBruce Richardson 	struct rte_vlan_hdr *vlan_hdr;
66799a2dd95SBruce Richardson 
66899a2dd95SBruce Richardson 	*ipv4_hdr = NULL;
66999a2dd95SBruce Richardson 	*ipv6_hdr = NULL;
67099a2dd95SBruce Richardson 
67199a2dd95SBruce Richardson 	switch (eth_hdr->ether_type) {
67299a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
67399a2dd95SBruce Richardson 		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
67499a2dd95SBruce Richardson 		break;
67599a2dd95SBruce Richardson 
67699a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
67799a2dd95SBruce Richardson 		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
67899a2dd95SBruce Richardson 		break;
67999a2dd95SBruce Richardson 
68099a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
68199a2dd95SBruce Richardson 		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
68299a2dd95SBruce Richardson 		switch (vlan_hdr->eth_proto) {
68399a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
68499a2dd95SBruce Richardson 			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
68599a2dd95SBruce Richardson 			break;
68699a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
68799a2dd95SBruce Richardson 			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
68899a2dd95SBruce Richardson 			break;
68999a2dd95SBruce Richardson 		default:
69099a2dd95SBruce Richardson 			break;
69199a2dd95SBruce Richardson 		}
69299a2dd95SBruce Richardson 		break;
69399a2dd95SBruce Richardson 
69499a2dd95SBruce Richardson 	default:
69599a2dd95SBruce Richardson 		break;
69699a2dd95SBruce Richardson 	}
69799a2dd95SBruce Richardson }
69899a2dd95SBruce Richardson 
69999a2dd95SBruce Richardson /* Calculate RSS hash for IPv4/6 */
70099a2dd95SBruce Richardson static inline uint32_t
70199a2dd95SBruce Richardson rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
70299a2dd95SBruce Richardson {
70399a2dd95SBruce Richardson 	uint32_t input_len;
70499a2dd95SBruce Richardson 	void *tuple;
70599a2dd95SBruce Richardson 	struct rte_ipv4_tuple ipv4_tuple;
70699a2dd95SBruce Richardson 	struct rte_ipv6_tuple ipv6_tuple;
70799a2dd95SBruce Richardson 	struct rte_ipv4_hdr *ipv4_hdr;
70899a2dd95SBruce Richardson 	struct rte_ipv6_hdr *ipv6_hdr;
70999a2dd95SBruce Richardson 
71099a2dd95SBruce Richardson 	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
71199a2dd95SBruce Richardson 
71299a2dd95SBruce Richardson 	if (ipv4_hdr) {
71399a2dd95SBruce Richardson 		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
71499a2dd95SBruce Richardson 		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
71599a2dd95SBruce Richardson 		tuple = &ipv4_tuple;
71699a2dd95SBruce Richardson 		input_len = RTE_THASH_V4_L3_LEN;
71799a2dd95SBruce Richardson 	} else if (ipv6_hdr) {
71899a2dd95SBruce Richardson 		rte_thash_load_v6_addrs(ipv6_hdr,
71999a2dd95SBruce Richardson 					(union rte_thash_tuple *)&ipv6_tuple);
72099a2dd95SBruce Richardson 		tuple = &ipv6_tuple;
72199a2dd95SBruce Richardson 		input_len = RTE_THASH_V6_L3_LEN;
72299a2dd95SBruce Richardson 	} else
72399a2dd95SBruce Richardson 		return 0;
72499a2dd95SBruce Richardson 
72599a2dd95SBruce Richardson 	return rte_softrss_be(tuple, input_len, rss_key_be);
72699a2dd95SBruce Richardson }
72799a2dd95SBruce Richardson 
72899a2dd95SBruce Richardson static inline int
729a256a743SPavan Nikhilesh rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter)
73099a2dd95SBruce Richardson {
73199a2dd95SBruce Richardson 	return !!rx_adapter->enq_block_count;
73299a2dd95SBruce Richardson }
73399a2dd95SBruce Richardson 
73499a2dd95SBruce Richardson static inline void
735a256a743SPavan Nikhilesh rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter)
73699a2dd95SBruce Richardson {
73799a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts)
73899a2dd95SBruce Richardson 		return;
73999a2dd95SBruce Richardson 
74099a2dd95SBruce Richardson 	rx_adapter->enq_block_count++;
74199a2dd95SBruce Richardson 	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
74299a2dd95SBruce Richardson 		return;
74399a2dd95SBruce Richardson 
74499a2dd95SBruce Richardson 	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
74599a2dd95SBruce Richardson }
74699a2dd95SBruce Richardson 
74799a2dd95SBruce Richardson static inline void
748a256a743SPavan Nikhilesh rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter,
74999a2dd95SBruce Richardson 		     struct rte_event_eth_rx_adapter_stats *stats)
75099a2dd95SBruce Richardson {
75199a2dd95SBruce Richardson 	if (unlikely(!stats->rx_enq_start_ts))
75299a2dd95SBruce Richardson 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
75399a2dd95SBruce Richardson 
75499a2dd95SBruce Richardson 	if (likely(!rxa_enq_blocked(rx_adapter)))
75599a2dd95SBruce Richardson 		return;
75699a2dd95SBruce Richardson 
75799a2dd95SBruce Richardson 	rx_adapter->enq_block_count = 0;
75899a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts) {
75999a2dd95SBruce Richardson 		stats->rx_enq_end_ts = rte_get_tsc_cycles();
76099a2dd95SBruce Richardson 		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
76199a2dd95SBruce Richardson 		    rx_adapter->rx_enq_block_start_ts;
76299a2dd95SBruce Richardson 		rx_adapter->rx_enq_block_start_ts = 0;
76399a2dd95SBruce Richardson 	}
76499a2dd95SBruce Richardson }
76599a2dd95SBruce Richardson 
76699a2dd95SBruce Richardson /* Enqueue buffered events to event device */
76799a2dd95SBruce Richardson static inline uint16_t
768a256a743SPavan Nikhilesh rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter,
769a256a743SPavan Nikhilesh 		       struct eth_event_enqueue_buffer *buf)
77099a2dd95SBruce Richardson {
77199a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
7728113fd15SGanapati Kundapura 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
77399a2dd95SBruce Richardson 
7748113fd15SGanapati Kundapura 	if (!count)
77599a2dd95SBruce Richardson 		return 0;
77699a2dd95SBruce Richardson 
77799a2dd95SBruce Richardson 	uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
77899a2dd95SBruce Richardson 					rx_adapter->event_port_id,
7798113fd15SGanapati Kundapura 					&buf->events[buf->head],
7808113fd15SGanapati Kundapura 					count);
7818113fd15SGanapati Kundapura 	if (n != count)
78299a2dd95SBruce Richardson 		stats->rx_enq_retry++;
7838113fd15SGanapati Kundapura 
7848113fd15SGanapati Kundapura 	buf->head += n;
7858113fd15SGanapati Kundapura 
7868113fd15SGanapati Kundapura 	if (buf->last && n == count) {
7878113fd15SGanapati Kundapura 		uint16_t n1;
7888113fd15SGanapati Kundapura 
7898113fd15SGanapati Kundapura 		n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
7908113fd15SGanapati Kundapura 					rx_adapter->event_port_id,
7918113fd15SGanapati Kundapura 					&buf->events[0],
7928113fd15SGanapati Kundapura 					buf->tail);
7938113fd15SGanapati Kundapura 
7948113fd15SGanapati Kundapura 		if (n1 != buf->tail)
7958113fd15SGanapati Kundapura 			stats->rx_enq_retry++;
7968113fd15SGanapati Kundapura 
7978113fd15SGanapati Kundapura 		buf->last = 0;
7988113fd15SGanapati Kundapura 		buf->head = n1;
7998113fd15SGanapati Kundapura 		buf->last_mask = 0;
8008113fd15SGanapati Kundapura 		n += n1;
80199a2dd95SBruce Richardson 	}
80299a2dd95SBruce Richardson 
80399a2dd95SBruce Richardson 	n ? rxa_enq_block_end_ts(rx_adapter, stats) :
80499a2dd95SBruce Richardson 		rxa_enq_block_start_ts(rx_adapter);
80599a2dd95SBruce Richardson 
80699a2dd95SBruce Richardson 	buf->count -= n;
80799a2dd95SBruce Richardson 	stats->rx_enq_count += n;
80899a2dd95SBruce Richardson 
80999a2dd95SBruce Richardson 	return n;
81099a2dd95SBruce Richardson }
81199a2dd95SBruce Richardson 
81299a2dd95SBruce Richardson static inline void
813a256a743SPavan Nikhilesh rxa_init_vector(struct event_eth_rx_adapter *rx_adapter,
81499a2dd95SBruce Richardson 		struct eth_rx_vector_data *vec)
81599a2dd95SBruce Richardson {
81699a2dd95SBruce Richardson 	vec->vector_ev->nb_elem = 0;
81799a2dd95SBruce Richardson 	vec->vector_ev->port = vec->port;
81899a2dd95SBruce Richardson 	vec->vector_ev->queue = vec->queue;
81999a2dd95SBruce Richardson 	vec->vector_ev->attr_valid = true;
82099a2dd95SBruce Richardson 	TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
82199a2dd95SBruce Richardson }
82299a2dd95SBruce Richardson 
82399a2dd95SBruce Richardson static inline uint16_t
824a256a743SPavan Nikhilesh rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter,
82599a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info,
826a256a743SPavan Nikhilesh 			struct eth_event_enqueue_buffer *buf,
82799a2dd95SBruce Richardson 			struct rte_mbuf **mbufs, uint16_t num)
82899a2dd95SBruce Richardson {
82999a2dd95SBruce Richardson 	struct rte_event *ev = &buf->events[buf->count];
83099a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
83199a2dd95SBruce Richardson 	uint16_t filled, space, sz;
83299a2dd95SBruce Richardson 
83399a2dd95SBruce Richardson 	filled = 0;
83499a2dd95SBruce Richardson 	vec = &queue_info->vector_data;
83599a2dd95SBruce Richardson 
83699a2dd95SBruce Richardson 	if (vec->vector_ev == NULL) {
83799a2dd95SBruce Richardson 		if (rte_mempool_get(vec->vector_pool,
83899a2dd95SBruce Richardson 				    (void **)&vec->vector_ev) < 0) {
83999a2dd95SBruce Richardson 			rte_pktmbuf_free_bulk(mbufs, num);
84099a2dd95SBruce Richardson 			return 0;
84199a2dd95SBruce Richardson 		}
84299a2dd95SBruce Richardson 		rxa_init_vector(rx_adapter, vec);
84399a2dd95SBruce Richardson 	}
84499a2dd95SBruce Richardson 	while (num) {
84599a2dd95SBruce Richardson 		if (vec->vector_ev->nb_elem == vec->max_vector_count) {
84699a2dd95SBruce Richardson 			/* Event ready. */
84799a2dd95SBruce Richardson 			ev->event = vec->event;
84899a2dd95SBruce Richardson 			ev->vec = vec->vector_ev;
84999a2dd95SBruce Richardson 			ev++;
85099a2dd95SBruce Richardson 			filled++;
85199a2dd95SBruce Richardson 			vec->vector_ev = NULL;
85299a2dd95SBruce Richardson 			TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
85399a2dd95SBruce Richardson 			if (rte_mempool_get(vec->vector_pool,
85499a2dd95SBruce Richardson 					    (void **)&vec->vector_ev) < 0) {
85599a2dd95SBruce Richardson 				rte_pktmbuf_free_bulk(mbufs, num);
85699a2dd95SBruce Richardson 				return 0;
85799a2dd95SBruce Richardson 			}
85899a2dd95SBruce Richardson 			rxa_init_vector(rx_adapter, vec);
85999a2dd95SBruce Richardson 		}
86099a2dd95SBruce Richardson 
86199a2dd95SBruce Richardson 		space = vec->max_vector_count - vec->vector_ev->nb_elem;
86299a2dd95SBruce Richardson 		sz = num > space ? space : num;
86399a2dd95SBruce Richardson 		memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
86499a2dd95SBruce Richardson 		       sizeof(void *) * sz);
86599a2dd95SBruce Richardson 		vec->vector_ev->nb_elem += sz;
86699a2dd95SBruce Richardson 		num -= sz;
86799a2dd95SBruce Richardson 		mbufs += sz;
86899a2dd95SBruce Richardson 		vec->ts = rte_rdtsc();
86999a2dd95SBruce Richardson 	}
87099a2dd95SBruce Richardson 
87199a2dd95SBruce Richardson 	if (vec->vector_ev->nb_elem == vec->max_vector_count) {
87299a2dd95SBruce Richardson 		ev->event = vec->event;
87399a2dd95SBruce Richardson 		ev->vec = vec->vector_ev;
87499a2dd95SBruce Richardson 		ev++;
87599a2dd95SBruce Richardson 		filled++;
87699a2dd95SBruce Richardson 		vec->vector_ev = NULL;
87799a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
87899a2dd95SBruce Richardson 	}
87999a2dd95SBruce Richardson 
88099a2dd95SBruce Richardson 	return filled;
88199a2dd95SBruce Richardson }
88299a2dd95SBruce Richardson 
88399a2dd95SBruce Richardson static inline void
884a256a743SPavan Nikhilesh rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
885a256a743SPavan Nikhilesh 		 uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num,
886a256a743SPavan Nikhilesh 		 struct eth_event_enqueue_buffer *buf)
88799a2dd95SBruce Richardson {
88899a2dd95SBruce Richardson 	uint32_t i;
88999a2dd95SBruce Richardson 	struct eth_device_info *dev_info =
89099a2dd95SBruce Richardson 					&rx_adapter->eth_devices[eth_dev_id];
89199a2dd95SBruce Richardson 	struct eth_rx_queue_info *eth_rx_queue_info =
89299a2dd95SBruce Richardson 					&dev_info->rx_queue[rx_queue_id];
8938113fd15SGanapati Kundapura 	uint16_t new_tail = buf->tail;
89499a2dd95SBruce Richardson 	uint64_t event = eth_rx_queue_info->event;
89599a2dd95SBruce Richardson 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
89699a2dd95SBruce Richardson 	struct rte_mbuf *m = mbufs[0];
89799a2dd95SBruce Richardson 	uint32_t rss_mask;
89899a2dd95SBruce Richardson 	uint32_t rss;
89999a2dd95SBruce Richardson 	int do_rss;
90099a2dd95SBruce Richardson 	uint16_t nb_cb;
90199a2dd95SBruce Richardson 	uint16_t dropped;
90283ab470dSGanapati Kundapura 	uint64_t ts, ts_mask;
90399a2dd95SBruce Richardson 
90499a2dd95SBruce Richardson 	if (!eth_rx_queue_info->ena_vector) {
90583ab470dSGanapati Kundapura 		ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
90683ab470dSGanapati Kundapura 						0 : rte_get_tsc_cycles();
90783ab470dSGanapati Kundapura 
908*daa02b5cSOlivier Matz 		/* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set,
90983ab470dSGanapati Kundapura 		 * otherwise 0
91083ab470dSGanapati Kundapura 		 */
91183ab470dSGanapati Kundapura 		ts_mask = (uint64_t)(!(m->ol_flags &
91283ab470dSGanapati Kundapura 				       event_eth_rx_timestamp_dynflag)) - 1ULL;
91383ab470dSGanapati Kundapura 
914*daa02b5cSOlivier Matz 		/* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */
915*daa02b5cSOlivier Matz 		rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1);
91699a2dd95SBruce Richardson 		do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
91799a2dd95SBruce Richardson 		for (i = 0; i < num; i++) {
9188113fd15SGanapati Kundapura 			struct rte_event *ev;
9198113fd15SGanapati Kundapura 
92099a2dd95SBruce Richardson 			m = mbufs[i];
92183ab470dSGanapati Kundapura 			*rxa_timestamp_dynfield(m) = ts |
92283ab470dSGanapati Kundapura 					(*rxa_timestamp_dynfield(m) & ts_mask);
92383ab470dSGanapati Kundapura 
9248113fd15SGanapati Kundapura 			ev = &buf->events[new_tail];
92599a2dd95SBruce Richardson 
92699a2dd95SBruce Richardson 			rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
92799a2dd95SBruce Richardson 				     : m->hash.rss;
92899a2dd95SBruce Richardson 			ev->event = event;
92999a2dd95SBruce Richardson 			ev->flow_id = (rss & ~flow_id_mask) |
93099a2dd95SBruce Richardson 				      (ev->flow_id & flow_id_mask);
93199a2dd95SBruce Richardson 			ev->mbuf = m;
9328113fd15SGanapati Kundapura 			new_tail++;
93399a2dd95SBruce Richardson 		}
93499a2dd95SBruce Richardson 	} else {
93599a2dd95SBruce Richardson 		num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
93699a2dd95SBruce Richardson 					      buf, mbufs, num);
93799a2dd95SBruce Richardson 	}
93899a2dd95SBruce Richardson 
93999a2dd95SBruce Richardson 	if (num && dev_info->cb_fn) {
94099a2dd95SBruce Richardson 
94199a2dd95SBruce Richardson 		dropped = 0;
94299a2dd95SBruce Richardson 		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
9438113fd15SGanapati Kundapura 				       buf->last |
944bc0df25cSNaga Harish K S V 				       (buf->events_size & ~buf->last_mask),
9458113fd15SGanapati Kundapura 				       buf->count >= BATCH_SIZE ?
9468113fd15SGanapati Kundapura 						buf->count - BATCH_SIZE : 0,
9478113fd15SGanapati Kundapura 				       &buf->events[buf->tail],
9488113fd15SGanapati Kundapura 				       num,
9498113fd15SGanapati Kundapura 				       dev_info->cb_arg,
9508113fd15SGanapati Kundapura 				       &dropped);
95199a2dd95SBruce Richardson 		if (unlikely(nb_cb > num))
95299a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
95399a2dd95SBruce Richardson 				nb_cb, num);
95499a2dd95SBruce Richardson 		else
95599a2dd95SBruce Richardson 			num = nb_cb;
95699a2dd95SBruce Richardson 		if (dropped)
95799a2dd95SBruce Richardson 			rx_adapter->stats.rx_dropped += dropped;
95899a2dd95SBruce Richardson 	}
95999a2dd95SBruce Richardson 
96099a2dd95SBruce Richardson 	buf->count += num;
9618113fd15SGanapati Kundapura 	buf->tail += num;
9628113fd15SGanapati Kundapura }
9638113fd15SGanapati Kundapura 
9648113fd15SGanapati Kundapura static inline bool
965a256a743SPavan Nikhilesh rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf)
9668113fd15SGanapati Kundapura {
9678113fd15SGanapati Kundapura 	uint32_t nb_req = buf->tail + BATCH_SIZE;
9688113fd15SGanapati Kundapura 
9698113fd15SGanapati Kundapura 	if (!buf->last) {
970bc0df25cSNaga Harish K S V 		if (nb_req <= buf->events_size)
9718113fd15SGanapati Kundapura 			return true;
9728113fd15SGanapati Kundapura 
9738113fd15SGanapati Kundapura 		if (buf->head >= BATCH_SIZE) {
9748113fd15SGanapati Kundapura 			buf->last_mask = ~0;
9758113fd15SGanapati Kundapura 			buf->last = buf->tail;
9768113fd15SGanapati Kundapura 			buf->tail = 0;
9778113fd15SGanapati Kundapura 			return true;
9788113fd15SGanapati Kundapura 		}
9798113fd15SGanapati Kundapura 	}
9808113fd15SGanapati Kundapura 
9818113fd15SGanapati Kundapura 	return nb_req <= buf->head;
98299a2dd95SBruce Richardson }
98399a2dd95SBruce Richardson 
98499a2dd95SBruce Richardson /* Enqueue packets from  <port, q>  to event buffer */
98599a2dd95SBruce Richardson static inline uint32_t
986a256a743SPavan Nikhilesh rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id,
987a256a743SPavan Nikhilesh 	   uint16_t queue_id, uint32_t rx_count, uint32_t max_rx,
988a256a743SPavan Nikhilesh 	   int *rxq_empty, struct eth_event_enqueue_buffer *buf)
98999a2dd95SBruce Richardson {
99099a2dd95SBruce Richardson 	struct rte_mbuf *mbufs[BATCH_SIZE];
99199a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats *stats =
99299a2dd95SBruce Richardson 					&rx_adapter->stats;
99399a2dd95SBruce Richardson 	uint16_t n;
99499a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
99599a2dd95SBruce Richardson 
99699a2dd95SBruce Richardson 	if (rxq_empty)
99799a2dd95SBruce Richardson 		*rxq_empty = 0;
99899a2dd95SBruce Richardson 	/* Don't do a batch dequeue from the rx queue if there isn't
99999a2dd95SBruce Richardson 	 * enough space in the enqueue buffer.
100099a2dd95SBruce Richardson 	 */
10018113fd15SGanapati Kundapura 	while (rxa_pkt_buf_available(buf)) {
100299a2dd95SBruce Richardson 		if (buf->count >= BATCH_SIZE)
1003b06bca69SNaga Harish K S V 			rxa_flush_event_buffer(rx_adapter, buf);
100499a2dd95SBruce Richardson 
100599a2dd95SBruce Richardson 		stats->rx_poll_count++;
100699a2dd95SBruce Richardson 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
100799a2dd95SBruce Richardson 		if (unlikely(!n)) {
100899a2dd95SBruce Richardson 			if (rxq_empty)
100999a2dd95SBruce Richardson 				*rxq_empty = 1;
101099a2dd95SBruce Richardson 			break;
101199a2dd95SBruce Richardson 		}
1012b06bca69SNaga Harish K S V 		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
101399a2dd95SBruce Richardson 		nb_rx += n;
101499a2dd95SBruce Richardson 		if (rx_count + nb_rx > max_rx)
101599a2dd95SBruce Richardson 			break;
101699a2dd95SBruce Richardson 	}
101799a2dd95SBruce Richardson 
101899a2dd95SBruce Richardson 	if (buf->count > 0)
1019b06bca69SNaga Harish K S V 		rxa_flush_event_buffer(rx_adapter, buf);
102099a2dd95SBruce Richardson 
102199a2dd95SBruce Richardson 	return nb_rx;
102299a2dd95SBruce Richardson }
102399a2dd95SBruce Richardson 
102499a2dd95SBruce Richardson static inline void
1025a256a743SPavan Nikhilesh rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data)
102699a2dd95SBruce Richardson {
102799a2dd95SBruce Richardson 	uint16_t port_id;
102899a2dd95SBruce Richardson 	uint16_t queue;
102999a2dd95SBruce Richardson 	int err;
103099a2dd95SBruce Richardson 	union queue_data qd;
103199a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
103299a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
103399a2dd95SBruce Richardson 	int *intr_enabled;
103499a2dd95SBruce Richardson 
103599a2dd95SBruce Richardson 	qd.ptr = data;
103699a2dd95SBruce Richardson 	port_id = qd.port;
103799a2dd95SBruce Richardson 	queue = qd.queue;
103899a2dd95SBruce Richardson 
103999a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[port_id];
104099a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[queue];
104199a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
104299a2dd95SBruce Richardson 	if (rxa_shared_intr(dev_info, queue))
104399a2dd95SBruce Richardson 		intr_enabled = &dev_info->shared_intr_enabled;
104499a2dd95SBruce Richardson 	else
104599a2dd95SBruce Richardson 		intr_enabled = &queue_info->intr_enabled;
104699a2dd95SBruce Richardson 
104799a2dd95SBruce Richardson 	if (*intr_enabled) {
104899a2dd95SBruce Richardson 		*intr_enabled = 0;
104999a2dd95SBruce Richardson 		err = rte_ring_enqueue(rx_adapter->intr_ring, data);
105099a2dd95SBruce Richardson 		/* Entry should always be available.
105199a2dd95SBruce Richardson 		 * The ring size equals the maximum number of interrupt
105299a2dd95SBruce Richardson 		 * vectors supported (an interrupt vector is shared in
105399a2dd95SBruce Richardson 		 * case of shared interrupts)
105499a2dd95SBruce Richardson 		 */
105599a2dd95SBruce Richardson 		if (err)
105699a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
105799a2dd95SBruce Richardson 				" to ring: %s", strerror(-err));
105899a2dd95SBruce Richardson 		else
105999a2dd95SBruce Richardson 			rte_eth_dev_rx_intr_disable(port_id, queue);
106099a2dd95SBruce Richardson 	}
106199a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
106299a2dd95SBruce Richardson }
106399a2dd95SBruce Richardson 
106499a2dd95SBruce Richardson static int
1065a256a743SPavan Nikhilesh rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter,
106699a2dd95SBruce Richardson 			  uint32_t num_intr_vec)
106799a2dd95SBruce Richardson {
106899a2dd95SBruce Richardson 	if (rx_adapter->num_intr_vec + num_intr_vec >
106999a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE) {
107099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
107199a2dd95SBruce Richardson 		" %d needed %d limit %d", rx_adapter->num_intr_vec,
107299a2dd95SBruce Richardson 		num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
107399a2dd95SBruce Richardson 		return -ENOSPC;
107499a2dd95SBruce Richardson 	}
107599a2dd95SBruce Richardson 
107699a2dd95SBruce Richardson 	return 0;
107799a2dd95SBruce Richardson }
107899a2dd95SBruce Richardson 
107999a2dd95SBruce Richardson /* Delete entries for (dev, queue) from the interrupt ring */
108099a2dd95SBruce Richardson static void
1081a256a743SPavan Nikhilesh rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter,
108299a2dd95SBruce Richardson 			  struct eth_device_info *dev_info,
108399a2dd95SBruce Richardson 			  uint16_t rx_queue_id)
108499a2dd95SBruce Richardson {
108599a2dd95SBruce Richardson 	int i, n;
108699a2dd95SBruce Richardson 	union queue_data qd;
108799a2dd95SBruce Richardson 
108899a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
108999a2dd95SBruce Richardson 
109099a2dd95SBruce Richardson 	n = rte_ring_count(rx_adapter->intr_ring);
109199a2dd95SBruce Richardson 	for (i = 0; i < n; i++) {
109299a2dd95SBruce Richardson 		rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
109399a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id)) {
109499a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id &&
109599a2dd95SBruce Richardson 				qd.queue == rx_queue_id)
109699a2dd95SBruce Richardson 				continue;
109799a2dd95SBruce Richardson 		} else {
109899a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id)
109999a2dd95SBruce Richardson 				continue;
110099a2dd95SBruce Richardson 		}
110199a2dd95SBruce Richardson 		rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
110299a2dd95SBruce Richardson 	}
110399a2dd95SBruce Richardson 
110499a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
110599a2dd95SBruce Richardson }
110699a2dd95SBruce Richardson 
110799a2dd95SBruce Richardson /* pthread callback handling interrupt mode receive queues
110899a2dd95SBruce Richardson  * After receiving an Rx interrupt, it enqueues the port id and queue id of the
110999a2dd95SBruce Richardson  * interrupting queue to the adapter's ring buffer for interrupt events.
111099a2dd95SBruce Richardson  * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
111199a2dd95SBruce Richardson  * the adapter service function.
111299a2dd95SBruce Richardson  */
111399a2dd95SBruce Richardson static void *
111499a2dd95SBruce Richardson rxa_intr_thread(void *arg)
111599a2dd95SBruce Richardson {
1116a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = arg;
111799a2dd95SBruce Richardson 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
111899a2dd95SBruce Richardson 	int n, i;
111999a2dd95SBruce Richardson 
112099a2dd95SBruce Richardson 	while (1) {
112199a2dd95SBruce Richardson 		n = rte_epoll_wait(rx_adapter->epd, epoll_events,
112299a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE, -1);
112399a2dd95SBruce Richardson 		if (unlikely(n < 0))
112499a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
112599a2dd95SBruce Richardson 					n);
112699a2dd95SBruce Richardson 		for (i = 0; i < n; i++) {
112799a2dd95SBruce Richardson 			rxa_intr_ring_enqueue(rx_adapter,
112899a2dd95SBruce Richardson 					epoll_events[i].epdata.data);
112999a2dd95SBruce Richardson 		}
113099a2dd95SBruce Richardson 	}
113199a2dd95SBruce Richardson 
113299a2dd95SBruce Richardson 	return NULL;
113399a2dd95SBruce Richardson }
113499a2dd95SBruce Richardson 
113599a2dd95SBruce Richardson /* Dequeue <port, q> from interrupt ring and enqueue received
113699a2dd95SBruce Richardson  * mbufs to eventdev
113799a2dd95SBruce Richardson  */
static inline uint32_t
rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t n;
	uint32_t nb_rx = 0;
	int rxq_empty;
	struct eth_event_enqueue_buffer *buf;
	rte_spinlock_t *ring_lock;
	uint8_t max_done = 0;

	/* Fast exit: no queue on this adapter is in interrupt mode */
	if (rx_adapter->num_rx_intr == 0)
		return 0;

	/* Nothing pending: the interrupt ring is empty and there is no
	 * <port, queue> carried over from a previous partial drain
	 */
	if (rte_ring_count(rx_adapter->intr_ring) == 0
		&& !rx_adapter->qd_valid)
		return 0;

	buf = &rx_adapter->event_enqueue_buffer;
	ring_lock = &rx_adapter->intr_ring_lock;

	/* Make room before attempting any Rx burst */
	if (buf->count >= BATCH_SIZE)
		rxa_flush_event_buffer(rx_adapter, buf);

	/* Drain interrupt notifications while the enqueue buffer has space */
	while (rxa_pkt_buf_available(buf)) {
		struct eth_device_info *dev_info;
		uint16_t port;
		uint16_t queue;
		union queue_data qd  = rx_adapter->qd;
		int err;

		if (!rx_adapter->qd_valid) {
			struct eth_rx_queue_info *queue_info;

			/* Pull the next <port, queue> posted by the
			 * interrupt thread; the lock serializes against
			 * rxa_intr_ring_del_entries()
			 */
			rte_spinlock_lock(ring_lock);
			err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
			if (err) {
				rte_spinlock_unlock(ring_lock);
				break;
			}

			port = qd.port;
			queue = qd.queue;
			/* Remember the queue so a partially-drained queue is
			 * resumed on the next invocation
			 */
			rx_adapter->qd = qd;
			rx_adapter->qd_valid = 1;
			dev_info = &rx_adapter->eth_devices[port];
			if (rxa_shared_intr(dev_info, queue))
				dev_info->shared_intr_enabled = 1;
			else {
				queue_info = &dev_info->rx_queue[queue];
				queue_info->intr_enabled = 1;
			}
			/* Re-arm the device interrupt before polling so no
			 * notification is lost between poll and re-arm
			 */
			rte_eth_dev_rx_intr_enable(port, queue);
			rte_spinlock_unlock(ring_lock);
		} else {
			/* Resume the queue saved by a previous invocation */
			port = qd.port;
			queue = qd.queue;

			dev_info = &rx_adapter->eth_devices[port];
		}

		if (rxa_shared_intr(dev_info, queue)) {
			uint16_t i;
			uint16_t nb_queues;

			/* Shared interrupt: sweep all interrupt-mode queues
			 * of the port, starting where the last sweep stopped
			 */
			nb_queues = dev_info->dev->data->nb_rx_queues;
			n = 0;
			for (i = dev_info->next_q_idx; i < nb_queues; i++) {
				uint8_t enq_buffer_full;

				if (!rxa_intr_queue(dev_info, i))
					continue;
				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
					rx_adapter->max_nb_rx,
					&rxq_empty, buf);
				nb_rx += n;

				/* Zero events moved while the queue still has
				 * packets means the enqueue buffer is full
				 */
				enq_buffer_full = !rxq_empty && n == 0;
				max_done = nb_rx > rx_adapter->max_nb_rx;

				if (enq_buffer_full || max_done) {
					/* Resume this queue next time */
					dev_info->next_q_idx = i;
					goto done;
				}
			}

			rx_adapter->qd_valid = 0;

			/* Reinitialize for next interrupt */
			dev_info->next_q_idx = dev_info->multi_intr_cap ?
						RTE_MAX_RXTX_INTR_VEC_ID - 1 :
						0;
		} else {
			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
				rx_adapter->max_nb_rx,
				&rxq_empty, buf);
			/* Keep the queue marked pending until it is empty */
			rx_adapter->qd_valid = !rxq_empty;
			nb_rx += n;
			if (nb_rx > rx_adapter->max_nb_rx)
				break;
		}
	}

done:
	rx_adapter->stats.rx_intr_packets += nb_rx;
	return nb_rx;
}
124499a2dd95SBruce Richardson 
124599a2dd95SBruce Richardson /*
124699a2dd95SBruce Richardson  * Polls receive queues added to the event adapter and enqueues received
124799a2dd95SBruce Richardson  * packets to the event device.
124899a2dd95SBruce Richardson  *
124999a2dd95SBruce Richardson  * The receive code enqueues initially to a temporary buffer, the
125099a2dd95SBruce Richardson  * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
125199a2dd95SBruce Richardson  *
125299a2dd95SBruce Richardson  * If there isn't space available in the temporary buffer, packets from the
125399a2dd95SBruce Richardson  * Rx queue aren't dequeued from the eth device, this back pressures the
125499a2dd95SBruce Richardson  * eth device, in virtual device environments this back pressure is relayed to
125599a2dd95SBruce Richardson  * the hypervisor's switching layer where adjustments can be made to deal with
125699a2dd95SBruce Richardson  * it.
125799a2dd95SBruce Richardson  */
static inline uint32_t
rxa_poll(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t num_queue;
	uint32_t nb_rx = 0;
	struct eth_event_enqueue_buffer *buf = NULL;
	uint32_t wrr_pos;
	uint32_t max_nb_rx;

	/* Resume the weighted-round-robin schedule where the previous
	 * service iteration left off
	 */
	wrr_pos = rx_adapter->wrr_pos;
	max_nb_rx = rx_adapter->max_nb_rx;

	/* Iterate through a WRR sequence */
	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;

		/* Per-queue or adapter-wide buffer, depending on config */
		buf = rxa_event_buf_get(rx_adapter, d, qid);

		/* Don't do a batch dequeue from the rx queue if there isn't
		 * enough space in the enqueue buffer.
		 */
		if (buf->count >= BATCH_SIZE)
			rxa_flush_event_buffer(rx_adapter, buf);
		if (!rxa_pkt_buf_available(buf)) {
			/* With per-queue buffers only this queue is blocked,
			 * so move on; with the shared buffer no queue can
			 * make progress, so stop and save the WRR position
			 */
			if (rx_adapter->use_queue_event_buf)
				goto poll_next_entry;
			else {
				rx_adapter->wrr_pos = wrr_pos;
				return nb_rx;
			}
		}

		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
				NULL, buf);
		/* Budget exhausted: advance past this entry and stop */
		if (nb_rx > max_nb_rx) {
			rx_adapter->wrr_pos =
				    (wrr_pos + 1) % rx_adapter->wrr_len;
			break;
		}

poll_next_entry:
		if (++wrr_pos == rx_adapter->wrr_len)
			wrr_pos = 0;
	}
	return nb_rx;
}
130699a2dd95SBruce Richardson 
/* Push a timed-out aggregation vector into the enqueue buffer as a
 * single vector event; called for each expired entry on vector_list.
 */
static void
rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
{
	struct event_eth_rx_adapter *rx_adapter = arg;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event *ev;

	buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);

	/* Drain pending events first so the vector event keeps ordering
	 * relative to earlier events for this queue
	 */
	if (buf->count)
		rxa_flush_event_buffer(rx_adapter, buf);

	/* Empty vector: nothing to deliver */
	if (vec->vector_ev->nb_elem == 0)
		return;
	/* NOTE(review): buf->events[buf->count] is written without checking
	 * for free space; this assumes the flush above always leaves room
	 * (i.e. the eventdev enqueue cannot leave the buffer full) - confirm
	 */
	ev = &buf->events[buf->count];

	/* Event ready. */
	ev->event = vec->event;
	ev->vec = vec->vector_ev;
	buf->count++;

	/* Detach the mbuf vector from this slot; a new one is allocated on
	 * the next aggregated packet
	 */
	vec->vector_ev = NULL;
	vec->ts = 0;
}
133199a2dd95SBruce Richardson 
/* Service function for the adapter: expires aggregation vectors, drains
 * interrupt-mode queues, then polls the WRR schedule. Returns 0 always
 * (service functions report no error).
 */
static int
rxa_service_func(void *args)
{
	struct event_eth_rx_adapter *rx_adapter = args;
	struct rte_event_eth_rx_adapter_stats *stats;

	/* Trylock: if another lcore is running the service or a control
	 * operation holds rx_lock, skip this iteration instead of blocking
	 */
	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
		return 0;
	if (!rx_adapter->rxa_started) {
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		return 0;
	}

	if (rx_adapter->ena_vector) {
		/* Rate-limit expiry scans to once per vector_tmo_ticks */
		if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
		    rx_adapter->vector_tmo_ticks) {
			struct eth_rx_vector_data *vec;

			/* NOTE(review): entries are removed inside
			 * TAILQ_FOREACH; this relies on TAILQ_REMOVE leaving
			 * vec's next pointer intact and on vec not being
			 * freed by rxa_vector_expire() - confirm
			 */
			TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
				uint64_t elapsed_time = rte_rdtsc() - vec->ts;

				if (elapsed_time >= vec->vector_timeout_ticks) {
					rxa_vector_expire(vec, rx_adapter);
					TAILQ_REMOVE(&rx_adapter->vector_list,
						     vec, next);
				}
			}
			rx_adapter->prev_expiry_ts = rte_rdtsc();
		}
	}

	stats = &rx_adapter->stats;
	/* Interrupt-mode queues first, then polled queues */
	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
	stats->rx_packets += rxa_poll(rx_adapter);
	rte_spinlock_unlock(&rx_adapter->rx_lock);
	return 0;
}
136999a2dd95SBruce Richardson 
137099a2dd95SBruce Richardson static int
137199a2dd95SBruce Richardson rte_event_eth_rx_adapter_init(void)
137299a2dd95SBruce Richardson {
1373da781e64SGanapati Kundapura 	const char *name = RXA_ADAPTER_ARRAY;
137499a2dd95SBruce Richardson 	const struct rte_memzone *mz;
137599a2dd95SBruce Richardson 	unsigned int sz;
137699a2dd95SBruce Richardson 
137799a2dd95SBruce Richardson 	sz = sizeof(*event_eth_rx_adapter) *
137899a2dd95SBruce Richardson 	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
137999a2dd95SBruce Richardson 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
138099a2dd95SBruce Richardson 
138199a2dd95SBruce Richardson 	mz = rte_memzone_lookup(name);
138299a2dd95SBruce Richardson 	if (mz == NULL) {
138399a2dd95SBruce Richardson 		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
138499a2dd95SBruce Richardson 						 RTE_CACHE_LINE_SIZE);
138599a2dd95SBruce Richardson 		if (mz == NULL) {
138699a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
138799a2dd95SBruce Richardson 					PRId32, rte_errno);
138899a2dd95SBruce Richardson 			return -rte_errno;
138999a2dd95SBruce Richardson 		}
139099a2dd95SBruce Richardson 	}
139199a2dd95SBruce Richardson 
139299a2dd95SBruce Richardson 	event_eth_rx_adapter = mz->addr;
139399a2dd95SBruce Richardson 	return 0;
139499a2dd95SBruce Richardson }
139599a2dd95SBruce Richardson 
1396da781e64SGanapati Kundapura static int
1397da781e64SGanapati Kundapura rxa_memzone_lookup(void)
1398da781e64SGanapati Kundapura {
1399da781e64SGanapati Kundapura 	const struct rte_memzone *mz;
1400da781e64SGanapati Kundapura 
1401da781e64SGanapati Kundapura 	if (event_eth_rx_adapter == NULL) {
1402da781e64SGanapati Kundapura 		mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1403da781e64SGanapati Kundapura 		if (mz == NULL)
1404da781e64SGanapati Kundapura 			return -ENOMEM;
1405da781e64SGanapati Kundapura 		event_eth_rx_adapter = mz->addr;
1406da781e64SGanapati Kundapura 	}
1407da781e64SGanapati Kundapura 
1408da781e64SGanapati Kundapura 	return 0;
1409da781e64SGanapati Kundapura }
1410da781e64SGanapati Kundapura 
1411a256a743SPavan Nikhilesh static inline struct event_eth_rx_adapter *
141299a2dd95SBruce Richardson rxa_id_to_adapter(uint8_t id)
141399a2dd95SBruce Richardson {
141499a2dd95SBruce Richardson 	return event_eth_rx_adapter ?
141599a2dd95SBruce Richardson 		event_eth_rx_adapter[id] : NULL;
141699a2dd95SBruce Richardson }
141799a2dd95SBruce Richardson 
141899a2dd95SBruce Richardson static int
141999a2dd95SBruce Richardson rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
142099a2dd95SBruce Richardson 		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
142199a2dd95SBruce Richardson {
142299a2dd95SBruce Richardson 	int ret;
142399a2dd95SBruce Richardson 	struct rte_eventdev *dev;
142499a2dd95SBruce Richardson 	struct rte_event_dev_config dev_conf;
142599a2dd95SBruce Richardson 	int started;
142699a2dd95SBruce Richardson 	uint8_t port_id;
142799a2dd95SBruce Richardson 	struct rte_event_port_conf *port_conf = arg;
1428a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
142999a2dd95SBruce Richardson 
143099a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
143199a2dd95SBruce Richardson 	dev_conf = dev->data->dev_conf;
143299a2dd95SBruce Richardson 
143399a2dd95SBruce Richardson 	started = dev->data->dev_started;
143499a2dd95SBruce Richardson 	if (started)
143599a2dd95SBruce Richardson 		rte_event_dev_stop(dev_id);
143699a2dd95SBruce Richardson 	port_id = dev_conf.nb_event_ports;
143799a2dd95SBruce Richardson 	dev_conf.nb_event_ports += 1;
143899a2dd95SBruce Richardson 	ret = rte_event_dev_configure(dev_id, &dev_conf);
143999a2dd95SBruce Richardson 	if (ret) {
144099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
144199a2dd95SBruce Richardson 						dev_id);
144299a2dd95SBruce Richardson 		if (started) {
144399a2dd95SBruce Richardson 			if (rte_event_dev_start(dev_id))
144499a2dd95SBruce Richardson 				return -EIO;
144599a2dd95SBruce Richardson 		}
144699a2dd95SBruce Richardson 		return ret;
144799a2dd95SBruce Richardson 	}
144899a2dd95SBruce Richardson 
144999a2dd95SBruce Richardson 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
145099a2dd95SBruce Richardson 	if (ret) {
145199a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
145299a2dd95SBruce Richardson 					port_id);
145399a2dd95SBruce Richardson 		return ret;
145499a2dd95SBruce Richardson 	}
145599a2dd95SBruce Richardson 
145699a2dd95SBruce Richardson 	conf->event_port_id = port_id;
145799a2dd95SBruce Richardson 	conf->max_nb_rx = 128;
145899a2dd95SBruce Richardson 	if (started)
145999a2dd95SBruce Richardson 		ret = rte_event_dev_start(dev_id);
146099a2dd95SBruce Richardson 	rx_adapter->default_cb_arg = 1;
146199a2dd95SBruce Richardson 	return ret;
146299a2dd95SBruce Richardson }
146399a2dd95SBruce Richardson 
/* Create an epoll instance for Rx interrupt monitoring. Returns the
 * epoll fd on success, -errno on failure, -ENOTSUP on BSD.
 */
static int
rxa_epoll_create1(void)
{
#if defined(LINUX)
	int fd;

	fd = epoll_create1(EPOLL_CLOEXEC);
	if (fd < 0)
		return -errno;
	return fd;
#elif defined(BSD)
	return -ENOTSUP;
#endif
}
147599a2dd95SBruce Richardson 
147699a2dd95SBruce Richardson static int
1477a256a743SPavan Nikhilesh rxa_init_epd(struct event_eth_rx_adapter *rx_adapter)
147899a2dd95SBruce Richardson {
147999a2dd95SBruce Richardson 	if (rx_adapter->epd != INIT_FD)
148099a2dd95SBruce Richardson 		return 0;
148199a2dd95SBruce Richardson 
148299a2dd95SBruce Richardson 	rx_adapter->epd = rxa_epoll_create1();
148399a2dd95SBruce Richardson 	if (rx_adapter->epd < 0) {
148499a2dd95SBruce Richardson 		int err = rx_adapter->epd;
148599a2dd95SBruce Richardson 		rx_adapter->epd = INIT_FD;
148699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
148799a2dd95SBruce Richardson 		return err;
148899a2dd95SBruce Richardson 	}
148999a2dd95SBruce Richardson 
149099a2dd95SBruce Richardson 	return 0;
149199a2dd95SBruce Richardson }
149299a2dd95SBruce Richardson 
/* Allocate the interrupt notification ring, the epoll event array and
 * spawn the interrupt monitor thread. Idempotent: returns 0 if the ring
 * already exists. On failure all partially-created resources are freed.
 */
static int
rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter)
{
	int err;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];

	/* Already created */
	if (rx_adapter->intr_ring)
		return 0;

	/* Ring carrying <port, queue> notifications from the interrupt
	 * thread to the service function
	 */
	rx_adapter->intr_ring = rte_ring_create("intr_ring",
					RTE_EVENT_ETH_INTR_RING_SIZE,
					rte_socket_id(), 0);
	if (!rx_adapter->intr_ring)
		return -ENOMEM;

	/* Scratch array for rte_epoll_wait() in the interrupt thread */
	rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
					RTE_EVENT_ETH_INTR_RING_SIZE *
					sizeof(struct rte_epoll_event),
					RTE_CACHE_LINE_SIZE,
					rx_adapter->socket_id);
	if (!rx_adapter->epoll_events) {
		err = -ENOMEM;
		goto error;
	}

	rte_spinlock_init(&rx_adapter->intr_ring_lock);

	snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
			"rx-intr-thread-%d", rx_adapter->id);

	err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
				NULL, rxa_intr_thread, rx_adapter);
	if (!err)
		return 0;

	/* Thread creation failed: release the event array here, then fall
	 * through to the common ring cleanup below
	 */
	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
	rte_free(rx_adapter->epoll_events);
error:
	rte_ring_free(rx_adapter->intr_ring);
	rx_adapter->intr_ring = NULL;
	rx_adapter->epoll_events = NULL;
	return err;
}
153699a2dd95SBruce Richardson 
153799a2dd95SBruce Richardson static int
1538a256a743SPavan Nikhilesh rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter)
153999a2dd95SBruce Richardson {
154099a2dd95SBruce Richardson 	int err;
154199a2dd95SBruce Richardson 
154299a2dd95SBruce Richardson 	err = pthread_cancel(rx_adapter->rx_intr_thread);
154399a2dd95SBruce Richardson 	if (err)
154499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
154599a2dd95SBruce Richardson 				err);
154699a2dd95SBruce Richardson 
154799a2dd95SBruce Richardson 	err = pthread_join(rx_adapter->rx_intr_thread, NULL);
154899a2dd95SBruce Richardson 	if (err)
154999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
155099a2dd95SBruce Richardson 
155199a2dd95SBruce Richardson 	rte_free(rx_adapter->epoll_events);
155299a2dd95SBruce Richardson 	rte_ring_free(rx_adapter->intr_ring);
155399a2dd95SBruce Richardson 	rx_adapter->intr_ring = NULL;
155499a2dd95SBruce Richardson 	rx_adapter->epoll_events = NULL;
155599a2dd95SBruce Richardson 	return 0;
155699a2dd95SBruce Richardson }
155799a2dd95SBruce Richardson 
155899a2dd95SBruce Richardson static int
1559a256a743SPavan Nikhilesh rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter)
156099a2dd95SBruce Richardson {
156199a2dd95SBruce Richardson 	int ret;
156299a2dd95SBruce Richardson 
156399a2dd95SBruce Richardson 	if (rx_adapter->num_rx_intr == 0)
156499a2dd95SBruce Richardson 		return 0;
156599a2dd95SBruce Richardson 
156699a2dd95SBruce Richardson 	ret = rxa_destroy_intr_thread(rx_adapter);
156799a2dd95SBruce Richardson 	if (ret)
156899a2dd95SBruce Richardson 		return ret;
156999a2dd95SBruce Richardson 
157099a2dd95SBruce Richardson 	close(rx_adapter->epd);
157199a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
157299a2dd95SBruce Richardson 
157399a2dd95SBruce Richardson 	return ret;
157499a2dd95SBruce Richardson }
157599a2dd95SBruce Richardson 
157699a2dd95SBruce Richardson static int
1577a256a743SPavan Nikhilesh rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter,
1578a256a743SPavan Nikhilesh 		 struct eth_device_info *dev_info, uint16_t rx_queue_id)
157999a2dd95SBruce Richardson {
158099a2dd95SBruce Richardson 	int err;
158199a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
158299a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
158399a2dd95SBruce Richardson 
158499a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
158599a2dd95SBruce Richardson 	if (err) {
158699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
158799a2dd95SBruce Richardson 			rx_queue_id);
158899a2dd95SBruce Richardson 		return err;
158999a2dd95SBruce Richardson 	}
159099a2dd95SBruce Richardson 
159199a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
159299a2dd95SBruce Richardson 					rx_adapter->epd,
159399a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
159499a2dd95SBruce Richardson 					0);
159599a2dd95SBruce Richardson 	if (err)
159699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
159799a2dd95SBruce Richardson 
159899a2dd95SBruce Richardson 	if (sintr)
159999a2dd95SBruce Richardson 		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
160099a2dd95SBruce Richardson 	else
160199a2dd95SBruce Richardson 		dev_info->shared_intr_enabled = 0;
160299a2dd95SBruce Richardson 	return err;
160399a2dd95SBruce Richardson }
160499a2dd95SBruce Richardson 
160599a2dd95SBruce Richardson static int
1606a256a743SPavan Nikhilesh rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1607a256a743SPavan Nikhilesh 		   struct eth_device_info *dev_info, int rx_queue_id)
160899a2dd95SBruce Richardson {
160999a2dd95SBruce Richardson 	int err;
161099a2dd95SBruce Richardson 	int i;
161199a2dd95SBruce Richardson 	int s;
161299a2dd95SBruce Richardson 
161399a2dd95SBruce Richardson 	if (dev_info->nb_rx_intr == 0)
161499a2dd95SBruce Richardson 		return 0;
161599a2dd95SBruce Richardson 
161699a2dd95SBruce Richardson 	err = 0;
161799a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
161899a2dd95SBruce Richardson 		s = dev_info->nb_shared_intr;
161999a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
162099a2dd95SBruce Richardson 			int sintr;
162199a2dd95SBruce Richardson 			uint16_t q;
162299a2dd95SBruce Richardson 
162399a2dd95SBruce Richardson 			q = dev_info->intr_queue[i];
162499a2dd95SBruce Richardson 			sintr = rxa_shared_intr(dev_info, q);
162599a2dd95SBruce Richardson 			s -= sintr;
162699a2dd95SBruce Richardson 
162799a2dd95SBruce Richardson 			if (!sintr || s == 0) {
162899a2dd95SBruce Richardson 
162999a2dd95SBruce Richardson 				err = rxa_disable_intr(rx_adapter, dev_info,
163099a2dd95SBruce Richardson 						q);
163199a2dd95SBruce Richardson 				if (err)
163299a2dd95SBruce Richardson 					return err;
163399a2dd95SBruce Richardson 				rxa_intr_ring_del_entries(rx_adapter, dev_info,
163499a2dd95SBruce Richardson 							q);
163599a2dd95SBruce Richardson 			}
163699a2dd95SBruce Richardson 		}
163799a2dd95SBruce Richardson 	} else {
163899a2dd95SBruce Richardson 		if (!rxa_intr_queue(dev_info, rx_queue_id))
163999a2dd95SBruce Richardson 			return 0;
164099a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
164199a2dd95SBruce Richardson 				dev_info->nb_shared_intr == 1) {
164299a2dd95SBruce Richardson 			err = rxa_disable_intr(rx_adapter, dev_info,
164399a2dd95SBruce Richardson 					rx_queue_id);
164499a2dd95SBruce Richardson 			if (err)
164599a2dd95SBruce Richardson 				return err;
164699a2dd95SBruce Richardson 			rxa_intr_ring_del_entries(rx_adapter, dev_info,
164799a2dd95SBruce Richardson 						rx_queue_id);
164899a2dd95SBruce Richardson 		}
164999a2dd95SBruce Richardson 
165099a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
165199a2dd95SBruce Richardson 			if (dev_info->intr_queue[i] == rx_queue_id) {
165299a2dd95SBruce Richardson 				for (; i < dev_info->nb_rx_intr - 1; i++)
165399a2dd95SBruce Richardson 					dev_info->intr_queue[i] =
165499a2dd95SBruce Richardson 						dev_info->intr_queue[i + 1];
165599a2dd95SBruce Richardson 				break;
165699a2dd95SBruce Richardson 			}
165799a2dd95SBruce Richardson 		}
165899a2dd95SBruce Richardson 	}
165999a2dd95SBruce Richardson 
166099a2dd95SBruce Richardson 	return err;
166199a2dd95SBruce Richardson }
166299a2dd95SBruce Richardson 
166399a2dd95SBruce Richardson static int
1664a256a743SPavan Nikhilesh rxa_config_intr(struct event_eth_rx_adapter *rx_adapter,
1665a256a743SPavan Nikhilesh 		struct eth_device_info *dev_info, uint16_t rx_queue_id)
166699a2dd95SBruce Richardson {
166799a2dd95SBruce Richardson 	int err, err1;
166899a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
166999a2dd95SBruce Richardson 	union queue_data qd;
167099a2dd95SBruce Richardson 	int init_fd;
167199a2dd95SBruce Richardson 	uint16_t *intr_queue;
167299a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
167399a2dd95SBruce Richardson 
167499a2dd95SBruce Richardson 	if (rxa_intr_queue(dev_info, rx_queue_id))
167599a2dd95SBruce Richardson 		return 0;
167699a2dd95SBruce Richardson 
167799a2dd95SBruce Richardson 	intr_queue = dev_info->intr_queue;
167899a2dd95SBruce Richardson 	if (dev_info->intr_queue == NULL) {
167999a2dd95SBruce Richardson 		size_t len =
168099a2dd95SBruce Richardson 			dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
168199a2dd95SBruce Richardson 		dev_info->intr_queue =
168299a2dd95SBruce Richardson 			rte_zmalloc_socket(
168399a2dd95SBruce Richardson 				rx_adapter->mem_name,
168499a2dd95SBruce Richardson 				len,
168599a2dd95SBruce Richardson 				0,
168699a2dd95SBruce Richardson 				rx_adapter->socket_id);
168799a2dd95SBruce Richardson 		if (dev_info->intr_queue == NULL)
168899a2dd95SBruce Richardson 			return -ENOMEM;
168999a2dd95SBruce Richardson 	}
169099a2dd95SBruce Richardson 
169199a2dd95SBruce Richardson 	init_fd = rx_adapter->epd;
169299a2dd95SBruce Richardson 	err = rxa_init_epd(rx_adapter);
169399a2dd95SBruce Richardson 	if (err)
169499a2dd95SBruce Richardson 		goto err_free_queue;
169599a2dd95SBruce Richardson 
169699a2dd95SBruce Richardson 	qd.port = eth_dev_id;
169799a2dd95SBruce Richardson 	qd.queue = rx_queue_id;
169899a2dd95SBruce Richardson 
169999a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
170099a2dd95SBruce Richardson 					rx_adapter->epd,
170199a2dd95SBruce Richardson 					RTE_INTR_EVENT_ADD,
170299a2dd95SBruce Richardson 					qd.ptr);
170399a2dd95SBruce Richardson 	if (err) {
170499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
170599a2dd95SBruce Richardson 			" Rx Queue %u err %d", rx_queue_id, err);
170699a2dd95SBruce Richardson 		goto err_del_fd;
170799a2dd95SBruce Richardson 	}
170899a2dd95SBruce Richardson 
170999a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
171099a2dd95SBruce Richardson 	if (err) {
171199a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not enable interrupt for"
171299a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
171399a2dd95SBruce Richardson 
171499a2dd95SBruce Richardson 		goto err_del_event;
171599a2dd95SBruce Richardson 	}
171699a2dd95SBruce Richardson 
171799a2dd95SBruce Richardson 	err = rxa_create_intr_thread(rx_adapter);
171899a2dd95SBruce Richardson 	if (!err)  {
171999a2dd95SBruce Richardson 		if (sintr)
172099a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
172199a2dd95SBruce Richardson 		else
172299a2dd95SBruce Richardson 			dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
172399a2dd95SBruce Richardson 		return 0;
172499a2dd95SBruce Richardson 	}
172599a2dd95SBruce Richardson 
172699a2dd95SBruce Richardson 
172799a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
172899a2dd95SBruce Richardson 	if (err)
172999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for"
173099a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
173199a2dd95SBruce Richardson err_del_event:
173299a2dd95SBruce Richardson 	err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
173399a2dd95SBruce Richardson 					rx_adapter->epd,
173499a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
173599a2dd95SBruce Richardson 					0);
173699a2dd95SBruce Richardson 	if (err1) {
173799a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not delete event for"
173899a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err1);
173999a2dd95SBruce Richardson 	}
174099a2dd95SBruce Richardson err_del_fd:
174199a2dd95SBruce Richardson 	if (init_fd == INIT_FD) {
174299a2dd95SBruce Richardson 		close(rx_adapter->epd);
174399a2dd95SBruce Richardson 		rx_adapter->epd = -1;
174499a2dd95SBruce Richardson 	}
174599a2dd95SBruce Richardson err_free_queue:
174699a2dd95SBruce Richardson 	if (intr_queue == NULL)
174799a2dd95SBruce Richardson 		rte_free(dev_info->intr_queue);
174899a2dd95SBruce Richardson 
174999a2dd95SBruce Richardson 	return err;
175099a2dd95SBruce Richardson }
175199a2dd95SBruce Richardson 
175299a2dd95SBruce Richardson static int
1753a256a743SPavan Nikhilesh rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
1754a256a743SPavan Nikhilesh 		   struct eth_device_info *dev_info, int rx_queue_id)
175599a2dd95SBruce Richardson 
175699a2dd95SBruce Richardson {
175799a2dd95SBruce Richardson 	int i, j, err;
175899a2dd95SBruce Richardson 	int si = -1;
175999a2dd95SBruce Richardson 	int shared_done = (dev_info->nb_shared_intr > 0);
176099a2dd95SBruce Richardson 
176199a2dd95SBruce Richardson 	if (rx_queue_id != -1) {
176299a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
176399a2dd95SBruce Richardson 			return 0;
176499a2dd95SBruce Richardson 		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
176599a2dd95SBruce Richardson 	}
176699a2dd95SBruce Richardson 
176799a2dd95SBruce Richardson 	err = 0;
176899a2dd95SBruce Richardson 	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
176999a2dd95SBruce Richardson 
177099a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, i) && shared_done)
177199a2dd95SBruce Richardson 			continue;
177299a2dd95SBruce Richardson 
177399a2dd95SBruce Richardson 		err = rxa_config_intr(rx_adapter, dev_info, i);
177499a2dd95SBruce Richardson 
177599a2dd95SBruce Richardson 		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
177699a2dd95SBruce Richardson 		if (shared_done) {
177799a2dd95SBruce Richardson 			si = i;
177899a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
177999a2dd95SBruce Richardson 		}
178099a2dd95SBruce Richardson 		if (err)
178199a2dd95SBruce Richardson 			break;
178299a2dd95SBruce Richardson 	}
178399a2dd95SBruce Richardson 
178499a2dd95SBruce Richardson 	if (err == 0)
178599a2dd95SBruce Richardson 		return 0;
178699a2dd95SBruce Richardson 
178799a2dd95SBruce Richardson 	shared_done = (dev_info->nb_shared_intr > 0);
178899a2dd95SBruce Richardson 	for (j = 0; j < i; j++) {
178999a2dd95SBruce Richardson 		if (rxa_intr_queue(dev_info, j))
179099a2dd95SBruce Richardson 			continue;
179199a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, j) && si != j)
179299a2dd95SBruce Richardson 			continue;
179399a2dd95SBruce Richardson 		err = rxa_disable_intr(rx_adapter, dev_info, j);
179499a2dd95SBruce Richardson 		if (err)
179599a2dd95SBruce Richardson 			break;
179699a2dd95SBruce Richardson 
179799a2dd95SBruce Richardson 	}
179899a2dd95SBruce Richardson 
179999a2dd95SBruce Richardson 	return err;
180099a2dd95SBruce Richardson }
180199a2dd95SBruce Richardson 
180299a2dd95SBruce Richardson static int
1803a256a743SPavan Nikhilesh rxa_init_service(struct event_eth_rx_adapter *rx_adapter, uint8_t id)
180499a2dd95SBruce Richardson {
180599a2dd95SBruce Richardson 	int ret;
180699a2dd95SBruce Richardson 	struct rte_service_spec service;
180799a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
180899a2dd95SBruce Richardson 
180999a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
181099a2dd95SBruce Richardson 		return 0;
181199a2dd95SBruce Richardson 
181299a2dd95SBruce Richardson 	memset(&service, 0, sizeof(service));
181399a2dd95SBruce Richardson 	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
181499a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d", id);
181599a2dd95SBruce Richardson 	service.socket_id = rx_adapter->socket_id;
181699a2dd95SBruce Richardson 	service.callback = rxa_service_func;
181799a2dd95SBruce Richardson 	service.callback_userdata = rx_adapter;
181899a2dd95SBruce Richardson 	/* Service function handles locking for queue add/del updates */
181999a2dd95SBruce Richardson 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
182099a2dd95SBruce Richardson 	ret = rte_service_component_register(&service, &rx_adapter->service_id);
182199a2dd95SBruce Richardson 	if (ret) {
182299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
182399a2dd95SBruce Richardson 			service.name, ret);
182499a2dd95SBruce Richardson 		return ret;
182599a2dd95SBruce Richardson 	}
182699a2dd95SBruce Richardson 
182799a2dd95SBruce Richardson 	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
182899a2dd95SBruce Richardson 		&rx_adapter_conf, rx_adapter->conf_arg);
182999a2dd95SBruce Richardson 	if (ret) {
183099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
183199a2dd95SBruce Richardson 			ret);
183299a2dd95SBruce Richardson 		goto err_done;
183399a2dd95SBruce Richardson 	}
183499a2dd95SBruce Richardson 	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
183599a2dd95SBruce Richardson 	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
183699a2dd95SBruce Richardson 	rx_adapter->service_inited = 1;
183799a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
183899a2dd95SBruce Richardson 	return 0;
183999a2dd95SBruce Richardson 
184099a2dd95SBruce Richardson err_done:
184199a2dd95SBruce Richardson 	rte_service_component_unregister(rx_adapter->service_id);
184299a2dd95SBruce Richardson 	return ret;
184399a2dd95SBruce Richardson }
184499a2dd95SBruce Richardson 
184599a2dd95SBruce Richardson static void
1846a256a743SPavan Nikhilesh rxa_update_queue(struct event_eth_rx_adapter *rx_adapter,
1847a256a743SPavan Nikhilesh 		 struct eth_device_info *dev_info, int32_t rx_queue_id,
184899a2dd95SBruce Richardson 		 uint8_t add)
184999a2dd95SBruce Richardson {
185099a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
185199a2dd95SBruce Richardson 	int enabled;
185299a2dd95SBruce Richardson 	uint16_t i;
185399a2dd95SBruce Richardson 
185499a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
185599a2dd95SBruce Richardson 		return;
185699a2dd95SBruce Richardson 
185799a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
185899a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
185999a2dd95SBruce Richardson 			rxa_update_queue(rx_adapter, dev_info, i, add);
186099a2dd95SBruce Richardson 	} else {
186199a2dd95SBruce Richardson 		queue_info = &dev_info->rx_queue[rx_queue_id];
186299a2dd95SBruce Richardson 		enabled = queue_info->queue_enabled;
186399a2dd95SBruce Richardson 		if (add) {
186499a2dd95SBruce Richardson 			rx_adapter->nb_queues += !enabled;
186599a2dd95SBruce Richardson 			dev_info->nb_dev_queues += !enabled;
186699a2dd95SBruce Richardson 		} else {
186799a2dd95SBruce Richardson 			rx_adapter->nb_queues -= enabled;
186899a2dd95SBruce Richardson 			dev_info->nb_dev_queues -= enabled;
186999a2dd95SBruce Richardson 		}
187099a2dd95SBruce Richardson 		queue_info->queue_enabled = !!add;
187199a2dd95SBruce Richardson 	}
187299a2dd95SBruce Richardson }
187399a2dd95SBruce Richardson 
187499a2dd95SBruce Richardson static void
187599a2dd95SBruce Richardson rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
187699a2dd95SBruce Richardson 		    uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
187799a2dd95SBruce Richardson 		    uint16_t port_id)
187899a2dd95SBruce Richardson {
187999a2dd95SBruce Richardson #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
188099a2dd95SBruce Richardson 	struct eth_rx_vector_data *vector_data;
188199a2dd95SBruce Richardson 	uint32_t flow_id;
188299a2dd95SBruce Richardson 
188399a2dd95SBruce Richardson 	vector_data = &queue_info->vector_data;
188499a2dd95SBruce Richardson 	vector_data->max_vector_count = vector_count;
188599a2dd95SBruce Richardson 	vector_data->port = port_id;
188699a2dd95SBruce Richardson 	vector_data->queue = qid;
188799a2dd95SBruce Richardson 	vector_data->vector_pool = mp;
188899a2dd95SBruce Richardson 	vector_data->vector_timeout_ticks =
188999a2dd95SBruce Richardson 		NSEC2TICK(vector_ns, rte_get_timer_hz());
189099a2dd95SBruce Richardson 	vector_data->ts = 0;
189199a2dd95SBruce Richardson 	flow_id = queue_info->event & 0xFFFFF;
189299a2dd95SBruce Richardson 	flow_id =
189399a2dd95SBruce Richardson 		flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
189499a2dd95SBruce Richardson 	vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
189599a2dd95SBruce Richardson }
189699a2dd95SBruce Richardson 
189799a2dd95SBruce Richardson static void
1898a256a743SPavan Nikhilesh rxa_sw_del(struct event_eth_rx_adapter *rx_adapter,
1899a256a743SPavan Nikhilesh 	   struct eth_device_info *dev_info, int32_t rx_queue_id)
190099a2dd95SBruce Richardson {
190199a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
190299a2dd95SBruce Richardson 	int pollq;
190399a2dd95SBruce Richardson 	int intrq;
190499a2dd95SBruce Richardson 	int sintrq;
190599a2dd95SBruce Richardson 
190699a2dd95SBruce Richardson 
190799a2dd95SBruce Richardson 	if (rx_adapter->nb_queues == 0)
190899a2dd95SBruce Richardson 		return;
190999a2dd95SBruce Richardson 
191099a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
191199a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
191299a2dd95SBruce Richardson 		uint16_t i;
191399a2dd95SBruce Richardson 
191499a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
191599a2dd95SBruce Richardson 		for (i = 0; i <	nb_rx_queues; i++)
191699a2dd95SBruce Richardson 			rxa_sw_del(rx_adapter, dev_info, i);
191799a2dd95SBruce Richardson 		return;
191899a2dd95SBruce Richardson 	}
191999a2dd95SBruce Richardson 
192099a2dd95SBruce Richardson 	/* Push all the partial event vectors to event device. */
192199a2dd95SBruce Richardson 	TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
192299a2dd95SBruce Richardson 		if (vec->queue != rx_queue_id)
192399a2dd95SBruce Richardson 			continue;
192499a2dd95SBruce Richardson 		rxa_vector_expire(vec, rx_adapter);
192599a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
192699a2dd95SBruce Richardson 	}
192799a2dd95SBruce Richardson 
192899a2dd95SBruce Richardson 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
192999a2dd95SBruce Richardson 	intrq = rxa_intr_queue(dev_info, rx_queue_id);
193099a2dd95SBruce Richardson 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
193199a2dd95SBruce Richardson 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
193299a2dd95SBruce Richardson 	rx_adapter->num_rx_polled -= pollq;
193399a2dd95SBruce Richardson 	dev_info->nb_rx_poll -= pollq;
193499a2dd95SBruce Richardson 	rx_adapter->num_rx_intr -= intrq;
193599a2dd95SBruce Richardson 	dev_info->nb_rx_intr -= intrq;
193699a2dd95SBruce Richardson 	dev_info->nb_shared_intr -= intrq && sintrq;
1937b06bca69SNaga Harish K S V 	if (rx_adapter->use_queue_event_buf) {
1938a256a743SPavan Nikhilesh 		struct eth_event_enqueue_buffer *event_buf =
1939b06bca69SNaga Harish K S V 			dev_info->rx_queue[rx_queue_id].event_buf;
1940b06bca69SNaga Harish K S V 		rte_free(event_buf->events);
1941b06bca69SNaga Harish K S V 		rte_free(event_buf);
1942b06bca69SNaga Harish K S V 		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
1943b06bca69SNaga Harish K S V 	}
194499a2dd95SBruce Richardson }
194599a2dd95SBruce Richardson 
1946b06bca69SNaga Harish K S V static int
1947a256a743SPavan Nikhilesh rxa_add_queue(struct event_eth_rx_adapter *rx_adapter,
1948a256a743SPavan Nikhilesh 	      struct eth_device_info *dev_info, int32_t rx_queue_id,
194999a2dd95SBruce Richardson 	      const struct rte_event_eth_rx_adapter_queue_conf *conf)
195099a2dd95SBruce Richardson {
195199a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
195299a2dd95SBruce Richardson 	const struct rte_event *ev = &conf->ev;
195399a2dd95SBruce Richardson 	int pollq;
195499a2dd95SBruce Richardson 	int intrq;
195599a2dd95SBruce Richardson 	int sintrq;
195699a2dd95SBruce Richardson 	struct rte_event *qi_ev;
1957a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *new_rx_buf = NULL;
1958b06bca69SNaga Harish K S V 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
1959b06bca69SNaga Harish K S V 	int ret;
196099a2dd95SBruce Richardson 
196199a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
196299a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
196399a2dd95SBruce Richardson 		uint16_t i;
196499a2dd95SBruce Richardson 
196599a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
1966b06bca69SNaga Harish K S V 		for (i = 0; i <	nb_rx_queues; i++) {
1967b06bca69SNaga Harish K S V 			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
1968b06bca69SNaga Harish K S V 			if (ret)
1969b06bca69SNaga Harish K S V 				return ret;
1970b06bca69SNaga Harish K S V 		}
1971b06bca69SNaga Harish K S V 		return 0;
197299a2dd95SBruce Richardson 	}
197399a2dd95SBruce Richardson 
197499a2dd95SBruce Richardson 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
197599a2dd95SBruce Richardson 	intrq = rxa_intr_queue(dev_info, rx_queue_id);
197699a2dd95SBruce Richardson 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
197799a2dd95SBruce Richardson 
197899a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
197999a2dd95SBruce Richardson 	queue_info->wt = conf->servicing_weight;
198099a2dd95SBruce Richardson 
198199a2dd95SBruce Richardson 	qi_ev = (struct rte_event *)&queue_info->event;
198299a2dd95SBruce Richardson 	qi_ev->event = ev->event;
198399a2dd95SBruce Richardson 	qi_ev->op = RTE_EVENT_OP_NEW;
198499a2dd95SBruce Richardson 	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
198599a2dd95SBruce Richardson 	qi_ev->sub_event_type = 0;
198699a2dd95SBruce Richardson 
198799a2dd95SBruce Richardson 	if (conf->rx_queue_flags &
198899a2dd95SBruce Richardson 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
198999a2dd95SBruce Richardson 		queue_info->flow_id_mask = ~0;
199099a2dd95SBruce Richardson 	} else
199199a2dd95SBruce Richardson 		qi_ev->flow_id = 0;
199299a2dd95SBruce Richardson 
1993929ebdd5SPavan Nikhilesh 	if (conf->rx_queue_flags &
1994929ebdd5SPavan Nikhilesh 	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
1995929ebdd5SPavan Nikhilesh 		queue_info->ena_vector = 1;
1996929ebdd5SPavan Nikhilesh 		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
1997929ebdd5SPavan Nikhilesh 		rxa_set_vector_data(queue_info, conf->vector_sz,
1998929ebdd5SPavan Nikhilesh 				    conf->vector_timeout_ns, conf->vector_mp,
1999929ebdd5SPavan Nikhilesh 				    rx_queue_id, dev_info->dev->data->port_id);
2000929ebdd5SPavan Nikhilesh 		rx_adapter->ena_vector = 1;
2001929ebdd5SPavan Nikhilesh 		rx_adapter->vector_tmo_ticks =
2002929ebdd5SPavan Nikhilesh 			rx_adapter->vector_tmo_ticks ?
2003929ebdd5SPavan Nikhilesh 				      RTE_MIN(queue_info->vector_data
2004929ebdd5SPavan Nikhilesh 							.vector_timeout_ticks >>
2005929ebdd5SPavan Nikhilesh 						1,
2006929ebdd5SPavan Nikhilesh 					rx_adapter->vector_tmo_ticks) :
2007929ebdd5SPavan Nikhilesh 				queue_info->vector_data.vector_timeout_ticks >>
2008929ebdd5SPavan Nikhilesh 					1;
2009929ebdd5SPavan Nikhilesh 	}
2010929ebdd5SPavan Nikhilesh 
201199a2dd95SBruce Richardson 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
201299a2dd95SBruce Richardson 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
201399a2dd95SBruce Richardson 		rx_adapter->num_rx_polled += !pollq;
201499a2dd95SBruce Richardson 		dev_info->nb_rx_poll += !pollq;
201599a2dd95SBruce Richardson 		rx_adapter->num_rx_intr -= intrq;
201699a2dd95SBruce Richardson 		dev_info->nb_rx_intr -= intrq;
201799a2dd95SBruce Richardson 		dev_info->nb_shared_intr -= intrq && sintrq;
201899a2dd95SBruce Richardson 	}
201999a2dd95SBruce Richardson 
202099a2dd95SBruce Richardson 	if (rxa_intr_queue(dev_info, rx_queue_id)) {
202199a2dd95SBruce Richardson 		rx_adapter->num_rx_polled -= pollq;
202299a2dd95SBruce Richardson 		dev_info->nb_rx_poll -= pollq;
202399a2dd95SBruce Richardson 		rx_adapter->num_rx_intr += !intrq;
202499a2dd95SBruce Richardson 		dev_info->nb_rx_intr += !intrq;
202599a2dd95SBruce Richardson 		dev_info->nb_shared_intr += !intrq && sintrq;
202699a2dd95SBruce Richardson 		if (dev_info->nb_shared_intr == 1) {
202799a2dd95SBruce Richardson 			if (dev_info->multi_intr_cap)
202899a2dd95SBruce Richardson 				dev_info->next_q_idx =
202999a2dd95SBruce Richardson 					RTE_MAX_RXTX_INTR_VEC_ID - 1;
203099a2dd95SBruce Richardson 			else
203199a2dd95SBruce Richardson 				dev_info->next_q_idx = 0;
203299a2dd95SBruce Richardson 		}
203399a2dd95SBruce Richardson 	}
2034b06bca69SNaga Harish K S V 
2035b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf)
2036b06bca69SNaga Harish K S V 		return 0;
2037b06bca69SNaga Harish K S V 
2038b06bca69SNaga Harish K S V 	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
2039b06bca69SNaga Harish K S V 				sizeof(*new_rx_buf), 0,
2040b06bca69SNaga Harish K S V 				rte_eth_dev_socket_id(eth_dev_id));
2041b06bca69SNaga Harish K S V 	if (new_rx_buf == NULL) {
2042b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
2043b06bca69SNaga Harish K S V 				 "dev_id: %d queue_id: %d",
2044b06bca69SNaga Harish K S V 				 eth_dev_id, rx_queue_id);
2045b06bca69SNaga Harish K S V 		return -ENOMEM;
2046b06bca69SNaga Harish K S V 	}
2047b06bca69SNaga Harish K S V 
2048b06bca69SNaga Harish K S V 	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
2049b06bca69SNaga Harish K S V 	new_rx_buf->events_size += (2 * BATCH_SIZE);
2050b06bca69SNaga Harish K S V 	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
2051b06bca69SNaga Harish K S V 				sizeof(struct rte_event) *
2052b06bca69SNaga Harish K S V 				new_rx_buf->events_size, 0,
2053b06bca69SNaga Harish K S V 				rte_eth_dev_socket_id(eth_dev_id));
2054b06bca69SNaga Harish K S V 	if (new_rx_buf->events == NULL) {
2055b06bca69SNaga Harish K S V 		rte_free(new_rx_buf);
2056b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
2057b06bca69SNaga Harish K S V 				 "dev_id: %d queue_id: %d",
2058b06bca69SNaga Harish K S V 				 eth_dev_id, rx_queue_id);
2059b06bca69SNaga Harish K S V 		return -ENOMEM;
2060b06bca69SNaga Harish K S V 	}
2061b06bca69SNaga Harish K S V 
2062b06bca69SNaga Harish K S V 	queue_info->event_buf = new_rx_buf;
2063b06bca69SNaga Harish K S V 
2064b06bca69SNaga Harish K S V 	return 0;
206599a2dd95SBruce Richardson }
206699a2dd95SBruce Richardson 
2067a256a743SPavan Nikhilesh static int
2068a256a743SPavan Nikhilesh rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id,
206999a2dd95SBruce Richardson 	   int rx_queue_id,
207099a2dd95SBruce Richardson 	   const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
207199a2dd95SBruce Richardson {
207299a2dd95SBruce Richardson 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
207399a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
207499a2dd95SBruce Richardson 	int ret;
207599a2dd95SBruce Richardson 	struct eth_rx_poll_entry *rx_poll;
207699a2dd95SBruce Richardson 	struct eth_rx_queue_info *rx_queue;
207799a2dd95SBruce Richardson 	uint32_t *rx_wrr;
207899a2dd95SBruce Richardson 	uint16_t nb_rx_queues;
207999a2dd95SBruce Richardson 	uint32_t nb_rx_poll, nb_wrr;
208099a2dd95SBruce Richardson 	uint32_t nb_rx_intr;
208199a2dd95SBruce Richardson 	int num_intr_vec;
208299a2dd95SBruce Richardson 	uint16_t wt;
208399a2dd95SBruce Richardson 
208499a2dd95SBruce Richardson 	if (queue_conf->servicing_weight == 0) {
208599a2dd95SBruce Richardson 		struct rte_eth_dev_data *data = dev_info->dev->data;
208699a2dd95SBruce Richardson 
208799a2dd95SBruce Richardson 		temp_conf = *queue_conf;
208899a2dd95SBruce Richardson 		if (!data->dev_conf.intr_conf.rxq) {
208999a2dd95SBruce Richardson 			/* If Rx interrupts are disabled set wt = 1 */
209099a2dd95SBruce Richardson 			temp_conf.servicing_weight = 1;
209199a2dd95SBruce Richardson 		}
209299a2dd95SBruce Richardson 		queue_conf = &temp_conf;
2093b06bca69SNaga Harish K S V 
2094b06bca69SNaga Harish K S V 		if (queue_conf->servicing_weight == 0 &&
2095b06bca69SNaga Harish K S V 		    rx_adapter->use_queue_event_buf) {
2096b06bca69SNaga Harish K S V 
2097b06bca69SNaga Harish K S V 			RTE_EDEV_LOG_ERR("Use of queue level event buffer "
2098b06bca69SNaga Harish K S V 					 "not supported for interrupt queues "
2099b06bca69SNaga Harish K S V 					 "dev_id: %d queue_id: %d",
2100b06bca69SNaga Harish K S V 					 eth_dev_id, rx_queue_id);
2101b06bca69SNaga Harish K S V 			return -EINVAL;
2102b06bca69SNaga Harish K S V 		}
210399a2dd95SBruce Richardson 	}
210499a2dd95SBruce Richardson 
210599a2dd95SBruce Richardson 	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
210699a2dd95SBruce Richardson 	rx_queue = dev_info->rx_queue;
210799a2dd95SBruce Richardson 	wt = queue_conf->servicing_weight;
210899a2dd95SBruce Richardson 
210999a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL) {
211099a2dd95SBruce Richardson 		dev_info->rx_queue =
211199a2dd95SBruce Richardson 		    rte_zmalloc_socket(rx_adapter->mem_name,
211299a2dd95SBruce Richardson 				       nb_rx_queues *
211399a2dd95SBruce Richardson 				       sizeof(struct eth_rx_queue_info), 0,
211499a2dd95SBruce Richardson 				       rx_adapter->socket_id);
211599a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
211699a2dd95SBruce Richardson 			return -ENOMEM;
211799a2dd95SBruce Richardson 	}
211899a2dd95SBruce Richardson 	rx_wrr = NULL;
211999a2dd95SBruce Richardson 	rx_poll = NULL;
212099a2dd95SBruce Richardson 
212199a2dd95SBruce Richardson 	rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
212299a2dd95SBruce Richardson 			queue_conf->servicing_weight,
212399a2dd95SBruce Richardson 			&nb_rx_poll, &nb_rx_intr, &nb_wrr);
212499a2dd95SBruce Richardson 
212599a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle)
212699a2dd95SBruce Richardson 		dev_info->multi_intr_cap =
212799a2dd95SBruce Richardson 			rte_intr_cap_multiple(dev_info->dev->intr_handle);
212899a2dd95SBruce Richardson 
212999a2dd95SBruce Richardson 	ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
213099a2dd95SBruce Richardson 				&rx_poll, &rx_wrr);
213199a2dd95SBruce Richardson 	if (ret)
213299a2dd95SBruce Richardson 		goto err_free_rxqueue;
213399a2dd95SBruce Richardson 
213499a2dd95SBruce Richardson 	if (wt == 0) {
213599a2dd95SBruce Richardson 		num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
213699a2dd95SBruce Richardson 
213799a2dd95SBruce Richardson 		ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
213899a2dd95SBruce Richardson 		if (ret)
213999a2dd95SBruce Richardson 			goto err_free_rxqueue;
214099a2dd95SBruce Richardson 
214199a2dd95SBruce Richardson 		ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
214299a2dd95SBruce Richardson 		if (ret)
214399a2dd95SBruce Richardson 			goto err_free_rxqueue;
214499a2dd95SBruce Richardson 	} else {
214599a2dd95SBruce Richardson 
214699a2dd95SBruce Richardson 		num_intr_vec = 0;
214799a2dd95SBruce Richardson 		if (rx_adapter->num_rx_intr > nb_rx_intr) {
214899a2dd95SBruce Richardson 			num_intr_vec = rxa_nb_intr_vect(dev_info,
214999a2dd95SBruce Richardson 						rx_queue_id, 0);
215099a2dd95SBruce Richardson 			/* interrupt based queues are being converted to
215199a2dd95SBruce Richardson 			 * poll mode queues, delete the interrupt configuration
215299a2dd95SBruce Richardson 			 * for those.
215399a2dd95SBruce Richardson 			 */
215499a2dd95SBruce Richardson 			ret = rxa_del_intr_queue(rx_adapter,
215599a2dd95SBruce Richardson 						dev_info, rx_queue_id);
215699a2dd95SBruce Richardson 			if (ret)
215799a2dd95SBruce Richardson 				goto err_free_rxqueue;
215899a2dd95SBruce Richardson 		}
215999a2dd95SBruce Richardson 	}
216099a2dd95SBruce Richardson 
216199a2dd95SBruce Richardson 	if (nb_rx_intr == 0) {
216299a2dd95SBruce Richardson 		ret = rxa_free_intr_resources(rx_adapter);
216399a2dd95SBruce Richardson 		if (ret)
216499a2dd95SBruce Richardson 			goto err_free_rxqueue;
216599a2dd95SBruce Richardson 	}
216699a2dd95SBruce Richardson 
216799a2dd95SBruce Richardson 	if (wt == 0) {
216899a2dd95SBruce Richardson 		uint16_t i;
216999a2dd95SBruce Richardson 
217099a2dd95SBruce Richardson 		if (rx_queue_id  == -1) {
217199a2dd95SBruce Richardson 			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
217299a2dd95SBruce Richardson 				dev_info->intr_queue[i] = i;
217399a2dd95SBruce Richardson 		} else {
217499a2dd95SBruce Richardson 			if (!rxa_intr_queue(dev_info, rx_queue_id))
217599a2dd95SBruce Richardson 				dev_info->intr_queue[nb_rx_intr - 1] =
217699a2dd95SBruce Richardson 					rx_queue_id;
217799a2dd95SBruce Richardson 		}
217899a2dd95SBruce Richardson 	}
217999a2dd95SBruce Richardson 
218099a2dd95SBruce Richardson 
218199a2dd95SBruce Richardson 
2182b06bca69SNaga Harish K S V 	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
2183b06bca69SNaga Harish K S V 	if (ret)
2184b06bca69SNaga Harish K S V 		goto err_free_rxqueue;
218599a2dd95SBruce Richardson 	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
218699a2dd95SBruce Richardson 
218799a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_rx_poll);
218899a2dd95SBruce Richardson 	rte_free(rx_adapter->wrr_sched);
218999a2dd95SBruce Richardson 
219099a2dd95SBruce Richardson 	rx_adapter->eth_rx_poll = rx_poll;
219199a2dd95SBruce Richardson 	rx_adapter->wrr_sched = rx_wrr;
219299a2dd95SBruce Richardson 	rx_adapter->wrr_len = nb_wrr;
219399a2dd95SBruce Richardson 	rx_adapter->num_intr_vec += num_intr_vec;
219499a2dd95SBruce Richardson 	return 0;
219599a2dd95SBruce Richardson 
219699a2dd95SBruce Richardson err_free_rxqueue:
219799a2dd95SBruce Richardson 	if (rx_queue == NULL) {
219899a2dd95SBruce Richardson 		rte_free(dev_info->rx_queue);
219999a2dd95SBruce Richardson 		dev_info->rx_queue = NULL;
220099a2dd95SBruce Richardson 	}
220199a2dd95SBruce Richardson 
220299a2dd95SBruce Richardson 	rte_free(rx_poll);
220399a2dd95SBruce Richardson 	rte_free(rx_wrr);
220499a2dd95SBruce Richardson 
2205b06bca69SNaga Harish K S V 	return ret;
220699a2dd95SBruce Richardson }
220799a2dd95SBruce Richardson 
220899a2dd95SBruce Richardson static int
220999a2dd95SBruce Richardson rxa_ctrl(uint8_t id, int start)
221099a2dd95SBruce Richardson {
2211a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
221299a2dd95SBruce Richardson 	struct rte_eventdev *dev;
221399a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
221499a2dd95SBruce Richardson 	uint32_t i;
221599a2dd95SBruce Richardson 	int use_service = 0;
221699a2dd95SBruce Richardson 	int stop = !start;
221799a2dd95SBruce Richardson 
221899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
221999a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
222099a2dd95SBruce Richardson 	if (rx_adapter == NULL)
222199a2dd95SBruce Richardson 		return -EINVAL;
222299a2dd95SBruce Richardson 
222399a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
222499a2dd95SBruce Richardson 
222599a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
222699a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
222799a2dd95SBruce Richardson 		/* if start  check for num dev queues */
222899a2dd95SBruce Richardson 		if (start && !dev_info->nb_dev_queues)
222999a2dd95SBruce Richardson 			continue;
223099a2dd95SBruce Richardson 		/* if stop check if dev has been started */
223199a2dd95SBruce Richardson 		if (stop && !dev_info->dev_rx_started)
223299a2dd95SBruce Richardson 			continue;
223399a2dd95SBruce Richardson 		use_service |= !dev_info->internal_event_port;
223499a2dd95SBruce Richardson 		dev_info->dev_rx_started = start;
223599a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0)
223699a2dd95SBruce Richardson 			continue;
223799a2dd95SBruce Richardson 		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
223899a2dd95SBruce Richardson 						&rte_eth_devices[i]) :
223999a2dd95SBruce Richardson 			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
224099a2dd95SBruce Richardson 						&rte_eth_devices[i]);
224199a2dd95SBruce Richardson 	}
224299a2dd95SBruce Richardson 
224399a2dd95SBruce Richardson 	if (use_service) {
224499a2dd95SBruce Richardson 		rte_spinlock_lock(&rx_adapter->rx_lock);
224599a2dd95SBruce Richardson 		rx_adapter->rxa_started = start;
224699a2dd95SBruce Richardson 		rte_service_runstate_set(rx_adapter->service_id, start);
224799a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
224899a2dd95SBruce Richardson 	}
224999a2dd95SBruce Richardson 
225099a2dd95SBruce Richardson 	return 0;
225199a2dd95SBruce Richardson }
225299a2dd95SBruce Richardson 
2253bc0df25cSNaga Harish K S V static int
2254bc0df25cSNaga Harish K S V rxa_create(uint8_t id, uint8_t dev_id,
2255bc0df25cSNaga Harish K S V 	   struct rte_event_eth_rx_adapter_params *rxa_params,
225699a2dd95SBruce Richardson 	   rte_event_eth_rx_adapter_conf_cb conf_cb,
225799a2dd95SBruce Richardson 	   void *conf_arg)
225899a2dd95SBruce Richardson {
2259a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
2260a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf;
2261bc0df25cSNaga Harish K S V 	struct rte_event *events;
226299a2dd95SBruce Richardson 	int ret;
226399a2dd95SBruce Richardson 	int socket_id;
226499a2dd95SBruce Richardson 	uint16_t i;
226599a2dd95SBruce Richardson 	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
226699a2dd95SBruce Richardson 	const uint8_t default_rss_key[] = {
226799a2dd95SBruce Richardson 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
226899a2dd95SBruce Richardson 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
226999a2dd95SBruce Richardson 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
227099a2dd95SBruce Richardson 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
227199a2dd95SBruce Richardson 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
227299a2dd95SBruce Richardson 	};
227399a2dd95SBruce Richardson 
227499a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
227599a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2276bc0df25cSNaga Harish K S V 
227799a2dd95SBruce Richardson 	if (conf_cb == NULL)
227899a2dd95SBruce Richardson 		return -EINVAL;
227999a2dd95SBruce Richardson 
228099a2dd95SBruce Richardson 	if (event_eth_rx_adapter == NULL) {
228199a2dd95SBruce Richardson 		ret = rte_event_eth_rx_adapter_init();
228299a2dd95SBruce Richardson 		if (ret)
228399a2dd95SBruce Richardson 			return ret;
228499a2dd95SBruce Richardson 	}
228599a2dd95SBruce Richardson 
228699a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
228799a2dd95SBruce Richardson 	if (rx_adapter != NULL) {
228899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
228999a2dd95SBruce Richardson 		return -EEXIST;
229099a2dd95SBruce Richardson 	}
229199a2dd95SBruce Richardson 
229299a2dd95SBruce Richardson 	socket_id = rte_event_dev_socket_id(dev_id);
229399a2dd95SBruce Richardson 	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
229499a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d",
229599a2dd95SBruce Richardson 		id);
229699a2dd95SBruce Richardson 
229799a2dd95SBruce Richardson 	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
229899a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE, socket_id);
229999a2dd95SBruce Richardson 	if (rx_adapter == NULL) {
230099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
230199a2dd95SBruce Richardson 		return -ENOMEM;
230299a2dd95SBruce Richardson 	}
230399a2dd95SBruce Richardson 
230499a2dd95SBruce Richardson 	rx_adapter->eventdev_id = dev_id;
230599a2dd95SBruce Richardson 	rx_adapter->socket_id = socket_id;
230699a2dd95SBruce Richardson 	rx_adapter->conf_cb = conf_cb;
230799a2dd95SBruce Richardson 	rx_adapter->conf_arg = conf_arg;
230899a2dd95SBruce Richardson 	rx_adapter->id = id;
230999a2dd95SBruce Richardson 	TAILQ_INIT(&rx_adapter->vector_list);
231099a2dd95SBruce Richardson 	strcpy(rx_adapter->mem_name, mem_name);
231199a2dd95SBruce Richardson 	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
231299a2dd95SBruce Richardson 					RTE_MAX_ETHPORTS *
231399a2dd95SBruce Richardson 					sizeof(struct eth_device_info), 0,
231499a2dd95SBruce Richardson 					socket_id);
231599a2dd95SBruce Richardson 	rte_convert_rss_key((const uint32_t *)default_rss_key,
231699a2dd95SBruce Richardson 			(uint32_t *)rx_adapter->rss_key_be,
231799a2dd95SBruce Richardson 			    RTE_DIM(default_rss_key));
231899a2dd95SBruce Richardson 
231999a2dd95SBruce Richardson 	if (rx_adapter->eth_devices == NULL) {
232099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
232199a2dd95SBruce Richardson 		rte_free(rx_adapter);
232299a2dd95SBruce Richardson 		return -ENOMEM;
232399a2dd95SBruce Richardson 	}
2324bc0df25cSNaga Harish K S V 
232599a2dd95SBruce Richardson 	rte_spinlock_init(&rx_adapter->rx_lock);
2326bc0df25cSNaga Harish K S V 
232799a2dd95SBruce Richardson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
232899a2dd95SBruce Richardson 		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
232999a2dd95SBruce Richardson 
2330bc0df25cSNaga Harish K S V 	/* Rx adapter event buffer allocation */
2331b06bca69SNaga Harish K S V 	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
2332b06bca69SNaga Harish K S V 
2333b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf) {
2334bc0df25cSNaga Harish K S V 		buf = &rx_adapter->event_enqueue_buffer;
2335bc0df25cSNaga Harish K S V 		buf->events_size = rxa_params->event_buf_size;
2336bc0df25cSNaga Harish K S V 
2337bc0df25cSNaga Harish K S V 		events = rte_zmalloc_socket(rx_adapter->mem_name,
2338bc0df25cSNaga Harish K S V 					    buf->events_size * sizeof(*events),
2339bc0df25cSNaga Harish K S V 					    0, socket_id);
2340bc0df25cSNaga Harish K S V 		if (events == NULL) {
2341b06bca69SNaga Harish K S V 			RTE_EDEV_LOG_ERR("Failed to allocate memory "
2342b06bca69SNaga Harish K S V 					 "for adapter event buffer");
2343bc0df25cSNaga Harish K S V 			rte_free(rx_adapter->eth_devices);
2344bc0df25cSNaga Harish K S V 			rte_free(rx_adapter);
2345bc0df25cSNaga Harish K S V 			return -ENOMEM;
2346bc0df25cSNaga Harish K S V 		}
2347bc0df25cSNaga Harish K S V 
2348bc0df25cSNaga Harish K S V 		rx_adapter->event_enqueue_buffer.events = events;
2349b06bca69SNaga Harish K S V 	}
2350bc0df25cSNaga Harish K S V 
235199a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = rx_adapter;
2352bc0df25cSNaga Harish K S V 
235399a2dd95SBruce Richardson 	if (conf_cb == rxa_default_conf_cb)
235499a2dd95SBruce Richardson 		rx_adapter->default_cb_arg = 1;
235583ab470dSGanapati Kundapura 
235683ab470dSGanapati Kundapura 	if (rte_mbuf_dyn_rx_timestamp_register(
235783ab470dSGanapati Kundapura 			&event_eth_rx_timestamp_dynfield_offset,
235883ab470dSGanapati Kundapura 			&event_eth_rx_timestamp_dynflag) != 0) {
235983ab470dSGanapati Kundapura 		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
236083ab470dSGanapati Kundapura 		return -rte_errno;
236183ab470dSGanapati Kundapura 	}
236283ab470dSGanapati Kundapura 
236399a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
236499a2dd95SBruce Richardson 		conf_arg);
236599a2dd95SBruce Richardson 	return 0;
236699a2dd95SBruce Richardson }
236799a2dd95SBruce Richardson 
236899a2dd95SBruce Richardson int
2369bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2370bc0df25cSNaga Harish K S V 				rte_event_eth_rx_adapter_conf_cb conf_cb,
2371bc0df25cSNaga Harish K S V 				void *conf_arg)
2372bc0df25cSNaga Harish K S V {
2373bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params rxa_params = {0};
2374bc0df25cSNaga Harish K S V 
2375bc0df25cSNaga Harish K S V 	/* use default values for adapter params */
2376bc0df25cSNaga Harish K S V 	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2377b06bca69SNaga Harish K S V 	rxa_params.use_queue_event_buf = false;
2378bc0df25cSNaga Harish K S V 
2379bc0df25cSNaga Harish K S V 	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2380bc0df25cSNaga Harish K S V }
2381bc0df25cSNaga Harish K S V 
2382bc0df25cSNaga Harish K S V int
2383bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2384bc0df25cSNaga Harish K S V 			struct rte_event_port_conf *port_config,
2385bc0df25cSNaga Harish K S V 			struct rte_event_eth_rx_adapter_params *rxa_params)
2386bc0df25cSNaga Harish K S V {
2387bc0df25cSNaga Harish K S V 	struct rte_event_port_conf *pc;
2388bc0df25cSNaga Harish K S V 	int ret;
2389bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params temp_params = {0};
2390bc0df25cSNaga Harish K S V 
2391bc0df25cSNaga Harish K S V 	if (port_config == NULL)
2392bc0df25cSNaga Harish K S V 		return -EINVAL;
2393bc0df25cSNaga Harish K S V 
2394bc0df25cSNaga Harish K S V 	if (rxa_params == NULL) {
2395b06bca69SNaga Harish K S V 		/* use default values if rxa_params is NULL */
2396bc0df25cSNaga Harish K S V 		rxa_params = &temp_params;
2397bc0df25cSNaga Harish K S V 		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2398b06bca69SNaga Harish K S V 		rxa_params->use_queue_event_buf = false;
2399b06bca69SNaga Harish K S V 	} else if ((!rxa_params->use_queue_event_buf &&
2400b06bca69SNaga Harish K S V 		    rxa_params->event_buf_size == 0) ||
2401b06bca69SNaga Harish K S V 		   (rxa_params->use_queue_event_buf &&
2402b06bca69SNaga Harish K S V 		    rxa_params->event_buf_size != 0)) {
2403b06bca69SNaga Harish K S V 		RTE_EDEV_LOG_ERR("Invalid adapter params\n");
2404bc0df25cSNaga Harish K S V 		return -EINVAL;
2405b06bca69SNaga Harish K S V 	} else if (!rxa_params->use_queue_event_buf) {
2406b06bca69SNaga Harish K S V 		/* adjust event buff size with BATCH_SIZE used for fetching
2407b06bca69SNaga Harish K S V 		 * packets from NIC rx queues to get full buffer utilization
2408b06bca69SNaga Harish K S V 		 * and prevent unnecessary rollovers.
2409b06bca69SNaga Harish K S V 		 */
2410b06bca69SNaga Harish K S V 
2411b06bca69SNaga Harish K S V 		rxa_params->event_buf_size =
2412b06bca69SNaga Harish K S V 			RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
2413b06bca69SNaga Harish K S V 		rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
2414b06bca69SNaga Harish K S V 	}
2415bc0df25cSNaga Harish K S V 
2416bc0df25cSNaga Harish K S V 	pc = rte_malloc(NULL, sizeof(*pc), 0);
2417bc0df25cSNaga Harish K S V 	if (pc == NULL)
2418bc0df25cSNaga Harish K S V 		return -ENOMEM;
2419bc0df25cSNaga Harish K S V 
2420bc0df25cSNaga Harish K S V 	*pc = *port_config;
2421bc0df25cSNaga Harish K S V 
2422bc0df25cSNaga Harish K S V 	ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2423bc0df25cSNaga Harish K S V 	if (ret)
2424bc0df25cSNaga Harish K S V 		rte_free(pc);
2425bc0df25cSNaga Harish K S V 
2426bc0df25cSNaga Harish K S V 	return ret;
2427bc0df25cSNaga Harish K S V }
2428bc0df25cSNaga Harish K S V 
2429bc0df25cSNaga Harish K S V int
243099a2dd95SBruce Richardson rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
243199a2dd95SBruce Richardson 		struct rte_event_port_conf *port_config)
243299a2dd95SBruce Richardson {
243399a2dd95SBruce Richardson 	struct rte_event_port_conf *pc;
243499a2dd95SBruce Richardson 	int ret;
243599a2dd95SBruce Richardson 
243699a2dd95SBruce Richardson 	if (port_config == NULL)
243799a2dd95SBruce Richardson 		return -EINVAL;
2438bc0df25cSNaga Harish K S V 
243999a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
244099a2dd95SBruce Richardson 
244199a2dd95SBruce Richardson 	pc = rte_malloc(NULL, sizeof(*pc), 0);
244299a2dd95SBruce Richardson 	if (pc == NULL)
244399a2dd95SBruce Richardson 		return -ENOMEM;
244499a2dd95SBruce Richardson 	*pc = *port_config;
2445bc0df25cSNaga Harish K S V 
244699a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
244799a2dd95SBruce Richardson 					rxa_default_conf_cb,
244899a2dd95SBruce Richardson 					pc);
244999a2dd95SBruce Richardson 	if (ret)
245099a2dd95SBruce Richardson 		rte_free(pc);
245199a2dd95SBruce Richardson 	return ret;
245299a2dd95SBruce Richardson }
245399a2dd95SBruce Richardson 
245499a2dd95SBruce Richardson int
245599a2dd95SBruce Richardson rte_event_eth_rx_adapter_free(uint8_t id)
245699a2dd95SBruce Richardson {
2457a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
245899a2dd95SBruce Richardson 
245999a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
246099a2dd95SBruce Richardson 
246199a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
246299a2dd95SBruce Richardson 	if (rx_adapter == NULL)
246399a2dd95SBruce Richardson 		return -EINVAL;
246499a2dd95SBruce Richardson 
246599a2dd95SBruce Richardson 	if (rx_adapter->nb_queues) {
246699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
246799a2dd95SBruce Richardson 				rx_adapter->nb_queues);
246899a2dd95SBruce Richardson 		return -EBUSY;
246999a2dd95SBruce Richardson 	}
247099a2dd95SBruce Richardson 
247199a2dd95SBruce Richardson 	if (rx_adapter->default_cb_arg)
247299a2dd95SBruce Richardson 		rte_free(rx_adapter->conf_arg);
247399a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_devices);
2474b06bca69SNaga Harish K S V 	if (!rx_adapter->use_queue_event_buf)
2475bc0df25cSNaga Harish K S V 		rte_free(rx_adapter->event_enqueue_buffer.events);
247699a2dd95SBruce Richardson 	rte_free(rx_adapter);
247799a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = NULL;
247899a2dd95SBruce Richardson 
247999a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_free(id);
248099a2dd95SBruce Richardson 	return 0;
248199a2dd95SBruce Richardson }
248299a2dd95SBruce Richardson 
/*
 * Add an ethdev Rx queue (or all queues when rx_queue_id is -1) to the
 * adapter.  Validates the queue configuration against the device
 * capabilities, then either delegates to the PMD (internal event port)
 * or registers the queue with the SW service-based poll loop.
 */
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
		uint16_t eth_dev_id,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	uint32_t cap;
	struct event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct rte_event_eth_rx_adapter_vector_limits limits;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if ((rx_adapter == NULL) || (queue_conf == NULL))
		return -EINVAL;

	/* Capabilities are per (eventdev, ethdev) pair; they gate every
	 * check below.
	 */
	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			"eth port %" PRIu16, id, eth_dev_id);
		return ret;
	}

	/* Flow ID override may only be requested when the device pair
	 * advertises support for it.
	 */
	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
		&& (queue_conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
				" eth port: %" PRIu16 " adapter id: %" PRIu8,
				eth_dev_id, id);
		return -EINVAL;
	}

	/* Event vectorization: validate capability, then the requested
	 * vector size/timeout against the device limits, and finally
	 * that the supplied mempool elements are large enough to hold a
	 * vector of the requested size.
	 */
	if (queue_conf->rx_queue_flags &
	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {

		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}

		ret = rte_event_eth_rx_adapter_vector_limits_get(
			rx_adapter->eventdev_id, eth_dev_id, &limits);
		if (ret < 0) {
			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
		if (queue_conf->vector_sz < limits.min_sz ||
		    queue_conf->vector_sz > limits.max_sz ||
		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
		    queue_conf->vector_mp == NULL) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
		/* Each mempool element must fit the vector header plus
		 * vector_sz mbuf pointers.
		 */
		if (queue_conf->vector_mp->elt_size <
		    (sizeof(struct rte_event_vector) +
		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
	}

	/* Without MULTI_EVENTQ, only the "all queues" form (-1) is
	 * accepted; individual queues cannot map to distinct event
	 * queues.
	 */
	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
		(rx_queue_id != -1)) {
		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
			"event queue, eth port: %" PRIu16 " adapter id: %"
			PRIu8, eth_dev_id, id);
		return -EINVAL;
	}

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	/* Per-queue buffer mode requires a per-queue size; shared-buffer
	 * mode requires the per-queue size to be left at zero.
	 */
	if ((rx_adapter->use_queue_event_buf &&
	     queue_conf->event_buf_size == 0) ||
	    (!rx_adapter->use_queue_event_buf &&
	     queue_conf->event_buf_size != 0)) {
		RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		/* HW path: the PMD moves packets itself; lazily allocate
		 * the per-device queue-info array, then hand the queue to
		 * the driver.
		 */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
					-ENOTSUP);
		if (dev_info->rx_queue == NULL) {
			dev_info->rx_queue =
			    rte_zmalloc_socket(rx_adapter->mem_name,
					dev_info->dev->data->nb_rx_queues *
					sizeof(struct eth_rx_queue_info), 0,
					rx_adapter->socket_id);
			if (dev_info->rx_queue == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
				&rte_eth_devices[eth_dev_id],
				rx_queue_id, queue_conf);
		if (ret == 0) {
			dev_info->internal_event_port = 1;
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					1);
		}
	} else {
		/* SW path: serialize against the polling service, make
		 * sure the service is initialized, add the queue to the
		 * poll set and (de)activate the service based on the
		 * resulting queue count.
		 */
		rte_spinlock_lock(&rx_adapter->rx_lock);
		dev_info->internal_event_port = 0;
		ret = rxa_init_service(rx_adapter, id);
		if (ret == 0) {
			uint32_t service_id = rx_adapter->service_id;
			ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
					queue_conf);
			rte_service_component_runstate_set(service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
		}
		rte_spinlock_unlock(&rx_adapter->rx_lock);
	}

	rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
		rx_queue_id, queue_conf, ret);
	if (ret)
		return ret;

	return 0;
}
263399a2dd95SBruce Richardson 
263499a2dd95SBruce Richardson static int
263599a2dd95SBruce Richardson rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
263699a2dd95SBruce Richardson {
263799a2dd95SBruce Richardson 	limits->max_sz = MAX_VECTOR_SIZE;
263899a2dd95SBruce Richardson 	limits->min_sz = MIN_VECTOR_SIZE;
263999a2dd95SBruce Richardson 	limits->max_timeout_ns = MAX_VECTOR_NS;
264099a2dd95SBruce Richardson 	limits->min_timeout_ns = MIN_VECTOR_NS;
264199a2dd95SBruce Richardson 
264299a2dd95SBruce Richardson 	return 0;
264399a2dd95SBruce Richardson }
264499a2dd95SBruce Richardson 
/*
 * Remove an ethdev Rx queue (or all queues when rx_queue_id is -1) from
 * the adapter.  The HW (internal event port) path delegates to the PMD;
 * the SW path rebuilds the poll/WRR arrays without the deleted queue and
 * releases interrupt resources that are no longer needed.
 */
int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
				int32_t rx_queue_id)
{
	int ret = 0;
	struct rte_eventdev *dev;
	struct event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	uint32_t nb_rx_poll = 0;
	uint32_t nb_wrr = 0;
	uint32_t nb_rx_intr;
	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;
	int num_intr_vec;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret)
		return ret;

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		/* HW path: the PMD owns the queue; free the per-device
		 * queue-info array once its last queue is gone.
		 */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
				 -ENOTSUP);
		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
		if (ret == 0) {
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					0);
			if (dev_info->nb_dev_queues == 0) {
				rte_free(dev_info->rx_queue);
				dev_info->rx_queue = NULL;
			}
		}
	} else {
		/* SW path: compute the post-delete poll/intr/WRR counts
		 * and allocate replacement arrays before taking the lock,
		 * so the failure path leaves the adapter untouched.
		 */
		rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
			&nb_rx_poll, &nb_rx_intr, &nb_wrr);

		ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
			&rx_poll, &rx_wrr);
		if (ret)
			return ret;

		rte_spinlock_lock(&rx_adapter->rx_lock);

		/* Drop interrupt-mode state for the deleted queue(s) and,
		 * when no interrupt queues remain, the shared interrupt
		 * resources as well.
		 */
		num_intr_vec = 0;
		if (rx_adapter->num_rx_intr > nb_rx_intr) {

			num_intr_vec = rxa_nb_intr_vect(dev_info,
						rx_queue_id, 0);
			ret = rxa_del_intr_queue(rx_adapter, dev_info,
					rx_queue_id);
			if (ret)
				goto unlock_ret;
		}

		if (nb_rx_intr == 0) {
			ret = rxa_free_intr_resources(rx_adapter);
			if (ret)
				goto unlock_ret;
		}

		/* Remove the queue(s) and publish the rebuilt poll and
		 * WRR schedules in place of the old ones.
		 */
		rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
		rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);

		rte_free(rx_adapter->eth_rx_poll);
		rte_free(rx_adapter->wrr_sched);

		if (nb_rx_intr == 0) {
			rte_free(dev_info->intr_queue);
			dev_info->intr_queue = NULL;
		}

		rx_adapter->eth_rx_poll = rx_poll;
		rx_adapter->wrr_sched = rx_wrr;
		rx_adapter->wrr_len = nb_wrr;
		/*
		 * reset next poll start position (wrr_pos) to avoid buffer
		 * overrun when wrr_len is reduced in case of queue delete
		 */
		rx_adapter->wrr_pos = 0;
		rx_adapter->num_intr_vec += num_intr_vec;

		if (dev_info->nb_dev_queues == 0) {
			rte_free(dev_info->rx_queue);
			dev_info->rx_queue = NULL;
		}
unlock_ret:
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		if (ret) {
			/* Failure: discard the replacement arrays; the
			 * adapter still uses the old ones.
			 */
			rte_free(rx_poll);
			rte_free(rx_wrr);
			return ret;
		}

		/* Stop the service when the last queue is gone. */
		rte_service_component_runstate_set(rx_adapter->service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
	}

	rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
		rx_queue_id, ret);
	return ret;
}
276999a2dd95SBruce Richardson 
277099a2dd95SBruce Richardson int
277199a2dd95SBruce Richardson rte_event_eth_rx_adapter_vector_limits_get(
277299a2dd95SBruce Richardson 	uint8_t dev_id, uint16_t eth_port_id,
277399a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_vector_limits *limits)
277499a2dd95SBruce Richardson {
277599a2dd95SBruce Richardson 	struct rte_eventdev *dev;
277699a2dd95SBruce Richardson 	uint32_t cap;
277799a2dd95SBruce Richardson 	int ret;
277899a2dd95SBruce Richardson 
277999a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
278099a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
278199a2dd95SBruce Richardson 
278299a2dd95SBruce Richardson 	if (limits == NULL)
278399a2dd95SBruce Richardson 		return -EINVAL;
278499a2dd95SBruce Richardson 
278599a2dd95SBruce Richardson 	dev = &rte_eventdevs[dev_id];
278699a2dd95SBruce Richardson 
278799a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
278899a2dd95SBruce Richardson 	if (ret) {
278999a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
279099a2dd95SBruce Richardson 				 "eth port %" PRIu16,
279199a2dd95SBruce Richardson 				 dev_id, eth_port_id);
279299a2dd95SBruce Richardson 		return ret;
279399a2dd95SBruce Richardson 	}
279499a2dd95SBruce Richardson 
279599a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
279699a2dd95SBruce Richardson 		RTE_FUNC_PTR_OR_ERR_RET(
279799a2dd95SBruce Richardson 			*dev->dev_ops->eth_rx_adapter_vector_limits_get,
279899a2dd95SBruce Richardson 			-ENOTSUP);
279999a2dd95SBruce Richardson 		ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
280099a2dd95SBruce Richardson 			dev, &rte_eth_devices[eth_port_id], limits);
280199a2dd95SBruce Richardson 	} else {
280299a2dd95SBruce Richardson 		ret = rxa_sw_vector_limits(limits);
280399a2dd95SBruce Richardson 	}
280499a2dd95SBruce Richardson 
280599a2dd95SBruce Richardson 	return ret;
280699a2dd95SBruce Richardson }
280799a2dd95SBruce Richardson 
280899a2dd95SBruce Richardson int
280999a2dd95SBruce Richardson rte_event_eth_rx_adapter_start(uint8_t id)
281099a2dd95SBruce Richardson {
281199a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_start(id);
281299a2dd95SBruce Richardson 	return rxa_ctrl(id, 1);
281399a2dd95SBruce Richardson }
281499a2dd95SBruce Richardson 
281599a2dd95SBruce Richardson int
281699a2dd95SBruce Richardson rte_event_eth_rx_adapter_stop(uint8_t id)
281799a2dd95SBruce Richardson {
281899a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_stop(id);
281999a2dd95SBruce Richardson 	return rxa_ctrl(id, 0);
282099a2dd95SBruce Richardson }
282199a2dd95SBruce Richardson 
282299a2dd95SBruce Richardson int
282399a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_get(uint8_t id,
282499a2dd95SBruce Richardson 			       struct rte_event_eth_rx_adapter_stats *stats)
282599a2dd95SBruce Richardson {
2826a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
2827a256a743SPavan Nikhilesh 	struct eth_event_enqueue_buffer *buf;
282899a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
282999a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats dev_stats;
283099a2dd95SBruce Richardson 	struct rte_eventdev *dev;
283199a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
283299a2dd95SBruce Richardson 	uint32_t i;
283399a2dd95SBruce Richardson 	int ret;
283499a2dd95SBruce Richardson 
2835da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2836da781e64SGanapati Kundapura 		return -ENOMEM;
2837da781e64SGanapati Kundapura 
283899a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
283999a2dd95SBruce Richardson 
284099a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
284199a2dd95SBruce Richardson 	if (rx_adapter  == NULL || stats == NULL)
284299a2dd95SBruce Richardson 		return -EINVAL;
284399a2dd95SBruce Richardson 
284499a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
284599a2dd95SBruce Richardson 	memset(stats, 0, sizeof(*stats));
284699a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
284799a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
284899a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
284999a2dd95SBruce Richardson 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
285099a2dd95SBruce Richardson 			continue;
285199a2dd95SBruce Richardson 		ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
285299a2dd95SBruce Richardson 						&rte_eth_devices[i],
285399a2dd95SBruce Richardson 						&dev_stats);
285499a2dd95SBruce Richardson 		if (ret)
285599a2dd95SBruce Richardson 			continue;
285699a2dd95SBruce Richardson 		dev_stats_sum.rx_packets += dev_stats.rx_packets;
285799a2dd95SBruce Richardson 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
285899a2dd95SBruce Richardson 	}
285999a2dd95SBruce Richardson 
286099a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
286199a2dd95SBruce Richardson 		*stats = rx_adapter->stats;
286299a2dd95SBruce Richardson 
286399a2dd95SBruce Richardson 	stats->rx_packets += dev_stats_sum.rx_packets;
286499a2dd95SBruce Richardson 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2865bc0df25cSNaga Harish K S V 
2866814d0170SGanapati Kundapura 	if (!rx_adapter->use_queue_event_buf) {
2867814d0170SGanapati Kundapura 		buf = &rx_adapter->event_enqueue_buffer;
2868814d0170SGanapati Kundapura 		stats->rx_event_buf_count = buf->count;
2869814d0170SGanapati Kundapura 		stats->rx_event_buf_size = buf->events_size;
2870814d0170SGanapati Kundapura 	} else {
2871814d0170SGanapati Kundapura 		stats->rx_event_buf_count = 0;
2872814d0170SGanapati Kundapura 		stats->rx_event_buf_size = 0;
2873814d0170SGanapati Kundapura 	}
2874814d0170SGanapati Kundapura 
287599a2dd95SBruce Richardson 	return 0;
287699a2dd95SBruce Richardson }
287799a2dd95SBruce Richardson 
287899a2dd95SBruce Richardson int
287999a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_reset(uint8_t id)
288099a2dd95SBruce Richardson {
2881a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
288299a2dd95SBruce Richardson 	struct rte_eventdev *dev;
288399a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
288499a2dd95SBruce Richardson 	uint32_t i;
288599a2dd95SBruce Richardson 
2886da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2887da781e64SGanapati Kundapura 		return -ENOMEM;
2888da781e64SGanapati Kundapura 
288999a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
289099a2dd95SBruce Richardson 
289199a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
289299a2dd95SBruce Richardson 	if (rx_adapter == NULL)
289399a2dd95SBruce Richardson 		return -EINVAL;
289499a2dd95SBruce Richardson 
289599a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
289699a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
289799a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
289899a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
289999a2dd95SBruce Richardson 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
290099a2dd95SBruce Richardson 			continue;
290199a2dd95SBruce Richardson 		(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
290299a2dd95SBruce Richardson 							&rte_eth_devices[i]);
290399a2dd95SBruce Richardson 	}
290499a2dd95SBruce Richardson 
290599a2dd95SBruce Richardson 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
290699a2dd95SBruce Richardson 	return 0;
290799a2dd95SBruce Richardson }
290899a2dd95SBruce Richardson 
290999a2dd95SBruce Richardson int
291099a2dd95SBruce Richardson rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
291199a2dd95SBruce Richardson {
2912a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
291399a2dd95SBruce Richardson 
2914da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2915da781e64SGanapati Kundapura 		return -ENOMEM;
2916da781e64SGanapati Kundapura 
291799a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
291899a2dd95SBruce Richardson 
291999a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
292099a2dd95SBruce Richardson 	if (rx_adapter == NULL || service_id == NULL)
292199a2dd95SBruce Richardson 		return -EINVAL;
292299a2dd95SBruce Richardson 
292399a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
292499a2dd95SBruce Richardson 		*service_id = rx_adapter->service_id;
292599a2dd95SBruce Richardson 
292699a2dd95SBruce Richardson 	return rx_adapter->service_inited ? 0 : -ESRCH;
292799a2dd95SBruce Richardson }
292899a2dd95SBruce Richardson 
292999a2dd95SBruce Richardson int
293099a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_register(uint8_t id,
293199a2dd95SBruce Richardson 					uint16_t eth_dev_id,
293299a2dd95SBruce Richardson 					rte_event_eth_rx_adapter_cb_fn cb_fn,
293399a2dd95SBruce Richardson 					void *cb_arg)
293499a2dd95SBruce Richardson {
2935a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
293699a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
293799a2dd95SBruce Richardson 	uint32_t cap;
293899a2dd95SBruce Richardson 	int ret;
293999a2dd95SBruce Richardson 
294099a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
294199a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
294299a2dd95SBruce Richardson 
294399a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
294499a2dd95SBruce Richardson 	if (rx_adapter == NULL)
294599a2dd95SBruce Richardson 		return -EINVAL;
294699a2dd95SBruce Richardson 
294799a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
294899a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
294999a2dd95SBruce Richardson 		return -EINVAL;
295099a2dd95SBruce Richardson 
295199a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
295299a2dd95SBruce Richardson 						eth_dev_id,
295399a2dd95SBruce Richardson 						&cap);
295499a2dd95SBruce Richardson 	if (ret) {
295599a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
295699a2dd95SBruce Richardson 			"eth port %" PRIu16, id, eth_dev_id);
295799a2dd95SBruce Richardson 		return ret;
295899a2dd95SBruce Richardson 	}
295999a2dd95SBruce Richardson 
296099a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
296199a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
296299a2dd95SBruce Richardson 				PRIu16, eth_dev_id);
296399a2dd95SBruce Richardson 		return -EINVAL;
296499a2dd95SBruce Richardson 	}
296599a2dd95SBruce Richardson 
296699a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->rx_lock);
296799a2dd95SBruce Richardson 	dev_info->cb_fn = cb_fn;
296899a2dd95SBruce Richardson 	dev_info->cb_arg = cb_arg;
296999a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->rx_lock);
297099a2dd95SBruce Richardson 
297199a2dd95SBruce Richardson 	return 0;
297299a2dd95SBruce Richardson }
2973da781e64SGanapati Kundapura 
2974da781e64SGanapati Kundapura int
2975da781e64SGanapati Kundapura rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
2976da781e64SGanapati Kundapura 			uint16_t eth_dev_id,
2977da781e64SGanapati Kundapura 			uint16_t rx_queue_id,
2978da781e64SGanapati Kundapura 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2979da781e64SGanapati Kundapura {
2980da781e64SGanapati Kundapura 	struct rte_eventdev *dev;
2981a256a743SPavan Nikhilesh 	struct event_eth_rx_adapter *rx_adapter;
2982da781e64SGanapati Kundapura 	struct eth_device_info *dev_info;
2983da781e64SGanapati Kundapura 	struct eth_rx_queue_info *queue_info;
2984da781e64SGanapati Kundapura 	struct rte_event *qi_ev;
2985da781e64SGanapati Kundapura 	int ret;
2986da781e64SGanapati Kundapura 
2987da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2988da781e64SGanapati Kundapura 		return -ENOMEM;
2989da781e64SGanapati Kundapura 
2990da781e64SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2991da781e64SGanapati Kundapura 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2992da781e64SGanapati Kundapura 
2993da781e64SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2994da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
2995da781e64SGanapati Kundapura 		return -EINVAL;
2996da781e64SGanapati Kundapura 	}
2997da781e64SGanapati Kundapura 
2998da781e64SGanapati Kundapura 	if (queue_conf == NULL) {
2999da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
3000da781e64SGanapati Kundapura 		return -EINVAL;
3001da781e64SGanapati Kundapura 	}
3002da781e64SGanapati Kundapura 
3003da781e64SGanapati Kundapura 	rx_adapter = rxa_id_to_adapter(id);
3004da781e64SGanapati Kundapura 	if (rx_adapter == NULL)
3005da781e64SGanapati Kundapura 		return -EINVAL;
3006da781e64SGanapati Kundapura 
3007da781e64SGanapati Kundapura 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
3008da781e64SGanapati Kundapura 	if (dev_info->rx_queue == NULL ||
3009da781e64SGanapati Kundapura 	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
3010da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
3011da781e64SGanapati Kundapura 		return -EINVAL;
3012da781e64SGanapati Kundapura 	}
3013da781e64SGanapati Kundapura 
3014da781e64SGanapati Kundapura 	queue_info = &dev_info->rx_queue[rx_queue_id];
3015da781e64SGanapati Kundapura 	qi_ev = (struct rte_event *)&queue_info->event;
3016da781e64SGanapati Kundapura 
3017da781e64SGanapati Kundapura 	memset(queue_conf, 0, sizeof(*queue_conf));
3018da781e64SGanapati Kundapura 	queue_conf->rx_queue_flags = 0;
3019da781e64SGanapati Kundapura 	if (queue_info->flow_id_mask != 0)
3020da781e64SGanapati Kundapura 		queue_conf->rx_queue_flags |=
3021da781e64SGanapati Kundapura 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
3022da781e64SGanapati Kundapura 	queue_conf->servicing_weight = queue_info->wt;
3023da781e64SGanapati Kundapura 
3024da781e64SGanapati Kundapura 	memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
3025da781e64SGanapati Kundapura 
3026da781e64SGanapati Kundapura 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
3027da781e64SGanapati Kundapura 	if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
3028da781e64SGanapati Kundapura 		ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
3029da781e64SGanapati Kundapura 						&rte_eth_devices[eth_dev_id],
3030da781e64SGanapati Kundapura 						rx_queue_id,
3031da781e64SGanapati Kundapura 						queue_conf);
3032da781e64SGanapati Kundapura 		return ret;
3033da781e64SGanapati Kundapura 	}
3034da781e64SGanapati Kundapura 
3035da781e64SGanapati Kundapura 	return 0;
3036da781e64SGanapati Kundapura }

/* Add one member of a stats/conf struct to the telemetry dict 'd',
 * using the member's own name as the dictionary key.
 */
#define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s)
3039814d0170SGanapati Kundapura 
3040814d0170SGanapati Kundapura static int
3041814d0170SGanapati Kundapura handle_rxa_stats(const char *cmd __rte_unused,
3042814d0170SGanapati Kundapura 		 const char *params,
3043814d0170SGanapati Kundapura 		 struct rte_tel_data *d)
3044814d0170SGanapati Kundapura {
3045814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3046814d0170SGanapati Kundapura 	struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
3047814d0170SGanapati Kundapura 
3048814d0170SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3049814d0170SGanapati Kundapura 		return -1;
3050814d0170SGanapati Kundapura 
3051814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3052814d0170SGanapati Kundapura 	rx_adapter_id = atoi(params);
3053814d0170SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3054814d0170SGanapati Kundapura 
3055814d0170SGanapati Kundapura 	/* Get Rx adapter stats */
3056814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id,
3057814d0170SGanapati Kundapura 					       &rx_adptr_stats)) {
3058814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n");
3059814d0170SGanapati Kundapura 		return -1;
3060814d0170SGanapati Kundapura 	}
3061814d0170SGanapati Kundapura 
3062814d0170SGanapati Kundapura 	rte_tel_data_start_dict(d);
3063814d0170SGanapati Kundapura 	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3064814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_packets);
3065814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_poll_count);
3066814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_dropped);
3067814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry);
3068814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count);
3069814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size);
3070814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_count);
3071814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts);
3072814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles);
3073814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts);
3074814d0170SGanapati Kundapura 	RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets);
3075814d0170SGanapati Kundapura 
3076814d0170SGanapati Kundapura 	return 0;
3077814d0170SGanapati Kundapura }
3078814d0170SGanapati Kundapura 
3079814d0170SGanapati Kundapura static int
3080814d0170SGanapati Kundapura handle_rxa_stats_reset(const char *cmd __rte_unused,
3081814d0170SGanapati Kundapura 		       const char *params,
3082814d0170SGanapati Kundapura 		       struct rte_tel_data *d __rte_unused)
3083814d0170SGanapati Kundapura {
3084814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3085814d0170SGanapati Kundapura 
3086814d0170SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || ~isdigit(*params))
3087814d0170SGanapati Kundapura 		return -1;
3088814d0170SGanapati Kundapura 
3089814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3090814d0170SGanapati Kundapura 	rx_adapter_id = atoi(params);
3091814d0170SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3092814d0170SGanapati Kundapura 
3093814d0170SGanapati Kundapura 	/* Reset Rx adapter stats */
3094814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) {
3095814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n");
3096814d0170SGanapati Kundapura 		return -1;
3097814d0170SGanapati Kundapura 	}
3098814d0170SGanapati Kundapura 
3099814d0170SGanapati Kundapura 	return 0;
3100814d0170SGanapati Kundapura }
3101814d0170SGanapati Kundapura 
3102814d0170SGanapati Kundapura static int
3103814d0170SGanapati Kundapura handle_rxa_get_queue_conf(const char *cmd __rte_unused,
3104814d0170SGanapati Kundapura 			  const char *params,
3105814d0170SGanapati Kundapura 			  struct rte_tel_data *d)
3106814d0170SGanapati Kundapura {
3107814d0170SGanapati Kundapura 	uint8_t rx_adapter_id;
3108814d0170SGanapati Kundapura 	uint16_t rx_queue_id;
3109814d0170SGanapati Kundapura 	int eth_dev_id;
3110814d0170SGanapati Kundapura 	char *token, *l_params;
3111814d0170SGanapati Kundapura 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
3112814d0170SGanapati Kundapura 
3113814d0170SGanapati Kundapura 	if (params == NULL || strlen(params) == 0 || !isdigit(*params))
3114814d0170SGanapati Kundapura 		return -1;
3115814d0170SGanapati Kundapura 
3116814d0170SGanapati Kundapura 	/* Get Rx adapter ID from parameter string */
3117814d0170SGanapati Kundapura 	l_params = strdup(params);
3118814d0170SGanapati Kundapura 	token = strtok(l_params, ",");
3119814d0170SGanapati Kundapura 	rx_adapter_id = strtoul(token, NULL, 10);
3120814d0170SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL);
3121814d0170SGanapati Kundapura 
3122814d0170SGanapati Kundapura 	token = strtok(NULL, ",");
3123814d0170SGanapati Kundapura 	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3124814d0170SGanapati Kundapura 		return -1;
3125814d0170SGanapati Kundapura 
3126814d0170SGanapati Kundapura 	/* Get device ID from parameter string */
3127814d0170SGanapati Kundapura 	eth_dev_id = strtoul(token, NULL, 10);
3128814d0170SGanapati Kundapura 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(eth_dev_id, -EINVAL);
3129814d0170SGanapati Kundapura 
3130814d0170SGanapati Kundapura 	token = strtok(NULL, ",");
3131814d0170SGanapati Kundapura 	if (token == NULL || strlen(token) == 0 || !isdigit(*token))
3132814d0170SGanapati Kundapura 		return -1;
3133814d0170SGanapati Kundapura 
3134814d0170SGanapati Kundapura 	/* Get Rx queue ID from parameter string */
3135814d0170SGanapati Kundapura 	rx_queue_id = strtoul(token, NULL, 10);
3136814d0170SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
3137814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
3138814d0170SGanapati Kundapura 		return -EINVAL;
3139814d0170SGanapati Kundapura 	}
3140814d0170SGanapati Kundapura 
3141814d0170SGanapati Kundapura 	token = strtok(NULL, "\0");
3142814d0170SGanapati Kundapura 	if (token != NULL)
3143814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
3144814d0170SGanapati Kundapura 				 " telemetry command, igrnoring");
3145814d0170SGanapati Kundapura 
3146814d0170SGanapati Kundapura 	if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
3147814d0170SGanapati Kundapura 						    rx_queue_id, &queue_conf)) {
3148814d0170SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config");
3149814d0170SGanapati Kundapura 		return -1;
3150814d0170SGanapati Kundapura 	}
3151814d0170SGanapati Kundapura 
3152814d0170SGanapati Kundapura 	rte_tel_data_start_dict(d);
3153814d0170SGanapati Kundapura 	rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id);
3154814d0170SGanapati Kundapura 	rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id);
3155814d0170SGanapati Kundapura 	rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id);
3156814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf, rx_queue_flags);
3157814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf, servicing_weight);
3158814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, queue_id);
3159814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, sched_type);
3160814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, priority);
3161814d0170SGanapati Kundapura 	RXA_ADD_DICT(queue_conf.ev, flow_id);
3162814d0170SGanapati Kundapura 
3163814d0170SGanapati Kundapura 	return 0;
3164814d0170SGanapati Kundapura }
3165814d0170SGanapati Kundapura 
3166814d0170SGanapati Kundapura RTE_INIT(rxa_init_telemetry)
3167814d0170SGanapati Kundapura {
3168814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_stats",
3169814d0170SGanapati Kundapura 		handle_rxa_stats,
3170814d0170SGanapati Kundapura 		"Returns Rx adapter stats. Parameter: rxa_id");
3171814d0170SGanapati Kundapura 
3172814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_stats_reset",
3173814d0170SGanapati Kundapura 		handle_rxa_stats_reset,
3174814d0170SGanapati Kundapura 		"Reset Rx adapter stats. Parameter: rxa_id");
3175814d0170SGanapati Kundapura 
3176814d0170SGanapati Kundapura 	rte_telemetry_register_cmd("/eventdev/rxa_queue_conf",
3177814d0170SGanapati Kundapura 		handle_rxa_get_queue_conf,
3178814d0170SGanapati Kundapura 		"Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id");
3179814d0170SGanapati Kundapura }
3180