xref: /dpdk/lib/eventdev/rte_event_eth_rx_adapter.c (revision bc0df25c83d630b1619210e17a858e2e2fa1c59e)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2017 Intel Corporation.
399a2dd95SBruce Richardson  * All rights reserved.
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson #if defined(LINUX)
699a2dd95SBruce Richardson #include <sys/epoll.h>
799a2dd95SBruce Richardson #endif
899a2dd95SBruce Richardson #include <unistd.h>
999a2dd95SBruce Richardson 
1099a2dd95SBruce Richardson #include <rte_cycles.h>
1199a2dd95SBruce Richardson #include <rte_common.h>
1299a2dd95SBruce Richardson #include <rte_dev.h>
1399a2dd95SBruce Richardson #include <rte_errno.h>
14f9bdee26SKonstantin Ananyev #include <ethdev_driver.h>
1599a2dd95SBruce Richardson #include <rte_log.h>
1699a2dd95SBruce Richardson #include <rte_malloc.h>
1799a2dd95SBruce Richardson #include <rte_service_component.h>
1899a2dd95SBruce Richardson #include <rte_thash.h>
1999a2dd95SBruce Richardson #include <rte_interrupts.h>
2083ab470dSGanapati Kundapura #include <rte_mbuf_dyn.h>
2199a2dd95SBruce Richardson 
2299a2dd95SBruce Richardson #include "rte_eventdev.h"
2399a2dd95SBruce Richardson #include "eventdev_pmd.h"
2499a2dd95SBruce Richardson #include "rte_eventdev_trace.h"
2599a2dd95SBruce Richardson #include "rte_event_eth_rx_adapter.h"
2699a2dd95SBruce Richardson 
2799a2dd95SBruce Richardson #define BATCH_SIZE		32
2899a2dd95SBruce Richardson #define BLOCK_CNT_THRESHOLD	10
298113fd15SGanapati Kundapura #define ETH_EVENT_BUFFER_SIZE	(6*BATCH_SIZE)
3099a2dd95SBruce Richardson #define MAX_VECTOR_SIZE		1024
3199a2dd95SBruce Richardson #define MIN_VECTOR_SIZE		4
3299a2dd95SBruce Richardson #define MAX_VECTOR_NS		1E9
3399a2dd95SBruce Richardson #define MIN_VECTOR_NS		1E5
3499a2dd95SBruce Richardson 
3599a2dd95SBruce Richardson #define ETH_RX_ADAPTER_SERVICE_NAME_LEN	32
3699a2dd95SBruce Richardson #define ETH_RX_ADAPTER_MEM_NAME_LEN	32
3799a2dd95SBruce Richardson 
3899a2dd95SBruce Richardson #define RSS_KEY_SIZE	40
3999a2dd95SBruce Richardson /* value written to intr thread pipe to signal thread exit */
4099a2dd95SBruce Richardson #define ETH_BRIDGE_INTR_THREAD_EXIT	1
4199a2dd95SBruce Richardson /* Sentinel value to detect initialized file handle */
4299a2dd95SBruce Richardson #define INIT_FD		-1
4399a2dd95SBruce Richardson 
44da781e64SGanapati Kundapura #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array"
45da781e64SGanapati Kundapura 
/*
 * Used to store port and queue ID of interrupting Rx queue; the pair is
 * packed into a single pointer-sized value so it can travel through the
 * adapter's intr_ring, which carries void * entries.
 */
union queue_data {
	RTE_STD_C11
	void *ptr;	/* Pointer-sized view for ring enqueue/dequeue */
	struct {
		uint16_t port;	/* Ethernet device id of interrupting queue */
		uint16_t queue;	/* Rx queue id within that device */
	};
};
5799a2dd95SBruce Richardson 
/*
 * There is an instance of this struct per polled Rx queue added to the
 * adapter; the adapter's eth_rx_poll array holds one entry per
 * (port, queue) pair to visit in the WRR polling sequence.
 */
struct eth_rx_poll_entry {
	/* Eth port to poll */
	uint16_t eth_dev_id;
	/* Eth rx queue to poll */
	uint16_t eth_rx_qid;
};
6899a2dd95SBruce Richardson 
/* State for aggregating mbufs from one Rx queue into event vectors */
struct eth_rx_vector_data {
	TAILQ_ENTRY(eth_rx_vector_data) next;	/* Link in adapter's vector_list */
	uint16_t port;		/* Source ethdev port id */
	uint16_t queue;		/* Source Rx queue id */
	uint16_t max_vector_count;	/* Max mbufs per vector event */
	uint64_t event;		/* Event metadata used when the vector is enqueued */
	uint64_t ts;		/* Timestamp — presumably of vector start, used with
				 * vector_timeout_ticks for expiry; TODO confirm
				 */
	uint64_t vector_timeout_ticks;	/* Flush timeout for a partial vector */
	struct rte_mempool *vector_pool;	/* Pool supplying rte_event_vector */
	struct rte_event_vector *vector_ev;	/* Vector currently being filled */
} __rte_cache_aligned;

/* List of per-queue vector states awaiting a timed flush */
TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data);
8299a2dd95SBruce Richardson 
/* Instance per adapter: buffer of events pending enqueue to the event
 * device, filled from rte_eth_rx_burst() and drained toward the
 * eventdev port.
 */
struct rte_eth_event_enqueue_buffer {
	/* Count of events in this buffer */
	uint16_t count;
	/* Array of events in this buffer */
	struct rte_event *events;
	/* size of event buffer */
	uint16_t events_size;
	/* Event enqueue happens from head */
	uint16_t head;
	/* New packets from rte_eth_rx_burst is enqued from tail */
	uint16_t tail;
	/* last element in the buffer before rollover */
	uint16_t last;
	/* NOTE(review): companion mask/marker for the rollover handling
	 * above — usage not visible in this chunk, confirm at use sites.
	 */
	uint16_t last_mask;
};
9999a2dd95SBruce Richardson 
/* Per-adapter instance state. Cache aligned: it is read/written from
 * the data-path service function as well as from control-path config.
 */
struct rte_event_eth_rx_adapter {
	/* RSS key (the _be suffix suggests big-endian byte order —
	 * confirm where it is loaded)
	 */
	uint8_t rss_key_be[RSS_KEY_SIZE];
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Per ethernet device structure */
	struct eth_device_info *eth_devices;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t rx_lock;
	/* Max mbufs processed in any service function invocation */
	uint32_t max_nb_rx;
	/* Receive queues that need to be polled */
	struct eth_rx_poll_entry *eth_rx_poll;
	/* Size of the eth_rx_poll array */
	uint16_t num_rx_polled;
	/* Weighted round robin schedule */
	uint32_t *wrr_sched;
	/* wrr_sched[] size */
	uint32_t wrr_len;
	/* Next entry in wrr[] to begin polling */
	uint32_t wrr_pos;
	/* Event burst buffer */
	struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
	/* Vector enable flag */
	uint8_t ena_vector;
	/* Timestamp of previous vector expiry list traversal */
	uint64_t prev_expiry_ts;
	/* Minimum ticks to wait before traversing expiry list */
	uint64_t vector_tmo_ticks;
	/* vector list */
	struct eth_rx_vector_data_list vector_list;
	/* Per adapter stats */
	struct rte_event_eth_rx_adapter_stats stats;
	/* Block count, counts up to BLOCK_CNT_THRESHOLD */
	uint16_t enq_block_count;
	/* Block start ts */
	uint64_t rx_enq_block_start_ts;
	/* epoll fd used to wait for Rx interrupts */
	int epd;
	/* Number of interrupt driven Rx queues */
	uint32_t num_rx_intr;
	/* Used to send <dev id, queue id> of interrupting Rx queues from
	 * the interrupt thread to the Rx thread
	 */
	struct rte_ring *intr_ring;
	/* Rx Queue data (dev id, queue id) for the last non-empty
	 * queue polled
	 */
	union queue_data qd;
	/* queue_data is valid */
	int qd_valid;
	/* Interrupt ring lock, synchronizes Rx thread
	 * and interrupt thread
	 */
	rte_spinlock_t intr_ring_lock;
	/* event array passed to rte_poll_wait */
	struct rte_epoll_event *epoll_events;
	/* Count of interrupt vectors in use */
	uint32_t num_intr_vec;
	/* Thread blocked on Rx interrupts */
	pthread_t rx_intr_thread;
	/* Configuration callback for rte_service configuration */
	rte_event_eth_rx_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Total count of Rx queues in adapter */
	uint32_t nb_queues;
	/* Memory allocation name */
	char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* Adapter started flag */
	uint8_t rxa_started;
	/* Adapter ID */
	uint8_t id;
} __rte_cache_aligned;
18499a2dd95SBruce Richardson 
/* Per eth device state tracked by the adapter */
struct eth_device_info {
	struct rte_eth_dev *dev;	/* Underlying ethdev */
	struct eth_rx_queue_info *rx_queue;	/* Per-queue array; may be NULL */
	/* Rx callback */
	rte_event_eth_rx_adapter_cb_fn cb_fn;
	/* Rx callback argument */
	void *cb_arg;
	/* Set if ethdev->eventdev packet transfer uses a
	 * hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set if the adapter is processing rx queues for
	 * this eth device and packet processing has been
	 * started, allows for the code to know if the PMD
	 * rx_adapter_stop callback needs to be invoked
	 */
	uint8_t dev_rx_started;
	/* Number of queues added for this device */
	uint16_t nb_dev_queues;
	/* Number of poll based queues
	 * If nb_rx_poll > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t nb_rx_poll;
	/* Number of interrupt based queues
	 * If nb_rx_intr > 0, the start callback will
	 * be invoked if not already invoked.
	 */
	uint16_t nb_rx_intr;
	/* Number of queues that use the shared interrupt */
	uint16_t nb_shared_intr;
	/* sum(wrr(q)) for all queues within the device
	 * useful when deleting all device queues
	 */
	uint32_t wrr_len;
	/* Intr based queue index to start polling from, this is used
	 * if the number of shared interrupts is non-zero
	 */
	uint16_t next_q_idx;
	/* Intr based queue indices */
	uint16_t *intr_queue;
	/* device generates per Rx queue interrupt for queue index
	 * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
	 */
	int multi_intr_cap;
	/* shared interrupt enabled */
	int shared_intr_enabled;
};
23499a2dd95SBruce Richardson 
/* Per Rx queue state */
struct eth_rx_queue_info {
	int queue_enabled;	/* True if added */
	int intr_enabled;	/* True if queue uses interrupt mode */
	uint8_t ena_vector;	/* True if event vectorization is enabled */
	uint16_t wt;		/* Polling weight; 0 means interrupt mode */
	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
	/* Prefilled event metadata for this queue — presumably the
	 * rte_event word minus the per-packet fields; confirm at fill site
	 */
	uint64_t event;
	struct eth_rx_vector_data vector_data;	/* Vector aggregation state */
};
24599a2dd95SBruce Richardson 
/* Per-process table of adapter instances, presumably indexed by adapter
 * id (ids are bounded by rxa_validate_id); allocated elsewhere in this
 * file — see RXA_ADAPTER_ARRAY.
 */
static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;

/* Enable dynamic timestamp field in mbuf */
static uint64_t event_eth_rx_timestamp_dynflag;
/* Offset of the registered mbuf timestamp dynfield; -1 until registered */
static int event_eth_rx_timestamp_dynfield_offset = -1;
25183ab470dSGanapati Kundapura 
/* Return a pointer to the mbuf's dynamic timestamp field, located at
 * the registered offset event_eth_rx_timestamp_dynfield_offset.
 * Valid only after the dynfield has been registered (offset != -1).
 */
static inline rte_mbuf_timestamp_t *
rxa_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
25883ab470dSGanapati Kundapura 
25999a2dd95SBruce Richardson static inline int
26099a2dd95SBruce Richardson rxa_validate_id(uint8_t id)
26199a2dd95SBruce Richardson {
26299a2dd95SBruce Richardson 	return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
26399a2dd95SBruce Richardson }
26499a2dd95SBruce Richardson 
/* Validate an adapter id; on failure log an error and return 'retval'
 * from the *calling* function. Only usable inside value-returning
 * functions.
 */
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!rxa_validate_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
27199a2dd95SBruce Richardson 
27299a2dd95SBruce Richardson static inline int
27399a2dd95SBruce Richardson rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
27499a2dd95SBruce Richardson {
27599a2dd95SBruce Richardson 	return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
27699a2dd95SBruce Richardson }
27799a2dd95SBruce Richardson 
/* Greatest common divisor (iterative Euclid's algorithm).
 *
 * Returns gcd(a, b); by the usual convention gcd(a, 0) == a. The
 * original recursive version computed a % b unconditionally, which is
 * undefined behavior (division by zero) when called with b == 0 —
 * the loop guard below makes that case well defined instead.
 */
static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
	while (b != 0) {
		uint16_t r = a % b;

		a = b;
		b = r;
	}

	return a;
}
28599a2dd95SBruce Richardson 
/* Returns the next queue in the polling sequence
 *
 * Interleaved weighted round-robin: a queue is eligible when its weight
 * is >= the current weight threshold *cw; each full pass over the array
 * lowers the threshold by gcd of all weights, wrapping back to max_wt.
 * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
 *
 * n      - number of entries in eth_rx_poll
 * cw     - current weight threshold, carried across calls (start at -1)
 * max_wt - maximum weight over all polled queues
 * gcd    - gcd of all polled queue weights
 * prev   - index returned by the previous call (start at -1)
 *
 * NOTE(review): loops forever if no queue weight ever reaches *cw;
 * callers are expected to pass only polled queues (wt != 0).
 */
static int
rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
	 unsigned int n, int *cw,
	 struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
	 uint16_t gcd, int prev)
{
	int i = prev;
	uint16_t w;

	while (1) {
		uint16_t q;
		uint16_t d;

		i = (i + 1) % n;
		if (i == 0) {
			/* Completed a pass: lower the threshold so that
			 * lighter queues become eligible.
			 */
			*cw = *cw - gcd;
			if (*cw <= 0)
				*cw = max_wt;
		}

		q = eth_rx_poll[i].eth_rx_qid;
		d = eth_rx_poll[i].eth_dev_id;
		w = rx_adapter->eth_devices[d].rx_queue[q].wt;

		if ((int)w >= *cw)
			return i;
	}
}
31899a2dd95SBruce Richardson 
31999a2dd95SBruce Richardson static inline int
32099a2dd95SBruce Richardson rxa_shared_intr(struct eth_device_info *dev_info,
32199a2dd95SBruce Richardson 	int rx_queue_id)
32299a2dd95SBruce Richardson {
32399a2dd95SBruce Richardson 	int multi_intr_cap;
32499a2dd95SBruce Richardson 
32599a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle == NULL)
32699a2dd95SBruce Richardson 		return 0;
32799a2dd95SBruce Richardson 
32899a2dd95SBruce Richardson 	multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
32999a2dd95SBruce Richardson 	return !multi_intr_cap ||
33099a2dd95SBruce Richardson 		rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
33199a2dd95SBruce Richardson }
33299a2dd95SBruce Richardson 
33399a2dd95SBruce Richardson static inline int
33499a2dd95SBruce Richardson rxa_intr_queue(struct eth_device_info *dev_info,
33599a2dd95SBruce Richardson 	int rx_queue_id)
33699a2dd95SBruce Richardson {
33799a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
33899a2dd95SBruce Richardson 
33999a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
34099a2dd95SBruce Richardson 	return dev_info->rx_queue &&
34199a2dd95SBruce Richardson 		!dev_info->internal_event_port &&
34299a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt == 0;
34399a2dd95SBruce Richardson }
34499a2dd95SBruce Richardson 
34599a2dd95SBruce Richardson static inline int
34699a2dd95SBruce Richardson rxa_polled_queue(struct eth_device_info *dev_info,
34799a2dd95SBruce Richardson 	int rx_queue_id)
34899a2dd95SBruce Richardson {
34999a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
35099a2dd95SBruce Richardson 
35199a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[rx_queue_id];
35299a2dd95SBruce Richardson 	return !dev_info->internal_event_port &&
35399a2dd95SBruce Richardson 		dev_info->rx_queue &&
35499a2dd95SBruce Richardson 		queue_info->queue_enabled && queue_info->wt != 0;
35599a2dd95SBruce Richardson }
35699a2dd95SBruce Richardson 
/* Calculate change in number of interrupt vectors after Rx queue ID is
 * added (add != 0) or deleted (add == 0). Returns a positive delta on
 * add, a negative delta on delete. rx_queue_id == -1 means all queues
 * of the device.
 */
static int
rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
	uint16_t i;
	int n, s;
	uint16_t nbq;

	nbq = dev_info->dev->data->nb_rx_queues;
	n = 0; /* non shared count */
	s = 0; /* shared count */

	if (rx_queue_id == -1) {
		/* Count queues whose interrupt-mode status would change:
		 * on add, queues not yet in interrupt mode; on delete,
		 * queues currently in interrupt mode.
		 */
		for (i = 0; i < nbq; i++) {
			if (!rxa_shared_intr(dev_info, i))
				n += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
			else
				s += add ? !rxa_intr_queue(dev_info, i) :
					rxa_intr_queue(dev_info, i);
		}

		/* All shared-interrupt queues consume one vector total:
		 * count it only on the first add / last delete.
		 */
		if (s > 0) {
			if ((add && dev_info->nb_shared_intr == 0) ||
				(!add && dev_info->nb_shared_intr))
				n += 1;
		}
	} else {
		if (!rxa_shared_intr(dev_info, rx_queue_id))
			n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
				rxa_intr_queue(dev_info, rx_queue_id);
		else
			/* Shared vector changes only on first add or when
			 * deleting the last shared-interrupt queue.
			 */
			n = add ? !dev_info->nb_shared_intr :
				dev_info->nb_shared_intr == 1;
	}

	return add ? n : -n;
}
39599a2dd95SBruce Richardson 
39699a2dd95SBruce Richardson /* Calculate nb_rx_intr after deleting interrupt mode rx queues
39799a2dd95SBruce Richardson  */
39899a2dd95SBruce Richardson static void
39999a2dd95SBruce Richardson rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
40099a2dd95SBruce Richardson 			struct eth_device_info *dev_info,
40199a2dd95SBruce Richardson 			int rx_queue_id,
40299a2dd95SBruce Richardson 			uint32_t *nb_rx_intr)
40399a2dd95SBruce Richardson {
40499a2dd95SBruce Richardson 	uint32_t intr_diff;
40599a2dd95SBruce Richardson 
40699a2dd95SBruce Richardson 	if (rx_queue_id == -1)
40799a2dd95SBruce Richardson 		intr_diff = dev_info->nb_rx_intr;
40899a2dd95SBruce Richardson 	else
40999a2dd95SBruce Richardson 		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
41099a2dd95SBruce Richardson 
41199a2dd95SBruce Richardson 	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
41299a2dd95SBruce Richardson }
41399a2dd95SBruce Richardson 
/* Calculate nb_rx_* after adding interrupt mode rx queues, newly added
 * interrupt queues could currently be poll mode Rx queues, so the poll
 * and WRR totals shrink by whatever those queues contributed.
 */
static void
rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info,
			int rx_queue_id,
			uint32_t *nb_rx_poll,
			uint32_t *nb_rx_intr,
			uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		/* All device queues become interrupt driven */
		intr_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_intr;
		poll_diff = dev_info->nb_rx_poll;
		wrr_len_diff = dev_info->wrr_len;
	} else {
		/* Single queue: counts change only if its mode flips */
		intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
					0;
	}

	*nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
}
44599a2dd95SBruce Richardson 
44699a2dd95SBruce Richardson /* Calculate size of the eth_rx_poll and wrr_sched arrays
44799a2dd95SBruce Richardson  * after deleting poll mode rx queues
44899a2dd95SBruce Richardson  */
44999a2dd95SBruce Richardson static void
45099a2dd95SBruce Richardson rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
45199a2dd95SBruce Richardson 			struct eth_device_info *dev_info,
45299a2dd95SBruce Richardson 			int rx_queue_id,
45399a2dd95SBruce Richardson 			uint32_t *nb_rx_poll,
45499a2dd95SBruce Richardson 			uint32_t *nb_wrr)
45599a2dd95SBruce Richardson {
45699a2dd95SBruce Richardson 	uint32_t poll_diff;
45799a2dd95SBruce Richardson 	uint32_t wrr_len_diff;
45899a2dd95SBruce Richardson 
45999a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
46099a2dd95SBruce Richardson 		poll_diff = dev_info->nb_rx_poll;
46199a2dd95SBruce Richardson 		wrr_len_diff = dev_info->wrr_len;
46299a2dd95SBruce Richardson 	} else {
46399a2dd95SBruce Richardson 		poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
46499a2dd95SBruce Richardson 		wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
46599a2dd95SBruce Richardson 					0;
46699a2dd95SBruce Richardson 	}
46799a2dd95SBruce Richardson 
46899a2dd95SBruce Richardson 	*nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
46999a2dd95SBruce Richardson 	*nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
47099a2dd95SBruce Richardson }
47199a2dd95SBruce Richardson 
/* Calculate nb_rx_* after adding poll mode rx queues with weight wt;
 * queues currently in interrupt mode are converted, so the interrupt
 * total shrinks accordingly.
 */
static void
rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
			struct eth_device_info *dev_info,
			int rx_queue_id,
			uint16_t wt,
			uint32_t *nb_rx_poll,
			uint32_t *nb_rx_intr,
			uint32_t *nb_wrr)
{
	uint32_t intr_diff;
	uint32_t poll_diff;
	uint32_t wrr_len_diff;

	if (rx_queue_id == -1) {
		/* All device queues become polled at weight wt; the WRR
		 * delta is the new total weight minus the old one.
		 */
		intr_diff = dev_info->nb_rx_intr;
		poll_diff = dev_info->dev->data->nb_rx_queues -
						dev_info->nb_rx_poll;
		wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
				- dev_info->wrr_len;
	} else {
		/* Single queue: if already polled only its weight delta
		 * counts, otherwise its full weight is added.
		 */
		intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
		poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
		wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
				wt - dev_info->rx_queue[rx_queue_id].wt :
				wt;
	}

	*nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
	*nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
	*nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
}
50599a2dd95SBruce Richardson 
50699a2dd95SBruce Richardson /* Calculate nb_rx_* after adding rx_queue_id */
50799a2dd95SBruce Richardson static void
50899a2dd95SBruce Richardson rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
50999a2dd95SBruce Richardson 		struct eth_device_info *dev_info,
51099a2dd95SBruce Richardson 		int rx_queue_id,
51199a2dd95SBruce Richardson 		uint16_t wt,
51299a2dd95SBruce Richardson 		uint32_t *nb_rx_poll,
51399a2dd95SBruce Richardson 		uint32_t *nb_rx_intr,
51499a2dd95SBruce Richardson 		uint32_t *nb_wrr)
51599a2dd95SBruce Richardson {
51699a2dd95SBruce Richardson 	if (wt != 0)
51799a2dd95SBruce Richardson 		rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
51899a2dd95SBruce Richardson 					wt, nb_rx_poll, nb_rx_intr, nb_wrr);
51999a2dd95SBruce Richardson 	else
52099a2dd95SBruce Richardson 		rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
52199a2dd95SBruce Richardson 					nb_rx_poll, nb_rx_intr, nb_wrr);
52299a2dd95SBruce Richardson }
52399a2dd95SBruce Richardson 
52499a2dd95SBruce Richardson /* Calculate nb_rx_* after deleting rx_queue_id */
52599a2dd95SBruce Richardson static void
52699a2dd95SBruce Richardson rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
52799a2dd95SBruce Richardson 		struct eth_device_info *dev_info,
52899a2dd95SBruce Richardson 		int rx_queue_id,
52999a2dd95SBruce Richardson 		uint32_t *nb_rx_poll,
53099a2dd95SBruce Richardson 		uint32_t *nb_rx_intr,
53199a2dd95SBruce Richardson 		uint32_t *nb_wrr)
53299a2dd95SBruce Richardson {
53399a2dd95SBruce Richardson 	rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
53499a2dd95SBruce Richardson 				nb_wrr);
53599a2dd95SBruce Richardson 	rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
53699a2dd95SBruce Richardson 				nb_rx_intr);
53799a2dd95SBruce Richardson }
53899a2dd95SBruce Richardson 
53999a2dd95SBruce Richardson /*
54099a2dd95SBruce Richardson  * Allocate the rx_poll array
54199a2dd95SBruce Richardson  */
54299a2dd95SBruce Richardson static struct eth_rx_poll_entry *
54399a2dd95SBruce Richardson rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
54499a2dd95SBruce Richardson 	uint32_t num_rx_polled)
54599a2dd95SBruce Richardson {
54699a2dd95SBruce Richardson 	size_t len;
54799a2dd95SBruce Richardson 
54899a2dd95SBruce Richardson 	len  = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
54999a2dd95SBruce Richardson 							RTE_CACHE_LINE_SIZE);
55099a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
55199a2dd95SBruce Richardson 				len,
55299a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
55399a2dd95SBruce Richardson 				rx_adapter->socket_id);
55499a2dd95SBruce Richardson }
55599a2dd95SBruce Richardson 
55699a2dd95SBruce Richardson /*
55799a2dd95SBruce Richardson  * Allocate the WRR array
55899a2dd95SBruce Richardson  */
55999a2dd95SBruce Richardson static uint32_t *
56099a2dd95SBruce Richardson rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
56199a2dd95SBruce Richardson {
56299a2dd95SBruce Richardson 	size_t len;
56399a2dd95SBruce Richardson 
56499a2dd95SBruce Richardson 	len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
56599a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE);
56699a2dd95SBruce Richardson 	return  rte_zmalloc_socket(rx_adapter->mem_name,
56799a2dd95SBruce Richardson 				len,
56899a2dd95SBruce Richardson 				RTE_CACHE_LINE_SIZE,
56999a2dd95SBruce Richardson 				rx_adapter->socket_id);
57099a2dd95SBruce Richardson }
57199a2dd95SBruce Richardson 
57299a2dd95SBruce Richardson static int
57399a2dd95SBruce Richardson rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
57499a2dd95SBruce Richardson 		uint32_t nb_poll,
57599a2dd95SBruce Richardson 		uint32_t nb_wrr,
57699a2dd95SBruce Richardson 		struct eth_rx_poll_entry **rx_poll,
57799a2dd95SBruce Richardson 		uint32_t **wrr_sched)
57899a2dd95SBruce Richardson {
57999a2dd95SBruce Richardson 
58099a2dd95SBruce Richardson 	if (nb_poll == 0) {
58199a2dd95SBruce Richardson 		*rx_poll = NULL;
58299a2dd95SBruce Richardson 		*wrr_sched = NULL;
58399a2dd95SBruce Richardson 		return 0;
58499a2dd95SBruce Richardson 	}
58599a2dd95SBruce Richardson 
58699a2dd95SBruce Richardson 	*rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
58799a2dd95SBruce Richardson 	if (*rx_poll == NULL) {
58899a2dd95SBruce Richardson 		*wrr_sched = NULL;
58999a2dd95SBruce Richardson 		return -ENOMEM;
59099a2dd95SBruce Richardson 	}
59199a2dd95SBruce Richardson 
59299a2dd95SBruce Richardson 	*wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
59399a2dd95SBruce Richardson 	if (*wrr_sched == NULL) {
59499a2dd95SBruce Richardson 		rte_free(*rx_poll);
59599a2dd95SBruce Richardson 		return -ENOMEM;
59699a2dd95SBruce Richardson 	}
59799a2dd95SBruce Richardson 	return 0;
59899a2dd95SBruce Richardson }
59999a2dd95SBruce Richardson 
60099a2dd95SBruce Richardson /* Precalculate WRR polling sequence for all queues in rx_adapter */
60199a2dd95SBruce Richardson static void
60299a2dd95SBruce Richardson rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
60399a2dd95SBruce Richardson 		struct eth_rx_poll_entry *rx_poll,
60499a2dd95SBruce Richardson 		uint32_t *rx_wrr)
60599a2dd95SBruce Richardson {
60699a2dd95SBruce Richardson 	uint16_t d;
60799a2dd95SBruce Richardson 	uint16_t q;
60899a2dd95SBruce Richardson 	unsigned int i;
60999a2dd95SBruce Richardson 	int prev = -1;
61099a2dd95SBruce Richardson 	int cw = -1;
61199a2dd95SBruce Richardson 
61299a2dd95SBruce Richardson 	/* Initialize variables for calculation of wrr schedule */
61399a2dd95SBruce Richardson 	uint16_t max_wrr_pos = 0;
61499a2dd95SBruce Richardson 	unsigned int poll_q = 0;
61599a2dd95SBruce Richardson 	uint16_t max_wt = 0;
61699a2dd95SBruce Richardson 	uint16_t gcd = 0;
61799a2dd95SBruce Richardson 
61899a2dd95SBruce Richardson 	if (rx_poll == NULL)
61999a2dd95SBruce Richardson 		return;
62099a2dd95SBruce Richardson 
62199a2dd95SBruce Richardson 	/* Generate array of all queues to poll, the size of this
62299a2dd95SBruce Richardson 	 * array is poll_q
62399a2dd95SBruce Richardson 	 */
62499a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(d) {
62599a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
62699a2dd95SBruce Richardson 		struct eth_device_info *dev_info =
62799a2dd95SBruce Richardson 				&rx_adapter->eth_devices[d];
62899a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
62999a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
63099a2dd95SBruce Richardson 			continue;
63199a2dd95SBruce Richardson 		if (dev_info->internal_event_port)
63299a2dd95SBruce Richardson 			continue;
63399a2dd95SBruce Richardson 		dev_info->wrr_len = 0;
63499a2dd95SBruce Richardson 		for (q = 0; q < nb_rx_queues; q++) {
63599a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info =
63699a2dd95SBruce Richardson 				&dev_info->rx_queue[q];
63799a2dd95SBruce Richardson 			uint16_t wt;
63899a2dd95SBruce Richardson 
63999a2dd95SBruce Richardson 			if (!rxa_polled_queue(dev_info, q))
64099a2dd95SBruce Richardson 				continue;
64199a2dd95SBruce Richardson 			wt = queue_info->wt;
64299a2dd95SBruce Richardson 			rx_poll[poll_q].eth_dev_id = d;
64399a2dd95SBruce Richardson 			rx_poll[poll_q].eth_rx_qid = q;
64499a2dd95SBruce Richardson 			max_wrr_pos += wt;
64599a2dd95SBruce Richardson 			dev_info->wrr_len += wt;
64699a2dd95SBruce Richardson 			max_wt = RTE_MAX(max_wt, wt);
64799a2dd95SBruce Richardson 			gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
64899a2dd95SBruce Richardson 			poll_q++;
64999a2dd95SBruce Richardson 		}
65099a2dd95SBruce Richardson 	}
65199a2dd95SBruce Richardson 
65299a2dd95SBruce Richardson 	/* Generate polling sequence based on weights */
65399a2dd95SBruce Richardson 	prev = -1;
65499a2dd95SBruce Richardson 	cw = -1;
65599a2dd95SBruce Richardson 	for (i = 0; i < max_wrr_pos; i++) {
65699a2dd95SBruce Richardson 		rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
65799a2dd95SBruce Richardson 				     rx_poll, max_wt, gcd, prev);
65899a2dd95SBruce Richardson 		prev = rx_wrr[i];
65999a2dd95SBruce Richardson 	}
66099a2dd95SBruce Richardson }
66199a2dd95SBruce Richardson 
66299a2dd95SBruce Richardson static inline void
66399a2dd95SBruce Richardson rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
66499a2dd95SBruce Richardson 	struct rte_ipv6_hdr **ipv6_hdr)
66599a2dd95SBruce Richardson {
66699a2dd95SBruce Richardson 	struct rte_ether_hdr *eth_hdr =
66799a2dd95SBruce Richardson 		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
66899a2dd95SBruce Richardson 	struct rte_vlan_hdr *vlan_hdr;
66999a2dd95SBruce Richardson 
67099a2dd95SBruce Richardson 	*ipv4_hdr = NULL;
67199a2dd95SBruce Richardson 	*ipv6_hdr = NULL;
67299a2dd95SBruce Richardson 
67399a2dd95SBruce Richardson 	switch (eth_hdr->ether_type) {
67499a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
67599a2dd95SBruce Richardson 		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
67699a2dd95SBruce Richardson 		break;
67799a2dd95SBruce Richardson 
67899a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
67999a2dd95SBruce Richardson 		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
68099a2dd95SBruce Richardson 		break;
68199a2dd95SBruce Richardson 
68299a2dd95SBruce Richardson 	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
68399a2dd95SBruce Richardson 		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
68499a2dd95SBruce Richardson 		switch (vlan_hdr->eth_proto) {
68599a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
68699a2dd95SBruce Richardson 			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
68799a2dd95SBruce Richardson 			break;
68899a2dd95SBruce Richardson 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
68999a2dd95SBruce Richardson 			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
69099a2dd95SBruce Richardson 			break;
69199a2dd95SBruce Richardson 		default:
69299a2dd95SBruce Richardson 			break;
69399a2dd95SBruce Richardson 		}
69499a2dd95SBruce Richardson 		break;
69599a2dd95SBruce Richardson 
69699a2dd95SBruce Richardson 	default:
69799a2dd95SBruce Richardson 		break;
69899a2dd95SBruce Richardson 	}
69999a2dd95SBruce Richardson }
70099a2dd95SBruce Richardson 
70199a2dd95SBruce Richardson /* Calculate RSS hash for IPv4/6 */
70299a2dd95SBruce Richardson static inline uint32_t
70399a2dd95SBruce Richardson rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
70499a2dd95SBruce Richardson {
70599a2dd95SBruce Richardson 	uint32_t input_len;
70699a2dd95SBruce Richardson 	void *tuple;
70799a2dd95SBruce Richardson 	struct rte_ipv4_tuple ipv4_tuple;
70899a2dd95SBruce Richardson 	struct rte_ipv6_tuple ipv6_tuple;
70999a2dd95SBruce Richardson 	struct rte_ipv4_hdr *ipv4_hdr;
71099a2dd95SBruce Richardson 	struct rte_ipv6_hdr *ipv6_hdr;
71199a2dd95SBruce Richardson 
71299a2dd95SBruce Richardson 	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
71399a2dd95SBruce Richardson 
71499a2dd95SBruce Richardson 	if (ipv4_hdr) {
71599a2dd95SBruce Richardson 		ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
71699a2dd95SBruce Richardson 		ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
71799a2dd95SBruce Richardson 		tuple = &ipv4_tuple;
71899a2dd95SBruce Richardson 		input_len = RTE_THASH_V4_L3_LEN;
71999a2dd95SBruce Richardson 	} else if (ipv6_hdr) {
72099a2dd95SBruce Richardson 		rte_thash_load_v6_addrs(ipv6_hdr,
72199a2dd95SBruce Richardson 					(union rte_thash_tuple *)&ipv6_tuple);
72299a2dd95SBruce Richardson 		tuple = &ipv6_tuple;
72399a2dd95SBruce Richardson 		input_len = RTE_THASH_V6_L3_LEN;
72499a2dd95SBruce Richardson 	} else
72599a2dd95SBruce Richardson 		return 0;
72699a2dd95SBruce Richardson 
72799a2dd95SBruce Richardson 	return rte_softrss_be(tuple, input_len, rss_key_be);
72899a2dd95SBruce Richardson }
72999a2dd95SBruce Richardson 
73099a2dd95SBruce Richardson static inline int
73199a2dd95SBruce Richardson rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
73299a2dd95SBruce Richardson {
73399a2dd95SBruce Richardson 	return !!rx_adapter->enq_block_count;
73499a2dd95SBruce Richardson }
73599a2dd95SBruce Richardson 
73699a2dd95SBruce Richardson static inline void
73799a2dd95SBruce Richardson rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
73899a2dd95SBruce Richardson {
73999a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts)
74099a2dd95SBruce Richardson 		return;
74199a2dd95SBruce Richardson 
74299a2dd95SBruce Richardson 	rx_adapter->enq_block_count++;
74399a2dd95SBruce Richardson 	if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
74499a2dd95SBruce Richardson 		return;
74599a2dd95SBruce Richardson 
74699a2dd95SBruce Richardson 	rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
74799a2dd95SBruce Richardson }
74899a2dd95SBruce Richardson 
74999a2dd95SBruce Richardson static inline void
75099a2dd95SBruce Richardson rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
75199a2dd95SBruce Richardson 		    struct rte_event_eth_rx_adapter_stats *stats)
75299a2dd95SBruce Richardson {
75399a2dd95SBruce Richardson 	if (unlikely(!stats->rx_enq_start_ts))
75499a2dd95SBruce Richardson 		stats->rx_enq_start_ts = rte_get_tsc_cycles();
75599a2dd95SBruce Richardson 
75699a2dd95SBruce Richardson 	if (likely(!rxa_enq_blocked(rx_adapter)))
75799a2dd95SBruce Richardson 		return;
75899a2dd95SBruce Richardson 
75999a2dd95SBruce Richardson 	rx_adapter->enq_block_count = 0;
76099a2dd95SBruce Richardson 	if (rx_adapter->rx_enq_block_start_ts) {
76199a2dd95SBruce Richardson 		stats->rx_enq_end_ts = rte_get_tsc_cycles();
76299a2dd95SBruce Richardson 		stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
76399a2dd95SBruce Richardson 		    rx_adapter->rx_enq_block_start_ts;
76499a2dd95SBruce Richardson 		rx_adapter->rx_enq_block_start_ts = 0;
76599a2dd95SBruce Richardson 	}
76699a2dd95SBruce Richardson }
76799a2dd95SBruce Richardson 
76899a2dd95SBruce Richardson /* Enqueue buffered events to event device */
76999a2dd95SBruce Richardson static inline uint16_t
77099a2dd95SBruce Richardson rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
77199a2dd95SBruce Richardson {
77299a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf =
77399a2dd95SBruce Richardson 	    &rx_adapter->event_enqueue_buffer;
77499a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
7758113fd15SGanapati Kundapura 	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
77699a2dd95SBruce Richardson 
7778113fd15SGanapati Kundapura 	if (!count)
77899a2dd95SBruce Richardson 		return 0;
77999a2dd95SBruce Richardson 
78099a2dd95SBruce Richardson 	uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
78199a2dd95SBruce Richardson 					rx_adapter->event_port_id,
7828113fd15SGanapati Kundapura 					&buf->events[buf->head],
7838113fd15SGanapati Kundapura 					count);
7848113fd15SGanapati Kundapura 	if (n != count)
78599a2dd95SBruce Richardson 		stats->rx_enq_retry++;
7868113fd15SGanapati Kundapura 
7878113fd15SGanapati Kundapura 	buf->head += n;
7888113fd15SGanapati Kundapura 
7898113fd15SGanapati Kundapura 	if (buf->last && n == count) {
7908113fd15SGanapati Kundapura 		uint16_t n1;
7918113fd15SGanapati Kundapura 
7928113fd15SGanapati Kundapura 		n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
7938113fd15SGanapati Kundapura 					rx_adapter->event_port_id,
7948113fd15SGanapati Kundapura 					&buf->events[0],
7958113fd15SGanapati Kundapura 					buf->tail);
7968113fd15SGanapati Kundapura 
7978113fd15SGanapati Kundapura 		if (n1 != buf->tail)
7988113fd15SGanapati Kundapura 			stats->rx_enq_retry++;
7998113fd15SGanapati Kundapura 
8008113fd15SGanapati Kundapura 		buf->last = 0;
8018113fd15SGanapati Kundapura 		buf->head = n1;
8028113fd15SGanapati Kundapura 		buf->last_mask = 0;
8038113fd15SGanapati Kundapura 		n += n1;
80499a2dd95SBruce Richardson 	}
80599a2dd95SBruce Richardson 
80699a2dd95SBruce Richardson 	n ? rxa_enq_block_end_ts(rx_adapter, stats) :
80799a2dd95SBruce Richardson 		rxa_enq_block_start_ts(rx_adapter);
80899a2dd95SBruce Richardson 
80999a2dd95SBruce Richardson 	buf->count -= n;
81099a2dd95SBruce Richardson 	stats->rx_enq_count += n;
81199a2dd95SBruce Richardson 
81299a2dd95SBruce Richardson 	return n;
81399a2dd95SBruce Richardson }
81499a2dd95SBruce Richardson 
81599a2dd95SBruce Richardson static inline void
81699a2dd95SBruce Richardson rxa_init_vector(struct rte_event_eth_rx_adapter *rx_adapter,
81799a2dd95SBruce Richardson 		struct eth_rx_vector_data *vec)
81899a2dd95SBruce Richardson {
81999a2dd95SBruce Richardson 	vec->vector_ev->nb_elem = 0;
82099a2dd95SBruce Richardson 	vec->vector_ev->port = vec->port;
82199a2dd95SBruce Richardson 	vec->vector_ev->queue = vec->queue;
82299a2dd95SBruce Richardson 	vec->vector_ev->attr_valid = true;
82399a2dd95SBruce Richardson 	TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next);
82499a2dd95SBruce Richardson }
82599a2dd95SBruce Richardson 
82699a2dd95SBruce Richardson static inline uint16_t
82799a2dd95SBruce Richardson rxa_create_event_vector(struct rte_event_eth_rx_adapter *rx_adapter,
82899a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info,
82999a2dd95SBruce Richardson 			struct rte_eth_event_enqueue_buffer *buf,
83099a2dd95SBruce Richardson 			struct rte_mbuf **mbufs, uint16_t num)
83199a2dd95SBruce Richardson {
83299a2dd95SBruce Richardson 	struct rte_event *ev = &buf->events[buf->count];
83399a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
83499a2dd95SBruce Richardson 	uint16_t filled, space, sz;
83599a2dd95SBruce Richardson 
83699a2dd95SBruce Richardson 	filled = 0;
83799a2dd95SBruce Richardson 	vec = &queue_info->vector_data;
83899a2dd95SBruce Richardson 
83999a2dd95SBruce Richardson 	if (vec->vector_ev == NULL) {
84099a2dd95SBruce Richardson 		if (rte_mempool_get(vec->vector_pool,
84199a2dd95SBruce Richardson 				    (void **)&vec->vector_ev) < 0) {
84299a2dd95SBruce Richardson 			rte_pktmbuf_free_bulk(mbufs, num);
84399a2dd95SBruce Richardson 			return 0;
84499a2dd95SBruce Richardson 		}
84599a2dd95SBruce Richardson 		rxa_init_vector(rx_adapter, vec);
84699a2dd95SBruce Richardson 	}
84799a2dd95SBruce Richardson 	while (num) {
84899a2dd95SBruce Richardson 		if (vec->vector_ev->nb_elem == vec->max_vector_count) {
84999a2dd95SBruce Richardson 			/* Event ready. */
85099a2dd95SBruce Richardson 			ev->event = vec->event;
85199a2dd95SBruce Richardson 			ev->vec = vec->vector_ev;
85299a2dd95SBruce Richardson 			ev++;
85399a2dd95SBruce Richardson 			filled++;
85499a2dd95SBruce Richardson 			vec->vector_ev = NULL;
85599a2dd95SBruce Richardson 			TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
85699a2dd95SBruce Richardson 			if (rte_mempool_get(vec->vector_pool,
85799a2dd95SBruce Richardson 					    (void **)&vec->vector_ev) < 0) {
85899a2dd95SBruce Richardson 				rte_pktmbuf_free_bulk(mbufs, num);
85999a2dd95SBruce Richardson 				return 0;
86099a2dd95SBruce Richardson 			}
86199a2dd95SBruce Richardson 			rxa_init_vector(rx_adapter, vec);
86299a2dd95SBruce Richardson 		}
86399a2dd95SBruce Richardson 
86499a2dd95SBruce Richardson 		space = vec->max_vector_count - vec->vector_ev->nb_elem;
86599a2dd95SBruce Richardson 		sz = num > space ? space : num;
86699a2dd95SBruce Richardson 		memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs,
86799a2dd95SBruce Richardson 		       sizeof(void *) * sz);
86899a2dd95SBruce Richardson 		vec->vector_ev->nb_elem += sz;
86999a2dd95SBruce Richardson 		num -= sz;
87099a2dd95SBruce Richardson 		mbufs += sz;
87199a2dd95SBruce Richardson 		vec->ts = rte_rdtsc();
87299a2dd95SBruce Richardson 	}
87399a2dd95SBruce Richardson 
87499a2dd95SBruce Richardson 	if (vec->vector_ev->nb_elem == vec->max_vector_count) {
87599a2dd95SBruce Richardson 		ev->event = vec->event;
87699a2dd95SBruce Richardson 		ev->vec = vec->vector_ev;
87799a2dd95SBruce Richardson 		ev++;
87899a2dd95SBruce Richardson 		filled++;
87999a2dd95SBruce Richardson 		vec->vector_ev = NULL;
88099a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
88199a2dd95SBruce Richardson 	}
88299a2dd95SBruce Richardson 
88399a2dd95SBruce Richardson 	return filled;
88499a2dd95SBruce Richardson }
88599a2dd95SBruce Richardson 
88699a2dd95SBruce Richardson static inline void
88799a2dd95SBruce Richardson rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
88899a2dd95SBruce Richardson 		uint16_t eth_dev_id,
88999a2dd95SBruce Richardson 		uint16_t rx_queue_id,
89099a2dd95SBruce Richardson 		struct rte_mbuf **mbufs,
89199a2dd95SBruce Richardson 		uint16_t num)
89299a2dd95SBruce Richardson {
89399a2dd95SBruce Richardson 	uint32_t i;
89499a2dd95SBruce Richardson 	struct eth_device_info *dev_info =
89599a2dd95SBruce Richardson 					&rx_adapter->eth_devices[eth_dev_id];
89699a2dd95SBruce Richardson 	struct eth_rx_queue_info *eth_rx_queue_info =
89799a2dd95SBruce Richardson 					&dev_info->rx_queue[rx_queue_id];
89899a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf =
89999a2dd95SBruce Richardson 					&rx_adapter->event_enqueue_buffer;
9008113fd15SGanapati Kundapura 	uint16_t new_tail = buf->tail;
90199a2dd95SBruce Richardson 	uint64_t event = eth_rx_queue_info->event;
90299a2dd95SBruce Richardson 	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
90399a2dd95SBruce Richardson 	struct rte_mbuf *m = mbufs[0];
90499a2dd95SBruce Richardson 	uint32_t rss_mask;
90599a2dd95SBruce Richardson 	uint32_t rss;
90699a2dd95SBruce Richardson 	int do_rss;
90799a2dd95SBruce Richardson 	uint16_t nb_cb;
90899a2dd95SBruce Richardson 	uint16_t dropped;
90983ab470dSGanapati Kundapura 	uint64_t ts, ts_mask;
91099a2dd95SBruce Richardson 
91199a2dd95SBruce Richardson 	if (!eth_rx_queue_info->ena_vector) {
91283ab470dSGanapati Kundapura 		ts = m->ol_flags & event_eth_rx_timestamp_dynflag ?
91383ab470dSGanapati Kundapura 						0 : rte_get_tsc_cycles();
91483ab470dSGanapati Kundapura 
91583ab470dSGanapati Kundapura 		/* 0xffff ffff ffff ffff if PKT_RX_TIMESTAMP is set,
91683ab470dSGanapati Kundapura 		 * otherwise 0
91783ab470dSGanapati Kundapura 		 */
91883ab470dSGanapati Kundapura 		ts_mask = (uint64_t)(!(m->ol_flags &
91983ab470dSGanapati Kundapura 				       event_eth_rx_timestamp_dynflag)) - 1ULL;
92083ab470dSGanapati Kundapura 
92199a2dd95SBruce Richardson 		/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
92299a2dd95SBruce Richardson 		rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
92399a2dd95SBruce Richardson 		do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
92499a2dd95SBruce Richardson 		for (i = 0; i < num; i++) {
9258113fd15SGanapati Kundapura 			struct rte_event *ev;
9268113fd15SGanapati Kundapura 
92799a2dd95SBruce Richardson 			m = mbufs[i];
92883ab470dSGanapati Kundapura 			*rxa_timestamp_dynfield(m) = ts |
92983ab470dSGanapati Kundapura 					(*rxa_timestamp_dynfield(m) & ts_mask);
93083ab470dSGanapati Kundapura 
9318113fd15SGanapati Kundapura 			ev = &buf->events[new_tail];
93299a2dd95SBruce Richardson 
93399a2dd95SBruce Richardson 			rss = do_rss ? rxa_do_softrss(m, rx_adapter->rss_key_be)
93499a2dd95SBruce Richardson 				     : m->hash.rss;
93599a2dd95SBruce Richardson 			ev->event = event;
93699a2dd95SBruce Richardson 			ev->flow_id = (rss & ~flow_id_mask) |
93799a2dd95SBruce Richardson 				      (ev->flow_id & flow_id_mask);
93899a2dd95SBruce Richardson 			ev->mbuf = m;
9398113fd15SGanapati Kundapura 			new_tail++;
94099a2dd95SBruce Richardson 		}
94199a2dd95SBruce Richardson 	} else {
94299a2dd95SBruce Richardson 		num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info,
94399a2dd95SBruce Richardson 					      buf, mbufs, num);
94499a2dd95SBruce Richardson 	}
94599a2dd95SBruce Richardson 
94699a2dd95SBruce Richardson 	if (num && dev_info->cb_fn) {
94799a2dd95SBruce Richardson 
94899a2dd95SBruce Richardson 		dropped = 0;
94999a2dd95SBruce Richardson 		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
9508113fd15SGanapati Kundapura 				       buf->last |
951*bc0df25cSNaga Harish K S V 				       (buf->events_size & ~buf->last_mask),
9528113fd15SGanapati Kundapura 				       buf->count >= BATCH_SIZE ?
9538113fd15SGanapati Kundapura 						buf->count - BATCH_SIZE : 0,
9548113fd15SGanapati Kundapura 				       &buf->events[buf->tail],
9558113fd15SGanapati Kundapura 				       num,
9568113fd15SGanapati Kundapura 				       dev_info->cb_arg,
9578113fd15SGanapati Kundapura 				       &dropped);
95899a2dd95SBruce Richardson 		if (unlikely(nb_cb > num))
95999a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
96099a2dd95SBruce Richardson 				nb_cb, num);
96199a2dd95SBruce Richardson 		else
96299a2dd95SBruce Richardson 			num = nb_cb;
96399a2dd95SBruce Richardson 		if (dropped)
96499a2dd95SBruce Richardson 			rx_adapter->stats.rx_dropped += dropped;
96599a2dd95SBruce Richardson 	}
96699a2dd95SBruce Richardson 
96799a2dd95SBruce Richardson 	buf->count += num;
9688113fd15SGanapati Kundapura 	buf->tail += num;
9698113fd15SGanapati Kundapura }
9708113fd15SGanapati Kundapura 
9718113fd15SGanapati Kundapura static inline bool
9728113fd15SGanapati Kundapura rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf)
9738113fd15SGanapati Kundapura {
9748113fd15SGanapati Kundapura 	uint32_t nb_req = buf->tail + BATCH_SIZE;
9758113fd15SGanapati Kundapura 
9768113fd15SGanapati Kundapura 	if (!buf->last) {
977*bc0df25cSNaga Harish K S V 		if (nb_req <= buf->events_size)
9788113fd15SGanapati Kundapura 			return true;
9798113fd15SGanapati Kundapura 
9808113fd15SGanapati Kundapura 		if (buf->head >= BATCH_SIZE) {
9818113fd15SGanapati Kundapura 			buf->last_mask = ~0;
9828113fd15SGanapati Kundapura 			buf->last = buf->tail;
9838113fd15SGanapati Kundapura 			buf->tail = 0;
9848113fd15SGanapati Kundapura 			return true;
9858113fd15SGanapati Kundapura 		}
9868113fd15SGanapati Kundapura 	}
9878113fd15SGanapati Kundapura 
9888113fd15SGanapati Kundapura 	return nb_req <= buf->head;
98999a2dd95SBruce Richardson }
99099a2dd95SBruce Richardson 
99199a2dd95SBruce Richardson /* Enqueue packets from  <port, q>  to event buffer */
99299a2dd95SBruce Richardson static inline uint32_t
99399a2dd95SBruce Richardson rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
99499a2dd95SBruce Richardson 	uint16_t port_id,
99599a2dd95SBruce Richardson 	uint16_t queue_id,
99699a2dd95SBruce Richardson 	uint32_t rx_count,
99799a2dd95SBruce Richardson 	uint32_t max_rx,
99899a2dd95SBruce Richardson 	int *rxq_empty)
99999a2dd95SBruce Richardson {
100099a2dd95SBruce Richardson 	struct rte_mbuf *mbufs[BATCH_SIZE];
100199a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf =
100299a2dd95SBruce Richardson 					&rx_adapter->event_enqueue_buffer;
100399a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats *stats =
100499a2dd95SBruce Richardson 					&rx_adapter->stats;
100599a2dd95SBruce Richardson 	uint16_t n;
100699a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
100799a2dd95SBruce Richardson 
100899a2dd95SBruce Richardson 	if (rxq_empty)
100999a2dd95SBruce Richardson 		*rxq_empty = 0;
101099a2dd95SBruce Richardson 	/* Don't do a batch dequeue from the rx queue if there isn't
101199a2dd95SBruce Richardson 	 * enough space in the enqueue buffer.
101299a2dd95SBruce Richardson 	 */
10138113fd15SGanapati Kundapura 	while (rxa_pkt_buf_available(buf)) {
101499a2dd95SBruce Richardson 		if (buf->count >= BATCH_SIZE)
101599a2dd95SBruce Richardson 			rxa_flush_event_buffer(rx_adapter);
101699a2dd95SBruce Richardson 
101799a2dd95SBruce Richardson 		stats->rx_poll_count++;
101899a2dd95SBruce Richardson 		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
101999a2dd95SBruce Richardson 		if (unlikely(!n)) {
102099a2dd95SBruce Richardson 			if (rxq_empty)
102199a2dd95SBruce Richardson 				*rxq_empty = 1;
102299a2dd95SBruce Richardson 			break;
102399a2dd95SBruce Richardson 		}
102499a2dd95SBruce Richardson 		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
102599a2dd95SBruce Richardson 		nb_rx += n;
102699a2dd95SBruce Richardson 		if (rx_count + nb_rx > max_rx)
102799a2dd95SBruce Richardson 			break;
102899a2dd95SBruce Richardson 	}
102999a2dd95SBruce Richardson 
103099a2dd95SBruce Richardson 	if (buf->count > 0)
103199a2dd95SBruce Richardson 		rxa_flush_event_buffer(rx_adapter);
103299a2dd95SBruce Richardson 
103399a2dd95SBruce Richardson 	return nb_rx;
103499a2dd95SBruce Richardson }
103599a2dd95SBruce Richardson 
103699a2dd95SBruce Richardson static inline void
103799a2dd95SBruce Richardson rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
103899a2dd95SBruce Richardson 		void *data)
103999a2dd95SBruce Richardson {
104099a2dd95SBruce Richardson 	uint16_t port_id;
104199a2dd95SBruce Richardson 	uint16_t queue;
104299a2dd95SBruce Richardson 	int err;
104399a2dd95SBruce Richardson 	union queue_data qd;
104499a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
104599a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
104699a2dd95SBruce Richardson 	int *intr_enabled;
104799a2dd95SBruce Richardson 
104899a2dd95SBruce Richardson 	qd.ptr = data;
104999a2dd95SBruce Richardson 	port_id = qd.port;
105099a2dd95SBruce Richardson 	queue = qd.queue;
105199a2dd95SBruce Richardson 
105299a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[port_id];
105399a2dd95SBruce Richardson 	queue_info = &dev_info->rx_queue[queue];
105499a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
105599a2dd95SBruce Richardson 	if (rxa_shared_intr(dev_info, queue))
105699a2dd95SBruce Richardson 		intr_enabled = &dev_info->shared_intr_enabled;
105799a2dd95SBruce Richardson 	else
105899a2dd95SBruce Richardson 		intr_enabled = &queue_info->intr_enabled;
105999a2dd95SBruce Richardson 
106099a2dd95SBruce Richardson 	if (*intr_enabled) {
106199a2dd95SBruce Richardson 		*intr_enabled = 0;
106299a2dd95SBruce Richardson 		err = rte_ring_enqueue(rx_adapter->intr_ring, data);
106399a2dd95SBruce Richardson 		/* Entry should always be available.
106499a2dd95SBruce Richardson 		 * The ring size equals the maximum number of interrupt
106599a2dd95SBruce Richardson 		 * vectors supported (an interrupt vector is shared in
106699a2dd95SBruce Richardson 		 * case of shared interrupts)
106799a2dd95SBruce Richardson 		 */
106899a2dd95SBruce Richardson 		if (err)
106999a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
107099a2dd95SBruce Richardson 				" to ring: %s", strerror(-err));
107199a2dd95SBruce Richardson 		else
107299a2dd95SBruce Richardson 			rte_eth_dev_rx_intr_disable(port_id, queue);
107399a2dd95SBruce Richardson 	}
107499a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
107599a2dd95SBruce Richardson }
107699a2dd95SBruce Richardson 
107799a2dd95SBruce Richardson static int
107899a2dd95SBruce Richardson rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
107999a2dd95SBruce Richardson 			uint32_t num_intr_vec)
108099a2dd95SBruce Richardson {
108199a2dd95SBruce Richardson 	if (rx_adapter->num_intr_vec + num_intr_vec >
108299a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE) {
108399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
108499a2dd95SBruce Richardson 		" %d needed %d limit %d", rx_adapter->num_intr_vec,
108599a2dd95SBruce Richardson 		num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
108699a2dd95SBruce Richardson 		return -ENOSPC;
108799a2dd95SBruce Richardson 	}
108899a2dd95SBruce Richardson 
108999a2dd95SBruce Richardson 	return 0;
109099a2dd95SBruce Richardson }
109199a2dd95SBruce Richardson 
109299a2dd95SBruce Richardson /* Delete entries for (dev, queue) from the interrupt ring */
109399a2dd95SBruce Richardson static void
109499a2dd95SBruce Richardson rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
109599a2dd95SBruce Richardson 			struct eth_device_info *dev_info,
109699a2dd95SBruce Richardson 			uint16_t rx_queue_id)
109799a2dd95SBruce Richardson {
109899a2dd95SBruce Richardson 	int i, n;
109999a2dd95SBruce Richardson 	union queue_data qd;
110099a2dd95SBruce Richardson 
110199a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->intr_ring_lock);
110299a2dd95SBruce Richardson 
110399a2dd95SBruce Richardson 	n = rte_ring_count(rx_adapter->intr_ring);
110499a2dd95SBruce Richardson 	for (i = 0; i < n; i++) {
110599a2dd95SBruce Richardson 		rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
110699a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id)) {
110799a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id &&
110899a2dd95SBruce Richardson 				qd.queue == rx_queue_id)
110999a2dd95SBruce Richardson 				continue;
111099a2dd95SBruce Richardson 		} else {
111199a2dd95SBruce Richardson 			if (qd.port == dev_info->dev->data->port_id)
111299a2dd95SBruce Richardson 				continue;
111399a2dd95SBruce Richardson 		}
111499a2dd95SBruce Richardson 		rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
111599a2dd95SBruce Richardson 	}
111699a2dd95SBruce Richardson 
111799a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
111899a2dd95SBruce Richardson }
111999a2dd95SBruce Richardson 
112099a2dd95SBruce Richardson /* pthread callback handling interrupt mode receive queues
112199a2dd95SBruce Richardson  * After receiving an Rx interrupt, it enqueues the port id and queue id of the
112299a2dd95SBruce Richardson  * interrupting queue to the adapter's ring buffer for interrupt events.
112399a2dd95SBruce Richardson  * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
112499a2dd95SBruce Richardson  * the adapter service function.
112599a2dd95SBruce Richardson  */
112699a2dd95SBruce Richardson static void *
112799a2dd95SBruce Richardson rxa_intr_thread(void *arg)
112899a2dd95SBruce Richardson {
112999a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter = arg;
113099a2dd95SBruce Richardson 	struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
113199a2dd95SBruce Richardson 	int n, i;
113299a2dd95SBruce Richardson 
113399a2dd95SBruce Richardson 	while (1) {
113499a2dd95SBruce Richardson 		n = rte_epoll_wait(rx_adapter->epd, epoll_events,
113599a2dd95SBruce Richardson 				RTE_EVENT_ETH_INTR_RING_SIZE, -1);
113699a2dd95SBruce Richardson 		if (unlikely(n < 0))
113799a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
113899a2dd95SBruce Richardson 					n);
113999a2dd95SBruce Richardson 		for (i = 0; i < n; i++) {
114099a2dd95SBruce Richardson 			rxa_intr_ring_enqueue(rx_adapter,
114199a2dd95SBruce Richardson 					epoll_events[i].epdata.data);
114299a2dd95SBruce Richardson 		}
114399a2dd95SBruce Richardson 	}
114499a2dd95SBruce Richardson 
114599a2dd95SBruce Richardson 	return NULL;
114699a2dd95SBruce Richardson }
114799a2dd95SBruce Richardson 
114899a2dd95SBruce Richardson /* Dequeue <port, q> from interrupt ring and enqueue received
114999a2dd95SBruce Richardson  * mbufs to eventdev
115099a2dd95SBruce Richardson  */
115199a2dd95SBruce Richardson static inline uint32_t
115299a2dd95SBruce Richardson rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
115399a2dd95SBruce Richardson {
115499a2dd95SBruce Richardson 	uint32_t n;
115599a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
115699a2dd95SBruce Richardson 	int rxq_empty;
115799a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf;
115899a2dd95SBruce Richardson 	rte_spinlock_t *ring_lock;
115999a2dd95SBruce Richardson 	uint8_t max_done = 0;
116099a2dd95SBruce Richardson 
116199a2dd95SBruce Richardson 	if (rx_adapter->num_rx_intr == 0)
116299a2dd95SBruce Richardson 		return 0;
116399a2dd95SBruce Richardson 
116499a2dd95SBruce Richardson 	if (rte_ring_count(rx_adapter->intr_ring) == 0
116599a2dd95SBruce Richardson 		&& !rx_adapter->qd_valid)
116699a2dd95SBruce Richardson 		return 0;
116799a2dd95SBruce Richardson 
116899a2dd95SBruce Richardson 	buf = &rx_adapter->event_enqueue_buffer;
116999a2dd95SBruce Richardson 	ring_lock = &rx_adapter->intr_ring_lock;
117099a2dd95SBruce Richardson 
117199a2dd95SBruce Richardson 	if (buf->count >= BATCH_SIZE)
117299a2dd95SBruce Richardson 		rxa_flush_event_buffer(rx_adapter);
117399a2dd95SBruce Richardson 
11748113fd15SGanapati Kundapura 	while (rxa_pkt_buf_available(buf)) {
117599a2dd95SBruce Richardson 		struct eth_device_info *dev_info;
117699a2dd95SBruce Richardson 		uint16_t port;
117799a2dd95SBruce Richardson 		uint16_t queue;
117899a2dd95SBruce Richardson 		union queue_data qd  = rx_adapter->qd;
117999a2dd95SBruce Richardson 		int err;
118099a2dd95SBruce Richardson 
118199a2dd95SBruce Richardson 		if (!rx_adapter->qd_valid) {
118299a2dd95SBruce Richardson 			struct eth_rx_queue_info *queue_info;
118399a2dd95SBruce Richardson 
118499a2dd95SBruce Richardson 			rte_spinlock_lock(ring_lock);
118599a2dd95SBruce Richardson 			err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
118699a2dd95SBruce Richardson 			if (err) {
118799a2dd95SBruce Richardson 				rte_spinlock_unlock(ring_lock);
118899a2dd95SBruce Richardson 				break;
118999a2dd95SBruce Richardson 			}
119099a2dd95SBruce Richardson 
119199a2dd95SBruce Richardson 			port = qd.port;
119299a2dd95SBruce Richardson 			queue = qd.queue;
119399a2dd95SBruce Richardson 			rx_adapter->qd = qd;
119499a2dd95SBruce Richardson 			rx_adapter->qd_valid = 1;
119599a2dd95SBruce Richardson 			dev_info = &rx_adapter->eth_devices[port];
119699a2dd95SBruce Richardson 			if (rxa_shared_intr(dev_info, queue))
119799a2dd95SBruce Richardson 				dev_info->shared_intr_enabled = 1;
119899a2dd95SBruce Richardson 			else {
119999a2dd95SBruce Richardson 				queue_info = &dev_info->rx_queue[queue];
120099a2dd95SBruce Richardson 				queue_info->intr_enabled = 1;
120199a2dd95SBruce Richardson 			}
120299a2dd95SBruce Richardson 			rte_eth_dev_rx_intr_enable(port, queue);
120399a2dd95SBruce Richardson 			rte_spinlock_unlock(ring_lock);
120499a2dd95SBruce Richardson 		} else {
120599a2dd95SBruce Richardson 			port = qd.port;
120699a2dd95SBruce Richardson 			queue = qd.queue;
120799a2dd95SBruce Richardson 
120899a2dd95SBruce Richardson 			dev_info = &rx_adapter->eth_devices[port];
120999a2dd95SBruce Richardson 		}
121099a2dd95SBruce Richardson 
121199a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, queue)) {
121299a2dd95SBruce Richardson 			uint16_t i;
121399a2dd95SBruce Richardson 			uint16_t nb_queues;
121499a2dd95SBruce Richardson 
121599a2dd95SBruce Richardson 			nb_queues = dev_info->dev->data->nb_rx_queues;
121699a2dd95SBruce Richardson 			n = 0;
121799a2dd95SBruce Richardson 			for (i = dev_info->next_q_idx; i < nb_queues; i++) {
121899a2dd95SBruce Richardson 				uint8_t enq_buffer_full;
121999a2dd95SBruce Richardson 
122099a2dd95SBruce Richardson 				if (!rxa_intr_queue(dev_info, i))
122199a2dd95SBruce Richardson 					continue;
122299a2dd95SBruce Richardson 				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
122399a2dd95SBruce Richardson 					rx_adapter->max_nb_rx,
122499a2dd95SBruce Richardson 					&rxq_empty);
122599a2dd95SBruce Richardson 				nb_rx += n;
122699a2dd95SBruce Richardson 
122799a2dd95SBruce Richardson 				enq_buffer_full = !rxq_empty && n == 0;
122899a2dd95SBruce Richardson 				max_done = nb_rx > rx_adapter->max_nb_rx;
122999a2dd95SBruce Richardson 
123099a2dd95SBruce Richardson 				if (enq_buffer_full || max_done) {
123199a2dd95SBruce Richardson 					dev_info->next_q_idx = i;
123299a2dd95SBruce Richardson 					goto done;
123399a2dd95SBruce Richardson 				}
123499a2dd95SBruce Richardson 			}
123599a2dd95SBruce Richardson 
123699a2dd95SBruce Richardson 			rx_adapter->qd_valid = 0;
123799a2dd95SBruce Richardson 
123899a2dd95SBruce Richardson 			/* Reinitialize for next interrupt */
123999a2dd95SBruce Richardson 			dev_info->next_q_idx = dev_info->multi_intr_cap ?
124099a2dd95SBruce Richardson 						RTE_MAX_RXTX_INTR_VEC_ID - 1 :
124199a2dd95SBruce Richardson 						0;
124299a2dd95SBruce Richardson 		} else {
124399a2dd95SBruce Richardson 			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
124499a2dd95SBruce Richardson 				rx_adapter->max_nb_rx,
124599a2dd95SBruce Richardson 				&rxq_empty);
124699a2dd95SBruce Richardson 			rx_adapter->qd_valid = !rxq_empty;
124799a2dd95SBruce Richardson 			nb_rx += n;
124899a2dd95SBruce Richardson 			if (nb_rx > rx_adapter->max_nb_rx)
124999a2dd95SBruce Richardson 				break;
125099a2dd95SBruce Richardson 		}
125199a2dd95SBruce Richardson 	}
125299a2dd95SBruce Richardson 
125399a2dd95SBruce Richardson done:
125499a2dd95SBruce Richardson 	rx_adapter->stats.rx_intr_packets += nb_rx;
125599a2dd95SBruce Richardson 	return nb_rx;
125699a2dd95SBruce Richardson }
125799a2dd95SBruce Richardson 
125899a2dd95SBruce Richardson /*
125999a2dd95SBruce Richardson  * Polls receive queues added to the event adapter and enqueues received
126099a2dd95SBruce Richardson  * packets to the event device.
126199a2dd95SBruce Richardson  *
126299a2dd95SBruce Richardson  * The receive code enqueues initially to a temporary buffer, the
126399a2dd95SBruce Richardson  * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
126499a2dd95SBruce Richardson  *
126599a2dd95SBruce Richardson  * If there isn't space available in the temporary buffer, packets from the
126699a2dd95SBruce Richardson  * Rx queue aren't dequeued from the eth device, this back pressures the
126799a2dd95SBruce Richardson  * eth device, in virtual device environments this back pressure is relayed to
126899a2dd95SBruce Richardson  * the hypervisor's switching layer where adjustments can be made to deal with
126999a2dd95SBruce Richardson  * it.
127099a2dd95SBruce Richardson  */
127199a2dd95SBruce Richardson static inline uint32_t
127299a2dd95SBruce Richardson rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
127399a2dd95SBruce Richardson {
127499a2dd95SBruce Richardson 	uint32_t num_queue;
127599a2dd95SBruce Richardson 	uint32_t nb_rx = 0;
127699a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf;
127799a2dd95SBruce Richardson 	uint32_t wrr_pos;
127899a2dd95SBruce Richardson 	uint32_t max_nb_rx;
127999a2dd95SBruce Richardson 
128099a2dd95SBruce Richardson 	wrr_pos = rx_adapter->wrr_pos;
128199a2dd95SBruce Richardson 	max_nb_rx = rx_adapter->max_nb_rx;
128299a2dd95SBruce Richardson 	buf = &rx_adapter->event_enqueue_buffer;
128399a2dd95SBruce Richardson 
128499a2dd95SBruce Richardson 	/* Iterate through a WRR sequence */
128599a2dd95SBruce Richardson 	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
128699a2dd95SBruce Richardson 		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
128799a2dd95SBruce Richardson 		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
128899a2dd95SBruce Richardson 		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
128999a2dd95SBruce Richardson 
129099a2dd95SBruce Richardson 		/* Don't do a batch dequeue from the rx queue if there isn't
129199a2dd95SBruce Richardson 		 * enough space in the enqueue buffer.
129299a2dd95SBruce Richardson 		 */
129399a2dd95SBruce Richardson 		if (buf->count >= BATCH_SIZE)
129499a2dd95SBruce Richardson 			rxa_flush_event_buffer(rx_adapter);
12958113fd15SGanapati Kundapura 		if (!rxa_pkt_buf_available(buf)) {
129699a2dd95SBruce Richardson 			rx_adapter->wrr_pos = wrr_pos;
129799a2dd95SBruce Richardson 			return nb_rx;
129899a2dd95SBruce Richardson 		}
129999a2dd95SBruce Richardson 
130099a2dd95SBruce Richardson 		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
130199a2dd95SBruce Richardson 				NULL);
130299a2dd95SBruce Richardson 		if (nb_rx > max_nb_rx) {
130399a2dd95SBruce Richardson 			rx_adapter->wrr_pos =
130499a2dd95SBruce Richardson 				    (wrr_pos + 1) % rx_adapter->wrr_len;
130599a2dd95SBruce Richardson 			break;
130699a2dd95SBruce Richardson 		}
130799a2dd95SBruce Richardson 
130899a2dd95SBruce Richardson 		if (++wrr_pos == rx_adapter->wrr_len)
130999a2dd95SBruce Richardson 			wrr_pos = 0;
131099a2dd95SBruce Richardson 	}
131199a2dd95SBruce Richardson 	return nb_rx;
131299a2dd95SBruce Richardson }
131399a2dd95SBruce Richardson 
131499a2dd95SBruce Richardson static void
131599a2dd95SBruce Richardson rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
131699a2dd95SBruce Richardson {
131799a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter = arg;
131899a2dd95SBruce Richardson 	struct rte_eth_event_enqueue_buffer *buf =
131999a2dd95SBruce Richardson 		&rx_adapter->event_enqueue_buffer;
132099a2dd95SBruce Richardson 	struct rte_event *ev;
132199a2dd95SBruce Richardson 
132299a2dd95SBruce Richardson 	if (buf->count)
132399a2dd95SBruce Richardson 		rxa_flush_event_buffer(rx_adapter);
132499a2dd95SBruce Richardson 
132599a2dd95SBruce Richardson 	if (vec->vector_ev->nb_elem == 0)
132699a2dd95SBruce Richardson 		return;
132799a2dd95SBruce Richardson 	ev = &buf->events[buf->count];
132899a2dd95SBruce Richardson 
132999a2dd95SBruce Richardson 	/* Event ready. */
133099a2dd95SBruce Richardson 	ev->event = vec->event;
133199a2dd95SBruce Richardson 	ev->vec = vec->vector_ev;
133299a2dd95SBruce Richardson 	buf->count++;
133399a2dd95SBruce Richardson 
133499a2dd95SBruce Richardson 	vec->vector_ev = NULL;
133599a2dd95SBruce Richardson 	vec->ts = 0;
133699a2dd95SBruce Richardson }
133799a2dd95SBruce Richardson 
133899a2dd95SBruce Richardson static int
133999a2dd95SBruce Richardson rxa_service_func(void *args)
134099a2dd95SBruce Richardson {
134199a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter = args;
134299a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats *stats;
134399a2dd95SBruce Richardson 
134499a2dd95SBruce Richardson 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
134599a2dd95SBruce Richardson 		return 0;
134699a2dd95SBruce Richardson 	if (!rx_adapter->rxa_started) {
134799a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
134899a2dd95SBruce Richardson 		return 0;
134999a2dd95SBruce Richardson 	}
135099a2dd95SBruce Richardson 
135199a2dd95SBruce Richardson 	if (rx_adapter->ena_vector) {
135299a2dd95SBruce Richardson 		if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >=
135399a2dd95SBruce Richardson 		    rx_adapter->vector_tmo_ticks) {
135499a2dd95SBruce Richardson 			struct eth_rx_vector_data *vec;
135599a2dd95SBruce Richardson 
135699a2dd95SBruce Richardson 			TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
135799a2dd95SBruce Richardson 				uint64_t elapsed_time = rte_rdtsc() - vec->ts;
135899a2dd95SBruce Richardson 
135999a2dd95SBruce Richardson 				if (elapsed_time >= vec->vector_timeout_ticks) {
136099a2dd95SBruce Richardson 					rxa_vector_expire(vec, rx_adapter);
136199a2dd95SBruce Richardson 					TAILQ_REMOVE(&rx_adapter->vector_list,
136299a2dd95SBruce Richardson 						     vec, next);
136399a2dd95SBruce Richardson 				}
136499a2dd95SBruce Richardson 			}
136599a2dd95SBruce Richardson 			rx_adapter->prev_expiry_ts = rte_rdtsc();
136699a2dd95SBruce Richardson 		}
136799a2dd95SBruce Richardson 	}
136899a2dd95SBruce Richardson 
136999a2dd95SBruce Richardson 	stats = &rx_adapter->stats;
137099a2dd95SBruce Richardson 	stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
137199a2dd95SBruce Richardson 	stats->rx_packets += rxa_poll(rx_adapter);
137299a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->rx_lock);
137399a2dd95SBruce Richardson 	return 0;
137499a2dd95SBruce Richardson }
137599a2dd95SBruce Richardson 
137699a2dd95SBruce Richardson static int
137799a2dd95SBruce Richardson rte_event_eth_rx_adapter_init(void)
137899a2dd95SBruce Richardson {
1379da781e64SGanapati Kundapura 	const char *name = RXA_ADAPTER_ARRAY;
138099a2dd95SBruce Richardson 	const struct rte_memzone *mz;
138199a2dd95SBruce Richardson 	unsigned int sz;
138299a2dd95SBruce Richardson 
138399a2dd95SBruce Richardson 	sz = sizeof(*event_eth_rx_adapter) *
138499a2dd95SBruce Richardson 	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
138599a2dd95SBruce Richardson 	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
138699a2dd95SBruce Richardson 
138799a2dd95SBruce Richardson 	mz = rte_memzone_lookup(name);
138899a2dd95SBruce Richardson 	if (mz == NULL) {
138999a2dd95SBruce Richardson 		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
139099a2dd95SBruce Richardson 						 RTE_CACHE_LINE_SIZE);
139199a2dd95SBruce Richardson 		if (mz == NULL) {
139299a2dd95SBruce Richardson 			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
139399a2dd95SBruce Richardson 					PRId32, rte_errno);
139499a2dd95SBruce Richardson 			return -rte_errno;
139599a2dd95SBruce Richardson 		}
139699a2dd95SBruce Richardson 	}
139799a2dd95SBruce Richardson 
139899a2dd95SBruce Richardson 	event_eth_rx_adapter = mz->addr;
139999a2dd95SBruce Richardson 	return 0;
140099a2dd95SBruce Richardson }
140199a2dd95SBruce Richardson 
1402da781e64SGanapati Kundapura static int
1403da781e64SGanapati Kundapura rxa_memzone_lookup(void)
1404da781e64SGanapati Kundapura {
1405da781e64SGanapati Kundapura 	const struct rte_memzone *mz;
1406da781e64SGanapati Kundapura 
1407da781e64SGanapati Kundapura 	if (event_eth_rx_adapter == NULL) {
1408da781e64SGanapati Kundapura 		mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY);
1409da781e64SGanapati Kundapura 		if (mz == NULL)
1410da781e64SGanapati Kundapura 			return -ENOMEM;
1411da781e64SGanapati Kundapura 		event_eth_rx_adapter = mz->addr;
1412da781e64SGanapati Kundapura 	}
1413da781e64SGanapati Kundapura 
1414da781e64SGanapati Kundapura 	return 0;
1415da781e64SGanapati Kundapura }
1416da781e64SGanapati Kundapura 
141799a2dd95SBruce Richardson static inline struct rte_event_eth_rx_adapter *
141899a2dd95SBruce Richardson rxa_id_to_adapter(uint8_t id)
141999a2dd95SBruce Richardson {
142099a2dd95SBruce Richardson 	return event_eth_rx_adapter ?
142199a2dd95SBruce Richardson 		event_eth_rx_adapter[id] : NULL;
142299a2dd95SBruce Richardson }
142399a2dd95SBruce Richardson 
142499a2dd95SBruce Richardson static int
142599a2dd95SBruce Richardson rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
142699a2dd95SBruce Richardson 		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
142799a2dd95SBruce Richardson {
142899a2dd95SBruce Richardson 	int ret;
142999a2dd95SBruce Richardson 	struct rte_eventdev *dev;
143099a2dd95SBruce Richardson 	struct rte_event_dev_config dev_conf;
143199a2dd95SBruce Richardson 	int started;
143299a2dd95SBruce Richardson 	uint8_t port_id;
143399a2dd95SBruce Richardson 	struct rte_event_port_conf *port_conf = arg;
143499a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
143599a2dd95SBruce Richardson 
143699a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
143799a2dd95SBruce Richardson 	dev_conf = dev->data->dev_conf;
143899a2dd95SBruce Richardson 
143999a2dd95SBruce Richardson 	started = dev->data->dev_started;
144099a2dd95SBruce Richardson 	if (started)
144199a2dd95SBruce Richardson 		rte_event_dev_stop(dev_id);
144299a2dd95SBruce Richardson 	port_id = dev_conf.nb_event_ports;
144399a2dd95SBruce Richardson 	dev_conf.nb_event_ports += 1;
144499a2dd95SBruce Richardson 	ret = rte_event_dev_configure(dev_id, &dev_conf);
144599a2dd95SBruce Richardson 	if (ret) {
144699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
144799a2dd95SBruce Richardson 						dev_id);
144899a2dd95SBruce Richardson 		if (started) {
144999a2dd95SBruce Richardson 			if (rte_event_dev_start(dev_id))
145099a2dd95SBruce Richardson 				return -EIO;
145199a2dd95SBruce Richardson 		}
145299a2dd95SBruce Richardson 		return ret;
145399a2dd95SBruce Richardson 	}
145499a2dd95SBruce Richardson 
145599a2dd95SBruce Richardson 	ret = rte_event_port_setup(dev_id, port_id, port_conf);
145699a2dd95SBruce Richardson 	if (ret) {
145799a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
145899a2dd95SBruce Richardson 					port_id);
145999a2dd95SBruce Richardson 		return ret;
146099a2dd95SBruce Richardson 	}
146199a2dd95SBruce Richardson 
146299a2dd95SBruce Richardson 	conf->event_port_id = port_id;
146399a2dd95SBruce Richardson 	conf->max_nb_rx = 128;
146499a2dd95SBruce Richardson 	if (started)
146599a2dd95SBruce Richardson 		ret = rte_event_dev_start(dev_id);
146699a2dd95SBruce Richardson 	rx_adapter->default_cb_arg = 1;
146799a2dd95SBruce Richardson 	return ret;
146899a2dd95SBruce Richardson }
146999a2dd95SBruce Richardson 
147099a2dd95SBruce Richardson static int
147199a2dd95SBruce Richardson rxa_epoll_create1(void)
147299a2dd95SBruce Richardson {
147399a2dd95SBruce Richardson #if defined(LINUX)
147499a2dd95SBruce Richardson 	int fd;
147599a2dd95SBruce Richardson 	fd = epoll_create1(EPOLL_CLOEXEC);
147699a2dd95SBruce Richardson 	return fd < 0 ? -errno : fd;
147799a2dd95SBruce Richardson #elif defined(BSD)
147899a2dd95SBruce Richardson 	return -ENOTSUP;
147999a2dd95SBruce Richardson #endif
148099a2dd95SBruce Richardson }
148199a2dd95SBruce Richardson 
148299a2dd95SBruce Richardson static int
148399a2dd95SBruce Richardson rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
148499a2dd95SBruce Richardson {
148599a2dd95SBruce Richardson 	if (rx_adapter->epd != INIT_FD)
148699a2dd95SBruce Richardson 		return 0;
148799a2dd95SBruce Richardson 
148899a2dd95SBruce Richardson 	rx_adapter->epd = rxa_epoll_create1();
148999a2dd95SBruce Richardson 	if (rx_adapter->epd < 0) {
149099a2dd95SBruce Richardson 		int err = rx_adapter->epd;
149199a2dd95SBruce Richardson 		rx_adapter->epd = INIT_FD;
149299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
149399a2dd95SBruce Richardson 		return err;
149499a2dd95SBruce Richardson 	}
149599a2dd95SBruce Richardson 
149699a2dd95SBruce Richardson 	return 0;
149799a2dd95SBruce Richardson }
149899a2dd95SBruce Richardson 
149999a2dd95SBruce Richardson static int
150099a2dd95SBruce Richardson rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
150199a2dd95SBruce Richardson {
150299a2dd95SBruce Richardson 	int err;
150399a2dd95SBruce Richardson 	char thread_name[RTE_MAX_THREAD_NAME_LEN];
150499a2dd95SBruce Richardson 
150599a2dd95SBruce Richardson 	if (rx_adapter->intr_ring)
150699a2dd95SBruce Richardson 		return 0;
150799a2dd95SBruce Richardson 
150899a2dd95SBruce Richardson 	rx_adapter->intr_ring = rte_ring_create("intr_ring",
150999a2dd95SBruce Richardson 					RTE_EVENT_ETH_INTR_RING_SIZE,
151099a2dd95SBruce Richardson 					rte_socket_id(), 0);
151199a2dd95SBruce Richardson 	if (!rx_adapter->intr_ring)
151299a2dd95SBruce Richardson 		return -ENOMEM;
151399a2dd95SBruce Richardson 
151499a2dd95SBruce Richardson 	rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
151599a2dd95SBruce Richardson 					RTE_EVENT_ETH_INTR_RING_SIZE *
151699a2dd95SBruce Richardson 					sizeof(struct rte_epoll_event),
151799a2dd95SBruce Richardson 					RTE_CACHE_LINE_SIZE,
151899a2dd95SBruce Richardson 					rx_adapter->socket_id);
151999a2dd95SBruce Richardson 	if (!rx_adapter->epoll_events) {
152099a2dd95SBruce Richardson 		err = -ENOMEM;
152199a2dd95SBruce Richardson 		goto error;
152299a2dd95SBruce Richardson 	}
152399a2dd95SBruce Richardson 
152499a2dd95SBruce Richardson 	rte_spinlock_init(&rx_adapter->intr_ring_lock);
152599a2dd95SBruce Richardson 
152699a2dd95SBruce Richardson 	snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
152799a2dd95SBruce Richardson 			"rx-intr-thread-%d", rx_adapter->id);
152899a2dd95SBruce Richardson 
152999a2dd95SBruce Richardson 	err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
153099a2dd95SBruce Richardson 				NULL, rxa_intr_thread, rx_adapter);
15310bac9fc7SChengwen Feng 	if (!err)
153299a2dd95SBruce Richardson 		return 0;
153399a2dd95SBruce Richardson 
153499a2dd95SBruce Richardson 	RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
1535f6681ab7SChengwen Feng 	rte_free(rx_adapter->epoll_events);
153699a2dd95SBruce Richardson error:
153799a2dd95SBruce Richardson 	rte_ring_free(rx_adapter->intr_ring);
153899a2dd95SBruce Richardson 	rx_adapter->intr_ring = NULL;
153999a2dd95SBruce Richardson 	rx_adapter->epoll_events = NULL;
154099a2dd95SBruce Richardson 	return err;
154199a2dd95SBruce Richardson }
154299a2dd95SBruce Richardson 
154399a2dd95SBruce Richardson static int
154499a2dd95SBruce Richardson rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
154599a2dd95SBruce Richardson {
154699a2dd95SBruce Richardson 	int err;
154799a2dd95SBruce Richardson 
154899a2dd95SBruce Richardson 	err = pthread_cancel(rx_adapter->rx_intr_thread);
154999a2dd95SBruce Richardson 	if (err)
155099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
155199a2dd95SBruce Richardson 				err);
155299a2dd95SBruce Richardson 
155399a2dd95SBruce Richardson 	err = pthread_join(rx_adapter->rx_intr_thread, NULL);
155499a2dd95SBruce Richardson 	if (err)
155599a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
155699a2dd95SBruce Richardson 
155799a2dd95SBruce Richardson 	rte_free(rx_adapter->epoll_events);
155899a2dd95SBruce Richardson 	rte_ring_free(rx_adapter->intr_ring);
155999a2dd95SBruce Richardson 	rx_adapter->intr_ring = NULL;
156099a2dd95SBruce Richardson 	rx_adapter->epoll_events = NULL;
156199a2dd95SBruce Richardson 	return 0;
156299a2dd95SBruce Richardson }
156399a2dd95SBruce Richardson 
156499a2dd95SBruce Richardson static int
156599a2dd95SBruce Richardson rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
156699a2dd95SBruce Richardson {
156799a2dd95SBruce Richardson 	int ret;
156899a2dd95SBruce Richardson 
156999a2dd95SBruce Richardson 	if (rx_adapter->num_rx_intr == 0)
157099a2dd95SBruce Richardson 		return 0;
157199a2dd95SBruce Richardson 
157299a2dd95SBruce Richardson 	ret = rxa_destroy_intr_thread(rx_adapter);
157399a2dd95SBruce Richardson 	if (ret)
157499a2dd95SBruce Richardson 		return ret;
157599a2dd95SBruce Richardson 
157699a2dd95SBruce Richardson 	close(rx_adapter->epd);
157799a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
157899a2dd95SBruce Richardson 
157999a2dd95SBruce Richardson 	return ret;
158099a2dd95SBruce Richardson }
158199a2dd95SBruce Richardson 
158299a2dd95SBruce Richardson static int
158399a2dd95SBruce Richardson rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
158499a2dd95SBruce Richardson 	struct eth_device_info *dev_info,
158599a2dd95SBruce Richardson 	uint16_t rx_queue_id)
158699a2dd95SBruce Richardson {
158799a2dd95SBruce Richardson 	int err;
158899a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
158999a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
159099a2dd95SBruce Richardson 
159199a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
159299a2dd95SBruce Richardson 	if (err) {
159399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
159499a2dd95SBruce Richardson 			rx_queue_id);
159599a2dd95SBruce Richardson 		return err;
159699a2dd95SBruce Richardson 	}
159799a2dd95SBruce Richardson 
159899a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
159999a2dd95SBruce Richardson 					rx_adapter->epd,
160099a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
160199a2dd95SBruce Richardson 					0);
160299a2dd95SBruce Richardson 	if (err)
160399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
160499a2dd95SBruce Richardson 
160599a2dd95SBruce Richardson 	if (sintr)
160699a2dd95SBruce Richardson 		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
160799a2dd95SBruce Richardson 	else
160899a2dd95SBruce Richardson 		dev_info->shared_intr_enabled = 0;
160999a2dd95SBruce Richardson 	return err;
161099a2dd95SBruce Richardson }
161199a2dd95SBruce Richardson 
161299a2dd95SBruce Richardson static int
161399a2dd95SBruce Richardson rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
161499a2dd95SBruce Richardson 		struct eth_device_info *dev_info,
161599a2dd95SBruce Richardson 		int rx_queue_id)
161699a2dd95SBruce Richardson {
161799a2dd95SBruce Richardson 	int err;
161899a2dd95SBruce Richardson 	int i;
161999a2dd95SBruce Richardson 	int s;
162099a2dd95SBruce Richardson 
162199a2dd95SBruce Richardson 	if (dev_info->nb_rx_intr == 0)
162299a2dd95SBruce Richardson 		return 0;
162399a2dd95SBruce Richardson 
162499a2dd95SBruce Richardson 	err = 0;
162599a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
162699a2dd95SBruce Richardson 		s = dev_info->nb_shared_intr;
162799a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
162899a2dd95SBruce Richardson 			int sintr;
162999a2dd95SBruce Richardson 			uint16_t q;
163099a2dd95SBruce Richardson 
163199a2dd95SBruce Richardson 			q = dev_info->intr_queue[i];
163299a2dd95SBruce Richardson 			sintr = rxa_shared_intr(dev_info, q);
163399a2dd95SBruce Richardson 			s -= sintr;
163499a2dd95SBruce Richardson 
163599a2dd95SBruce Richardson 			if (!sintr || s == 0) {
163699a2dd95SBruce Richardson 
163799a2dd95SBruce Richardson 				err = rxa_disable_intr(rx_adapter, dev_info,
163899a2dd95SBruce Richardson 						q);
163999a2dd95SBruce Richardson 				if (err)
164099a2dd95SBruce Richardson 					return err;
164199a2dd95SBruce Richardson 				rxa_intr_ring_del_entries(rx_adapter, dev_info,
164299a2dd95SBruce Richardson 							q);
164399a2dd95SBruce Richardson 			}
164499a2dd95SBruce Richardson 		}
164599a2dd95SBruce Richardson 	} else {
164699a2dd95SBruce Richardson 		if (!rxa_intr_queue(dev_info, rx_queue_id))
164799a2dd95SBruce Richardson 			return 0;
164899a2dd95SBruce Richardson 		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
164999a2dd95SBruce Richardson 				dev_info->nb_shared_intr == 1) {
165099a2dd95SBruce Richardson 			err = rxa_disable_intr(rx_adapter, dev_info,
165199a2dd95SBruce Richardson 					rx_queue_id);
165299a2dd95SBruce Richardson 			if (err)
165399a2dd95SBruce Richardson 				return err;
165499a2dd95SBruce Richardson 			rxa_intr_ring_del_entries(rx_adapter, dev_info,
165599a2dd95SBruce Richardson 						rx_queue_id);
165699a2dd95SBruce Richardson 		}
165799a2dd95SBruce Richardson 
165899a2dd95SBruce Richardson 		for (i = 0; i < dev_info->nb_rx_intr; i++) {
165999a2dd95SBruce Richardson 			if (dev_info->intr_queue[i] == rx_queue_id) {
166099a2dd95SBruce Richardson 				for (; i < dev_info->nb_rx_intr - 1; i++)
166199a2dd95SBruce Richardson 					dev_info->intr_queue[i] =
166299a2dd95SBruce Richardson 						dev_info->intr_queue[i + 1];
166399a2dd95SBruce Richardson 				break;
166499a2dd95SBruce Richardson 			}
166599a2dd95SBruce Richardson 		}
166699a2dd95SBruce Richardson 	}
166799a2dd95SBruce Richardson 
166899a2dd95SBruce Richardson 	return err;
166999a2dd95SBruce Richardson }
167099a2dd95SBruce Richardson 
167199a2dd95SBruce Richardson static int
167299a2dd95SBruce Richardson rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
167399a2dd95SBruce Richardson 	struct eth_device_info *dev_info,
167499a2dd95SBruce Richardson 	uint16_t rx_queue_id)
167599a2dd95SBruce Richardson {
167699a2dd95SBruce Richardson 	int err, err1;
167799a2dd95SBruce Richardson 	uint16_t eth_dev_id = dev_info->dev->data->port_id;
167899a2dd95SBruce Richardson 	union queue_data qd;
167999a2dd95SBruce Richardson 	int init_fd;
168099a2dd95SBruce Richardson 	uint16_t *intr_queue;
168199a2dd95SBruce Richardson 	int sintr = rxa_shared_intr(dev_info, rx_queue_id);
168299a2dd95SBruce Richardson 
168399a2dd95SBruce Richardson 	if (rxa_intr_queue(dev_info, rx_queue_id))
168499a2dd95SBruce Richardson 		return 0;
168599a2dd95SBruce Richardson 
168699a2dd95SBruce Richardson 	intr_queue = dev_info->intr_queue;
168799a2dd95SBruce Richardson 	if (dev_info->intr_queue == NULL) {
168899a2dd95SBruce Richardson 		size_t len =
168999a2dd95SBruce Richardson 			dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
169099a2dd95SBruce Richardson 		dev_info->intr_queue =
169199a2dd95SBruce Richardson 			rte_zmalloc_socket(
169299a2dd95SBruce Richardson 				rx_adapter->mem_name,
169399a2dd95SBruce Richardson 				len,
169499a2dd95SBruce Richardson 				0,
169599a2dd95SBruce Richardson 				rx_adapter->socket_id);
169699a2dd95SBruce Richardson 		if (dev_info->intr_queue == NULL)
169799a2dd95SBruce Richardson 			return -ENOMEM;
169899a2dd95SBruce Richardson 	}
169999a2dd95SBruce Richardson 
170099a2dd95SBruce Richardson 	init_fd = rx_adapter->epd;
170199a2dd95SBruce Richardson 	err = rxa_init_epd(rx_adapter);
170299a2dd95SBruce Richardson 	if (err)
170399a2dd95SBruce Richardson 		goto err_free_queue;
170499a2dd95SBruce Richardson 
170599a2dd95SBruce Richardson 	qd.port = eth_dev_id;
170699a2dd95SBruce Richardson 	qd.queue = rx_queue_id;
170799a2dd95SBruce Richardson 
170899a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
170999a2dd95SBruce Richardson 					rx_adapter->epd,
171099a2dd95SBruce Richardson 					RTE_INTR_EVENT_ADD,
171199a2dd95SBruce Richardson 					qd.ptr);
171299a2dd95SBruce Richardson 	if (err) {
171399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
171499a2dd95SBruce Richardson 			" Rx Queue %u err %d", rx_queue_id, err);
171599a2dd95SBruce Richardson 		goto err_del_fd;
171699a2dd95SBruce Richardson 	}
171799a2dd95SBruce Richardson 
171899a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
171999a2dd95SBruce Richardson 	if (err) {
172099a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not enable interrupt for"
172199a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
172299a2dd95SBruce Richardson 
172399a2dd95SBruce Richardson 		goto err_del_event;
172499a2dd95SBruce Richardson 	}
172599a2dd95SBruce Richardson 
172699a2dd95SBruce Richardson 	err = rxa_create_intr_thread(rx_adapter);
172799a2dd95SBruce Richardson 	if (!err)  {
172899a2dd95SBruce Richardson 		if (sintr)
172999a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
173099a2dd95SBruce Richardson 		else
173199a2dd95SBruce Richardson 			dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
173299a2dd95SBruce Richardson 		return 0;
173399a2dd95SBruce Richardson 	}
173499a2dd95SBruce Richardson 
173599a2dd95SBruce Richardson 
173699a2dd95SBruce Richardson 	err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
173799a2dd95SBruce Richardson 	if (err)
173899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not disable interrupt for"
173999a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err);
174099a2dd95SBruce Richardson err_del_event:
174199a2dd95SBruce Richardson 	err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
174299a2dd95SBruce Richardson 					rx_adapter->epd,
174399a2dd95SBruce Richardson 					RTE_INTR_EVENT_DEL,
174499a2dd95SBruce Richardson 					0);
174599a2dd95SBruce Richardson 	if (err1) {
174699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Could not delete event for"
174799a2dd95SBruce Richardson 				" Rx Queue %u err %d", rx_queue_id, err1);
174899a2dd95SBruce Richardson 	}
174999a2dd95SBruce Richardson err_del_fd:
175099a2dd95SBruce Richardson 	if (init_fd == INIT_FD) {
175199a2dd95SBruce Richardson 		close(rx_adapter->epd);
175299a2dd95SBruce Richardson 		rx_adapter->epd = -1;
175399a2dd95SBruce Richardson 	}
175499a2dd95SBruce Richardson err_free_queue:
175599a2dd95SBruce Richardson 	if (intr_queue == NULL)
175699a2dd95SBruce Richardson 		rte_free(dev_info->intr_queue);
175799a2dd95SBruce Richardson 
175899a2dd95SBruce Richardson 	return err;
175999a2dd95SBruce Richardson }
176099a2dd95SBruce Richardson 
176199a2dd95SBruce Richardson static int
176299a2dd95SBruce Richardson rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
176399a2dd95SBruce Richardson 	struct eth_device_info *dev_info,
176499a2dd95SBruce Richardson 	int rx_queue_id)
176599a2dd95SBruce Richardson 
176699a2dd95SBruce Richardson {
176799a2dd95SBruce Richardson 	int i, j, err;
176899a2dd95SBruce Richardson 	int si = -1;
176999a2dd95SBruce Richardson 	int shared_done = (dev_info->nb_shared_intr > 0);
177099a2dd95SBruce Richardson 
177199a2dd95SBruce Richardson 	if (rx_queue_id != -1) {
177299a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
177399a2dd95SBruce Richardson 			return 0;
177499a2dd95SBruce Richardson 		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
177599a2dd95SBruce Richardson 	}
177699a2dd95SBruce Richardson 
177799a2dd95SBruce Richardson 	err = 0;
177899a2dd95SBruce Richardson 	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
177999a2dd95SBruce Richardson 
178099a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, i) && shared_done)
178199a2dd95SBruce Richardson 			continue;
178299a2dd95SBruce Richardson 
178399a2dd95SBruce Richardson 		err = rxa_config_intr(rx_adapter, dev_info, i);
178499a2dd95SBruce Richardson 
178599a2dd95SBruce Richardson 		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
178699a2dd95SBruce Richardson 		if (shared_done) {
178799a2dd95SBruce Richardson 			si = i;
178899a2dd95SBruce Richardson 			dev_info->shared_intr_enabled = 1;
178999a2dd95SBruce Richardson 		}
179099a2dd95SBruce Richardson 		if (err)
179199a2dd95SBruce Richardson 			break;
179299a2dd95SBruce Richardson 	}
179399a2dd95SBruce Richardson 
179499a2dd95SBruce Richardson 	if (err == 0)
179599a2dd95SBruce Richardson 		return 0;
179699a2dd95SBruce Richardson 
179799a2dd95SBruce Richardson 	shared_done = (dev_info->nb_shared_intr > 0);
179899a2dd95SBruce Richardson 	for (j = 0; j < i; j++) {
179999a2dd95SBruce Richardson 		if (rxa_intr_queue(dev_info, j))
180099a2dd95SBruce Richardson 			continue;
180199a2dd95SBruce Richardson 		if (rxa_shared_intr(dev_info, j) && si != j)
180299a2dd95SBruce Richardson 			continue;
180399a2dd95SBruce Richardson 		err = rxa_disable_intr(rx_adapter, dev_info, j);
180499a2dd95SBruce Richardson 		if (err)
180599a2dd95SBruce Richardson 			break;
180699a2dd95SBruce Richardson 
180799a2dd95SBruce Richardson 	}
180899a2dd95SBruce Richardson 
180999a2dd95SBruce Richardson 	return err;
181099a2dd95SBruce Richardson }
181199a2dd95SBruce Richardson 
181299a2dd95SBruce Richardson 
181399a2dd95SBruce Richardson static int
181499a2dd95SBruce Richardson rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
181599a2dd95SBruce Richardson {
181699a2dd95SBruce Richardson 	int ret;
181799a2dd95SBruce Richardson 	struct rte_service_spec service;
181899a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
181999a2dd95SBruce Richardson 
182099a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
182199a2dd95SBruce Richardson 		return 0;
182299a2dd95SBruce Richardson 
182399a2dd95SBruce Richardson 	memset(&service, 0, sizeof(service));
182499a2dd95SBruce Richardson 	snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
182599a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d", id);
182699a2dd95SBruce Richardson 	service.socket_id = rx_adapter->socket_id;
182799a2dd95SBruce Richardson 	service.callback = rxa_service_func;
182899a2dd95SBruce Richardson 	service.callback_userdata = rx_adapter;
182999a2dd95SBruce Richardson 	/* Service function handles locking for queue add/del updates */
183099a2dd95SBruce Richardson 	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
183199a2dd95SBruce Richardson 	ret = rte_service_component_register(&service, &rx_adapter->service_id);
183299a2dd95SBruce Richardson 	if (ret) {
183399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
183499a2dd95SBruce Richardson 			service.name, ret);
183599a2dd95SBruce Richardson 		return ret;
183699a2dd95SBruce Richardson 	}
183799a2dd95SBruce Richardson 
183899a2dd95SBruce Richardson 	ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
183999a2dd95SBruce Richardson 		&rx_adapter_conf, rx_adapter->conf_arg);
184099a2dd95SBruce Richardson 	if (ret) {
184199a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
184299a2dd95SBruce Richardson 			ret);
184399a2dd95SBruce Richardson 		goto err_done;
184499a2dd95SBruce Richardson 	}
184599a2dd95SBruce Richardson 	rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
184699a2dd95SBruce Richardson 	rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
184799a2dd95SBruce Richardson 	rx_adapter->service_inited = 1;
184899a2dd95SBruce Richardson 	rx_adapter->epd = INIT_FD;
184999a2dd95SBruce Richardson 	return 0;
185099a2dd95SBruce Richardson 
185199a2dd95SBruce Richardson err_done:
185299a2dd95SBruce Richardson 	rte_service_component_unregister(rx_adapter->service_id);
185399a2dd95SBruce Richardson 	return ret;
185499a2dd95SBruce Richardson }
185599a2dd95SBruce Richardson 
185699a2dd95SBruce Richardson static void
185799a2dd95SBruce Richardson rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
185899a2dd95SBruce Richardson 		struct eth_device_info *dev_info,
185999a2dd95SBruce Richardson 		int32_t rx_queue_id,
186099a2dd95SBruce Richardson 		uint8_t add)
186199a2dd95SBruce Richardson {
186299a2dd95SBruce Richardson 	struct eth_rx_queue_info *queue_info;
186399a2dd95SBruce Richardson 	int enabled;
186499a2dd95SBruce Richardson 	uint16_t i;
186599a2dd95SBruce Richardson 
186699a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
186799a2dd95SBruce Richardson 		return;
186899a2dd95SBruce Richardson 
186999a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
187099a2dd95SBruce Richardson 		for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
187199a2dd95SBruce Richardson 			rxa_update_queue(rx_adapter, dev_info, i, add);
187299a2dd95SBruce Richardson 	} else {
187399a2dd95SBruce Richardson 		queue_info = &dev_info->rx_queue[rx_queue_id];
187499a2dd95SBruce Richardson 		enabled = queue_info->queue_enabled;
187599a2dd95SBruce Richardson 		if (add) {
187699a2dd95SBruce Richardson 			rx_adapter->nb_queues += !enabled;
187799a2dd95SBruce Richardson 			dev_info->nb_dev_queues += !enabled;
187899a2dd95SBruce Richardson 		} else {
187999a2dd95SBruce Richardson 			rx_adapter->nb_queues -= enabled;
188099a2dd95SBruce Richardson 			dev_info->nb_dev_queues -= enabled;
188199a2dd95SBruce Richardson 		}
188299a2dd95SBruce Richardson 		queue_info->queue_enabled = !!add;
188399a2dd95SBruce Richardson 	}
188499a2dd95SBruce Richardson }
188599a2dd95SBruce Richardson 
188699a2dd95SBruce Richardson static void
188799a2dd95SBruce Richardson rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count,
188899a2dd95SBruce Richardson 		    uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid,
188999a2dd95SBruce Richardson 		    uint16_t port_id)
189099a2dd95SBruce Richardson {
189199a2dd95SBruce Richardson #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
189299a2dd95SBruce Richardson 	struct eth_rx_vector_data *vector_data;
189399a2dd95SBruce Richardson 	uint32_t flow_id;
189499a2dd95SBruce Richardson 
189599a2dd95SBruce Richardson 	vector_data = &queue_info->vector_data;
189699a2dd95SBruce Richardson 	vector_data->max_vector_count = vector_count;
189799a2dd95SBruce Richardson 	vector_data->port = port_id;
189899a2dd95SBruce Richardson 	vector_data->queue = qid;
189999a2dd95SBruce Richardson 	vector_data->vector_pool = mp;
190099a2dd95SBruce Richardson 	vector_data->vector_timeout_ticks =
190199a2dd95SBruce Richardson 		NSEC2TICK(vector_ns, rte_get_timer_hz());
190299a2dd95SBruce Richardson 	vector_data->ts = 0;
190399a2dd95SBruce Richardson 	flow_id = queue_info->event & 0xFFFFF;
190499a2dd95SBruce Richardson 	flow_id =
190599a2dd95SBruce Richardson 		flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id;
190699a2dd95SBruce Richardson 	vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id;
190799a2dd95SBruce Richardson }
190899a2dd95SBruce Richardson 
190999a2dd95SBruce Richardson static void
191099a2dd95SBruce Richardson rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
191199a2dd95SBruce Richardson 	struct eth_device_info *dev_info,
191299a2dd95SBruce Richardson 	int32_t rx_queue_id)
191399a2dd95SBruce Richardson {
191499a2dd95SBruce Richardson 	struct eth_rx_vector_data *vec;
191599a2dd95SBruce Richardson 	int pollq;
191699a2dd95SBruce Richardson 	int intrq;
191799a2dd95SBruce Richardson 	int sintrq;
191899a2dd95SBruce Richardson 
191999a2dd95SBruce Richardson 
192099a2dd95SBruce Richardson 	if (rx_adapter->nb_queues == 0)
192199a2dd95SBruce Richardson 		return;
192299a2dd95SBruce Richardson 
192399a2dd95SBruce Richardson 	if (rx_queue_id == -1) {
192499a2dd95SBruce Richardson 		uint16_t nb_rx_queues;
192599a2dd95SBruce Richardson 		uint16_t i;
192699a2dd95SBruce Richardson 
192799a2dd95SBruce Richardson 		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
192899a2dd95SBruce Richardson 		for (i = 0; i <	nb_rx_queues; i++)
192999a2dd95SBruce Richardson 			rxa_sw_del(rx_adapter, dev_info, i);
193099a2dd95SBruce Richardson 		return;
193199a2dd95SBruce Richardson 	}
193299a2dd95SBruce Richardson 
193399a2dd95SBruce Richardson 	/* Push all the partial event vectors to event device. */
193499a2dd95SBruce Richardson 	TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) {
193599a2dd95SBruce Richardson 		if (vec->queue != rx_queue_id)
193699a2dd95SBruce Richardson 			continue;
193799a2dd95SBruce Richardson 		rxa_vector_expire(vec, rx_adapter);
193899a2dd95SBruce Richardson 		TAILQ_REMOVE(&rx_adapter->vector_list, vec, next);
193999a2dd95SBruce Richardson 	}
194099a2dd95SBruce Richardson 
194199a2dd95SBruce Richardson 	pollq = rxa_polled_queue(dev_info, rx_queue_id);
194299a2dd95SBruce Richardson 	intrq = rxa_intr_queue(dev_info, rx_queue_id);
194399a2dd95SBruce Richardson 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
194499a2dd95SBruce Richardson 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
194599a2dd95SBruce Richardson 	rx_adapter->num_rx_polled -= pollq;
194699a2dd95SBruce Richardson 	dev_info->nb_rx_poll -= pollq;
194799a2dd95SBruce Richardson 	rx_adapter->num_rx_intr -= intrq;
194899a2dd95SBruce Richardson 	dev_info->nb_rx_intr -= intrq;
194999a2dd95SBruce Richardson 	dev_info->nb_shared_intr -= intrq && sintrq;
195099a2dd95SBruce Richardson }
195199a2dd95SBruce Richardson 
/* Add a queue (or every queue of the device when rx_queue_id == -1) to the
 * SW adapter: record the servicing weight and the event template used for
 * packets from this queue, set up event vectorization if requested, and
 * update the poll/intr/shared-intr accounting for the queue's new mode.
 */
static void
rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info,
	int32_t rx_queue_id,
	const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
	struct eth_rx_queue_info *queue_info;
	const struct rte_event *ev = &conf->ev;
	int pollq;
	int intrq;
	int sintrq;
	struct rte_event *qi_ev;

	if (rx_queue_id == -1) {
		uint16_t nb_rx_queues;
		uint16_t i;

		/* Recurse over each Rx queue of the device */
		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
		for (i = 0; i <	nb_rx_queues; i++)
			rxa_add_queue(rx_adapter, dev_info, i, conf);
		return;
	}

	/* Snapshot the queue's mode before the config is updated */
	pollq = rxa_polled_queue(dev_info, rx_queue_id);
	intrq = rxa_intr_queue(dev_info, rx_queue_id);
	sintrq = rxa_shared_intr(dev_info, rx_queue_id);

	queue_info = &dev_info->rx_queue[rx_queue_id];
	queue_info->wt = conf->servicing_weight;

	/* Template event injected for packets received on this queue */
	qi_ev = (struct rte_event *)&queue_info->event;
	qi_ev->event = ev->event;
	qi_ev->op = RTE_EVENT_OP_NEW;
	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
	qi_ev->sub_event_type = 0;

	if (conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
		/* Keep the application supplied flow id as-is */
		queue_info->flow_id_mask = ~0;
	} else
		qi_ev->flow_id = 0;

	if (conf->rx_queue_flags &
	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
		queue_info->ena_vector = 1;
		qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR;
		rxa_set_vector_data(queue_info, conf->vector_sz,
				    conf->vector_timeout_ns, conf->vector_mp,
				    rx_queue_id, dev_info->dev->data->port_id);
		rx_adapter->ena_vector = 1;
		/* Adapter-wide vector flush period: half of the smallest
		 * per-queue vector timeout configured so far.
		 */
		rx_adapter->vector_tmo_ticks =
			rx_adapter->vector_tmo_ticks ?
				      RTE_MIN(queue_info->vector_data
							.vector_timeout_ticks >>
						1,
					rx_adapter->vector_tmo_ticks) :
				queue_info->vector_data.vector_timeout_ticks >>
					1;
	}

	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
	/* Transition counters: only the delta vs the previous mode is
	 * applied (pollq/intrq/sintrq hold the pre-update state).
	 */
	if (rxa_polled_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled += !pollq;
		dev_info->nb_rx_poll += !pollq;
		rx_adapter->num_rx_intr -= intrq;
		dev_info->nb_rx_intr -= intrq;
		dev_info->nb_shared_intr -= intrq && sintrq;
	}

	if (rxa_intr_queue(dev_info, rx_queue_id)) {
		rx_adapter->num_rx_polled -= pollq;
		dev_info->nb_rx_poll -= pollq;
		rx_adapter->num_rx_intr += !intrq;
		dev_info->nb_rx_intr += !intrq;
		dev_info->nb_shared_intr += !intrq && sintrq;
		/* First shared-intr queue: initialize the epoll vector
		 * index cursor.
		 */
		if (dev_info->nb_shared_intr == 1) {
			if (dev_info->multi_intr_cap)
				dev_info->next_q_idx =
					RTE_MAX_RXTX_INTR_VEC_ID - 1;
			else
				dev_info->next_q_idx = 0;
		}
	}
}
203699a2dd95SBruce Richardson 
203799a2dd95SBruce Richardson static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
203899a2dd95SBruce Richardson 		uint16_t eth_dev_id,
203999a2dd95SBruce Richardson 		int rx_queue_id,
204099a2dd95SBruce Richardson 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
204199a2dd95SBruce Richardson {
204299a2dd95SBruce Richardson 	struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
204399a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_queue_conf temp_conf;
204499a2dd95SBruce Richardson 	int ret;
204599a2dd95SBruce Richardson 	struct eth_rx_poll_entry *rx_poll;
204699a2dd95SBruce Richardson 	struct eth_rx_queue_info *rx_queue;
204799a2dd95SBruce Richardson 	uint32_t *rx_wrr;
204899a2dd95SBruce Richardson 	uint16_t nb_rx_queues;
204999a2dd95SBruce Richardson 	uint32_t nb_rx_poll, nb_wrr;
205099a2dd95SBruce Richardson 	uint32_t nb_rx_intr;
205199a2dd95SBruce Richardson 	int num_intr_vec;
205299a2dd95SBruce Richardson 	uint16_t wt;
205399a2dd95SBruce Richardson 
205499a2dd95SBruce Richardson 	if (queue_conf->servicing_weight == 0) {
205599a2dd95SBruce Richardson 		struct rte_eth_dev_data *data = dev_info->dev->data;
205699a2dd95SBruce Richardson 
205799a2dd95SBruce Richardson 		temp_conf = *queue_conf;
205899a2dd95SBruce Richardson 		if (!data->dev_conf.intr_conf.rxq) {
205999a2dd95SBruce Richardson 			/* If Rx interrupts are disabled set wt = 1 */
206099a2dd95SBruce Richardson 			temp_conf.servicing_weight = 1;
206199a2dd95SBruce Richardson 		}
206299a2dd95SBruce Richardson 		queue_conf = &temp_conf;
206399a2dd95SBruce Richardson 	}
206499a2dd95SBruce Richardson 
206599a2dd95SBruce Richardson 	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
206699a2dd95SBruce Richardson 	rx_queue = dev_info->rx_queue;
206799a2dd95SBruce Richardson 	wt = queue_conf->servicing_weight;
206899a2dd95SBruce Richardson 
206999a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL) {
207099a2dd95SBruce Richardson 		dev_info->rx_queue =
207199a2dd95SBruce Richardson 		    rte_zmalloc_socket(rx_adapter->mem_name,
207299a2dd95SBruce Richardson 				       nb_rx_queues *
207399a2dd95SBruce Richardson 				       sizeof(struct eth_rx_queue_info), 0,
207499a2dd95SBruce Richardson 				       rx_adapter->socket_id);
207599a2dd95SBruce Richardson 		if (dev_info->rx_queue == NULL)
207699a2dd95SBruce Richardson 			return -ENOMEM;
207799a2dd95SBruce Richardson 	}
207899a2dd95SBruce Richardson 	rx_wrr = NULL;
207999a2dd95SBruce Richardson 	rx_poll = NULL;
208099a2dd95SBruce Richardson 
208199a2dd95SBruce Richardson 	rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
208299a2dd95SBruce Richardson 			queue_conf->servicing_weight,
208399a2dd95SBruce Richardson 			&nb_rx_poll, &nb_rx_intr, &nb_wrr);
208499a2dd95SBruce Richardson 
208599a2dd95SBruce Richardson 	if (dev_info->dev->intr_handle)
208699a2dd95SBruce Richardson 		dev_info->multi_intr_cap =
208799a2dd95SBruce Richardson 			rte_intr_cap_multiple(dev_info->dev->intr_handle);
208899a2dd95SBruce Richardson 
208999a2dd95SBruce Richardson 	ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
209099a2dd95SBruce Richardson 				&rx_poll, &rx_wrr);
209199a2dd95SBruce Richardson 	if (ret)
209299a2dd95SBruce Richardson 		goto err_free_rxqueue;
209399a2dd95SBruce Richardson 
209499a2dd95SBruce Richardson 	if (wt == 0) {
209599a2dd95SBruce Richardson 		num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
209699a2dd95SBruce Richardson 
209799a2dd95SBruce Richardson 		ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
209899a2dd95SBruce Richardson 		if (ret)
209999a2dd95SBruce Richardson 			goto err_free_rxqueue;
210099a2dd95SBruce Richardson 
210199a2dd95SBruce Richardson 		ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
210299a2dd95SBruce Richardson 		if (ret)
210399a2dd95SBruce Richardson 			goto err_free_rxqueue;
210499a2dd95SBruce Richardson 	} else {
210599a2dd95SBruce Richardson 
210699a2dd95SBruce Richardson 		num_intr_vec = 0;
210799a2dd95SBruce Richardson 		if (rx_adapter->num_rx_intr > nb_rx_intr) {
210899a2dd95SBruce Richardson 			num_intr_vec = rxa_nb_intr_vect(dev_info,
210999a2dd95SBruce Richardson 						rx_queue_id, 0);
211099a2dd95SBruce Richardson 			/* interrupt based queues are being converted to
211199a2dd95SBruce Richardson 			 * poll mode queues, delete the interrupt configuration
211299a2dd95SBruce Richardson 			 * for those.
211399a2dd95SBruce Richardson 			 */
211499a2dd95SBruce Richardson 			ret = rxa_del_intr_queue(rx_adapter,
211599a2dd95SBruce Richardson 						dev_info, rx_queue_id);
211699a2dd95SBruce Richardson 			if (ret)
211799a2dd95SBruce Richardson 				goto err_free_rxqueue;
211899a2dd95SBruce Richardson 		}
211999a2dd95SBruce Richardson 	}
212099a2dd95SBruce Richardson 
212199a2dd95SBruce Richardson 	if (nb_rx_intr == 0) {
212299a2dd95SBruce Richardson 		ret = rxa_free_intr_resources(rx_adapter);
212399a2dd95SBruce Richardson 		if (ret)
212499a2dd95SBruce Richardson 			goto err_free_rxqueue;
212599a2dd95SBruce Richardson 	}
212699a2dd95SBruce Richardson 
212799a2dd95SBruce Richardson 	if (wt == 0) {
212899a2dd95SBruce Richardson 		uint16_t i;
212999a2dd95SBruce Richardson 
213099a2dd95SBruce Richardson 		if (rx_queue_id  == -1) {
213199a2dd95SBruce Richardson 			for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
213299a2dd95SBruce Richardson 				dev_info->intr_queue[i] = i;
213399a2dd95SBruce Richardson 		} else {
213499a2dd95SBruce Richardson 			if (!rxa_intr_queue(dev_info, rx_queue_id))
213599a2dd95SBruce Richardson 				dev_info->intr_queue[nb_rx_intr - 1] =
213699a2dd95SBruce Richardson 					rx_queue_id;
213799a2dd95SBruce Richardson 		}
213899a2dd95SBruce Richardson 	}
213999a2dd95SBruce Richardson 
214099a2dd95SBruce Richardson 
214199a2dd95SBruce Richardson 
214299a2dd95SBruce Richardson 	rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
214399a2dd95SBruce Richardson 	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
214499a2dd95SBruce Richardson 
214599a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_rx_poll);
214699a2dd95SBruce Richardson 	rte_free(rx_adapter->wrr_sched);
214799a2dd95SBruce Richardson 
214899a2dd95SBruce Richardson 	rx_adapter->eth_rx_poll = rx_poll;
214999a2dd95SBruce Richardson 	rx_adapter->wrr_sched = rx_wrr;
215099a2dd95SBruce Richardson 	rx_adapter->wrr_len = nb_wrr;
215199a2dd95SBruce Richardson 	rx_adapter->num_intr_vec += num_intr_vec;
215299a2dd95SBruce Richardson 	return 0;
215399a2dd95SBruce Richardson 
215499a2dd95SBruce Richardson err_free_rxqueue:
215599a2dd95SBruce Richardson 	if (rx_queue == NULL) {
215699a2dd95SBruce Richardson 		rte_free(dev_info->rx_queue);
215799a2dd95SBruce Richardson 		dev_info->rx_queue = NULL;
215899a2dd95SBruce Richardson 	}
215999a2dd95SBruce Richardson 
216099a2dd95SBruce Richardson 	rte_free(rx_poll);
216199a2dd95SBruce Richardson 	rte_free(rx_wrr);
216299a2dd95SBruce Richardson 
216399a2dd95SBruce Richardson 	return 0;
216499a2dd95SBruce Richardson }
216599a2dd95SBruce Richardson 
216699a2dd95SBruce Richardson static int
216799a2dd95SBruce Richardson rxa_ctrl(uint8_t id, int start)
216899a2dd95SBruce Richardson {
216999a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
217099a2dd95SBruce Richardson 	struct rte_eventdev *dev;
217199a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
217299a2dd95SBruce Richardson 	uint32_t i;
217399a2dd95SBruce Richardson 	int use_service = 0;
217499a2dd95SBruce Richardson 	int stop = !start;
217599a2dd95SBruce Richardson 
217699a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
217799a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
217899a2dd95SBruce Richardson 	if (rx_adapter == NULL)
217999a2dd95SBruce Richardson 		return -EINVAL;
218099a2dd95SBruce Richardson 
218199a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
218299a2dd95SBruce Richardson 
218399a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
218499a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
218599a2dd95SBruce Richardson 		/* if start  check for num dev queues */
218699a2dd95SBruce Richardson 		if (start && !dev_info->nb_dev_queues)
218799a2dd95SBruce Richardson 			continue;
218899a2dd95SBruce Richardson 		/* if stop check if dev has been started */
218999a2dd95SBruce Richardson 		if (stop && !dev_info->dev_rx_started)
219099a2dd95SBruce Richardson 			continue;
219199a2dd95SBruce Richardson 		use_service |= !dev_info->internal_event_port;
219299a2dd95SBruce Richardson 		dev_info->dev_rx_started = start;
219399a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0)
219499a2dd95SBruce Richardson 			continue;
219599a2dd95SBruce Richardson 		start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
219699a2dd95SBruce Richardson 						&rte_eth_devices[i]) :
219799a2dd95SBruce Richardson 			(*dev->dev_ops->eth_rx_adapter_stop)(dev,
219899a2dd95SBruce Richardson 						&rte_eth_devices[i]);
219999a2dd95SBruce Richardson 	}
220099a2dd95SBruce Richardson 
220199a2dd95SBruce Richardson 	if (use_service) {
220299a2dd95SBruce Richardson 		rte_spinlock_lock(&rx_adapter->rx_lock);
220399a2dd95SBruce Richardson 		rx_adapter->rxa_started = start;
220499a2dd95SBruce Richardson 		rte_service_runstate_set(rx_adapter->service_id, start);
220599a2dd95SBruce Richardson 		rte_spinlock_unlock(&rx_adapter->rx_lock);
220699a2dd95SBruce Richardson 	}
220799a2dd95SBruce Richardson 
220899a2dd95SBruce Richardson 	return 0;
220999a2dd95SBruce Richardson }
221099a2dd95SBruce Richardson 
2211*bc0df25cSNaga Harish K S V static int
2212*bc0df25cSNaga Harish K S V rxa_create(uint8_t id, uint8_t dev_id,
2213*bc0df25cSNaga Harish K S V 	   struct rte_event_eth_rx_adapter_params *rxa_params,
221499a2dd95SBruce Richardson 	   rte_event_eth_rx_adapter_conf_cb conf_cb,
221599a2dd95SBruce Richardson 	   void *conf_arg)
221699a2dd95SBruce Richardson {
221799a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
2218*bc0df25cSNaga Harish K S V 	struct rte_eth_event_enqueue_buffer *buf;
2219*bc0df25cSNaga Harish K S V 	struct rte_event *events;
222099a2dd95SBruce Richardson 	int ret;
222199a2dd95SBruce Richardson 	int socket_id;
222299a2dd95SBruce Richardson 	uint16_t i;
222399a2dd95SBruce Richardson 	char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
222499a2dd95SBruce Richardson 	const uint8_t default_rss_key[] = {
222599a2dd95SBruce Richardson 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
222699a2dd95SBruce Richardson 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
222799a2dd95SBruce Richardson 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
222899a2dd95SBruce Richardson 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
222999a2dd95SBruce Richardson 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
223099a2dd95SBruce Richardson 	};
223199a2dd95SBruce Richardson 
223299a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
223399a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
2234*bc0df25cSNaga Harish K S V 
223599a2dd95SBruce Richardson 	if (conf_cb == NULL)
223699a2dd95SBruce Richardson 		return -EINVAL;
223799a2dd95SBruce Richardson 
223899a2dd95SBruce Richardson 	if (event_eth_rx_adapter == NULL) {
223999a2dd95SBruce Richardson 		ret = rte_event_eth_rx_adapter_init();
224099a2dd95SBruce Richardson 		if (ret)
224199a2dd95SBruce Richardson 			return ret;
224299a2dd95SBruce Richardson 	}
224399a2dd95SBruce Richardson 
224499a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
224599a2dd95SBruce Richardson 	if (rx_adapter != NULL) {
224699a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
224799a2dd95SBruce Richardson 		return -EEXIST;
224899a2dd95SBruce Richardson 	}
224999a2dd95SBruce Richardson 
225099a2dd95SBruce Richardson 	socket_id = rte_event_dev_socket_id(dev_id);
225199a2dd95SBruce Richardson 	snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
225299a2dd95SBruce Richardson 		"rte_event_eth_rx_adapter_%d",
225399a2dd95SBruce Richardson 		id);
225499a2dd95SBruce Richardson 
225599a2dd95SBruce Richardson 	rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
225699a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE, socket_id);
225799a2dd95SBruce Richardson 	if (rx_adapter == NULL) {
225899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
225999a2dd95SBruce Richardson 		return -ENOMEM;
226099a2dd95SBruce Richardson 	}
226199a2dd95SBruce Richardson 
226299a2dd95SBruce Richardson 	rx_adapter->eventdev_id = dev_id;
226399a2dd95SBruce Richardson 	rx_adapter->socket_id = socket_id;
226499a2dd95SBruce Richardson 	rx_adapter->conf_cb = conf_cb;
226599a2dd95SBruce Richardson 	rx_adapter->conf_arg = conf_arg;
226699a2dd95SBruce Richardson 	rx_adapter->id = id;
226799a2dd95SBruce Richardson 	TAILQ_INIT(&rx_adapter->vector_list);
226899a2dd95SBruce Richardson 	strcpy(rx_adapter->mem_name, mem_name);
226999a2dd95SBruce Richardson 	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
227099a2dd95SBruce Richardson 					RTE_MAX_ETHPORTS *
227199a2dd95SBruce Richardson 					sizeof(struct eth_device_info), 0,
227299a2dd95SBruce Richardson 					socket_id);
227399a2dd95SBruce Richardson 	rte_convert_rss_key((const uint32_t *)default_rss_key,
227499a2dd95SBruce Richardson 			(uint32_t *)rx_adapter->rss_key_be,
227599a2dd95SBruce Richardson 			    RTE_DIM(default_rss_key));
227699a2dd95SBruce Richardson 
227799a2dd95SBruce Richardson 	if (rx_adapter->eth_devices == NULL) {
227899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
227999a2dd95SBruce Richardson 		rte_free(rx_adapter);
228099a2dd95SBruce Richardson 		return -ENOMEM;
228199a2dd95SBruce Richardson 	}
2282*bc0df25cSNaga Harish K S V 
228399a2dd95SBruce Richardson 	rte_spinlock_init(&rx_adapter->rx_lock);
2284*bc0df25cSNaga Harish K S V 
228599a2dd95SBruce Richardson 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
228699a2dd95SBruce Richardson 		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
228799a2dd95SBruce Richardson 
2288*bc0df25cSNaga Harish K S V 	/* Rx adapter event buffer allocation */
2289*bc0df25cSNaga Harish K S V 	buf = &rx_adapter->event_enqueue_buffer;
2290*bc0df25cSNaga Harish K S V 	buf->events_size = rxa_params->event_buf_size;
2291*bc0df25cSNaga Harish K S V 
2292*bc0df25cSNaga Harish K S V 	events = rte_zmalloc_socket(rx_adapter->mem_name,
2293*bc0df25cSNaga Harish K S V 				    buf->events_size * sizeof(*events),
2294*bc0df25cSNaga Harish K S V 				    0, socket_id);
2295*bc0df25cSNaga Harish K S V 	if (events == NULL) {
2296*bc0df25cSNaga Harish K S V 		RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
2297*bc0df25cSNaga Harish K S V 		rte_free(rx_adapter->eth_devices);
2298*bc0df25cSNaga Harish K S V 		rte_free(rx_adapter);
2299*bc0df25cSNaga Harish K S V 		return -ENOMEM;
2300*bc0df25cSNaga Harish K S V 	}
2301*bc0df25cSNaga Harish K S V 
2302*bc0df25cSNaga Harish K S V 	rx_adapter->event_enqueue_buffer.events = events;
2303*bc0df25cSNaga Harish K S V 
230499a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = rx_adapter;
2305*bc0df25cSNaga Harish K S V 
230699a2dd95SBruce Richardson 	if (conf_cb == rxa_default_conf_cb)
230799a2dd95SBruce Richardson 		rx_adapter->default_cb_arg = 1;
230883ab470dSGanapati Kundapura 
230983ab470dSGanapati Kundapura 	if (rte_mbuf_dyn_rx_timestamp_register(
231083ab470dSGanapati Kundapura 			&event_eth_rx_timestamp_dynfield_offset,
231183ab470dSGanapati Kundapura 			&event_eth_rx_timestamp_dynflag) != 0) {
231283ab470dSGanapati Kundapura 		RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n");
231383ab470dSGanapati Kundapura 		return -rte_errno;
231483ab470dSGanapati Kundapura 	}
231583ab470dSGanapati Kundapura 
231699a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb,
231799a2dd95SBruce Richardson 		conf_arg);
231899a2dd95SBruce Richardson 	return 0;
231999a2dd95SBruce Richardson }
232099a2dd95SBruce Richardson 
232199a2dd95SBruce Richardson int
2322*bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
2323*bc0df25cSNaga Harish K S V 				rte_event_eth_rx_adapter_conf_cb conf_cb,
2324*bc0df25cSNaga Harish K S V 				void *conf_arg)
2325*bc0df25cSNaga Harish K S V {
2326*bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params rxa_params = {0};
2327*bc0df25cSNaga Harish K S V 
2328*bc0df25cSNaga Harish K S V 	/* use default values for adapter params */
2329*bc0df25cSNaga Harish K S V 	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
2330*bc0df25cSNaga Harish K S V 
2331*bc0df25cSNaga Harish K S V 	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
2332*bc0df25cSNaga Harish K S V }
2333*bc0df25cSNaga Harish K S V 
2334*bc0df25cSNaga Harish K S V int
2335*bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
2336*bc0df25cSNaga Harish K S V 			struct rte_event_port_conf *port_config,
2337*bc0df25cSNaga Harish K S V 			struct rte_event_eth_rx_adapter_params *rxa_params)
2338*bc0df25cSNaga Harish K S V {
2339*bc0df25cSNaga Harish K S V 	struct rte_event_port_conf *pc;
2340*bc0df25cSNaga Harish K S V 	int ret;
2341*bc0df25cSNaga Harish K S V 	struct rte_event_eth_rx_adapter_params temp_params = {0};
2342*bc0df25cSNaga Harish K S V 
2343*bc0df25cSNaga Harish K S V 	if (port_config == NULL)
2344*bc0df25cSNaga Harish K S V 		return -EINVAL;
2345*bc0df25cSNaga Harish K S V 
2346*bc0df25cSNaga Harish K S V 	/* use default values if rxa_params is NULL */
2347*bc0df25cSNaga Harish K S V 	if (rxa_params == NULL) {
2348*bc0df25cSNaga Harish K S V 		rxa_params = &temp_params;
2349*bc0df25cSNaga Harish K S V 		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
2350*bc0df25cSNaga Harish K S V 	}
2351*bc0df25cSNaga Harish K S V 
2352*bc0df25cSNaga Harish K S V 	if (rxa_params->event_buf_size == 0)
2353*bc0df25cSNaga Harish K S V 		return -EINVAL;
2354*bc0df25cSNaga Harish K S V 
2355*bc0df25cSNaga Harish K S V 	pc = rte_malloc(NULL, sizeof(*pc), 0);
2356*bc0df25cSNaga Harish K S V 	if (pc == NULL)
2357*bc0df25cSNaga Harish K S V 		return -ENOMEM;
2358*bc0df25cSNaga Harish K S V 
2359*bc0df25cSNaga Harish K S V 	*pc = *port_config;
2360*bc0df25cSNaga Harish K S V 
2361*bc0df25cSNaga Harish K S V 	/* adjust event buff size with BATCH_SIZE used for fetching packets
2362*bc0df25cSNaga Harish K S V 	 * from NIC rx queues to get full buffer utilization and prevent
2363*bc0df25cSNaga Harish K S V 	 * unnecessary rollovers.
2364*bc0df25cSNaga Harish K S V 	 */
2365*bc0df25cSNaga Harish K S V 	rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size,
2366*bc0df25cSNaga Harish K S V 					       BATCH_SIZE);
2367*bc0df25cSNaga Harish K S V 	rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
2368*bc0df25cSNaga Harish K S V 
2369*bc0df25cSNaga Harish K S V 	ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
2370*bc0df25cSNaga Harish K S V 	if (ret)
2371*bc0df25cSNaga Harish K S V 		rte_free(pc);
2372*bc0df25cSNaga Harish K S V 
2373*bc0df25cSNaga Harish K S V 	return ret;
2374*bc0df25cSNaga Harish K S V }
2375*bc0df25cSNaga Harish K S V 
2376*bc0df25cSNaga Harish K S V int
237799a2dd95SBruce Richardson rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
237899a2dd95SBruce Richardson 		struct rte_event_port_conf *port_config)
237999a2dd95SBruce Richardson {
238099a2dd95SBruce Richardson 	struct rte_event_port_conf *pc;
238199a2dd95SBruce Richardson 	int ret;
238299a2dd95SBruce Richardson 
238399a2dd95SBruce Richardson 	if (port_config == NULL)
238499a2dd95SBruce Richardson 		return -EINVAL;
2385*bc0df25cSNaga Harish K S V 
238699a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
238799a2dd95SBruce Richardson 
238899a2dd95SBruce Richardson 	pc = rte_malloc(NULL, sizeof(*pc), 0);
238999a2dd95SBruce Richardson 	if (pc == NULL)
239099a2dd95SBruce Richardson 		return -ENOMEM;
239199a2dd95SBruce Richardson 	*pc = *port_config;
2392*bc0df25cSNaga Harish K S V 
239399a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
239499a2dd95SBruce Richardson 					rxa_default_conf_cb,
239599a2dd95SBruce Richardson 					pc);
239699a2dd95SBruce Richardson 	if (ret)
239799a2dd95SBruce Richardson 		rte_free(pc);
239899a2dd95SBruce Richardson 	return ret;
239999a2dd95SBruce Richardson }
240099a2dd95SBruce Richardson 
240199a2dd95SBruce Richardson int
240299a2dd95SBruce Richardson rte_event_eth_rx_adapter_free(uint8_t id)
240399a2dd95SBruce Richardson {
240499a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
240599a2dd95SBruce Richardson 
240699a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
240799a2dd95SBruce Richardson 
240899a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
240999a2dd95SBruce Richardson 	if (rx_adapter == NULL)
241099a2dd95SBruce Richardson 		return -EINVAL;
241199a2dd95SBruce Richardson 
241299a2dd95SBruce Richardson 	if (rx_adapter->nb_queues) {
241399a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted",
241499a2dd95SBruce Richardson 				rx_adapter->nb_queues);
241599a2dd95SBruce Richardson 		return -EBUSY;
241699a2dd95SBruce Richardson 	}
241799a2dd95SBruce Richardson 
241899a2dd95SBruce Richardson 	if (rx_adapter->default_cb_arg)
241999a2dd95SBruce Richardson 		rte_free(rx_adapter->conf_arg);
242099a2dd95SBruce Richardson 	rte_free(rx_adapter->eth_devices);
2421*bc0df25cSNaga Harish K S V 	rte_free(rx_adapter->event_enqueue_buffer.events);
242299a2dd95SBruce Richardson 	rte_free(rx_adapter);
242399a2dd95SBruce Richardson 	event_eth_rx_adapter[id] = NULL;
242499a2dd95SBruce Richardson 
242599a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_free(id);
242699a2dd95SBruce Richardson 	return 0;
242799a2dd95SBruce Richardson }
242899a2dd95SBruce Richardson 
/*
 * Add an ethdev Rx queue to an Rx adapter.
 *
 * rx_queue_id may be -1 to add all Rx queues of the port. The request is
 * validated against the adapter capabilities reported for this
 * eventdev/ethdev pair and then routed either to the PMD (internal event
 * port capability) or to the SW service based implementation.
 *
 * Returns 0 on success, negative errno-style value on failure.
 */
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
		uint16_t eth_dev_id,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	int ret;
	uint32_t cap;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct rte_eventdev *dev;
	struct eth_device_info *dev_info;
	struct rte_event_eth_rx_adapter_vector_limits limits;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if ((rx_adapter == NULL) || (queue_conf == NULL))
		return -EINVAL;

	/* capabilities are specific to this eventdev/ethdev combination */
	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
			"eth port %" PRIu16, id, eth_dev_id);
		return ret;
	}

	/* flow ID override may only be requested when supported */
	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
		&& (queue_conf->rx_queue_flags &
			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
		RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
				" eth port: %" PRIu16 " adapter id: %" PRIu8,
				eth_dev_id, id);
		return -EINVAL;
	}

	/* validate event vectorization settings, if requested */
	if (queue_conf->rx_queue_flags &
	    RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {

		if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
			RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}

		/* vector size/timeout must fall within device limits */
		ret = rte_event_eth_rx_adapter_vector_limits_get(
			rx_adapter->eventdev_id, eth_dev_id, &limits);
		if (ret < 0) {
			RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
		if (queue_conf->vector_sz < limits.min_sz ||
		    queue_conf->vector_sz > limits.max_sz ||
		    queue_conf->vector_timeout_ns < limits.min_timeout_ns ||
		    queue_conf->vector_timeout_ns > limits.max_timeout_ns ||
		    queue_conf->vector_mp == NULL) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
		/* each mempool element must fit a vector header plus
		 * vector_sz pointer-sized entries
		 */
		if (queue_conf->vector_mp->elt_size <
		    (sizeof(struct rte_event_vector) +
		     (sizeof(uintptr_t) * queue_conf->vector_sz))) {
			RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
					 " eth port: %" PRIu16
					 " adapter id: %" PRIu8,
					 eth_dev_id, id);
			return -EINVAL;
		}
	}

	/* without MULTI_EVENTQ only the "all queues" (-1) form is valid */
	if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
		(rx_queue_id != -1)) {
		RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
			"event queue, eth port: %" PRIu16 " adapter id: %"
			PRIu8, eth_dev_id, id);
		return -EINVAL;
	}

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
			rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		/* PMD implements the adapter: delegate the queue add */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
					-ENOTSUP);
		/* lazily allocate per-queue state array for this port */
		if (dev_info->rx_queue == NULL) {
			dev_info->rx_queue =
			    rte_zmalloc_socket(rx_adapter->mem_name,
					dev_info->dev->data->nb_rx_queues *
					sizeof(struct eth_rx_queue_info), 0,
					rx_adapter->socket_id);
			if (dev_info->rx_queue == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
				&rte_eth_devices[eth_dev_id],
				rx_queue_id, queue_conf);
		if (ret == 0) {
			dev_info->internal_event_port = 1;
			/* record the queue(s) in adapter bookkeeping */
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					1);
		}
	} else {
		/* SW service path: serialize with other control ops and
		 * the service function via rx_lock
		 */
		rte_spinlock_lock(&rx_adapter->rx_lock);
		dev_info->internal_event_port = 0;
		ret = rxa_init_service(rx_adapter, id);
		if (ret == 0) {
			uint32_t service_id = rx_adapter->service_id;
			ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
					queue_conf);
			/* run state follows the adapter's queue count */
			rte_service_component_runstate_set(service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
		}
		rte_spinlock_unlock(&rx_adapter->rx_lock);
	}

	rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id,
		rx_queue_id, queue_conf, ret);
	if (ret)
		return ret;

	return 0;
}
257199a2dd95SBruce Richardson 
257299a2dd95SBruce Richardson static int
257399a2dd95SBruce Richardson rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
257499a2dd95SBruce Richardson {
257599a2dd95SBruce Richardson 	limits->max_sz = MAX_VECTOR_SIZE;
257699a2dd95SBruce Richardson 	limits->min_sz = MIN_VECTOR_SIZE;
257799a2dd95SBruce Richardson 	limits->max_timeout_ns = MAX_VECTOR_NS;
257899a2dd95SBruce Richardson 	limits->min_timeout_ns = MIN_VECTOR_NS;
257999a2dd95SBruce Richardson 
258099a2dd95SBruce Richardson 	return 0;
258199a2dd95SBruce Richardson }
258299a2dd95SBruce Richardson 
/*
 * Remove an ethdev Rx queue (all queues of the port when rx_queue_id is
 * -1) from the Rx adapter.
 *
 * PMDs with the internal event port capability handle the delete
 * themselves; otherwise the SW service path rebuilds its poll/WRR state
 * without the removed queue(s).
 *
 * Returns 0 on success, negative errno-style value on failure.
 */
int
rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
				int32_t rx_queue_id)
{
	int ret = 0;
	struct rte_eventdev *dev;
	struct rte_event_eth_rx_adapter *rx_adapter;
	struct eth_device_info *dev_info;
	uint32_t cap;
	uint32_t nb_rx_poll = 0;
	uint32_t nb_wrr = 0;
	uint32_t nb_rx_intr;
	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;
	int num_intr_vec;

	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);

	rx_adapter = rxa_id_to_adapter(id);
	if (rx_adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[rx_adapter->eventdev_id];
	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
						eth_dev_id,
						&cap);
	if (ret)
		return ret;

	if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
		rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
		RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
			 (uint16_t)rx_queue_id);
		return -EINVAL;
	}

	dev_info = &rx_adapter->eth_devices[eth_dev_id];

	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
		/* PMD implements the adapter: delegate the queue delete */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
				 -ENOTSUP);
		ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
						&rte_eth_devices[eth_dev_id],
						rx_queue_id);
		if (ret == 0) {
			rxa_update_queue(rx_adapter,
					&rx_adapter->eth_devices[eth_dev_id],
					rx_queue_id,
					0);
			/* drop the per-queue state array once the port
			 * has no queues left on the adapter
			 */
			if (dev_info->nb_dev_queues == 0) {
				rte_free(dev_info->rx_queue);
				dev_info->rx_queue = NULL;
			}
		}
	} else {
		/* compute the poll/interrupt/WRR entry counts that will
		 * apply after the queue(s) are removed
		 */
		rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
			&nb_rx_poll, &nb_rx_intr, &nb_wrr);

		/* allocate replacement arrays before taking the lock so
		 * allocation failure leaves adapter state untouched
		 */
		ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
			&rx_poll, &rx_wrr);
		if (ret)
			return ret;

		rte_spinlock_lock(&rx_adapter->rx_lock);

		/* tear down interrupt mode state for the deleted queue(s) */
		num_intr_vec = 0;
		if (rx_adapter->num_rx_intr > nb_rx_intr) {

			num_intr_vec = rxa_nb_intr_vect(dev_info,
						rx_queue_id, 0);
			ret = rxa_del_intr_queue(rx_adapter, dev_info,
					rx_queue_id);
			if (ret)
				goto unlock_ret;
		}

		/* no interrupt-mode queues remain at all */
		if (nb_rx_intr == 0) {
			ret = rxa_free_intr_resources(rx_adapter);
			if (ret)
				goto unlock_ret;
		}

		rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
		rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);

		/* swap in the rebuilt poll and WRR arrays */
		rte_free(rx_adapter->eth_rx_poll);
		rte_free(rx_adapter->wrr_sched);

		if (nb_rx_intr == 0) {
			rte_free(dev_info->intr_queue);
			dev_info->intr_queue = NULL;
		}

		rx_adapter->eth_rx_poll = rx_poll;
		rx_adapter->wrr_sched = rx_wrr;
		rx_adapter->wrr_len = nb_wrr;
		rx_adapter->num_intr_vec += num_intr_vec;

		if (dev_info->nb_dev_queues == 0) {
			rte_free(dev_info->rx_queue);
			dev_info->rx_queue = NULL;
		}
unlock_ret:
		rte_spinlock_unlock(&rx_adapter->rx_lock);
		if (ret) {
			/* failure: discard the unused replacement arrays */
			rte_free(rx_poll);
			rte_free(rx_wrr);
			return ret;
		}

		/* run state follows the adapter's remaining queue count */
		rte_service_component_runstate_set(rx_adapter->service_id,
				rxa_sw_adapter_queue_count(rx_adapter));
	}

	rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id,
		rx_queue_id, ret);
	return ret;
}
270299a2dd95SBruce Richardson 
270399a2dd95SBruce Richardson int
270499a2dd95SBruce Richardson rte_event_eth_rx_adapter_vector_limits_get(
270599a2dd95SBruce Richardson 	uint8_t dev_id, uint16_t eth_port_id,
270699a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_vector_limits *limits)
270799a2dd95SBruce Richardson {
270899a2dd95SBruce Richardson 	struct rte_eventdev *dev;
270999a2dd95SBruce Richardson 	uint32_t cap;
271099a2dd95SBruce Richardson 	int ret;
271199a2dd95SBruce Richardson 
271299a2dd95SBruce Richardson 	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
271399a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
271499a2dd95SBruce Richardson 
271599a2dd95SBruce Richardson 	if (limits == NULL)
271699a2dd95SBruce Richardson 		return -EINVAL;
271799a2dd95SBruce Richardson 
271899a2dd95SBruce Richardson 	dev = &rte_eventdevs[dev_id];
271999a2dd95SBruce Richardson 
272099a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap);
272199a2dd95SBruce Richardson 	if (ret) {
272299a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
272399a2dd95SBruce Richardson 				 "eth port %" PRIu16,
272499a2dd95SBruce Richardson 				 dev_id, eth_port_id);
272599a2dd95SBruce Richardson 		return ret;
272699a2dd95SBruce Richardson 	}
272799a2dd95SBruce Richardson 
272899a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
272999a2dd95SBruce Richardson 		RTE_FUNC_PTR_OR_ERR_RET(
273099a2dd95SBruce Richardson 			*dev->dev_ops->eth_rx_adapter_vector_limits_get,
273199a2dd95SBruce Richardson 			-ENOTSUP);
273299a2dd95SBruce Richardson 		ret = dev->dev_ops->eth_rx_adapter_vector_limits_get(
273399a2dd95SBruce Richardson 			dev, &rte_eth_devices[eth_port_id], limits);
273499a2dd95SBruce Richardson 	} else {
273599a2dd95SBruce Richardson 		ret = rxa_sw_vector_limits(limits);
273699a2dd95SBruce Richardson 	}
273799a2dd95SBruce Richardson 
273899a2dd95SBruce Richardson 	return ret;
273999a2dd95SBruce Richardson }
274099a2dd95SBruce Richardson 
274199a2dd95SBruce Richardson int
274299a2dd95SBruce Richardson rte_event_eth_rx_adapter_start(uint8_t id)
274399a2dd95SBruce Richardson {
274499a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_start(id);
274599a2dd95SBruce Richardson 	return rxa_ctrl(id, 1);
274699a2dd95SBruce Richardson }
274799a2dd95SBruce Richardson 
274899a2dd95SBruce Richardson int
274999a2dd95SBruce Richardson rte_event_eth_rx_adapter_stop(uint8_t id)
275099a2dd95SBruce Richardson {
275199a2dd95SBruce Richardson 	rte_eventdev_trace_eth_rx_adapter_stop(id);
275299a2dd95SBruce Richardson 	return rxa_ctrl(id, 0);
275399a2dd95SBruce Richardson }
275499a2dd95SBruce Richardson 
275599a2dd95SBruce Richardson int
275699a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_get(uint8_t id,
275799a2dd95SBruce Richardson 			       struct rte_event_eth_rx_adapter_stats *stats)
275899a2dd95SBruce Richardson {
275999a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
276099a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
276199a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter_stats dev_stats;
276299a2dd95SBruce Richardson 	struct rte_eventdev *dev;
276399a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
276499a2dd95SBruce Richardson 	uint32_t i;
276599a2dd95SBruce Richardson 	int ret;
276699a2dd95SBruce Richardson 
2767da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2768da781e64SGanapati Kundapura 		return -ENOMEM;
2769da781e64SGanapati Kundapura 
277099a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
277199a2dd95SBruce Richardson 
277299a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
277399a2dd95SBruce Richardson 	if (rx_adapter  == NULL || stats == NULL)
277499a2dd95SBruce Richardson 		return -EINVAL;
277599a2dd95SBruce Richardson 
277699a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
277799a2dd95SBruce Richardson 	memset(stats, 0, sizeof(*stats));
277899a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
277999a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
278099a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
278199a2dd95SBruce Richardson 			dev->dev_ops->eth_rx_adapter_stats_get == NULL)
278299a2dd95SBruce Richardson 			continue;
278399a2dd95SBruce Richardson 		ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
278499a2dd95SBruce Richardson 						&rte_eth_devices[i],
278599a2dd95SBruce Richardson 						&dev_stats);
278699a2dd95SBruce Richardson 		if (ret)
278799a2dd95SBruce Richardson 			continue;
278899a2dd95SBruce Richardson 		dev_stats_sum.rx_packets += dev_stats.rx_packets;
278999a2dd95SBruce Richardson 		dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
279099a2dd95SBruce Richardson 	}
279199a2dd95SBruce Richardson 
279299a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
279399a2dd95SBruce Richardson 		*stats = rx_adapter->stats;
279499a2dd95SBruce Richardson 
279599a2dd95SBruce Richardson 	stats->rx_packets += dev_stats_sum.rx_packets;
279699a2dd95SBruce Richardson 	stats->rx_enq_count += dev_stats_sum.rx_enq_count;
2797*bc0df25cSNaga Harish K S V 
279899a2dd95SBruce Richardson 	return 0;
279999a2dd95SBruce Richardson }
280099a2dd95SBruce Richardson 
280199a2dd95SBruce Richardson int
280299a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_reset(uint8_t id)
280399a2dd95SBruce Richardson {
280499a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
280599a2dd95SBruce Richardson 	struct rte_eventdev *dev;
280699a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
280799a2dd95SBruce Richardson 	uint32_t i;
280899a2dd95SBruce Richardson 
2809da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2810da781e64SGanapati Kundapura 		return -ENOMEM;
2811da781e64SGanapati Kundapura 
281299a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
281399a2dd95SBruce Richardson 
281499a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
281599a2dd95SBruce Richardson 	if (rx_adapter == NULL)
281699a2dd95SBruce Richardson 		return -EINVAL;
281799a2dd95SBruce Richardson 
281899a2dd95SBruce Richardson 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
281999a2dd95SBruce Richardson 	RTE_ETH_FOREACH_DEV(i) {
282099a2dd95SBruce Richardson 		dev_info = &rx_adapter->eth_devices[i];
282199a2dd95SBruce Richardson 		if (dev_info->internal_event_port == 0 ||
282299a2dd95SBruce Richardson 			dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
282399a2dd95SBruce Richardson 			continue;
282499a2dd95SBruce Richardson 		(*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
282599a2dd95SBruce Richardson 							&rte_eth_devices[i]);
282699a2dd95SBruce Richardson 	}
282799a2dd95SBruce Richardson 
282899a2dd95SBruce Richardson 	memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
282999a2dd95SBruce Richardson 	return 0;
283099a2dd95SBruce Richardson }
283199a2dd95SBruce Richardson 
283299a2dd95SBruce Richardson int
283399a2dd95SBruce Richardson rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
283499a2dd95SBruce Richardson {
283599a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
283699a2dd95SBruce Richardson 
2837da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2838da781e64SGanapati Kundapura 		return -ENOMEM;
2839da781e64SGanapati Kundapura 
284099a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
284199a2dd95SBruce Richardson 
284299a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
284399a2dd95SBruce Richardson 	if (rx_adapter == NULL || service_id == NULL)
284499a2dd95SBruce Richardson 		return -EINVAL;
284599a2dd95SBruce Richardson 
284699a2dd95SBruce Richardson 	if (rx_adapter->service_inited)
284799a2dd95SBruce Richardson 		*service_id = rx_adapter->service_id;
284899a2dd95SBruce Richardson 
284999a2dd95SBruce Richardson 	return rx_adapter->service_inited ? 0 : -ESRCH;
285099a2dd95SBruce Richardson }
285199a2dd95SBruce Richardson 
285299a2dd95SBruce Richardson int
285399a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_register(uint8_t id,
285499a2dd95SBruce Richardson 					uint16_t eth_dev_id,
285599a2dd95SBruce Richardson 					rte_event_eth_rx_adapter_cb_fn cb_fn,
285699a2dd95SBruce Richardson 					void *cb_arg)
285799a2dd95SBruce Richardson {
285899a2dd95SBruce Richardson 	struct rte_event_eth_rx_adapter *rx_adapter;
285999a2dd95SBruce Richardson 	struct eth_device_info *dev_info;
286099a2dd95SBruce Richardson 	uint32_t cap;
286199a2dd95SBruce Richardson 	int ret;
286299a2dd95SBruce Richardson 
286399a2dd95SBruce Richardson 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
286499a2dd95SBruce Richardson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
286599a2dd95SBruce Richardson 
286699a2dd95SBruce Richardson 	rx_adapter = rxa_id_to_adapter(id);
286799a2dd95SBruce Richardson 	if (rx_adapter == NULL)
286899a2dd95SBruce Richardson 		return -EINVAL;
286999a2dd95SBruce Richardson 
287099a2dd95SBruce Richardson 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
287199a2dd95SBruce Richardson 	if (dev_info->rx_queue == NULL)
287299a2dd95SBruce Richardson 		return -EINVAL;
287399a2dd95SBruce Richardson 
287499a2dd95SBruce Richardson 	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
287599a2dd95SBruce Richardson 						eth_dev_id,
287699a2dd95SBruce Richardson 						&cap);
287799a2dd95SBruce Richardson 	if (ret) {
287899a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
287999a2dd95SBruce Richardson 			"eth port %" PRIu16, id, eth_dev_id);
288099a2dd95SBruce Richardson 		return ret;
288199a2dd95SBruce Richardson 	}
288299a2dd95SBruce Richardson 
288399a2dd95SBruce Richardson 	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
288499a2dd95SBruce Richardson 		RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
288599a2dd95SBruce Richardson 				PRIu16, eth_dev_id);
288699a2dd95SBruce Richardson 		return -EINVAL;
288799a2dd95SBruce Richardson 	}
288899a2dd95SBruce Richardson 
288999a2dd95SBruce Richardson 	rte_spinlock_lock(&rx_adapter->rx_lock);
289099a2dd95SBruce Richardson 	dev_info->cb_fn = cb_fn;
289199a2dd95SBruce Richardson 	dev_info->cb_arg = cb_arg;
289299a2dd95SBruce Richardson 	rte_spinlock_unlock(&rx_adapter->rx_lock);
289399a2dd95SBruce Richardson 
289499a2dd95SBruce Richardson 	return 0;
289599a2dd95SBruce Richardson }
2896da781e64SGanapati Kundapura 
2897da781e64SGanapati Kundapura int
2898da781e64SGanapati Kundapura rte_event_eth_rx_adapter_queue_conf_get(uint8_t id,
2899da781e64SGanapati Kundapura 			uint16_t eth_dev_id,
2900da781e64SGanapati Kundapura 			uint16_t rx_queue_id,
2901da781e64SGanapati Kundapura 			struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
2902da781e64SGanapati Kundapura {
2903da781e64SGanapati Kundapura 	struct rte_eventdev *dev;
2904da781e64SGanapati Kundapura 	struct rte_event_eth_rx_adapter *rx_adapter;
2905da781e64SGanapati Kundapura 	struct eth_device_info *dev_info;
2906da781e64SGanapati Kundapura 	struct eth_rx_queue_info *queue_info;
2907da781e64SGanapati Kundapura 	struct rte_event *qi_ev;
2908da781e64SGanapati Kundapura 	int ret;
2909da781e64SGanapati Kundapura 
2910da781e64SGanapati Kundapura 	if (rxa_memzone_lookup())
2911da781e64SGanapati Kundapura 		return -ENOMEM;
2912da781e64SGanapati Kundapura 
2913da781e64SGanapati Kundapura 	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
2914da781e64SGanapati Kundapura 	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
2915da781e64SGanapati Kundapura 
2916da781e64SGanapati Kundapura 	if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
2917da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id);
2918da781e64SGanapati Kundapura 		return -EINVAL;
2919da781e64SGanapati Kundapura 	}
2920da781e64SGanapati Kundapura 
2921da781e64SGanapati Kundapura 	if (queue_conf == NULL) {
2922da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Rx queue conf struct cannot be NULL");
2923da781e64SGanapati Kundapura 		return -EINVAL;
2924da781e64SGanapati Kundapura 	}
2925da781e64SGanapati Kundapura 
2926da781e64SGanapati Kundapura 	rx_adapter = rxa_id_to_adapter(id);
2927da781e64SGanapati Kundapura 	if (rx_adapter == NULL)
2928da781e64SGanapati Kundapura 		return -EINVAL;
2929da781e64SGanapati Kundapura 
2930da781e64SGanapati Kundapura 	dev_info = &rx_adapter->eth_devices[eth_dev_id];
2931da781e64SGanapati Kundapura 	if (dev_info->rx_queue == NULL ||
2932da781e64SGanapati Kundapura 	    !dev_info->rx_queue[rx_queue_id].queue_enabled) {
2933da781e64SGanapati Kundapura 		RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id);
2934da781e64SGanapati Kundapura 		return -EINVAL;
2935da781e64SGanapati Kundapura 	}
2936da781e64SGanapati Kundapura 
2937da781e64SGanapati Kundapura 	queue_info = &dev_info->rx_queue[rx_queue_id];
2938da781e64SGanapati Kundapura 	qi_ev = (struct rte_event *)&queue_info->event;
2939da781e64SGanapati Kundapura 
2940da781e64SGanapati Kundapura 	memset(queue_conf, 0, sizeof(*queue_conf));
2941da781e64SGanapati Kundapura 	queue_conf->rx_queue_flags = 0;
2942da781e64SGanapati Kundapura 	if (queue_info->flow_id_mask != 0)
2943da781e64SGanapati Kundapura 		queue_conf->rx_queue_flags |=
2944da781e64SGanapati Kundapura 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
2945da781e64SGanapati Kundapura 	queue_conf->servicing_weight = queue_info->wt;
2946da781e64SGanapati Kundapura 
2947da781e64SGanapati Kundapura 	memcpy(&queue_conf->ev, qi_ev, sizeof(*qi_ev));
2948da781e64SGanapati Kundapura 
2949da781e64SGanapati Kundapura 	dev = &rte_eventdevs[rx_adapter->eventdev_id];
2950da781e64SGanapati Kundapura 	if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) {
2951da781e64SGanapati Kundapura 		ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev,
2952da781e64SGanapati Kundapura 						&rte_eth_devices[eth_dev_id],
2953da781e64SGanapati Kundapura 						rx_queue_id,
2954da781e64SGanapati Kundapura 						queue_conf);
2955da781e64SGanapati Kundapura 		return ret;
2956da781e64SGanapati Kundapura 	}
2957da781e64SGanapati Kundapura 
2958da781e64SGanapati Kundapura 	return 0;
2959da781e64SGanapati Kundapura }
2960