199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause 299a2dd95SBruce Richardson * Copyright(c) 2017 Intel Corporation. 399a2dd95SBruce Richardson * All rights reserved. 499a2dd95SBruce Richardson */ 572b452c5SDmitry Kozlyuk #include <ctype.h> 672b452c5SDmitry Kozlyuk #include <stdlib.h> 799a2dd95SBruce Richardson #if defined(LINUX) 899a2dd95SBruce Richardson #include <sys/epoll.h> 999a2dd95SBruce Richardson #endif 1099a2dd95SBruce Richardson #include <unistd.h> 1199a2dd95SBruce Richardson 1299a2dd95SBruce Richardson #include <rte_cycles.h> 1399a2dd95SBruce Richardson #include <rte_common.h> 141acb7f54SDavid Marchand #include <dev_driver.h> 1599a2dd95SBruce Richardson #include <rte_errno.h> 16f9bdee26SKonstantin Ananyev #include <ethdev_driver.h> 1799a2dd95SBruce Richardson #include <rte_log.h> 1899a2dd95SBruce Richardson #include <rte_malloc.h> 1999a2dd95SBruce Richardson #include <rte_service_component.h> 2099a2dd95SBruce Richardson #include <rte_thash.h> 2199a2dd95SBruce Richardson #include <rte_interrupts.h> 2283ab470dSGanapati Kundapura #include <rte_mbuf_dyn.h> 23814d0170SGanapati Kundapura #include <rte_telemetry.h> 2499a2dd95SBruce Richardson 2599a2dd95SBruce Richardson #include "rte_eventdev.h" 2699a2dd95SBruce Richardson #include "eventdev_pmd.h" 27f26f2ca6SPavan Nikhilesh #include "eventdev_trace.h" 2899a2dd95SBruce Richardson #include "rte_event_eth_rx_adapter.h" 2999a2dd95SBruce Richardson 3099a2dd95SBruce Richardson #define BATCH_SIZE 32 3199a2dd95SBruce Richardson #define BLOCK_CNT_THRESHOLD 10 328113fd15SGanapati Kundapura #define ETH_EVENT_BUFFER_SIZE (6*BATCH_SIZE) 3399a2dd95SBruce Richardson #define MAX_VECTOR_SIZE 1024 3499a2dd95SBruce Richardson #define MIN_VECTOR_SIZE 4 3599a2dd95SBruce Richardson #define MAX_VECTOR_NS 1E9 3699a2dd95SBruce Richardson #define MIN_VECTOR_NS 1E5 3799a2dd95SBruce Richardson 3899a2dd95SBruce Richardson #define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32 3999a2dd95SBruce Richardson #define 
ETH_RX_ADAPTER_MEM_NAME_LEN 32 4099a2dd95SBruce Richardson 4199a2dd95SBruce Richardson #define RSS_KEY_SIZE 40 4299a2dd95SBruce Richardson /* value written to intr thread pipe to signal thread exit */ 4399a2dd95SBruce Richardson #define ETH_BRIDGE_INTR_THREAD_EXIT 1 4499a2dd95SBruce Richardson /* Sentinel value to detect initialized file handle */ 4599a2dd95SBruce Richardson #define INIT_FD -1 4699a2dd95SBruce Richardson 47da781e64SGanapati Kundapura #define RXA_ADAPTER_ARRAY "rte_event_eth_rx_adapter_array" 48da781e64SGanapati Kundapura 4999a2dd95SBruce Richardson /* 5099a2dd95SBruce Richardson * Used to store port and queue ID of interrupting Rx queue 5199a2dd95SBruce Richardson */ 5299a2dd95SBruce Richardson union queue_data { 5399a2dd95SBruce Richardson RTE_STD_C11 5499a2dd95SBruce Richardson void *ptr; 5599a2dd95SBruce Richardson struct { 5699a2dd95SBruce Richardson uint16_t port; 5799a2dd95SBruce Richardson uint16_t queue; 5899a2dd95SBruce Richardson }; 5999a2dd95SBruce Richardson }; 6099a2dd95SBruce Richardson 6199a2dd95SBruce Richardson /* 6299a2dd95SBruce Richardson * There is an instance of this struct per polled Rx queue added to the 6399a2dd95SBruce Richardson * adapter 6499a2dd95SBruce Richardson */ 6599a2dd95SBruce Richardson struct eth_rx_poll_entry { 6699a2dd95SBruce Richardson /* Eth port to poll */ 6799a2dd95SBruce Richardson uint16_t eth_dev_id; 6899a2dd95SBruce Richardson /* Eth rx queue to poll */ 6999a2dd95SBruce Richardson uint16_t eth_rx_qid; 7099a2dd95SBruce Richardson }; 7199a2dd95SBruce Richardson 7299a2dd95SBruce Richardson struct eth_rx_vector_data { 7399a2dd95SBruce Richardson TAILQ_ENTRY(eth_rx_vector_data) next; 7499a2dd95SBruce Richardson uint16_t port; 7599a2dd95SBruce Richardson uint16_t queue; 7699a2dd95SBruce Richardson uint16_t max_vector_count; 7799a2dd95SBruce Richardson uint64_t event; 7899a2dd95SBruce Richardson uint64_t ts; 7999a2dd95SBruce Richardson uint64_t vector_timeout_ticks; 8099a2dd95SBruce Richardson struct 
rte_mempool *vector_pool; 8199a2dd95SBruce Richardson struct rte_event_vector *vector_ev; 8299a2dd95SBruce Richardson } __rte_cache_aligned; 8399a2dd95SBruce Richardson 8499a2dd95SBruce Richardson TAILQ_HEAD(eth_rx_vector_data_list, eth_rx_vector_data); 8599a2dd95SBruce Richardson 8699a2dd95SBruce Richardson /* Instance per adapter */ 87a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer { 8899a2dd95SBruce Richardson /* Count of events in this buffer */ 8999a2dd95SBruce Richardson uint16_t count; 9099a2dd95SBruce Richardson /* Array of events in this buffer */ 91bc0df25cSNaga Harish K S V struct rte_event *events; 92bc0df25cSNaga Harish K S V /* size of event buffer */ 93bc0df25cSNaga Harish K S V uint16_t events_size; 948113fd15SGanapati Kundapura /* Event enqueue happens from head */ 958113fd15SGanapati Kundapura uint16_t head; 968113fd15SGanapati Kundapura /* New packets from rte_eth_rx_burst is enqued from tail */ 978113fd15SGanapati Kundapura uint16_t tail; 988113fd15SGanapati Kundapura /* last element in the buffer before rollover */ 998113fd15SGanapati Kundapura uint16_t last; 1008113fd15SGanapati Kundapura uint16_t last_mask; 10199a2dd95SBruce Richardson }; 10299a2dd95SBruce Richardson 103a256a743SPavan Nikhilesh struct event_eth_rx_adapter { 10499a2dd95SBruce Richardson /* RSS key */ 10599a2dd95SBruce Richardson uint8_t rss_key_be[RSS_KEY_SIZE]; 10699a2dd95SBruce Richardson /* Event device identifier */ 10799a2dd95SBruce Richardson uint8_t eventdev_id; 10899a2dd95SBruce Richardson /* Event port identifier */ 10999a2dd95SBruce Richardson uint8_t event_port_id; 110b06bca69SNaga Harish K S V /* Flag indicating per rxq event buffer */ 111b06bca69SNaga Harish K S V bool use_queue_event_buf; 112b06bca69SNaga Harish K S V /* Per ethernet device structure */ 113b06bca69SNaga Harish K S V struct eth_device_info *eth_devices; 11499a2dd95SBruce Richardson /* Lock to serialize config updates with service function */ 11599a2dd95SBruce Richardson rte_spinlock_t 
rx_lock; 11699a2dd95SBruce Richardson /* Max mbufs processed in any service function invocation */ 11799a2dd95SBruce Richardson uint32_t max_nb_rx; 11899a2dd95SBruce Richardson /* Receive queues that need to be polled */ 11999a2dd95SBruce Richardson struct eth_rx_poll_entry *eth_rx_poll; 12099a2dd95SBruce Richardson /* Size of the eth_rx_poll array */ 12199a2dd95SBruce Richardson uint16_t num_rx_polled; 12299a2dd95SBruce Richardson /* Weighted round robin schedule */ 12399a2dd95SBruce Richardson uint32_t *wrr_sched; 12499a2dd95SBruce Richardson /* wrr_sched[] size */ 12599a2dd95SBruce Richardson uint32_t wrr_len; 12699a2dd95SBruce Richardson /* Next entry in wrr[] to begin polling */ 12799a2dd95SBruce Richardson uint32_t wrr_pos; 12899a2dd95SBruce Richardson /* Event burst buffer */ 129a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer event_enqueue_buffer; 13099a2dd95SBruce Richardson /* Vector enable flag */ 13199a2dd95SBruce Richardson uint8_t ena_vector; 13299a2dd95SBruce Richardson /* Timestamp of previous vector expiry list traversal */ 13399a2dd95SBruce Richardson uint64_t prev_expiry_ts; 13499a2dd95SBruce Richardson /* Minimum ticks to wait before traversing expiry list */ 13599a2dd95SBruce Richardson uint64_t vector_tmo_ticks; 13699a2dd95SBruce Richardson /* vector list */ 13799a2dd95SBruce Richardson struct eth_rx_vector_data_list vector_list; 13899a2dd95SBruce Richardson /* Per adapter stats */ 13999a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_stats stats; 14099a2dd95SBruce Richardson /* Block count, counts up to BLOCK_CNT_THRESHOLD */ 14199a2dd95SBruce Richardson uint16_t enq_block_count; 14299a2dd95SBruce Richardson /* Block start ts */ 14399a2dd95SBruce Richardson uint64_t rx_enq_block_start_ts; 14499a2dd95SBruce Richardson /* epoll fd used to wait for Rx interrupts */ 14599a2dd95SBruce Richardson int epd; 14699a2dd95SBruce Richardson /* Num of interrupt driven interrupt queues */ 14799a2dd95SBruce Richardson uint32_t num_rx_intr; 
14899a2dd95SBruce Richardson /* Used to send <dev id, queue id> of interrupting Rx queues from 14999a2dd95SBruce Richardson * the interrupt thread to the Rx thread 15099a2dd95SBruce Richardson */ 15199a2dd95SBruce Richardson struct rte_ring *intr_ring; 15299a2dd95SBruce Richardson /* Rx Queue data (dev id, queue id) for the last non-empty 15399a2dd95SBruce Richardson * queue polled 15499a2dd95SBruce Richardson */ 15599a2dd95SBruce Richardson union queue_data qd; 15699a2dd95SBruce Richardson /* queue_data is valid */ 15799a2dd95SBruce Richardson int qd_valid; 15899a2dd95SBruce Richardson /* Interrupt ring lock, synchronizes Rx thread 15999a2dd95SBruce Richardson * and interrupt thread 16099a2dd95SBruce Richardson */ 16199a2dd95SBruce Richardson rte_spinlock_t intr_ring_lock; 16299a2dd95SBruce Richardson /* event array passed to rte_poll_wait */ 16399a2dd95SBruce Richardson struct rte_epoll_event *epoll_events; 16499a2dd95SBruce Richardson /* Count of interrupt vectors in use */ 16599a2dd95SBruce Richardson uint32_t num_intr_vec; 16699a2dd95SBruce Richardson /* Thread blocked on Rx interrupts */ 16799a2dd95SBruce Richardson pthread_t rx_intr_thread; 16899a2dd95SBruce Richardson /* Configuration callback for rte_service configuration */ 16999a2dd95SBruce Richardson rte_event_eth_rx_adapter_conf_cb conf_cb; 17099a2dd95SBruce Richardson /* Configuration callback argument */ 17199a2dd95SBruce Richardson void *conf_arg; 17299a2dd95SBruce Richardson /* Set if default_cb is being used */ 17399a2dd95SBruce Richardson int default_cb_arg; 17499a2dd95SBruce Richardson /* Service initialization state */ 17599a2dd95SBruce Richardson uint8_t service_inited; 17699a2dd95SBruce Richardson /* Total count of Rx queues in adapter */ 17799a2dd95SBruce Richardson uint32_t nb_queues; 17899a2dd95SBruce Richardson /* Memory allocation name */ 17999a2dd95SBruce Richardson char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN]; 18099a2dd95SBruce Richardson /* Socket identifier cached from eventdev */ 
18199a2dd95SBruce Richardson int socket_id; 18299a2dd95SBruce Richardson /* Per adapter EAL service */ 18399a2dd95SBruce Richardson uint32_t service_id; 18499a2dd95SBruce Richardson /* Adapter started flag */ 18599a2dd95SBruce Richardson uint8_t rxa_started; 18699a2dd95SBruce Richardson /* Adapter ID */ 18799a2dd95SBruce Richardson uint8_t id; 18899a2dd95SBruce Richardson } __rte_cache_aligned; 18999a2dd95SBruce Richardson 19099a2dd95SBruce Richardson /* Per eth device */ 19199a2dd95SBruce Richardson struct eth_device_info { 19299a2dd95SBruce Richardson struct rte_eth_dev *dev; 19399a2dd95SBruce Richardson struct eth_rx_queue_info *rx_queue; 19499a2dd95SBruce Richardson /* Rx callback */ 19599a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_fn cb_fn; 19699a2dd95SBruce Richardson /* Rx callback argument */ 19799a2dd95SBruce Richardson void *cb_arg; 19899a2dd95SBruce Richardson /* Set if ethdev->eventdev packet transfer uses a 19999a2dd95SBruce Richardson * hardware mechanism 20099a2dd95SBruce Richardson */ 20199a2dd95SBruce Richardson uint8_t internal_event_port; 20299a2dd95SBruce Richardson /* Set if the adapter is processing rx queues for 20399a2dd95SBruce Richardson * this eth device and packet processing has been 20499a2dd95SBruce Richardson * started, allows for the code to know if the PMD 20599a2dd95SBruce Richardson * rx_adapter_stop callback needs to be invoked 20699a2dd95SBruce Richardson */ 20799a2dd95SBruce Richardson uint8_t dev_rx_started; 20899a2dd95SBruce Richardson /* Number of queues added for this device */ 20999a2dd95SBruce Richardson uint16_t nb_dev_queues; 21099a2dd95SBruce Richardson /* Number of poll based queues 21199a2dd95SBruce Richardson * If nb_rx_poll > 0, the start callback will 21299a2dd95SBruce Richardson * be invoked if not already invoked 21399a2dd95SBruce Richardson */ 21499a2dd95SBruce Richardson uint16_t nb_rx_poll; 21599a2dd95SBruce Richardson /* Number of interrupt based queues 21699a2dd95SBruce Richardson * If nb_rx_intr > 
0, the start callback will 21799a2dd95SBruce Richardson * be invoked if not already invoked. 21899a2dd95SBruce Richardson */ 21999a2dd95SBruce Richardson uint16_t nb_rx_intr; 22099a2dd95SBruce Richardson /* Number of queues that use the shared interrupt */ 22199a2dd95SBruce Richardson uint16_t nb_shared_intr; 22299a2dd95SBruce Richardson /* sum(wrr(q)) for all queues within the device 22399a2dd95SBruce Richardson * useful when deleting all device queues 22499a2dd95SBruce Richardson */ 22599a2dd95SBruce Richardson uint32_t wrr_len; 22699a2dd95SBruce Richardson /* Intr based queue index to start polling from, this is used 22799a2dd95SBruce Richardson * if the number of shared interrupts is non-zero 22899a2dd95SBruce Richardson */ 22999a2dd95SBruce Richardson uint16_t next_q_idx; 23099a2dd95SBruce Richardson /* Intr based queue indices */ 23199a2dd95SBruce Richardson uint16_t *intr_queue; 23299a2dd95SBruce Richardson /* device generates per Rx queue interrupt for queue index 23399a2dd95SBruce Richardson * for queue indices < RTE_MAX_RXTX_INTR_VEC_ID - 1 23499a2dd95SBruce Richardson */ 23599a2dd95SBruce Richardson int multi_intr_cap; 23699a2dd95SBruce Richardson /* shared interrupt enabled */ 23799a2dd95SBruce Richardson int shared_intr_enabled; 23899a2dd95SBruce Richardson }; 23999a2dd95SBruce Richardson 24099a2dd95SBruce Richardson /* Per Rx queue */ 24199a2dd95SBruce Richardson struct eth_rx_queue_info { 24299a2dd95SBruce Richardson int queue_enabled; /* True if added */ 24399a2dd95SBruce Richardson int intr_enabled; 24499a2dd95SBruce Richardson uint8_t ena_vector; 24599a2dd95SBruce Richardson uint16_t wt; /* Polling weight */ 24699a2dd95SBruce Richardson uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ 24799a2dd95SBruce Richardson uint64_t event; 24899a2dd95SBruce Richardson struct eth_rx_vector_data vector_data; 249a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *event_buf; 250995b150cSNaga Harish K S V /* use adapter stats struct for 
queue level stats, 251995b150cSNaga Harish K S V * as same stats need to be updated for adapter and queue 252995b150cSNaga Harish K S V */ 253995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats; 25499a2dd95SBruce Richardson }; 25599a2dd95SBruce Richardson 256a256a743SPavan Nikhilesh static struct event_eth_rx_adapter **event_eth_rx_adapter; 25799a2dd95SBruce Richardson 25883ab470dSGanapati Kundapura /* Enable dynamic timestamp field in mbuf */ 25983ab470dSGanapati Kundapura static uint64_t event_eth_rx_timestamp_dynflag; 26083ab470dSGanapati Kundapura static int event_eth_rx_timestamp_dynfield_offset = -1; 26183ab470dSGanapati Kundapura 26283ab470dSGanapati Kundapura static inline rte_mbuf_timestamp_t * 26383ab470dSGanapati Kundapura rxa_timestamp_dynfield(struct rte_mbuf *mbuf) 26483ab470dSGanapati Kundapura { 26583ab470dSGanapati Kundapura return RTE_MBUF_DYNFIELD(mbuf, 26683ab470dSGanapati Kundapura event_eth_rx_timestamp_dynfield_offset, rte_mbuf_timestamp_t *); 26783ab470dSGanapati Kundapura } 26883ab470dSGanapati Kundapura 26999a2dd95SBruce Richardson static inline int 27099a2dd95SBruce Richardson rxa_validate_id(uint8_t id) 27199a2dd95SBruce Richardson { 27299a2dd95SBruce Richardson return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; 27399a2dd95SBruce Richardson } 27499a2dd95SBruce Richardson 275a256a743SPavan Nikhilesh static inline struct eth_event_enqueue_buffer * 276a256a743SPavan Nikhilesh rxa_event_buf_get(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, 277995b150cSNaga Harish K S V uint16_t rx_queue_id, 278995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats **stats) 279b06bca69SNaga Harish K S V { 280b06bca69SNaga Harish K S V if (rx_adapter->use_queue_event_buf) { 281b06bca69SNaga Harish K S V struct eth_device_info *dev_info = 282b06bca69SNaga Harish K S V &rx_adapter->eth_devices[eth_dev_id]; 283995b150cSNaga Harish K S V *stats = dev_info->rx_queue[rx_queue_id].stats; 284b06bca69SNaga Harish K S V 
return dev_info->rx_queue[rx_queue_id].event_buf; 285995b150cSNaga Harish K S V } else { 286995b150cSNaga Harish K S V *stats = &rx_adapter->stats; 287b06bca69SNaga Harish K S V return &rx_adapter->event_enqueue_buffer; 288b06bca69SNaga Harish K S V } 289995b150cSNaga Harish K S V } 290b06bca69SNaga Harish K S V 29199a2dd95SBruce Richardson #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ 29299a2dd95SBruce Richardson if (!rxa_validate_id(id)) { \ 29399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ 29499a2dd95SBruce Richardson return retval; \ 29599a2dd95SBruce Richardson } \ 29699a2dd95SBruce Richardson } while (0) 29799a2dd95SBruce Richardson 29874b034ffSWeiguo Li #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(id, retval) do { \ 29974b034ffSWeiguo Li if (!rxa_validate_id(id)) { \ 30074b034ffSWeiguo Li RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ 30174b034ffSWeiguo Li ret = retval; \ 30274b034ffSWeiguo Li goto error; \ 30374b034ffSWeiguo Li } \ 30474b034ffSWeiguo Li } while (0) 30574b034ffSWeiguo Li 30674b034ffSWeiguo Li #define RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, retval) do { \ 30774b034ffSWeiguo Li if ((token) == NULL || strlen(token) == 0 || !isdigit(*token)) { \ 30874b034ffSWeiguo Li RTE_EDEV_LOG_ERR("Invalid eth Rx adapter token\n"); \ 30974b034ffSWeiguo Li ret = retval; \ 31074b034ffSWeiguo Li goto error; \ 31174b034ffSWeiguo Li } \ 31274b034ffSWeiguo Li } while (0) 31374b034ffSWeiguo Li 31474b034ffSWeiguo Li #define RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(port_id, retval) do { \ 31574b034ffSWeiguo Li if (!rte_eth_dev_is_valid_port(port_id)) { \ 31674b034ffSWeiguo Li RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \ 31774b034ffSWeiguo Li ret = retval; \ 31874b034ffSWeiguo Li goto error; \ 31974b034ffSWeiguo Li } \ 32074b034ffSWeiguo Li } while (0) 32174b034ffSWeiguo Li 32299a2dd95SBruce Richardson static inline int 323a256a743SPavan Nikhilesh 
rxa_sw_adapter_queue_count(struct event_eth_rx_adapter *rx_adapter) 32499a2dd95SBruce Richardson { 32599a2dd95SBruce Richardson return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr; 32699a2dd95SBruce Richardson } 32799a2dd95SBruce Richardson 32899a2dd95SBruce Richardson /* Greatest common divisor */ 32999a2dd95SBruce Richardson static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b) 33099a2dd95SBruce Richardson { 33199a2dd95SBruce Richardson uint16_t r = a % b; 33299a2dd95SBruce Richardson 33399a2dd95SBruce Richardson return r ? rxa_gcd_u16(b, r) : b; 33499a2dd95SBruce Richardson } 33599a2dd95SBruce Richardson 33699a2dd95SBruce Richardson /* Returns the next queue in the polling sequence 33799a2dd95SBruce Richardson * 33899a2dd95SBruce Richardson * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling 33999a2dd95SBruce Richardson */ 34099a2dd95SBruce Richardson static int 341a256a743SPavan Nikhilesh rxa_wrr_next(struct event_eth_rx_adapter *rx_adapter, unsigned int n, int *cw, 34299a2dd95SBruce Richardson struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt, 34399a2dd95SBruce Richardson uint16_t gcd, int prev) 34499a2dd95SBruce Richardson { 34599a2dd95SBruce Richardson int i = prev; 34699a2dd95SBruce Richardson uint16_t w; 34799a2dd95SBruce Richardson 34899a2dd95SBruce Richardson while (1) { 34999a2dd95SBruce Richardson uint16_t q; 35099a2dd95SBruce Richardson uint16_t d; 35199a2dd95SBruce Richardson 35299a2dd95SBruce Richardson i = (i + 1) % n; 35399a2dd95SBruce Richardson if (i == 0) { 35499a2dd95SBruce Richardson *cw = *cw - gcd; 35599a2dd95SBruce Richardson if (*cw <= 0) 35699a2dd95SBruce Richardson *cw = max_wt; 35799a2dd95SBruce Richardson } 35899a2dd95SBruce Richardson 35999a2dd95SBruce Richardson q = eth_rx_poll[i].eth_rx_qid; 36099a2dd95SBruce Richardson d = eth_rx_poll[i].eth_dev_id; 36199a2dd95SBruce Richardson w = rx_adapter->eth_devices[d].rx_queue[q].wt; 36299a2dd95SBruce Richardson 36399a2dd95SBruce Richardson if ((int)w >= *cw) 
36499a2dd95SBruce Richardson return i; 36599a2dd95SBruce Richardson } 36699a2dd95SBruce Richardson } 36799a2dd95SBruce Richardson 36899a2dd95SBruce Richardson static inline int 36999a2dd95SBruce Richardson rxa_shared_intr(struct eth_device_info *dev_info, 37099a2dd95SBruce Richardson int rx_queue_id) 37199a2dd95SBruce Richardson { 37299a2dd95SBruce Richardson int multi_intr_cap; 37399a2dd95SBruce Richardson 37499a2dd95SBruce Richardson if (dev_info->dev->intr_handle == NULL) 37599a2dd95SBruce Richardson return 0; 37699a2dd95SBruce Richardson 37799a2dd95SBruce Richardson multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle); 37899a2dd95SBruce Richardson return !multi_intr_cap || 37999a2dd95SBruce Richardson rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1; 38099a2dd95SBruce Richardson } 38199a2dd95SBruce Richardson 38299a2dd95SBruce Richardson static inline int 38399a2dd95SBruce Richardson rxa_intr_queue(struct eth_device_info *dev_info, 38499a2dd95SBruce Richardson int rx_queue_id) 38599a2dd95SBruce Richardson { 38699a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 38799a2dd95SBruce Richardson 38899a2dd95SBruce Richardson queue_info = &dev_info->rx_queue[rx_queue_id]; 38999a2dd95SBruce Richardson return dev_info->rx_queue && 39099a2dd95SBruce Richardson !dev_info->internal_event_port && 39199a2dd95SBruce Richardson queue_info->queue_enabled && queue_info->wt == 0; 39299a2dd95SBruce Richardson } 39399a2dd95SBruce Richardson 39499a2dd95SBruce Richardson static inline int 39599a2dd95SBruce Richardson rxa_polled_queue(struct eth_device_info *dev_info, 39699a2dd95SBruce Richardson int rx_queue_id) 39799a2dd95SBruce Richardson { 39899a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 39999a2dd95SBruce Richardson 40099a2dd95SBruce Richardson queue_info = &dev_info->rx_queue[rx_queue_id]; 40199a2dd95SBruce Richardson return !dev_info->internal_event_port && 40299a2dd95SBruce Richardson dev_info->rx_queue && 40399a2dd95SBruce Richardson 
queue_info->queue_enabled && queue_info->wt != 0; 40499a2dd95SBruce Richardson } 40599a2dd95SBruce Richardson 40699a2dd95SBruce Richardson /* Calculate change in number of vectors after Rx queue ID is add/deleted */ 40799a2dd95SBruce Richardson static int 40899a2dd95SBruce Richardson rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add) 40999a2dd95SBruce Richardson { 41099a2dd95SBruce Richardson uint16_t i; 41199a2dd95SBruce Richardson int n, s; 41299a2dd95SBruce Richardson uint16_t nbq; 41399a2dd95SBruce Richardson 41499a2dd95SBruce Richardson nbq = dev_info->dev->data->nb_rx_queues; 41599a2dd95SBruce Richardson n = 0; /* non shared count */ 41699a2dd95SBruce Richardson s = 0; /* shared count */ 41799a2dd95SBruce Richardson 41899a2dd95SBruce Richardson if (rx_queue_id == -1) { 41999a2dd95SBruce Richardson for (i = 0; i < nbq; i++) { 42099a2dd95SBruce Richardson if (!rxa_shared_intr(dev_info, i)) 42199a2dd95SBruce Richardson n += add ? !rxa_intr_queue(dev_info, i) : 42299a2dd95SBruce Richardson rxa_intr_queue(dev_info, i); 42399a2dd95SBruce Richardson else 42499a2dd95SBruce Richardson s += add ? !rxa_intr_queue(dev_info, i) : 42599a2dd95SBruce Richardson rxa_intr_queue(dev_info, i); 42699a2dd95SBruce Richardson } 42799a2dd95SBruce Richardson 42899a2dd95SBruce Richardson if (s > 0) { 42999a2dd95SBruce Richardson if ((add && dev_info->nb_shared_intr == 0) || 43099a2dd95SBruce Richardson (!add && dev_info->nb_shared_intr)) 43199a2dd95SBruce Richardson n += 1; 43299a2dd95SBruce Richardson } 43399a2dd95SBruce Richardson } else { 43499a2dd95SBruce Richardson if (!rxa_shared_intr(dev_info, rx_queue_id)) 43599a2dd95SBruce Richardson n = add ? !rxa_intr_queue(dev_info, rx_queue_id) : 43699a2dd95SBruce Richardson rxa_intr_queue(dev_info, rx_queue_id); 43799a2dd95SBruce Richardson else 43899a2dd95SBruce Richardson n = add ? 
!dev_info->nb_shared_intr : 43999a2dd95SBruce Richardson dev_info->nb_shared_intr == 1; 44099a2dd95SBruce Richardson } 44199a2dd95SBruce Richardson 44299a2dd95SBruce Richardson return add ? n : -n; 44399a2dd95SBruce Richardson } 44499a2dd95SBruce Richardson 44599a2dd95SBruce Richardson /* Calculate nb_rx_intr after deleting interrupt mode rx queues 44699a2dd95SBruce Richardson */ 44799a2dd95SBruce Richardson static void 448a256a743SPavan Nikhilesh rxa_calc_nb_post_intr_del(struct event_eth_rx_adapter *rx_adapter, 449a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 45099a2dd95SBruce Richardson uint32_t *nb_rx_intr) 45199a2dd95SBruce Richardson { 45299a2dd95SBruce Richardson uint32_t intr_diff; 45399a2dd95SBruce Richardson 45499a2dd95SBruce Richardson if (rx_queue_id == -1) 45599a2dd95SBruce Richardson intr_diff = dev_info->nb_rx_intr; 45699a2dd95SBruce Richardson else 45799a2dd95SBruce Richardson intr_diff = rxa_intr_queue(dev_info, rx_queue_id); 45899a2dd95SBruce Richardson 45999a2dd95SBruce Richardson *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff; 46099a2dd95SBruce Richardson } 46199a2dd95SBruce Richardson 46299a2dd95SBruce Richardson /* Calculate nb_rx_* after adding interrupt mode rx queues, newly added 46399a2dd95SBruce Richardson * interrupt queues could currently be poll mode Rx queues 46499a2dd95SBruce Richardson */ 46599a2dd95SBruce Richardson static void 466a256a743SPavan Nikhilesh rxa_calc_nb_post_add_intr(struct event_eth_rx_adapter *rx_adapter, 467a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 468a256a743SPavan Nikhilesh uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, 46999a2dd95SBruce Richardson uint32_t *nb_wrr) 47099a2dd95SBruce Richardson { 47199a2dd95SBruce Richardson uint32_t intr_diff; 47299a2dd95SBruce Richardson uint32_t poll_diff; 47399a2dd95SBruce Richardson uint32_t wrr_len_diff; 47499a2dd95SBruce Richardson 47599a2dd95SBruce Richardson if (rx_queue_id == -1) { 47699a2dd95SBruce 
Richardson intr_diff = dev_info->dev->data->nb_rx_queues - 47799a2dd95SBruce Richardson dev_info->nb_rx_intr; 47899a2dd95SBruce Richardson poll_diff = dev_info->nb_rx_poll; 47999a2dd95SBruce Richardson wrr_len_diff = dev_info->wrr_len; 48099a2dd95SBruce Richardson } else { 48199a2dd95SBruce Richardson intr_diff = !rxa_intr_queue(dev_info, rx_queue_id); 48299a2dd95SBruce Richardson poll_diff = rxa_polled_queue(dev_info, rx_queue_id); 48399a2dd95SBruce Richardson wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt : 48499a2dd95SBruce Richardson 0; 48599a2dd95SBruce Richardson } 48699a2dd95SBruce Richardson 48799a2dd95SBruce Richardson *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff; 48899a2dd95SBruce Richardson *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff; 48999a2dd95SBruce Richardson *nb_wrr = rx_adapter->wrr_len - wrr_len_diff; 49099a2dd95SBruce Richardson } 49199a2dd95SBruce Richardson 49299a2dd95SBruce Richardson /* Calculate size of the eth_rx_poll and wrr_sched arrays 49399a2dd95SBruce Richardson * after deleting poll mode rx queues 49499a2dd95SBruce Richardson */ 49599a2dd95SBruce Richardson static void 496a256a743SPavan Nikhilesh rxa_calc_nb_post_poll_del(struct event_eth_rx_adapter *rx_adapter, 497a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 498a256a743SPavan Nikhilesh uint32_t *nb_rx_poll, uint32_t *nb_wrr) 49999a2dd95SBruce Richardson { 50099a2dd95SBruce Richardson uint32_t poll_diff; 50199a2dd95SBruce Richardson uint32_t wrr_len_diff; 50299a2dd95SBruce Richardson 50399a2dd95SBruce Richardson if (rx_queue_id == -1) { 50499a2dd95SBruce Richardson poll_diff = dev_info->nb_rx_poll; 50599a2dd95SBruce Richardson wrr_len_diff = dev_info->wrr_len; 50699a2dd95SBruce Richardson } else { 50799a2dd95SBruce Richardson poll_diff = rxa_polled_queue(dev_info, rx_queue_id); 50899a2dd95SBruce Richardson wrr_len_diff = poll_diff ? 
dev_info->rx_queue[rx_queue_id].wt : 50999a2dd95SBruce Richardson 0; 51099a2dd95SBruce Richardson } 51199a2dd95SBruce Richardson 51299a2dd95SBruce Richardson *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff; 51399a2dd95SBruce Richardson *nb_wrr = rx_adapter->wrr_len - wrr_len_diff; 51499a2dd95SBruce Richardson } 51599a2dd95SBruce Richardson 51699a2dd95SBruce Richardson /* Calculate nb_rx_* after adding poll mode rx queues 51799a2dd95SBruce Richardson */ 51899a2dd95SBruce Richardson static void 519a256a743SPavan Nikhilesh rxa_calc_nb_post_add_poll(struct event_eth_rx_adapter *rx_adapter, 520a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 521a256a743SPavan Nikhilesh uint16_t wt, uint32_t *nb_rx_poll, 522a256a743SPavan Nikhilesh uint32_t *nb_rx_intr, uint32_t *nb_wrr) 52399a2dd95SBruce Richardson { 52499a2dd95SBruce Richardson uint32_t intr_diff; 52599a2dd95SBruce Richardson uint32_t poll_diff; 52699a2dd95SBruce Richardson uint32_t wrr_len_diff; 52799a2dd95SBruce Richardson 52899a2dd95SBruce Richardson if (rx_queue_id == -1) { 52999a2dd95SBruce Richardson intr_diff = dev_info->nb_rx_intr; 53099a2dd95SBruce Richardson poll_diff = dev_info->dev->data->nb_rx_queues - 53199a2dd95SBruce Richardson dev_info->nb_rx_poll; 53299a2dd95SBruce Richardson wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues 53399a2dd95SBruce Richardson - dev_info->wrr_len; 53499a2dd95SBruce Richardson } else { 53599a2dd95SBruce Richardson intr_diff = rxa_intr_queue(dev_info, rx_queue_id); 53699a2dd95SBruce Richardson poll_diff = !rxa_polled_queue(dev_info, rx_queue_id); 53799a2dd95SBruce Richardson wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ? 
53899a2dd95SBruce Richardson wt - dev_info->rx_queue[rx_queue_id].wt : 53999a2dd95SBruce Richardson wt; 54099a2dd95SBruce Richardson } 54199a2dd95SBruce Richardson 54299a2dd95SBruce Richardson *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff; 54399a2dd95SBruce Richardson *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff; 54499a2dd95SBruce Richardson *nb_wrr = rx_adapter->wrr_len + wrr_len_diff; 54599a2dd95SBruce Richardson } 54699a2dd95SBruce Richardson 54799a2dd95SBruce Richardson /* Calculate nb_rx_* after adding rx_queue_id */ 54899a2dd95SBruce Richardson static void 549a256a743SPavan Nikhilesh rxa_calc_nb_post_add(struct event_eth_rx_adapter *rx_adapter, 550a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 551a256a743SPavan Nikhilesh uint16_t wt, uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, 55299a2dd95SBruce Richardson uint32_t *nb_wrr) 55399a2dd95SBruce Richardson { 55499a2dd95SBruce Richardson if (wt != 0) 55599a2dd95SBruce Richardson rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id, 55699a2dd95SBruce Richardson wt, nb_rx_poll, nb_rx_intr, nb_wrr); 55799a2dd95SBruce Richardson else 55899a2dd95SBruce Richardson rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id, 55999a2dd95SBruce Richardson nb_rx_poll, nb_rx_intr, nb_wrr); 56099a2dd95SBruce Richardson } 56199a2dd95SBruce Richardson 56299a2dd95SBruce Richardson /* Calculate nb_rx_* after deleting rx_queue_id */ 56399a2dd95SBruce Richardson static void 564a256a743SPavan Nikhilesh rxa_calc_nb_post_del(struct event_eth_rx_adapter *rx_adapter, 565a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id, 566a256a743SPavan Nikhilesh uint32_t *nb_rx_poll, uint32_t *nb_rx_intr, 56799a2dd95SBruce Richardson uint32_t *nb_wrr) 56899a2dd95SBruce Richardson { 56999a2dd95SBruce Richardson rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll, 57099a2dd95SBruce Richardson nb_wrr); 57199a2dd95SBruce Richardson 
rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id, 57299a2dd95SBruce Richardson nb_rx_intr); 57399a2dd95SBruce Richardson } 57499a2dd95SBruce Richardson 57599a2dd95SBruce Richardson /* 57699a2dd95SBruce Richardson * Allocate the rx_poll array 57799a2dd95SBruce Richardson */ 57899a2dd95SBruce Richardson static struct eth_rx_poll_entry * 579a256a743SPavan Nikhilesh rxa_alloc_poll(struct event_eth_rx_adapter *rx_adapter, uint32_t num_rx_polled) 58099a2dd95SBruce Richardson { 58199a2dd95SBruce Richardson size_t len; 58299a2dd95SBruce Richardson 58399a2dd95SBruce Richardson len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll), 58499a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 58599a2dd95SBruce Richardson return rte_zmalloc_socket(rx_adapter->mem_name, 58699a2dd95SBruce Richardson len, 58799a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, 58899a2dd95SBruce Richardson rx_adapter->socket_id); 58999a2dd95SBruce Richardson } 59099a2dd95SBruce Richardson 59199a2dd95SBruce Richardson /* 59299a2dd95SBruce Richardson * Allocate the WRR array 59399a2dd95SBruce Richardson */ 59499a2dd95SBruce Richardson static uint32_t * 595a256a743SPavan Nikhilesh rxa_alloc_wrr(struct event_eth_rx_adapter *rx_adapter, int nb_wrr) 59699a2dd95SBruce Richardson { 59799a2dd95SBruce Richardson size_t len; 59899a2dd95SBruce Richardson 59999a2dd95SBruce Richardson len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched), 60099a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 60199a2dd95SBruce Richardson return rte_zmalloc_socket(rx_adapter->mem_name, 60299a2dd95SBruce Richardson len, 60399a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, 60499a2dd95SBruce Richardson rx_adapter->socket_id); 60599a2dd95SBruce Richardson } 60699a2dd95SBruce Richardson 60799a2dd95SBruce Richardson static int 608a256a743SPavan Nikhilesh rxa_alloc_poll_arrays(struct event_eth_rx_adapter *rx_adapter, uint32_t nb_poll, 609a256a743SPavan Nikhilesh uint32_t nb_wrr, struct eth_rx_poll_entry **rx_poll, 61099a2dd95SBruce 
Richardson uint32_t **wrr_sched) 61199a2dd95SBruce Richardson { 61299a2dd95SBruce Richardson 61399a2dd95SBruce Richardson if (nb_poll == 0) { 61499a2dd95SBruce Richardson *rx_poll = NULL; 61599a2dd95SBruce Richardson *wrr_sched = NULL; 61699a2dd95SBruce Richardson return 0; 61799a2dd95SBruce Richardson } 61899a2dd95SBruce Richardson 61999a2dd95SBruce Richardson *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll); 62099a2dd95SBruce Richardson if (*rx_poll == NULL) { 62199a2dd95SBruce Richardson *wrr_sched = NULL; 62299a2dd95SBruce Richardson return -ENOMEM; 62399a2dd95SBruce Richardson } 62499a2dd95SBruce Richardson 62599a2dd95SBruce Richardson *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr); 62699a2dd95SBruce Richardson if (*wrr_sched == NULL) { 62799a2dd95SBruce Richardson rte_free(*rx_poll); 62899a2dd95SBruce Richardson return -ENOMEM; 62999a2dd95SBruce Richardson } 63099a2dd95SBruce Richardson return 0; 63199a2dd95SBruce Richardson } 63299a2dd95SBruce Richardson 63399a2dd95SBruce Richardson /* Precalculate WRR polling sequence for all queues in rx_adapter */ 63499a2dd95SBruce Richardson static void 635a256a743SPavan Nikhilesh rxa_calc_wrr_sequence(struct event_eth_rx_adapter *rx_adapter, 636a256a743SPavan Nikhilesh struct eth_rx_poll_entry *rx_poll, uint32_t *rx_wrr) 63799a2dd95SBruce Richardson { 63899a2dd95SBruce Richardson uint16_t d; 63999a2dd95SBruce Richardson uint16_t q; 64099a2dd95SBruce Richardson unsigned int i; 64199a2dd95SBruce Richardson int prev = -1; 64299a2dd95SBruce Richardson int cw = -1; 64399a2dd95SBruce Richardson 64499a2dd95SBruce Richardson /* Initialize variables for calculation of wrr schedule */ 64599a2dd95SBruce Richardson uint16_t max_wrr_pos = 0; 64699a2dd95SBruce Richardson unsigned int poll_q = 0; 64799a2dd95SBruce Richardson uint16_t max_wt = 0; 64899a2dd95SBruce Richardson uint16_t gcd = 0; 64999a2dd95SBruce Richardson 65099a2dd95SBruce Richardson if (rx_poll == NULL) 65199a2dd95SBruce Richardson return; 65299a2dd95SBruce Richardson 
65399a2dd95SBruce Richardson /* Generate array of all queues to poll, the size of this 65499a2dd95SBruce Richardson * array is poll_q 65599a2dd95SBruce Richardson */ 65699a2dd95SBruce Richardson RTE_ETH_FOREACH_DEV(d) { 65799a2dd95SBruce Richardson uint16_t nb_rx_queues; 65899a2dd95SBruce Richardson struct eth_device_info *dev_info = 65999a2dd95SBruce Richardson &rx_adapter->eth_devices[d]; 66099a2dd95SBruce Richardson nb_rx_queues = dev_info->dev->data->nb_rx_queues; 66199a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) 66299a2dd95SBruce Richardson continue; 66399a2dd95SBruce Richardson if (dev_info->internal_event_port) 66499a2dd95SBruce Richardson continue; 66599a2dd95SBruce Richardson dev_info->wrr_len = 0; 66699a2dd95SBruce Richardson for (q = 0; q < nb_rx_queues; q++) { 66799a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info = 66899a2dd95SBruce Richardson &dev_info->rx_queue[q]; 66999a2dd95SBruce Richardson uint16_t wt; 67099a2dd95SBruce Richardson 67199a2dd95SBruce Richardson if (!rxa_polled_queue(dev_info, q)) 67299a2dd95SBruce Richardson continue; 67399a2dd95SBruce Richardson wt = queue_info->wt; 67499a2dd95SBruce Richardson rx_poll[poll_q].eth_dev_id = d; 67599a2dd95SBruce Richardson rx_poll[poll_q].eth_rx_qid = q; 67699a2dd95SBruce Richardson max_wrr_pos += wt; 67799a2dd95SBruce Richardson dev_info->wrr_len += wt; 67899a2dd95SBruce Richardson max_wt = RTE_MAX(max_wt, wt); 67999a2dd95SBruce Richardson gcd = (gcd) ? 
rxa_gcd_u16(gcd, wt) : wt; 68099a2dd95SBruce Richardson poll_q++; 68199a2dd95SBruce Richardson } 68299a2dd95SBruce Richardson } 68399a2dd95SBruce Richardson 68499a2dd95SBruce Richardson /* Generate polling sequence based on weights */ 68599a2dd95SBruce Richardson prev = -1; 68699a2dd95SBruce Richardson cw = -1; 68799a2dd95SBruce Richardson for (i = 0; i < max_wrr_pos; i++) { 68899a2dd95SBruce Richardson rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw, 68999a2dd95SBruce Richardson rx_poll, max_wt, gcd, prev); 69099a2dd95SBruce Richardson prev = rx_wrr[i]; 69199a2dd95SBruce Richardson } 69299a2dd95SBruce Richardson } 69399a2dd95SBruce Richardson 69499a2dd95SBruce Richardson static inline void 69599a2dd95SBruce Richardson rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr, 69699a2dd95SBruce Richardson struct rte_ipv6_hdr **ipv6_hdr) 69799a2dd95SBruce Richardson { 69899a2dd95SBruce Richardson struct rte_ether_hdr *eth_hdr = 69999a2dd95SBruce Richardson rte_pktmbuf_mtod(m, struct rte_ether_hdr *); 70099a2dd95SBruce Richardson struct rte_vlan_hdr *vlan_hdr; 70199a2dd95SBruce Richardson 70299a2dd95SBruce Richardson *ipv4_hdr = NULL; 70399a2dd95SBruce Richardson *ipv6_hdr = NULL; 70499a2dd95SBruce Richardson 70599a2dd95SBruce Richardson switch (eth_hdr->ether_type) { 70699a2dd95SBruce Richardson case RTE_BE16(RTE_ETHER_TYPE_IPV4): 70799a2dd95SBruce Richardson *ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1); 70899a2dd95SBruce Richardson break; 70999a2dd95SBruce Richardson 71099a2dd95SBruce Richardson case RTE_BE16(RTE_ETHER_TYPE_IPV6): 71199a2dd95SBruce Richardson *ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1); 71299a2dd95SBruce Richardson break; 71399a2dd95SBruce Richardson 71499a2dd95SBruce Richardson case RTE_BE16(RTE_ETHER_TYPE_VLAN): 71599a2dd95SBruce Richardson vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1); 71699a2dd95SBruce Richardson switch (vlan_hdr->eth_proto) { 71799a2dd95SBruce Richardson case RTE_BE16(RTE_ETHER_TYPE_IPV4): 71899a2dd95SBruce 
Richardson *ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1); 71999a2dd95SBruce Richardson break; 72099a2dd95SBruce Richardson case RTE_BE16(RTE_ETHER_TYPE_IPV6): 72199a2dd95SBruce Richardson *ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1); 72299a2dd95SBruce Richardson break; 72399a2dd95SBruce Richardson default: 72499a2dd95SBruce Richardson break; 72599a2dd95SBruce Richardson } 72699a2dd95SBruce Richardson break; 72799a2dd95SBruce Richardson 72899a2dd95SBruce Richardson default: 72999a2dd95SBruce Richardson break; 73099a2dd95SBruce Richardson } 73199a2dd95SBruce Richardson } 73299a2dd95SBruce Richardson 73399a2dd95SBruce Richardson /* Calculate RSS hash for IPv4/6 */ 73499a2dd95SBruce Richardson static inline uint32_t 73599a2dd95SBruce Richardson rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be) 73699a2dd95SBruce Richardson { 73799a2dd95SBruce Richardson uint32_t input_len; 73899a2dd95SBruce Richardson void *tuple; 73999a2dd95SBruce Richardson struct rte_ipv4_tuple ipv4_tuple; 74099a2dd95SBruce Richardson struct rte_ipv6_tuple ipv6_tuple; 74199a2dd95SBruce Richardson struct rte_ipv4_hdr *ipv4_hdr; 74299a2dd95SBruce Richardson struct rte_ipv6_hdr *ipv6_hdr; 74399a2dd95SBruce Richardson 74499a2dd95SBruce Richardson rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr); 74599a2dd95SBruce Richardson 74699a2dd95SBruce Richardson if (ipv4_hdr) { 74799a2dd95SBruce Richardson ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr); 74899a2dd95SBruce Richardson ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr); 74999a2dd95SBruce Richardson tuple = &ipv4_tuple; 75099a2dd95SBruce Richardson input_len = RTE_THASH_V4_L3_LEN; 75199a2dd95SBruce Richardson } else if (ipv6_hdr) { 75299a2dd95SBruce Richardson rte_thash_load_v6_addrs(ipv6_hdr, 75399a2dd95SBruce Richardson (union rte_thash_tuple *)&ipv6_tuple); 75499a2dd95SBruce Richardson tuple = &ipv6_tuple; 75599a2dd95SBruce Richardson input_len = RTE_THASH_V6_L3_LEN; 75699a2dd95SBruce Richardson } else 75799a2dd95SBruce 
Richardson return 0; 75899a2dd95SBruce Richardson 75999a2dd95SBruce Richardson return rte_softrss_be(tuple, input_len, rss_key_be); 76099a2dd95SBruce Richardson } 76199a2dd95SBruce Richardson 76299a2dd95SBruce Richardson static inline int 763a256a743SPavan Nikhilesh rxa_enq_blocked(struct event_eth_rx_adapter *rx_adapter) 76499a2dd95SBruce Richardson { 76599a2dd95SBruce Richardson return !!rx_adapter->enq_block_count; 76699a2dd95SBruce Richardson } 76799a2dd95SBruce Richardson 76899a2dd95SBruce Richardson static inline void 769a256a743SPavan Nikhilesh rxa_enq_block_start_ts(struct event_eth_rx_adapter *rx_adapter) 77099a2dd95SBruce Richardson { 77199a2dd95SBruce Richardson if (rx_adapter->rx_enq_block_start_ts) 77299a2dd95SBruce Richardson return; 77399a2dd95SBruce Richardson 77499a2dd95SBruce Richardson rx_adapter->enq_block_count++; 77599a2dd95SBruce Richardson if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD) 77699a2dd95SBruce Richardson return; 77799a2dd95SBruce Richardson 77899a2dd95SBruce Richardson rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles(); 77999a2dd95SBruce Richardson } 78099a2dd95SBruce Richardson 78199a2dd95SBruce Richardson static inline void 782a256a743SPavan Nikhilesh rxa_enq_block_end_ts(struct event_eth_rx_adapter *rx_adapter, 78399a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_stats *stats) 78499a2dd95SBruce Richardson { 78599a2dd95SBruce Richardson if (unlikely(!stats->rx_enq_start_ts)) 78699a2dd95SBruce Richardson stats->rx_enq_start_ts = rte_get_tsc_cycles(); 78799a2dd95SBruce Richardson 78899a2dd95SBruce Richardson if (likely(!rxa_enq_blocked(rx_adapter))) 78999a2dd95SBruce Richardson return; 79099a2dd95SBruce Richardson 79199a2dd95SBruce Richardson rx_adapter->enq_block_count = 0; 79299a2dd95SBruce Richardson if (rx_adapter->rx_enq_block_start_ts) { 79399a2dd95SBruce Richardson stats->rx_enq_end_ts = rte_get_tsc_cycles(); 79499a2dd95SBruce Richardson stats->rx_enq_block_cycles += stats->rx_enq_end_ts - 
79599a2dd95SBruce Richardson rx_adapter->rx_enq_block_start_ts; 79699a2dd95SBruce Richardson rx_adapter->rx_enq_block_start_ts = 0; 79799a2dd95SBruce Richardson } 79899a2dd95SBruce Richardson } 79999a2dd95SBruce Richardson 80099a2dd95SBruce Richardson /* Enqueue buffered events to event device */ 80199a2dd95SBruce Richardson static inline uint16_t 802a256a743SPavan Nikhilesh rxa_flush_event_buffer(struct event_eth_rx_adapter *rx_adapter, 803995b150cSNaga Harish K S V struct eth_event_enqueue_buffer *buf, 804995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats) 80599a2dd95SBruce Richardson { 806572dce2bSMattias Rönnblom uint16_t count = buf->count; 807572dce2bSMattias Rönnblom uint16_t n = 0; 80899a2dd95SBruce Richardson 8098113fd15SGanapati Kundapura if (!count) 81099a2dd95SBruce Richardson return 0; 81199a2dd95SBruce Richardson 812572dce2bSMattias Rönnblom if (buf->last) 813572dce2bSMattias Rönnblom count = buf->last - buf->head; 814572dce2bSMattias Rönnblom 815572dce2bSMattias Rönnblom if (count) { 816572dce2bSMattias Rönnblom n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id, 81799a2dd95SBruce Richardson rx_adapter->event_port_id, 8188113fd15SGanapati Kundapura &buf->events[buf->head], 8198113fd15SGanapati Kundapura count); 8208113fd15SGanapati Kundapura if (n != count) 82199a2dd95SBruce Richardson stats->rx_enq_retry++; 8228113fd15SGanapati Kundapura 8238113fd15SGanapati Kundapura buf->head += n; 824572dce2bSMattias Rönnblom } 8258113fd15SGanapati Kundapura 8268113fd15SGanapati Kundapura if (buf->last && n == count) { 8278113fd15SGanapati Kundapura uint16_t n1; 8288113fd15SGanapati Kundapura 8298113fd15SGanapati Kundapura n1 = rte_event_enqueue_new_burst(rx_adapter->eventdev_id, 8308113fd15SGanapati Kundapura rx_adapter->event_port_id, 8318113fd15SGanapati Kundapura &buf->events[0], 8328113fd15SGanapati Kundapura buf->tail); 8338113fd15SGanapati Kundapura 8348113fd15SGanapati Kundapura if (n1 != buf->tail) 8358113fd15SGanapati Kundapura 
stats->rx_enq_retry++; 8368113fd15SGanapati Kundapura 8378113fd15SGanapati Kundapura buf->last = 0; 8388113fd15SGanapati Kundapura buf->head = n1; 8398113fd15SGanapati Kundapura buf->last_mask = 0; 8408113fd15SGanapati Kundapura n += n1; 84199a2dd95SBruce Richardson } 84299a2dd95SBruce Richardson 84399a2dd95SBruce Richardson n ? rxa_enq_block_end_ts(rx_adapter, stats) : 84499a2dd95SBruce Richardson rxa_enq_block_start_ts(rx_adapter); 84599a2dd95SBruce Richardson 84699a2dd95SBruce Richardson buf->count -= n; 84799a2dd95SBruce Richardson stats->rx_enq_count += n; 84899a2dd95SBruce Richardson 84999a2dd95SBruce Richardson return n; 85099a2dd95SBruce Richardson } 85199a2dd95SBruce Richardson 85299a2dd95SBruce Richardson static inline void 853a256a743SPavan Nikhilesh rxa_init_vector(struct event_eth_rx_adapter *rx_adapter, 85499a2dd95SBruce Richardson struct eth_rx_vector_data *vec) 85599a2dd95SBruce Richardson { 85699a2dd95SBruce Richardson vec->vector_ev->nb_elem = 0; 85799a2dd95SBruce Richardson vec->vector_ev->port = vec->port; 85899a2dd95SBruce Richardson vec->vector_ev->queue = vec->queue; 85999a2dd95SBruce Richardson vec->vector_ev->attr_valid = true; 860*0fbb55efSPavan Nikhilesh vec->vector_ev->elem_offset = 0; 86199a2dd95SBruce Richardson TAILQ_INSERT_TAIL(&rx_adapter->vector_list, vec, next); 86299a2dd95SBruce Richardson } 86399a2dd95SBruce Richardson 86499a2dd95SBruce Richardson static inline uint16_t 865a256a743SPavan Nikhilesh rxa_create_event_vector(struct event_eth_rx_adapter *rx_adapter, 86699a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info, 867a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *buf, 86899a2dd95SBruce Richardson struct rte_mbuf **mbufs, uint16_t num) 86999a2dd95SBruce Richardson { 87099a2dd95SBruce Richardson struct rte_event *ev = &buf->events[buf->count]; 87199a2dd95SBruce Richardson struct eth_rx_vector_data *vec; 87299a2dd95SBruce Richardson uint16_t filled, space, sz; 87399a2dd95SBruce Richardson 87499a2dd95SBruce 
Richardson filled = 0; 87599a2dd95SBruce Richardson vec = &queue_info->vector_data; 87699a2dd95SBruce Richardson 87799a2dd95SBruce Richardson if (vec->vector_ev == NULL) { 87899a2dd95SBruce Richardson if (rte_mempool_get(vec->vector_pool, 87999a2dd95SBruce Richardson (void **)&vec->vector_ev) < 0) { 88099a2dd95SBruce Richardson rte_pktmbuf_free_bulk(mbufs, num); 88199a2dd95SBruce Richardson return 0; 88299a2dd95SBruce Richardson } 88399a2dd95SBruce Richardson rxa_init_vector(rx_adapter, vec); 88499a2dd95SBruce Richardson } 88599a2dd95SBruce Richardson while (num) { 88699a2dd95SBruce Richardson if (vec->vector_ev->nb_elem == vec->max_vector_count) { 88799a2dd95SBruce Richardson /* Event ready. */ 88899a2dd95SBruce Richardson ev->event = vec->event; 88999a2dd95SBruce Richardson ev->vec = vec->vector_ev; 89099a2dd95SBruce Richardson ev++; 89199a2dd95SBruce Richardson filled++; 89299a2dd95SBruce Richardson vec->vector_ev = NULL; 89399a2dd95SBruce Richardson TAILQ_REMOVE(&rx_adapter->vector_list, vec, next); 89499a2dd95SBruce Richardson if (rte_mempool_get(vec->vector_pool, 89599a2dd95SBruce Richardson (void **)&vec->vector_ev) < 0) { 89699a2dd95SBruce Richardson rte_pktmbuf_free_bulk(mbufs, num); 89799a2dd95SBruce Richardson return 0; 89899a2dd95SBruce Richardson } 89999a2dd95SBruce Richardson rxa_init_vector(rx_adapter, vec); 90099a2dd95SBruce Richardson } 90199a2dd95SBruce Richardson 90299a2dd95SBruce Richardson space = vec->max_vector_count - vec->vector_ev->nb_elem; 90399a2dd95SBruce Richardson sz = num > space ? 
space : num; 90499a2dd95SBruce Richardson memcpy(vec->vector_ev->mbufs + vec->vector_ev->nb_elem, mbufs, 90599a2dd95SBruce Richardson sizeof(void *) * sz); 90699a2dd95SBruce Richardson vec->vector_ev->nb_elem += sz; 90799a2dd95SBruce Richardson num -= sz; 90899a2dd95SBruce Richardson mbufs += sz; 90999a2dd95SBruce Richardson vec->ts = rte_rdtsc(); 91099a2dd95SBruce Richardson } 91199a2dd95SBruce Richardson 91299a2dd95SBruce Richardson if (vec->vector_ev->nb_elem == vec->max_vector_count) { 91399a2dd95SBruce Richardson ev->event = vec->event; 91499a2dd95SBruce Richardson ev->vec = vec->vector_ev; 91599a2dd95SBruce Richardson ev++; 91699a2dd95SBruce Richardson filled++; 91799a2dd95SBruce Richardson vec->vector_ev = NULL; 91899a2dd95SBruce Richardson TAILQ_REMOVE(&rx_adapter->vector_list, vec, next); 91999a2dd95SBruce Richardson } 92099a2dd95SBruce Richardson 92199a2dd95SBruce Richardson return filled; 92299a2dd95SBruce Richardson } 92399a2dd95SBruce Richardson 92499a2dd95SBruce Richardson static inline void 925a256a743SPavan Nikhilesh rxa_buffer_mbufs(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, 926a256a743SPavan Nikhilesh uint16_t rx_queue_id, struct rte_mbuf **mbufs, uint16_t num, 927995b150cSNaga Harish K S V struct eth_event_enqueue_buffer *buf, 928995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats) 92999a2dd95SBruce Richardson { 93099a2dd95SBruce Richardson uint32_t i; 93199a2dd95SBruce Richardson struct eth_device_info *dev_info = 93299a2dd95SBruce Richardson &rx_adapter->eth_devices[eth_dev_id]; 93399a2dd95SBruce Richardson struct eth_rx_queue_info *eth_rx_queue_info = 93499a2dd95SBruce Richardson &dev_info->rx_queue[rx_queue_id]; 9358113fd15SGanapati Kundapura uint16_t new_tail = buf->tail; 93699a2dd95SBruce Richardson uint64_t event = eth_rx_queue_info->event; 93799a2dd95SBruce Richardson uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; 93899a2dd95SBruce Richardson struct rte_mbuf *m = mbufs[0]; 93999a2dd95SBruce 
Richardson uint32_t rss_mask; 94099a2dd95SBruce Richardson uint32_t rss; 94199a2dd95SBruce Richardson int do_rss; 94299a2dd95SBruce Richardson uint16_t nb_cb; 94399a2dd95SBruce Richardson uint16_t dropped; 94483ab470dSGanapati Kundapura uint64_t ts, ts_mask; 94599a2dd95SBruce Richardson 94699a2dd95SBruce Richardson if (!eth_rx_queue_info->ena_vector) { 94783ab470dSGanapati Kundapura ts = m->ol_flags & event_eth_rx_timestamp_dynflag ? 94883ab470dSGanapati Kundapura 0 : rte_get_tsc_cycles(); 94983ab470dSGanapati Kundapura 950daa02b5cSOlivier Matz /* 0xffff ffff ffff ffff if RTE_MBUF_F_RX_TIMESTAMP is set, 95183ab470dSGanapati Kundapura * otherwise 0 95283ab470dSGanapati Kundapura */ 95383ab470dSGanapati Kundapura ts_mask = (uint64_t)(!(m->ol_flags & 95483ab470dSGanapati Kundapura event_eth_rx_timestamp_dynflag)) - 1ULL; 95583ab470dSGanapati Kundapura 956daa02b5cSOlivier Matz /* 0xffff ffff if RTE_MBUF_F_RX_RSS_HASH is set, otherwise 0 */ 957daa02b5cSOlivier Matz rss_mask = ~(((m->ol_flags & RTE_MBUF_F_RX_RSS_HASH) != 0) - 1); 95899a2dd95SBruce Richardson do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask; 95999a2dd95SBruce Richardson for (i = 0; i < num; i++) { 9608113fd15SGanapati Kundapura struct rte_event *ev; 9618113fd15SGanapati Kundapura 96299a2dd95SBruce Richardson m = mbufs[i]; 96383ab470dSGanapati Kundapura *rxa_timestamp_dynfield(m) = ts | 96483ab470dSGanapati Kundapura (*rxa_timestamp_dynfield(m) & ts_mask); 96583ab470dSGanapati Kundapura 9668113fd15SGanapati Kundapura ev = &buf->events[new_tail]; 96799a2dd95SBruce Richardson 96899a2dd95SBruce Richardson rss = do_rss ? 
rxa_do_softrss(m, rx_adapter->rss_key_be) 96999a2dd95SBruce Richardson : m->hash.rss; 97099a2dd95SBruce Richardson ev->event = event; 97199a2dd95SBruce Richardson ev->flow_id = (rss & ~flow_id_mask) | 97299a2dd95SBruce Richardson (ev->flow_id & flow_id_mask); 97399a2dd95SBruce Richardson ev->mbuf = m; 9748113fd15SGanapati Kundapura new_tail++; 97599a2dd95SBruce Richardson } 97699a2dd95SBruce Richardson } else { 97799a2dd95SBruce Richardson num = rxa_create_event_vector(rx_adapter, eth_rx_queue_info, 97899a2dd95SBruce Richardson buf, mbufs, num); 97999a2dd95SBruce Richardson } 98099a2dd95SBruce Richardson 98199a2dd95SBruce Richardson if (num && dev_info->cb_fn) { 98299a2dd95SBruce Richardson 98399a2dd95SBruce Richardson dropped = 0; 98499a2dd95SBruce Richardson nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, 9858113fd15SGanapati Kundapura buf->last | 986bc0df25cSNaga Harish K S V (buf->events_size & ~buf->last_mask), 9878113fd15SGanapati Kundapura buf->count >= BATCH_SIZE ? 9888113fd15SGanapati Kundapura buf->count - BATCH_SIZE : 0, 9898113fd15SGanapati Kundapura &buf->events[buf->tail], 9908113fd15SGanapati Kundapura num, 9918113fd15SGanapati Kundapura dev_info->cb_arg, 9928113fd15SGanapati Kundapura &dropped); 99399a2dd95SBruce Richardson if (unlikely(nb_cb > num)) 99499a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events", 99599a2dd95SBruce Richardson nb_cb, num); 99699a2dd95SBruce Richardson else 99799a2dd95SBruce Richardson num = nb_cb; 99899a2dd95SBruce Richardson if (dropped) 999995b150cSNaga Harish K S V stats->rx_dropped += dropped; 100099a2dd95SBruce Richardson } 100199a2dd95SBruce Richardson 100299a2dd95SBruce Richardson buf->count += num; 10038113fd15SGanapati Kundapura buf->tail += num; 10048113fd15SGanapati Kundapura } 10058113fd15SGanapati Kundapura 10068113fd15SGanapati Kundapura static inline bool 1007a256a743SPavan Nikhilesh rxa_pkt_buf_available(struct eth_event_enqueue_buffer *buf) 10088113fd15SGanapati Kundapura { 
10098113fd15SGanapati Kundapura uint32_t nb_req = buf->tail + BATCH_SIZE; 10108113fd15SGanapati Kundapura 10118113fd15SGanapati Kundapura if (!buf->last) { 1012bc0df25cSNaga Harish K S V if (nb_req <= buf->events_size) 10138113fd15SGanapati Kundapura return true; 10148113fd15SGanapati Kundapura 10158113fd15SGanapati Kundapura if (buf->head >= BATCH_SIZE) { 10168113fd15SGanapati Kundapura buf->last_mask = ~0; 10178113fd15SGanapati Kundapura buf->last = buf->tail; 10188113fd15SGanapati Kundapura buf->tail = 0; 10198113fd15SGanapati Kundapura return true; 10208113fd15SGanapati Kundapura } 10218113fd15SGanapati Kundapura } 10228113fd15SGanapati Kundapura 10238113fd15SGanapati Kundapura return nb_req <= buf->head; 102499a2dd95SBruce Richardson } 102599a2dd95SBruce Richardson 102699a2dd95SBruce Richardson /* Enqueue packets from <port, q> to event buffer */ 102799a2dd95SBruce Richardson static inline uint32_t 1028a256a743SPavan Nikhilesh rxa_eth_rx(struct event_eth_rx_adapter *rx_adapter, uint16_t port_id, 1029a256a743SPavan Nikhilesh uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, 1030995b150cSNaga Harish K S V int *rxq_empty, struct eth_event_enqueue_buffer *buf, 1031995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats) 103299a2dd95SBruce Richardson { 103399a2dd95SBruce Richardson struct rte_mbuf *mbufs[BATCH_SIZE]; 103499a2dd95SBruce Richardson uint16_t n; 103599a2dd95SBruce Richardson uint32_t nb_rx = 0; 1036578402f2SMattias Rönnblom uint32_t nb_flushed = 0; 103799a2dd95SBruce Richardson 103899a2dd95SBruce Richardson if (rxq_empty) 103999a2dd95SBruce Richardson *rxq_empty = 0; 104099a2dd95SBruce Richardson /* Don't do a batch dequeue from the rx queue if there isn't 104199a2dd95SBruce Richardson * enough space in the enqueue buffer. 
104299a2dd95SBruce Richardson */ 10438113fd15SGanapati Kundapura while (rxa_pkt_buf_available(buf)) { 104499a2dd95SBruce Richardson if (buf->count >= BATCH_SIZE) 1045578402f2SMattias Rönnblom nb_flushed += 1046995b150cSNaga Harish K S V rxa_flush_event_buffer(rx_adapter, buf, stats); 104799a2dd95SBruce Richardson 104899a2dd95SBruce Richardson stats->rx_poll_count++; 104999a2dd95SBruce Richardson n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); 105099a2dd95SBruce Richardson if (unlikely(!n)) { 105199a2dd95SBruce Richardson if (rxq_empty) 105299a2dd95SBruce Richardson *rxq_empty = 1; 105399a2dd95SBruce Richardson break; 105499a2dd95SBruce Richardson } 1055995b150cSNaga Harish K S V rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf, 1056995b150cSNaga Harish K S V stats); 105799a2dd95SBruce Richardson nb_rx += n; 105899a2dd95SBruce Richardson if (rx_count + nb_rx > max_rx) 105999a2dd95SBruce Richardson break; 106099a2dd95SBruce Richardson } 106199a2dd95SBruce Richardson 106299a2dd95SBruce Richardson if (buf->count > 0) 1063578402f2SMattias Rönnblom nb_flushed += rxa_flush_event_buffer(rx_adapter, buf, stats); 1064995b150cSNaga Harish K S V 1065995b150cSNaga Harish K S V stats->rx_packets += nb_rx; 1066578402f2SMattias Rönnblom if (nb_flushed == 0) 1067578402f2SMattias Rönnblom rte_event_maintain(rx_adapter->eventdev_id, 1068578402f2SMattias Rönnblom rx_adapter->event_port_id, 0); 106999a2dd95SBruce Richardson 107099a2dd95SBruce Richardson return nb_rx; 107199a2dd95SBruce Richardson } 107299a2dd95SBruce Richardson 107399a2dd95SBruce Richardson static inline void 1074a256a743SPavan Nikhilesh rxa_intr_ring_enqueue(struct event_eth_rx_adapter *rx_adapter, void *data) 107599a2dd95SBruce Richardson { 107699a2dd95SBruce Richardson uint16_t port_id; 107799a2dd95SBruce Richardson uint16_t queue; 107899a2dd95SBruce Richardson int err; 107999a2dd95SBruce Richardson union queue_data qd; 108099a2dd95SBruce Richardson struct eth_device_info *dev_info; 
108199a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 108299a2dd95SBruce Richardson int *intr_enabled; 108399a2dd95SBruce Richardson 108499a2dd95SBruce Richardson qd.ptr = data; 108599a2dd95SBruce Richardson port_id = qd.port; 108699a2dd95SBruce Richardson queue = qd.queue; 108799a2dd95SBruce Richardson 108899a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[port_id]; 108999a2dd95SBruce Richardson queue_info = &dev_info->rx_queue[queue]; 109099a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->intr_ring_lock); 109199a2dd95SBruce Richardson if (rxa_shared_intr(dev_info, queue)) 109299a2dd95SBruce Richardson intr_enabled = &dev_info->shared_intr_enabled; 109399a2dd95SBruce Richardson else 109499a2dd95SBruce Richardson intr_enabled = &queue_info->intr_enabled; 109599a2dd95SBruce Richardson 109699a2dd95SBruce Richardson if (*intr_enabled) { 109799a2dd95SBruce Richardson *intr_enabled = 0; 109899a2dd95SBruce Richardson err = rte_ring_enqueue(rx_adapter->intr_ring, data); 109999a2dd95SBruce Richardson /* Entry should always be available. 
110099a2dd95SBruce Richardson * The ring size equals the maximum number of interrupt 110199a2dd95SBruce Richardson * vectors supported (an interrupt vector is shared in 110299a2dd95SBruce Richardson * case of shared interrupts) 110399a2dd95SBruce Richardson */ 110499a2dd95SBruce Richardson if (err) 110599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to enqueue interrupt" 110699a2dd95SBruce Richardson " to ring: %s", strerror(-err)); 110799a2dd95SBruce Richardson else 110899a2dd95SBruce Richardson rte_eth_dev_rx_intr_disable(port_id, queue); 110999a2dd95SBruce Richardson } 111099a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->intr_ring_lock); 111199a2dd95SBruce Richardson } 111299a2dd95SBruce Richardson 111399a2dd95SBruce Richardson static int 1114a256a743SPavan Nikhilesh rxa_intr_ring_check_avail(struct event_eth_rx_adapter *rx_adapter, 111599a2dd95SBruce Richardson uint32_t num_intr_vec) 111699a2dd95SBruce Richardson { 111799a2dd95SBruce Richardson if (rx_adapter->num_intr_vec + num_intr_vec > 111899a2dd95SBruce Richardson RTE_EVENT_ETH_INTR_RING_SIZE) { 111999a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Exceeded intr ring slots current" 112099a2dd95SBruce Richardson " %d needed %d limit %d", rx_adapter->num_intr_vec, 112199a2dd95SBruce Richardson num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE); 112299a2dd95SBruce Richardson return -ENOSPC; 112399a2dd95SBruce Richardson } 112499a2dd95SBruce Richardson 112599a2dd95SBruce Richardson return 0; 112699a2dd95SBruce Richardson } 112799a2dd95SBruce Richardson 112899a2dd95SBruce Richardson /* Delete entries for (dev, queue) from the interrupt ring */ 112999a2dd95SBruce Richardson static void 1130a256a743SPavan Nikhilesh rxa_intr_ring_del_entries(struct event_eth_rx_adapter *rx_adapter, 113199a2dd95SBruce Richardson struct eth_device_info *dev_info, 113299a2dd95SBruce Richardson uint16_t rx_queue_id) 113399a2dd95SBruce Richardson { 113499a2dd95SBruce Richardson int i, n; 113599a2dd95SBruce Richardson union queue_data 
qd; 113699a2dd95SBruce Richardson 113799a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->intr_ring_lock); 113899a2dd95SBruce Richardson 113999a2dd95SBruce Richardson n = rte_ring_count(rx_adapter->intr_ring); 114099a2dd95SBruce Richardson for (i = 0; i < n; i++) { 114199a2dd95SBruce Richardson rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr); 114299a2dd95SBruce Richardson if (!rxa_shared_intr(dev_info, rx_queue_id)) { 114399a2dd95SBruce Richardson if (qd.port == dev_info->dev->data->port_id && 114499a2dd95SBruce Richardson qd.queue == rx_queue_id) 114599a2dd95SBruce Richardson continue; 114699a2dd95SBruce Richardson } else { 114799a2dd95SBruce Richardson if (qd.port == dev_info->dev->data->port_id) 114899a2dd95SBruce Richardson continue; 114999a2dd95SBruce Richardson } 115099a2dd95SBruce Richardson rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr); 115199a2dd95SBruce Richardson } 115299a2dd95SBruce Richardson 115399a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->intr_ring_lock); 115499a2dd95SBruce Richardson } 115599a2dd95SBruce Richardson 115699a2dd95SBruce Richardson /* pthread callback handling interrupt mode receive queues 115799a2dd95SBruce Richardson * After receiving an Rx interrupt, it enqueues the port id and queue id of the 115899a2dd95SBruce Richardson * interrupting queue to the adapter's ring buffer for interrupt events. 115999a2dd95SBruce Richardson * These events are picked up by rxa_intr_ring_dequeue() which is invoked from 116099a2dd95SBruce Richardson * the adapter service function. 
116199a2dd95SBruce Richardson */ 116299a2dd95SBruce Richardson static void * 116399a2dd95SBruce Richardson rxa_intr_thread(void *arg) 116499a2dd95SBruce Richardson { 1165a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter = arg; 116699a2dd95SBruce Richardson struct rte_epoll_event *epoll_events = rx_adapter->epoll_events; 116799a2dd95SBruce Richardson int n, i; 116899a2dd95SBruce Richardson 116999a2dd95SBruce Richardson while (1) { 117099a2dd95SBruce Richardson n = rte_epoll_wait(rx_adapter->epd, epoll_events, 117199a2dd95SBruce Richardson RTE_EVENT_ETH_INTR_RING_SIZE, -1); 117299a2dd95SBruce Richardson if (unlikely(n < 0)) 117399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d", 117499a2dd95SBruce Richardson n); 117599a2dd95SBruce Richardson for (i = 0; i < n; i++) { 117699a2dd95SBruce Richardson rxa_intr_ring_enqueue(rx_adapter, 117799a2dd95SBruce Richardson epoll_events[i].epdata.data); 117899a2dd95SBruce Richardson } 117999a2dd95SBruce Richardson } 118099a2dd95SBruce Richardson 118199a2dd95SBruce Richardson return NULL; 118299a2dd95SBruce Richardson } 118399a2dd95SBruce Richardson 118499a2dd95SBruce Richardson /* Dequeue <port, q> from interrupt ring and enqueue received 118599a2dd95SBruce Richardson * mbufs to eventdev 118699a2dd95SBruce Richardson */ 1187995b150cSNaga Harish K S V static inline void 1188a256a743SPavan Nikhilesh rxa_intr_ring_dequeue(struct event_eth_rx_adapter *rx_adapter) 118999a2dd95SBruce Richardson { 119099a2dd95SBruce Richardson uint32_t n; 119199a2dd95SBruce Richardson uint32_t nb_rx = 0; 119299a2dd95SBruce Richardson int rxq_empty; 1193a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *buf; 1194995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats; 119599a2dd95SBruce Richardson rte_spinlock_t *ring_lock; 119699a2dd95SBruce Richardson uint8_t max_done = 0; 119799a2dd95SBruce Richardson 119899a2dd95SBruce Richardson if (rx_adapter->num_rx_intr == 0) 1199995b150cSNaga Harish 
K S V return; 120099a2dd95SBruce Richardson 120199a2dd95SBruce Richardson if (rte_ring_count(rx_adapter->intr_ring) == 0 120299a2dd95SBruce Richardson && !rx_adapter->qd_valid) 1203995b150cSNaga Harish K S V return; 120499a2dd95SBruce Richardson 120599a2dd95SBruce Richardson buf = &rx_adapter->event_enqueue_buffer; 1206995b150cSNaga Harish K S V stats = &rx_adapter->stats; 120799a2dd95SBruce Richardson ring_lock = &rx_adapter->intr_ring_lock; 120899a2dd95SBruce Richardson 120999a2dd95SBruce Richardson if (buf->count >= BATCH_SIZE) 1210995b150cSNaga Harish K S V rxa_flush_event_buffer(rx_adapter, buf, stats); 121199a2dd95SBruce Richardson 12128113fd15SGanapati Kundapura while (rxa_pkt_buf_available(buf)) { 121399a2dd95SBruce Richardson struct eth_device_info *dev_info; 121499a2dd95SBruce Richardson uint16_t port; 121599a2dd95SBruce Richardson uint16_t queue; 121699a2dd95SBruce Richardson union queue_data qd = rx_adapter->qd; 121799a2dd95SBruce Richardson int err; 121899a2dd95SBruce Richardson 121999a2dd95SBruce Richardson if (!rx_adapter->qd_valid) { 122099a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 122199a2dd95SBruce Richardson 122299a2dd95SBruce Richardson rte_spinlock_lock(ring_lock); 122399a2dd95SBruce Richardson err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr); 122499a2dd95SBruce Richardson if (err) { 122599a2dd95SBruce Richardson rte_spinlock_unlock(ring_lock); 122699a2dd95SBruce Richardson break; 122799a2dd95SBruce Richardson } 122899a2dd95SBruce Richardson 122999a2dd95SBruce Richardson port = qd.port; 123099a2dd95SBruce Richardson queue = qd.queue; 123199a2dd95SBruce Richardson rx_adapter->qd = qd; 123299a2dd95SBruce Richardson rx_adapter->qd_valid = 1; 123399a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[port]; 123499a2dd95SBruce Richardson if (rxa_shared_intr(dev_info, queue)) 123599a2dd95SBruce Richardson dev_info->shared_intr_enabled = 1; 123699a2dd95SBruce Richardson else { 123799a2dd95SBruce Richardson queue_info = 
&dev_info->rx_queue[queue]; 123899a2dd95SBruce Richardson queue_info->intr_enabled = 1; 123999a2dd95SBruce Richardson } 124099a2dd95SBruce Richardson rte_eth_dev_rx_intr_enable(port, queue); 124199a2dd95SBruce Richardson rte_spinlock_unlock(ring_lock); 124299a2dd95SBruce Richardson } else { 124399a2dd95SBruce Richardson port = qd.port; 124499a2dd95SBruce Richardson queue = qd.queue; 124599a2dd95SBruce Richardson 124699a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[port]; 124799a2dd95SBruce Richardson } 124899a2dd95SBruce Richardson 124999a2dd95SBruce Richardson if (rxa_shared_intr(dev_info, queue)) { 125099a2dd95SBruce Richardson uint16_t i; 125199a2dd95SBruce Richardson uint16_t nb_queues; 125299a2dd95SBruce Richardson 125399a2dd95SBruce Richardson nb_queues = dev_info->dev->data->nb_rx_queues; 125499a2dd95SBruce Richardson n = 0; 125599a2dd95SBruce Richardson for (i = dev_info->next_q_idx; i < nb_queues; i++) { 125699a2dd95SBruce Richardson uint8_t enq_buffer_full; 125799a2dd95SBruce Richardson 125899a2dd95SBruce Richardson if (!rxa_intr_queue(dev_info, i)) 125999a2dd95SBruce Richardson continue; 126099a2dd95SBruce Richardson n = rxa_eth_rx(rx_adapter, port, i, nb_rx, 126199a2dd95SBruce Richardson rx_adapter->max_nb_rx, 1262995b150cSNaga Harish K S V &rxq_empty, buf, stats); 126399a2dd95SBruce Richardson nb_rx += n; 126499a2dd95SBruce Richardson 126599a2dd95SBruce Richardson enq_buffer_full = !rxq_empty && n == 0; 126699a2dd95SBruce Richardson max_done = nb_rx > rx_adapter->max_nb_rx; 126799a2dd95SBruce Richardson 126899a2dd95SBruce Richardson if (enq_buffer_full || max_done) { 126999a2dd95SBruce Richardson dev_info->next_q_idx = i; 127099a2dd95SBruce Richardson goto done; 127199a2dd95SBruce Richardson } 127299a2dd95SBruce Richardson } 127399a2dd95SBruce Richardson 127499a2dd95SBruce Richardson rx_adapter->qd_valid = 0; 127599a2dd95SBruce Richardson 127699a2dd95SBruce Richardson /* Reinitialize for next interrupt */ 127799a2dd95SBruce Richardson 
dev_info->next_q_idx = dev_info->multi_intr_cap ? 127899a2dd95SBruce Richardson RTE_MAX_RXTX_INTR_VEC_ID - 1 : 127999a2dd95SBruce Richardson 0; 128099a2dd95SBruce Richardson } else { 128199a2dd95SBruce Richardson n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, 128299a2dd95SBruce Richardson rx_adapter->max_nb_rx, 1283995b150cSNaga Harish K S V &rxq_empty, buf, stats); 128499a2dd95SBruce Richardson rx_adapter->qd_valid = !rxq_empty; 128599a2dd95SBruce Richardson nb_rx += n; 128699a2dd95SBruce Richardson if (nb_rx > rx_adapter->max_nb_rx) 128799a2dd95SBruce Richardson break; 128899a2dd95SBruce Richardson } 128999a2dd95SBruce Richardson } 129099a2dd95SBruce Richardson 129199a2dd95SBruce Richardson done: 129299a2dd95SBruce Richardson rx_adapter->stats.rx_intr_packets += nb_rx; 129399a2dd95SBruce Richardson } 129499a2dd95SBruce Richardson 129599a2dd95SBruce Richardson /* 129699a2dd95SBruce Richardson * Polls receive queues added to the event adapter and enqueues received 129799a2dd95SBruce Richardson * packets to the event device. 129899a2dd95SBruce Richardson * 129999a2dd95SBruce Richardson * The receive code enqueues initially to a temporary buffer, the 130099a2dd95SBruce Richardson * temporary buffer is drained anytime it holds >= BATCH_SIZE packets 130199a2dd95SBruce Richardson * 130299a2dd95SBruce Richardson * If there isn't space available in the temporary buffer, packets from the 130399a2dd95SBruce Richardson * Rx queue aren't dequeued from the eth device, this back pressures the 130499a2dd95SBruce Richardson * eth device, in virtual device environments this back pressure is relayed to 130599a2dd95SBruce Richardson * the hypervisor's switching layer where adjustments can be made to deal with 130699a2dd95SBruce Richardson * it. 
/*
 * Polls receive queues added to the event adapter and enqueues received
 * packets to the event device.
 *
 * The receive code enqueues initially to a temporary buffer, the
 * temporary buffer is drained anytime it holds >= BATCH_SIZE packets
 *
 * If there isn't space available in the temporary buffer, packets from the
 * Rx queue aren't dequeued from the eth device, this back pressures the
 * eth device, in virtual device environments this back pressure is relayed to
 * the hypervisor's switching layer where adjustments can be made to deal with
 * it.
 */
static inline void
rxa_poll(struct event_eth_rx_adapter *rx_adapter)
{
	uint32_t num_queue;
	uint32_t nb_rx = 0;
	struct eth_event_enqueue_buffer *buf = NULL;
	struct rte_event_eth_rx_adapter_stats *stats = NULL;
	uint32_t wrr_pos;
	uint32_t max_nb_rx;

	/* Resume the weighted-round-robin walk where the previous service
	 * invocation stopped; wrr_pos is persisted across calls.
	 */
	wrr_pos = rx_adapter->wrr_pos;
	max_nb_rx = rx_adapter->max_nb_rx;

	/* Iterate through a WRR sequence */
	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
		unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;

		/* Per-queue or adapter-global event buffer, depending on
		 * rx_adapter->use_queue_event_buf; stats selected to match.
		 */
		buf = rxa_event_buf_get(rx_adapter, d, qid, &stats);

		/* Don't do a batch dequeue from the rx queue if there isn't
		 * enough space in the enqueue buffer.
		 */
		if (buf->count >= BATCH_SIZE)
			rxa_flush_event_buffer(rx_adapter, buf, stats);
		if (!rxa_pkt_buf_available(buf)) {
			/* With per-queue buffers only this queue is blocked;
			 * with the shared buffer no queue can make progress,
			 * so save position and yield.
			 */
			if (rx_adapter->use_queue_event_buf)
				goto poll_next_entry;
			else {
				rx_adapter->wrr_pos = wrr_pos;
				return;
			}
		}

		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
				NULL, buf, stats);
		/* Budget for this service call exhausted; resume at the
		 * next WRR slot on the following invocation.
		 */
		if (nb_rx > max_nb_rx) {
			rx_adapter->wrr_pos =
				(wrr_pos + 1) % rx_adapter->wrr_len;
			break;
		}

poll_next_entry:
		if (++wrr_pos == rx_adapter->wrr_len)
			wrr_pos = 0;
	}
}
136999a2dd95SBruce Richardson 137099a2dd95SBruce Richardson if (vec->vector_ev->nb_elem == 0) 137199a2dd95SBruce Richardson return; 137299a2dd95SBruce Richardson ev = &buf->events[buf->count]; 137399a2dd95SBruce Richardson 137499a2dd95SBruce Richardson /* Event ready. */ 137599a2dd95SBruce Richardson ev->event = vec->event; 137699a2dd95SBruce Richardson ev->vec = vec->vector_ev; 137799a2dd95SBruce Richardson buf->count++; 137899a2dd95SBruce Richardson 137999a2dd95SBruce Richardson vec->vector_ev = NULL; 138099a2dd95SBruce Richardson vec->ts = 0; 138199a2dd95SBruce Richardson } 138299a2dd95SBruce Richardson 138399a2dd95SBruce Richardson static int 138499a2dd95SBruce Richardson rxa_service_func(void *args) 138599a2dd95SBruce Richardson { 1386a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter = args; 138799a2dd95SBruce Richardson 138899a2dd95SBruce Richardson if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0) 138999a2dd95SBruce Richardson return 0; 139099a2dd95SBruce Richardson if (!rx_adapter->rxa_started) { 139199a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 139299a2dd95SBruce Richardson return 0; 139399a2dd95SBruce Richardson } 139499a2dd95SBruce Richardson 139599a2dd95SBruce Richardson if (rx_adapter->ena_vector) { 139699a2dd95SBruce Richardson if ((rte_rdtsc() - rx_adapter->prev_expiry_ts) >= 139799a2dd95SBruce Richardson rx_adapter->vector_tmo_ticks) { 139899a2dd95SBruce Richardson struct eth_rx_vector_data *vec; 139999a2dd95SBruce Richardson 140099a2dd95SBruce Richardson TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) { 140199a2dd95SBruce Richardson uint64_t elapsed_time = rte_rdtsc() - vec->ts; 140299a2dd95SBruce Richardson 140399a2dd95SBruce Richardson if (elapsed_time >= vec->vector_timeout_ticks) { 140499a2dd95SBruce Richardson rxa_vector_expire(vec, rx_adapter); 140599a2dd95SBruce Richardson TAILQ_REMOVE(&rx_adapter->vector_list, 140699a2dd95SBruce Richardson vec, next); 140799a2dd95SBruce Richardson } 
140899a2dd95SBruce Richardson } 140999a2dd95SBruce Richardson rx_adapter->prev_expiry_ts = rte_rdtsc(); 141099a2dd95SBruce Richardson } 141199a2dd95SBruce Richardson } 141299a2dd95SBruce Richardson 1413995b150cSNaga Harish K S V rxa_intr_ring_dequeue(rx_adapter); 1414995b150cSNaga Harish K S V rxa_poll(rx_adapter); 1415995b150cSNaga Harish K S V 141699a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 1417995b150cSNaga Harish K S V 141899a2dd95SBruce Richardson return 0; 141999a2dd95SBruce Richardson } 142099a2dd95SBruce Richardson 1421a1793ee8SGanapati Kundapura static void * 1422a1793ee8SGanapati Kundapura rxa_memzone_array_get(const char *name, unsigned int elt_size, int nb_elems) 142399a2dd95SBruce Richardson { 142499a2dd95SBruce Richardson const struct rte_memzone *mz; 142599a2dd95SBruce Richardson unsigned int sz; 142699a2dd95SBruce Richardson 1427a1793ee8SGanapati Kundapura sz = elt_size * nb_elems; 142899a2dd95SBruce Richardson sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE); 142999a2dd95SBruce Richardson 143099a2dd95SBruce Richardson mz = rte_memzone_lookup(name); 143199a2dd95SBruce Richardson if (mz == NULL) { 143299a2dd95SBruce Richardson mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0, 143399a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE); 143499a2dd95SBruce Richardson if (mz == NULL) { 1435a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("failed to reserve memzone" 1436a1793ee8SGanapati Kundapura " name = %s, err = %" 1437a1793ee8SGanapati Kundapura PRId32, name, rte_errno); 1438a1793ee8SGanapati Kundapura return NULL; 143999a2dd95SBruce Richardson } 144099a2dd95SBruce Richardson } 144199a2dd95SBruce Richardson 1442a1793ee8SGanapati Kundapura return mz->addr; 1443a1793ee8SGanapati Kundapura } 1444a1793ee8SGanapati Kundapura 1445a1793ee8SGanapati Kundapura static int 1446a1793ee8SGanapati Kundapura rte_event_eth_rx_adapter_init(void) 1447a1793ee8SGanapati Kundapura { 1448a1793ee8SGanapati Kundapura uint8_t i; 1449a1793ee8SGanapati 
Kundapura 1450a1793ee8SGanapati Kundapura if (event_eth_rx_adapter == NULL) { 1451a1793ee8SGanapati Kundapura event_eth_rx_adapter = 1452a1793ee8SGanapati Kundapura rxa_memzone_array_get(RXA_ADAPTER_ARRAY, 1453a1793ee8SGanapati Kundapura sizeof(*event_eth_rx_adapter), 1454a1793ee8SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE); 1455a1793ee8SGanapati Kundapura if (event_eth_rx_adapter == NULL) 1456a1793ee8SGanapati Kundapura return -ENOMEM; 1457a1793ee8SGanapati Kundapura 1458a1793ee8SGanapati Kundapura for (i = 0; i < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; i++) 1459a1793ee8SGanapati Kundapura event_eth_rx_adapter[i] = NULL; 1460a1793ee8SGanapati Kundapura 1461a1793ee8SGanapati Kundapura } 1462a1793ee8SGanapati Kundapura 146399a2dd95SBruce Richardson return 0; 146499a2dd95SBruce Richardson } 146599a2dd95SBruce Richardson 1466da781e64SGanapati Kundapura static int 1467da781e64SGanapati Kundapura rxa_memzone_lookup(void) 1468da781e64SGanapati Kundapura { 1469da781e64SGanapati Kundapura const struct rte_memzone *mz; 1470da781e64SGanapati Kundapura 1471da781e64SGanapati Kundapura if (event_eth_rx_adapter == NULL) { 1472da781e64SGanapati Kundapura mz = rte_memzone_lookup(RXA_ADAPTER_ARRAY); 1473da781e64SGanapati Kundapura if (mz == NULL) 1474da781e64SGanapati Kundapura return -ENOMEM; 1475a1793ee8SGanapati Kundapura 1476da781e64SGanapati Kundapura event_eth_rx_adapter = mz->addr; 1477da781e64SGanapati Kundapura } 1478da781e64SGanapati Kundapura 1479da781e64SGanapati Kundapura return 0; 1480da781e64SGanapati Kundapura } 1481da781e64SGanapati Kundapura 1482a256a743SPavan Nikhilesh static inline struct event_eth_rx_adapter * 148399a2dd95SBruce Richardson rxa_id_to_adapter(uint8_t id) 148499a2dd95SBruce Richardson { 148599a2dd95SBruce Richardson return event_eth_rx_adapter ? 
148699a2dd95SBruce Richardson event_eth_rx_adapter[id] : NULL; 148799a2dd95SBruce Richardson } 148899a2dd95SBruce Richardson 148999a2dd95SBruce Richardson static int 149099a2dd95SBruce Richardson rxa_default_conf_cb(uint8_t id, uint8_t dev_id, 149199a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_conf *conf, void *arg) 149299a2dd95SBruce Richardson { 149399a2dd95SBruce Richardson int ret; 149499a2dd95SBruce Richardson struct rte_eventdev *dev; 149599a2dd95SBruce Richardson struct rte_event_dev_config dev_conf; 149699a2dd95SBruce Richardson int started; 149799a2dd95SBruce Richardson uint8_t port_id; 149899a2dd95SBruce Richardson struct rte_event_port_conf *port_conf = arg; 1499a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id); 150099a2dd95SBruce Richardson 150199a2dd95SBruce Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 150299a2dd95SBruce Richardson dev_conf = dev->data->dev_conf; 150399a2dd95SBruce Richardson 150499a2dd95SBruce Richardson started = dev->data->dev_started; 150599a2dd95SBruce Richardson if (started) 150699a2dd95SBruce Richardson rte_event_dev_stop(dev_id); 150799a2dd95SBruce Richardson port_id = dev_conf.nb_event_ports; 150899a2dd95SBruce Richardson dev_conf.nb_event_ports += 1; 150999a2dd95SBruce Richardson ret = rte_event_dev_configure(dev_id, &dev_conf); 151099a2dd95SBruce Richardson if (ret) { 151199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", 151299a2dd95SBruce Richardson dev_id); 151399a2dd95SBruce Richardson if (started) { 151499a2dd95SBruce Richardson if (rte_event_dev_start(dev_id)) 151599a2dd95SBruce Richardson return -EIO; 151699a2dd95SBruce Richardson } 151799a2dd95SBruce Richardson return ret; 151899a2dd95SBruce Richardson } 151999a2dd95SBruce Richardson 152099a2dd95SBruce Richardson ret = rte_event_port_setup(dev_id, port_id, port_conf); 152199a2dd95SBruce Richardson if (ret) { 152299a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to setup 
event port %u\n", 152399a2dd95SBruce Richardson port_id); 152499a2dd95SBruce Richardson return ret; 152599a2dd95SBruce Richardson } 152699a2dd95SBruce Richardson 152799a2dd95SBruce Richardson conf->event_port_id = port_id; 152899a2dd95SBruce Richardson conf->max_nb_rx = 128; 152999a2dd95SBruce Richardson if (started) 153099a2dd95SBruce Richardson ret = rte_event_dev_start(dev_id); 153199a2dd95SBruce Richardson rx_adapter->default_cb_arg = 1; 153299a2dd95SBruce Richardson return ret; 153399a2dd95SBruce Richardson } 153499a2dd95SBruce Richardson 153599a2dd95SBruce Richardson static int 153699a2dd95SBruce Richardson rxa_epoll_create1(void) 153799a2dd95SBruce Richardson { 153899a2dd95SBruce Richardson #if defined(LINUX) 153999a2dd95SBruce Richardson int fd; 154099a2dd95SBruce Richardson fd = epoll_create1(EPOLL_CLOEXEC); 154199a2dd95SBruce Richardson return fd < 0 ? -errno : fd; 154299a2dd95SBruce Richardson #elif defined(BSD) 154399a2dd95SBruce Richardson return -ENOTSUP; 154499a2dd95SBruce Richardson #endif 154599a2dd95SBruce Richardson } 154699a2dd95SBruce Richardson 154799a2dd95SBruce Richardson static int 1548a256a743SPavan Nikhilesh rxa_init_epd(struct event_eth_rx_adapter *rx_adapter) 154999a2dd95SBruce Richardson { 155099a2dd95SBruce Richardson if (rx_adapter->epd != INIT_FD) 155199a2dd95SBruce Richardson return 0; 155299a2dd95SBruce Richardson 155399a2dd95SBruce Richardson rx_adapter->epd = rxa_epoll_create1(); 155499a2dd95SBruce Richardson if (rx_adapter->epd < 0) { 155599a2dd95SBruce Richardson int err = rx_adapter->epd; 155699a2dd95SBruce Richardson rx_adapter->epd = INIT_FD; 155799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err); 155899a2dd95SBruce Richardson return err; 155999a2dd95SBruce Richardson } 156099a2dd95SBruce Richardson 156199a2dd95SBruce Richardson return 0; 156299a2dd95SBruce Richardson } 156399a2dd95SBruce Richardson 156499a2dd95SBruce Richardson static int 1565a256a743SPavan Nikhilesh 
rxa_create_intr_thread(struct event_eth_rx_adapter *rx_adapter) 156699a2dd95SBruce Richardson { 156799a2dd95SBruce Richardson int err; 156899a2dd95SBruce Richardson char thread_name[RTE_MAX_THREAD_NAME_LEN]; 156999a2dd95SBruce Richardson 157099a2dd95SBruce Richardson if (rx_adapter->intr_ring) 157199a2dd95SBruce Richardson return 0; 157299a2dd95SBruce Richardson 157399a2dd95SBruce Richardson rx_adapter->intr_ring = rte_ring_create("intr_ring", 157499a2dd95SBruce Richardson RTE_EVENT_ETH_INTR_RING_SIZE, 157599a2dd95SBruce Richardson rte_socket_id(), 0); 157699a2dd95SBruce Richardson if (!rx_adapter->intr_ring) 157799a2dd95SBruce Richardson return -ENOMEM; 157899a2dd95SBruce Richardson 157999a2dd95SBruce Richardson rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name, 158099a2dd95SBruce Richardson RTE_EVENT_ETH_INTR_RING_SIZE * 158199a2dd95SBruce Richardson sizeof(struct rte_epoll_event), 158299a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, 158399a2dd95SBruce Richardson rx_adapter->socket_id); 158499a2dd95SBruce Richardson if (!rx_adapter->epoll_events) { 158599a2dd95SBruce Richardson err = -ENOMEM; 158699a2dd95SBruce Richardson goto error; 158799a2dd95SBruce Richardson } 158899a2dd95SBruce Richardson 158999a2dd95SBruce Richardson rte_spinlock_init(&rx_adapter->intr_ring_lock); 159099a2dd95SBruce Richardson 159199a2dd95SBruce Richardson snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, 159299a2dd95SBruce Richardson "rx-intr-thread-%d", rx_adapter->id); 159399a2dd95SBruce Richardson 159499a2dd95SBruce Richardson err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name, 159599a2dd95SBruce Richardson NULL, rxa_intr_thread, rx_adapter); 15960bac9fc7SChengwen Feng if (!err) 159799a2dd95SBruce Richardson return 0; 159899a2dd95SBruce Richardson 159999a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err); 1600f6681ab7SChengwen Feng rte_free(rx_adapter->epoll_events); 160199a2dd95SBruce Richardson error: 
160299a2dd95SBruce Richardson rte_ring_free(rx_adapter->intr_ring); 160399a2dd95SBruce Richardson rx_adapter->intr_ring = NULL; 160499a2dd95SBruce Richardson rx_adapter->epoll_events = NULL; 160599a2dd95SBruce Richardson return err; 160699a2dd95SBruce Richardson } 160799a2dd95SBruce Richardson 160899a2dd95SBruce Richardson static int 1609a256a743SPavan Nikhilesh rxa_destroy_intr_thread(struct event_eth_rx_adapter *rx_adapter) 161099a2dd95SBruce Richardson { 161199a2dd95SBruce Richardson int err; 161299a2dd95SBruce Richardson 161399a2dd95SBruce Richardson err = pthread_cancel(rx_adapter->rx_intr_thread); 161499a2dd95SBruce Richardson if (err) 161599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n", 161699a2dd95SBruce Richardson err); 161799a2dd95SBruce Richardson 161899a2dd95SBruce Richardson err = pthread_join(rx_adapter->rx_intr_thread, NULL); 161999a2dd95SBruce Richardson if (err) 162099a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err); 162199a2dd95SBruce Richardson 162299a2dd95SBruce Richardson rte_free(rx_adapter->epoll_events); 162399a2dd95SBruce Richardson rte_ring_free(rx_adapter->intr_ring); 162499a2dd95SBruce Richardson rx_adapter->intr_ring = NULL; 162599a2dd95SBruce Richardson rx_adapter->epoll_events = NULL; 162699a2dd95SBruce Richardson return 0; 162799a2dd95SBruce Richardson } 162899a2dd95SBruce Richardson 162999a2dd95SBruce Richardson static int 1630a256a743SPavan Nikhilesh rxa_free_intr_resources(struct event_eth_rx_adapter *rx_adapter) 163199a2dd95SBruce Richardson { 163299a2dd95SBruce Richardson int ret; 163399a2dd95SBruce Richardson 163499a2dd95SBruce Richardson if (rx_adapter->num_rx_intr == 0) 163599a2dd95SBruce Richardson return 0; 163699a2dd95SBruce Richardson 163799a2dd95SBruce Richardson ret = rxa_destroy_intr_thread(rx_adapter); 163899a2dd95SBruce Richardson if (ret) 163999a2dd95SBruce Richardson return ret; 164099a2dd95SBruce Richardson 164199a2dd95SBruce Richardson 
close(rx_adapter->epd); 164299a2dd95SBruce Richardson rx_adapter->epd = INIT_FD; 164399a2dd95SBruce Richardson 164499a2dd95SBruce Richardson return ret; 164599a2dd95SBruce Richardson } 164699a2dd95SBruce Richardson 164799a2dd95SBruce Richardson static int 1648a256a743SPavan Nikhilesh rxa_disable_intr(struct event_eth_rx_adapter *rx_adapter, 1649a256a743SPavan Nikhilesh struct eth_device_info *dev_info, uint16_t rx_queue_id) 165099a2dd95SBruce Richardson { 165199a2dd95SBruce Richardson int err; 165299a2dd95SBruce Richardson uint16_t eth_dev_id = dev_info->dev->data->port_id; 165399a2dd95SBruce Richardson int sintr = rxa_shared_intr(dev_info, rx_queue_id); 165499a2dd95SBruce Richardson 165599a2dd95SBruce Richardson err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id); 165699a2dd95SBruce Richardson if (err) { 165799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u", 165899a2dd95SBruce Richardson rx_queue_id); 165999a2dd95SBruce Richardson return err; 166099a2dd95SBruce Richardson } 166199a2dd95SBruce Richardson 166299a2dd95SBruce Richardson err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id, 166399a2dd95SBruce Richardson rx_adapter->epd, 166499a2dd95SBruce Richardson RTE_INTR_EVENT_DEL, 166599a2dd95SBruce Richardson 0); 166699a2dd95SBruce Richardson if (err) 166799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err); 166899a2dd95SBruce Richardson 166999a2dd95SBruce Richardson if (sintr) 167099a2dd95SBruce Richardson dev_info->rx_queue[rx_queue_id].intr_enabled = 0; 167199a2dd95SBruce Richardson else 167299a2dd95SBruce Richardson dev_info->shared_intr_enabled = 0; 167399a2dd95SBruce Richardson return err; 167499a2dd95SBruce Richardson } 167599a2dd95SBruce Richardson 167699a2dd95SBruce Richardson static int 1677a256a743SPavan Nikhilesh rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter, 1678a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int rx_queue_id) 167999a2dd95SBruce 
/*
 * Remove interrupt-mode state for one Rx queue, or for every interrupt
 * queue of the device when rx_queue_id == -1. Shared interrupts are
 * disabled only once, together with the last queue that uses them.
 *
 * @param rx_adapter
 *  Adapter instance.
 * @param dev_info
 *  Ethernet device state; nb_rx_intr/intr_queue[] track interrupt queues.
 * @param rx_queue_id
 *  Queue to remove, or -1 for all interrupt queues of the device.
 * @return
 *  0 on success or when nothing is configured, rxa_disable_intr() error
 *  otherwise.
 */
static int
rxa_del_intr_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int rx_queue_id)
{
	int err;
	int i;
	int s;

	/* No interrupt queues configured for this device */
	if (dev_info->nb_rx_intr == 0)
		return 0;

	err = 0;
	if (rx_queue_id == -1) {
		/* s counts down the remaining shared-interrupt queues so the
		 * shared interrupt is disabled exactly once, on the last one.
		 */
		s = dev_info->nb_shared_intr;
		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			int sintr;
			uint16_t q;

			q = dev_info->intr_queue[i];
			sintr = rxa_shared_intr(dev_info, q);
			s -= sintr;

			/* Non-shared queue: always disable. Shared queue:
			 * disable only when this is the last one (s == 0).
			 */
			if (!sintr || s == 0) {

				err = rxa_disable_intr(rx_adapter, dev_info,
						q);
				if (err)
					return err;
				rxa_intr_ring_del_entries(rx_adapter, dev_info,
							q);
			}
		}
	} else {
		if (!rxa_intr_queue(dev_info, rx_queue_id))
			return 0;
		/* Disable unless other queues still share this interrupt */
		if (!rxa_shared_intr(dev_info, rx_queue_id) ||
				dev_info->nb_shared_intr == 1) {
			err = rxa_disable_intr(rx_adapter, dev_info,
					rx_queue_id);
			if (err)
				return err;
			rxa_intr_ring_del_entries(rx_adapter, dev_info,
						rx_queue_id);
		}

		/* Remove the queue from intr_queue[] by shifting the
		 * remaining entries down one slot.
		 */
		for (i = 0; i < dev_info->nb_rx_intr; i++) {
			if (dev_info->intr_queue[i] == rx_queue_id) {
				for (; i < dev_info->nb_rx_intr - 1; i++)
					dev_info->intr_queue[i] =
						dev_info->intr_queue[i + 1];
				break;
			}
		}
	}

	return err;
}
Richardson } 176199a2dd95SBruce Richardson 176299a2dd95SBruce Richardson init_fd = rx_adapter->epd; 176399a2dd95SBruce Richardson err = rxa_init_epd(rx_adapter); 176499a2dd95SBruce Richardson if (err) 176599a2dd95SBruce Richardson goto err_free_queue; 176699a2dd95SBruce Richardson 176799a2dd95SBruce Richardson qd.port = eth_dev_id; 176899a2dd95SBruce Richardson qd.queue = rx_queue_id; 176999a2dd95SBruce Richardson 177099a2dd95SBruce Richardson err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id, 177199a2dd95SBruce Richardson rx_adapter->epd, 177299a2dd95SBruce Richardson RTE_INTR_EVENT_ADD, 177399a2dd95SBruce Richardson qd.ptr); 177499a2dd95SBruce Richardson if (err) { 177599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to add interrupt event for" 177699a2dd95SBruce Richardson " Rx Queue %u err %d", rx_queue_id, err); 177799a2dd95SBruce Richardson goto err_del_fd; 177899a2dd95SBruce Richardson } 177999a2dd95SBruce Richardson 178099a2dd95SBruce Richardson err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id); 178199a2dd95SBruce Richardson if (err) { 178299a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Could not enable interrupt for" 178399a2dd95SBruce Richardson " Rx Queue %u err %d", rx_queue_id, err); 178499a2dd95SBruce Richardson 178599a2dd95SBruce Richardson goto err_del_event; 178699a2dd95SBruce Richardson } 178799a2dd95SBruce Richardson 178899a2dd95SBruce Richardson err = rxa_create_intr_thread(rx_adapter); 178999a2dd95SBruce Richardson if (!err) { 179099a2dd95SBruce Richardson if (sintr) 179199a2dd95SBruce Richardson dev_info->shared_intr_enabled = 1; 179299a2dd95SBruce Richardson else 179399a2dd95SBruce Richardson dev_info->rx_queue[rx_queue_id].intr_enabled = 1; 179499a2dd95SBruce Richardson return 0; 179599a2dd95SBruce Richardson } 179699a2dd95SBruce Richardson 179799a2dd95SBruce Richardson 179899a2dd95SBruce Richardson err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id); 179999a2dd95SBruce Richardson if (err) 180099a2dd95SBruce Richardson 
/*
 * Enable interrupt mode for one Rx queue, or for all Rx queues of the
 * device when rx_queue_id == -1. In the all-queues case a failure part way
 * through rolls back the queues configured so far.
 *
 * @param rx_adapter
 *  Adapter instance.
 * @param dev_info
 *  Ethernet device state owned by the adapter.
 * @param rx_queue_id
 *  Queue to configure, or -1 for all queues of the device.
 * @return
 *  0 on success, rxa_config_intr()/rxa_disable_intr() error otherwise.
 */
static int
rxa_add_intr_queue(struct event_eth_rx_adapter *rx_adapter,
	struct eth_device_info *dev_info, int rx_queue_id)

{
	int i, j, err;
	int si = -1;
	int shared_done = (dev_info->nb_shared_intr > 0);

	if (rx_queue_id != -1) {
		/* Shared interrupt already configured: nothing more to do
		 * for a queue that uses it.
		 */
		if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
			return 0;
		return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
	}

	err = 0;
	for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {

		/* Configure the shared interrupt only once */
		if (rxa_shared_intr(dev_info, i) && shared_done)
			continue;

		err = rxa_config_intr(rx_adapter, dev_info, i);

		shared_done = err == 0 && rxa_shared_intr(dev_info, i);
		if (shared_done) {
			/* si remembers which queue carries the shared
			 * interrupt, for the rollback below.
			 */
			si = i;
			dev_info->shared_intr_enabled = 1;
		}
		if (err)
			break;
	}

	if (err == 0)
		return 0;

	/* Rollback: disable the queues configured before the failure; for
	 * shared interrupts only the carrier queue (si) is disabled.
	 */
	shared_done = (dev_info->nb_shared_intr > 0);
	for (j = 0; j < i; j++) {
		if (rxa_intr_queue(dev_info, j))
			continue;
		if (rxa_shared_intr(dev_info, j) && si != j)
			continue;
		err = rxa_disable_intr(rx_adapter, dev_info, j);
		if (err)
			break;

	}

	return err;
}
188099a2dd95SBruce Richardson if (rx_adapter->service_inited) 188199a2dd95SBruce Richardson return 0; 188299a2dd95SBruce Richardson 188399a2dd95SBruce Richardson memset(&service, 0, sizeof(service)); 188499a2dd95SBruce Richardson snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN, 188599a2dd95SBruce Richardson "rte_event_eth_rx_adapter_%d", id); 188699a2dd95SBruce Richardson service.socket_id = rx_adapter->socket_id; 188799a2dd95SBruce Richardson service.callback = rxa_service_func; 188899a2dd95SBruce Richardson service.callback_userdata = rx_adapter; 188999a2dd95SBruce Richardson /* Service function handles locking for queue add/del updates */ 189099a2dd95SBruce Richardson service.capabilities = RTE_SERVICE_CAP_MT_SAFE; 189199a2dd95SBruce Richardson ret = rte_service_component_register(&service, &rx_adapter->service_id); 189299a2dd95SBruce Richardson if (ret) { 189399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32, 189499a2dd95SBruce Richardson service.name, ret); 189599a2dd95SBruce Richardson return ret; 189699a2dd95SBruce Richardson } 189799a2dd95SBruce Richardson 189899a2dd95SBruce Richardson ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id, 189999a2dd95SBruce Richardson &rx_adapter_conf, rx_adapter->conf_arg); 190099a2dd95SBruce Richardson if (ret) { 190199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32, 190299a2dd95SBruce Richardson ret); 190399a2dd95SBruce Richardson goto err_done; 190499a2dd95SBruce Richardson } 190599a2dd95SBruce Richardson rx_adapter->event_port_id = rx_adapter_conf.event_port_id; 190699a2dd95SBruce Richardson rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx; 190799a2dd95SBruce Richardson rx_adapter->service_inited = 1; 190899a2dd95SBruce Richardson rx_adapter->epd = INIT_FD; 190999a2dd95SBruce Richardson return 0; 191099a2dd95SBruce Richardson 191199a2dd95SBruce Richardson err_done: 191299a2dd95SBruce Richardson 
rte_service_component_unregister(rx_adapter->service_id); 191399a2dd95SBruce Richardson return ret; 191499a2dd95SBruce Richardson } 191599a2dd95SBruce Richardson 191699a2dd95SBruce Richardson static void 1917a256a743SPavan Nikhilesh rxa_update_queue(struct event_eth_rx_adapter *rx_adapter, 1918a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int32_t rx_queue_id, 191999a2dd95SBruce Richardson uint8_t add) 192099a2dd95SBruce Richardson { 192199a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 192299a2dd95SBruce Richardson int enabled; 192399a2dd95SBruce Richardson uint16_t i; 192499a2dd95SBruce Richardson 192599a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) 192699a2dd95SBruce Richardson return; 192799a2dd95SBruce Richardson 192899a2dd95SBruce Richardson if (rx_queue_id == -1) { 192999a2dd95SBruce Richardson for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) 193099a2dd95SBruce Richardson rxa_update_queue(rx_adapter, dev_info, i, add); 193199a2dd95SBruce Richardson } else { 193299a2dd95SBruce Richardson queue_info = &dev_info->rx_queue[rx_queue_id]; 193399a2dd95SBruce Richardson enabled = queue_info->queue_enabled; 193499a2dd95SBruce Richardson if (add) { 193599a2dd95SBruce Richardson rx_adapter->nb_queues += !enabled; 193699a2dd95SBruce Richardson dev_info->nb_dev_queues += !enabled; 193799a2dd95SBruce Richardson } else { 193899a2dd95SBruce Richardson rx_adapter->nb_queues -= enabled; 193999a2dd95SBruce Richardson dev_info->nb_dev_queues -= enabled; 194099a2dd95SBruce Richardson } 194199a2dd95SBruce Richardson queue_info->queue_enabled = !!add; 194299a2dd95SBruce Richardson } 194399a2dd95SBruce Richardson } 194499a2dd95SBruce Richardson 194599a2dd95SBruce Richardson static void 194699a2dd95SBruce Richardson rxa_set_vector_data(struct eth_rx_queue_info *queue_info, uint16_t vector_count, 194799a2dd95SBruce Richardson uint64_t vector_ns, struct rte_mempool *mp, uint32_t qid, 194899a2dd95SBruce Richardson uint16_t port_id) 
194999a2dd95SBruce Richardson { 195099a2dd95SBruce Richardson #define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9) 195199a2dd95SBruce Richardson struct eth_rx_vector_data *vector_data; 195299a2dd95SBruce Richardson uint32_t flow_id; 195399a2dd95SBruce Richardson 195499a2dd95SBruce Richardson vector_data = &queue_info->vector_data; 195599a2dd95SBruce Richardson vector_data->max_vector_count = vector_count; 195699a2dd95SBruce Richardson vector_data->port = port_id; 195799a2dd95SBruce Richardson vector_data->queue = qid; 195899a2dd95SBruce Richardson vector_data->vector_pool = mp; 195999a2dd95SBruce Richardson vector_data->vector_timeout_ticks = 196099a2dd95SBruce Richardson NSEC2TICK(vector_ns, rte_get_timer_hz()); 196199a2dd95SBruce Richardson vector_data->ts = 0; 196299a2dd95SBruce Richardson flow_id = queue_info->event & 0xFFFFF; 196399a2dd95SBruce Richardson flow_id = 196499a2dd95SBruce Richardson flow_id == 0 ? (qid & 0xFFF) | (port_id & 0xFF) << 12 : flow_id; 196599a2dd95SBruce Richardson vector_data->event = (queue_info->event & ~0xFFFFF) | flow_id; 196699a2dd95SBruce Richardson } 196799a2dd95SBruce Richardson 196899a2dd95SBruce Richardson static void 1969a256a743SPavan Nikhilesh rxa_sw_del(struct event_eth_rx_adapter *rx_adapter, 1970a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int32_t rx_queue_id) 197199a2dd95SBruce Richardson { 197299a2dd95SBruce Richardson struct eth_rx_vector_data *vec; 197399a2dd95SBruce Richardson int pollq; 197499a2dd95SBruce Richardson int intrq; 197599a2dd95SBruce Richardson int sintrq; 197699a2dd95SBruce Richardson 197799a2dd95SBruce Richardson if (rx_adapter->nb_queues == 0) 197899a2dd95SBruce Richardson return; 197999a2dd95SBruce Richardson 198099a2dd95SBruce Richardson if (rx_queue_id == -1) { 198199a2dd95SBruce Richardson uint16_t nb_rx_queues; 198299a2dd95SBruce Richardson uint16_t i; 198399a2dd95SBruce Richardson 198499a2dd95SBruce Richardson nb_rx_queues = dev_info->dev->data->nb_rx_queues; 
198599a2dd95SBruce Richardson for (i = 0; i < nb_rx_queues; i++) 198699a2dd95SBruce Richardson rxa_sw_del(rx_adapter, dev_info, i); 198799a2dd95SBruce Richardson return; 198899a2dd95SBruce Richardson } 198999a2dd95SBruce Richardson 199099a2dd95SBruce Richardson /* Push all the partial event vectors to event device. */ 199199a2dd95SBruce Richardson TAILQ_FOREACH(vec, &rx_adapter->vector_list, next) { 199299a2dd95SBruce Richardson if (vec->queue != rx_queue_id) 199399a2dd95SBruce Richardson continue; 199499a2dd95SBruce Richardson rxa_vector_expire(vec, rx_adapter); 199599a2dd95SBruce Richardson TAILQ_REMOVE(&rx_adapter->vector_list, vec, next); 199699a2dd95SBruce Richardson } 199799a2dd95SBruce Richardson 199899a2dd95SBruce Richardson pollq = rxa_polled_queue(dev_info, rx_queue_id); 199999a2dd95SBruce Richardson intrq = rxa_intr_queue(dev_info, rx_queue_id); 200099a2dd95SBruce Richardson sintrq = rxa_shared_intr(dev_info, rx_queue_id); 200199a2dd95SBruce Richardson rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0); 200299a2dd95SBruce Richardson rx_adapter->num_rx_polled -= pollq; 200399a2dd95SBruce Richardson dev_info->nb_rx_poll -= pollq; 200499a2dd95SBruce Richardson rx_adapter->num_rx_intr -= intrq; 200599a2dd95SBruce Richardson dev_info->nb_rx_intr -= intrq; 200699a2dd95SBruce Richardson dev_info->nb_shared_intr -= intrq && sintrq; 2007b06bca69SNaga Harish K S V if (rx_adapter->use_queue_event_buf) { 2008a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *event_buf = 2009b06bca69SNaga Harish K S V dev_info->rx_queue[rx_queue_id].event_buf; 2010995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats = 2011995b150cSNaga Harish K S V dev_info->rx_queue[rx_queue_id].stats; 2012b06bca69SNaga Harish K S V rte_free(event_buf->events); 2013b06bca69SNaga Harish K S V rte_free(event_buf); 2014995b150cSNaga Harish K S V rte_free(stats); 2015b06bca69SNaga Harish K S V dev_info->rx_queue[rx_queue_id].event_buf = NULL; 2016995b150cSNaga Harish K S V 
dev_info->rx_queue[rx_queue_id].stats = NULL; 2017b06bca69SNaga Harish K S V } 201899a2dd95SBruce Richardson } 201999a2dd95SBruce Richardson 2020b06bca69SNaga Harish K S V static int 2021a256a743SPavan Nikhilesh rxa_add_queue(struct event_eth_rx_adapter *rx_adapter, 2022a256a743SPavan Nikhilesh struct eth_device_info *dev_info, int32_t rx_queue_id, 202399a2dd95SBruce Richardson const struct rte_event_eth_rx_adapter_queue_conf *conf) 202499a2dd95SBruce Richardson { 202599a2dd95SBruce Richardson struct eth_rx_queue_info *queue_info; 202699a2dd95SBruce Richardson const struct rte_event *ev = &conf->ev; 202799a2dd95SBruce Richardson int pollq; 202899a2dd95SBruce Richardson int intrq; 202999a2dd95SBruce Richardson int sintrq; 203099a2dd95SBruce Richardson struct rte_event *qi_ev; 2031a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *new_rx_buf = NULL; 2032995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *stats = NULL; 2033b06bca69SNaga Harish K S V uint16_t eth_dev_id = dev_info->dev->data->port_id; 2034b06bca69SNaga Harish K S V int ret; 203599a2dd95SBruce Richardson 203699a2dd95SBruce Richardson if (rx_queue_id == -1) { 203799a2dd95SBruce Richardson uint16_t nb_rx_queues; 203899a2dd95SBruce Richardson uint16_t i; 203999a2dd95SBruce Richardson 204099a2dd95SBruce Richardson nb_rx_queues = dev_info->dev->data->nb_rx_queues; 2041b06bca69SNaga Harish K S V for (i = 0; i < nb_rx_queues; i++) { 2042b06bca69SNaga Harish K S V ret = rxa_add_queue(rx_adapter, dev_info, i, conf); 2043b06bca69SNaga Harish K S V if (ret) 2044b06bca69SNaga Harish K S V return ret; 2045b06bca69SNaga Harish K S V } 2046b06bca69SNaga Harish K S V return 0; 204799a2dd95SBruce Richardson } 204899a2dd95SBruce Richardson 204999a2dd95SBruce Richardson pollq = rxa_polled_queue(dev_info, rx_queue_id); 205099a2dd95SBruce Richardson intrq = rxa_intr_queue(dev_info, rx_queue_id); 205199a2dd95SBruce Richardson sintrq = rxa_shared_intr(dev_info, rx_queue_id); 205299a2dd95SBruce Richardson 
205399a2dd95SBruce Richardson queue_info = &dev_info->rx_queue[rx_queue_id]; 205499a2dd95SBruce Richardson queue_info->wt = conf->servicing_weight; 205599a2dd95SBruce Richardson 205699a2dd95SBruce Richardson qi_ev = (struct rte_event *)&queue_info->event; 205799a2dd95SBruce Richardson qi_ev->event = ev->event; 205899a2dd95SBruce Richardson qi_ev->op = RTE_EVENT_OP_NEW; 205999a2dd95SBruce Richardson qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER; 206099a2dd95SBruce Richardson 206199a2dd95SBruce Richardson if (conf->rx_queue_flags & 206299a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) { 206399a2dd95SBruce Richardson queue_info->flow_id_mask = ~0; 206499a2dd95SBruce Richardson } else 206599a2dd95SBruce Richardson qi_ev->flow_id = 0; 206699a2dd95SBruce Richardson 2067929ebdd5SPavan Nikhilesh if (conf->rx_queue_flags & 2068929ebdd5SPavan Nikhilesh RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) { 2069929ebdd5SPavan Nikhilesh queue_info->ena_vector = 1; 2070929ebdd5SPavan Nikhilesh qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR; 2071929ebdd5SPavan Nikhilesh rxa_set_vector_data(queue_info, conf->vector_sz, 2072929ebdd5SPavan Nikhilesh conf->vector_timeout_ns, conf->vector_mp, 2073929ebdd5SPavan Nikhilesh rx_queue_id, dev_info->dev->data->port_id); 2074929ebdd5SPavan Nikhilesh rx_adapter->ena_vector = 1; 2075929ebdd5SPavan Nikhilesh rx_adapter->vector_tmo_ticks = 2076929ebdd5SPavan Nikhilesh rx_adapter->vector_tmo_ticks ? 
2077929ebdd5SPavan Nikhilesh RTE_MIN(queue_info->vector_data 2078929ebdd5SPavan Nikhilesh .vector_timeout_ticks >> 2079929ebdd5SPavan Nikhilesh 1, 2080929ebdd5SPavan Nikhilesh rx_adapter->vector_tmo_ticks) : 2081929ebdd5SPavan Nikhilesh queue_info->vector_data.vector_timeout_ticks >> 2082929ebdd5SPavan Nikhilesh 1; 2083929ebdd5SPavan Nikhilesh } 2084929ebdd5SPavan Nikhilesh 208599a2dd95SBruce Richardson rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1); 208699a2dd95SBruce Richardson if (rxa_polled_queue(dev_info, rx_queue_id)) { 208799a2dd95SBruce Richardson rx_adapter->num_rx_polled += !pollq; 208899a2dd95SBruce Richardson dev_info->nb_rx_poll += !pollq; 208999a2dd95SBruce Richardson rx_adapter->num_rx_intr -= intrq; 209099a2dd95SBruce Richardson dev_info->nb_rx_intr -= intrq; 209199a2dd95SBruce Richardson dev_info->nb_shared_intr -= intrq && sintrq; 209299a2dd95SBruce Richardson } 209399a2dd95SBruce Richardson 209499a2dd95SBruce Richardson if (rxa_intr_queue(dev_info, rx_queue_id)) { 209599a2dd95SBruce Richardson rx_adapter->num_rx_polled -= pollq; 209699a2dd95SBruce Richardson dev_info->nb_rx_poll -= pollq; 209799a2dd95SBruce Richardson rx_adapter->num_rx_intr += !intrq; 209899a2dd95SBruce Richardson dev_info->nb_rx_intr += !intrq; 209999a2dd95SBruce Richardson dev_info->nb_shared_intr += !intrq && sintrq; 210099a2dd95SBruce Richardson if (dev_info->nb_shared_intr == 1) { 210199a2dd95SBruce Richardson if (dev_info->multi_intr_cap) 210299a2dd95SBruce Richardson dev_info->next_q_idx = 210399a2dd95SBruce Richardson RTE_MAX_RXTX_INTR_VEC_ID - 1; 210499a2dd95SBruce Richardson else 210599a2dd95SBruce Richardson dev_info->next_q_idx = 0; 210699a2dd95SBruce Richardson } 210799a2dd95SBruce Richardson } 2108b06bca69SNaga Harish K S V 2109b06bca69SNaga Harish K S V if (!rx_adapter->use_queue_event_buf) 2110b06bca69SNaga Harish K S V return 0; 2111b06bca69SNaga Harish K S V 2112b06bca69SNaga Harish K S V new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", 
2113b06bca69SNaga Harish K S V sizeof(*new_rx_buf), 0, 2114b06bca69SNaga Harish K S V rte_eth_dev_socket_id(eth_dev_id)); 2115b06bca69SNaga Harish K S V if (new_rx_buf == NULL) { 2116b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " 2117b06bca69SNaga Harish K S V "dev_id: %d queue_id: %d", 2118b06bca69SNaga Harish K S V eth_dev_id, rx_queue_id); 2119b06bca69SNaga Harish K S V return -ENOMEM; 2120b06bca69SNaga Harish K S V } 2121b06bca69SNaga Harish K S V 2122b06bca69SNaga Harish K S V new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); 2123b06bca69SNaga Harish K S V new_rx_buf->events_size += (2 * BATCH_SIZE); 2124b06bca69SNaga Harish K S V new_rx_buf->events = rte_zmalloc_socket("rx_buffer", 2125b06bca69SNaga Harish K S V sizeof(struct rte_event) * 2126b06bca69SNaga Harish K S V new_rx_buf->events_size, 0, 2127b06bca69SNaga Harish K S V rte_eth_dev_socket_id(eth_dev_id)); 2128b06bca69SNaga Harish K S V if (new_rx_buf->events == NULL) { 2129b06bca69SNaga Harish K S V rte_free(new_rx_buf); 2130b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " 2131b06bca69SNaga Harish K S V "dev_id: %d queue_id: %d", 2132b06bca69SNaga Harish K S V eth_dev_id, rx_queue_id); 2133b06bca69SNaga Harish K S V return -ENOMEM; 2134b06bca69SNaga Harish K S V } 2135b06bca69SNaga Harish K S V 2136b06bca69SNaga Harish K S V queue_info->event_buf = new_rx_buf; 2137b06bca69SNaga Harish K S V 2138995b150cSNaga Harish K S V /* Allocate storage for adapter queue stats */ 2139995b150cSNaga Harish K S V stats = rte_zmalloc_socket("rx_queue_stats", 2140995b150cSNaga Harish K S V sizeof(*stats), 0, 2141995b150cSNaga Harish K S V rte_eth_dev_socket_id(eth_dev_id)); 2142995b150cSNaga Harish K S V if (stats == NULL) { 2143995b150cSNaga Harish K S V rte_free(new_rx_buf->events); 2144995b150cSNaga Harish K S V rte_free(new_rx_buf); 2145995b150cSNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to allocate stats storage for" 
2146995b150cSNaga Harish K S V " dev_id: %d queue_id: %d", 2147995b150cSNaga Harish K S V eth_dev_id, rx_queue_id); 2148995b150cSNaga Harish K S V return -ENOMEM; 2149995b150cSNaga Harish K S V } 2150995b150cSNaga Harish K S V 2151995b150cSNaga Harish K S V queue_info->stats = stats; 2152995b150cSNaga Harish K S V 2153b06bca69SNaga Harish K S V return 0; 215499a2dd95SBruce Richardson } 215599a2dd95SBruce Richardson 2156a256a743SPavan Nikhilesh static int 2157a256a743SPavan Nikhilesh rxa_sw_add(struct event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, 215899a2dd95SBruce Richardson int rx_queue_id, 215999a2dd95SBruce Richardson const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 216099a2dd95SBruce Richardson { 216199a2dd95SBruce Richardson struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; 216299a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_queue_conf temp_conf; 216399a2dd95SBruce Richardson int ret; 216499a2dd95SBruce Richardson struct eth_rx_poll_entry *rx_poll; 216599a2dd95SBruce Richardson struct eth_rx_queue_info *rx_queue; 216699a2dd95SBruce Richardson uint32_t *rx_wrr; 216799a2dd95SBruce Richardson uint16_t nb_rx_queues; 216899a2dd95SBruce Richardson uint32_t nb_rx_poll, nb_wrr; 216999a2dd95SBruce Richardson uint32_t nb_rx_intr; 217099a2dd95SBruce Richardson int num_intr_vec; 217199a2dd95SBruce Richardson uint16_t wt; 217299a2dd95SBruce Richardson 217399a2dd95SBruce Richardson if (queue_conf->servicing_weight == 0) { 217499a2dd95SBruce Richardson struct rte_eth_dev_data *data = dev_info->dev->data; 217599a2dd95SBruce Richardson 217699a2dd95SBruce Richardson temp_conf = *queue_conf; 217799a2dd95SBruce Richardson if (!data->dev_conf.intr_conf.rxq) { 217899a2dd95SBruce Richardson /* If Rx interrupts are disabled set wt = 1 */ 217999a2dd95SBruce Richardson temp_conf.servicing_weight = 1; 218099a2dd95SBruce Richardson } 218199a2dd95SBruce Richardson queue_conf = &temp_conf; 2182b06bca69SNaga Harish K S V 
2183b06bca69SNaga Harish K S V if (queue_conf->servicing_weight == 0 && 2184b06bca69SNaga Harish K S V rx_adapter->use_queue_event_buf) { 2185b06bca69SNaga Harish K S V 2186b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Use of queue level event buffer " 2187b06bca69SNaga Harish K S V "not supported for interrupt queues " 2188b06bca69SNaga Harish K S V "dev_id: %d queue_id: %d", 2189b06bca69SNaga Harish K S V eth_dev_id, rx_queue_id); 2190b06bca69SNaga Harish K S V return -EINVAL; 2191b06bca69SNaga Harish K S V } 219299a2dd95SBruce Richardson } 219399a2dd95SBruce Richardson 219499a2dd95SBruce Richardson nb_rx_queues = dev_info->dev->data->nb_rx_queues; 219599a2dd95SBruce Richardson rx_queue = dev_info->rx_queue; 219699a2dd95SBruce Richardson wt = queue_conf->servicing_weight; 219799a2dd95SBruce Richardson 219899a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) { 219999a2dd95SBruce Richardson dev_info->rx_queue = 220099a2dd95SBruce Richardson rte_zmalloc_socket(rx_adapter->mem_name, 220199a2dd95SBruce Richardson nb_rx_queues * 220299a2dd95SBruce Richardson sizeof(struct eth_rx_queue_info), 0, 220399a2dd95SBruce Richardson rx_adapter->socket_id); 220499a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) 220599a2dd95SBruce Richardson return -ENOMEM; 220699a2dd95SBruce Richardson } 220799a2dd95SBruce Richardson rx_wrr = NULL; 220899a2dd95SBruce Richardson rx_poll = NULL; 220999a2dd95SBruce Richardson 221099a2dd95SBruce Richardson rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id, 221199a2dd95SBruce Richardson queue_conf->servicing_weight, 221299a2dd95SBruce Richardson &nb_rx_poll, &nb_rx_intr, &nb_wrr); 221399a2dd95SBruce Richardson 221499a2dd95SBruce Richardson if (dev_info->dev->intr_handle) 221599a2dd95SBruce Richardson dev_info->multi_intr_cap = 221699a2dd95SBruce Richardson rte_intr_cap_multiple(dev_info->dev->intr_handle); 221799a2dd95SBruce Richardson 221899a2dd95SBruce Richardson ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr, 
221999a2dd95SBruce Richardson &rx_poll, &rx_wrr); 222099a2dd95SBruce Richardson if (ret) 222199a2dd95SBruce Richardson goto err_free_rxqueue; 222299a2dd95SBruce Richardson 222399a2dd95SBruce Richardson if (wt == 0) { 222499a2dd95SBruce Richardson num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1); 222599a2dd95SBruce Richardson 222699a2dd95SBruce Richardson ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec); 222799a2dd95SBruce Richardson if (ret) 222899a2dd95SBruce Richardson goto err_free_rxqueue; 222999a2dd95SBruce Richardson 223099a2dd95SBruce Richardson ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id); 223199a2dd95SBruce Richardson if (ret) 223299a2dd95SBruce Richardson goto err_free_rxqueue; 223399a2dd95SBruce Richardson } else { 223499a2dd95SBruce Richardson 223599a2dd95SBruce Richardson num_intr_vec = 0; 223699a2dd95SBruce Richardson if (rx_adapter->num_rx_intr > nb_rx_intr) { 223799a2dd95SBruce Richardson num_intr_vec = rxa_nb_intr_vect(dev_info, 223899a2dd95SBruce Richardson rx_queue_id, 0); 223999a2dd95SBruce Richardson /* interrupt based queues are being converted to 224099a2dd95SBruce Richardson * poll mode queues, delete the interrupt configuration 224199a2dd95SBruce Richardson * for those. 
224299a2dd95SBruce Richardson */ 224399a2dd95SBruce Richardson ret = rxa_del_intr_queue(rx_adapter, 224499a2dd95SBruce Richardson dev_info, rx_queue_id); 224599a2dd95SBruce Richardson if (ret) 224699a2dd95SBruce Richardson goto err_free_rxqueue; 224799a2dd95SBruce Richardson } 224899a2dd95SBruce Richardson } 224999a2dd95SBruce Richardson 225099a2dd95SBruce Richardson if (nb_rx_intr == 0) { 225199a2dd95SBruce Richardson ret = rxa_free_intr_resources(rx_adapter); 225299a2dd95SBruce Richardson if (ret) 225399a2dd95SBruce Richardson goto err_free_rxqueue; 225499a2dd95SBruce Richardson } 225599a2dd95SBruce Richardson 225699a2dd95SBruce Richardson if (wt == 0) { 225799a2dd95SBruce Richardson uint16_t i; 225899a2dd95SBruce Richardson 225999a2dd95SBruce Richardson if (rx_queue_id == -1) { 226099a2dd95SBruce Richardson for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) 226199a2dd95SBruce Richardson dev_info->intr_queue[i] = i; 226299a2dd95SBruce Richardson } else { 226399a2dd95SBruce Richardson if (!rxa_intr_queue(dev_info, rx_queue_id)) 226499a2dd95SBruce Richardson dev_info->intr_queue[nb_rx_intr - 1] = 226599a2dd95SBruce Richardson rx_queue_id; 226699a2dd95SBruce Richardson } 226799a2dd95SBruce Richardson } 226899a2dd95SBruce Richardson 226999a2dd95SBruce Richardson 227099a2dd95SBruce Richardson 2271b06bca69SNaga Harish K S V ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); 2272b06bca69SNaga Harish K S V if (ret) 2273b06bca69SNaga Harish K S V goto err_free_rxqueue; 227499a2dd95SBruce Richardson rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); 227599a2dd95SBruce Richardson 227699a2dd95SBruce Richardson rte_free(rx_adapter->eth_rx_poll); 227799a2dd95SBruce Richardson rte_free(rx_adapter->wrr_sched); 227899a2dd95SBruce Richardson 227999a2dd95SBruce Richardson rx_adapter->eth_rx_poll = rx_poll; 228099a2dd95SBruce Richardson rx_adapter->wrr_sched = rx_wrr; 228199a2dd95SBruce Richardson rx_adapter->wrr_len = nb_wrr; 228299a2dd95SBruce Richardson 
rx_adapter->num_intr_vec += num_intr_vec; 228399a2dd95SBruce Richardson return 0; 228499a2dd95SBruce Richardson 228599a2dd95SBruce Richardson err_free_rxqueue: 228699a2dd95SBruce Richardson if (rx_queue == NULL) { 228799a2dd95SBruce Richardson rte_free(dev_info->rx_queue); 228899a2dd95SBruce Richardson dev_info->rx_queue = NULL; 228999a2dd95SBruce Richardson } 229099a2dd95SBruce Richardson 229199a2dd95SBruce Richardson rte_free(rx_poll); 229299a2dd95SBruce Richardson rte_free(rx_wrr); 229399a2dd95SBruce Richardson 2294b06bca69SNaga Harish K S V return ret; 229599a2dd95SBruce Richardson } 229699a2dd95SBruce Richardson 229799a2dd95SBruce Richardson static int 229899a2dd95SBruce Richardson rxa_ctrl(uint8_t id, int start) 229999a2dd95SBruce Richardson { 2300a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 230199a2dd95SBruce Richardson struct rte_eventdev *dev; 230299a2dd95SBruce Richardson struct eth_device_info *dev_info; 230399a2dd95SBruce Richardson uint32_t i; 230499a2dd95SBruce Richardson int use_service = 0; 230599a2dd95SBruce Richardson int stop = !start; 230699a2dd95SBruce Richardson 230799a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 230899a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 230999a2dd95SBruce Richardson if (rx_adapter == NULL) 231099a2dd95SBruce Richardson return -EINVAL; 231199a2dd95SBruce Richardson 231299a2dd95SBruce Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 231399a2dd95SBruce Richardson 231499a2dd95SBruce Richardson RTE_ETH_FOREACH_DEV(i) { 231599a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[i]; 231699a2dd95SBruce Richardson /* if start check for num dev queues */ 231799a2dd95SBruce Richardson if (start && !dev_info->nb_dev_queues) 231899a2dd95SBruce Richardson continue; 231999a2dd95SBruce Richardson /* if stop check if dev has been started */ 232099a2dd95SBruce Richardson if (stop && !dev_info->dev_rx_started) 232199a2dd95SBruce Richardson 
continue; 232299a2dd95SBruce Richardson use_service |= !dev_info->internal_event_port; 232399a2dd95SBruce Richardson dev_info->dev_rx_started = start; 232499a2dd95SBruce Richardson if (dev_info->internal_event_port == 0) 232599a2dd95SBruce Richardson continue; 232699a2dd95SBruce Richardson start ? (*dev->dev_ops->eth_rx_adapter_start)(dev, 232799a2dd95SBruce Richardson &rte_eth_devices[i]) : 232899a2dd95SBruce Richardson (*dev->dev_ops->eth_rx_adapter_stop)(dev, 232999a2dd95SBruce Richardson &rte_eth_devices[i]); 233099a2dd95SBruce Richardson } 233199a2dd95SBruce Richardson 233299a2dd95SBruce Richardson if (use_service) { 233399a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->rx_lock); 233499a2dd95SBruce Richardson rx_adapter->rxa_started = start; 233599a2dd95SBruce Richardson rte_service_runstate_set(rx_adapter->service_id, start); 233699a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 233799a2dd95SBruce Richardson } 233899a2dd95SBruce Richardson 233999a2dd95SBruce Richardson return 0; 234099a2dd95SBruce Richardson } 234199a2dd95SBruce Richardson 2342bc0df25cSNaga Harish K S V static int 2343bc0df25cSNaga Harish K S V rxa_create(uint8_t id, uint8_t dev_id, 2344bc0df25cSNaga Harish K S V struct rte_event_eth_rx_adapter_params *rxa_params, 234599a2dd95SBruce Richardson rte_event_eth_rx_adapter_conf_cb conf_cb, 234699a2dd95SBruce Richardson void *conf_arg) 234799a2dd95SBruce Richardson { 2348a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 2349a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *buf; 2350bc0df25cSNaga Harish K S V struct rte_event *events; 235199a2dd95SBruce Richardson int ret; 235299a2dd95SBruce Richardson int socket_id; 235399a2dd95SBruce Richardson uint16_t i; 235499a2dd95SBruce Richardson char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN]; 235599a2dd95SBruce Richardson const uint8_t default_rss_key[] = { 235699a2dd95SBruce Richardson 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 235799a2dd95SBruce 
Richardson 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 235899a2dd95SBruce Richardson 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 235999a2dd95SBruce Richardson 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 236099a2dd95SBruce Richardson 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa, 236199a2dd95SBruce Richardson }; 236299a2dd95SBruce Richardson 236399a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 236499a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 2365bc0df25cSNaga Harish K S V 236699a2dd95SBruce Richardson if (conf_cb == NULL) 236799a2dd95SBruce Richardson return -EINVAL; 236899a2dd95SBruce Richardson 236999a2dd95SBruce Richardson if (event_eth_rx_adapter == NULL) { 237099a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_init(); 237199a2dd95SBruce Richardson if (ret) 237299a2dd95SBruce Richardson return ret; 237399a2dd95SBruce Richardson } 237499a2dd95SBruce Richardson 237599a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 237699a2dd95SBruce Richardson if (rx_adapter != NULL) { 237799a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id); 237899a2dd95SBruce Richardson return -EEXIST; 237999a2dd95SBruce Richardson } 238099a2dd95SBruce Richardson 238199a2dd95SBruce Richardson socket_id = rte_event_dev_socket_id(dev_id); 238299a2dd95SBruce Richardson snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN, 238399a2dd95SBruce Richardson "rte_event_eth_rx_adapter_%d", 238499a2dd95SBruce Richardson id); 238599a2dd95SBruce Richardson 238699a2dd95SBruce Richardson rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter), 238799a2dd95SBruce Richardson RTE_CACHE_LINE_SIZE, socket_id); 238899a2dd95SBruce Richardson if (rx_adapter == NULL) { 238999a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for rx adapter"); 239099a2dd95SBruce Richardson return -ENOMEM; 239199a2dd95SBruce Richardson } 239299a2dd95SBruce Richardson 239399a2dd95SBruce Richardson 
rx_adapter->eventdev_id = dev_id; 239499a2dd95SBruce Richardson rx_adapter->socket_id = socket_id; 239599a2dd95SBruce Richardson rx_adapter->conf_cb = conf_cb; 239699a2dd95SBruce Richardson rx_adapter->conf_arg = conf_arg; 239799a2dd95SBruce Richardson rx_adapter->id = id; 239899a2dd95SBruce Richardson TAILQ_INIT(&rx_adapter->vector_list); 239999a2dd95SBruce Richardson strcpy(rx_adapter->mem_name, mem_name); 240099a2dd95SBruce Richardson rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name, 240199a2dd95SBruce Richardson RTE_MAX_ETHPORTS * 240299a2dd95SBruce Richardson sizeof(struct eth_device_info), 0, 240399a2dd95SBruce Richardson socket_id); 240499a2dd95SBruce Richardson rte_convert_rss_key((const uint32_t *)default_rss_key, 240599a2dd95SBruce Richardson (uint32_t *)rx_adapter->rss_key_be, 240699a2dd95SBruce Richardson RTE_DIM(default_rss_key)); 240799a2dd95SBruce Richardson 240899a2dd95SBruce Richardson if (rx_adapter->eth_devices == NULL) { 240999a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n"); 241099a2dd95SBruce Richardson rte_free(rx_adapter); 241199a2dd95SBruce Richardson return -ENOMEM; 241299a2dd95SBruce Richardson } 2413bc0df25cSNaga Harish K S V 241499a2dd95SBruce Richardson rte_spinlock_init(&rx_adapter->rx_lock); 2415bc0df25cSNaga Harish K S V 241699a2dd95SBruce Richardson for (i = 0; i < RTE_MAX_ETHPORTS; i++) 241799a2dd95SBruce Richardson rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; 241899a2dd95SBruce Richardson 2419bc0df25cSNaga Harish K S V /* Rx adapter event buffer allocation */ 2420b06bca69SNaga Harish K S V rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; 2421b06bca69SNaga Harish K S V 2422b06bca69SNaga Harish K S V if (!rx_adapter->use_queue_event_buf) { 2423bc0df25cSNaga Harish K S V buf = &rx_adapter->event_enqueue_buffer; 2424bc0df25cSNaga Harish K S V buf->events_size = rxa_params->event_buf_size; 2425bc0df25cSNaga Harish K S V 2426bc0df25cSNaga Harish K S V events = 
rte_zmalloc_socket(rx_adapter->mem_name, 2427bc0df25cSNaga Harish K S V buf->events_size * sizeof(*events), 2428bc0df25cSNaga Harish K S V 0, socket_id); 2429bc0df25cSNaga Harish K S V if (events == NULL) { 2430b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to allocate memory " 2431b06bca69SNaga Harish K S V "for adapter event buffer"); 2432bc0df25cSNaga Harish K S V rte_free(rx_adapter->eth_devices); 2433bc0df25cSNaga Harish K S V rte_free(rx_adapter); 2434bc0df25cSNaga Harish K S V return -ENOMEM; 2435bc0df25cSNaga Harish K S V } 2436bc0df25cSNaga Harish K S V 2437bc0df25cSNaga Harish K S V rx_adapter->event_enqueue_buffer.events = events; 2438b06bca69SNaga Harish K S V } 2439bc0df25cSNaga Harish K S V 244099a2dd95SBruce Richardson event_eth_rx_adapter[id] = rx_adapter; 2441bc0df25cSNaga Harish K S V 244299a2dd95SBruce Richardson if (conf_cb == rxa_default_conf_cb) 244399a2dd95SBruce Richardson rx_adapter->default_cb_arg = 1; 244483ab470dSGanapati Kundapura 244583ab470dSGanapati Kundapura if (rte_mbuf_dyn_rx_timestamp_register( 244683ab470dSGanapati Kundapura &event_eth_rx_timestamp_dynfield_offset, 244783ab470dSGanapati Kundapura &event_eth_rx_timestamp_dynflag) != 0) { 244883ab470dSGanapati Kundapura RTE_EDEV_LOG_ERR("Error registering timestamp field in mbuf\n"); 244983ab470dSGanapati Kundapura return -rte_errno; 245083ab470dSGanapati Kundapura } 245183ab470dSGanapati Kundapura 245299a2dd95SBruce Richardson rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, 245399a2dd95SBruce Richardson conf_arg); 245499a2dd95SBruce Richardson return 0; 245599a2dd95SBruce Richardson } 245699a2dd95SBruce Richardson 245799a2dd95SBruce Richardson int 2458bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, 2459bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_conf_cb conf_cb, 2460bc0df25cSNaga Harish K S V void *conf_arg) 2461bc0df25cSNaga Harish K S V { 2462bc0df25cSNaga Harish K S V struct rte_event_eth_rx_adapter_params 
rxa_params = {0}; 2463bc0df25cSNaga Harish K S V 2464bc0df25cSNaga Harish K S V /* use default values for adapter params */ 2465bc0df25cSNaga Harish K S V rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; 2466b06bca69SNaga Harish K S V rxa_params.use_queue_event_buf = false; 2467bc0df25cSNaga Harish K S V 2468bc0df25cSNaga Harish K S V return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); 2469bc0df25cSNaga Harish K S V } 2470bc0df25cSNaga Harish K S V 2471bc0df25cSNaga Harish K S V int 2472bc0df25cSNaga Harish K S V rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, 2473bc0df25cSNaga Harish K S V struct rte_event_port_conf *port_config, 2474bc0df25cSNaga Harish K S V struct rte_event_eth_rx_adapter_params *rxa_params) 2475bc0df25cSNaga Harish K S V { 2476bc0df25cSNaga Harish K S V struct rte_event_port_conf *pc; 2477bc0df25cSNaga Harish K S V int ret; 2478bc0df25cSNaga Harish K S V struct rte_event_eth_rx_adapter_params temp_params = {0}; 2479bc0df25cSNaga Harish K S V 2480bc0df25cSNaga Harish K S V if (port_config == NULL) 2481bc0df25cSNaga Harish K S V return -EINVAL; 2482bc0df25cSNaga Harish K S V 2483bc0df25cSNaga Harish K S V if (rxa_params == NULL) { 2484b06bca69SNaga Harish K S V /* use default values if rxa_params is NULL */ 2485bc0df25cSNaga Harish K S V rxa_params = &temp_params; 2486bc0df25cSNaga Harish K S V rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; 2487b06bca69SNaga Harish K S V rxa_params->use_queue_event_buf = false; 2488b06bca69SNaga Harish K S V } else if ((!rxa_params->use_queue_event_buf && 2489b06bca69SNaga Harish K S V rxa_params->event_buf_size == 0) || 2490b06bca69SNaga Harish K S V (rxa_params->use_queue_event_buf && 2491b06bca69SNaga Harish K S V rxa_params->event_buf_size != 0)) { 2492b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid adapter params\n"); 2493bc0df25cSNaga Harish K S V return -EINVAL; 2494b06bca69SNaga Harish K S V } else if (!rxa_params->use_queue_event_buf) { 2495b06bca69SNaga 
Harish K S V /* adjust event buff size with BATCH_SIZE used for fetching 2496b06bca69SNaga Harish K S V * packets from NIC rx queues to get full buffer utilization 2497b06bca69SNaga Harish K S V * and prevent unnecessary rollovers. 2498b06bca69SNaga Harish K S V */ 2499b06bca69SNaga Harish K S V 2500b06bca69SNaga Harish K S V rxa_params->event_buf_size = 2501b06bca69SNaga Harish K S V RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); 2502b06bca69SNaga Harish K S V rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); 2503b06bca69SNaga Harish K S V } 2504bc0df25cSNaga Harish K S V 2505bc0df25cSNaga Harish K S V pc = rte_malloc(NULL, sizeof(*pc), 0); 2506bc0df25cSNaga Harish K S V if (pc == NULL) 2507bc0df25cSNaga Harish K S V return -ENOMEM; 2508bc0df25cSNaga Harish K S V 2509bc0df25cSNaga Harish K S V *pc = *port_config; 2510bc0df25cSNaga Harish K S V 2511bc0df25cSNaga Harish K S V ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); 2512bc0df25cSNaga Harish K S V if (ret) 2513bc0df25cSNaga Harish K S V rte_free(pc); 2514bc0df25cSNaga Harish K S V 2515bc0df25cSNaga Harish K S V return ret; 2516bc0df25cSNaga Harish K S V } 2517bc0df25cSNaga Harish K S V 2518bc0df25cSNaga Harish K S V int 251999a2dd95SBruce Richardson rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, 252099a2dd95SBruce Richardson struct rte_event_port_conf *port_config) 252199a2dd95SBruce Richardson { 252299a2dd95SBruce Richardson struct rte_event_port_conf *pc; 252399a2dd95SBruce Richardson int ret; 252499a2dd95SBruce Richardson 252599a2dd95SBruce Richardson if (port_config == NULL) 252699a2dd95SBruce Richardson return -EINVAL; 2527bc0df25cSNaga Harish K S V 252899a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 252999a2dd95SBruce Richardson 253099a2dd95SBruce Richardson pc = rte_malloc(NULL, sizeof(*pc), 0); 253199a2dd95SBruce Richardson if (pc == NULL) 253299a2dd95SBruce Richardson return -ENOMEM; 253399a2dd95SBruce Richardson *pc = 
*port_config; 2534bc0df25cSNaga Harish K S V 253599a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, 253699a2dd95SBruce Richardson rxa_default_conf_cb, 253799a2dd95SBruce Richardson pc); 253899a2dd95SBruce Richardson if (ret) 253999a2dd95SBruce Richardson rte_free(pc); 254099a2dd95SBruce Richardson return ret; 254199a2dd95SBruce Richardson } 254299a2dd95SBruce Richardson 254399a2dd95SBruce Richardson int 254499a2dd95SBruce Richardson rte_event_eth_rx_adapter_free(uint8_t id) 254599a2dd95SBruce Richardson { 2546a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 254799a2dd95SBruce Richardson 2548a1793ee8SGanapati Kundapura if (rxa_memzone_lookup()) 2549a1793ee8SGanapati Kundapura return -ENOMEM; 2550a1793ee8SGanapati Kundapura 255199a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 255299a2dd95SBruce Richardson 255399a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 255499a2dd95SBruce Richardson if (rx_adapter == NULL) 255599a2dd95SBruce Richardson return -EINVAL; 255699a2dd95SBruce Richardson 255799a2dd95SBruce Richardson if (rx_adapter->nb_queues) { 255899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("%" PRIu16 " Rx queues not deleted", 255999a2dd95SBruce Richardson rx_adapter->nb_queues); 256099a2dd95SBruce Richardson return -EBUSY; 256199a2dd95SBruce Richardson } 256299a2dd95SBruce Richardson 256399a2dd95SBruce Richardson if (rx_adapter->default_cb_arg) 256499a2dd95SBruce Richardson rte_free(rx_adapter->conf_arg); 256599a2dd95SBruce Richardson rte_free(rx_adapter->eth_devices); 2566b06bca69SNaga Harish K S V if (!rx_adapter->use_queue_event_buf) 2567bc0df25cSNaga Harish K S V rte_free(rx_adapter->event_enqueue_buffer.events); 256899a2dd95SBruce Richardson rte_free(rx_adapter); 256999a2dd95SBruce Richardson event_eth_rx_adapter[id] = NULL; 257099a2dd95SBruce Richardson 257199a2dd95SBruce Richardson rte_eventdev_trace_eth_rx_adapter_free(id); 257299a2dd95SBruce Richardson return 0; 
257399a2dd95SBruce Richardson } 257499a2dd95SBruce Richardson 257599a2dd95SBruce Richardson int 257699a2dd95SBruce Richardson rte_event_eth_rx_adapter_queue_add(uint8_t id, 257799a2dd95SBruce Richardson uint16_t eth_dev_id, 257899a2dd95SBruce Richardson int32_t rx_queue_id, 257999a2dd95SBruce Richardson const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 258099a2dd95SBruce Richardson { 258199a2dd95SBruce Richardson int ret; 258299a2dd95SBruce Richardson uint32_t cap; 2583a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 258499a2dd95SBruce Richardson struct rte_eventdev *dev; 258599a2dd95SBruce Richardson struct eth_device_info *dev_info; 2586929ebdd5SPavan Nikhilesh struct rte_event_eth_rx_adapter_vector_limits limits; 258799a2dd95SBruce Richardson 2588a1793ee8SGanapati Kundapura if (rxa_memzone_lookup()) 2589a1793ee8SGanapati Kundapura return -ENOMEM; 2590a1793ee8SGanapati Kundapura 259199a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 259299a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 259399a2dd95SBruce Richardson 259499a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 259599a2dd95SBruce Richardson if ((rx_adapter == NULL) || (queue_conf == NULL)) 259699a2dd95SBruce Richardson return -EINVAL; 259799a2dd95SBruce Richardson 259899a2dd95SBruce Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 259999a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, 260099a2dd95SBruce Richardson eth_dev_id, 260199a2dd95SBruce Richardson &cap); 260299a2dd95SBruce Richardson if (ret) { 260399a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8 260499a2dd95SBruce Richardson "eth port %" PRIu16, id, eth_dev_id); 260599a2dd95SBruce Richardson return ret; 260699a2dd95SBruce Richardson } 260799a2dd95SBruce Richardson 260899a2dd95SBruce Richardson if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0 
260999a2dd95SBruce Richardson && (queue_conf->rx_queue_flags & 261099a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) { 261199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Flow ID override is not supported," 261299a2dd95SBruce Richardson " eth port: %" PRIu16 " adapter id: %" PRIu8, 261399a2dd95SBruce Richardson eth_dev_id, id); 261499a2dd95SBruce Richardson return -EINVAL; 261599a2dd95SBruce Richardson } 261699a2dd95SBruce Richardson 2617929ebdd5SPavan Nikhilesh if (queue_conf->rx_queue_flags & 2618929ebdd5SPavan Nikhilesh RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) { 2619929ebdd5SPavan Nikhilesh 2620929ebdd5SPavan Nikhilesh if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) { 262199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Event vectorization is not supported," 2622929ebdd5SPavan Nikhilesh " eth port: %" PRIu16 2623929ebdd5SPavan Nikhilesh " adapter id: %" PRIu8, 262499a2dd95SBruce Richardson eth_dev_id, id); 262599a2dd95SBruce Richardson return -EINVAL; 262699a2dd95SBruce Richardson } 262799a2dd95SBruce Richardson 2628929ebdd5SPavan Nikhilesh ret = rte_event_eth_rx_adapter_vector_limits_get( 2629929ebdd5SPavan Nikhilesh rx_adapter->eventdev_id, eth_dev_id, &limits); 2630929ebdd5SPavan Nikhilesh if (ret < 0) { 2631929ebdd5SPavan Nikhilesh RTE_EDEV_LOG_ERR("Failed to get event device vector limits," 2632929ebdd5SPavan Nikhilesh " eth port: %" PRIu16 2633929ebdd5SPavan Nikhilesh " adapter id: %" PRIu8, 2634929ebdd5SPavan Nikhilesh eth_dev_id, id); 2635929ebdd5SPavan Nikhilesh return -EINVAL; 2636929ebdd5SPavan Nikhilesh } 2637929ebdd5SPavan Nikhilesh if (queue_conf->vector_sz < limits.min_sz || 2638929ebdd5SPavan Nikhilesh queue_conf->vector_sz > limits.max_sz || 2639929ebdd5SPavan Nikhilesh queue_conf->vector_timeout_ns < limits.min_timeout_ns || 2640929ebdd5SPavan Nikhilesh queue_conf->vector_timeout_ns > limits.max_timeout_ns || 2641929ebdd5SPavan Nikhilesh queue_conf->vector_mp == NULL) { 2642929ebdd5SPavan Nikhilesh 
RTE_EDEV_LOG_ERR("Invalid event vector configuration," 2643929ebdd5SPavan Nikhilesh " eth port: %" PRIu16 2644929ebdd5SPavan Nikhilesh " adapter id: %" PRIu8, 2645929ebdd5SPavan Nikhilesh eth_dev_id, id); 2646929ebdd5SPavan Nikhilesh return -EINVAL; 2647929ebdd5SPavan Nikhilesh } 2648929ebdd5SPavan Nikhilesh if (queue_conf->vector_mp->elt_size < 2649929ebdd5SPavan Nikhilesh (sizeof(struct rte_event_vector) + 2650929ebdd5SPavan Nikhilesh (sizeof(uintptr_t) * queue_conf->vector_sz))) { 2651929ebdd5SPavan Nikhilesh RTE_EDEV_LOG_ERR("Invalid event vector configuration," 2652929ebdd5SPavan Nikhilesh " eth port: %" PRIu16 2653929ebdd5SPavan Nikhilesh " adapter id: %" PRIu8, 2654929ebdd5SPavan Nikhilesh eth_dev_id, id); 2655929ebdd5SPavan Nikhilesh return -EINVAL; 2656929ebdd5SPavan Nikhilesh } 2657929ebdd5SPavan Nikhilesh } 2658929ebdd5SPavan Nikhilesh 265999a2dd95SBruce Richardson if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 && 266099a2dd95SBruce Richardson (rx_queue_id != -1)) { 266199a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Rx queues can only be connected to single " 266299a2dd95SBruce Richardson "event queue, eth port: %" PRIu16 " adapter id: %" 266399a2dd95SBruce Richardson PRIu8, eth_dev_id, id); 266499a2dd95SBruce Richardson return -EINVAL; 266599a2dd95SBruce Richardson } 266699a2dd95SBruce Richardson 266799a2dd95SBruce Richardson if (rx_queue_id != -1 && (uint16_t)rx_queue_id >= 266899a2dd95SBruce Richardson rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 266999a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, 267099a2dd95SBruce Richardson (uint16_t)rx_queue_id); 267199a2dd95SBruce Richardson return -EINVAL; 267299a2dd95SBruce Richardson } 267399a2dd95SBruce Richardson 2674b06bca69SNaga Harish K S V if ((rx_adapter->use_queue_event_buf && 2675b06bca69SNaga Harish K S V queue_conf->event_buf_size == 0) || 2676b06bca69SNaga Harish K S V (!rx_adapter->use_queue_event_buf && 2677b06bca69SNaga Harish K S V 
queue_conf->event_buf_size != 0)) { 2678b06bca69SNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); 2679b06bca69SNaga Harish K S V return -EINVAL; 2680b06bca69SNaga Harish K S V } 2681b06bca69SNaga Harish K S V 268299a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[eth_dev_id]; 268399a2dd95SBruce Richardson 268499a2dd95SBruce Richardson if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { 26858f1d23ecSDavid Marchand if (*dev->dev_ops->eth_rx_adapter_queue_add == NULL) 26868f1d23ecSDavid Marchand return -ENOTSUP; 268799a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) { 268899a2dd95SBruce Richardson dev_info->rx_queue = 268999a2dd95SBruce Richardson rte_zmalloc_socket(rx_adapter->mem_name, 269099a2dd95SBruce Richardson dev_info->dev->data->nb_rx_queues * 269199a2dd95SBruce Richardson sizeof(struct eth_rx_queue_info), 0, 269299a2dd95SBruce Richardson rx_adapter->socket_id); 269399a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) 269499a2dd95SBruce Richardson return -ENOMEM; 269599a2dd95SBruce Richardson } 269699a2dd95SBruce Richardson 269799a2dd95SBruce Richardson ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev, 269899a2dd95SBruce Richardson &rte_eth_devices[eth_dev_id], 269999a2dd95SBruce Richardson rx_queue_id, queue_conf); 270099a2dd95SBruce Richardson if (ret == 0) { 270199a2dd95SBruce Richardson dev_info->internal_event_port = 1; 270299a2dd95SBruce Richardson rxa_update_queue(rx_adapter, 270399a2dd95SBruce Richardson &rx_adapter->eth_devices[eth_dev_id], 270499a2dd95SBruce Richardson rx_queue_id, 270599a2dd95SBruce Richardson 1); 270699a2dd95SBruce Richardson } 270799a2dd95SBruce Richardson } else { 270899a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->rx_lock); 270999a2dd95SBruce Richardson dev_info->internal_event_port = 0; 271099a2dd95SBruce Richardson ret = rxa_init_service(rx_adapter, id); 271199a2dd95SBruce Richardson if (ret == 0) { 271299a2dd95SBruce Richardson uint32_t service_id = 
rx_adapter->service_id; 271399a2dd95SBruce Richardson ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id, 271499a2dd95SBruce Richardson queue_conf); 271599a2dd95SBruce Richardson rte_service_component_runstate_set(service_id, 271699a2dd95SBruce Richardson rxa_sw_adapter_queue_count(rx_adapter)); 271799a2dd95SBruce Richardson } 271899a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 271999a2dd95SBruce Richardson } 272099a2dd95SBruce Richardson 272199a2dd95SBruce Richardson rte_eventdev_trace_eth_rx_adapter_queue_add(id, eth_dev_id, 272299a2dd95SBruce Richardson rx_queue_id, queue_conf, ret); 272399a2dd95SBruce Richardson if (ret) 272499a2dd95SBruce Richardson return ret; 272599a2dd95SBruce Richardson 272699a2dd95SBruce Richardson return 0; 272799a2dd95SBruce Richardson } 272899a2dd95SBruce Richardson 272999a2dd95SBruce Richardson static int 273099a2dd95SBruce Richardson rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits) 273199a2dd95SBruce Richardson { 273299a2dd95SBruce Richardson limits->max_sz = MAX_VECTOR_SIZE; 273399a2dd95SBruce Richardson limits->min_sz = MIN_VECTOR_SIZE; 273499a2dd95SBruce Richardson limits->max_timeout_ns = MAX_VECTOR_NS; 273599a2dd95SBruce Richardson limits->min_timeout_ns = MIN_VECTOR_NS; 273699a2dd95SBruce Richardson 273799a2dd95SBruce Richardson return 0; 273899a2dd95SBruce Richardson } 273999a2dd95SBruce Richardson 274099a2dd95SBruce Richardson int 274199a2dd95SBruce Richardson rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id, 274299a2dd95SBruce Richardson int32_t rx_queue_id) 274399a2dd95SBruce Richardson { 274499a2dd95SBruce Richardson int ret = 0; 274599a2dd95SBruce Richardson struct rte_eventdev *dev; 2746a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 274799a2dd95SBruce Richardson struct eth_device_info *dev_info; 274899a2dd95SBruce Richardson uint32_t cap; 274999a2dd95SBruce Richardson uint32_t nb_rx_poll = 0; 275099a2dd95SBruce Richardson uint32_t 
nb_wrr = 0; 275199a2dd95SBruce Richardson uint32_t nb_rx_intr; 275299a2dd95SBruce Richardson struct eth_rx_poll_entry *rx_poll = NULL; 275399a2dd95SBruce Richardson uint32_t *rx_wrr = NULL; 275499a2dd95SBruce Richardson int num_intr_vec; 275599a2dd95SBruce Richardson 2756a1793ee8SGanapati Kundapura if (rxa_memzone_lookup()) 2757a1793ee8SGanapati Kundapura return -ENOMEM; 2758a1793ee8SGanapati Kundapura 275999a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 276099a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 276199a2dd95SBruce Richardson 276299a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 276399a2dd95SBruce Richardson if (rx_adapter == NULL) 276499a2dd95SBruce Richardson return -EINVAL; 276599a2dd95SBruce Richardson 276699a2dd95SBruce Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 276799a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, 276899a2dd95SBruce Richardson eth_dev_id, 276999a2dd95SBruce Richardson &cap); 277099a2dd95SBruce Richardson if (ret) 277199a2dd95SBruce Richardson return ret; 277299a2dd95SBruce Richardson 277399a2dd95SBruce Richardson if (rx_queue_id != -1 && (uint16_t)rx_queue_id >= 277499a2dd95SBruce Richardson rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 277599a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, 277699a2dd95SBruce Richardson (uint16_t)rx_queue_id); 277799a2dd95SBruce Richardson return -EINVAL; 277899a2dd95SBruce Richardson } 277999a2dd95SBruce Richardson 278099a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[eth_dev_id]; 278199a2dd95SBruce Richardson 278299a2dd95SBruce Richardson if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { 27838f1d23ecSDavid Marchand if (*dev->dev_ops->eth_rx_adapter_queue_del == NULL) 27848f1d23ecSDavid Marchand return -ENOTSUP; 278599a2dd95SBruce Richardson ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev, 278699a2dd95SBruce 
Richardson &rte_eth_devices[eth_dev_id], 278799a2dd95SBruce Richardson rx_queue_id); 278899a2dd95SBruce Richardson if (ret == 0) { 278999a2dd95SBruce Richardson rxa_update_queue(rx_adapter, 279099a2dd95SBruce Richardson &rx_adapter->eth_devices[eth_dev_id], 279199a2dd95SBruce Richardson rx_queue_id, 279299a2dd95SBruce Richardson 0); 279399a2dd95SBruce Richardson if (dev_info->nb_dev_queues == 0) { 279499a2dd95SBruce Richardson rte_free(dev_info->rx_queue); 279599a2dd95SBruce Richardson dev_info->rx_queue = NULL; 279699a2dd95SBruce Richardson } 279799a2dd95SBruce Richardson } 279899a2dd95SBruce Richardson } else { 279999a2dd95SBruce Richardson rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id, 280099a2dd95SBruce Richardson &nb_rx_poll, &nb_rx_intr, &nb_wrr); 280199a2dd95SBruce Richardson 280299a2dd95SBruce Richardson ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr, 280399a2dd95SBruce Richardson &rx_poll, &rx_wrr); 280499a2dd95SBruce Richardson if (ret) 280599a2dd95SBruce Richardson return ret; 280699a2dd95SBruce Richardson 280799a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->rx_lock); 280899a2dd95SBruce Richardson 280999a2dd95SBruce Richardson num_intr_vec = 0; 281099a2dd95SBruce Richardson if (rx_adapter->num_rx_intr > nb_rx_intr) { 281199a2dd95SBruce Richardson 281299a2dd95SBruce Richardson num_intr_vec = rxa_nb_intr_vect(dev_info, 281399a2dd95SBruce Richardson rx_queue_id, 0); 281499a2dd95SBruce Richardson ret = rxa_del_intr_queue(rx_adapter, dev_info, 281599a2dd95SBruce Richardson rx_queue_id); 281699a2dd95SBruce Richardson if (ret) 281799a2dd95SBruce Richardson goto unlock_ret; 281899a2dd95SBruce Richardson } 281999a2dd95SBruce Richardson 282099a2dd95SBruce Richardson if (nb_rx_intr == 0) { 282199a2dd95SBruce Richardson ret = rxa_free_intr_resources(rx_adapter); 282299a2dd95SBruce Richardson if (ret) 282399a2dd95SBruce Richardson goto unlock_ret; 282499a2dd95SBruce Richardson } 282599a2dd95SBruce Richardson 282699a2dd95SBruce Richardson 
rxa_sw_del(rx_adapter, dev_info, rx_queue_id); 282799a2dd95SBruce Richardson rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); 282899a2dd95SBruce Richardson 282999a2dd95SBruce Richardson rte_free(rx_adapter->eth_rx_poll); 283099a2dd95SBruce Richardson rte_free(rx_adapter->wrr_sched); 283199a2dd95SBruce Richardson 283299a2dd95SBruce Richardson if (nb_rx_intr == 0) { 283399a2dd95SBruce Richardson rte_free(dev_info->intr_queue); 283499a2dd95SBruce Richardson dev_info->intr_queue = NULL; 283599a2dd95SBruce Richardson } 283699a2dd95SBruce Richardson 283799a2dd95SBruce Richardson rx_adapter->eth_rx_poll = rx_poll; 283899a2dd95SBruce Richardson rx_adapter->wrr_sched = rx_wrr; 283999a2dd95SBruce Richardson rx_adapter->wrr_len = nb_wrr; 284081da8a5fSNaga Harish K S V /* 284181da8a5fSNaga Harish K S V * reset next poll start position (wrr_pos) to avoid buffer 284281da8a5fSNaga Harish K S V * overrun when wrr_len is reduced in case of queue delete 284381da8a5fSNaga Harish K S V */ 284481da8a5fSNaga Harish K S V rx_adapter->wrr_pos = 0; 284599a2dd95SBruce Richardson rx_adapter->num_intr_vec += num_intr_vec; 284699a2dd95SBruce Richardson 284799a2dd95SBruce Richardson if (dev_info->nb_dev_queues == 0) { 284899a2dd95SBruce Richardson rte_free(dev_info->rx_queue); 284999a2dd95SBruce Richardson dev_info->rx_queue = NULL; 285099a2dd95SBruce Richardson } 285199a2dd95SBruce Richardson unlock_ret: 285299a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 285399a2dd95SBruce Richardson if (ret) { 285499a2dd95SBruce Richardson rte_free(rx_poll); 285599a2dd95SBruce Richardson rte_free(rx_wrr); 285699a2dd95SBruce Richardson return ret; 285799a2dd95SBruce Richardson } 285899a2dd95SBruce Richardson 285999a2dd95SBruce Richardson rte_service_component_runstate_set(rx_adapter->service_id, 286099a2dd95SBruce Richardson rxa_sw_adapter_queue_count(rx_adapter)); 286199a2dd95SBruce Richardson } 286299a2dd95SBruce Richardson 286399a2dd95SBruce Richardson 
rte_eventdev_trace_eth_rx_adapter_queue_del(id, eth_dev_id, 286499a2dd95SBruce Richardson rx_queue_id, ret); 2865a1793ee8SGanapati Kundapura 286699a2dd95SBruce Richardson return ret; 286799a2dd95SBruce Richardson } 286899a2dd95SBruce Richardson 286999a2dd95SBruce Richardson int 287099a2dd95SBruce Richardson rte_event_eth_rx_adapter_vector_limits_get( 287199a2dd95SBruce Richardson uint8_t dev_id, uint16_t eth_port_id, 287299a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_vector_limits *limits) 287399a2dd95SBruce Richardson { 287499a2dd95SBruce Richardson struct rte_eventdev *dev; 287599a2dd95SBruce Richardson uint32_t cap; 287699a2dd95SBruce Richardson int ret; 287799a2dd95SBruce Richardson 287899a2dd95SBruce Richardson RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 287999a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); 288099a2dd95SBruce Richardson 288199a2dd95SBruce Richardson if (limits == NULL) 288299a2dd95SBruce Richardson return -EINVAL; 288399a2dd95SBruce Richardson 288499a2dd95SBruce Richardson dev = &rte_eventdevs[dev_id]; 288599a2dd95SBruce Richardson 288699a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &cap); 288799a2dd95SBruce Richardson if (ret) { 288899a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8 288999a2dd95SBruce Richardson "eth port %" PRIu16, 289099a2dd95SBruce Richardson dev_id, eth_port_id); 289199a2dd95SBruce Richardson return ret; 289299a2dd95SBruce Richardson } 289399a2dd95SBruce Richardson 289499a2dd95SBruce Richardson if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { 28958f1d23ecSDavid Marchand if (*dev->dev_ops->eth_rx_adapter_vector_limits_get == NULL) 28968f1d23ecSDavid Marchand return -ENOTSUP; 289799a2dd95SBruce Richardson ret = dev->dev_ops->eth_rx_adapter_vector_limits_get( 289899a2dd95SBruce Richardson dev, &rte_eth_devices[eth_port_id], limits); 289999a2dd95SBruce Richardson } else { 290099a2dd95SBruce 
Richardson ret = rxa_sw_vector_limits(limits); 290199a2dd95SBruce Richardson } 290299a2dd95SBruce Richardson 290399a2dd95SBruce Richardson return ret; 290499a2dd95SBruce Richardson } 290599a2dd95SBruce Richardson 290699a2dd95SBruce Richardson int 290799a2dd95SBruce Richardson rte_event_eth_rx_adapter_start(uint8_t id) 290899a2dd95SBruce Richardson { 290999a2dd95SBruce Richardson rte_eventdev_trace_eth_rx_adapter_start(id); 291099a2dd95SBruce Richardson return rxa_ctrl(id, 1); 291199a2dd95SBruce Richardson } 291299a2dd95SBruce Richardson 291399a2dd95SBruce Richardson int 291499a2dd95SBruce Richardson rte_event_eth_rx_adapter_stop(uint8_t id) 291599a2dd95SBruce Richardson { 291699a2dd95SBruce Richardson rte_eventdev_trace_eth_rx_adapter_stop(id); 291799a2dd95SBruce Richardson return rxa_ctrl(id, 0); 291899a2dd95SBruce Richardson } 291999a2dd95SBruce Richardson 2920995b150cSNaga Harish K S V static inline void 2921995b150cSNaga Harish K S V rxa_queue_stats_reset(struct eth_rx_queue_info *queue_info) 2922995b150cSNaga Harish K S V { 2923995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *q_stats; 2924995b150cSNaga Harish K S V 2925995b150cSNaga Harish K S V q_stats = queue_info->stats; 2926995b150cSNaga Harish K S V memset(q_stats, 0, sizeof(*q_stats)); 2927995b150cSNaga Harish K S V } 2928995b150cSNaga Harish K S V 292999a2dd95SBruce Richardson int 293099a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_get(uint8_t id, 293199a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_stats *stats) 293299a2dd95SBruce Richardson { 2933a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 2934a256a743SPavan Nikhilesh struct eth_event_enqueue_buffer *buf; 293599a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 }; 293699a2dd95SBruce Richardson struct rte_event_eth_rx_adapter_stats dev_stats; 293799a2dd95SBruce Richardson struct rte_eventdev *dev; 293899a2dd95SBruce Richardson struct eth_device_info *dev_info; 
2939995b150cSNaga Harish K S V struct eth_rx_queue_info *queue_info; 2940995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *q_stats; 2941995b150cSNaga Harish K S V uint32_t i, j; 294299a2dd95SBruce Richardson int ret; 294399a2dd95SBruce Richardson 2944da781e64SGanapati Kundapura if (rxa_memzone_lookup()) 2945da781e64SGanapati Kundapura return -ENOMEM; 2946da781e64SGanapati Kundapura 294799a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 294899a2dd95SBruce Richardson 294999a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 295099a2dd95SBruce Richardson if (rx_adapter == NULL || stats == NULL) 295199a2dd95SBruce Richardson return -EINVAL; 295299a2dd95SBruce Richardson 295399a2dd95SBruce Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 295499a2dd95SBruce Richardson memset(stats, 0, sizeof(*stats)); 2955995b150cSNaga Harish K S V 2956995b150cSNaga Harish K S V if (rx_adapter->service_inited) 2957995b150cSNaga Harish K S V *stats = rx_adapter->stats; 2958995b150cSNaga Harish K S V 295999a2dd95SBruce Richardson RTE_ETH_FOREACH_DEV(i) { 296099a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[i]; 2961995b150cSNaga Harish K S V 2962995b150cSNaga Harish K S V if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) { 2963995b150cSNaga Harish K S V 2964995b150cSNaga Harish K S V for (j = 0; j < dev_info->dev->data->nb_rx_queues; 2965995b150cSNaga Harish K S V j++) { 2966995b150cSNaga Harish K S V queue_info = &dev_info->rx_queue[j]; 2967995b150cSNaga Harish K S V if (!queue_info->queue_enabled) 2968995b150cSNaga Harish K S V continue; 2969995b150cSNaga Harish K S V q_stats = queue_info->stats; 2970995b150cSNaga Harish K S V 2971995b150cSNaga Harish K S V stats->rx_packets += q_stats->rx_packets; 2972995b150cSNaga Harish K S V stats->rx_poll_count += q_stats->rx_poll_count; 2973995b150cSNaga Harish K S V stats->rx_enq_count += q_stats->rx_enq_count; 2974995b150cSNaga Harish K S V stats->rx_enq_retry 
+= q_stats->rx_enq_retry; 2975995b150cSNaga Harish K S V stats->rx_dropped += q_stats->rx_dropped; 2976995b150cSNaga Harish K S V stats->rx_enq_block_cycles += 2977995b150cSNaga Harish K S V q_stats->rx_enq_block_cycles; 2978995b150cSNaga Harish K S V } 2979995b150cSNaga Harish K S V } 2980995b150cSNaga Harish K S V 298199a2dd95SBruce Richardson if (dev_info->internal_event_port == 0 || 298299a2dd95SBruce Richardson dev->dev_ops->eth_rx_adapter_stats_get == NULL) 298399a2dd95SBruce Richardson continue; 298499a2dd95SBruce Richardson ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev, 298599a2dd95SBruce Richardson &rte_eth_devices[i], 298699a2dd95SBruce Richardson &dev_stats); 298799a2dd95SBruce Richardson if (ret) 298899a2dd95SBruce Richardson continue; 298999a2dd95SBruce Richardson dev_stats_sum.rx_packets += dev_stats.rx_packets; 299099a2dd95SBruce Richardson dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count; 299199a2dd95SBruce Richardson } 299299a2dd95SBruce Richardson 2993995b150cSNaga Harish K S V buf = &rx_adapter->event_enqueue_buffer; 299499a2dd95SBruce Richardson stats->rx_packets += dev_stats_sum.rx_packets; 299599a2dd95SBruce Richardson stats->rx_enq_count += dev_stats_sum.rx_enq_count; 2996814d0170SGanapati Kundapura stats->rx_event_buf_count = buf->count; 2997814d0170SGanapati Kundapura stats->rx_event_buf_size = buf->events_size; 2998995b150cSNaga Harish K S V 2999995b150cSNaga Harish K S V return 0; 3000995b150cSNaga Harish K S V } 3001995b150cSNaga Harish K S V 3002995b150cSNaga Harish K S V int 3003995b150cSNaga Harish K S V rte_event_eth_rx_adapter_queue_stats_get(uint8_t id, 3004995b150cSNaga Harish K S V uint16_t eth_dev_id, 3005995b150cSNaga Harish K S V uint16_t rx_queue_id, 3006995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_queue_stats *stats) 3007995b150cSNaga Harish K S V { 3008995b150cSNaga Harish K S V struct event_eth_rx_adapter *rx_adapter; 3009995b150cSNaga Harish K S V struct eth_device_info *dev_info; 3010995b150cSNaga 
Harish K S V struct eth_rx_queue_info *queue_info; 3011995b150cSNaga Harish K S V struct eth_event_enqueue_buffer *event_buf; 3012995b150cSNaga Harish K S V struct rte_event_eth_rx_adapter_stats *q_stats; 3013995b150cSNaga Harish K S V struct rte_eventdev *dev; 3014995b150cSNaga Harish K S V 3015995b150cSNaga Harish K S V if (rxa_memzone_lookup()) 3016995b150cSNaga Harish K S V return -ENOMEM; 3017995b150cSNaga Harish K S V 3018995b150cSNaga Harish K S V RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 3019995b150cSNaga Harish K S V RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 3020995b150cSNaga Harish K S V 3021995b150cSNaga Harish K S V rx_adapter = rxa_id_to_adapter(id); 3022995b150cSNaga Harish K S V 3023995b150cSNaga Harish K S V if (rx_adapter == NULL || stats == NULL) 3024995b150cSNaga Harish K S V return -EINVAL; 3025995b150cSNaga Harish K S V 3026995b150cSNaga Harish K S V if (!rx_adapter->use_queue_event_buf) 3027995b150cSNaga Harish K S V return -EINVAL; 3028995b150cSNaga Harish K S V 3029995b150cSNaga Harish K S V if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 3030995b150cSNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id); 3031995b150cSNaga Harish K S V return -EINVAL; 3032995b150cSNaga Harish K S V } 3033995b150cSNaga Harish K S V 3034995b150cSNaga Harish K S V dev_info = &rx_adapter->eth_devices[eth_dev_id]; 3035995b150cSNaga Harish K S V if (dev_info->rx_queue == NULL || 3036995b150cSNaga Harish K S V !dev_info->rx_queue[rx_queue_id].queue_enabled) { 3037995b150cSNaga Harish K S V RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id); 3038995b150cSNaga Harish K S V return -EINVAL; 3039995b150cSNaga Harish K S V } 3040995b150cSNaga Harish K S V 3041de3c3a2fSPavan Nikhilesh if (dev_info->internal_event_port == 0) { 3042995b150cSNaga Harish K S V queue_info = &dev_info->rx_queue[rx_queue_id]; 3043995b150cSNaga Harish K S V event_buf = queue_info->event_buf; 3044995b150cSNaga Harish K 
S V q_stats = queue_info->stats; 3045995b150cSNaga Harish K S V 3046995b150cSNaga Harish K S V stats->rx_event_buf_count = event_buf->count; 3047995b150cSNaga Harish K S V stats->rx_event_buf_size = event_buf->events_size; 3048995b150cSNaga Harish K S V stats->rx_packets = q_stats->rx_packets; 3049995b150cSNaga Harish K S V stats->rx_poll_count = q_stats->rx_poll_count; 3050995b150cSNaga Harish K S V stats->rx_dropped = q_stats->rx_dropped; 3051de3c3a2fSPavan Nikhilesh } 3052995b150cSNaga Harish K S V 3053995b150cSNaga Harish K S V dev = &rte_eventdevs[rx_adapter->eventdev_id]; 3054995b150cSNaga Harish K S V if (dev->dev_ops->eth_rx_adapter_queue_stats_get != NULL) { 3055995b150cSNaga Harish K S V return (*dev->dev_ops->eth_rx_adapter_queue_stats_get)(dev, 3056995b150cSNaga Harish K S V &rte_eth_devices[eth_dev_id], 3057995b150cSNaga Harish K S V rx_queue_id, stats); 3058814d0170SGanapati Kundapura } 3059814d0170SGanapati Kundapura 306099a2dd95SBruce Richardson return 0; 306199a2dd95SBruce Richardson } 306299a2dd95SBruce Richardson 306399a2dd95SBruce Richardson int 306499a2dd95SBruce Richardson rte_event_eth_rx_adapter_stats_reset(uint8_t id) 306599a2dd95SBruce Richardson { 3066a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 306799a2dd95SBruce Richardson struct rte_eventdev *dev; 306899a2dd95SBruce Richardson struct eth_device_info *dev_info; 3069995b150cSNaga Harish K S V struct eth_rx_queue_info *queue_info; 3070995b150cSNaga Harish K S V uint32_t i, j; 307199a2dd95SBruce Richardson 3072da781e64SGanapati Kundapura if (rxa_memzone_lookup()) 3073da781e64SGanapati Kundapura return -ENOMEM; 3074da781e64SGanapati Kundapura 307599a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 307699a2dd95SBruce Richardson 307799a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 307899a2dd95SBruce Richardson if (rx_adapter == NULL) 307999a2dd95SBruce Richardson return -EINVAL; 308099a2dd95SBruce Richardson 308199a2dd95SBruce 
Richardson dev = &rte_eventdevs[rx_adapter->eventdev_id]; 3082995b150cSNaga Harish K S V 308399a2dd95SBruce Richardson RTE_ETH_FOREACH_DEV(i) { 308499a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[i]; 3085995b150cSNaga Harish K S V 3086995b150cSNaga Harish K S V if (rx_adapter->use_queue_event_buf && dev_info->rx_queue) { 3087995b150cSNaga Harish K S V 3088995b150cSNaga Harish K S V for (j = 0; j < dev_info->dev->data->nb_rx_queues; 3089995b150cSNaga Harish K S V j++) { 3090995b150cSNaga Harish K S V queue_info = &dev_info->rx_queue[j]; 3091995b150cSNaga Harish K S V if (!queue_info->queue_enabled) 3092995b150cSNaga Harish K S V continue; 3093995b150cSNaga Harish K S V rxa_queue_stats_reset(queue_info); 3094995b150cSNaga Harish K S V } 3095995b150cSNaga Harish K S V } 3096995b150cSNaga Harish K S V 309799a2dd95SBruce Richardson if (dev_info->internal_event_port == 0 || 309899a2dd95SBruce Richardson dev->dev_ops->eth_rx_adapter_stats_reset == NULL) 309999a2dd95SBruce Richardson continue; 310099a2dd95SBruce Richardson (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev, 310199a2dd95SBruce Richardson &rte_eth_devices[i]); 310299a2dd95SBruce Richardson } 310399a2dd95SBruce Richardson 310499a2dd95SBruce Richardson memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats)); 3105995b150cSNaga Harish K S V 3106995b150cSNaga Harish K S V return 0; 3107995b150cSNaga Harish K S V } 3108995b150cSNaga Harish K S V 3109995b150cSNaga Harish K S V int 3110995b150cSNaga Harish K S V rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id, 3111995b150cSNaga Harish K S V uint16_t eth_dev_id, 3112995b150cSNaga Harish K S V uint16_t rx_queue_id) 3113995b150cSNaga Harish K S V { 3114995b150cSNaga Harish K S V struct event_eth_rx_adapter *rx_adapter; 3115995b150cSNaga Harish K S V struct eth_device_info *dev_info; 3116995b150cSNaga Harish K S V struct eth_rx_queue_info *queue_info; 3117995b150cSNaga Harish K S V struct rte_eventdev *dev; 3118995b150cSNaga Harish K S V 
3119995b150cSNaga Harish K S V if (rxa_memzone_lookup()) 3120995b150cSNaga Harish K S V return -ENOMEM; 3121995b150cSNaga Harish K S V 3122995b150cSNaga Harish K S V RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 3123995b150cSNaga Harish K S V RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 3124995b150cSNaga Harish K S V 3125995b150cSNaga Harish K S V rx_adapter = rxa_id_to_adapter(id); 3126995b150cSNaga Harish K S V if (rx_adapter == NULL) 3127995b150cSNaga Harish K S V return -EINVAL; 3128995b150cSNaga Harish K S V 3129995b150cSNaga Harish K S V if (!rx_adapter->use_queue_event_buf) 3130995b150cSNaga Harish K S V return -EINVAL; 3131995b150cSNaga Harish K S V 3132995b150cSNaga Harish K S V if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 3133995b150cSNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16, rx_queue_id); 3134995b150cSNaga Harish K S V return -EINVAL; 3135995b150cSNaga Harish K S V } 3136995b150cSNaga Harish K S V 3137995b150cSNaga Harish K S V dev_info = &rx_adapter->eth_devices[eth_dev_id]; 3138995b150cSNaga Harish K S V 3139995b150cSNaga Harish K S V if (dev_info->rx_queue == NULL || 3140995b150cSNaga Harish K S V !dev_info->rx_queue[rx_queue_id].queue_enabled) { 3141995b150cSNaga Harish K S V RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id); 3142995b150cSNaga Harish K S V return -EINVAL; 3143995b150cSNaga Harish K S V } 3144995b150cSNaga Harish K S V 3145de3c3a2fSPavan Nikhilesh if (dev_info->internal_event_port == 0) { 3146995b150cSNaga Harish K S V queue_info = &dev_info->rx_queue[rx_queue_id]; 3147995b150cSNaga Harish K S V rxa_queue_stats_reset(queue_info); 3148de3c3a2fSPavan Nikhilesh } 3149995b150cSNaga Harish K S V 3150995b150cSNaga Harish K S V dev = &rte_eventdevs[rx_adapter->eventdev_id]; 3151995b150cSNaga Harish K S V if (dev->dev_ops->eth_rx_adapter_queue_stats_reset != NULL) { 3152995b150cSNaga Harish K S V return (*dev->dev_ops->eth_rx_adapter_queue_stats_reset)(dev, 
3153995b150cSNaga Harish K S V &rte_eth_devices[eth_dev_id], 3154995b150cSNaga Harish K S V rx_queue_id); 3155995b150cSNaga Harish K S V } 3156995b150cSNaga Harish K S V 315799a2dd95SBruce Richardson return 0; 315899a2dd95SBruce Richardson } 315999a2dd95SBruce Richardson 316099a2dd95SBruce Richardson int 316199a2dd95SBruce Richardson rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id) 316299a2dd95SBruce Richardson { 3163a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 316499a2dd95SBruce Richardson 3165da781e64SGanapati Kundapura if (rxa_memzone_lookup()) 3166da781e64SGanapati Kundapura return -ENOMEM; 3167da781e64SGanapati Kundapura 316899a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 316999a2dd95SBruce Richardson 317099a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 317199a2dd95SBruce Richardson if (rx_adapter == NULL || service_id == NULL) 317299a2dd95SBruce Richardson return -EINVAL; 317399a2dd95SBruce Richardson 317499a2dd95SBruce Richardson if (rx_adapter->service_inited) 317599a2dd95SBruce Richardson *service_id = rx_adapter->service_id; 317699a2dd95SBruce Richardson 317799a2dd95SBruce Richardson return rx_adapter->service_inited ? 
0 : -ESRCH; 317899a2dd95SBruce Richardson } 317999a2dd95SBruce Richardson 318099a2dd95SBruce Richardson int 31816ff23631SNaga Harish K S V rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id) 31826ff23631SNaga Harish K S V { 31836ff23631SNaga Harish K S V struct event_eth_rx_adapter *rx_adapter; 31846ff23631SNaga Harish K S V 31856ff23631SNaga Harish K S V if (rxa_memzone_lookup()) 31866ff23631SNaga Harish K S V return -ENOMEM; 31876ff23631SNaga Harish K S V 31886ff23631SNaga Harish K S V RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 31896ff23631SNaga Harish K S V 31906ff23631SNaga Harish K S V rx_adapter = rxa_id_to_adapter(id); 31916ff23631SNaga Harish K S V if (rx_adapter == NULL || event_port_id == NULL) 31926ff23631SNaga Harish K S V return -EINVAL; 31936ff23631SNaga Harish K S V 31946ff23631SNaga Harish K S V if (rx_adapter->service_inited) 31956ff23631SNaga Harish K S V *event_port_id = rx_adapter->event_port_id; 31966ff23631SNaga Harish K S V 31976ff23631SNaga Harish K S V return rx_adapter->service_inited ? 
0 : -ESRCH; 31986ff23631SNaga Harish K S V } 31996ff23631SNaga Harish K S V 32006ff23631SNaga Harish K S V int 320199a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_register(uint8_t id, 320299a2dd95SBruce Richardson uint16_t eth_dev_id, 320399a2dd95SBruce Richardson rte_event_eth_rx_adapter_cb_fn cb_fn, 320499a2dd95SBruce Richardson void *cb_arg) 320599a2dd95SBruce Richardson { 3206a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 320799a2dd95SBruce Richardson struct eth_device_info *dev_info; 320899a2dd95SBruce Richardson uint32_t cap; 320999a2dd95SBruce Richardson int ret; 321099a2dd95SBruce Richardson 321199a2dd95SBruce Richardson RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 321299a2dd95SBruce Richardson RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 321399a2dd95SBruce Richardson 321499a2dd95SBruce Richardson rx_adapter = rxa_id_to_adapter(id); 321599a2dd95SBruce Richardson if (rx_adapter == NULL) 321699a2dd95SBruce Richardson return -EINVAL; 321799a2dd95SBruce Richardson 321899a2dd95SBruce Richardson dev_info = &rx_adapter->eth_devices[eth_dev_id]; 321999a2dd95SBruce Richardson if (dev_info->rx_queue == NULL) 322099a2dd95SBruce Richardson return -EINVAL; 322199a2dd95SBruce Richardson 322299a2dd95SBruce Richardson ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, 322399a2dd95SBruce Richardson eth_dev_id, 322499a2dd95SBruce Richardson &cap); 322599a2dd95SBruce Richardson if (ret) { 322699a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8 322799a2dd95SBruce Richardson "eth port %" PRIu16, id, eth_dev_id); 322899a2dd95SBruce Richardson return ret; 322999a2dd95SBruce Richardson } 323099a2dd95SBruce Richardson 323199a2dd95SBruce Richardson if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { 323299a2dd95SBruce Richardson RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %" 323399a2dd95SBruce Richardson PRIu16, eth_dev_id); 323499a2dd95SBruce Richardson return -EINVAL; 
323599a2dd95SBruce Richardson } 323699a2dd95SBruce Richardson 323799a2dd95SBruce Richardson rte_spinlock_lock(&rx_adapter->rx_lock); 323899a2dd95SBruce Richardson dev_info->cb_fn = cb_fn; 323999a2dd95SBruce Richardson dev_info->cb_arg = cb_arg; 324099a2dd95SBruce Richardson rte_spinlock_unlock(&rx_adapter->rx_lock); 324199a2dd95SBruce Richardson 324299a2dd95SBruce Richardson return 0; 324399a2dd95SBruce Richardson } 3244da781e64SGanapati Kundapura 3245da781e64SGanapati Kundapura int 3246da781e64SGanapati Kundapura rte_event_eth_rx_adapter_queue_conf_get(uint8_t id, 3247da781e64SGanapati Kundapura uint16_t eth_dev_id, 3248da781e64SGanapati Kundapura uint16_t rx_queue_id, 3249da781e64SGanapati Kundapura struct rte_event_eth_rx_adapter_queue_conf *queue_conf) 3250da781e64SGanapati Kundapura { 325195138712SNaga Harish K S V #define TICK2NSEC(_ticks, _freq) (((_ticks) * (1E9)) / (_freq)) 3252da781e64SGanapati Kundapura struct rte_eventdev *dev; 3253a256a743SPavan Nikhilesh struct event_eth_rx_adapter *rx_adapter; 3254da781e64SGanapati Kundapura struct eth_device_info *dev_info; 3255da781e64SGanapati Kundapura struct eth_rx_queue_info *queue_info; 3256da781e64SGanapati Kundapura int ret; 3257da781e64SGanapati Kundapura 3258da781e64SGanapati Kundapura if (rxa_memzone_lookup()) 3259da781e64SGanapati Kundapura return -ENOMEM; 3260da781e64SGanapati Kundapura 3261da781e64SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); 3262da781e64SGanapati Kundapura RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL); 3263da781e64SGanapati Kundapura 3264da781e64SGanapati Kundapura if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 3265da781e64SGanapati Kundapura RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); 3266da781e64SGanapati Kundapura return -EINVAL; 3267da781e64SGanapati Kundapura } 3268da781e64SGanapati Kundapura 3269da781e64SGanapati Kundapura if (queue_conf == NULL) { 3270da781e64SGanapati Kundapura RTE_EDEV_LOG_ERR("Rx 
queue conf struct cannot be NULL"); 3271da781e64SGanapati Kundapura return -EINVAL; 3272da781e64SGanapati Kundapura } 3273da781e64SGanapati Kundapura 3274da781e64SGanapati Kundapura rx_adapter = rxa_id_to_adapter(id); 3275da781e64SGanapati Kundapura if (rx_adapter == NULL) 3276da781e64SGanapati Kundapura return -EINVAL; 3277da781e64SGanapati Kundapura 3278da781e64SGanapati Kundapura dev_info = &rx_adapter->eth_devices[eth_dev_id]; 3279da781e64SGanapati Kundapura if (dev_info->rx_queue == NULL || 3280da781e64SGanapati Kundapura !dev_info->rx_queue[rx_queue_id].queue_enabled) { 3281da781e64SGanapati Kundapura RTE_EDEV_LOG_ERR("Rx queue %u not added", rx_queue_id); 3282da781e64SGanapati Kundapura return -EINVAL; 3283da781e64SGanapati Kundapura } 3284da781e64SGanapati Kundapura 3285da781e64SGanapati Kundapura queue_info = &dev_info->rx_queue[rx_queue_id]; 3286da781e64SGanapati Kundapura 3287da781e64SGanapati Kundapura memset(queue_conf, 0, sizeof(*queue_conf)); 3288da781e64SGanapati Kundapura queue_conf->rx_queue_flags = 0; 3289da781e64SGanapati Kundapura if (queue_info->flow_id_mask != 0) 3290da781e64SGanapati Kundapura queue_conf->rx_queue_flags |= 3291da781e64SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; 3292da781e64SGanapati Kundapura queue_conf->servicing_weight = queue_info->wt; 3293da781e64SGanapati Kundapura 329495138712SNaga Harish K S V queue_conf->ev.event = queue_info->event; 329595138712SNaga Harish K S V 329695138712SNaga Harish K S V queue_conf->vector_sz = queue_info->vector_data.max_vector_count; 329795138712SNaga Harish K S V queue_conf->vector_mp = queue_info->vector_data.vector_pool; 329895138712SNaga Harish K S V /* need to be converted from ticks to ns */ 329995138712SNaga Harish K S V queue_conf->vector_timeout_ns = TICK2NSEC( 330095138712SNaga Harish K S V queue_info->vector_data.vector_timeout_ticks, rte_get_timer_hz()); 330195138712SNaga Harish K S V 330295138712SNaga Harish K S V if (queue_info->event_buf != NULL) 
330395138712SNaga Harish K S V queue_conf->event_buf_size = queue_info->event_buf->events_size; 330495138712SNaga Harish K S V else 330595138712SNaga Harish K S V queue_conf->event_buf_size = 0; 3306da781e64SGanapati Kundapura 3307da781e64SGanapati Kundapura dev = &rte_eventdevs[rx_adapter->eventdev_id]; 3308da781e64SGanapati Kundapura if (dev->dev_ops->eth_rx_adapter_queue_conf_get != NULL) { 3309da781e64SGanapati Kundapura ret = (*dev->dev_ops->eth_rx_adapter_queue_conf_get)(dev, 3310da781e64SGanapati Kundapura &rte_eth_devices[eth_dev_id], 3311da781e64SGanapati Kundapura rx_queue_id, 3312da781e64SGanapati Kundapura queue_conf); 3313da781e64SGanapati Kundapura return ret; 3314da781e64SGanapati Kundapura } 3315da781e64SGanapati Kundapura 3316da781e64SGanapati Kundapura return 0; 3317da781e64SGanapati Kundapura } 3318814d0170SGanapati Kundapura 3319a1793ee8SGanapati Kundapura static int 3320a1793ee8SGanapati Kundapura rxa_is_queue_added(struct event_eth_rx_adapter *rx_adapter, 3321a1793ee8SGanapati Kundapura uint16_t eth_dev_id, 3322a1793ee8SGanapati Kundapura uint16_t rx_queue_id) 3323a1793ee8SGanapati Kundapura { 3324a1793ee8SGanapati Kundapura struct eth_device_info *dev_info; 3325a1793ee8SGanapati Kundapura struct eth_rx_queue_info *queue_info; 3326a1793ee8SGanapati Kundapura 3327a1793ee8SGanapati Kundapura if (!rx_adapter->eth_devices) 3328a1793ee8SGanapati Kundapura return 0; 3329a1793ee8SGanapati Kundapura 3330a1793ee8SGanapati Kundapura dev_info = &rx_adapter->eth_devices[eth_dev_id]; 3331a1793ee8SGanapati Kundapura if (!dev_info || !dev_info->rx_queue) 3332a1793ee8SGanapati Kundapura return 0; 3333a1793ee8SGanapati Kundapura 3334a1793ee8SGanapati Kundapura queue_info = &dev_info->rx_queue[rx_queue_id]; 3335a1793ee8SGanapati Kundapura 3336a1793ee8SGanapati Kundapura return queue_info && queue_info->queue_enabled; 3337a1793ee8SGanapati Kundapura } 3338a1793ee8SGanapati Kundapura 3339a1793ee8SGanapati Kundapura #define rxa_evdev(rx_adapter) 
(&rte_eventdevs[(rx_adapter)->eventdev_id]) 3340a1793ee8SGanapati Kundapura 3341a1793ee8SGanapati Kundapura #define rxa_dev_instance_get(rx_adapter) \ 3342a1793ee8SGanapati Kundapura rxa_evdev((rx_adapter))->dev_ops->eth_rx_adapter_instance_get 3343a1793ee8SGanapati Kundapura 3344a1793ee8SGanapati Kundapura int 3345a1793ee8SGanapati Kundapura rte_event_eth_rx_adapter_instance_get(uint16_t eth_dev_id, 3346a1793ee8SGanapati Kundapura uint16_t rx_queue_id, 3347a1793ee8SGanapati Kundapura uint8_t *rxa_inst_id) 3348a1793ee8SGanapati Kundapura { 3349a1793ee8SGanapati Kundapura uint8_t id; 3350a1793ee8SGanapati Kundapura int ret = -EINVAL; 3351a1793ee8SGanapati Kundapura uint32_t caps; 3352a1793ee8SGanapati Kundapura struct event_eth_rx_adapter *rx_adapter; 3353a1793ee8SGanapati Kundapura 3354a1793ee8SGanapati Kundapura if (rxa_memzone_lookup()) 3355a1793ee8SGanapati Kundapura return -ENOMEM; 3356a1793ee8SGanapati Kundapura 3357a1793ee8SGanapati Kundapura if (eth_dev_id >= rte_eth_dev_count_avail()) { 3358a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("Invalid ethernet port id %u", eth_dev_id); 3359a1793ee8SGanapati Kundapura return -EINVAL; 3360a1793ee8SGanapati Kundapura } 3361a1793ee8SGanapati Kundapura 3362a1793ee8SGanapati Kundapura if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 3363a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("Invalid Rx queue %u", rx_queue_id); 3364a1793ee8SGanapati Kundapura return -EINVAL; 3365a1793ee8SGanapati Kundapura } 3366a1793ee8SGanapati Kundapura 3367a1793ee8SGanapati Kundapura if (rxa_inst_id == NULL) { 3368a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("rxa_inst_id cannot be NULL"); 3369a1793ee8SGanapati Kundapura return -EINVAL; 3370a1793ee8SGanapati Kundapura } 3371a1793ee8SGanapati Kundapura 3372a1793ee8SGanapati Kundapura /* Iterate through all adapter instances */ 3373a1793ee8SGanapati Kundapura for (id = 0; id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; id++) { 3374a1793ee8SGanapati Kundapura rx_adapter = 
rxa_id_to_adapter(id); 3375a1793ee8SGanapati Kundapura if (!rx_adapter) 3376a1793ee8SGanapati Kundapura continue; 3377a1793ee8SGanapati Kundapura 3378a1793ee8SGanapati Kundapura if (rxa_is_queue_added(rx_adapter, eth_dev_id, rx_queue_id)) { 3379a1793ee8SGanapati Kundapura *rxa_inst_id = rx_adapter->id; 3380a1793ee8SGanapati Kundapura ret = 0; 3381a1793ee8SGanapati Kundapura } 3382a1793ee8SGanapati Kundapura 3383a1793ee8SGanapati Kundapura /* Rx adapter internally mainatains queue information 3384a1793ee8SGanapati Kundapura * for both internal port and DPDK service port. 3385a1793ee8SGanapati Kundapura * Eventdev PMD callback is called for future proof only and 3386a1793ee8SGanapati Kundapura * overrides the above return value if defined. 3387a1793ee8SGanapati Kundapura */ 3388a1793ee8SGanapati Kundapura caps = 0; 3389a1793ee8SGanapati Kundapura if (!rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, 3390a1793ee8SGanapati Kundapura eth_dev_id, 3391a1793ee8SGanapati Kundapura &caps)) { 3392a1793ee8SGanapati Kundapura if (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { 3393a1793ee8SGanapati Kundapura ret = rxa_dev_instance_get(rx_adapter) ? 
3394a1793ee8SGanapati Kundapura rxa_dev_instance_get(rx_adapter) 3395a1793ee8SGanapati Kundapura (eth_dev_id, 3396a1793ee8SGanapati Kundapura rx_queue_id, 3397a1793ee8SGanapati Kundapura rxa_inst_id) 3398a1793ee8SGanapati Kundapura : -EINVAL; 3399a1793ee8SGanapati Kundapura } 3400a1793ee8SGanapati Kundapura } 3401a1793ee8SGanapati Kundapura 3402a1793ee8SGanapati Kundapura /* return if entry found */ 3403a1793ee8SGanapati Kundapura if (ret == 0) 3404a1793ee8SGanapati Kundapura return ret; 3405a1793ee8SGanapati Kundapura } 3406a1793ee8SGanapati Kundapura 3407a1793ee8SGanapati Kundapura return -EINVAL; 3408a1793ee8SGanapati Kundapura } 3409a1793ee8SGanapati Kundapura 3410814d0170SGanapati Kundapura #define RXA_ADD_DICT(stats, s) rte_tel_data_add_dict_u64(d, #s, stats.s) 3411814d0170SGanapati Kundapura 3412814d0170SGanapati Kundapura static int 3413814d0170SGanapati Kundapura handle_rxa_stats(const char *cmd __rte_unused, 3414814d0170SGanapati Kundapura const char *params, 3415814d0170SGanapati Kundapura struct rte_tel_data *d) 3416814d0170SGanapati Kundapura { 3417814d0170SGanapati Kundapura uint8_t rx_adapter_id; 3418814d0170SGanapati Kundapura struct rte_event_eth_rx_adapter_stats rx_adptr_stats; 3419814d0170SGanapati Kundapura 3420814d0170SGanapati Kundapura if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 3421814d0170SGanapati Kundapura return -1; 3422814d0170SGanapati Kundapura 3423814d0170SGanapati Kundapura /* Get Rx adapter ID from parameter string */ 3424814d0170SGanapati Kundapura rx_adapter_id = atoi(params); 3425814d0170SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL); 3426814d0170SGanapati Kundapura 3427814d0170SGanapati Kundapura /* Get Rx adapter stats */ 3428814d0170SGanapati Kundapura if (rte_event_eth_rx_adapter_stats_get(rx_adapter_id, 3429814d0170SGanapati Kundapura &rx_adptr_stats)) { 3430814d0170SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to get Rx adapter stats\n"); 3431814d0170SGanapati 
Kundapura return -1; 3432814d0170SGanapati Kundapura } 3433814d0170SGanapati Kundapura 3434814d0170SGanapati Kundapura rte_tel_data_start_dict(d); 3435814d0170SGanapati Kundapura rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id); 3436814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_packets); 3437814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_poll_count); 3438814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_dropped); 3439814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_enq_retry); 3440814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_count); 3441814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_event_buf_size); 3442814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_enq_count); 3443814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_enq_start_ts); 3444814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_enq_block_cycles); 3445814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_enq_end_ts); 3446814d0170SGanapati Kundapura RXA_ADD_DICT(rx_adptr_stats, rx_intr_packets); 3447814d0170SGanapati Kundapura 3448814d0170SGanapati Kundapura return 0; 3449814d0170SGanapati Kundapura } 3450814d0170SGanapati Kundapura 3451814d0170SGanapati Kundapura static int 3452814d0170SGanapati Kundapura handle_rxa_stats_reset(const char *cmd __rte_unused, 3453814d0170SGanapati Kundapura const char *params, 3454814d0170SGanapati Kundapura struct rte_tel_data *d __rte_unused) 3455814d0170SGanapati Kundapura { 3456814d0170SGanapati Kundapura uint8_t rx_adapter_id; 3457814d0170SGanapati Kundapura 3458b450a990SDavid Marchand if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 3459814d0170SGanapati Kundapura return -1; 3460814d0170SGanapati Kundapura 3461814d0170SGanapati Kundapura /* Get Rx adapter ID from parameter string */ 3462814d0170SGanapati Kundapura rx_adapter_id = atoi(params); 3463814d0170SGanapati Kundapura 
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(rx_adapter_id, -EINVAL); 3464814d0170SGanapati Kundapura 3465814d0170SGanapati Kundapura /* Reset Rx adapter stats */ 3466814d0170SGanapati Kundapura if (rte_event_eth_rx_adapter_stats_reset(rx_adapter_id)) { 3467814d0170SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to reset Rx adapter stats\n"); 3468814d0170SGanapati Kundapura return -1; 3469814d0170SGanapati Kundapura } 3470814d0170SGanapati Kundapura 3471814d0170SGanapati Kundapura return 0; 3472814d0170SGanapati Kundapura } 3473814d0170SGanapati Kundapura 3474814d0170SGanapati Kundapura static int 3475814d0170SGanapati Kundapura handle_rxa_get_queue_conf(const char *cmd __rte_unused, 3476814d0170SGanapati Kundapura const char *params, 3477814d0170SGanapati Kundapura struct rte_tel_data *d) 3478814d0170SGanapati Kundapura { 3479814d0170SGanapati Kundapura uint8_t rx_adapter_id; 3480814d0170SGanapati Kundapura uint16_t rx_queue_id; 348174b034ffSWeiguo Li int eth_dev_id, ret = -1; 3482814d0170SGanapati Kundapura char *token, *l_params; 3483814d0170SGanapati Kundapura struct rte_event_eth_rx_adapter_queue_conf queue_conf; 3484814d0170SGanapati Kundapura 3485814d0170SGanapati Kundapura if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 3486814d0170SGanapati Kundapura return -1; 3487814d0170SGanapati Kundapura 3488814d0170SGanapati Kundapura /* Get Rx adapter ID from parameter string */ 3489814d0170SGanapati Kundapura l_params = strdup(params); 349074b034ffSWeiguo Li if (l_params == NULL) 349174b034ffSWeiguo Li return -ENOMEM; 3492814d0170SGanapati Kundapura token = strtok(l_params, ","); 349374b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 3494814d0170SGanapati Kundapura rx_adapter_id = strtoul(token, NULL, 10); 349574b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL); 3496814d0170SGanapati Kundapura 3497814d0170SGanapati Kundapura token = strtok(NULL, ","); 349874b034ffSWeiguo Li 
RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 3499814d0170SGanapati Kundapura 3500814d0170SGanapati Kundapura /* Get device ID from parameter string */ 3501814d0170SGanapati Kundapura eth_dev_id = strtoul(token, NULL, 10); 350274b034ffSWeiguo Li RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); 3503814d0170SGanapati Kundapura 3504814d0170SGanapati Kundapura token = strtok(NULL, ","); 350574b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 3506814d0170SGanapati Kundapura 3507814d0170SGanapati Kundapura /* Get Rx queue ID from parameter string */ 3508814d0170SGanapati Kundapura rx_queue_id = strtoul(token, NULL, 10); 3509814d0170SGanapati Kundapura if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 3510814d0170SGanapati Kundapura RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); 351174b034ffSWeiguo Li ret = -EINVAL; 351274b034ffSWeiguo Li goto error; 3513814d0170SGanapati Kundapura } 3514814d0170SGanapati Kundapura 3515814d0170SGanapati Kundapura token = strtok(NULL, "\0"); 3516814d0170SGanapati Kundapura if (token != NULL) 3517814d0170SGanapati Kundapura RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" 35187be78d02SJosh Soref " telemetry command, ignoring"); 351974b034ffSWeiguo Li /* Parsing parameter finished */ 352074b034ffSWeiguo Li free(l_params); 3521814d0170SGanapati Kundapura 3522814d0170SGanapati Kundapura if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id, 3523814d0170SGanapati Kundapura rx_queue_id, &queue_conf)) { 3524814d0170SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue config"); 3525814d0170SGanapati Kundapura return -1; 3526814d0170SGanapati Kundapura } 3527814d0170SGanapati Kundapura 3528814d0170SGanapati Kundapura rte_tel_data_start_dict(d); 3529814d0170SGanapati Kundapura rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id); 3530814d0170SGanapati Kundapura rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id); 
3531814d0170SGanapati Kundapura rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id); 3532814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf, rx_queue_flags); 3533814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf, servicing_weight); 3534814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf.ev, queue_id); 3535814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf.ev, sched_type); 3536814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf.ev, priority); 3537814d0170SGanapati Kundapura RXA_ADD_DICT(queue_conf.ev, flow_id); 3538814d0170SGanapati Kundapura 3539814d0170SGanapati Kundapura return 0; 354074b034ffSWeiguo Li 354174b034ffSWeiguo Li error: 354274b034ffSWeiguo Li free(l_params); 354374b034ffSWeiguo Li return ret; 3544814d0170SGanapati Kundapura } 3545814d0170SGanapati Kundapura 35469e583185SNaga Harish K S V static int 35479e583185SNaga Harish K S V handle_rxa_get_queue_stats(const char *cmd __rte_unused, 35489e583185SNaga Harish K S V const char *params, 35499e583185SNaga Harish K S V struct rte_tel_data *d) 35509e583185SNaga Harish K S V { 35519e583185SNaga Harish K S V uint8_t rx_adapter_id; 35529e583185SNaga Harish K S V uint16_t rx_queue_id; 355374b034ffSWeiguo Li int eth_dev_id, ret = -1; 35549e583185SNaga Harish K S V char *token, *l_params; 35559e583185SNaga Harish K S V struct rte_event_eth_rx_adapter_queue_stats q_stats; 35569e583185SNaga Harish K S V 35579e583185SNaga Harish K S V if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 35589e583185SNaga Harish K S V return -1; 35599e583185SNaga Harish K S V 35609e583185SNaga Harish K S V /* Get Rx adapter ID from parameter string */ 35619e583185SNaga Harish K S V l_params = strdup(params); 356274b034ffSWeiguo Li if (l_params == NULL) 356374b034ffSWeiguo Li return -ENOMEM; 35649e583185SNaga Harish K S V token = strtok(l_params, ","); 356574b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 35669e583185SNaga Harish K S V rx_adapter_id = strtoul(token, NULL, 10); 
356774b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL); 35689e583185SNaga Harish K S V 35699e583185SNaga Harish K S V token = strtok(NULL, ","); 357074b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 35719e583185SNaga Harish K S V 35729e583185SNaga Harish K S V /* Get device ID from parameter string */ 35739e583185SNaga Harish K S V eth_dev_id = strtoul(token, NULL, 10); 357474b034ffSWeiguo Li RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); 35759e583185SNaga Harish K S V 35769e583185SNaga Harish K S V token = strtok(NULL, ","); 357774b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 35789e583185SNaga Harish K S V 35799e583185SNaga Harish K S V /* Get Rx queue ID from parameter string */ 35809e583185SNaga Harish K S V rx_queue_id = strtoul(token, NULL, 10); 35819e583185SNaga Harish K S V if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 35829e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); 358374b034ffSWeiguo Li ret = -EINVAL; 358474b034ffSWeiguo Li goto error; 35859e583185SNaga Harish K S V } 35869e583185SNaga Harish K S V 35879e583185SNaga Harish K S V token = strtok(NULL, "\0"); 35889e583185SNaga Harish K S V if (token != NULL) 35899e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" 35907be78d02SJosh Soref " telemetry command, ignoring"); 359174b034ffSWeiguo Li /* Parsing parameter finished */ 359274b034ffSWeiguo Li free(l_params); 35939e583185SNaga Harish K S V 35949e583185SNaga Harish K S V if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id, 35959e583185SNaga Harish K S V rx_queue_id, &q_stats)) { 35969e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to get Rx adapter queue stats"); 35979e583185SNaga Harish K S V return -1; 35989e583185SNaga Harish K S V } 35999e583185SNaga Harish K S V 36009e583185SNaga Harish K S V 
rte_tel_data_start_dict(d); 36019e583185SNaga Harish K S V rte_tel_data_add_dict_u64(d, "rx_adapter_id", rx_adapter_id); 36029e583185SNaga Harish K S V rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id); 36039e583185SNaga Harish K S V rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id); 36049e583185SNaga Harish K S V RXA_ADD_DICT(q_stats, rx_event_buf_count); 36059e583185SNaga Harish K S V RXA_ADD_DICT(q_stats, rx_event_buf_size); 36069e583185SNaga Harish K S V RXA_ADD_DICT(q_stats, rx_poll_count); 36079e583185SNaga Harish K S V RXA_ADD_DICT(q_stats, rx_packets); 36089e583185SNaga Harish K S V RXA_ADD_DICT(q_stats, rx_dropped); 36099e583185SNaga Harish K S V 36109e583185SNaga Harish K S V return 0; 361174b034ffSWeiguo Li 361274b034ffSWeiguo Li error: 361374b034ffSWeiguo Li free(l_params); 361474b034ffSWeiguo Li return ret; 36159e583185SNaga Harish K S V } 36169e583185SNaga Harish K S V 36179e583185SNaga Harish K S V static int 36189e583185SNaga Harish K S V handle_rxa_queue_stats_reset(const char *cmd __rte_unused, 36199e583185SNaga Harish K S V const char *params, 36209e583185SNaga Harish K S V struct rte_tel_data *d __rte_unused) 36219e583185SNaga Harish K S V { 36229e583185SNaga Harish K S V uint8_t rx_adapter_id; 36239e583185SNaga Harish K S V uint16_t rx_queue_id; 362474b034ffSWeiguo Li int eth_dev_id, ret = -1; 36259e583185SNaga Harish K S V char *token, *l_params; 36269e583185SNaga Harish K S V 36279e583185SNaga Harish K S V if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 36289e583185SNaga Harish K S V return -1; 36299e583185SNaga Harish K S V 36309e583185SNaga Harish K S V /* Get Rx adapter ID from parameter string */ 36319e583185SNaga Harish K S V l_params = strdup(params); 363274b034ffSWeiguo Li if (l_params == NULL) 363374b034ffSWeiguo Li return -ENOMEM; 36349e583185SNaga Harish K S V token = strtok(l_params, ","); 363574b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 36369e583185SNaga Harish K S 
V rx_adapter_id = strtoul(token, NULL, 10); 363774b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_GOTO_ERR_RET(rx_adapter_id, -EINVAL); 36389e583185SNaga Harish K S V 36399e583185SNaga Harish K S V token = strtok(NULL, ","); 364074b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 36419e583185SNaga Harish K S V 36429e583185SNaga Harish K S V /* Get device ID from parameter string */ 36439e583185SNaga Harish K S V eth_dev_id = strtoul(token, NULL, 10); 364474b034ffSWeiguo Li RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); 36459e583185SNaga Harish K S V 36469e583185SNaga Harish K S V token = strtok(NULL, ","); 364774b034ffSWeiguo Li RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 36489e583185SNaga Harish K S V 36499e583185SNaga Harish K S V /* Get Rx queue ID from parameter string */ 36509e583185SNaga Harish K S V rx_queue_id = strtoul(token, NULL, 10); 36519e583185SNaga Harish K S V if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 36529e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); 365374b034ffSWeiguo Li ret = -EINVAL; 365474b034ffSWeiguo Li goto error; 36559e583185SNaga Harish K S V } 36569e583185SNaga Harish K S V 36579e583185SNaga Harish K S V token = strtok(NULL, "\0"); 36589e583185SNaga Harish K S V if (token != NULL) 36599e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" 36607be78d02SJosh Soref " telemetry command, ignoring"); 366174b034ffSWeiguo Li /* Parsing parameter finished */ 366274b034ffSWeiguo Li free(l_params); 36639e583185SNaga Harish K S V 36649e583185SNaga Harish K S V if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id, 36659e583185SNaga Harish K S V eth_dev_id, 36669e583185SNaga Harish K S V rx_queue_id)) { 36679e583185SNaga Harish K S V RTE_EDEV_LOG_ERR("Failed to reset Rx adapter queue stats"); 36689e583185SNaga Harish K S V return -1; 36699e583185SNaga Harish K S V } 36709e583185SNaga 
Harish K S V 36719e583185SNaga Harish K S V return 0; 367274b034ffSWeiguo Li 367374b034ffSWeiguo Li error: 367474b034ffSWeiguo Li free(l_params); 367574b034ffSWeiguo Li return ret; 36769e583185SNaga Harish K S V } 36779e583185SNaga Harish K S V 3678a1793ee8SGanapati Kundapura static int 3679a1793ee8SGanapati Kundapura handle_rxa_instance_get(const char *cmd __rte_unused, 3680a1793ee8SGanapati Kundapura const char *params, 3681a1793ee8SGanapati Kundapura struct rte_tel_data *d) 3682a1793ee8SGanapati Kundapura { 3683a1793ee8SGanapati Kundapura uint8_t instance_id; 3684a1793ee8SGanapati Kundapura uint16_t rx_queue_id; 3685a1793ee8SGanapati Kundapura int eth_dev_id, ret = -1; 3686a1793ee8SGanapati Kundapura char *token, *l_params; 3687a1793ee8SGanapati Kundapura 3688a1793ee8SGanapati Kundapura if (params == NULL || strlen(params) == 0 || !isdigit(*params)) 3689a1793ee8SGanapati Kundapura return -1; 3690a1793ee8SGanapati Kundapura 3691a1793ee8SGanapati Kundapura l_params = strdup(params); 3692a1793ee8SGanapati Kundapura if (l_params == NULL) 3693a1793ee8SGanapati Kundapura return -ENOMEM; 3694a1793ee8SGanapati Kundapura token = strtok(l_params, ","); 3695a1793ee8SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 3696a1793ee8SGanapati Kundapura 3697a1793ee8SGanapati Kundapura /* Get device ID from parameter string */ 3698a1793ee8SGanapati Kundapura eth_dev_id = strtoul(token, NULL, 10); 3699a1793ee8SGanapati Kundapura RTE_ETH_VALID_PORTID_OR_GOTO_ERR_RET(eth_dev_id, -EINVAL); 3700a1793ee8SGanapati Kundapura 3701a1793ee8SGanapati Kundapura token = strtok(NULL, ","); 3702a1793ee8SGanapati Kundapura RTE_EVENT_ETH_RX_ADAPTER_TOKEN_VALID_OR_GOTO_ERR_RET(token, -1); 3703a1793ee8SGanapati Kundapura 3704a1793ee8SGanapati Kundapura /* Get Rx queue ID from parameter string */ 3705a1793ee8SGanapati Kundapura rx_queue_id = strtoul(token, NULL, 10); 3706a1793ee8SGanapati Kundapura if (rx_queue_id >= rte_eth_devices[eth_dev_id].data->nb_rx_queues) { 
3707a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("Invalid rx queue_id %u", rx_queue_id); 3708a1793ee8SGanapati Kundapura ret = -EINVAL; 3709a1793ee8SGanapati Kundapura goto error; 3710a1793ee8SGanapati Kundapura } 3711a1793ee8SGanapati Kundapura 3712a1793ee8SGanapati Kundapura token = strtok(NULL, "\0"); 3713a1793ee8SGanapati Kundapura if (token != NULL) 3714a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev" 3715a1793ee8SGanapati Kundapura " telemetry command, ignoring"); 3716a1793ee8SGanapati Kundapura 3717a1793ee8SGanapati Kundapura /* Parsing parameter finished */ 3718a1793ee8SGanapati Kundapura free(l_params); 3719a1793ee8SGanapati Kundapura 3720a1793ee8SGanapati Kundapura if (rte_event_eth_rx_adapter_instance_get(eth_dev_id, 3721a1793ee8SGanapati Kundapura rx_queue_id, 3722a1793ee8SGanapati Kundapura &instance_id)) { 3723a1793ee8SGanapati Kundapura RTE_EDEV_LOG_ERR("Failed to get RX adapter instance ID " 3724a1793ee8SGanapati Kundapura " for rx_queue_id = %d", rx_queue_id); 3725a1793ee8SGanapati Kundapura return -1; 3726a1793ee8SGanapati Kundapura } 3727a1793ee8SGanapati Kundapura 3728a1793ee8SGanapati Kundapura rte_tel_data_start_dict(d); 3729a1793ee8SGanapati Kundapura rte_tel_data_add_dict_u64(d, "eth_dev_id", eth_dev_id); 3730a1793ee8SGanapati Kundapura rte_tel_data_add_dict_u64(d, "rx_queue_id", rx_queue_id); 3731a1793ee8SGanapati Kundapura rte_tel_data_add_dict_u64(d, "rxa_instance_id", instance_id); 3732a1793ee8SGanapati Kundapura 3733a1793ee8SGanapati Kundapura return 0; 3734a1793ee8SGanapati Kundapura 3735a1793ee8SGanapati Kundapura error: 3736a1793ee8SGanapati Kundapura free(l_params); 3737a1793ee8SGanapati Kundapura return ret; 3738a1793ee8SGanapati Kundapura } 3739a1793ee8SGanapati Kundapura 3740814d0170SGanapati Kundapura RTE_INIT(rxa_init_telemetry) 3741814d0170SGanapati Kundapura { 3742814d0170SGanapati Kundapura rte_telemetry_register_cmd("/eventdev/rxa_stats", 3743814d0170SGanapati Kundapura handle_rxa_stats, 
3744814d0170SGanapati Kundapura "Returns Rx adapter stats. Parameter: rxa_id"); 3745814d0170SGanapati Kundapura 3746814d0170SGanapati Kundapura rte_telemetry_register_cmd("/eventdev/rxa_stats_reset", 3747814d0170SGanapati Kundapura handle_rxa_stats_reset, 3748814d0170SGanapati Kundapura "Reset Rx adapter stats. Parameter: rxa_id"); 3749814d0170SGanapati Kundapura 3750814d0170SGanapati Kundapura rte_telemetry_register_cmd("/eventdev/rxa_queue_conf", 3751814d0170SGanapati Kundapura handle_rxa_get_queue_conf, 3752814d0170SGanapati Kundapura "Returns Rx queue config. Parameter: rxa_id, dev_id, queue_id"); 37539e583185SNaga Harish K S V 37549e583185SNaga Harish K S V rte_telemetry_register_cmd("/eventdev/rxa_queue_stats", 37559e583185SNaga Harish K S V handle_rxa_get_queue_stats, 37569e583185SNaga Harish K S V "Returns Rx queue stats. Parameter: rxa_id, dev_id, queue_id"); 37579e583185SNaga Harish K S V 37589e583185SNaga Harish K S V rte_telemetry_register_cmd("/eventdev/rxa_queue_stats_reset", 37599e583185SNaga Harish K S V handle_rxa_queue_stats_reset, 37609e583185SNaga Harish K S V "Reset Rx queue stats. Parameter: rxa_id, dev_id, queue_id"); 3761a1793ee8SGanapati Kundapura 3762a1793ee8SGanapati Kundapura rte_telemetry_register_cmd("/eventdev/rxa_rxq_instance_get", 3763a1793ee8SGanapati Kundapura handle_rxa_instance_get, 3764a1793ee8SGanapati Kundapura "Returns Rx adapter instance id. Parameter: dev_id, queue_id"); 3765814d0170SGanapati Kundapura } 3766