150fb749aSPavan Nikhilesh /* SPDX-License-Identifier: BSD-3-Clause 250fb749aSPavan Nikhilesh * Copyright(c) 2017 Cavium, Inc 350fb749aSPavan Nikhilesh */ 450fb749aSPavan Nikhilesh 550fb749aSPavan Nikhilesh #include <rte_atomic.h> 650fb749aSPavan Nikhilesh #include <rte_common.h> 750fb749aSPavan Nikhilesh #include <rte_cycles.h> 850fb749aSPavan Nikhilesh #include <rte_debug.h> 950fb749aSPavan Nikhilesh #include <rte_eal.h> 1050fb749aSPavan Nikhilesh #include <rte_ethdev.h> 1150fb749aSPavan Nikhilesh #include <rte_eventdev.h> 1250fb749aSPavan Nikhilesh #include <rte_hexdump.h> 1350fb749aSPavan Nikhilesh #include <rte_mbuf.h> 1450fb749aSPavan Nikhilesh #include <rte_malloc.h> 1550fb749aSPavan Nikhilesh #include <rte_memcpy.h> 1650fb749aSPavan Nikhilesh #include <rte_launch.h> 1750fb749aSPavan Nikhilesh #include <rte_lcore.h> 1850fb749aSPavan Nikhilesh #include <rte_per_lcore.h> 1950fb749aSPavan Nikhilesh #include <rte_random.h> 2050fb749aSPavan Nikhilesh #include <rte_bus_vdev.h> 21*daeda14cSPavan Nikhilesh #include <rte_test.h> 2250fb749aSPavan Nikhilesh 23*daeda14cSPavan Nikhilesh #include "ssovf_evdev.h" 2450fb749aSPavan Nikhilesh 2550fb749aSPavan Nikhilesh #define NUM_PACKETS (1 << 18) 2650fb749aSPavan Nikhilesh #define MAX_EVENTS (16 * 1024) 2750fb749aSPavan Nikhilesh 28*daeda14cSPavan Nikhilesh #define OCTEONTX_TEST_RUN(setup, teardown, test) \ 29*daeda14cSPavan Nikhilesh octeontx_test_run(setup, teardown, test, #test) 30*daeda14cSPavan Nikhilesh 31*daeda14cSPavan Nikhilesh static int total; 32*daeda14cSPavan Nikhilesh static int passed; 33*daeda14cSPavan Nikhilesh static int failed; 34*daeda14cSPavan Nikhilesh static int unsupported; 35*daeda14cSPavan Nikhilesh 3650fb749aSPavan Nikhilesh static int evdev; 3750fb749aSPavan Nikhilesh static struct rte_mempool *eventdev_test_mempool; 3850fb749aSPavan Nikhilesh 3950fb749aSPavan Nikhilesh struct event_attr { 4050fb749aSPavan Nikhilesh uint32_t flow_id; 4150fb749aSPavan Nikhilesh uint8_t event_type; 4250fb749aSPavan 
Nikhilesh uint8_t sub_event_type; 4350fb749aSPavan Nikhilesh uint8_t sched_type; 4450fb749aSPavan Nikhilesh uint8_t queue; 4550fb749aSPavan Nikhilesh uint8_t port; 4650fb749aSPavan Nikhilesh }; 4750fb749aSPavan Nikhilesh 4850fb749aSPavan Nikhilesh static uint32_t seqn_list_index; 4950fb749aSPavan Nikhilesh static int seqn_list[NUM_PACKETS]; 5050fb749aSPavan Nikhilesh 5150fb749aSPavan Nikhilesh static inline void 5250fb749aSPavan Nikhilesh seqn_list_init(void) 5350fb749aSPavan Nikhilesh { 5450fb749aSPavan Nikhilesh RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS); 5550fb749aSPavan Nikhilesh memset(seqn_list, 0, sizeof(seqn_list)); 5650fb749aSPavan Nikhilesh seqn_list_index = 0; 5750fb749aSPavan Nikhilesh } 5850fb749aSPavan Nikhilesh 5950fb749aSPavan Nikhilesh static inline int 6050fb749aSPavan Nikhilesh seqn_list_update(int val) 6150fb749aSPavan Nikhilesh { 6250fb749aSPavan Nikhilesh if (seqn_list_index >= NUM_PACKETS) 63*daeda14cSPavan Nikhilesh return -1; 6450fb749aSPavan Nikhilesh 6550fb749aSPavan Nikhilesh seqn_list[seqn_list_index++] = val; 6650fb749aSPavan Nikhilesh rte_smp_wmb(); 67*daeda14cSPavan Nikhilesh return 0; 6850fb749aSPavan Nikhilesh } 6950fb749aSPavan Nikhilesh 7050fb749aSPavan Nikhilesh static inline int 7150fb749aSPavan Nikhilesh seqn_list_check(int limit) 7250fb749aSPavan Nikhilesh { 7350fb749aSPavan Nikhilesh int i; 7450fb749aSPavan Nikhilesh 7550fb749aSPavan Nikhilesh for (i = 0; i < limit; i++) { 7650fb749aSPavan Nikhilesh if (seqn_list[i] != i) { 77*daeda14cSPavan Nikhilesh ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i); 78*daeda14cSPavan Nikhilesh return -1; 7950fb749aSPavan Nikhilesh } 8050fb749aSPavan Nikhilesh } 81*daeda14cSPavan Nikhilesh return 0; 8250fb749aSPavan Nikhilesh } 8350fb749aSPavan Nikhilesh 8450fb749aSPavan Nikhilesh struct test_core_param { 8550fb749aSPavan Nikhilesh rte_atomic32_t *total_events; 8650fb749aSPavan Nikhilesh uint64_t dequeue_tmo_ticks; 8750fb749aSPavan Nikhilesh uint8_t port; 8850fb749aSPavan Nikhilesh 
uint8_t sched_type; 8950fb749aSPavan Nikhilesh }; 9050fb749aSPavan Nikhilesh 9150fb749aSPavan Nikhilesh static int 9250fb749aSPavan Nikhilesh testsuite_setup(void) 9350fb749aSPavan Nikhilesh { 9450fb749aSPavan Nikhilesh const char *eventdev_name = "event_octeontx"; 9550fb749aSPavan Nikhilesh 9650fb749aSPavan Nikhilesh evdev = rte_event_dev_get_dev_id(eventdev_name); 9750fb749aSPavan Nikhilesh if (evdev < 0) { 98*daeda14cSPavan Nikhilesh ssovf_log_dbg("%d: Eventdev %s not found - creating.", 9950fb749aSPavan Nikhilesh __LINE__, eventdev_name); 10050fb749aSPavan Nikhilesh if (rte_vdev_init(eventdev_name, NULL) < 0) { 101*daeda14cSPavan Nikhilesh ssovf_log_dbg("Error creating eventdev %s", 102*daeda14cSPavan Nikhilesh eventdev_name); 103*daeda14cSPavan Nikhilesh return -1; 10450fb749aSPavan Nikhilesh } 10550fb749aSPavan Nikhilesh evdev = rte_event_dev_get_dev_id(eventdev_name); 10650fb749aSPavan Nikhilesh if (evdev < 0) { 107*daeda14cSPavan Nikhilesh ssovf_log_dbg("Error finding newly created eventdev"); 108*daeda14cSPavan Nikhilesh return -1; 10950fb749aSPavan Nikhilesh } 11050fb749aSPavan Nikhilesh } 11150fb749aSPavan Nikhilesh 112*daeda14cSPavan Nikhilesh return 0; 11350fb749aSPavan Nikhilesh } 11450fb749aSPavan Nikhilesh 11550fb749aSPavan Nikhilesh static void 11650fb749aSPavan Nikhilesh testsuite_teardown(void) 11750fb749aSPavan Nikhilesh { 11850fb749aSPavan Nikhilesh rte_event_dev_close(evdev); 11950fb749aSPavan Nikhilesh } 12050fb749aSPavan Nikhilesh 12150fb749aSPavan Nikhilesh static inline void 12250fb749aSPavan Nikhilesh devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, 12350fb749aSPavan Nikhilesh struct rte_event_dev_info *info) 12450fb749aSPavan Nikhilesh { 12550fb749aSPavan Nikhilesh memset(dev_conf, 0, sizeof(struct rte_event_dev_config)); 12650fb749aSPavan Nikhilesh dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns; 12750fb749aSPavan Nikhilesh dev_conf->nb_event_ports = info->max_event_ports; 12850fb749aSPavan Nikhilesh 
dev_conf->nb_event_queues = info->max_event_queues; 12950fb749aSPavan Nikhilesh dev_conf->nb_event_queue_flows = info->max_event_queue_flows; 13050fb749aSPavan Nikhilesh dev_conf->nb_event_port_dequeue_depth = 13150fb749aSPavan Nikhilesh info->max_event_port_dequeue_depth; 13250fb749aSPavan Nikhilesh dev_conf->nb_event_port_enqueue_depth = 13350fb749aSPavan Nikhilesh info->max_event_port_enqueue_depth; 13450fb749aSPavan Nikhilesh dev_conf->nb_event_port_enqueue_depth = 13550fb749aSPavan Nikhilesh info->max_event_port_enqueue_depth; 13650fb749aSPavan Nikhilesh dev_conf->nb_events_limit = 13750fb749aSPavan Nikhilesh info->max_num_events; 13850fb749aSPavan Nikhilesh } 13950fb749aSPavan Nikhilesh 14050fb749aSPavan Nikhilesh enum { 14150fb749aSPavan Nikhilesh TEST_EVENTDEV_SETUP_DEFAULT, 14250fb749aSPavan Nikhilesh TEST_EVENTDEV_SETUP_PRIORITY, 14350fb749aSPavan Nikhilesh TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT, 14450fb749aSPavan Nikhilesh }; 14550fb749aSPavan Nikhilesh 14650fb749aSPavan Nikhilesh static inline int 14750fb749aSPavan Nikhilesh _eventdev_setup(int mode) 14850fb749aSPavan Nikhilesh { 14950fb749aSPavan Nikhilesh int i, ret; 15050fb749aSPavan Nikhilesh struct rte_event_dev_config dev_conf; 15150fb749aSPavan Nikhilesh struct rte_event_dev_info info; 15250fb749aSPavan Nikhilesh const char *pool_name = "evdev_octeontx_test_pool"; 15350fb749aSPavan Nikhilesh 15450fb749aSPavan Nikhilesh /* Create and destrory pool for each test case to make it standalone */ 15550fb749aSPavan Nikhilesh eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, 15650fb749aSPavan Nikhilesh MAX_EVENTS, 15750fb749aSPavan Nikhilesh 0 /*MBUF_CACHE_SIZE*/, 15850fb749aSPavan Nikhilesh 0, 15950fb749aSPavan Nikhilesh 512, /* Use very small mbufs */ 16050fb749aSPavan Nikhilesh rte_socket_id()); 16150fb749aSPavan Nikhilesh if (!eventdev_test_mempool) { 162*daeda14cSPavan Nikhilesh ssovf_log_dbg("ERROR creating mempool"); 163*daeda14cSPavan Nikhilesh return -1; 16450fb749aSPavan Nikhilesh } 
16550fb749aSPavan Nikhilesh 16650fb749aSPavan Nikhilesh ret = rte_event_dev_info_get(evdev, &info); 167*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info"); 168*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS, 169*daeda14cSPavan Nikhilesh "ERROR max_num_events=%d < max_events=%d", 17050fb749aSPavan Nikhilesh info.max_num_events, MAX_EVENTS); 17150fb749aSPavan Nikhilesh 17250fb749aSPavan Nikhilesh devconf_set_default_sane_values(&dev_conf, &info); 17350fb749aSPavan Nikhilesh if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT) 17450fb749aSPavan Nikhilesh dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT; 17550fb749aSPavan Nikhilesh 17650fb749aSPavan Nikhilesh ret = rte_event_dev_configure(evdev, &dev_conf); 177*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev"); 17850fb749aSPavan Nikhilesh 17950fb749aSPavan Nikhilesh uint32_t queue_count; 180*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 18150fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 18250fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 18350fb749aSPavan Nikhilesh 18450fb749aSPavan Nikhilesh if (mode == TEST_EVENTDEV_SETUP_PRIORITY) { 18550fb749aSPavan Nikhilesh if (queue_count > 8) { 186*daeda14cSPavan Nikhilesh ssovf_log_dbg( 187*daeda14cSPavan Nikhilesh "test expects the unique priority per queue"); 18850fb749aSPavan Nikhilesh return -ENOTSUP; 18950fb749aSPavan Nikhilesh } 19050fb749aSPavan Nikhilesh 19150fb749aSPavan Nikhilesh /* Configure event queues(0 to n) with 19250fb749aSPavan Nikhilesh * RTE_EVENT_DEV_PRIORITY_HIGHEST to 19350fb749aSPavan Nikhilesh * RTE_EVENT_DEV_PRIORITY_LOWEST 19450fb749aSPavan Nikhilesh */ 19550fb749aSPavan Nikhilesh uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / 19650fb749aSPavan Nikhilesh queue_count; 19750fb749aSPavan Nikhilesh for (i = 0; i < (int)queue_count; i++) { 19850fb749aSPavan Nikhilesh struct 
rte_event_queue_conf queue_conf; 19950fb749aSPavan Nikhilesh 20050fb749aSPavan Nikhilesh ret = rte_event_queue_default_conf_get(evdev, i, 20150fb749aSPavan Nikhilesh &queue_conf); 202*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", 203*daeda14cSPavan Nikhilesh i); 20450fb749aSPavan Nikhilesh queue_conf.priority = i * step; 20550fb749aSPavan Nikhilesh ret = rte_event_queue_setup(evdev, i, &queue_conf); 206*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 207*daeda14cSPavan Nikhilesh i); 20850fb749aSPavan Nikhilesh } 20950fb749aSPavan Nikhilesh 21050fb749aSPavan Nikhilesh } else { 21150fb749aSPavan Nikhilesh /* Configure event queues with default priority */ 21250fb749aSPavan Nikhilesh for (i = 0; i < (int)queue_count; i++) { 21350fb749aSPavan Nikhilesh ret = rte_event_queue_setup(evdev, i, NULL); 214*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 215*daeda14cSPavan Nikhilesh i); 21650fb749aSPavan Nikhilesh } 21750fb749aSPavan Nikhilesh } 21850fb749aSPavan Nikhilesh /* Configure event ports */ 21950fb749aSPavan Nikhilesh uint32_t port_count; 220*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 22150fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 22250fb749aSPavan Nikhilesh &port_count), "Port count get failed"); 22350fb749aSPavan Nikhilesh for (i = 0; i < (int)port_count; i++) { 22450fb749aSPavan Nikhilesh ret = rte_event_port_setup(evdev, i, NULL); 225*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i); 22650fb749aSPavan Nikhilesh ret = rte_event_port_link(evdev, i, NULL, NULL, 0); 227*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 228*daeda14cSPavan Nikhilesh i); 22950fb749aSPavan Nikhilesh } 23050fb749aSPavan Nikhilesh 23150fb749aSPavan Nikhilesh ret = rte_event_dev_start(evdev); 232*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start 
device"); 23350fb749aSPavan Nikhilesh 234*daeda14cSPavan Nikhilesh return 0; 23550fb749aSPavan Nikhilesh } 23650fb749aSPavan Nikhilesh 23750fb749aSPavan Nikhilesh static inline int 23850fb749aSPavan Nikhilesh eventdev_setup(void) 23950fb749aSPavan Nikhilesh { 24050fb749aSPavan Nikhilesh return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT); 24150fb749aSPavan Nikhilesh } 24250fb749aSPavan Nikhilesh 24350fb749aSPavan Nikhilesh static inline int 24450fb749aSPavan Nikhilesh eventdev_setup_priority(void) 24550fb749aSPavan Nikhilesh { 24650fb749aSPavan Nikhilesh return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY); 24750fb749aSPavan Nikhilesh } 24850fb749aSPavan Nikhilesh 24950fb749aSPavan Nikhilesh static inline int 25050fb749aSPavan Nikhilesh eventdev_setup_dequeue_timeout(void) 25150fb749aSPavan Nikhilesh { 25250fb749aSPavan Nikhilesh return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT); 25350fb749aSPavan Nikhilesh } 25450fb749aSPavan Nikhilesh 25550fb749aSPavan Nikhilesh static inline void 25650fb749aSPavan Nikhilesh eventdev_teardown(void) 25750fb749aSPavan Nikhilesh { 25850fb749aSPavan Nikhilesh rte_event_dev_stop(evdev); 25950fb749aSPavan Nikhilesh rte_mempool_free(eventdev_test_mempool); 26050fb749aSPavan Nikhilesh } 26150fb749aSPavan Nikhilesh 26250fb749aSPavan Nikhilesh static inline void 26350fb749aSPavan Nikhilesh update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev, 26450fb749aSPavan Nikhilesh uint32_t flow_id, uint8_t event_type, 26550fb749aSPavan Nikhilesh uint8_t sub_event_type, uint8_t sched_type, 26650fb749aSPavan Nikhilesh uint8_t queue, uint8_t port) 26750fb749aSPavan Nikhilesh { 26850fb749aSPavan Nikhilesh struct event_attr *attr; 26950fb749aSPavan Nikhilesh 27050fb749aSPavan Nikhilesh /* Store the event attributes in mbuf for future reference */ 27150fb749aSPavan Nikhilesh attr = rte_pktmbuf_mtod(m, struct event_attr *); 27250fb749aSPavan Nikhilesh attr->flow_id = flow_id; 27350fb749aSPavan Nikhilesh attr->event_type = 
event_type; 27450fb749aSPavan Nikhilesh attr->sub_event_type = sub_event_type; 27550fb749aSPavan Nikhilesh attr->sched_type = sched_type; 27650fb749aSPavan Nikhilesh attr->queue = queue; 27750fb749aSPavan Nikhilesh attr->port = port; 27850fb749aSPavan Nikhilesh 27950fb749aSPavan Nikhilesh ev->flow_id = flow_id; 28050fb749aSPavan Nikhilesh ev->sub_event_type = sub_event_type; 28150fb749aSPavan Nikhilesh ev->event_type = event_type; 28250fb749aSPavan Nikhilesh /* Inject the new event */ 28350fb749aSPavan Nikhilesh ev->op = RTE_EVENT_OP_NEW; 28450fb749aSPavan Nikhilesh ev->sched_type = sched_type; 28550fb749aSPavan Nikhilesh ev->queue_id = queue; 28650fb749aSPavan Nikhilesh ev->mbuf = m; 28750fb749aSPavan Nikhilesh } 28850fb749aSPavan Nikhilesh 28950fb749aSPavan Nikhilesh static inline int 29050fb749aSPavan Nikhilesh inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type, 29150fb749aSPavan Nikhilesh uint8_t sched_type, uint8_t queue, uint8_t port, 29250fb749aSPavan Nikhilesh unsigned int events) 29350fb749aSPavan Nikhilesh { 29450fb749aSPavan Nikhilesh struct rte_mbuf *m; 29550fb749aSPavan Nikhilesh unsigned int i; 29650fb749aSPavan Nikhilesh 29750fb749aSPavan Nikhilesh for (i = 0; i < events; i++) { 29850fb749aSPavan Nikhilesh struct rte_event ev = {.event = 0, .u64 = 0}; 29950fb749aSPavan Nikhilesh 30050fb749aSPavan Nikhilesh m = rte_pktmbuf_alloc(eventdev_test_mempool); 301*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed"); 30250fb749aSPavan Nikhilesh 30350fb749aSPavan Nikhilesh m->seqn = i; 30450fb749aSPavan Nikhilesh update_event_and_validation_attr(m, &ev, flow_id, event_type, 30550fb749aSPavan Nikhilesh sub_event_type, sched_type, queue, port); 30650fb749aSPavan Nikhilesh rte_event_enqueue_burst(evdev, port, &ev, 1); 30750fb749aSPavan Nikhilesh } 30850fb749aSPavan Nikhilesh return 0; 30950fb749aSPavan Nikhilesh } 31050fb749aSPavan Nikhilesh 31150fb749aSPavan Nikhilesh static inline int 31250fb749aSPavan Nikhilesh 
check_excess_events(uint8_t port) 31350fb749aSPavan Nikhilesh { 31450fb749aSPavan Nikhilesh int i; 31550fb749aSPavan Nikhilesh uint16_t valid_event; 31650fb749aSPavan Nikhilesh struct rte_event ev; 31750fb749aSPavan Nikhilesh 31850fb749aSPavan Nikhilesh /* Check for excess events, try for a few times and exit */ 31950fb749aSPavan Nikhilesh for (i = 0; i < 32; i++) { 32050fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); 32150fb749aSPavan Nikhilesh 322*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(valid_event, 323*daeda14cSPavan Nikhilesh "Unexpected valid event=%d", ev.mbuf->seqn); 32450fb749aSPavan Nikhilesh } 32550fb749aSPavan Nikhilesh return 0; 32650fb749aSPavan Nikhilesh } 32750fb749aSPavan Nikhilesh 32850fb749aSPavan Nikhilesh static inline int 32950fb749aSPavan Nikhilesh generate_random_events(const unsigned int total_events) 33050fb749aSPavan Nikhilesh { 33150fb749aSPavan Nikhilesh struct rte_event_dev_info info; 33250fb749aSPavan Nikhilesh unsigned int i; 33350fb749aSPavan Nikhilesh int ret; 33450fb749aSPavan Nikhilesh 33550fb749aSPavan Nikhilesh uint32_t queue_count; 336*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 33750fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 33850fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 33950fb749aSPavan Nikhilesh 34050fb749aSPavan Nikhilesh ret = rte_event_dev_info_get(evdev, &info); 341*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info"); 34250fb749aSPavan Nikhilesh for (i = 0; i < total_events; i++) { 34350fb749aSPavan Nikhilesh ret = inject_events( 34450fb749aSPavan Nikhilesh rte_rand() % info.max_event_queue_flows /*flow_id */, 34550fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 34650fb749aSPavan Nikhilesh rte_rand() % 256 /* sub_event_type */, 34750fb749aSPavan Nikhilesh rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1), 34850fb749aSPavan Nikhilesh rte_rand() % queue_count /* queue 
*/, 34950fb749aSPavan Nikhilesh 0 /* port */, 35050fb749aSPavan Nikhilesh 1 /* events */); 35150fb749aSPavan Nikhilesh if (ret) 352*daeda14cSPavan Nikhilesh return -1; 35350fb749aSPavan Nikhilesh } 35450fb749aSPavan Nikhilesh return ret; 35550fb749aSPavan Nikhilesh } 35650fb749aSPavan Nikhilesh 35750fb749aSPavan Nikhilesh 35850fb749aSPavan Nikhilesh static inline int 35950fb749aSPavan Nikhilesh validate_event(struct rte_event *ev) 36050fb749aSPavan Nikhilesh { 36150fb749aSPavan Nikhilesh struct event_attr *attr; 36250fb749aSPavan Nikhilesh 36350fb749aSPavan Nikhilesh attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *); 364*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id, 36550fb749aSPavan Nikhilesh "flow_id mismatch enq=%d deq =%d", 36650fb749aSPavan Nikhilesh attr->flow_id, ev->flow_id); 367*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type, 36850fb749aSPavan Nikhilesh "event_type mismatch enq=%d deq =%d", 36950fb749aSPavan Nikhilesh attr->event_type, ev->event_type); 370*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type, 37150fb749aSPavan Nikhilesh "sub_event_type mismatch enq=%d deq =%d", 37250fb749aSPavan Nikhilesh attr->sub_event_type, ev->sub_event_type); 373*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type, 37450fb749aSPavan Nikhilesh "sched_type mismatch enq=%d deq =%d", 37550fb749aSPavan Nikhilesh attr->sched_type, ev->sched_type); 376*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id, 37750fb749aSPavan Nikhilesh "queue mismatch enq=%d deq =%d", 37850fb749aSPavan Nikhilesh attr->queue, ev->queue_id); 37950fb749aSPavan Nikhilesh return 0; 38050fb749aSPavan Nikhilesh } 38150fb749aSPavan Nikhilesh 38250fb749aSPavan Nikhilesh typedef int (*validate_event_cb)(uint32_t index, uint8_t port, 38350fb749aSPavan Nikhilesh struct rte_event *ev); 38450fb749aSPavan Nikhilesh 38550fb749aSPavan Nikhilesh static inline int 
38650fb749aSPavan Nikhilesh consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn) 38750fb749aSPavan Nikhilesh { 38850fb749aSPavan Nikhilesh int ret; 38950fb749aSPavan Nikhilesh uint16_t valid_event; 39050fb749aSPavan Nikhilesh uint32_t events = 0, forward_progress_cnt = 0, index = 0; 39150fb749aSPavan Nikhilesh struct rte_event ev; 39250fb749aSPavan Nikhilesh 39350fb749aSPavan Nikhilesh while (1) { 39450fb749aSPavan Nikhilesh if (++forward_progress_cnt > UINT16_MAX) { 395*daeda14cSPavan Nikhilesh ssovf_log_dbg("Detected deadlock"); 396*daeda14cSPavan Nikhilesh return -1; 39750fb749aSPavan Nikhilesh } 39850fb749aSPavan Nikhilesh 39950fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); 40050fb749aSPavan Nikhilesh if (!valid_event) 40150fb749aSPavan Nikhilesh continue; 40250fb749aSPavan Nikhilesh 40350fb749aSPavan Nikhilesh forward_progress_cnt = 0; 40450fb749aSPavan Nikhilesh ret = validate_event(&ev); 40550fb749aSPavan Nikhilesh if (ret) 406*daeda14cSPavan Nikhilesh return -1; 40750fb749aSPavan Nikhilesh 40850fb749aSPavan Nikhilesh if (fn != NULL) { 40950fb749aSPavan Nikhilesh ret = fn(index, port, &ev); 410*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, 41150fb749aSPavan Nikhilesh "Failed to validate test specific event"); 41250fb749aSPavan Nikhilesh } 41350fb749aSPavan Nikhilesh 41450fb749aSPavan Nikhilesh ++index; 41550fb749aSPavan Nikhilesh 41650fb749aSPavan Nikhilesh rte_pktmbuf_free(ev.mbuf); 41750fb749aSPavan Nikhilesh if (++events >= total_events) 41850fb749aSPavan Nikhilesh break; 41950fb749aSPavan Nikhilesh } 42050fb749aSPavan Nikhilesh 42150fb749aSPavan Nikhilesh return check_excess_events(port); 42250fb749aSPavan Nikhilesh } 42350fb749aSPavan Nikhilesh 42450fb749aSPavan Nikhilesh static int 42550fb749aSPavan Nikhilesh validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev) 42650fb749aSPavan Nikhilesh { 42750fb749aSPavan Nikhilesh RTE_SET_USED(port); 428*daeda14cSPavan 
Nikhilesh RTE_TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", 429*daeda14cSPavan Nikhilesh index, ev->mbuf->seqn); 43050fb749aSPavan Nikhilesh return 0; 43150fb749aSPavan Nikhilesh } 43250fb749aSPavan Nikhilesh 43350fb749aSPavan Nikhilesh static inline int 43450fb749aSPavan Nikhilesh test_simple_enqdeq(uint8_t sched_type) 43550fb749aSPavan Nikhilesh { 43650fb749aSPavan Nikhilesh int ret; 43750fb749aSPavan Nikhilesh 43850fb749aSPavan Nikhilesh ret = inject_events(0 /*flow_id */, 43950fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 44050fb749aSPavan Nikhilesh 0 /* sub_event_type */, 44150fb749aSPavan Nikhilesh sched_type, 44250fb749aSPavan Nikhilesh 0 /* queue */, 44350fb749aSPavan Nikhilesh 0 /* port */, 44450fb749aSPavan Nikhilesh MAX_EVENTS); 44550fb749aSPavan Nikhilesh if (ret) 446*daeda14cSPavan Nikhilesh return -1; 44750fb749aSPavan Nikhilesh 44850fb749aSPavan Nikhilesh return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq); 44950fb749aSPavan Nikhilesh } 45050fb749aSPavan Nikhilesh 45150fb749aSPavan Nikhilesh static int 45250fb749aSPavan Nikhilesh test_simple_enqdeq_ordered(void) 45350fb749aSPavan Nikhilesh { 45450fb749aSPavan Nikhilesh return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED); 45550fb749aSPavan Nikhilesh } 45650fb749aSPavan Nikhilesh 45750fb749aSPavan Nikhilesh static int 45850fb749aSPavan Nikhilesh test_simple_enqdeq_atomic(void) 45950fb749aSPavan Nikhilesh { 46050fb749aSPavan Nikhilesh return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC); 46150fb749aSPavan Nikhilesh } 46250fb749aSPavan Nikhilesh 46350fb749aSPavan Nikhilesh static int 46450fb749aSPavan Nikhilesh test_simple_enqdeq_parallel(void) 46550fb749aSPavan Nikhilesh { 46650fb749aSPavan Nikhilesh return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL); 46750fb749aSPavan Nikhilesh } 46850fb749aSPavan Nikhilesh 46950fb749aSPavan Nikhilesh /* 47050fb749aSPavan Nikhilesh * Generate a prescribed number of events and spread them across available 47150fb749aSPavan 
Nikhilesh * queues. On dequeue, using single event port(port 0) verify the enqueued 47250fb749aSPavan Nikhilesh * event attributes 47350fb749aSPavan Nikhilesh */ 47450fb749aSPavan Nikhilesh static int 47550fb749aSPavan Nikhilesh test_multi_queue_enq_single_port_deq(void) 47650fb749aSPavan Nikhilesh { 47750fb749aSPavan Nikhilesh int ret; 47850fb749aSPavan Nikhilesh 47950fb749aSPavan Nikhilesh ret = generate_random_events(MAX_EVENTS); 48050fb749aSPavan Nikhilesh if (ret) 481*daeda14cSPavan Nikhilesh return -1; 48250fb749aSPavan Nikhilesh 48350fb749aSPavan Nikhilesh return consume_events(0 /* port */, MAX_EVENTS, NULL); 48450fb749aSPavan Nikhilesh } 48550fb749aSPavan Nikhilesh 48650fb749aSPavan Nikhilesh /* 48750fb749aSPavan Nikhilesh * Inject 0..MAX_EVENTS events over 0..queue_count with modulus 48850fb749aSPavan Nikhilesh * operation 48950fb749aSPavan Nikhilesh * 49050fb749aSPavan Nikhilesh * For example, Inject 32 events over 0..7 queues 49150fb749aSPavan Nikhilesh * enqueue events 0, 8, 16, 24 in queue 0 49250fb749aSPavan Nikhilesh * enqueue events 1, 9, 17, 25 in queue 1 49350fb749aSPavan Nikhilesh * .. 49450fb749aSPavan Nikhilesh * .. 
49550fb749aSPavan Nikhilesh * enqueue events 7, 15, 23, 31 in queue 7 49650fb749aSPavan Nikhilesh * 49750fb749aSPavan Nikhilesh * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31 49850fb749aSPavan Nikhilesh * order from queue0(highest priority) to queue7(lowest_priority) 49950fb749aSPavan Nikhilesh */ 50050fb749aSPavan Nikhilesh static int 50150fb749aSPavan Nikhilesh validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev) 50250fb749aSPavan Nikhilesh { 50350fb749aSPavan Nikhilesh uint32_t queue_count; 504*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 50550fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 50650fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 50750fb749aSPavan Nikhilesh uint32_t range = MAX_EVENTS / queue_count; 50850fb749aSPavan Nikhilesh uint32_t expected_val = (index % range) * queue_count; 50950fb749aSPavan Nikhilesh 51050fb749aSPavan Nikhilesh expected_val += ev->queue_id; 51150fb749aSPavan Nikhilesh RTE_SET_USED(port); 512*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val, 51350fb749aSPavan Nikhilesh "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d", 51450fb749aSPavan Nikhilesh ev->mbuf->seqn, index, expected_val, range, 51550fb749aSPavan Nikhilesh queue_count, MAX_EVENTS); 51650fb749aSPavan Nikhilesh return 0; 51750fb749aSPavan Nikhilesh } 51850fb749aSPavan Nikhilesh 51950fb749aSPavan Nikhilesh static int 52050fb749aSPavan Nikhilesh test_multi_queue_priority(void) 52150fb749aSPavan Nikhilesh { 52250fb749aSPavan Nikhilesh uint8_t queue; 52350fb749aSPavan Nikhilesh struct rte_mbuf *m; 52450fb749aSPavan Nikhilesh int i, max_evts_roundoff; 52550fb749aSPavan Nikhilesh 52650fb749aSPavan Nikhilesh /* See validate_queue_priority() comments for priority validate logic */ 52750fb749aSPavan Nikhilesh uint32_t queue_count; 528*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 
52950fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 53050fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 53150fb749aSPavan Nikhilesh max_evts_roundoff = MAX_EVENTS / queue_count; 53250fb749aSPavan Nikhilesh max_evts_roundoff *= queue_count; 53350fb749aSPavan Nikhilesh 53450fb749aSPavan Nikhilesh for (i = 0; i < max_evts_roundoff; i++) { 53550fb749aSPavan Nikhilesh struct rte_event ev = {.event = 0, .u64 = 0}; 53650fb749aSPavan Nikhilesh 53750fb749aSPavan Nikhilesh m = rte_pktmbuf_alloc(eventdev_test_mempool); 538*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed"); 53950fb749aSPavan Nikhilesh 54050fb749aSPavan Nikhilesh m->seqn = i; 54150fb749aSPavan Nikhilesh queue = i % queue_count; 54250fb749aSPavan Nikhilesh update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU, 54350fb749aSPavan Nikhilesh 0, RTE_SCHED_TYPE_PARALLEL, queue, 0); 54450fb749aSPavan Nikhilesh rte_event_enqueue_burst(evdev, 0, &ev, 1); 54550fb749aSPavan Nikhilesh } 54650fb749aSPavan Nikhilesh 54750fb749aSPavan Nikhilesh return consume_events(0, max_evts_roundoff, validate_queue_priority); 54850fb749aSPavan Nikhilesh } 54950fb749aSPavan Nikhilesh 55050fb749aSPavan Nikhilesh static int 55150fb749aSPavan Nikhilesh worker_multi_port_fn(void *arg) 55250fb749aSPavan Nikhilesh { 55350fb749aSPavan Nikhilesh struct test_core_param *param = arg; 55450fb749aSPavan Nikhilesh struct rte_event ev; 55550fb749aSPavan Nikhilesh uint16_t valid_event; 55650fb749aSPavan Nikhilesh uint8_t port = param->port; 55750fb749aSPavan Nikhilesh rte_atomic32_t *total_events = param->total_events; 55850fb749aSPavan Nikhilesh int ret; 55950fb749aSPavan Nikhilesh 56050fb749aSPavan Nikhilesh while (rte_atomic32_read(total_events) > 0) { 56150fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); 56250fb749aSPavan Nikhilesh if (!valid_event) 56350fb749aSPavan Nikhilesh continue; 56450fb749aSPavan Nikhilesh 56550fb749aSPavan Nikhilesh ret = 
validate_event(&ev); 566*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event"); 56750fb749aSPavan Nikhilesh rte_pktmbuf_free(ev.mbuf); 56850fb749aSPavan Nikhilesh rte_atomic32_sub(total_events, 1); 56950fb749aSPavan Nikhilesh } 57050fb749aSPavan Nikhilesh return 0; 57150fb749aSPavan Nikhilesh } 57250fb749aSPavan Nikhilesh 57350fb749aSPavan Nikhilesh static inline int 57450fb749aSPavan Nikhilesh wait_workers_to_join(int lcore, const rte_atomic32_t *count) 57550fb749aSPavan Nikhilesh { 57650fb749aSPavan Nikhilesh uint64_t cycles, print_cycles; 577*daeda14cSPavan Nikhilesh RTE_SET_USED(count); 57850fb749aSPavan Nikhilesh 57950fb749aSPavan Nikhilesh print_cycles = cycles = rte_get_timer_cycles(); 58050fb749aSPavan Nikhilesh while (rte_eal_get_lcore_state(lcore) != FINISHED) { 58150fb749aSPavan Nikhilesh uint64_t new_cycles = rte_get_timer_cycles(); 58250fb749aSPavan Nikhilesh 58350fb749aSPavan Nikhilesh if (new_cycles - print_cycles > rte_get_timer_hz()) { 584*daeda14cSPavan Nikhilesh ssovf_log_dbg("\r%s: events %d", __func__, 58550fb749aSPavan Nikhilesh rte_atomic32_read(count)); 58650fb749aSPavan Nikhilesh print_cycles = new_cycles; 58750fb749aSPavan Nikhilesh } 58850fb749aSPavan Nikhilesh if (new_cycles - cycles > rte_get_timer_hz() * 10) { 589*daeda14cSPavan Nikhilesh ssovf_log_dbg( 590*daeda14cSPavan Nikhilesh "%s: No schedules for seconds, deadlock (%d)", 59150fb749aSPavan Nikhilesh __func__, 59250fb749aSPavan Nikhilesh rte_atomic32_read(count)); 59350fb749aSPavan Nikhilesh rte_event_dev_dump(evdev, stdout); 59450fb749aSPavan Nikhilesh cycles = new_cycles; 595*daeda14cSPavan Nikhilesh return -1; 59650fb749aSPavan Nikhilesh } 59750fb749aSPavan Nikhilesh } 59850fb749aSPavan Nikhilesh rte_eal_mp_wait_lcore(); 599*daeda14cSPavan Nikhilesh return 0; 60050fb749aSPavan Nikhilesh } 60150fb749aSPavan Nikhilesh 60250fb749aSPavan Nikhilesh 60350fb749aSPavan Nikhilesh static inline int 60450fb749aSPavan Nikhilesh launch_workers_and_wait(int 
(*master_worker)(void *), 60550fb749aSPavan Nikhilesh int (*slave_workers)(void *), uint32_t total_events, 60650fb749aSPavan Nikhilesh uint8_t nb_workers, uint8_t sched_type) 60750fb749aSPavan Nikhilesh { 60850fb749aSPavan Nikhilesh uint8_t port = 0; 60950fb749aSPavan Nikhilesh int w_lcore; 61050fb749aSPavan Nikhilesh int ret; 61150fb749aSPavan Nikhilesh struct test_core_param *param; 61250fb749aSPavan Nikhilesh rte_atomic32_t atomic_total_events; 61350fb749aSPavan Nikhilesh uint64_t dequeue_tmo_ticks; 61450fb749aSPavan Nikhilesh 61550fb749aSPavan Nikhilesh if (!nb_workers) 61650fb749aSPavan Nikhilesh return 0; 61750fb749aSPavan Nikhilesh 61850fb749aSPavan Nikhilesh rte_atomic32_set(&atomic_total_events, total_events); 61950fb749aSPavan Nikhilesh seqn_list_init(); 62050fb749aSPavan Nikhilesh 62150fb749aSPavan Nikhilesh param = malloc(sizeof(struct test_core_param) * nb_workers); 62250fb749aSPavan Nikhilesh if (!param) 623*daeda14cSPavan Nikhilesh return -1; 62450fb749aSPavan Nikhilesh 62550fb749aSPavan Nikhilesh ret = rte_event_dequeue_timeout_ticks(evdev, 62650fb749aSPavan Nikhilesh rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks); 62750fb749aSPavan Nikhilesh if (ret) 628*daeda14cSPavan Nikhilesh return -1; 62950fb749aSPavan Nikhilesh 63050fb749aSPavan Nikhilesh param[0].total_events = &atomic_total_events; 63150fb749aSPavan Nikhilesh param[0].sched_type = sched_type; 63250fb749aSPavan Nikhilesh param[0].port = 0; 63350fb749aSPavan Nikhilesh param[0].dequeue_tmo_ticks = dequeue_tmo_ticks; 63450fb749aSPavan Nikhilesh rte_smp_wmb(); 63550fb749aSPavan Nikhilesh 63650fb749aSPavan Nikhilesh w_lcore = rte_get_next_lcore( 63750fb749aSPavan Nikhilesh /* start core */ -1, 63850fb749aSPavan Nikhilesh /* skip master */ 1, 63950fb749aSPavan Nikhilesh /* wrap */ 0); 64050fb749aSPavan Nikhilesh rte_eal_remote_launch(master_worker, ¶m[0], w_lcore); 64150fb749aSPavan Nikhilesh 64250fb749aSPavan Nikhilesh for (port = 1; port < nb_workers; port++) { 64350fb749aSPavan Nikhilesh 
param[port].total_events = &atomic_total_events; 64450fb749aSPavan Nikhilesh param[port].sched_type = sched_type; 64550fb749aSPavan Nikhilesh param[port].port = port; 64650fb749aSPavan Nikhilesh param[port].dequeue_tmo_ticks = dequeue_tmo_ticks; 64750fb749aSPavan Nikhilesh rte_smp_wmb(); 64850fb749aSPavan Nikhilesh w_lcore = rte_get_next_lcore(w_lcore, 1, 0); 64950fb749aSPavan Nikhilesh rte_eal_remote_launch(slave_workers, ¶m[port], w_lcore); 65050fb749aSPavan Nikhilesh } 65150fb749aSPavan Nikhilesh 65250fb749aSPavan Nikhilesh ret = wait_workers_to_join(w_lcore, &atomic_total_events); 65350fb749aSPavan Nikhilesh free(param); 65450fb749aSPavan Nikhilesh return ret; 65550fb749aSPavan Nikhilesh } 65650fb749aSPavan Nikhilesh 65750fb749aSPavan Nikhilesh /* 65850fb749aSPavan Nikhilesh * Generate a prescribed number of events and spread them across available 65950fb749aSPavan Nikhilesh * queues. Dequeue the events through multiple ports and verify the enqueued 66050fb749aSPavan Nikhilesh * event attributes 66150fb749aSPavan Nikhilesh */ 66250fb749aSPavan Nikhilesh static int 66350fb749aSPavan Nikhilesh test_multi_queue_enq_multi_port_deq(void) 66450fb749aSPavan Nikhilesh { 66550fb749aSPavan Nikhilesh const unsigned int total_events = MAX_EVENTS; 66650fb749aSPavan Nikhilesh uint32_t nr_ports; 66750fb749aSPavan Nikhilesh int ret; 66850fb749aSPavan Nikhilesh 66950fb749aSPavan Nikhilesh ret = generate_random_events(total_events); 67050fb749aSPavan Nikhilesh if (ret) 671*daeda14cSPavan Nikhilesh return -1; 67250fb749aSPavan Nikhilesh 673*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 67450fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 67550fb749aSPavan Nikhilesh &nr_ports), "Port count get failed"); 67650fb749aSPavan Nikhilesh nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1); 67750fb749aSPavan Nikhilesh 67850fb749aSPavan Nikhilesh if (!nr_ports) { 679*daeda14cSPavan Nikhilesh ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", 
__func__, 68050fb749aSPavan Nikhilesh nr_ports, rte_lcore_count() - 1); 681*daeda14cSPavan Nikhilesh return 0; 68250fb749aSPavan Nikhilesh } 68350fb749aSPavan Nikhilesh 68450fb749aSPavan Nikhilesh return launch_workers_and_wait(worker_multi_port_fn, 68550fb749aSPavan Nikhilesh worker_multi_port_fn, total_events, 68650fb749aSPavan Nikhilesh nr_ports, 0xff /* invalid */); 68750fb749aSPavan Nikhilesh } 68850fb749aSPavan Nikhilesh 68950fb749aSPavan Nikhilesh static int 69050fb749aSPavan Nikhilesh validate_queue_to_port_single_link(uint32_t index, uint8_t port, 69150fb749aSPavan Nikhilesh struct rte_event *ev) 69250fb749aSPavan Nikhilesh { 69350fb749aSPavan Nikhilesh RTE_SET_USED(index); 694*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(port, ev->queue_id, 69550fb749aSPavan Nikhilesh "queue mismatch enq=%d deq =%d", 69650fb749aSPavan Nikhilesh port, ev->queue_id); 69750fb749aSPavan Nikhilesh return 0; 69850fb749aSPavan Nikhilesh } 69950fb749aSPavan Nikhilesh 70050fb749aSPavan Nikhilesh /* 70150fb749aSPavan Nikhilesh * Link queue x to port x and check correctness of link by checking 70250fb749aSPavan Nikhilesh * queue_id == x on dequeue on the specific port x 70350fb749aSPavan Nikhilesh */ 70450fb749aSPavan Nikhilesh static int 70550fb749aSPavan Nikhilesh test_queue_to_port_single_link(void) 70650fb749aSPavan Nikhilesh { 70750fb749aSPavan Nikhilesh int i, nr_links, ret; 70850fb749aSPavan Nikhilesh 70950fb749aSPavan Nikhilesh uint32_t port_count; 710*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 71150fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 71250fb749aSPavan Nikhilesh &port_count), "Port count get failed"); 71350fb749aSPavan Nikhilesh 71450fb749aSPavan Nikhilesh /* Unlink all connections that created in eventdev_setup */ 71550fb749aSPavan Nikhilesh for (i = 0; i < (int)port_count; i++) { 71650fb749aSPavan Nikhilesh ret = rte_event_port_unlink(evdev, i, NULL, 0); 717*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(ret >= 0, 
718*daeda14cSPavan Nikhilesh "Failed to unlink all queues port=%d", i); 71950fb749aSPavan Nikhilesh } 72050fb749aSPavan Nikhilesh 72150fb749aSPavan Nikhilesh uint32_t queue_count; 722*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 72350fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 72450fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 72550fb749aSPavan Nikhilesh 72650fb749aSPavan Nikhilesh nr_links = RTE_MIN(port_count, queue_count); 72750fb749aSPavan Nikhilesh const unsigned int total_events = MAX_EVENTS / nr_links; 72850fb749aSPavan Nikhilesh 72950fb749aSPavan Nikhilesh /* Link queue x to port x and inject events to queue x through port x */ 73050fb749aSPavan Nikhilesh for (i = 0; i < nr_links; i++) { 73150fb749aSPavan Nikhilesh uint8_t queue = (uint8_t)i; 73250fb749aSPavan Nikhilesh 73350fb749aSPavan Nikhilesh ret = rte_event_port_link(evdev, i, &queue, NULL, 1); 734*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i); 73550fb749aSPavan Nikhilesh 73650fb749aSPavan Nikhilesh ret = inject_events( 73750fb749aSPavan Nikhilesh 0x100 /*flow_id */, 73850fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 73950fb749aSPavan Nikhilesh rte_rand() % 256 /* sub_event_type */, 74050fb749aSPavan Nikhilesh rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1), 74150fb749aSPavan Nikhilesh queue /* queue */, 74250fb749aSPavan Nikhilesh i /* port */, 74350fb749aSPavan Nikhilesh total_events /* events */); 74450fb749aSPavan Nikhilesh if (ret) 745*daeda14cSPavan Nikhilesh return -1; 74650fb749aSPavan Nikhilesh } 74750fb749aSPavan Nikhilesh 74850fb749aSPavan Nikhilesh /* Verify the events generated from correct queue */ 74950fb749aSPavan Nikhilesh for (i = 0; i < nr_links; i++) { 75050fb749aSPavan Nikhilesh ret = consume_events(i /* port */, total_events, 75150fb749aSPavan Nikhilesh validate_queue_to_port_single_link); 75250fb749aSPavan Nikhilesh if (ret) 753*daeda14cSPavan Nikhilesh return -1; 
75450fb749aSPavan Nikhilesh } 75550fb749aSPavan Nikhilesh 756*daeda14cSPavan Nikhilesh return 0; 75750fb749aSPavan Nikhilesh } 75850fb749aSPavan Nikhilesh 75950fb749aSPavan Nikhilesh static int 76050fb749aSPavan Nikhilesh validate_queue_to_port_multi_link(uint32_t index, uint8_t port, 76150fb749aSPavan Nikhilesh struct rte_event *ev) 76250fb749aSPavan Nikhilesh { 76350fb749aSPavan Nikhilesh RTE_SET_USED(index); 764*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1), 76550fb749aSPavan Nikhilesh "queue mismatch enq=%d deq =%d", 76650fb749aSPavan Nikhilesh port, ev->queue_id); 76750fb749aSPavan Nikhilesh return 0; 76850fb749aSPavan Nikhilesh } 76950fb749aSPavan Nikhilesh 77050fb749aSPavan Nikhilesh /* 77150fb749aSPavan Nikhilesh * Link all even number of queues to port 0 and all odd number of queues to 77250fb749aSPavan Nikhilesh * port 1 and verify the link connection on dequeue 77350fb749aSPavan Nikhilesh */ 77450fb749aSPavan Nikhilesh static int 77550fb749aSPavan Nikhilesh test_queue_to_port_multi_link(void) 77650fb749aSPavan Nikhilesh { 77750fb749aSPavan Nikhilesh int ret, port0_events = 0, port1_events = 0; 77850fb749aSPavan Nikhilesh uint8_t queue, port; 77950fb749aSPavan Nikhilesh uint32_t nr_queues = 0; 78050fb749aSPavan Nikhilesh uint32_t nr_ports = 0; 78150fb749aSPavan Nikhilesh 782*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 78350fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 78450fb749aSPavan Nikhilesh &nr_queues), "Queue count get failed"); 78550fb749aSPavan Nikhilesh 786*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 78750fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 78850fb749aSPavan Nikhilesh &nr_queues), "Queue count get failed"); 789*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 79050fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 79150fb749aSPavan Nikhilesh &nr_ports), "Port count get failed"); 
79250fb749aSPavan Nikhilesh 79350fb749aSPavan Nikhilesh if (nr_ports < 2) { 794*daeda14cSPavan Nikhilesh ssovf_log_dbg("%s: Not enough ports to test ports=%d", 79550fb749aSPavan Nikhilesh __func__, nr_ports); 796*daeda14cSPavan Nikhilesh return 0; 79750fb749aSPavan Nikhilesh } 79850fb749aSPavan Nikhilesh 79950fb749aSPavan Nikhilesh /* Unlink all connections that created in eventdev_setup */ 80050fb749aSPavan Nikhilesh for (port = 0; port < nr_ports; port++) { 80150fb749aSPavan Nikhilesh ret = rte_event_port_unlink(evdev, port, NULL, 0); 802*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", 80350fb749aSPavan Nikhilesh port); 80450fb749aSPavan Nikhilesh } 80550fb749aSPavan Nikhilesh 80650fb749aSPavan Nikhilesh const unsigned int total_events = MAX_EVENTS / nr_queues; 80750fb749aSPavan Nikhilesh 80850fb749aSPavan Nikhilesh /* Link all even number of queues to port0 and odd numbers to port 1*/ 80950fb749aSPavan Nikhilesh for (queue = 0; queue < nr_queues; queue++) { 81050fb749aSPavan Nikhilesh port = queue & 0x1; 81150fb749aSPavan Nikhilesh ret = rte_event_port_link(evdev, port, &queue, NULL, 1); 812*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d", 81350fb749aSPavan Nikhilesh queue, port); 81450fb749aSPavan Nikhilesh 81550fb749aSPavan Nikhilesh ret = inject_events( 81650fb749aSPavan Nikhilesh 0x100 /*flow_id */, 81750fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 81850fb749aSPavan Nikhilesh rte_rand() % 256 /* sub_event_type */, 81950fb749aSPavan Nikhilesh rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1), 82050fb749aSPavan Nikhilesh queue /* queue */, 82150fb749aSPavan Nikhilesh port /* port */, 82250fb749aSPavan Nikhilesh total_events /* events */); 82350fb749aSPavan Nikhilesh if (ret) 824*daeda14cSPavan Nikhilesh return -1; 82550fb749aSPavan Nikhilesh 82650fb749aSPavan Nikhilesh if (port == 0) 82750fb749aSPavan Nikhilesh port0_events += total_events; 82850fb749aSPavan Nikhilesh 
else 82950fb749aSPavan Nikhilesh port1_events += total_events; 83050fb749aSPavan Nikhilesh } 83150fb749aSPavan Nikhilesh 83250fb749aSPavan Nikhilesh ret = consume_events(0 /* port */, port0_events, 83350fb749aSPavan Nikhilesh validate_queue_to_port_multi_link); 83450fb749aSPavan Nikhilesh if (ret) 835*daeda14cSPavan Nikhilesh return -1; 83650fb749aSPavan Nikhilesh ret = consume_events(1 /* port */, port1_events, 83750fb749aSPavan Nikhilesh validate_queue_to_port_multi_link); 83850fb749aSPavan Nikhilesh if (ret) 839*daeda14cSPavan Nikhilesh return -1; 84050fb749aSPavan Nikhilesh 841*daeda14cSPavan Nikhilesh return 0; 84250fb749aSPavan Nikhilesh } 84350fb749aSPavan Nikhilesh 84450fb749aSPavan Nikhilesh static int 84550fb749aSPavan Nikhilesh worker_flow_based_pipeline(void *arg) 84650fb749aSPavan Nikhilesh { 84750fb749aSPavan Nikhilesh struct test_core_param *param = arg; 84850fb749aSPavan Nikhilesh struct rte_event ev; 84950fb749aSPavan Nikhilesh uint16_t valid_event; 85050fb749aSPavan Nikhilesh uint8_t port = param->port; 85150fb749aSPavan Nikhilesh uint8_t new_sched_type = param->sched_type; 85250fb749aSPavan Nikhilesh rte_atomic32_t *total_events = param->total_events; 85350fb749aSPavan Nikhilesh uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks; 85450fb749aSPavan Nikhilesh 85550fb749aSPavan Nikhilesh while (rte_atomic32_read(total_events) > 0) { 85650fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 85750fb749aSPavan Nikhilesh dequeue_tmo_ticks); 85850fb749aSPavan Nikhilesh if (!valid_event) 85950fb749aSPavan Nikhilesh continue; 86050fb749aSPavan Nikhilesh 86150fb749aSPavan Nikhilesh /* Events from stage 0 */ 86250fb749aSPavan Nikhilesh if (ev.sub_event_type == 0) { 86350fb749aSPavan Nikhilesh /* Move to atomic flow to maintain the ordering */ 86450fb749aSPavan Nikhilesh ev.flow_id = 0x2; 86550fb749aSPavan Nikhilesh ev.event_type = RTE_EVENT_TYPE_CPU; 86650fb749aSPavan Nikhilesh ev.sub_event_type = 1; /* stage 1 */ 
86750fb749aSPavan Nikhilesh ev.sched_type = new_sched_type; 86850fb749aSPavan Nikhilesh ev.op = RTE_EVENT_OP_FORWARD; 86950fb749aSPavan Nikhilesh rte_event_enqueue_burst(evdev, port, &ev, 1); 87050fb749aSPavan Nikhilesh } else if (ev.sub_event_type == 1) { /* Events from stage 1*/ 871*daeda14cSPavan Nikhilesh if (seqn_list_update(ev.mbuf->seqn) == 0) { 87250fb749aSPavan Nikhilesh rte_pktmbuf_free(ev.mbuf); 87350fb749aSPavan Nikhilesh rte_atomic32_sub(total_events, 1); 87450fb749aSPavan Nikhilesh } else { 875*daeda14cSPavan Nikhilesh ssovf_log_dbg("Failed to update seqn_list"); 876*daeda14cSPavan Nikhilesh return -1; 87750fb749aSPavan Nikhilesh } 87850fb749aSPavan Nikhilesh } else { 879*daeda14cSPavan Nikhilesh ssovf_log_dbg("Invalid ev.sub_event_type = %d", 88050fb749aSPavan Nikhilesh ev.sub_event_type); 881*daeda14cSPavan Nikhilesh return -1; 88250fb749aSPavan Nikhilesh } 88350fb749aSPavan Nikhilesh } 88450fb749aSPavan Nikhilesh return 0; 88550fb749aSPavan Nikhilesh } 88650fb749aSPavan Nikhilesh 88750fb749aSPavan Nikhilesh static int 88850fb749aSPavan Nikhilesh test_multiport_flow_sched_type_test(uint8_t in_sched_type, 88950fb749aSPavan Nikhilesh uint8_t out_sched_type) 89050fb749aSPavan Nikhilesh { 89150fb749aSPavan Nikhilesh const unsigned int total_events = MAX_EVENTS; 89250fb749aSPavan Nikhilesh uint32_t nr_ports; 89350fb749aSPavan Nikhilesh int ret; 89450fb749aSPavan Nikhilesh 895*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 89650fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 89750fb749aSPavan Nikhilesh &nr_ports), "Port count get failed"); 89850fb749aSPavan Nikhilesh nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1); 89950fb749aSPavan Nikhilesh 90050fb749aSPavan Nikhilesh if (!nr_ports) { 901*daeda14cSPavan Nikhilesh ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__, 90250fb749aSPavan Nikhilesh nr_ports, rte_lcore_count() - 1); 903*daeda14cSPavan Nikhilesh return 0; 90450fb749aSPavan Nikhilesh } 
90550fb749aSPavan Nikhilesh 90650fb749aSPavan Nikhilesh /* Injects events with m->seqn=0 to total_events */ 90750fb749aSPavan Nikhilesh ret = inject_events( 90850fb749aSPavan Nikhilesh 0x1 /*flow_id */, 90950fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 91050fb749aSPavan Nikhilesh 0 /* sub_event_type (stage 0) */, 91150fb749aSPavan Nikhilesh in_sched_type, 91250fb749aSPavan Nikhilesh 0 /* queue */, 91350fb749aSPavan Nikhilesh 0 /* port */, 91450fb749aSPavan Nikhilesh total_events /* events */); 91550fb749aSPavan Nikhilesh if (ret) 916*daeda14cSPavan Nikhilesh return -1; 91750fb749aSPavan Nikhilesh 91850fb749aSPavan Nikhilesh ret = launch_workers_and_wait(worker_flow_based_pipeline, 91950fb749aSPavan Nikhilesh worker_flow_based_pipeline, 92050fb749aSPavan Nikhilesh total_events, nr_ports, out_sched_type); 92150fb749aSPavan Nikhilesh if (ret) 922*daeda14cSPavan Nikhilesh return -1; 92350fb749aSPavan Nikhilesh 92450fb749aSPavan Nikhilesh if (in_sched_type != RTE_SCHED_TYPE_PARALLEL && 92550fb749aSPavan Nikhilesh out_sched_type == RTE_SCHED_TYPE_ATOMIC) { 92650fb749aSPavan Nikhilesh /* Check the events order maintained or not */ 92750fb749aSPavan Nikhilesh return seqn_list_check(total_events); 92850fb749aSPavan Nikhilesh } 929*daeda14cSPavan Nikhilesh return 0; 93050fb749aSPavan Nikhilesh } 93150fb749aSPavan Nikhilesh 93250fb749aSPavan Nikhilesh 93350fb749aSPavan Nikhilesh /* Multi port ordered to atomic transaction */ 93450fb749aSPavan Nikhilesh static int 93550fb749aSPavan Nikhilesh test_multi_port_flow_ordered_to_atomic(void) 93650fb749aSPavan Nikhilesh { 93750fb749aSPavan Nikhilesh /* Ingress event order test */ 93850fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED, 93950fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 94050fb749aSPavan Nikhilesh } 94150fb749aSPavan Nikhilesh 94250fb749aSPavan Nikhilesh static int 94350fb749aSPavan Nikhilesh test_multi_port_flow_ordered_to_ordered(void) 94450fb749aSPavan Nikhilesh 
{ 94550fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED, 94650fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 94750fb749aSPavan Nikhilesh } 94850fb749aSPavan Nikhilesh 94950fb749aSPavan Nikhilesh static int 95050fb749aSPavan Nikhilesh test_multi_port_flow_ordered_to_parallel(void) 95150fb749aSPavan Nikhilesh { 95250fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED, 95350fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 95450fb749aSPavan Nikhilesh } 95550fb749aSPavan Nikhilesh 95650fb749aSPavan Nikhilesh static int 95750fb749aSPavan Nikhilesh test_multi_port_flow_atomic_to_atomic(void) 95850fb749aSPavan Nikhilesh { 95950fb749aSPavan Nikhilesh /* Ingress event order test */ 96050fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 96150fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 96250fb749aSPavan Nikhilesh } 96350fb749aSPavan Nikhilesh 96450fb749aSPavan Nikhilesh static int 96550fb749aSPavan Nikhilesh test_multi_port_flow_atomic_to_ordered(void) 96650fb749aSPavan Nikhilesh { 96750fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 96850fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 96950fb749aSPavan Nikhilesh } 97050fb749aSPavan Nikhilesh 97150fb749aSPavan Nikhilesh static int 97250fb749aSPavan Nikhilesh test_multi_port_flow_atomic_to_parallel(void) 97350fb749aSPavan Nikhilesh { 97450fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 97550fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 97650fb749aSPavan Nikhilesh } 97750fb749aSPavan Nikhilesh 97850fb749aSPavan Nikhilesh static int 97950fb749aSPavan Nikhilesh test_multi_port_flow_parallel_to_atomic(void) 98050fb749aSPavan Nikhilesh { 98150fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 98250fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 98350fb749aSPavan Nikhilesh } 98450fb749aSPavan 
Nikhilesh 98550fb749aSPavan Nikhilesh static int 98650fb749aSPavan Nikhilesh test_multi_port_flow_parallel_to_ordered(void) 98750fb749aSPavan Nikhilesh { 98850fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 98950fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 99050fb749aSPavan Nikhilesh } 99150fb749aSPavan Nikhilesh 99250fb749aSPavan Nikhilesh static int 99350fb749aSPavan Nikhilesh test_multi_port_flow_parallel_to_parallel(void) 99450fb749aSPavan Nikhilesh { 99550fb749aSPavan Nikhilesh return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 99650fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 99750fb749aSPavan Nikhilesh } 99850fb749aSPavan Nikhilesh 99950fb749aSPavan Nikhilesh static int 100050fb749aSPavan Nikhilesh worker_group_based_pipeline(void *arg) 100150fb749aSPavan Nikhilesh { 100250fb749aSPavan Nikhilesh struct test_core_param *param = arg; 100350fb749aSPavan Nikhilesh struct rte_event ev; 100450fb749aSPavan Nikhilesh uint16_t valid_event; 100550fb749aSPavan Nikhilesh uint8_t port = param->port; 100650fb749aSPavan Nikhilesh uint8_t new_sched_type = param->sched_type; 100750fb749aSPavan Nikhilesh rte_atomic32_t *total_events = param->total_events; 100850fb749aSPavan Nikhilesh uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks; 100950fb749aSPavan Nikhilesh 101050fb749aSPavan Nikhilesh while (rte_atomic32_read(total_events) > 0) { 101150fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 101250fb749aSPavan Nikhilesh dequeue_tmo_ticks); 101350fb749aSPavan Nikhilesh if (!valid_event) 101450fb749aSPavan Nikhilesh continue; 101550fb749aSPavan Nikhilesh 101650fb749aSPavan Nikhilesh /* Events from stage 0(group 0) */ 101750fb749aSPavan Nikhilesh if (ev.queue_id == 0) { 101850fb749aSPavan Nikhilesh /* Move to atomic flow to maintain the ordering */ 101950fb749aSPavan Nikhilesh ev.flow_id = 0x2; 102050fb749aSPavan Nikhilesh ev.event_type = RTE_EVENT_TYPE_CPU; 102150fb749aSPavan 
Nikhilesh ev.sched_type = new_sched_type; 102250fb749aSPavan Nikhilesh ev.queue_id = 1; /* Stage 1*/ 102350fb749aSPavan Nikhilesh ev.op = RTE_EVENT_OP_FORWARD; 102450fb749aSPavan Nikhilesh rte_event_enqueue_burst(evdev, port, &ev, 1); 102550fb749aSPavan Nikhilesh } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/ 1026*daeda14cSPavan Nikhilesh if (seqn_list_update(ev.mbuf->seqn) == 0) { 102750fb749aSPavan Nikhilesh rte_pktmbuf_free(ev.mbuf); 102850fb749aSPavan Nikhilesh rte_atomic32_sub(total_events, 1); 102950fb749aSPavan Nikhilesh } else { 1030*daeda14cSPavan Nikhilesh ssovf_log_dbg("Failed to update seqn_list"); 1031*daeda14cSPavan Nikhilesh return -1; 103250fb749aSPavan Nikhilesh } 103350fb749aSPavan Nikhilesh } else { 1034*daeda14cSPavan Nikhilesh ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id); 1035*daeda14cSPavan Nikhilesh return -1; 103650fb749aSPavan Nikhilesh } 103750fb749aSPavan Nikhilesh } 103850fb749aSPavan Nikhilesh 103950fb749aSPavan Nikhilesh 104050fb749aSPavan Nikhilesh return 0; 104150fb749aSPavan Nikhilesh } 104250fb749aSPavan Nikhilesh 104350fb749aSPavan Nikhilesh static int 104450fb749aSPavan Nikhilesh test_multiport_queue_sched_type_test(uint8_t in_sched_type, 104550fb749aSPavan Nikhilesh uint8_t out_sched_type) 104650fb749aSPavan Nikhilesh { 104750fb749aSPavan Nikhilesh const unsigned int total_events = MAX_EVENTS; 104850fb749aSPavan Nikhilesh uint32_t nr_ports; 104950fb749aSPavan Nikhilesh int ret; 105050fb749aSPavan Nikhilesh 1051*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 105250fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 105350fb749aSPavan Nikhilesh &nr_ports), "Port count get failed"); 105450fb749aSPavan Nikhilesh 105550fb749aSPavan Nikhilesh nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1); 105650fb749aSPavan Nikhilesh 105750fb749aSPavan Nikhilesh uint32_t queue_count; 1058*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 105950fb749aSPavan 
Nikhilesh RTE_EVENT_DEV_ATTR_QUEUE_COUNT, 106050fb749aSPavan Nikhilesh &queue_count), "Queue count get failed"); 106150fb749aSPavan Nikhilesh if (queue_count < 2 || !nr_ports) { 1062*daeda14cSPavan Nikhilesh ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d", 106350fb749aSPavan Nikhilesh __func__, queue_count, nr_ports, 106450fb749aSPavan Nikhilesh rte_lcore_count() - 1); 1065*daeda14cSPavan Nikhilesh return 0; 106650fb749aSPavan Nikhilesh } 106750fb749aSPavan Nikhilesh 106850fb749aSPavan Nikhilesh /* Injects events with m->seqn=0 to total_events */ 106950fb749aSPavan Nikhilesh ret = inject_events( 107050fb749aSPavan Nikhilesh 0x1 /*flow_id */, 107150fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 107250fb749aSPavan Nikhilesh 0 /* sub_event_type (stage 0) */, 107350fb749aSPavan Nikhilesh in_sched_type, 107450fb749aSPavan Nikhilesh 0 /* queue */, 107550fb749aSPavan Nikhilesh 0 /* port */, 107650fb749aSPavan Nikhilesh total_events /* events */); 107750fb749aSPavan Nikhilesh if (ret) 1078*daeda14cSPavan Nikhilesh return -1; 107950fb749aSPavan Nikhilesh 108050fb749aSPavan Nikhilesh ret = launch_workers_and_wait(worker_group_based_pipeline, 108150fb749aSPavan Nikhilesh worker_group_based_pipeline, 108250fb749aSPavan Nikhilesh total_events, nr_ports, out_sched_type); 108350fb749aSPavan Nikhilesh if (ret) 1084*daeda14cSPavan Nikhilesh return -1; 108550fb749aSPavan Nikhilesh 108650fb749aSPavan Nikhilesh if (in_sched_type != RTE_SCHED_TYPE_PARALLEL && 108750fb749aSPavan Nikhilesh out_sched_type == RTE_SCHED_TYPE_ATOMIC) { 108850fb749aSPavan Nikhilesh /* Check the events order maintained or not */ 108950fb749aSPavan Nikhilesh return seqn_list_check(total_events); 109050fb749aSPavan Nikhilesh } 1091*daeda14cSPavan Nikhilesh return 0; 109250fb749aSPavan Nikhilesh } 109350fb749aSPavan Nikhilesh 109450fb749aSPavan Nikhilesh static int 109550fb749aSPavan Nikhilesh test_multi_port_queue_ordered_to_atomic(void) 109650fb749aSPavan Nikhilesh { 
109750fb749aSPavan Nikhilesh /* Ingress event order test */ 109850fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED, 109950fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 110050fb749aSPavan Nikhilesh } 110150fb749aSPavan Nikhilesh 110250fb749aSPavan Nikhilesh static int 110350fb749aSPavan Nikhilesh test_multi_port_queue_ordered_to_ordered(void) 110450fb749aSPavan Nikhilesh { 110550fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED, 110650fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 110750fb749aSPavan Nikhilesh } 110850fb749aSPavan Nikhilesh 110950fb749aSPavan Nikhilesh static int 111050fb749aSPavan Nikhilesh test_multi_port_queue_ordered_to_parallel(void) 111150fb749aSPavan Nikhilesh { 111250fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED, 111350fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 111450fb749aSPavan Nikhilesh } 111550fb749aSPavan Nikhilesh 111650fb749aSPavan Nikhilesh static int 111750fb749aSPavan Nikhilesh test_multi_port_queue_atomic_to_atomic(void) 111850fb749aSPavan Nikhilesh { 111950fb749aSPavan Nikhilesh /* Ingress event order test */ 112050fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 112150fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 112250fb749aSPavan Nikhilesh } 112350fb749aSPavan Nikhilesh 112450fb749aSPavan Nikhilesh static int 112550fb749aSPavan Nikhilesh test_multi_port_queue_atomic_to_ordered(void) 112650fb749aSPavan Nikhilesh { 112750fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 112850fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 112950fb749aSPavan Nikhilesh } 113050fb749aSPavan Nikhilesh 113150fb749aSPavan Nikhilesh static int 113250fb749aSPavan Nikhilesh test_multi_port_queue_atomic_to_parallel(void) 113350fb749aSPavan Nikhilesh { 113450fb749aSPavan Nikhilesh return 
test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC, 113550fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 113650fb749aSPavan Nikhilesh } 113750fb749aSPavan Nikhilesh 113850fb749aSPavan Nikhilesh static int 113950fb749aSPavan Nikhilesh test_multi_port_queue_parallel_to_atomic(void) 114050fb749aSPavan Nikhilesh { 114150fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 114250fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ATOMIC); 114350fb749aSPavan Nikhilesh } 114450fb749aSPavan Nikhilesh 114550fb749aSPavan Nikhilesh static int 114650fb749aSPavan Nikhilesh test_multi_port_queue_parallel_to_ordered(void) 114750fb749aSPavan Nikhilesh { 114850fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 114950fb749aSPavan Nikhilesh RTE_SCHED_TYPE_ORDERED); 115050fb749aSPavan Nikhilesh } 115150fb749aSPavan Nikhilesh 115250fb749aSPavan Nikhilesh static int 115350fb749aSPavan Nikhilesh test_multi_port_queue_parallel_to_parallel(void) 115450fb749aSPavan Nikhilesh { 115550fb749aSPavan Nikhilesh return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL, 115650fb749aSPavan Nikhilesh RTE_SCHED_TYPE_PARALLEL); 115750fb749aSPavan Nikhilesh } 115850fb749aSPavan Nikhilesh 115950fb749aSPavan Nikhilesh static int 116050fb749aSPavan Nikhilesh worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg) 116150fb749aSPavan Nikhilesh { 116250fb749aSPavan Nikhilesh struct test_core_param *param = arg; 116350fb749aSPavan Nikhilesh struct rte_event ev; 116450fb749aSPavan Nikhilesh uint16_t valid_event; 116550fb749aSPavan Nikhilesh uint8_t port = param->port; 116650fb749aSPavan Nikhilesh rte_atomic32_t *total_events = param->total_events; 116750fb749aSPavan Nikhilesh 116850fb749aSPavan Nikhilesh while (rte_atomic32_read(total_events) > 0) { 116950fb749aSPavan Nikhilesh valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0); 117050fb749aSPavan Nikhilesh if (!valid_event) 117150fb749aSPavan Nikhilesh 
continue; 117250fb749aSPavan Nikhilesh 117350fb749aSPavan Nikhilesh if (ev.sub_event_type == 255) { /* last stage */ 117450fb749aSPavan Nikhilesh rte_pktmbuf_free(ev.mbuf); 117550fb749aSPavan Nikhilesh rte_atomic32_sub(total_events, 1); 117650fb749aSPavan Nikhilesh } else { 117750fb749aSPavan Nikhilesh ev.event_type = RTE_EVENT_TYPE_CPU; 117850fb749aSPavan Nikhilesh ev.sub_event_type++; 117950fb749aSPavan Nikhilesh ev.sched_type = 118050fb749aSPavan Nikhilesh rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1); 118150fb749aSPavan Nikhilesh ev.op = RTE_EVENT_OP_FORWARD; 118250fb749aSPavan Nikhilesh rte_event_enqueue_burst(evdev, port, &ev, 1); 118350fb749aSPavan Nikhilesh } 118450fb749aSPavan Nikhilesh } 118550fb749aSPavan Nikhilesh return 0; 118650fb749aSPavan Nikhilesh } 118750fb749aSPavan Nikhilesh 118850fb749aSPavan Nikhilesh static int 118950fb749aSPavan Nikhilesh launch_multi_port_max_stages_random_sched_type(int (*fn)(void *)) 119050fb749aSPavan Nikhilesh { 119150fb749aSPavan Nikhilesh uint32_t nr_ports; 119250fb749aSPavan Nikhilesh int ret; 119350fb749aSPavan Nikhilesh 1194*daeda14cSPavan Nikhilesh RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev, 119550fb749aSPavan Nikhilesh RTE_EVENT_DEV_ATTR_PORT_COUNT, 119650fb749aSPavan Nikhilesh &nr_ports), "Port count get failed"); 119750fb749aSPavan Nikhilesh nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1); 119850fb749aSPavan Nikhilesh 119950fb749aSPavan Nikhilesh if (!nr_ports) { 1200*daeda14cSPavan Nikhilesh ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__, 120150fb749aSPavan Nikhilesh nr_ports, rte_lcore_count() - 1); 1202*daeda14cSPavan Nikhilesh return 0; 120350fb749aSPavan Nikhilesh } 120450fb749aSPavan Nikhilesh 120550fb749aSPavan Nikhilesh /* Injects events with m->seqn=0 to total_events */ 120650fb749aSPavan Nikhilesh ret = inject_events( 120750fb749aSPavan Nikhilesh 0x1 /*flow_id */, 120850fb749aSPavan Nikhilesh RTE_EVENT_TYPE_CPU /* event_type */, 120950fb749aSPavan Nikhilesh 0 /* 
		sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	/* Same worker on all ports; 0xff is an out-of-range sched type
	 * (marked "invalid") — presumably a sentinel for the launcher;
	 * TODO confirm against launch_workers_and_wait().
	 */
	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}

/*
 * Worker: queue-per-stage pipeline. Each dequeued event is forwarded to the
 * next queue (ev.queue_id++) with a randomly chosen sched type; on the last
 * queue the mbuf is freed and the shared in-flight counter is decremented.
 * Returns 0 on normal exit (counter drained); the assert macro returns
 * early on attribute-get failure.
 */
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	/* Spin until all injected events have been consumed at the last stage */
	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

/*
 * Worker: mixed pipeline. Identical to the queue-based worker above, but
 * forwarded events also get a random sub_event_type (0..255), so stages are
 * distinguished by both queue and sub-event flow.
 */
static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}

/*
 * Producer: injects NUM_PACKETS ordered events on a single flow (0x1),
 * stamping each mbuf with a monotonically increasing sequence number so the
 * consumer side can verify ingress ordering.
 */
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		/* Busy-retry on mempool exhaustion until an mbuf is free */
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		/* Ingress sequence number, later verified by seqn_list_check() */
		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		/* NOTE(review): enqueue return value is ignored — assumes an
		 * OP_NEW enqueue never fails here; a drop would create a gap
		 * in the sequence. TODO confirm.
		 */
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

/*
 * Common driver for the ingress-order tests: runs the ordered-flow producer
 * on one port and the given consumer worker 'fn' on the others, then checks
 * that the recorded sequence numbers came out in order. Skipped (returns 0)
 * when there are fewer than 3 lcores or fewer than 2 usable ports, since
 * producer and consumer need distinct ports/cores.
 */
static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check the events order maintained or not */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}

/*
 * Run one test case: 'setup' prepares the device, 'test' executes, 'tdown'
 * always tears down afterwards. A failed setup counts the case as
 * unsupported (not failed); pass/fail tallies feed the summary printed by
 * test_eventdev_octeontx().
 */
static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s 
Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	/* Counted whether supported or not; teardown runs unconditionally */
	total++;
	tdown();
}

/*
 * Selftest entry point for the octeontx SSOVF event device. Sets up the
 * suite, runs every test case with per-case setup/teardown (each case counts
 * into total/passed/failed/unsupported), logs a summary, and returns 0 on
 * success or -1 if any case failed.
 */
int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);

	ssovf_log_selftest("Total tests : %d", total);
	ssovf_log_selftest("Passed : %d", passed);
	ssovf_log_selftest("Failed : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}