/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>
#include <rte_test.h>

#include "ssovf_evdev.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS  (16 * 1024)

#define OCTEONTX_TEST_RUN(setup, teardown, test) \
	octeontx_test_run(setup, teardown, test, #test)
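
/*
 * Illustrative expansion of the macro above (a sketch: octeontx_test_run()
 * is assumed to be defined later in this file, running setup/test/teardown
 * and updating the result counters below):
 *
 *	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
 *			  test_simple_enqdeq_atomic);
 * expands to
 *	octeontx_test_run(eventdev_setup, eventdev_teardown,
 *			  test_simple_enqdeq_atomic,
 *			  "test_simple_enqdeq_atomic");
 *
 * The #test stringification supplies the test name for result reporting.
 */
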
static int total;
static int passed;
static int failed;
static int unsupported;

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return -1;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return 0;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			ssovf_log_dbg("Seqn mismatch %d %d", seqn_list[i], i);
			return -1;
		}
	}
	return 0;
}

struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		ssovf_log_dbg("%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			ssovf_log_dbg("Error creating eventdev %s",
					eventdev_name);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			ssovf_log_dbg("Error finding newly created eventdev");
			return -1;
		}
	}

	return 0;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};

static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		ssovf_log_dbg("ERROR creating mempool");
		return -1;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	RTE_TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"ERROR max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			ssovf_log_dbg(
				"test expects a unique priority per queue");
			return -ENOTSUP;
		}
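
		/* A worked example of the priority spread below, at the
		 * 8-queue cap checked above: step =
		 * (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) / queue_count =
		 * 256 / 8 = 32, so queues 0..7 are assigned priorities
		 * 0, 32, 64, ..., 224, where 0 is
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
		 */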
		/* Configure event queues (0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d",
					i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d",
					i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d",
				i);
	}

	ret = rte_event_dev_start(evdev);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return 0;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
			uint32_t flow_id, uint8_t event_type,
			uint8_t sub_event_type, uint8_t sched_type,
			uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
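
/*
 * Each injected event carries its attributes twice: once in the mbuf data
 * area (struct event_attr above) and once in the rte_event fields. After
 * the device schedules the event, validate_event() compares the two copies
 * to catch any field the PMD corrupted or dropped.
 */
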
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try for a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		RTE_TEST_ASSERT_SUCCESS(valid_event,
				"Unexpected valid event=%d",
				*rte_event_pmd_selftest_seqn(ev.mbuf));
	}
	return 0;
}

static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return -1;
	}
	return ret;
}

static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	RTE_TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq =%d",
			attr->flow_id, ev->flow_id);
	RTE_TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq =%d",
			attr->event_type, ev->event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq =%d",
			attr->sub_event_type, ev->sub_event_type);
	RTE_TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq =%d",
			attr->sched_type, ev->sched_type);
	RTE_TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);
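
/*
 * consume_events() polls the given port until total_events events have been
 * dequeued and validated. The forward_progress_cnt heuristic treats more
 * than UINT16_MAX consecutive empty dequeues as a scheduler deadlock and
 * fails the test.
 */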
static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			ssovf_log_dbg("Detected deadlock");
			return -1;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return -1;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			RTE_TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}

static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(index, *rte_event_pmd_selftest_seqn(ev->mbuf),
			"index=%d != seqn=%d", index,
			*rte_event_pmd_selftest_seqn(ev->mbuf));
	return 0;
}

static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}
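
/*
 * The simple enq/deq tests above inject MAX_EVENTS events on queue 0/port 0
 * with increasing sequence numbers; validate_simple_enqdeq() asserts that
 * dequeue index i yields the event with seqn i, i.e. ingress order is
 * preserved on the single consuming port for each scheduling type.
 */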
/*
 * Generate a prescribed number of events and spread them across available
 * queues. On dequeue, using a single event port (port 0), verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return -1;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject 0..MAX_EVENTS events over 0..queue_count queues with a modulus
 * operation.
 *
 * For example, inject 32 events over queues 0..7:
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that the events come in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31 from queue 0 (highest priority) to
 * queue 7 (lowest priority).
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	RTE_TEST_ASSERT_EQUAL(*rte_event_pmd_selftest_seqn(ev->mbuf), expected_val,
			"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
			*rte_event_pmd_selftest_seqn(ev->mbuf), index, expected_val, range,
			queue_count, MAX_EVENTS);
	return 0;
}
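
/*
 * Worked example of the check above, assuming queue_count = 8 and
 * MAX_EVENTS = 16 * 1024: range = 2048, so dequeue index 2049 is the second
 * event delivered from queue 1 and must carry
 * seqn = (2049 % 2048) * 8 + 1 = 9.
 */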
static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validate logic */
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		RTE_TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		*rte_event_pmd_selftest_seqn(m) = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		RTE_TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;
	RTE_SET_USED(count);

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != WAIT) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			ssovf_log_dbg("\r%s: events %d", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			ssovf_log_dbg(
				"%s: No schedules for seconds, deadlock (%d)",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return -1;
		}
	}
	rte_eal_mp_wait_lcore();
	return 0;
}
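
/*
 * launch_workers_and_wait() below runs main_worker on the first worker lcore
 * using event port 0 and worker on the next nb_workers - 1 lcores using
 * ports 1..nb_workers - 1; every worker polls until total_events drops to
 * zero. The dequeue timeout is randomized below 10ms so that per-dequeue
 * timeout paths are also exercised.
 */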
static inline int
launch_workers_and_wait(int (*main_worker)(void *),
			int (*worker)(void *), uint32_t total_events,
			uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return -1;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return -1;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip main */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(main_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(worker, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across available
 * queues. Dequeue the events through multiple ports and verify the
 * enqueued event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
}

static void
flush(uint8_t dev_id, struct rte_event event, void *arg)
{
	unsigned int *count = arg;

	RTE_SET_USED(dev_id);
	if (event.event_type == RTE_EVENT_TYPE_CPU)
		*count = *count + 1;
}
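
/*
 * rte_event_dev_stop() invokes the callback registered above once for each
 * event still held inside the device, so after the stop below the flush
 * count is expected to match the number of injected RTE_EVENT_TYPE_CPU
 * events.
 */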
static int
test_dev_stop_flush(void)
{
	unsigned int total_events = MAX_EVENTS, count = 0;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return -1;

	ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
	if (ret)
		return -2;
	rte_event_dev_stop(evdev);
	ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
	if (ret)
		return -3;
	RTE_TEST_ASSERT_EQUAL(total_events, count,
			"count mismatch total_events=%d count=%d",
			total_events, count);
	return 0;
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, ev->queue_id,
				"queue mismatch enq=%d deq =%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by checking
 * queue_id == x on dequeue on the specific port x.
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");

	/* Unlink all connections that were created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0,
				"Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return -1;
	}

	/* Verify the events generated from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return -1;
	}

	return 0;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
			struct rte_event *ev)
{
	RTE_SET_USED(index);
	RTE_TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
				"queue mismatch enq=%d deq =%d",
				port, ev->queue_id);
	return 0;
}

/*
 * Link all even-numbered queues to port 0 and all odd-numbered queues to
 * port 1 and verify the link connection on dequeue.
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &nr_queues), "Queue count get failed");
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		ssovf_log_dbg("%s: Not enough ports to test ports=%d",
				__func__, nr_ports);
		return 0;
	}

	/* Unlink all connections that were created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		RTE_TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even-numbered queues to port 0 and odd numbers to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		RTE_TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return -1;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}
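
	/* With the even/odd linking above, half of the queues feed each
	 * port: e.g. with 8 queues, total_events = MAX_EVENTS / 8 = 2048
	 * per queue, so port 0 must drain 4 * 2048 events from queues
	 * 0, 2, 4, 6 and port 1 the same amount from the odd queues.
	 */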
	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return -1;

	return 0;
}
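
/*
 * Two-stage flow-based pipeline: stage 0 events (sub_event_type == 0)
 * arrive with the injected sched_type and are forwarded on atomic flow 0x2
 * as stage 1 (sub_event_type == 1) with the worker's sched_type; stage 1
 * records the sequence number so ingress order can be verified when the
 * output stage is atomic.
 */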
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1 */
			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.sub_event_type = %d",
					ev.sub_event_type);
			return -1;
		}
	}
	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
				nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with sequence numbers from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order is maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
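
/*
 * Queue (group) based variant of the two-stage pipeline: stages are keyed
 * by queue_id instead of sub_event_type, i.e. events start on queue 0
 * (stage 0) and are forwarded to queue 1 (stage 1) before being consumed.
 */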
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 (group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1 (group 1) */
			if (seqn_list_update(*rte_event_pmd_selftest_seqn(ev.mbuf)) == 0) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				ssovf_log_dbg("Failed to update seqn_list");
				return -1;
			}
		} else {
			ssovf_log_dbg("Invalid ev.queue_id = %d", ev.queue_id);
			return -1;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
				__func__, queue_count, nr_ports,
				rte_lcore_count() - 1);
		return 0;
	}

	/* Inject events with sequence numbers from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order is maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		ssovf_log_dbg("%s: Not enough queues=%d ports=%d or workers=%d",
			__func__, queue_count, nr_ports,
			rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with a 0 sequence number to total_events */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return -1;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return -1;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the events order is maintained */
		return seqn_list_check(total_events);
	}
	return 0;
}
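
/*
 * Pairwise scheduling-type transition tests for the queue based pipeline:
 * every ordered/atomic/parallel input is exercised against every output.
 */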
static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
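
/*
 * Flow based multi-stage worker: each hop bumps sub_event_type and picks a
 * random scheduling type, so an event traverses 256 stages (0..255) before
 * it is freed at the last stage.
 */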
static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}
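
/*
 * Common launcher for the max-stage tests: inject MAX_EVENTS with a random
 * initial scheduling type and run the given worker on every available port.
 */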
static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		ssovf_log_dbg("%s: Not enough ports=%d or workers=%d", __func__,
			nr_ports, rte_lcore_count() - 1);
		return 0;
	}

	/* Injects events with a 0 sequence number to total_events */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return -1;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
					0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}
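
/*
 * Queue based multi-stage worker: each hop advances queue_id with a random
 * scheduling type; the event is freed once it reaches the last queue.
 */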
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}
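
/*
 * Producer: allocate NUM_PACKETS mbufs, stamp each with an increasing
 * sequence number, and enqueue them as NEW events on a single ordered
 * flow so a consumer can verify ingress ordering.
 */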
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		*rte_event_pmd_selftest_seqn(m) = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	RTE_TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		ssovf_log_dbg("### Not enough cores for %s test.", __func__);
		return 0;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether the events order is maintained */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}
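
/*
 * Per-test harness: a failing setup marks the test unsupported rather than
 * failed; teardown always runs so one test cannot poison the next.
 */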
static void octeontx_test_run(int (*setup)(void), void (*tdown)(void),
		int (*test)(void), const char *name)
{
	if (setup() < 0) {
		ssovf_log_selftest("Error setting up test %s", name);
		unsupported++;
	} else {
		if (test() < 0) {
			failed++;
			ssovf_log_selftest("%s Failed", name);
		} else {
			passed++;
			ssovf_log_selftest("%s Passed", name);
		}
	}

	total++;
	tdown();
}

int
test_eventdev_octeontx(void)
{
	testsuite_setup();

	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_dev_stop_flush);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test);
	OCTEONTX_TEST_RUN(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic);
	OCTEONTX_TEST_RUN(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic);

	ssovf_log_selftest("Total tests   : %d", total);
	ssovf_log_selftest("Passed        : %d", passed);
	ssovf_log_selftest("Failed        : %d", failed);
	ssovf_log_selftest("Not supported : %d", unsupported);

	testsuite_teardown();

	if (failed)
		return -1;

	return 0;
}
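
/*
 * Usage sketch (an assumption, not defined in this file): this suite is
 * normally reached through the PMD rather than called directly. Assuming
 * it is registered as the driver's dev_selftest op under the conventional
 * vdev name "event_octeontx", an application using the public eventdev API
 * could run it as follows:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_octeontx");
 *
 *	if (dev_id >= 0)
 *		rte_event_dev_selftest(dev_id);
 */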