/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_hexdump.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_random.h>
#include <rte_bus_vdev.h>

#include "test.h"

#define NUM_PACKETS (1 << 18)
#define MAX_EVENTS (16 * 1024)

static int evdev;
static struct rte_mempool *eventdev_test_mempool;

struct event_attr {
	uint32_t flow_id;
	uint8_t event_type;
	uint8_t sub_event_type;
	uint8_t sched_type;
	uint8_t queue;
	uint8_t port;
};

static uint32_t seqn_list_index;
static int seqn_list[NUM_PACKETS];

static inline void
seqn_list_init(void)
{
	RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
	memset(seqn_list, 0, sizeof(seqn_list));
	seqn_list_index = 0;
}

static inline int
seqn_list_update(int val)
{
	if (seqn_list_index >= NUM_PACKETS)
		return TEST_FAILED;

	seqn_list[seqn_list_index++] = val;
	rte_smp_wmb();
	return TEST_SUCCESS;
}

static inline int
seqn_list_check(int limit)
{
	int i;

	for (i = 0; i < limit; i++) {
		if (seqn_list[i] != i) {
			printf("Seqn mismatch %d %d\n", seqn_list[i], i);
			return TEST_FAILED;
		}
	}
	return TEST_SUCCESS;
}
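/*
 * Per-worker argument handed to rte_eal_remote_launch(); total_events is
 * shared across all workers and counted down atomically as events are
 * consumed.
 */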
struct test_core_param {
	rte_atomic32_t *total_events;
	uint64_t dequeue_tmo_ticks;
	uint8_t port;
	uint8_t sched_type;
};

static int
testsuite_setup(void)
{
	const char *eventdev_name = "event_octeontx";

	evdev = rte_event_dev_get_dev_id(eventdev_name);
	if (evdev < 0) {
		printf("%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);
		if (rte_vdev_init(eventdev_name, NULL) < 0) {
			printf("Error creating eventdev %s\n", eventdev_name);
			return TEST_FAILED;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			printf("Error finding newly created eventdev\n");
			return TEST_FAILED;
		}
	}

	return TEST_SUCCESS;
}

static void
testsuite_teardown(void)
{
	rte_event_dev_close(evdev);
}

static inline void
devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
		struct rte_event_dev_info *info)
{
	memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
	dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
	dev_conf->nb_event_ports = info->max_event_ports;
	dev_conf->nb_event_queues = info->max_event_queues;
	dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
	dev_conf->nb_event_port_dequeue_depth =
			info->max_event_port_dequeue_depth;
	dev_conf->nb_event_port_enqueue_depth =
			info->max_event_port_enqueue_depth;
	dev_conf->nb_events_limit =
			info->max_num_events;
}

enum {
	TEST_EVENTDEV_SETUP_DEFAULT,
	TEST_EVENTDEV_SETUP_PRIORITY,
	TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
};
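/*
 * Common device setup. DEFAULT configures all queues with default priority,
 * PRIORITY assigns a unique priority per queue, and DEQUEUE_TIMEOUT enables
 * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT so each dequeue call can pass its
 * own timeout in ticks.
 */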
static inline int
_eventdev_setup(int mode)
{
	int i, ret;
	struct rte_event_dev_config dev_conf;
	struct rte_event_dev_info info;
	const char *pool_name = "evdev_octeontx_test_pool";

	/* Create and destroy pool for each test case to make it standalone */
	eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
			MAX_EVENTS,
			0 /*MBUF_CACHE_SIZE*/,
			0,
			512, /* Use very small mbufs */
			rte_socket_id());
	if (!eventdev_test_mempool) {
		printf("ERROR creating mempool\n");
		return TEST_FAILED;
	}

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
			"max_num_events=%d < max_events=%d",
			info.max_num_events, MAX_EVENTS);

	devconf_set_default_sane_values(&dev_conf, &info);
	if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
		dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;

	ret = rte_event_dev_configure(evdev, &dev_conf);
	TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
		if (queue_count > 8) {
			printf("test expects a unique priority per queue\n");
			return -ENOTSUP;
		}

		/* Configure event queues(0 to n) with
		 * RTE_EVENT_DEV_PRIORITY_HIGHEST to
		 * RTE_EVENT_DEV_PRIORITY_LOWEST
		 */
		uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
				queue_count;
		for (i = 0; i < (int)queue_count; i++) {
			struct rte_event_queue_conf queue_conf;

			ret = rte_event_queue_default_conf_get(evdev, i,
					&queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
			queue_conf.priority = i * step;
			ret = rte_event_queue_setup(evdev, i, &queue_conf);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}

	} else {
		/* Configure event queues with default priority */
		for (i = 0; i < (int)queue_count; i++) {
			ret = rte_event_queue_setup(evdev, i, NULL);
			TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
		}
	}
	/* Configure event ports */
	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_setup(evdev, i, NULL);
		TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
		ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
	}

	ret = rte_event_dev_start(evdev);
	TEST_ASSERT_SUCCESS(ret, "Failed to start device");

	return TEST_SUCCESS;
}

static inline int
eventdev_setup(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
}

static inline int
eventdev_setup_priority(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
}

static inline int
eventdev_setup_dequeue_timeout(void)
{
	return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
}

static inline void
eventdev_teardown(void)
{
	rte_event_dev_stop(evdev);
	rte_mempool_free(eventdev_test_mempool);
}

static inline void
update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
		uint32_t flow_id, uint8_t event_type,
		uint8_t sub_event_type, uint8_t sched_type,
		uint8_t queue, uint8_t port)
{
	struct event_attr *attr;

	/* Store the event attributes in mbuf for future reference */
	attr = rte_pktmbuf_mtod(m, struct event_attr *);
	attr->flow_id = flow_id;
	attr->event_type = event_type;
	attr->sub_event_type = sub_event_type;
	attr->sched_type = sched_type;
	attr->queue = queue;
	attr->port = port;

	ev->flow_id = flow_id;
	ev->sub_event_type = sub_event_type;
	ev->event_type = event_type;
	/* Inject the new event */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = sched_type;
	ev->queue_id = queue;
	ev->mbuf = m;
}
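/*
 * Allocate mbufs, stamp each with a sequence number (m->seqn) and the
 * expected attributes, then enqueue them as NEW events on the given port.
 * validate_event() compares the stored attributes again after dequeue.
 */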
static inline int
inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
		uint8_t sched_type, uint8_t queue, uint8_t port,
		unsigned int events)
{
	struct rte_mbuf *m;
	unsigned int i;

	for (i = 0; i < events; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		update_event_and_validation_attr(m, &ev, flow_id, event_type,
			sub_event_type, sched_type, queue, port);
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}
	return 0;
}

static inline int
check_excess_events(uint8_t port)
{
	int i;
	uint16_t valid_event;
	struct rte_event ev;

	/* Check for excess events, try a few times and exit */
	for (i = 0; i < 32; i++) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);

		TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
					ev.mbuf->seqn);
	}
	return 0;
}

static inline int
generate_random_events(const unsigned int total_events)
{
	struct rte_event_dev_info info;
	unsigned int i;
	int ret;

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	ret = rte_event_dev_info_get(evdev, &info);
	TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
	for (i = 0; i < total_events; i++) {
		ret = inject_events(
			rte_rand() % info.max_event_queue_flows /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			rte_rand() % queue_count /* queue */,
			0 /* port */,
			1 /* events */);
		if (ret)
			return TEST_FAILED;
	}
	return ret;
}


static inline int
validate_event(struct rte_event *ev)
{
	struct event_attr *attr;

	attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
	TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
			"flow_id mismatch enq=%d deq =%d",
			attr->flow_id, ev->flow_id);
	TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
			"event_type mismatch enq=%d deq =%d",
			attr->event_type, ev->event_type);
	TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
			"sub_event_type mismatch enq=%d deq =%d",
			attr->sub_event_type, ev->sub_event_type);
	TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
			"sched_type mismatch enq=%d deq =%d",
			attr->sched_type, ev->sched_type);
	TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			attr->queue, ev->queue_id);
	return 0;
}

typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
				 struct rte_event *ev);

static inline int
consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
{
	int ret;
	uint16_t valid_event;
	uint32_t events = 0, forward_progress_cnt = 0, index = 0;
	struct rte_event ev;

	while (1) {
		if (++forward_progress_cnt > UINT16_MAX) {
			printf("Detected deadlock\n");
			return TEST_FAILED;
		}

		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		forward_progress_cnt = 0;
		ret = validate_event(&ev);
		if (ret)
			return TEST_FAILED;

		if (fn != NULL) {
			ret = fn(index, port, &ev);
			TEST_ASSERT_SUCCESS(ret,
				"Failed to validate test specific event");
		}

		++index;

		rte_pktmbuf_free(ev.mbuf);
		if (++events >= total_events)
			break;
	}

	return check_excess_events(port);
}
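/*
 * Test-specific validation callbacks for consume_events(); each checks the
 * invariant a particular test cares about, on top of the generic attribute
 * checks done by validate_event().
 */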
static int
validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
{
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
			ev->mbuf->seqn);
	return 0;
}

static inline int
test_simple_enqdeq(uint8_t sched_type)
{
	int ret;

	ret = inject_events(0 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			0 /* sub_event_type */,
			sched_type,
			0 /* queue */,
			0 /* port */,
			MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
}

static int
test_simple_enqdeq_ordered(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
}

static int
test_simple_enqdeq_atomic(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
}

static int
test_simple_enqdeq_parallel(void)
{
	return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
}

/*
 * Generate a prescribed number of events and spread them across the
 * available queues.
 * On dequeue, verify the enqueued event attributes using a
 * single event port (port 0).
 */
static int
test_multi_queue_enq_single_port_deq(void)
{
	int ret;

	ret = generate_random_events(MAX_EVENTS);
	if (ret)
		return TEST_FAILED;

	return consume_events(0 /* port */, MAX_EVENTS, NULL);
}

/*
 * Inject 0..MAX_EVENTS events over 0..queue_count with modulus
 * operation
 *
 * For example, inject 32 events over 0..7 queues
 * enqueue events 0, 8, 16, 24 in queue 0
 * enqueue events 1, 9, 17, 25 in queue 1
 * ..
 * ..
 * enqueue events 7, 15, 23, 31 in queue 7
 *
 * On dequeue, validate that events come in the order
 * 0,8,16,24,1,9,17,25..,7,15,23,31 from queue 0 (highest priority) to
 * queue 7 (lowest priority)
 */
static int
validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
{
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	uint32_t range = MAX_EVENTS / queue_count;
	uint32_t expected_val = (index % range) * queue_count;

	expected_val += ev->queue_id;
	RTE_SET_USED(port);
	TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
		"seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
		ev->mbuf->seqn, index, expected_val, range,
		queue_count, MAX_EVENTS);
	return 0;
}

static int
test_multi_queue_priority(void)
{
	uint8_t queue;
	struct rte_mbuf *m;
	int i, max_evts_roundoff;

	/* See validate_queue_priority() comments for priority validate logic */
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	max_evts_roundoff = MAX_EVENTS / queue_count;
	max_evts_roundoff *= queue_count;

	for (i = 0; i < max_evts_roundoff; i++) {
		struct rte_event ev = {.event = 0, .u64 = 0};

		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");

		m->seqn = i;
		queue = i % queue_count;
		update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
			0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
		rte_event_enqueue_burst(evdev, 0, &ev, 1);
	}

	return consume_events(0, max_evts_roundoff, validate_queue_priority);
}

static int
worker_multi_port_fn(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;
	int ret;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		ret = validate_event(&ev);
		TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
		rte_pktmbuf_free(ev.mbuf);
		rte_atomic32_sub(total_events, 1);
	}
	return 0;
}

static inline int
wait_workers_to_join(int lcore, const rte_atomic32_t *count)
{
	uint64_t cycles, print_cycles;

	print_cycles = cycles = rte_get_timer_cycles();
	while (rte_eal_get_lcore_state(lcore) != FINISHED) {
		uint64_t new_cycles = rte_get_timer_cycles();

		if (new_cycles - print_cycles > rte_get_timer_hz()) {
			printf("\r%s: events %d\n", __func__,
				rte_atomic32_read(count));
			print_cycles = new_cycles;
		}
		if (new_cycles - cycles > rte_get_timer_hz() * 10) {
			printf("%s: No schedules for 10 seconds, deadlock (%d)\n",
				__func__,
				rte_atomic32_read(count));
			rte_event_dev_dump(evdev, stdout);
			cycles = new_cycles;
			return TEST_FAILED;
		}
	}
	rte_eal_mp_wait_lcore();
	return TEST_SUCCESS;
}

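/*
 * Launch one "master" worker on the first available lcore (it owns port 0)
 * and nb_workers - 1 "slave" workers on subsequent lcores, worker i polling
 * port i. A randomized dequeue timeout (up to ~10 ms) is passed through to
 * the workers, and wait_workers_to_join() watches for deadlock.
 * A typical call, mirroring the ones below:
 *
 *	launch_workers_and_wait(worker_fn, worker_fn, MAX_EVENTS,
 *				nr_ports, RTE_SCHED_TYPE_ATOMIC);
 */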
static inline int
launch_workers_and_wait(int (*master_worker)(void *),
		int (*slave_workers)(void *), uint32_t total_events,
		uint8_t nb_workers, uint8_t sched_type)
{
	uint8_t port = 0;
	int w_lcore;
	int ret;
	struct test_core_param *param;
	rte_atomic32_t atomic_total_events;
	uint64_t dequeue_tmo_ticks;

	if (!nb_workers)
		return 0;

	rte_atomic32_set(&atomic_total_events, total_events);
	seqn_list_init();

	param = malloc(sizeof(struct test_core_param) * nb_workers);
	if (!param)
		return TEST_FAILED;

	ret = rte_event_dequeue_timeout_ticks(evdev,
		rte_rand() % 10000000 /* 10ms */, &dequeue_tmo_ticks);
	if (ret) {
		free(param);
		return TEST_FAILED;
	}

	param[0].total_events = &atomic_total_events;
	param[0].sched_type = sched_type;
	param[0].port = 0;
	param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
	rte_smp_wmb();

	w_lcore = rte_get_next_lcore(
			/* start core */ -1,
			/* skip master */ 1,
			/* wrap */ 0);
	rte_eal_remote_launch(master_worker, &param[0], w_lcore);

	for (port = 1; port < nb_workers; port++) {
		param[port].total_events = &atomic_total_events;
		param[port].sched_type = sched_type;
		param[port].port = port;
		param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
		rte_smp_wmb();
		w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
		rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
	}

	ret = wait_workers_to_join(w_lcore, &atomic_total_events);
	free(param);
	return ret;
}

/*
 * Generate a prescribed number of events and spread them across the
 * available queues.
 * Dequeue the events through multiple ports and verify the enqueued
 * event attributes.
 */
static int
test_multi_queue_enq_multi_port_deq(void)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	ret = generate_random_events(total_events);
	if (ret)
		return TEST_FAILED;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	return launch_workers_and_wait(worker_multi_port_fn,
			worker_multi_port_fn, total_events,
			nr_ports, 0xff /* invalid */);
}

static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
		struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, ev->queue_id,
			"queue mismatch enq=%d deq =%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link queue x to port x and check the correctness of the link by checking
 * queue_id == x on dequeue on the specific port x
 */
static int
test_queue_to_port_single_link(void)
{
	int i, nr_links, ret;

	uint32_t port_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &port_count), "Port count get failed");

	/* Unlink all connections created in eventdev_setup */
	for (i = 0; i < (int)port_count; i++) {
		ret = rte_event_port_unlink(evdev, i, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
	}

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");

	nr_links = RTE_MIN(port_count, queue_count);
	const unsigned int total_events = MAX_EVENTS / nr_links;

	/* Link queue x to port x and inject events to queue x through port x */
	for (i = 0; i < nr_links; i++) {
		uint8_t queue = (uint8_t)i;

		ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			i /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;
	}

	/* Verify that the events came from the correct queue */
	for (i = 0; i < nr_links; i++) {
		ret = consume_events(i /* port */, total_events,
				validate_queue_to_port_single_link);
		if (ret)
			return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
		struct rte_event *ev)
{
	RTE_SET_USED(index);
	TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
			"queue mismatch enq=%d deq =%d",
			port, ev->queue_id);
	return 0;
}

/*
 * Link all even numbered queues to port 0 and all odd numbered queues to
 * port 1 and verify the link connection on dequeue
 */
static int
test_queue_to_port_multi_link(void)
{
	int ret, port0_events = 0, port1_events = 0;
	uint8_t queue, port;
	uint32_t nr_queues = 0;
	uint32_t nr_ports = 0;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &nr_queues), "Queue count get failed");

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	if (nr_ports < 2) {
		printf("%s: Not enough ports to test ports=%d\n",
				__func__, nr_ports);
		return TEST_SUCCESS;
	}

	/* Unlink all connections created in eventdev_setup */
	for (port = 0; port < nr_ports; port++) {
		ret = rte_event_port_unlink(evdev, port, NULL, 0);
		TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
					port);
	}

	const unsigned int total_events = MAX_EVENTS / nr_queues;

	/* Link all even numbered queues to port 0 and odd numbered to port 1 */
	for (queue = 0; queue < nr_queues; queue++) {
		port = queue & 0x1;
		ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
		TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
					queue, port);

		ret = inject_events(
			0x100 /*flow_id */,
			RTE_EVENT_TYPE_CPU /* event_type */,
			rte_rand() % 256 /* sub_event_type */,
			rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
			queue /* queue */,
			port /* port */,
			total_events /* events */);
		if (ret)
			return TEST_FAILED;

		if (port == 0)
			port0_events += total_events;
		else
			port1_events += total_events;
	}

	ret = consume_events(0 /* port */, port0_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;
	ret = consume_events(1 /* port */, port1_events,
			validate_queue_to_port_multi_link);
	if (ret)
		return TEST_FAILED;

	return TEST_SUCCESS;
}
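/*
 * Two-stage pipeline keyed on sub_event_type: stage 0 events are forwarded
 * to stage 1 on flow_id 0x2 with the requested output sched type; stage 1
 * events record their sequence number and are freed. When the output stage
 * is a single atomic flow, ingress order is preserved and later verified
 * via seqn_list_check().
 */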
static int
worker_flow_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0 */
		if (ev.sub_event_type == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type = 1; /* stage 1 */
			ev.sched_type = new_sched_type;
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.sub_event_type == 1) { /* Events from stage 1*/
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.sub_event_type = %d\n",
					ev.sub_event_type);
			return TEST_FAILED;
		}
	}
	return 0;
}

static int
test_multiport_flow_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn ranging from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_flow_based_pipeline,
			worker_flow_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}


/* Multi port ordered to atomic transaction */
static int
test_multi_port_flow_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_ordered_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_ordered_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_atomic_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_atomic_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_flow_parallel_to_atomic(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_flow_parallel_to_ordered(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_flow_parallel_to_parallel(void)
{
	return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
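/*
 * Same two-stage pipeline as above, but staged on queue_id (queue groups)
 * instead of sub_event_type: queue 0 is stage 0 and queue 1 is stage 1.
 */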
static int
worker_group_based_pipeline(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint8_t new_sched_type = param->sched_type;
	rte_atomic32_t *total_events = param->total_events;
	uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
					dequeue_tmo_ticks);
		if (!valid_event)
			continue;

		/* Events from stage 0(group 0) */
		if (ev.queue_id == 0) {
			/* Move to atomic flow to maintain the ordering */
			ev.flow_id = 0x2;
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sched_type = new_sched_type;
			ev.queue_id = 1; /* Stage 1*/
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		} else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
			if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
				rte_pktmbuf_free(ev.mbuf);
				rte_atomic32_sub(total_events, 1);
			} else {
				printf("Failed to update seqn_list\n");
				return TEST_FAILED;
			}
		} else {
			printf("Invalid ev.queue_id = %d\n", ev.queue_id);
			return TEST_FAILED;
		}
	}

	return 0;
}

static int
test_multiport_queue_sched_type_test(uint8_t in_sched_type,
			uint8_t out_sched_type)
{
	const unsigned int total_events = MAX_EVENTS;
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_PORT_COUNT,
			    &nr_ports), "Port count get failed");

	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
			    RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
			    &queue_count), "Queue count get failed");
	if (queue_count < 2 || !nr_ports) {
		printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
			__func__, queue_count, nr_ports,
			rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with m->seqn ranging from 0 to total_events - 1 */
	ret = inject_events(
		0x1 /*flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		in_sched_type,
		0 /* queue */,
		0 /* port */,
		total_events /* events */);
	if (ret)
		return TEST_FAILED;

	ret = launch_workers_and_wait(worker_group_based_pipeline,
			worker_group_based_pipeline,
			total_events, nr_ports, out_sched_type);
	if (ret)
		return TEST_FAILED;

	if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
			out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
		/* Check whether the event order was maintained */
		return seqn_list_check(total_events);
	}
	return TEST_SUCCESS;
}

static int
test_multi_port_queue_ordered_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_ordered_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_ordered_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_atomic_to_atomic(void)
{
	/* Ingress event order test */
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
				RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
				RTE_SCHED_TYPE_PARALLEL);
}
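/*
 * Deepest flow-based pipeline the 8-bit sub_event_type allows: each worker
 * bumps sub_event_type by one with a random sched type per hop and drops
 * the event once it reaches stage 255, i.e. 256 stages in total.
 */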
static int
test_multi_port_queue_atomic_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_atomic_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
test_multi_port_queue_parallel_to_atomic(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ATOMIC);
}

static int
test_multi_port_queue_parallel_to_ordered(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_ORDERED);
}

static int
test_multi_port_queue_parallel_to_parallel(void)
{
	return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
			RTE_SCHED_TYPE_PARALLEL);
}

static int
worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.sub_event_type == 255) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.sub_event_type++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

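/*
 * Common launcher for the max stage tests: inject MAX_EVENTS events with a
 * random initial scheduling type and let the supplied worker drive them
 * through the remaining stages. The sched_type passed to
 * launch_workers_and_wait() is 0xff (invalid) because each worker picks
 * its own random type per stage.
 */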
static int
launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
{
	uint32_t nr_ports;
	int ret;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (!nr_ports) {
		printf("%s: Not enough ports=%d or workers=%d\n", __func__,
			nr_ports, rte_lcore_count() - 1);
		return TEST_SUCCESS;
	}

	/* Inject events with mbuf seqn ranging from 0 to MAX_EVENTS */
	ret = inject_events(
		0x1 /* flow_id */,
		RTE_EVENT_TYPE_CPU /* event_type */,
		0 /* sub_event_type (stage 0) */,
		rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
		0 /* queue */,
		0 /* port */,
		MAX_EVENTS /* events */);
	if (ret)
		return TEST_FAILED;

	return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
			0xff /* invalid */);
}

/* Flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_flow_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_flow_based_pipeline_max_stages_rand_sched_type);
}

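/*
 * Queue based variant of the max stage worker: the queue_id doubles as the
 * stage counter, so each event hops through every configured queue with a
 * freshly randomized scheduling type and is freed on the last one.
 */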
static int
worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue based pipeline with maximum stages with random sched type */
static int
test_multi_port_queue_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_queue_based_pipeline_max_stages_rand_sched_type);
}

static int
worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
{
	struct test_core_param *param = arg;
	struct rte_event ev;
	uint16_t valid_event;
	uint8_t port = param->port;
	uint32_t queue_count;
	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
				&queue_count), "Queue count get failed");
	uint8_t nr_queues = queue_count;
	rte_atomic32_t *total_events = param->total_events;

	while (rte_atomic32_read(total_events) > 0) {
		valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
		if (!valid_event)
			continue;

		if (ev.queue_id == nr_queues - 1) { /* Last stage */
			rte_pktmbuf_free(ev.mbuf);
			rte_atomic32_sub(total_events, 1);
		} else {
			ev.event_type = RTE_EVENT_TYPE_CPU;
			ev.queue_id++;
			ev.sub_event_type = rte_rand() % 256;
			ev.sched_type =
				rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(evdev, port, &ev, 1);
		}
	}
	return 0;
}

/* Queue and flow based pipeline with maximum stages with random sched type */
static int
test_multi_port_mixed_max_stages_random_sched_type(void)
{
	return launch_multi_port_max_stages_random_sched_type(
		worker_mixed_pipeline_max_stages_rand_sched_type);
}

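/*
 * Producer used by the ingress order tests: allocate NUM_PACKETS mbufs,
 * stamp each with an increasing seqn and enqueue them as NEW events on a
 * single ordered flow through queue 0, while the remaining workers
 * consume and record them.
 */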
static int
worker_ordered_flow_producer(void *arg)
{
	struct test_core_param *param = arg;
	uint8_t port = param->port;
	struct rte_mbuf *m;
	int counter = 0;

	while (counter < NUM_PACKETS) {
		m = rte_pktmbuf_alloc(eventdev_test_mempool);
		if (m == NULL)
			continue;

		m->seqn = counter++;

		struct rte_event ev = {.event = 0, .u64 = 0};

		ev.flow_id = 0x1; /* Generate a fat flow */
		ev.sub_event_type = 0;
		/* Inject the new event */
		ev.op = RTE_EVENT_OP_NEW;
		ev.event_type = RTE_EVENT_TYPE_CPU;
		ev.sched_type = RTE_SCHED_TYPE_ORDERED;
		ev.queue_id = 0;
		ev.mbuf = m;
		rte_event_enqueue_burst(evdev, port, &ev, 1);
	}

	return 0;
}

static inline int
test_producer_consumer_ingress_order_test(int (*fn)(void *))
{
	uint32_t nr_ports;

	TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(evdev,
				RTE_EVENT_DEV_ATTR_PORT_COUNT,
				&nr_ports), "Port count get failed");
	nr_ports = RTE_MIN(nr_ports, rte_lcore_count() - 1);

	if (rte_lcore_count() < 3 || nr_ports < 2) {
		printf("### Not enough cores for %s test.\n", __func__);
		return TEST_SUCCESS;
	}

	launch_workers_and_wait(worker_ordered_flow_producer, fn,
			NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
	/* Check whether events were received in ingress order */
	return seqn_list_check(NUM_PACKETS);
}

/* Flow based producer consumer ingress order test */
static int
test_flow_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_flow_based_pipeline);
}

/* Queue based producer consumer ingress order test */
static int
test_queue_producer_consumer_ingress_order_test(void)
{
	return test_producer_consumer_ingress_order_test(
				worker_group_based_pipeline);
}

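/*
 * Each TEST_CASE_ST entry below reconfigures the event device through the
 * named setup handler (the default, priority or dequeue timeout variant)
 * and releases it again through eventdev_teardown once the case completes.
 */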
static struct unit_test_suite eventdev_octeontx_testsuite = {
	.suite_name = "eventdev octeontx unit test suite",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_simple_enqdeq_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_single_port_deq),
		TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
			test_multi_queue_priority),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_queue_enq_multi_port_deq),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_single_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_to_port_multi_link),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_ordered_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_atomic_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_atomic),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_ordered),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_parallel_to_parallel),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_flow_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_queue_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_multi_port_mixed_max_stages_random_sched_type),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_flow_producer_consumer_ingress_order_test),
		TEST_CASE_ST(eventdev_setup, eventdev_teardown,
			test_queue_producer_consumer_ingress_order_test),
		/* Tests with dequeue timeout */
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_flow_ordered_to_atomic),
		TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
			test_multi_port_queue_ordered_to_atomic),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};

static int
test_eventdev_octeontx(void)
{
	return unit_test_suite_runner(&eventdev_octeontx_testsuite);
}

REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);
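
/*
 * Usage sketch: with the standard DPDK unit test application, a suite
 * registered via REGISTER_TEST_COMMAND() is typically run by typing the
 * registered command name at the interactive prompt. The binary name and
 * EAL flags below are assumptions that vary by build and platform:
 *
 *   ./dpdk-test
 *   RTE>>eventdev_octeontx_autotest
 */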