/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"

int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
				CLGRN" %3.2f\n"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}

static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}

static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
					adptr[flow_counter % nb_timer_adptrs],
					(struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	/* Launch the producer function only in case of synthetic producer. */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}


int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

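	/* Sample throughput once per second; if the number of outstanding
	 * packets does not move for a full five-second window, report a
	 * scheduler deadlock and stop the test.
	 */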
	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
					opt->prod_type ==
					EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				(opt->prod_type == EVT_PROD_TYPE_SYNT ||
				 opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
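
	/* Create one Rx adapter per ethdev, add all of its Rx queues to the
	 * event queue derived from the port stride, and run the adapter on a
	 * service core when it has no internal event port.
	 */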
	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}

static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}

int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t stride, uint8_t nb_queues,
		const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
			port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			ret = rte_event_port_setup(opt->dev_id, port,
					port_conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}

int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlaps with main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlaps with main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
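		/* Trim the requested RSS hash functions to what the port
		 * supports, then configure a single Rx/Tx queue pair and
		 * enable promiscuous mode.
		 */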
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
					"requested:%#"PRIx64" configured:%#"PRIx64"\n",
					i,
					port_conf.rx_adv_conf.rss_conf.rss_hf,
					local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_eth_dev_stop(i);
		}
	}
}

int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				sizeof(struct perf_elt), /* element size*/
				512, /* cache size*/
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				512, /* cache size*/
				0,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */

	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}