153a3b7e8SJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause 253a3b7e8SJerin Jacob * Copyright(c) 2017 Cavium, Inc 3ffbae86fSJerin Jacob */ 4ffbae86fSJerin Jacob 5626b12a8SPavan Nikhilesh #include <math.h> 6626b12a8SPavan Nikhilesh 7ffbae86fSJerin Jacob #include "test_perf_common.h" 8ffbae86fSJerin Jacob 9f3a67078SVolodymyr Fialko #define NB_CRYPTODEV_DESCRIPTORS 1024 108f5b5495SAkhil Goyal #define DATA_SIZE 512 11750ab9d5SAakash Sasidharan #define IV_OFFSET (sizeof(struct rte_crypto_op) + \ 12750ab9d5SAakash Sasidharan sizeof(struct rte_crypto_sym_op) + \ 13750ab9d5SAakash Sasidharan sizeof(union rte_event_crypto_metadata)) 14750ab9d5SAakash Sasidharan 158f5b5495SAkhil Goyal struct modex_test_data { 168f5b5495SAkhil Goyal enum rte_crypto_asym_xform_type xform_type; 178f5b5495SAkhil Goyal struct { 188f5b5495SAkhil Goyal uint8_t data[DATA_SIZE]; 198f5b5495SAkhil Goyal uint16_t len; 208f5b5495SAkhil Goyal } base; 218f5b5495SAkhil Goyal struct { 228f5b5495SAkhil Goyal uint8_t data[DATA_SIZE]; 238f5b5495SAkhil Goyal uint16_t len; 248f5b5495SAkhil Goyal } exponent; 258f5b5495SAkhil Goyal struct { 268f5b5495SAkhil Goyal uint8_t data[DATA_SIZE]; 278f5b5495SAkhil Goyal uint16_t len; 288f5b5495SAkhil Goyal } modulus; 298f5b5495SAkhil Goyal struct { 308f5b5495SAkhil Goyal uint8_t data[DATA_SIZE]; 318f5b5495SAkhil Goyal uint16_t len; 328f5b5495SAkhil Goyal } reminder; 338f5b5495SAkhil Goyal uint16_t result_len; 348f5b5495SAkhil Goyal }; 358f5b5495SAkhil Goyal 368f5b5495SAkhil Goyal static struct 378f5b5495SAkhil Goyal modex_test_data modex_test_case = { 388f5b5495SAkhil Goyal .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX, 398f5b5495SAkhil Goyal .base = { 408f5b5495SAkhil Goyal .data = { 418f5b5495SAkhil Goyal 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85, 428f5b5495SAkhil Goyal 0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD, 438f5b5495SAkhil Goyal 0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50 448f5b5495SAkhil Goyal }, 458f5b5495SAkhil Goyal .len = 20, 468f5b5495SAkhil Goyal }, 478f5b5495SAkhil Goyal 
.exponent = { 488f5b5495SAkhil Goyal .data = { 498f5b5495SAkhil Goyal 0x01, 0x00, 0x01 508f5b5495SAkhil Goyal }, 518f5b5495SAkhil Goyal .len = 3, 528f5b5495SAkhil Goyal }, 538f5b5495SAkhil Goyal .reminder = { 548f5b5495SAkhil Goyal .data = { 558f5b5495SAkhil Goyal 0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72, 568f5b5495SAkhil Goyal 0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C, 578f5b5495SAkhil Goyal 0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17, 588f5b5495SAkhil Goyal 0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D, 598f5b5495SAkhil Goyal 0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C, 608f5b5495SAkhil Goyal 0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7, 618f5b5495SAkhil Goyal 0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11, 628f5b5495SAkhil Goyal 0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32, 638f5b5495SAkhil Goyal 0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B, 648f5b5495SAkhil Goyal 0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99, 658f5b5495SAkhil Goyal 0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E, 668f5b5495SAkhil Goyal 0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38, 678f5b5495SAkhil Goyal 0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7, 688f5b5495SAkhil Goyal 0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F, 698f5b5495SAkhil Goyal 0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46, 708f5b5495SAkhil Goyal 0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A 718f5b5495SAkhil Goyal }, 728f5b5495SAkhil Goyal .len = 128, 738f5b5495SAkhil Goyal }, 748f5b5495SAkhil Goyal .modulus = { 758f5b5495SAkhil Goyal .data = { 768f5b5495SAkhil Goyal 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a, 778f5b5495SAkhil Goyal 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce, 788f5b5495SAkhil Goyal 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2, 798f5b5495SAkhil Goyal 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a, 808f5b5495SAkhil Goyal 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d, 818f5b5495SAkhil Goyal 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a, 828f5b5495SAkhil Goyal 0x44, 0x5b, 0xad, 
0x84, 0x0d, 0x3f, 0x27, 0x6e, 838f5b5495SAkhil Goyal 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72, 848f5b5495SAkhil Goyal 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87, 858f5b5495SAkhil Goyal 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62, 868f5b5495SAkhil Goyal 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18, 878f5b5495SAkhil Goyal 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e, 888f5b5495SAkhil Goyal 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03, 898f5b5495SAkhil Goyal 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee, 908f5b5495SAkhil Goyal 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6, 918f5b5495SAkhil Goyal 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55 928f5b5495SAkhil Goyal }, 938f5b5495SAkhil Goyal .len = 128, 948f5b5495SAkhil Goyal }, 958f5b5495SAkhil Goyal .result_len = 128, 968f5b5495SAkhil Goyal }; 97de2bc16eSShijith Thotton 9841c219e6SJerin Jacob int 9941c219e6SJerin Jacob perf_test_result(struct evt_test *test, struct evt_options *opt) 10041c219e6SJerin Jacob { 10141c219e6SJerin Jacob RTE_SET_USED(opt); 1026b1a14a8SPavan Nikhilesh int i; 1036b1a14a8SPavan Nikhilesh uint64_t total = 0; 10441c219e6SJerin Jacob struct test_perf *t = evt_test_priv(test); 10541c219e6SJerin Jacob 1066b1a14a8SPavan Nikhilesh printf("Packet distribution across worker cores :\n"); 1076b1a14a8SPavan Nikhilesh for (i = 0; i < t->nb_workers; i++) 1086b1a14a8SPavan Nikhilesh total += t->worker[i].processed_pkts; 1096b1a14a8SPavan Nikhilesh for (i = 0; i < t->nb_workers; i++) 1106b1a14a8SPavan Nikhilesh printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:" 111c0900d33SHarry van Haaren CLGRN" %3.2f"CLNRM"\n", i, 1126b1a14a8SPavan Nikhilesh t->worker[i].processed_pkts, 1136b1a14a8SPavan Nikhilesh (((double)t->worker[i].processed_pkts)/total) 1146b1a14a8SPavan Nikhilesh * 100); 1156b1a14a8SPavan Nikhilesh 11641c219e6SJerin Jacob return t->result; 11741c219e6SJerin Jacob } 11841c219e6SJerin Jacob 1199d3aeb18SJerin Jacob static inline int 1209d3aeb18SJerin Jacob 
perf_producer(void *arg) 1219d3aeb18SJerin Jacob { 1229a618803SPavan Nikhilesh int i; 1239d3aeb18SJerin Jacob struct prod_data *p = arg; 1249d3aeb18SJerin Jacob struct test_perf *t = p->t; 1259d3aeb18SJerin Jacob struct evt_options *opt = t->opt; 1269d3aeb18SJerin Jacob const uint8_t dev_id = p->dev_id; 1279d3aeb18SJerin Jacob const uint8_t port = p->port_id; 1289d3aeb18SJerin Jacob struct rte_mempool *pool = t->pool; 1299d3aeb18SJerin Jacob const uint64_t nb_pkts = t->nb_pkts; 1309d3aeb18SJerin Jacob const uint32_t nb_flows = t->nb_flows; 1319d3aeb18SJerin Jacob uint32_t flow_counter = 0; 1329d3aeb18SJerin Jacob uint64_t count = 0; 1339a618803SPavan Nikhilesh struct perf_elt *m[BURST_SIZE + 1] = {NULL}; 134f123568cSPavan Nikhilesh uint8_t enable_fwd_latency; 1359d3aeb18SJerin Jacob struct rte_event ev; 1369d3aeb18SJerin Jacob 137f123568cSPavan Nikhilesh enable_fwd_latency = opt->fwd_latency; 1389d3aeb18SJerin Jacob if (opt->verbose_level > 1) 1399d3aeb18SJerin Jacob printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__, 1409d3aeb18SJerin Jacob rte_lcore_id(), dev_id, port, p->queue_id); 1419d3aeb18SJerin Jacob 1429d3aeb18SJerin Jacob ev.event = 0; 1439d3aeb18SJerin Jacob ev.op = RTE_EVENT_OP_NEW; 1449d3aeb18SJerin Jacob ev.queue_id = p->queue_id; 1459d3aeb18SJerin Jacob ev.sched_type = t->opt->sched_type_list[0]; 1469d3aeb18SJerin Jacob ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 1479d3aeb18SJerin Jacob ev.event_type = RTE_EVENT_TYPE_CPU; 1489d3aeb18SJerin Jacob ev.sub_event_type = 0; /* stage 0 */ 1499d3aeb18SJerin Jacob 1509d3aeb18SJerin Jacob while (count < nb_pkts && t->done == false) { 1519a618803SPavan Nikhilesh if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0) 1529d3aeb18SJerin Jacob continue; 1539a618803SPavan Nikhilesh for (i = 0; i < BURST_SIZE; i++) { 1549d3aeb18SJerin Jacob ev.flow_id = flow_counter++ % nb_flows; 1559a618803SPavan Nikhilesh ev.event_ptr = m[i]; 156f123568cSPavan Nikhilesh if (enable_fwd_latency) 1579a618803SPavan 
Nikhilesh m[i]->timestamp = rte_get_timer_cycles(); 158f123568cSPavan Nikhilesh while (rte_event_enqueue_new_burst(dev_id, port, &ev, 159f123568cSPavan Nikhilesh 1) != 1) { 1609d3aeb18SJerin Jacob if (t->done) 1619d3aeb18SJerin Jacob break; 1629d3aeb18SJerin Jacob rte_pause(); 163f123568cSPavan Nikhilesh if (enable_fwd_latency) 164f123568cSPavan Nikhilesh m[i]->timestamp = 165f123568cSPavan Nikhilesh rte_get_timer_cycles(); 1669d3aeb18SJerin Jacob } 1679a618803SPavan Nikhilesh } 1689a618803SPavan Nikhilesh count += BURST_SIZE; 1699d3aeb18SJerin Jacob } 1709d3aeb18SJerin Jacob 1719d3aeb18SJerin Jacob return 0; 1729d3aeb18SJerin Jacob } 1739d3aeb18SJerin Jacob 174d008f20bSPavan Nikhilesh static inline int 17520841a25SRashmi Shetty perf_producer_burst(void *arg) 17620841a25SRashmi Shetty { 17720841a25SRashmi Shetty uint32_t i; 17820841a25SRashmi Shetty uint64_t timestamp; 17920841a25SRashmi Shetty struct prod_data *p = arg; 18020841a25SRashmi Shetty struct test_perf *t = p->t; 18120841a25SRashmi Shetty struct evt_options *opt = t->opt; 18220841a25SRashmi Shetty const uint8_t dev_id = p->dev_id; 18320841a25SRashmi Shetty const uint8_t port = p->port_id; 18420841a25SRashmi Shetty struct rte_mempool *pool = t->pool; 18520841a25SRashmi Shetty const uint64_t nb_pkts = t->nb_pkts; 18620841a25SRashmi Shetty const uint32_t nb_flows = t->nb_flows; 18720841a25SRashmi Shetty uint32_t flow_counter = 0; 18820841a25SRashmi Shetty uint16_t enq = 0; 18920841a25SRashmi Shetty uint64_t count = 0; 190f123568cSPavan Nikhilesh struct perf_elt *m[opt->prod_enq_burst_sz + 1]; 191f123568cSPavan Nikhilesh struct rte_event ev[opt->prod_enq_burst_sz + 1]; 19220841a25SRashmi Shetty uint32_t burst_size = opt->prod_enq_burst_sz; 193f123568cSPavan Nikhilesh uint8_t enable_fwd_latency; 19420841a25SRashmi Shetty 195f123568cSPavan Nikhilesh enable_fwd_latency = opt->fwd_latency; 196f123568cSPavan Nikhilesh memset(m, 0, sizeof(*m) * (opt->prod_enq_burst_sz + 1)); 19720841a25SRashmi Shetty if 
(opt->verbose_level > 1) 19820841a25SRashmi Shetty printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__, 19920841a25SRashmi Shetty rte_lcore_id(), dev_id, port, p->queue_id); 20020841a25SRashmi Shetty 20120841a25SRashmi Shetty for (i = 0; i < burst_size; i++) { 20220841a25SRashmi Shetty ev[i].op = RTE_EVENT_OP_NEW; 20320841a25SRashmi Shetty ev[i].queue_id = p->queue_id; 20420841a25SRashmi Shetty ev[i].sched_type = t->opt->sched_type_list[0]; 20520841a25SRashmi Shetty ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 20620841a25SRashmi Shetty ev[i].event_type = RTE_EVENT_TYPE_CPU; 20720841a25SRashmi Shetty ev[i].sub_event_type = 0; /* stage 0 */ 20820841a25SRashmi Shetty } 20920841a25SRashmi Shetty 21020841a25SRashmi Shetty while (count < nb_pkts && t->done == false) { 21120841a25SRashmi Shetty if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0) 21220841a25SRashmi Shetty continue; 21320841a25SRashmi Shetty timestamp = rte_get_timer_cycles(); 21420841a25SRashmi Shetty for (i = 0; i < burst_size; i++) { 21520841a25SRashmi Shetty ev[i].flow_id = flow_counter++ % nb_flows; 21620841a25SRashmi Shetty ev[i].event_ptr = m[i]; 217f123568cSPavan Nikhilesh if (enable_fwd_latency) 21820841a25SRashmi Shetty m[i]->timestamp = timestamp; 21920841a25SRashmi Shetty } 220f123568cSPavan Nikhilesh enq = rte_event_enqueue_new_burst(dev_id, port, ev, burst_size); 22120841a25SRashmi Shetty while (enq < burst_size) { 222f123568cSPavan Nikhilesh enq += rte_event_enqueue_new_burst( 223f123568cSPavan Nikhilesh dev_id, port, ev + enq, burst_size - enq); 22420841a25SRashmi Shetty if (t->done) 22520841a25SRashmi Shetty break; 22620841a25SRashmi Shetty rte_pause(); 227f123568cSPavan Nikhilesh if (enable_fwd_latency) { 22820841a25SRashmi Shetty timestamp = rte_get_timer_cycles(); 22920841a25SRashmi Shetty for (i = enq; i < burst_size; i++) 23020841a25SRashmi Shetty m[i]->timestamp = timestamp; 23120841a25SRashmi Shetty } 232f123568cSPavan Nikhilesh } 23320841a25SRashmi Shetty count 
+= burst_size; 23420841a25SRashmi Shetty } 23520841a25SRashmi Shetty return 0; 23620841a25SRashmi Shetty } 23720841a25SRashmi Shetty 23820841a25SRashmi Shetty static inline int 239d008f20bSPavan Nikhilesh perf_event_timer_producer(void *arg) 240d008f20bSPavan Nikhilesh { 2419a618803SPavan Nikhilesh int i; 242d008f20bSPavan Nikhilesh struct prod_data *p = arg; 243d008f20bSPavan Nikhilesh struct test_perf *t = p->t; 244d008f20bSPavan Nikhilesh struct evt_options *opt = t->opt; 245d008f20bSPavan Nikhilesh uint32_t flow_counter = 0; 246d008f20bSPavan Nikhilesh uint64_t count = 0; 247d008f20bSPavan Nikhilesh uint64_t arm_latency = 0; 248d008f20bSPavan Nikhilesh const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; 249d008f20bSPavan Nikhilesh const uint32_t nb_flows = t->nb_flows; 250d008f20bSPavan Nikhilesh const uint64_t nb_timers = opt->nb_timers; 251d008f20bSPavan Nikhilesh struct rte_mempool *pool = t->pool; 2529a618803SPavan Nikhilesh struct perf_elt *m[BURST_SIZE + 1] = {NULL}; 253d008f20bSPavan Nikhilesh struct rte_event_timer_adapter **adptr = t->timer_adptr; 25452553263SPavan Nikhilesh struct rte_event_timer tim; 255d008f20bSPavan Nikhilesh uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; 256d008f20bSPavan Nikhilesh 25752553263SPavan Nikhilesh memset(&tim, 0, sizeof(struct rte_event_timer)); 258626b12a8SPavan Nikhilesh timeout_ticks = 259626b12a8SPavan Nikhilesh opt->optm_timer_tick_nsec 260626b12a8SPavan Nikhilesh ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) / 261626b12a8SPavan Nikhilesh opt->optm_timer_tick_nsec) 262626b12a8SPavan Nikhilesh : timeout_ticks; 263d008f20bSPavan Nikhilesh timeout_ticks += timeout_ticks ? 
0 : 1; 26452553263SPavan Nikhilesh tim.ev.event_type = RTE_EVENT_TYPE_TIMER; 26552553263SPavan Nikhilesh tim.ev.op = RTE_EVENT_OP_NEW; 26652553263SPavan Nikhilesh tim.ev.sched_type = t->opt->sched_type_list[0]; 26752553263SPavan Nikhilesh tim.ev.queue_id = p->queue_id; 26852553263SPavan Nikhilesh tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 26952553263SPavan Nikhilesh tim.state = RTE_EVENT_TIMER_NOT_ARMED; 27052553263SPavan Nikhilesh tim.timeout_ticks = timeout_ticks; 271d008f20bSPavan Nikhilesh 272d008f20bSPavan Nikhilesh if (opt->verbose_level > 1) 273d008f20bSPavan Nikhilesh printf("%s(): lcore %d\n", __func__, rte_lcore_id()); 274d008f20bSPavan Nikhilesh 275d008f20bSPavan Nikhilesh while (count < nb_timers && t->done == false) { 2769a618803SPavan Nikhilesh if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0) 277d008f20bSPavan Nikhilesh continue; 2789a618803SPavan Nikhilesh for (i = 0; i < BURST_SIZE; i++) { 2799a618803SPavan Nikhilesh rte_prefetch0(m[i + 1]); 2809a618803SPavan Nikhilesh m[i]->tim = tim; 2819a618803SPavan Nikhilesh m[i]->tim.ev.flow_id = flow_counter++ % nb_flows; 2829a618803SPavan Nikhilesh m[i]->tim.ev.event_ptr = m[i]; 2839a618803SPavan Nikhilesh m[i]->timestamp = rte_get_timer_cycles(); 284d008f20bSPavan Nikhilesh while (rte_event_timer_arm_burst( 285d008f20bSPavan Nikhilesh adptr[flow_counter % nb_timer_adptrs], 2869a618803SPavan Nikhilesh (struct rte_event_timer **)&m[i], 1) != 1) { 287d008f20bSPavan Nikhilesh if (t->done) 288d008f20bSPavan Nikhilesh break; 2899a618803SPavan Nikhilesh m[i]->timestamp = rte_get_timer_cycles(); 290d008f20bSPavan Nikhilesh } 2919a618803SPavan Nikhilesh arm_latency += rte_get_timer_cycles() - m[i]->timestamp; 2929a618803SPavan Nikhilesh } 2939a618803SPavan Nikhilesh count += BURST_SIZE; 294d008f20bSPavan Nikhilesh } 295d008f20bSPavan Nikhilesh fflush(stdout); 296d008f20bSPavan Nikhilesh rte_delay_ms(1000); 297d008f20bSPavan Nikhilesh printf("%s(): lcore %d Average event timer arm latency = %.3f 
us\n", 29893b7794bSPavan Nikhilesh __func__, rte_lcore_id(), 29993b7794bSPavan Nikhilesh count ? (float)(arm_latency / count) / 30093b7794bSPavan Nikhilesh (rte_get_timer_hz() / 1000000) : 0); 301d008f20bSPavan Nikhilesh return 0; 302d008f20bSPavan Nikhilesh } 303d008f20bSPavan Nikhilesh 30417b22d0bSPavan Nikhilesh static inline int 30517b22d0bSPavan Nikhilesh perf_event_timer_producer_burst(void *arg) 30617b22d0bSPavan Nikhilesh { 30717b22d0bSPavan Nikhilesh int i; 30817b22d0bSPavan Nikhilesh struct prod_data *p = arg; 30917b22d0bSPavan Nikhilesh struct test_perf *t = p->t; 31017b22d0bSPavan Nikhilesh struct evt_options *opt = t->opt; 31117b22d0bSPavan Nikhilesh uint32_t flow_counter = 0; 31217b22d0bSPavan Nikhilesh uint64_t count = 0; 31317b22d0bSPavan Nikhilesh uint64_t arm_latency = 0; 31417b22d0bSPavan Nikhilesh const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs; 31517b22d0bSPavan Nikhilesh const uint32_t nb_flows = t->nb_flows; 31617b22d0bSPavan Nikhilesh const uint64_t nb_timers = opt->nb_timers; 31717b22d0bSPavan Nikhilesh struct rte_mempool *pool = t->pool; 31817b22d0bSPavan Nikhilesh struct perf_elt *m[BURST_SIZE + 1] = {NULL}; 31917b22d0bSPavan Nikhilesh struct rte_event_timer_adapter **adptr = t->timer_adptr; 32052553263SPavan Nikhilesh struct rte_event_timer tim; 32117b22d0bSPavan Nikhilesh uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec; 32217b22d0bSPavan Nikhilesh 32352553263SPavan Nikhilesh memset(&tim, 0, sizeof(struct rte_event_timer)); 324626b12a8SPavan Nikhilesh timeout_ticks = 325626b12a8SPavan Nikhilesh opt->optm_timer_tick_nsec 326626b12a8SPavan Nikhilesh ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) / 327626b12a8SPavan Nikhilesh opt->optm_timer_tick_nsec) 328626b12a8SPavan Nikhilesh : timeout_ticks; 32917b22d0bSPavan Nikhilesh timeout_ticks += timeout_ticks ? 
0 : 1; 33052553263SPavan Nikhilesh tim.ev.event_type = RTE_EVENT_TYPE_TIMER; 33152553263SPavan Nikhilesh tim.ev.op = RTE_EVENT_OP_NEW; 33252553263SPavan Nikhilesh tim.ev.sched_type = t->opt->sched_type_list[0]; 33352553263SPavan Nikhilesh tim.ev.queue_id = p->queue_id; 33452553263SPavan Nikhilesh tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 33552553263SPavan Nikhilesh tim.state = RTE_EVENT_TIMER_NOT_ARMED; 33652553263SPavan Nikhilesh tim.timeout_ticks = timeout_ticks; 33717b22d0bSPavan Nikhilesh 33817b22d0bSPavan Nikhilesh if (opt->verbose_level > 1) 33917b22d0bSPavan Nikhilesh printf("%s(): lcore %d\n", __func__, rte_lcore_id()); 34017b22d0bSPavan Nikhilesh 34117b22d0bSPavan Nikhilesh while (count < nb_timers && t->done == false) { 34217b22d0bSPavan Nikhilesh if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0) 34317b22d0bSPavan Nikhilesh continue; 34417b22d0bSPavan Nikhilesh for (i = 0; i < BURST_SIZE; i++) { 34517b22d0bSPavan Nikhilesh rte_prefetch0(m[i + 1]); 34617b22d0bSPavan Nikhilesh m[i]->tim = tim; 34717b22d0bSPavan Nikhilesh m[i]->tim.ev.flow_id = flow_counter++ % nb_flows; 34817b22d0bSPavan Nikhilesh m[i]->tim.ev.event_ptr = m[i]; 34917b22d0bSPavan Nikhilesh m[i]->timestamp = rte_get_timer_cycles(); 35017b22d0bSPavan Nikhilesh } 35117b22d0bSPavan Nikhilesh rte_event_timer_arm_tmo_tick_burst( 35217b22d0bSPavan Nikhilesh adptr[flow_counter % nb_timer_adptrs], 35317b22d0bSPavan Nikhilesh (struct rte_event_timer **)m, 35417b22d0bSPavan Nikhilesh tim.timeout_ticks, 35517b22d0bSPavan Nikhilesh BURST_SIZE); 35617b22d0bSPavan Nikhilesh arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp; 35717b22d0bSPavan Nikhilesh count += BURST_SIZE; 35817b22d0bSPavan Nikhilesh } 35917b22d0bSPavan Nikhilesh fflush(stdout); 36017b22d0bSPavan Nikhilesh rte_delay_ms(1000); 36117b22d0bSPavan Nikhilesh printf("%s(): lcore %d Average event timer arm latency = %.3f us\n", 36293b7794bSPavan Nikhilesh __func__, rte_lcore_id(), 36393b7794bSPavan Nikhilesh count ? 
(float)(arm_latency / count) / 36493b7794bSPavan Nikhilesh (rte_get_timer_hz() / 1000000) : 0); 36517b22d0bSPavan Nikhilesh return 0; 36617b22d0bSPavan Nikhilesh } 36717b22d0bSPavan Nikhilesh 368de2bc16eSShijith Thotton static inline void 369de2bc16eSShijith Thotton crypto_adapter_enq_op_new(struct prod_data *p) 370de2bc16eSShijith Thotton { 371de2bc16eSShijith Thotton struct test_perf *t = p->t; 372de2bc16eSShijith Thotton const uint32_t nb_flows = t->nb_flows; 373de2bc16eSShijith Thotton const uint64_t nb_pkts = t->nb_pkts; 374de2bc16eSShijith Thotton struct rte_mempool *pool = t->pool; 375750ab9d5SAakash Sasidharan uint16_t data_length, data_offset; 376de2bc16eSShijith Thotton struct evt_options *opt = t->opt; 377de2bc16eSShijith Thotton uint16_t qp_id = p->ca.cdev_qp_id; 378de2bc16eSShijith Thotton uint8_t cdev_id = p->ca.cdev_id; 3793158ec9fSVolodymyr Fialko uint64_t alloc_failures = 0; 380de2bc16eSShijith Thotton uint32_t flow_counter = 0; 381de2bc16eSShijith Thotton struct rte_crypto_op *op; 3826776a581SVolodymyr Fialko uint16_t len, offset; 383de2bc16eSShijith Thotton struct rte_mbuf *m; 384de2bc16eSShijith Thotton uint64_t count = 0; 385de2bc16eSShijith Thotton 386de2bc16eSShijith Thotton if (opt->verbose_level > 1) 387de2bc16eSShijith Thotton printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n", 388de2bc16eSShijith Thotton __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id, 389de2bc16eSShijith Thotton p->ca.cdev_qp_id); 390de2bc16eSShijith Thotton 3916776a581SVolodymyr Fialko offset = sizeof(struct perf_elt); 3926776a581SVolodymyr Fialko len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz); 393de2bc16eSShijith Thotton 394750ab9d5SAakash Sasidharan if (opt->crypto_cipher_bit_mode) { 395750ab9d5SAakash Sasidharan data_offset = offset << 3; 396750ab9d5SAakash Sasidharan data_length = (len - offset) << 3; 397750ab9d5SAakash Sasidharan } else { 398750ab9d5SAakash Sasidharan data_offset = offset; 399750ab9d5SAakash Sasidharan data_length = len - 
offset; 400750ab9d5SAakash Sasidharan } 401750ab9d5SAakash Sasidharan 402de2bc16eSShijith Thotton while (count < nb_pkts && t->done == false) { 4038f5b5495SAkhil Goyal if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { 4048f5b5495SAkhil Goyal struct rte_crypto_sym_op *sym_op; 4058f5b5495SAkhil Goyal 4068f5b5495SAkhil Goyal op = rte_crypto_op_alloc(t->ca_op_pool, 4078f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_SYMMETRIC); 4083158ec9fSVolodymyr Fialko if (unlikely(op == NULL)) { 4093158ec9fSVolodymyr Fialko alloc_failures++; 410de2bc16eSShijith Thotton continue; 4113158ec9fSVolodymyr Fialko } 4123158ec9fSVolodymyr Fialko 4133158ec9fSVolodymyr Fialko m = rte_pktmbuf_alloc(pool); 4143158ec9fSVolodymyr Fialko if (unlikely(m == NULL)) { 4153158ec9fSVolodymyr Fialko alloc_failures++; 4163158ec9fSVolodymyr Fialko rte_crypto_op_free(op); 4173158ec9fSVolodymyr Fialko continue; 4183158ec9fSVolodymyr Fialko } 419de2bc16eSShijith Thotton 420de2bc16eSShijith Thotton rte_pktmbuf_append(m, len); 421de2bc16eSShijith Thotton sym_op = op->sym; 422de2bc16eSShijith Thotton sym_op->m_src = m; 423750ab9d5SAakash Sasidharan 424750ab9d5SAakash Sasidharan sym_op->cipher.data.offset = data_offset; 425750ab9d5SAakash Sasidharan sym_op->cipher.data.length = data_length; 426750ab9d5SAakash Sasidharan 427de2bc16eSShijith Thotton rte_crypto_op_attach_sym_session( 4288f5b5495SAkhil Goyal op, p->ca.crypto_sess[flow_counter++ % nb_flows]); 4298f5b5495SAkhil Goyal } else { 4308f5b5495SAkhil Goyal struct rte_crypto_asym_op *asym_op; 4316776a581SVolodymyr Fialko uint8_t *result; 4326776a581SVolodymyr Fialko 4336776a581SVolodymyr Fialko if (rte_mempool_get(pool, (void **)&result)) { 4346776a581SVolodymyr Fialko alloc_failures++; 4356776a581SVolodymyr Fialko continue; 4366776a581SVolodymyr Fialko } 437de2bc16eSShijith Thotton 4388f5b5495SAkhil Goyal op = rte_crypto_op_alloc(t->ca_op_pool, 4398f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_ASYMMETRIC); 4403158ec9fSVolodymyr Fialko if (unlikely(op == NULL)) { 
4413158ec9fSVolodymyr Fialko alloc_failures++; 4426776a581SVolodymyr Fialko rte_mempool_put(pool, result); 4433158ec9fSVolodymyr Fialko continue; 4443158ec9fSVolodymyr Fialko } 4453158ec9fSVolodymyr Fialko 4468f5b5495SAkhil Goyal asym_op = op->asym; 4478f5b5495SAkhil Goyal asym_op->modex.base.data = modex_test_case.base.data; 4488f5b5495SAkhil Goyal asym_op->modex.base.length = modex_test_case.base.len; 4498f5b5495SAkhil Goyal asym_op->modex.result.data = result; 4508f5b5495SAkhil Goyal asym_op->modex.result.length = modex_test_case.result_len; 4518f5b5495SAkhil Goyal rte_crypto_op_attach_asym_session( 4528f5b5495SAkhil Goyal op, p->ca.crypto_sess[flow_counter++ % nb_flows]); 4538f5b5495SAkhil Goyal } 454de2bc16eSShijith Thotton while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 && 455de2bc16eSShijith Thotton t->done == false) 456de2bc16eSShijith Thotton rte_pause(); 457de2bc16eSShijith Thotton 458de2bc16eSShijith Thotton count++; 459de2bc16eSShijith Thotton } 4603158ec9fSVolodymyr Fialko 4613158ec9fSVolodymyr Fialko if (opt->verbose_level > 1 && alloc_failures) 4623158ec9fSVolodymyr Fialko printf("%s(): lcore %d allocation failures: %"PRIu64"\n", 4633158ec9fSVolodymyr Fialko __func__, rte_lcore_id(), alloc_failures); 464de2bc16eSShijith Thotton } 465de2bc16eSShijith Thotton 466de2bc16eSShijith Thotton static inline void 467de2bc16eSShijith Thotton crypto_adapter_enq_op_fwd(struct prod_data *p) 468de2bc16eSShijith Thotton { 469de2bc16eSShijith Thotton const uint8_t dev_id = p->dev_id; 470de2bc16eSShijith Thotton const uint8_t port = p->port_id; 471de2bc16eSShijith Thotton struct test_perf *t = p->t; 472de2bc16eSShijith Thotton const uint32_t nb_flows = t->nb_flows; 473de2bc16eSShijith Thotton const uint64_t nb_pkts = t->nb_pkts; 474de2bc16eSShijith Thotton struct rte_mempool *pool = t->pool; 475de2bc16eSShijith Thotton struct evt_options *opt = t->opt; 4763158ec9fSVolodymyr Fialko uint64_t alloc_failures = 0; 477de2bc16eSShijith Thotton uint32_t 
flow_counter = 0; 478de2bc16eSShijith Thotton struct rte_crypto_op *op; 4796776a581SVolodymyr Fialko uint16_t len, offset; 480de2bc16eSShijith Thotton struct rte_event ev; 481de2bc16eSShijith Thotton struct rte_mbuf *m; 482de2bc16eSShijith Thotton uint64_t count = 0; 483de2bc16eSShijith Thotton 484de2bc16eSShijith Thotton if (opt->verbose_level > 1) 485de2bc16eSShijith Thotton printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n", 486de2bc16eSShijith Thotton __func__, rte_lcore_id(), port, p->queue_id, 487de2bc16eSShijith Thotton p->ca.cdev_id, p->ca.cdev_qp_id); 488de2bc16eSShijith Thotton 489de2bc16eSShijith Thotton ev.event = 0; 490de2bc16eSShijith Thotton ev.op = RTE_EVENT_OP_NEW; 491de2bc16eSShijith Thotton ev.queue_id = p->queue_id; 492de2bc16eSShijith Thotton ev.sched_type = RTE_SCHED_TYPE_ATOMIC; 493de2bc16eSShijith Thotton ev.event_type = RTE_EVENT_TYPE_CPU; 4946776a581SVolodymyr Fialko 4956776a581SVolodymyr Fialko offset = sizeof(struct perf_elt); 4966776a581SVolodymyr Fialko len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz); 497de2bc16eSShijith Thotton 498de2bc16eSShijith Thotton while (count < nb_pkts && t->done == false) { 4998f5b5495SAkhil Goyal if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { 5008f5b5495SAkhil Goyal struct rte_crypto_sym_op *sym_op; 5018f5b5495SAkhil Goyal 5028f5b5495SAkhil Goyal op = rte_crypto_op_alloc(t->ca_op_pool, 5038f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_SYMMETRIC); 5043158ec9fSVolodymyr Fialko if (unlikely(op == NULL)) { 5053158ec9fSVolodymyr Fialko alloc_failures++; 506de2bc16eSShijith Thotton continue; 5073158ec9fSVolodymyr Fialko } 5083158ec9fSVolodymyr Fialko 5093158ec9fSVolodymyr Fialko m = rte_pktmbuf_alloc(pool); 5103158ec9fSVolodymyr Fialko if (unlikely(m == NULL)) { 5113158ec9fSVolodymyr Fialko alloc_failures++; 5123158ec9fSVolodymyr Fialko rte_crypto_op_free(op); 5133158ec9fSVolodymyr Fialko continue; 5143158ec9fSVolodymyr Fialko } 515de2bc16eSShijith Thotton 516de2bc16eSShijith Thotton 
rte_pktmbuf_append(m, len); 517de2bc16eSShijith Thotton sym_op = op->sym; 518de2bc16eSShijith Thotton sym_op->m_src = m; 5196776a581SVolodymyr Fialko sym_op->cipher.data.offset = offset; 5206776a581SVolodymyr Fialko sym_op->cipher.data.length = len - offset; 521de2bc16eSShijith Thotton rte_crypto_op_attach_sym_session( 5228f5b5495SAkhil Goyal op, p->ca.crypto_sess[flow_counter++ % nb_flows]); 5238f5b5495SAkhil Goyal } else { 5248f5b5495SAkhil Goyal struct rte_crypto_asym_op *asym_op; 5256776a581SVolodymyr Fialko uint8_t *result; 5266776a581SVolodymyr Fialko 5276776a581SVolodymyr Fialko if (rte_mempool_get(pool, (void **)&result)) { 5286776a581SVolodymyr Fialko alloc_failures++; 5296776a581SVolodymyr Fialko continue; 5306776a581SVolodymyr Fialko } 5318f5b5495SAkhil Goyal 5328f5b5495SAkhil Goyal op = rte_crypto_op_alloc(t->ca_op_pool, 5338f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_ASYMMETRIC); 5343158ec9fSVolodymyr Fialko if (unlikely(op == NULL)) { 5353158ec9fSVolodymyr Fialko alloc_failures++; 5366776a581SVolodymyr Fialko rte_mempool_put(pool, result); 5373158ec9fSVolodymyr Fialko continue; 5383158ec9fSVolodymyr Fialko } 5393158ec9fSVolodymyr Fialko 5408f5b5495SAkhil Goyal asym_op = op->asym; 5418f5b5495SAkhil Goyal asym_op->modex.base.data = modex_test_case.base.data; 5428f5b5495SAkhil Goyal asym_op->modex.base.length = modex_test_case.base.len; 5438f5b5495SAkhil Goyal asym_op->modex.result.data = result; 5448f5b5495SAkhil Goyal asym_op->modex.result.length = modex_test_case.result_len; 5458f5b5495SAkhil Goyal rte_crypto_op_attach_asym_session( 5468f5b5495SAkhil Goyal op, p->ca.crypto_sess[flow_counter++ % nb_flows]); 5478f5b5495SAkhil Goyal } 548de2bc16eSShijith Thotton ev.event_ptr = op; 549de2bc16eSShijith Thotton 550de2bc16eSShijith Thotton while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 && 551de2bc16eSShijith Thotton t->done == false) 552de2bc16eSShijith Thotton rte_pause(); 553de2bc16eSShijith Thotton 554de2bc16eSShijith Thotton count++; 
555de2bc16eSShijith Thotton } 5563158ec9fSVolodymyr Fialko 5573158ec9fSVolodymyr Fialko if (opt->verbose_level > 1 && alloc_failures) 5583158ec9fSVolodymyr Fialko printf("%s(): lcore %d allocation failures: %"PRIu64"\n", 5593158ec9fSVolodymyr Fialko __func__, rte_lcore_id(), alloc_failures); 560de2bc16eSShijith Thotton } 561de2bc16eSShijith Thotton 562*b25a66c4SAmit Prakash Shukla static inline void 563*b25a66c4SAmit Prakash Shukla dma_adapter_enq_op_fwd(struct prod_data *p) 564*b25a66c4SAmit Prakash Shukla { 565*b25a66c4SAmit Prakash Shukla struct test_perf *t = p->t; 566*b25a66c4SAmit Prakash Shukla const uint32_t nb_flows = t->nb_flows; 567*b25a66c4SAmit Prakash Shukla const uint64_t nb_pkts = t->nb_pkts; 568*b25a66c4SAmit Prakash Shukla struct rte_event_dma_adapter_op *op; 569*b25a66c4SAmit Prakash Shukla const uint8_t dev_id = p->dev_id; 570*b25a66c4SAmit Prakash Shukla struct evt_options *opt = t->opt; 571*b25a66c4SAmit Prakash Shukla const uint8_t port = p->port_id; 572*b25a66c4SAmit Prakash Shukla uint32_t flow_counter = 0; 573*b25a66c4SAmit Prakash Shukla struct rte_event ev; 574*b25a66c4SAmit Prakash Shukla uint64_t count = 0; 575*b25a66c4SAmit Prakash Shukla 576*b25a66c4SAmit Prakash Shukla if (opt->verbose_level > 1) 577*b25a66c4SAmit Prakash Shukla printf("%s(): lcore %d port %d queue %d dma_dev_id %u dma_dev_vchan_id %u\n", 578*b25a66c4SAmit Prakash Shukla __func__, rte_lcore_id(), port, p->queue_id, 579*b25a66c4SAmit Prakash Shukla p->da.dma_dev_id, p->da.vchan_id); 580*b25a66c4SAmit Prakash Shukla 581*b25a66c4SAmit Prakash Shukla ev.event = 0; 582*b25a66c4SAmit Prakash Shukla ev.op = RTE_EVENT_OP_NEW; 583*b25a66c4SAmit Prakash Shukla ev.queue_id = p->queue_id; 584*b25a66c4SAmit Prakash Shukla ev.sched_type = RTE_SCHED_TYPE_ATOMIC; 585*b25a66c4SAmit Prakash Shukla ev.event_type = RTE_EVENT_TYPE_CPU; 586*b25a66c4SAmit Prakash Shukla 587*b25a66c4SAmit Prakash Shukla while (count < nb_pkts && t->done == false) { 588*b25a66c4SAmit Prakash Shukla op = 
p->da.dma_op[flow_counter++ % nb_flows]; 589*b25a66c4SAmit Prakash Shukla ev.event_ptr = op; 590*b25a66c4SAmit Prakash Shukla 591*b25a66c4SAmit Prakash Shukla while (rte_event_dma_adapter_enqueue(dev_id, port, &ev, 1) != 1 && 592*b25a66c4SAmit Prakash Shukla t->done == false) 593*b25a66c4SAmit Prakash Shukla rte_pause(); 594*b25a66c4SAmit Prakash Shukla 595*b25a66c4SAmit Prakash Shukla count++; 596*b25a66c4SAmit Prakash Shukla } 597*b25a66c4SAmit Prakash Shukla } 598*b25a66c4SAmit Prakash Shukla 599*b25a66c4SAmit Prakash Shukla static inline int 600*b25a66c4SAmit Prakash Shukla perf_event_dma_producer(void *arg) 601*b25a66c4SAmit Prakash Shukla { 602*b25a66c4SAmit Prakash Shukla struct prod_data *p = arg; 603*b25a66c4SAmit Prakash Shukla 604*b25a66c4SAmit Prakash Shukla /* Only fwd mode is supported. */ 605*b25a66c4SAmit Prakash Shukla dma_adapter_enq_op_fwd(p); 606*b25a66c4SAmit Prakash Shukla 607*b25a66c4SAmit Prakash Shukla return 0; 608*b25a66c4SAmit Prakash Shukla } 609*b25a66c4SAmit Prakash Shukla 610de2bc16eSShijith Thotton static inline int 611de2bc16eSShijith Thotton perf_event_crypto_producer(void *arg) 612de2bc16eSShijith Thotton { 613de2bc16eSShijith Thotton struct prod_data *p = arg; 614de2bc16eSShijith Thotton struct evt_options *opt = p->t->opt; 615de2bc16eSShijith Thotton 616de2bc16eSShijith Thotton if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) 617de2bc16eSShijith Thotton crypto_adapter_enq_op_new(p); 618de2bc16eSShijith Thotton else 619de2bc16eSShijith Thotton crypto_adapter_enq_op_fwd(p); 620de2bc16eSShijith Thotton 621de2bc16eSShijith Thotton return 0; 622de2bc16eSShijith Thotton } 623de2bc16eSShijith Thotton 6249c3096d4SVolodymyr Fialko static void 6259c3096d4SVolodymyr Fialko crypto_adapter_enq_op_new_burst(struct prod_data *p) 6269c3096d4SVolodymyr Fialko { 6279c3096d4SVolodymyr Fialko const struct test_perf *t = p->t; 6289c3096d4SVolodymyr Fialko const struct evt_options *opt = t->opt; 6299c3096d4SVolodymyr Fialko 
	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	/* Leave room for the perf timestamp element at the mbuf head;
	 * cipher only the payload that follows it.
	 */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			/* All-or-nothing burst allocation; retry on shortfall. */
			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				/* Roll back the op allocation before retrying. */
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				/* Sessions are pre-created, one per flow. */
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			/* Per-op result buffers come from the generic pool. */
			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				/* All ops share the static modex test vector. */
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		}

		/* OP_NEW mode: enqueue straight to the cryptodev queue pair.
		 * NOTE(review): aborting mid-burst (t->done) leaves the
		 * un-enqueued tail of ops_burst unreleased — acceptable at
		 * test teardown, but worth confirming.
		 */
		enq = 0;
		while (!t->done) {
			enq += rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops_burst + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

/* Crypto producer, OP_FORWARD mode, burst variant: wraps each crypto op in
 * an event and enqueues bursts through the crypto adapter.
 */
static void
crypto_adapter_enq_op_fwd_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE];
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	/* Cipher only the payload following the perf timestamp element. */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	/* Pre-fill the constant fields of the event burst once; only
	 * event_ptr changes per iteration.
	 */
	for (i = 0; i < burst_size; i++) {
		ev[i].event = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			/* All-or-nothing burst allocation; retry on shortfall. */
			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				/* Roll back the op allocation before retrying. */
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			/* Per-op result buffers come from the generic pool. */
			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				/* All ops share the static modex test vector. */
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		}

		/* OP_FORWARD mode: events go through the crypto adapter;
		 * keep retrying the unsent tail until the whole burst is in.
		 */
		enq = 0;
		while (!t->done) {
			enq += rte_event_crypto_adapter_enqueue(dev_id, port, ev + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

/* lcore entry point for the crypto-adapter producer (burst path);
 * dispatches on the adapter mode selected on the command line.
 */
static inline int
perf_event_crypto_producer_burst(void *arg)
{
	struct prod_data
	*p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new_burst(p);
	else
		crypto_adapter_enq_op_fwd_burst(p);

	return 0;
}

/* Producer launch wrapper: clamps the enqueue burst size to the device's
 * capability, then dispatches to the producer matching opt->prod_type
 * (synthetic, timer adapter, crypto adapter or DMA adapter).
 */
static int
perf_producer_wrapper(void *arg)
{
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;

	rte_event_dev_info_get(p->dev_id, &dev_info);
	/* Unset burst size defaults to MAX_PROD_ENQ_BURST_SIZE, capped by
	 * the port's max enqueue depth when the device reports one.
	 */
	if (!t->opt->prod_enq_burst_sz) {
		t->opt->prod_enq_burst_sz = MAX_PROD_ENQ_BURST_SIZE;
		if (dev_info.max_event_port_enqueue_depth > 0 &&
		    (uint32_t)dev_info.max_event_port_enqueue_depth <
		    t->opt->prod_enq_burst_sz)
			t->opt->prod_enq_burst_sz =
				dev_info.max_event_port_enqueue_depth;
	}

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
	    t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
		 t->opt->prod_enq_burst_sz > 1) {
		/* Burst requested but the device can only enqueue one event
		 * at a time: report and fall through to return 0.
		 */
		if (dev_info.max_event_port_enqueue_depth == 1)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	}
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
		 !t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
		 t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		if (t->opt->prod_enq_burst_sz > 1)
			return perf_event_crypto_producer_burst(arg);
		else
			return perf_event_crypto_producer(arg);
	} else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)
		return perf_event_dma_producer(arg);

	return 0;
}

/* Sum of packets processed by all worker lcores. */
static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

/* Sum of the latency accumulated by all worker lcores (timer cycles). */
static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}


/* Launch worker and producer lcores, then monitor progress from the main
 * lcore: print a once-per-second throughput (and optional latency) line,
 * stop when all packets are processed, and abort with a dump if no
 * progress is made for ~5 seconds (deadlock detection).
 */
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	/* Deadlock watchdog: sample progress every ~5 seconds. */
	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	/* Throughput reporting: sample once per second. */
	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	/* static: running average persists across invocations. */
	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			/* Packets processed during this sample window, in millions. */
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				/* latency/pkts = avg cycles per packet; divide by
				 * freq_mhz to convert to microseconds.
				 */
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
						mpps, total_mpps/samples,
						(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
						mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				/* Producer types with a finite packet budget end
				 * the test here; others stop by external means.
				 */
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
				    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
				    opt->prod_type ==
				    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
				    opt->prod_type ==
				    EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			/* No forward progress for a whole watchdog window:
			 * dump the event device state and abort the test.
			 */
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

/* Create one Rx adapter per ethdev, map its queues to event queues at
 * the given stride, and set up a service core when the adapter has no
 * internal port.
 */
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf
 prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		/* Each port's queues feed the event queue at port * stride. */
		queue_conf.ev.queue_id = prod * stride;
		/* Adapter id == ethdev port id, one adapter per port. */
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		/* -1: add all Rx queues of this port. */
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		/* Adapters without an internal port need a service core to
		 * run their transfer function.
		 */
		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}

/* Create the requested number of event timer adapters and set up a
 * service core for any adapter lacking an internal port.
 */
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Single producer: allow the single-producer put optimization. */
	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		/* Record the resolution the adapter actually achieved. */
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		/* Adapters without an internal port need a service core to
		 * run their expiry processing.
		 */
		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}

/* Validate crypto adapter capabilities against the selected options
 * (mode, session storage, event vectorization) and bind the producer's
 * cryptodev queue pair to the adapter.
 * Returns 0 on success, a negative errno on capability mismatch or
 * the underlying API's error code.
 */
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct rte_event_crypto_adapter_queue_conf conf;
	struct evt_options *opt = t->opt;
	uint32_t cap;
	int ret;

	memset(&conf, 0, sizeof(conf));

	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
	if (ret) {
		evt_err("Failed to get crypto adapter capabilities");
		return ret;
	}

	/* The requested adapter mode must be backed by the matching
	 * internal-port capability.
	 */
	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("crypto adapter %s mode unsupported\n",
			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
		evt_err("Storing crypto session not supported");
		return -ENOTSUP;
	}

	if (opt->ena_vector) {
		struct rte_event_crypto_adapter_vector_limits limits;

		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
			evt_err("Crypto adapter doesn't support event vector");
			return -EINVAL;
		}

		ret = rte_event_crypto_adapter_vector_limits_get(p->dev_id, p->ca.cdev_id, &limits);
		if (ret) {
			evt_err("Failed to get crypto adapter's vector limits");
			return ret;
		}

		/* Vector size must fall within the device's limits ... */
		if (opt->vector_size < limits.min_sz || opt->vector_size > limits.max_sz) {
			evt_err("Vector size [%d] not within limits max[%d] min[%d]",
				opt->vector_size, limits.max_sz, limits.min_sz);
			return -EINVAL;
		}

		/* ... and be a power of two when the device requires it. */
		if (limits.log2_sz && !rte_is_power_of_2(opt->vector_size)) {
			evt_err("Vector size [%d] not power of 2", opt->vector_size);
			return -EINVAL;
		}

		if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
		    opt->vector_tmo_nsec < limits.min_timeout_ns) {
			evt_err("Vector timeout [%" PRIu64 "] not within limits "
				"max[%" PRIu64 "] min[%" PRIu64 "]",
				opt->vector_tmo_nsec, limits.max_timeout_ns, limits.min_timeout_ns);
			return -EINVAL;
		}

		conf.vector_mp = t->ca_vector_pool;
		conf.vector_sz = opt->vector_size;
		conf.vector_timeout_ns = opt->vector_tmo_nsec;
		conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	}

	/* When the device binds queue pairs to events internally, tell it
	 * where the dequeued crypto completions should land.
	 */
	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		conf.ev.queue_id = p->queue_id;
	}

	ret = rte_event_crypto_adapter_queue_pair_add(
		TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);

	return ret;
}

/* Validate DMA adapter capabilities against the selected adapter mode and
 * bind the producer's DMA vchan to the adapter.
 * Returns 0 on success, -ENOTSUP on a mode/capability mismatch, or the
 * underlying API's error code.
 */
static int
perf_event_dma_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct evt_options *opt = t->opt;
	struct rte_event event;
	uint32_t cap;
	int ret;

	ret = rte_event_dma_adapter_caps_get(p->dev_id, p->da.dma_dev_id, &cap);
	if (ret) {
		evt_err("Failed to get dma adapter capabilities");
		return ret;
	}

	/* The requested adapter mode must be backed by the matching
	 * internal-port capability.
	 */
	if (((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("dma adapter %s mode unsupported\n",
			opt->dma_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	}

	/* NOTE(review): 'event' is passed to vchan_add without any visible
	 * initialization in this function — confirm the fields are either
	 * ignored by the driver or set elsewhere before relying on them.
	 */
	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
		ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
				p->da.vchan_id, &event);
	else
		ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
				p->da.vchan_id, NULL);

	return ret;
}

/* Create a symmetric cipher session for one producer flow, after checking
 * that the cryptodev supports the configured algorithm, key size and IV
 * size. Returns the session, or NULL (with an error log) on failure.
 */
static void *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx cap_idx;
	enum rte_crypto_cipher_algorithm cipher_algo;
	struct rte_crypto_sym_xform cipher_xform;
	struct evt_options *opt = t->opt;
	uint16_t key_size;
	uint16_t iv_size;
	void *sess;

	cipher_algo = opt->crypto_cipher_alg;
	key_size = opt->crypto_cipher_key_sz;
	iv_size = opt->crypto_cipher_iv_sz;

	/* Check if device supports the algorithm */
	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cap_idx.algo.cipher = cipher_algo;

	cap =
rte_cryptodev_sym_capability_get(p->ca.cdev_id, &cap_idx); 1272750ab9d5SAakash Sasidharan if (cap == NULL) { 1273750ab9d5SAakash Sasidharan evt_err("Device doesn't support cipher algorithm [%s]. Test Skipped\n", 1274750ab9d5SAakash Sasidharan rte_cryptodev_get_cipher_algo_string(cipher_algo)); 1275750ab9d5SAakash Sasidharan return NULL; 1276750ab9d5SAakash Sasidharan } 1277750ab9d5SAakash Sasidharan 1278750ab9d5SAakash Sasidharan /* Check if device supports key size and IV size */ 1279750ab9d5SAakash Sasidharan if (rte_cryptodev_sym_capability_check_cipher(cap, key_size, 1280750ab9d5SAakash Sasidharan iv_size) < 0) { 1281750ab9d5SAakash Sasidharan evt_err("Device doesn't support cipher configuration:\n" 1282750ab9d5SAakash Sasidharan "cipher algo [%s], key sz [%d], iv sz [%d]. Test Skipped\n", 1283750ab9d5SAakash Sasidharan rte_cryptodev_get_cipher_algo_string(cipher_algo), key_size, iv_size); 1284750ab9d5SAakash Sasidharan return NULL; 1285750ab9d5SAakash Sasidharan } 1286750ab9d5SAakash Sasidharan 1287de2bc16eSShijith Thotton cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER; 1288750ab9d5SAakash Sasidharan cipher_xform.cipher.algo = cipher_algo; 1289750ab9d5SAakash Sasidharan cipher_xform.cipher.key.data = opt->crypto_cipher_key; 1290750ab9d5SAakash Sasidharan cipher_xform.cipher.key.length = key_size; 1291750ab9d5SAakash Sasidharan cipher_xform.cipher.iv.length = iv_size; 1292750ab9d5SAakash Sasidharan cipher_xform.cipher.iv.offset = IV_OFFSET; 1293de2bc16eSShijith Thotton cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; 1294de2bc16eSShijith Thotton cipher_xform.next = NULL; 1295de2bc16eSShijith Thotton 1296bdce2564SAkhil Goyal sess = rte_cryptodev_sym_session_create(p->ca.cdev_id, &cipher_xform, 1297bdce2564SAkhil Goyal t->ca_sess_pool); 1298de2bc16eSShijith Thotton if (sess == NULL) { 1299de2bc16eSShijith Thotton evt_err("Failed to create sym session"); 1300de2bc16eSShijith Thotton return NULL; 1301de2bc16eSShijith Thotton } 1302de2bc16eSShijith Thotton 
1303de2bc16eSShijith Thotton return sess; 1304de2bc16eSShijith Thotton } 1305de2bc16eSShijith Thotton 13068f5b5495SAkhil Goyal static void * 13078f5b5495SAkhil Goyal cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t) 13088f5b5495SAkhil Goyal { 13098f5b5495SAkhil Goyal const struct rte_cryptodev_asymmetric_xform_capability *capability; 13108f5b5495SAkhil Goyal struct rte_cryptodev_asym_capability_idx cap_idx; 13118f5b5495SAkhil Goyal struct rte_crypto_asym_xform xform; 13128f5b5495SAkhil Goyal void *sess; 13138f5b5495SAkhil Goyal 13148f5b5495SAkhil Goyal xform.next = NULL; 13158f5b5495SAkhil Goyal xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX; 13168f5b5495SAkhil Goyal cap_idx.type = xform.xform_type; 13178f5b5495SAkhil Goyal capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx); 13188f5b5495SAkhil Goyal if (capability == NULL) { 13198f5b5495SAkhil Goyal evt_err("Device doesn't support MODEX. Test Skipped\n"); 13208f5b5495SAkhil Goyal return NULL; 13218f5b5495SAkhil Goyal } 13228f5b5495SAkhil Goyal 13238f5b5495SAkhil Goyal xform.modex.modulus.data = modex_test_case.modulus.data; 13248f5b5495SAkhil Goyal xform.modex.modulus.length = modex_test_case.modulus.len; 13258f5b5495SAkhil Goyal xform.modex.exponent.data = modex_test_case.exponent.data; 13268f5b5495SAkhil Goyal xform.modex.exponent.length = modex_test_case.exponent.len; 13278f5b5495SAkhil Goyal 13288f5b5495SAkhil Goyal if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform, 13298f5b5495SAkhil Goyal t->ca_asym_sess_pool, &sess)) { 13308f5b5495SAkhil Goyal evt_err("Failed to create asym session"); 13318f5b5495SAkhil Goyal return NULL; 13328f5b5495SAkhil Goyal } 13338f5b5495SAkhil Goyal 13348f5b5495SAkhil Goyal return sess; 13358f5b5495SAkhil Goyal } 13368f5b5495SAkhil Goyal 1337272de067SJerin Jacob int 133884a7513dSJerin Jacob perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt, 1339535c630cSPavan Nikhilesh uint8_t stride, uint8_t nb_queues, 
1340535c630cSPavan Nikhilesh const struct rte_event_port_conf *port_conf) 134184a7513dSJerin Jacob { 134284a7513dSJerin Jacob struct test_perf *t = evt_test_priv(test); 13433617aae5SPavan Nikhilesh uint16_t port, prod; 134484a7513dSJerin Jacob int ret = -1; 134584a7513dSJerin Jacob 134684a7513dSJerin Jacob /* setup one port per worker, linking to all queues */ 134784a7513dSJerin Jacob for (port = 0; port < evt_nr_active_lcores(opt->wlcores); 134884a7513dSJerin Jacob port++) { 134984a7513dSJerin Jacob struct worker_data *w = &t->worker[port]; 135084a7513dSJerin Jacob 135184a7513dSJerin Jacob w->dev_id = opt->dev_id; 135284a7513dSJerin Jacob w->port_id = port; 135384a7513dSJerin Jacob w->t = t; 135484a7513dSJerin Jacob w->processed_pkts = 0; 135584a7513dSJerin Jacob w->latency = 0; 135684a7513dSJerin Jacob 13575f94d108SHarry van Haaren struct rte_event_port_conf conf = *port_conf; 13585f94d108SHarry van Haaren conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER; 13595f94d108SHarry van Haaren 13605f94d108SHarry van Haaren ret = rte_event_port_setup(opt->dev_id, port, &conf); 136184a7513dSJerin Jacob if (ret) { 136284a7513dSJerin Jacob evt_err("failed to setup port %d", port); 136384a7513dSJerin Jacob return ret; 136484a7513dSJerin Jacob } 136584a7513dSJerin Jacob 136684a7513dSJerin Jacob ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0); 136784a7513dSJerin Jacob if (ret != nb_queues) { 136884a7513dSJerin Jacob evt_err("failed to link all queues to port %d", port); 136984a7513dSJerin Jacob return -EINVAL; 137084a7513dSJerin Jacob } 137184a7513dSJerin Jacob } 137284a7513dSJerin Jacob 137384a7513dSJerin Jacob /* port for producers, no links */ 13743617aae5SPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { 13753617aae5SPavan Nikhilesh for ( ; port < perf_nb_event_ports(opt); port++) { 13763617aae5SPavan Nikhilesh struct prod_data *p = &t->prod[port]; 13773617aae5SPavan Nikhilesh p->t = t; 13783617aae5SPavan Nikhilesh } 13793617aae5SPavan 
Nikhilesh 13805f94d108SHarry van Haaren struct rte_event_port_conf conf = *port_conf; 13815f94d108SHarry van Haaren conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER; 13825f94d108SHarry van Haaren 13835f94d108SHarry van Haaren ret = perf_event_rx_adapter_setup(opt, stride, conf); 13843617aae5SPavan Nikhilesh if (ret) 13853617aae5SPavan Nikhilesh return ret; 1386d008f20bSPavan Nikhilesh } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { 1387d008f20bSPavan Nikhilesh prod = 0; 1388d008f20bSPavan Nikhilesh for ( ; port < perf_nb_event_ports(opt); port++) { 1389d008f20bSPavan Nikhilesh struct prod_data *p = &t->prod[port]; 1390d008f20bSPavan Nikhilesh p->queue_id = prod * stride; 1391d008f20bSPavan Nikhilesh p->t = t; 1392d008f20bSPavan Nikhilesh prod++; 1393d008f20bSPavan Nikhilesh } 1394d008f20bSPavan Nikhilesh 1395d008f20bSPavan Nikhilesh ret = perf_event_timer_adapter_setup(t); 1396d008f20bSPavan Nikhilesh if (ret) 1397d008f20bSPavan Nikhilesh return ret; 1398de2bc16eSShijith Thotton } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) { 1399de2bc16eSShijith Thotton struct rte_event_port_conf conf = *port_conf; 1400de2bc16eSShijith Thotton uint8_t cdev_id = 0; 1401de2bc16eSShijith Thotton uint16_t qp_id = 0; 1402de2bc16eSShijith Thotton 1403de2bc16eSShijith Thotton ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID, 1404de2bc16eSShijith Thotton opt->dev_id, &conf, 0); 1405de2bc16eSShijith Thotton if (ret) { 1406de2bc16eSShijith Thotton evt_err("Failed to create crypto adapter"); 1407de2bc16eSShijith Thotton return ret; 1408de2bc16eSShijith Thotton } 1409de2bc16eSShijith Thotton 1410de2bc16eSShijith Thotton prod = 0; 1411de2bc16eSShijith Thotton for (; port < perf_nb_event_ports(opt); port++) { 1412de2bc16eSShijith Thotton union rte_event_crypto_metadata m_data; 1413de2bc16eSShijith Thotton struct prod_data *p = &t->prod[port]; 1414de2bc16eSShijith Thotton uint32_t flow_id; 1415de2bc16eSShijith Thotton 1416de2bc16eSShijith Thotton if 
(qp_id == rte_cryptodev_queue_pair_count(cdev_id)) { 1417de2bc16eSShijith Thotton cdev_id++; 1418de2bc16eSShijith Thotton qp_id = 0; 1419de2bc16eSShijith Thotton } 1420de2bc16eSShijith Thotton 1421de2bc16eSShijith Thotton p->dev_id = opt->dev_id; 1422de2bc16eSShijith Thotton p->port_id = port; 1423de2bc16eSShijith Thotton p->queue_id = prod * stride; 1424de2bc16eSShijith Thotton p->ca.cdev_id = cdev_id; 1425de2bc16eSShijith Thotton p->ca.cdev_qp_id = qp_id; 1426de2bc16eSShijith Thotton p->ca.crypto_sess = rte_zmalloc_socket( 14278f5b5495SAkhil Goyal NULL, sizeof(void *) * t->nb_flows, 1428de2bc16eSShijith Thotton RTE_CACHE_LINE_SIZE, opt->socket_id); 1429de2bc16eSShijith Thotton p->t = t; 1430de2bc16eSShijith Thotton 1431eff29c45SVolodymyr Fialko ret = perf_event_crypto_adapter_setup(t, p); 1432eff29c45SVolodymyr Fialko if (ret) 1433eff29c45SVolodymyr Fialko return ret; 1434eff29c45SVolodymyr Fialko 1435de2bc16eSShijith Thotton m_data.request_info.cdev_id = p->ca.cdev_id; 1436de2bc16eSShijith Thotton m_data.request_info.queue_pair_id = p->ca.cdev_qp_id; 1437de2bc16eSShijith Thotton m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC; 1438de2bc16eSShijith Thotton m_data.response_info.queue_id = p->queue_id; 1439de2bc16eSShijith Thotton 1440de2bc16eSShijith Thotton for (flow_id = 0; flow_id < t->nb_flows; flow_id++) { 14418f5b5495SAkhil Goyal m_data.response_info.flow_id = flow_id; 14428f5b5495SAkhil Goyal if (opt->crypto_op_type == 14438f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_SYMMETRIC) { 14442a440d6aSAkhil Goyal void *sess; 14458f5b5495SAkhil Goyal 14468f5b5495SAkhil Goyal sess = cryptodev_sym_sess_create(p, t); 14478f5b5495SAkhil Goyal if (sess == NULL) 1448de2bc16eSShijith Thotton return -ENOMEM; 1449de2bc16eSShijith Thotton 1450eff29c45SVolodymyr Fialko ret = rte_cryptodev_session_event_mdata_set( 14518f5b5495SAkhil Goyal cdev_id, 14528f5b5495SAkhil Goyal sess, 14534c43055cSAkhil Goyal RTE_CRYPTO_OP_TYPE_SYMMETRIC, 14544c43055cSAkhil Goyal 
RTE_CRYPTO_OP_WITH_SESSION, 14554c43055cSAkhil Goyal &m_data, sizeof(m_data)); 1456eff29c45SVolodymyr Fialko if (ret) 1457eff29c45SVolodymyr Fialko return ret; 14588f5b5495SAkhil Goyal p->ca.crypto_sess[flow_id] = sess; 14598f5b5495SAkhil Goyal } else { 14608f5b5495SAkhil Goyal void *sess; 14614c43055cSAkhil Goyal 14628f5b5495SAkhil Goyal sess = cryptodev_asym_sess_create(p, t); 14638f5b5495SAkhil Goyal if (sess == NULL) 14648f5b5495SAkhil Goyal return -ENOMEM; 1465eff29c45SVolodymyr Fialko ret = rte_cryptodev_session_event_mdata_set( 14668f5b5495SAkhil Goyal cdev_id, 14678f5b5495SAkhil Goyal sess, 14688f5b5495SAkhil Goyal RTE_CRYPTO_OP_TYPE_ASYMMETRIC, 14698f5b5495SAkhil Goyal RTE_CRYPTO_OP_WITH_SESSION, 14708f5b5495SAkhil Goyal &m_data, sizeof(m_data)); 1471eff29c45SVolodymyr Fialko if (ret) 1472eff29c45SVolodymyr Fialko return ret; 14738f5b5495SAkhil Goyal p->ca.crypto_sess[flow_id] = sess; 14748f5b5495SAkhil Goyal } 1475de2bc16eSShijith Thotton } 1476de2bc16eSShijith Thotton 1477de2bc16eSShijith Thotton conf.event_port_cfg |= 1478de2bc16eSShijith Thotton RTE_EVENT_PORT_CFG_HINT_PRODUCER | 1479de2bc16eSShijith Thotton RTE_EVENT_PORT_CFG_HINT_CONSUMER; 1480de2bc16eSShijith Thotton 1481de2bc16eSShijith Thotton ret = rte_event_port_setup(opt->dev_id, port, &conf); 1482de2bc16eSShijith Thotton if (ret) { 1483de2bc16eSShijith Thotton evt_err("failed to setup port %d", port); 1484de2bc16eSShijith Thotton return ret; 1485de2bc16eSShijith Thotton } 1486de2bc16eSShijith Thotton 1487de2bc16eSShijith Thotton qp_id++; 1488de2bc16eSShijith Thotton prod++; 1489de2bc16eSShijith Thotton } 1490*b25a66c4SAmit Prakash Shukla } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) { 1491*b25a66c4SAmit Prakash Shukla struct rte_event_port_conf conf = *port_conf; 1492*b25a66c4SAmit Prakash Shukla struct rte_event_dma_adapter_op *op; 1493*b25a66c4SAmit Prakash Shukla struct rte_mempool *pool = t->pool; 1494*b25a66c4SAmit Prakash Shukla uint8_t dma_dev_id = 0; 1495*b25a66c4SAmit 
Prakash Shukla uint16_t vchan_id = 0; 1496*b25a66c4SAmit Prakash Shukla 1497*b25a66c4SAmit Prakash Shukla ret = rte_event_dma_adapter_create(TEST_PERF_DA_ID, opt->dev_id, &conf, 0); 1498*b25a66c4SAmit Prakash Shukla if (ret) { 1499*b25a66c4SAmit Prakash Shukla evt_err("Failed to create dma adapter"); 1500*b25a66c4SAmit Prakash Shukla return ret; 1501*b25a66c4SAmit Prakash Shukla } 1502*b25a66c4SAmit Prakash Shukla 1503*b25a66c4SAmit Prakash Shukla prod = 0; 1504*b25a66c4SAmit Prakash Shukla for (; port < perf_nb_event_ports(opt); port++) { 1505*b25a66c4SAmit Prakash Shukla struct prod_data *p = &t->prod[port]; 1506*b25a66c4SAmit Prakash Shukla struct rte_event *response_info; 1507*b25a66c4SAmit Prakash Shukla uint32_t flow_id; 1508*b25a66c4SAmit Prakash Shukla 1509*b25a66c4SAmit Prakash Shukla p->dev_id = opt->dev_id; 1510*b25a66c4SAmit Prakash Shukla p->port_id = port; 1511*b25a66c4SAmit Prakash Shukla p->queue_id = prod * stride; 1512*b25a66c4SAmit Prakash Shukla p->da.dma_dev_id = dma_dev_id; 1513*b25a66c4SAmit Prakash Shukla p->da.vchan_id = vchan_id; 1514*b25a66c4SAmit Prakash Shukla p->da.dma_op = rte_zmalloc_socket(NULL, sizeof(void *) * t->nb_flows, 1515*b25a66c4SAmit Prakash Shukla RTE_CACHE_LINE_SIZE, opt->socket_id); 1516*b25a66c4SAmit Prakash Shukla 1517*b25a66c4SAmit Prakash Shukla p->t = t; 1518*b25a66c4SAmit Prakash Shukla 1519*b25a66c4SAmit Prakash Shukla ret = perf_event_dma_adapter_setup(t, p); 1520*b25a66c4SAmit Prakash Shukla if (ret) 1521*b25a66c4SAmit Prakash Shukla return ret; 1522*b25a66c4SAmit Prakash Shukla 1523*b25a66c4SAmit Prakash Shukla for (flow_id = 0; flow_id < t->nb_flows; flow_id++) { 1524*b25a66c4SAmit Prakash Shukla rte_mempool_get(t->da_op_pool, (void **)&op); 1525*b25a66c4SAmit Prakash Shukla 1526*b25a66c4SAmit Prakash Shukla op->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0); 1527*b25a66c4SAmit Prakash Shukla op->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0); 1528*b25a66c4SAmit Prakash Shukla 
1529*b25a66c4SAmit Prakash Shukla op->src_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool)); 1530*b25a66c4SAmit Prakash Shukla op->dst_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool)); 1531*b25a66c4SAmit Prakash Shukla op->src_seg->length = 1024; 1532*b25a66c4SAmit Prakash Shukla op->dst_seg->length = 1024; 1533*b25a66c4SAmit Prakash Shukla op->nb_src = 1; 1534*b25a66c4SAmit Prakash Shukla op->nb_dst = 1; 1535*b25a66c4SAmit Prakash Shukla op->flags = RTE_DMA_OP_FLAG_SUBMIT; 1536*b25a66c4SAmit Prakash Shukla op->op_mp = t->da_op_pool; 1537*b25a66c4SAmit Prakash Shukla op->dma_dev_id = dma_dev_id; 1538*b25a66c4SAmit Prakash Shukla op->vchan = vchan_id; 1539*b25a66c4SAmit Prakash Shukla 1540*b25a66c4SAmit Prakash Shukla response_info = (struct rte_event *)((uint8_t *)op + 1541*b25a66c4SAmit Prakash Shukla sizeof(struct rte_event_dma_adapter_op)); 1542*b25a66c4SAmit Prakash Shukla response_info->queue_id = p->queue_id; 1543*b25a66c4SAmit Prakash Shukla response_info->sched_type = RTE_SCHED_TYPE_ATOMIC; 1544*b25a66c4SAmit Prakash Shukla response_info->flow_id = flow_id; 1545*b25a66c4SAmit Prakash Shukla 1546*b25a66c4SAmit Prakash Shukla p->da.dma_op[flow_id] = op; 1547*b25a66c4SAmit Prakash Shukla } 1548*b25a66c4SAmit Prakash Shukla 1549*b25a66c4SAmit Prakash Shukla conf.event_port_cfg |= 1550*b25a66c4SAmit Prakash Shukla RTE_EVENT_PORT_CFG_HINT_PRODUCER | 1551*b25a66c4SAmit Prakash Shukla RTE_EVENT_PORT_CFG_HINT_CONSUMER; 1552*b25a66c4SAmit Prakash Shukla 1553*b25a66c4SAmit Prakash Shukla ret = rte_event_port_setup(opt->dev_id, port, &conf); 1554*b25a66c4SAmit Prakash Shukla if (ret) { 1555*b25a66c4SAmit Prakash Shukla evt_err("failed to setup port %d", port); 1556*b25a66c4SAmit Prakash Shukla return ret; 1557*b25a66c4SAmit Prakash Shukla } 1558*b25a66c4SAmit Prakash Shukla 1559*b25a66c4SAmit Prakash Shukla prod++; 1560*b25a66c4SAmit Prakash Shukla } 15613617aae5SPavan Nikhilesh } else { 156284a7513dSJerin Jacob prod = 0; 156384a7513dSJerin Jacob for ( ; port 
< perf_nb_event_ports(opt); port++) { 156484a7513dSJerin Jacob struct prod_data *p = &t->prod[port]; 156584a7513dSJerin Jacob 156684a7513dSJerin Jacob p->dev_id = opt->dev_id; 156784a7513dSJerin Jacob p->port_id = port; 156884a7513dSJerin Jacob p->queue_id = prod * stride; 156984a7513dSJerin Jacob p->t = t; 157084a7513dSJerin Jacob 15715f94d108SHarry van Haaren struct rte_event_port_conf conf = *port_conf; 15725f94d108SHarry van Haaren conf.event_port_cfg |= 15735f94d108SHarry van Haaren RTE_EVENT_PORT_CFG_HINT_PRODUCER | 15745f94d108SHarry van Haaren RTE_EVENT_PORT_CFG_HINT_CONSUMER; 15755f94d108SHarry van Haaren 15765f94d108SHarry van Haaren ret = rte_event_port_setup(opt->dev_id, port, &conf); 157784a7513dSJerin Jacob if (ret) { 157884a7513dSJerin Jacob evt_err("failed to setup port %d", port); 157984a7513dSJerin Jacob return ret; 158084a7513dSJerin Jacob } 158184a7513dSJerin Jacob prod++; 158284a7513dSJerin Jacob } 15833617aae5SPavan Nikhilesh } 158484a7513dSJerin Jacob 158584a7513dSJerin Jacob return ret; 158684a7513dSJerin Jacob } 158784a7513dSJerin Jacob 158884a7513dSJerin Jacob int 1589272de067SJerin Jacob perf_opt_check(struct evt_options *opt, uint64_t nb_queues) 1590272de067SJerin Jacob { 1591272de067SJerin Jacob unsigned int lcores; 1592272de067SJerin Jacob 1593cb056611SStephen Hemminger /* N producer + N worker + main when producer cores are used 1594cb056611SStephen Hemminger * Else N worker + main when Rx adapter is used 1595b01974daSPavan Nikhilesh */ 1596b01974daSPavan Nikhilesh lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 
3 : 2; 1597272de067SJerin Jacob 1598272de067SJerin Jacob if (rte_lcore_count() < lcores) { 1599272de067SJerin Jacob evt_err("test need minimum %d lcores", lcores); 1600272de067SJerin Jacob return -1; 1601272de067SJerin Jacob } 1602272de067SJerin Jacob 1603272de067SJerin Jacob /* Validate worker lcores */ 1604cb056611SStephen Hemminger if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) { 1605cb056611SStephen Hemminger evt_err("worker lcores overlaps with main lcore"); 1606272de067SJerin Jacob return -1; 1607272de067SJerin Jacob } 1608272de067SJerin Jacob if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) { 1609272de067SJerin Jacob evt_err("worker lcores overlaps producer lcores"); 1610272de067SJerin Jacob return -1; 1611272de067SJerin Jacob } 1612272de067SJerin Jacob if (evt_has_disabled_lcore(opt->wlcores)) { 1613272de067SJerin Jacob evt_err("one or more workers lcores are not enabled"); 1614272de067SJerin Jacob return -1; 1615272de067SJerin Jacob } 1616272de067SJerin Jacob if (!evt_has_active_lcore(opt->wlcores)) { 1617272de067SJerin Jacob evt_err("minimum one worker is required"); 1618272de067SJerin Jacob return -1; 1619272de067SJerin Jacob } 1620272de067SJerin Jacob 1621902387eaSPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_SYNT || 1622de2bc16eSShijith Thotton opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR || 1623*b25a66c4SAmit Prakash Shukla opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR || 1624*b25a66c4SAmit Prakash Shukla opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) { 1625272de067SJerin Jacob /* Validate producer lcores */ 1626b01974daSPavan Nikhilesh if (evt_lcores_has_overlap(opt->plcores, 1627cb056611SStephen Hemminger rte_get_main_lcore())) { 1628cb056611SStephen Hemminger evt_err("producer lcores overlaps with main lcore"); 1629272de067SJerin Jacob return -1; 1630272de067SJerin Jacob } 1631272de067SJerin Jacob if (evt_has_disabled_lcore(opt->plcores)) { 1632272de067SJerin Jacob evt_err("one or more producer lcores 
are not enabled"); 1633272de067SJerin Jacob return -1; 1634272de067SJerin Jacob } 1635272de067SJerin Jacob if (!evt_has_active_lcore(opt->plcores)) { 1636272de067SJerin Jacob evt_err("minimum one producer is required"); 1637272de067SJerin Jacob return -1; 1638272de067SJerin Jacob } 1639b01974daSPavan Nikhilesh } 1640272de067SJerin Jacob 1641272de067SJerin Jacob if (evt_has_invalid_stage(opt)) 1642272de067SJerin Jacob return -1; 1643272de067SJerin Jacob 1644272de067SJerin Jacob if (evt_has_invalid_sched_type(opt)) 1645272de067SJerin Jacob return -1; 1646272de067SJerin Jacob 1647272de067SJerin Jacob if (nb_queues > EVT_MAX_QUEUES) { 1648272de067SJerin Jacob evt_err("number of queues exceeds %d", EVT_MAX_QUEUES); 1649272de067SJerin Jacob return -1; 1650272de067SJerin Jacob } 1651272de067SJerin Jacob if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) { 1652272de067SJerin Jacob evt_err("number of ports exceeds %d", EVT_MAX_PORTS); 1653272de067SJerin Jacob return -1; 1654272de067SJerin Jacob } 1655272de067SJerin Jacob 1656272de067SJerin Jacob /* Fixups */ 1657d008f20bSPavan Nikhilesh if ((opt->nb_stages == 1 && 1658d008f20bSPavan Nikhilesh opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) && 1659d008f20bSPavan Nikhilesh opt->fwd_latency) { 1660272de067SJerin Jacob evt_info("fwd_latency is valid when nb_stages > 1, disabling"); 1661272de067SJerin Jacob opt->fwd_latency = 0; 1662272de067SJerin Jacob } 1663d008f20bSPavan Nikhilesh 1664272de067SJerin Jacob if (opt->fwd_latency && !opt->q_priority) { 1665272de067SJerin Jacob evt_info("enabled queue priority for latency measurement"); 1666272de067SJerin Jacob opt->q_priority = 1; 1667272de067SJerin Jacob } 16689d3aeb18SJerin Jacob if (opt->nb_pkts == 0) 16699d3aeb18SJerin Jacob opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores); 1670272de067SJerin Jacob 1671272de067SJerin Jacob return 0; 1672272de067SJerin Jacob } 1673272de067SJerin Jacob 1674272de067SJerin Jacob void 1675272de067SJerin Jacob perf_opt_dump(struct 
evt_options *opt, uint8_t nb_queues) 1676272de067SJerin Jacob { 1677272de067SJerin Jacob evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores)); 1678272de067SJerin Jacob evt_dump_producer_lcores(opt); 1679272de067SJerin Jacob evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores)); 1680272de067SJerin Jacob evt_dump_worker_lcores(opt); 1681272de067SJerin Jacob evt_dump_nb_stages(opt); 1682272de067SJerin Jacob evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt)); 1683272de067SJerin Jacob evt_dump("nb_evdev_queues", "%d", nb_queues); 1684272de067SJerin Jacob evt_dump_queue_priority(opt); 1685272de067SJerin Jacob evt_dump_sched_type_list(opt); 1686b01974daSPavan Nikhilesh evt_dump_producer_type(opt); 168720841a25SRashmi Shetty evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz); 1688272de067SJerin Jacob } 1689272de067SJerin Jacob 16907da008dfSPavan Nikhilesh static void 16917da008dfSPavan Nikhilesh perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev, 16927da008dfSPavan Nikhilesh void *args) 16937da008dfSPavan Nikhilesh { 16947da008dfSPavan Nikhilesh rte_mempool_put(args, ev.event_ptr); 16957da008dfSPavan Nikhilesh } 16967da008dfSPavan Nikhilesh 169741c219e6SJerin Jacob void 1698f0b68c0bSPavan Nikhilesh perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id, 1699f0b68c0bSPavan Nikhilesh uint8_t port_id, struct rte_event events[], uint16_t nb_enq, 1700f0b68c0bSPavan Nikhilesh uint16_t nb_deq) 1701f0b68c0bSPavan Nikhilesh { 1702f0b68c0bSPavan Nikhilesh int i; 1703f0b68c0bSPavan Nikhilesh 1704f0b68c0bSPavan Nikhilesh if (nb_deq) { 1705f0b68c0bSPavan Nikhilesh for (i = nb_enq; i < nb_deq; i++) 1706f0b68c0bSPavan Nikhilesh rte_mempool_put(pool, events[i].event_ptr); 1707f0b68c0bSPavan Nikhilesh 1708f0b68c0bSPavan Nikhilesh for (i = 0; i < nb_deq; i++) 1709f0b68c0bSPavan Nikhilesh events[i].op = RTE_EVENT_OP_RELEASE; 1710f0b68c0bSPavan Nikhilesh rte_event_enqueue_burst(dev_id, port_id, events, nb_deq); 
1711f0b68c0bSPavan Nikhilesh } 17127da008dfSPavan Nikhilesh rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool); 1713f0b68c0bSPavan Nikhilesh } 1714f0b68c0bSPavan Nikhilesh 1715f0b68c0bSPavan Nikhilesh void 171641c219e6SJerin Jacob perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt) 171741c219e6SJerin Jacob { 1718d008f20bSPavan Nikhilesh int i; 1719d008f20bSPavan Nikhilesh struct test_perf *t = evt_test_priv(test); 172041c219e6SJerin Jacob 1721d008f20bSPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { 1722d008f20bSPavan Nikhilesh for (i = 0; i < opt->nb_timer_adptrs; i++) 1723d008f20bSPavan Nikhilesh rte_event_timer_adapter_stop(t->timer_adptr[i]); 1724d008f20bSPavan Nikhilesh } 172541c219e6SJerin Jacob rte_event_dev_stop(opt->dev_id); 172641c219e6SJerin Jacob rte_event_dev_close(opt->dev_id); 172741c219e6SJerin Jacob } 172841c219e6SJerin Jacob 172941c219e6SJerin Jacob static inline void 173041c219e6SJerin Jacob perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused, 173141c219e6SJerin Jacob void *obj, unsigned i __rte_unused) 173241c219e6SJerin Jacob { 173341c219e6SJerin Jacob memset(obj, 0, mp->elt_size); 173441c219e6SJerin Jacob } 173541c219e6SJerin Jacob 17363fc8de4fSPavan Nikhilesh #define NB_RX_DESC 128 17373fc8de4fSPavan Nikhilesh #define NB_TX_DESC 512 17383fc8de4fSPavan Nikhilesh int 17393fc8de4fSPavan Nikhilesh perf_ethdev_setup(struct evt_test *test, struct evt_options *opt) 17403fc8de4fSPavan Nikhilesh { 17418728ccf3SThomas Monjalon uint16_t i; 174277339255SIvan Ilchenko int ret; 17433fc8de4fSPavan Nikhilesh struct test_perf *t = evt_test_priv(test); 17443fc8de4fSPavan Nikhilesh struct rte_eth_conf port_conf = { 17453fc8de4fSPavan Nikhilesh .rxmode = { 1746295968d1SFerruh Yigit .mq_mode = RTE_ETH_MQ_RX_RSS, 17473fc8de4fSPavan Nikhilesh }, 17483fc8de4fSPavan Nikhilesh .rx_adv_conf = { 17493fc8de4fSPavan Nikhilesh .rss_conf = { 17503fc8de4fSPavan Nikhilesh .rss_key = NULL, 1751295968d1SFerruh 
Yigit .rss_hf = RTE_ETH_RSS_IP, 17523fc8de4fSPavan Nikhilesh }, 17533fc8de4fSPavan Nikhilesh }, 17543fc8de4fSPavan Nikhilesh }; 17553fc8de4fSPavan Nikhilesh 1756de2bc16eSShijith Thotton if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) 17573fc8de4fSPavan Nikhilesh return 0; 17583fc8de4fSPavan Nikhilesh 1759d9a42a69SThomas Monjalon if (!rte_eth_dev_count_avail()) { 17603fc8de4fSPavan Nikhilesh evt_err("No ethernet ports found."); 17613fc8de4fSPavan Nikhilesh return -ENODEV; 17623fc8de4fSPavan Nikhilesh } 17633fc8de4fSPavan Nikhilesh 17648728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(i) { 17654f5701f2SFerruh Yigit struct rte_eth_dev_info dev_info; 17664f5701f2SFerruh Yigit struct rte_eth_conf local_port_conf = port_conf; 17673fc8de4fSPavan Nikhilesh 176877339255SIvan Ilchenko ret = rte_eth_dev_info_get(i, &dev_info); 176977339255SIvan Ilchenko if (ret != 0) { 177077339255SIvan Ilchenko evt_err("Error during getting device (port %u) info: %s\n", 177177339255SIvan Ilchenko i, strerror(-ret)); 177277339255SIvan Ilchenko return ret; 177377339255SIvan Ilchenko } 17744f5701f2SFerruh Yigit 17754f5701f2SFerruh Yigit local_port_conf.rx_adv_conf.rss_conf.rss_hf &= 17764f5701f2SFerruh Yigit dev_info.flow_type_rss_offloads; 17774f5701f2SFerruh Yigit if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != 17784f5701f2SFerruh Yigit port_conf.rx_adv_conf.rss_conf.rss_hf) { 17794f5701f2SFerruh Yigit evt_info("Port %u modified RSS hash function based on hardware support," 17804f5701f2SFerruh Yigit "requested:%#"PRIx64" configured:%#"PRIx64"\n", 17814f5701f2SFerruh Yigit i, 17824f5701f2SFerruh Yigit port_conf.rx_adv_conf.rss_conf.rss_hf, 17834f5701f2SFerruh Yigit local_port_conf.rx_adv_conf.rss_conf.rss_hf); 17844f5701f2SFerruh Yigit } 17854f5701f2SFerruh Yigit 17864f5701f2SFerruh Yigit if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) { 17873fc8de4fSPavan Nikhilesh evt_err("Failed to configure eth port [%d]", i); 17883fc8de4fSPavan Nikhilesh return -EINVAL; 17893fc8de4fSPavan 
Nikhilesh } 17903fc8de4fSPavan Nikhilesh 17913fc8de4fSPavan Nikhilesh if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC, 17923fc8de4fSPavan Nikhilesh rte_socket_id(), NULL, t->pool) < 0) { 17933fc8de4fSPavan Nikhilesh evt_err("Failed to setup eth port [%d] rx_queue: %d.", 17943fc8de4fSPavan Nikhilesh i, 0); 17953fc8de4fSPavan Nikhilesh return -EINVAL; 17963fc8de4fSPavan Nikhilesh } 17973fc8de4fSPavan Nikhilesh 17983fc8de4fSPavan Nikhilesh if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC, 17993fc8de4fSPavan Nikhilesh rte_socket_id(), NULL) < 0) { 18003fc8de4fSPavan Nikhilesh evt_err("Failed to setup eth port [%d] tx_queue: %d.", 18013fc8de4fSPavan Nikhilesh i, 0); 18023fc8de4fSPavan Nikhilesh return -EINVAL; 18033fc8de4fSPavan Nikhilesh } 18043fc8de4fSPavan Nikhilesh 180570e51a0eSIvan Ilchenko ret = rte_eth_promiscuous_enable(i); 180670e51a0eSIvan Ilchenko if (ret != 0) { 180770e51a0eSIvan Ilchenko evt_err("Failed to enable promiscuous mode for eth port [%d]: %s", 180870e51a0eSIvan Ilchenko i, rte_strerror(-ret)); 180970e51a0eSIvan Ilchenko return ret; 181070e51a0eSIvan Ilchenko } 18113fc8de4fSPavan Nikhilesh } 18123fc8de4fSPavan Nikhilesh 18133fc8de4fSPavan Nikhilesh return 0; 18143fc8de4fSPavan Nikhilesh } 18153fc8de4fSPavan Nikhilesh 1816a734e738SPavan Nikhilesh void 1817a734e738SPavan Nikhilesh perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt) 18187f3daf34SPavan Nikhilesh { 18198728ccf3SThomas Monjalon uint16_t i; 18207f3daf34SPavan Nikhilesh RTE_SET_USED(test); 18217f3daf34SPavan Nikhilesh 18227f3daf34SPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { 18238728ccf3SThomas Monjalon RTE_ETH_FOREACH_DEV(i) { 18243617aae5SPavan Nikhilesh rte_event_eth_rx_adapter_stop(i); 1825a734e738SPavan Nikhilesh rte_event_eth_rx_adapter_queue_del(i, i, -1); 1826a734e738SPavan Nikhilesh rte_eth_dev_rx_queue_stop(i, 0); 1827a734e738SPavan Nikhilesh } 1828a734e738SPavan Nikhilesh } 1829a734e738SPavan Nikhilesh } 1830a734e738SPavan Nikhilesh 
1831a734e738SPavan Nikhilesh void 1832a734e738SPavan Nikhilesh perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt) 1833a734e738SPavan Nikhilesh { 1834a734e738SPavan Nikhilesh uint16_t i; 1835a734e738SPavan Nikhilesh RTE_SET_USED(test); 1836a734e738SPavan Nikhilesh 1837a734e738SPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) { 1838a734e738SPavan Nikhilesh RTE_ETH_FOREACH_DEV(i) { 1839a734e738SPavan Nikhilesh rte_event_eth_tx_adapter_stop(i); 1840a734e738SPavan Nikhilesh rte_event_eth_tx_adapter_queue_del(i, i, -1); 1841a734e738SPavan Nikhilesh rte_eth_dev_tx_queue_stop(i, 0); 18427f3daf34SPavan Nikhilesh rte_eth_dev_stop(i); 18437f3daf34SPavan Nikhilesh } 18447f3daf34SPavan Nikhilesh } 18457f3daf34SPavan Nikhilesh } 18467f3daf34SPavan Nikhilesh 184741c219e6SJerin Jacob int 1848de2bc16eSShijith Thotton perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt) 1849de2bc16eSShijith Thotton { 1850de2bc16eSShijith Thotton uint8_t cdev_count, cdev_id, nb_plcores, nb_qps; 1851de2bc16eSShijith Thotton struct test_perf *t = evt_test_priv(test); 1852de2bc16eSShijith Thotton unsigned int max_session_size; 1853de2bc16eSShijith Thotton uint32_t nb_sessions; 1854de2bc16eSShijith Thotton int ret; 1855de2bc16eSShijith Thotton 1856de2bc16eSShijith Thotton if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) 1857de2bc16eSShijith Thotton return 0; 1858de2bc16eSShijith Thotton 1859de2bc16eSShijith Thotton cdev_count = rte_cryptodev_count(); 1860de2bc16eSShijith Thotton if (cdev_count == 0) { 1861de2bc16eSShijith Thotton evt_err("No crypto devices available\n"); 1862de2bc16eSShijith Thotton return -ENODEV; 1863de2bc16eSShijith Thotton } 1864de2bc16eSShijith Thotton 1865de2bc16eSShijith Thotton t->ca_op_pool = rte_crypto_op_pool_create( 18668f5b5495SAkhil Goyal "crypto_op_pool", opt->crypto_op_type, opt->pool_sz, 1867750ab9d5SAakash Sasidharan 128, sizeof(union rte_event_crypto_metadata) + EVT_CRYPTO_MAX_IV_SIZE, 18688f5b5495SAkhil Goyal 
rte_socket_id()); 1869de2bc16eSShijith Thotton if (t->ca_op_pool == NULL) { 1870de2bc16eSShijith Thotton evt_err("Failed to create crypto op pool"); 1871de2bc16eSShijith Thotton return -ENOMEM; 1872de2bc16eSShijith Thotton } 1873de2bc16eSShijith Thotton 1874de2bc16eSShijith Thotton nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows; 18758f5b5495SAkhil Goyal t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create( 18768f5b5495SAkhil Goyal "ca_asym_sess_pool", nb_sessions, 0, 18778f5b5495SAkhil Goyal sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY); 18788f5b5495SAkhil Goyal if (t->ca_asym_sess_pool == NULL) { 18798f5b5495SAkhil Goyal evt_err("Failed to create sym session pool"); 18808f5b5495SAkhil Goyal ret = -ENOMEM; 18818f5b5495SAkhil Goyal goto err; 18828f5b5495SAkhil Goyal } 18838f5b5495SAkhil Goyal 1884de2bc16eSShijith Thotton max_session_size = 0; 1885de2bc16eSShijith Thotton for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) { 1886de2bc16eSShijith Thotton unsigned int session_size; 1887de2bc16eSShijith Thotton 1888de2bc16eSShijith Thotton session_size = 1889de2bc16eSShijith Thotton rte_cryptodev_sym_get_private_session_size(cdev_id); 1890de2bc16eSShijith Thotton if (session_size > max_session_size) 1891de2bc16eSShijith Thotton max_session_size = session_size; 1892de2bc16eSShijith Thotton } 1893de2bc16eSShijith Thotton 1894bdce2564SAkhil Goyal t->ca_sess_pool = rte_cryptodev_sym_session_pool_create( 1895bdce2564SAkhil Goyal "ca_sess_pool", nb_sessions, max_session_size, 0, 1896bdce2564SAkhil Goyal sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY); 1897bdce2564SAkhil Goyal if (t->ca_sess_pool == NULL) { 1898bdce2564SAkhil Goyal evt_err("Failed to create sym session pool"); 1899de2bc16eSShijith Thotton ret = -ENOMEM; 1900de2bc16eSShijith Thotton goto err; 1901de2bc16eSShijith Thotton } 1902de2bc16eSShijith Thotton 190369e807dfSVolodymyr Fialko if (opt->ena_vector) { 190469e807dfSVolodymyr Fialko unsigned int nb_elem = (opt->pool_sz 
/ opt->vector_size) * 2; 190569e807dfSVolodymyr Fialko nb_elem = RTE_MAX(512U, nb_elem); 190669e807dfSVolodymyr Fialko nb_elem += evt_nr_active_lcores(opt->wlcores) * 32; 190769e807dfSVolodymyr Fialko t->ca_vector_pool = rte_event_vector_pool_create("vector_pool", nb_elem, 32, 190869e807dfSVolodymyr Fialko opt->vector_size, opt->socket_id); 190969e807dfSVolodymyr Fialko if (t->ca_vector_pool == NULL) { 191069e807dfSVolodymyr Fialko evt_err("Failed to create event vector pool"); 191169e807dfSVolodymyr Fialko ret = -ENOMEM; 191269e807dfSVolodymyr Fialko goto err; 191369e807dfSVolodymyr Fialko } 191469e807dfSVolodymyr Fialko } 191569e807dfSVolodymyr Fialko 1916de2bc16eSShijith Thotton /* 1917de2bc16eSShijith Thotton * Calculate number of needed queue pairs, based on the amount of 1918de2bc16eSShijith Thotton * available number of logical cores and crypto devices. For instance, 1919de2bc16eSShijith Thotton * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set 1920de2bc16eSShijith Thotton * up per device. 1921de2bc16eSShijith Thotton */ 1922de2bc16eSShijith Thotton nb_plcores = evt_nr_active_lcores(opt->plcores); 1923de2bc16eSShijith Thotton nb_qps = (nb_plcores % cdev_count) ? 
(nb_plcores / cdev_count) + 1 : 1924de2bc16eSShijith Thotton nb_plcores / cdev_count; 1925de2bc16eSShijith Thotton for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) { 1926de2bc16eSShijith Thotton struct rte_cryptodev_qp_conf qp_conf; 1927de2bc16eSShijith Thotton struct rte_cryptodev_config conf; 1928de2bc16eSShijith Thotton struct rte_cryptodev_info info; 1929de2bc16eSShijith Thotton int qp_id; 1930de2bc16eSShijith Thotton 1931de2bc16eSShijith Thotton rte_cryptodev_info_get(cdev_id, &info); 1932de2bc16eSShijith Thotton if (nb_qps > info.max_nb_queue_pairs) { 1933de2bc16eSShijith Thotton evt_err("Not enough queue pairs per cryptodev (%u)", 1934de2bc16eSShijith Thotton nb_qps); 1935de2bc16eSShijith Thotton ret = -EINVAL; 1936de2bc16eSShijith Thotton goto err; 1937de2bc16eSShijith Thotton } 1938de2bc16eSShijith Thotton 1939de2bc16eSShijith Thotton conf.nb_queue_pairs = nb_qps; 1940de2bc16eSShijith Thotton conf.socket_id = SOCKET_ID_ANY; 1941de2bc16eSShijith Thotton conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY; 1942de2bc16eSShijith Thotton 1943de2bc16eSShijith Thotton ret = rte_cryptodev_configure(cdev_id, &conf); 1944de2bc16eSShijith Thotton if (ret) { 1945de2bc16eSShijith Thotton evt_err("Failed to configure cryptodev (%u)", cdev_id); 1946de2bc16eSShijith Thotton goto err; 1947de2bc16eSShijith Thotton } 1948de2bc16eSShijith Thotton 1949de2bc16eSShijith Thotton qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS; 1950de2bc16eSShijith Thotton qp_conf.mp_session = t->ca_sess_pool; 1951de2bc16eSShijith Thotton 1952de2bc16eSShijith Thotton for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) { 1953de2bc16eSShijith Thotton ret = rte_cryptodev_queue_pair_setup( 1954de2bc16eSShijith Thotton cdev_id, qp_id, &qp_conf, 1955de2bc16eSShijith Thotton rte_cryptodev_socket_id(cdev_id)); 1956de2bc16eSShijith Thotton if (ret) { 1957de2bc16eSShijith Thotton evt_err("Failed to setup queue pairs on cryptodev %u\n", 1958de2bc16eSShijith Thotton cdev_id); 1959de2bc16eSShijith Thotton goto 
err; 1960de2bc16eSShijith Thotton } 1961de2bc16eSShijith Thotton } 1962de2bc16eSShijith Thotton } 1963de2bc16eSShijith Thotton 1964de2bc16eSShijith Thotton return 0; 1965de2bc16eSShijith Thotton err: 1966de2bc16eSShijith Thotton for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) 1967de2bc16eSShijith Thotton rte_cryptodev_close(cdev_id); 1968de2bc16eSShijith Thotton 1969de2bc16eSShijith Thotton rte_mempool_free(t->ca_op_pool); 1970de2bc16eSShijith Thotton rte_mempool_free(t->ca_sess_pool); 19718f5b5495SAkhil Goyal rte_mempool_free(t->ca_asym_sess_pool); 197269e807dfSVolodymyr Fialko rte_mempool_free(t->ca_vector_pool); 1973de2bc16eSShijith Thotton 1974de2bc16eSShijith Thotton return ret; 1975de2bc16eSShijith Thotton } 1976de2bc16eSShijith Thotton 1977de2bc16eSShijith Thotton void 1978de2bc16eSShijith Thotton perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt) 1979de2bc16eSShijith Thotton { 1980de2bc16eSShijith Thotton uint8_t cdev_id, cdev_count = rte_cryptodev_count(); 1981de2bc16eSShijith Thotton struct test_perf *t = evt_test_priv(test); 1982de2bc16eSShijith Thotton uint16_t port; 1983de2bc16eSShijith Thotton 1984de2bc16eSShijith Thotton if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) 1985de2bc16eSShijith Thotton return; 1986de2bc16eSShijith Thotton 1987de2bc16eSShijith Thotton for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) { 19882a440d6aSAkhil Goyal void *sess; 1989de2bc16eSShijith Thotton struct prod_data *p = &t->prod[port]; 1990de2bc16eSShijith Thotton uint32_t flow_id; 1991de2bc16eSShijith Thotton uint8_t cdev_id; 1992de2bc16eSShijith Thotton 1993de2bc16eSShijith Thotton for (flow_id = 0; flow_id < t->nb_flows; flow_id++) { 1994de2bc16eSShijith Thotton sess = p->ca.crypto_sess[flow_id]; 1995de2bc16eSShijith Thotton cdev_id = p->ca.cdev_id; 1996bdce2564SAkhil Goyal rte_cryptodev_sym_session_free(cdev_id, sess); 1997de2bc16eSShijith Thotton } 1998de2bc16eSShijith Thotton 1999de2bc16eSShijith Thotton 
rte_event_crypto_adapter_queue_pair_del( 2000de2bc16eSShijith Thotton TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id); 2001de2bc16eSShijith Thotton } 2002de2bc16eSShijith Thotton 2003de2bc16eSShijith Thotton rte_event_crypto_adapter_free(TEST_PERF_CA_ID); 2004de2bc16eSShijith Thotton 2005de2bc16eSShijith Thotton for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) { 2006de2bc16eSShijith Thotton rte_cryptodev_stop(cdev_id); 2007de2bc16eSShijith Thotton rte_cryptodev_close(cdev_id); 2008de2bc16eSShijith Thotton } 2009de2bc16eSShijith Thotton 2010de2bc16eSShijith Thotton rte_mempool_free(t->ca_op_pool); 2011de2bc16eSShijith Thotton rte_mempool_free(t->ca_sess_pool); 20128f5b5495SAkhil Goyal rte_mempool_free(t->ca_asym_sess_pool); 201369e807dfSVolodymyr Fialko rte_mempool_free(t->ca_vector_pool); 2014de2bc16eSShijith Thotton } 2015de2bc16eSShijith Thotton 2016de2bc16eSShijith Thotton int 2017*b25a66c4SAmit Prakash Shukla perf_dmadev_setup(struct evt_test *test, struct evt_options *opt) 2018*b25a66c4SAmit Prakash Shukla { 2019*b25a66c4SAmit Prakash Shukla const struct rte_dma_conf conf = { .nb_vchans = 1}; 2020*b25a66c4SAmit Prakash Shukla const struct rte_dma_vchan_conf qconf = { 2021*b25a66c4SAmit Prakash Shukla .direction = RTE_DMA_DIR_MEM_TO_MEM, 2022*b25a66c4SAmit Prakash Shukla .nb_desc = 1024, 2023*b25a66c4SAmit Prakash Shukla }; 2024*b25a66c4SAmit Prakash Shukla struct test_perf *t = evt_test_priv(test); 2025*b25a66c4SAmit Prakash Shukla uint8_t dma_dev_count, dma_dev_id = 0; 2026*b25a66c4SAmit Prakash Shukla unsigned int elt_size; 2027*b25a66c4SAmit Prakash Shukla int vchan_id; 2028*b25a66c4SAmit Prakash Shukla int ret; 2029*b25a66c4SAmit Prakash Shukla 2030*b25a66c4SAmit Prakash Shukla if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR) 2031*b25a66c4SAmit Prakash Shukla return 0; 2032*b25a66c4SAmit Prakash Shukla 2033*b25a66c4SAmit Prakash Shukla dma_dev_count = rte_dma_count_avail(); 2034*b25a66c4SAmit Prakash Shukla if (dma_dev_count == 0) { 
2035*b25a66c4SAmit Prakash Shukla evt_err("No dma devices available\n"); 2036*b25a66c4SAmit Prakash Shukla return -ENODEV; 2037*b25a66c4SAmit Prakash Shukla } 2038*b25a66c4SAmit Prakash Shukla 2039*b25a66c4SAmit Prakash Shukla elt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event); 2040*b25a66c4SAmit Prakash Shukla t->da_op_pool = rte_mempool_create("dma_op_pool", opt->pool_sz, elt_size, 256, 2041*b25a66c4SAmit Prakash Shukla 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0); 2042*b25a66c4SAmit Prakash Shukla if (t->da_op_pool == NULL) { 2043*b25a66c4SAmit Prakash Shukla evt_err("Failed to create dma op pool"); 2044*b25a66c4SAmit Prakash Shukla return -ENOMEM; 2045*b25a66c4SAmit Prakash Shukla } 2046*b25a66c4SAmit Prakash Shukla 2047*b25a66c4SAmit Prakash Shukla ret = rte_dma_configure(dma_dev_id, &conf); 2048*b25a66c4SAmit Prakash Shukla if (ret) { 2049*b25a66c4SAmit Prakash Shukla evt_err("Failed to configure dma dev (%u)", dma_dev_id); 2050*b25a66c4SAmit Prakash Shukla goto err; 2051*b25a66c4SAmit Prakash Shukla } 2052*b25a66c4SAmit Prakash Shukla 2053*b25a66c4SAmit Prakash Shukla for (vchan_id = 0; vchan_id < conf.nb_vchans; vchan_id++) { 2054*b25a66c4SAmit Prakash Shukla ret = rte_dma_vchan_setup(dma_dev_id, vchan_id, &qconf); 2055*b25a66c4SAmit Prakash Shukla if (ret) { 2056*b25a66c4SAmit Prakash Shukla evt_err("Failed to setup vchan on dma dev %u\n", 2057*b25a66c4SAmit Prakash Shukla dma_dev_id); 2058*b25a66c4SAmit Prakash Shukla goto err; 2059*b25a66c4SAmit Prakash Shukla } 2060*b25a66c4SAmit Prakash Shukla } 2061*b25a66c4SAmit Prakash Shukla 2062*b25a66c4SAmit Prakash Shukla return 0; 2063*b25a66c4SAmit Prakash Shukla err: 2064*b25a66c4SAmit Prakash Shukla rte_dma_close(dma_dev_id); 2065*b25a66c4SAmit Prakash Shukla rte_mempool_free(t->da_op_pool); 2066*b25a66c4SAmit Prakash Shukla 2067*b25a66c4SAmit Prakash Shukla return ret; 2068*b25a66c4SAmit Prakash Shukla } 2069*b25a66c4SAmit Prakash Shukla 2070*b25a66c4SAmit Prakash Shukla void 
2071*b25a66c4SAmit Prakash Shukla perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt) 2072*b25a66c4SAmit Prakash Shukla { 2073*b25a66c4SAmit Prakash Shukla uint8_t dma_dev_id = 0; 2074*b25a66c4SAmit Prakash Shukla struct test_perf *t = evt_test_priv(test); 2075*b25a66c4SAmit Prakash Shukla uint16_t port; 2076*b25a66c4SAmit Prakash Shukla 2077*b25a66c4SAmit Prakash Shukla if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR) 2078*b25a66c4SAmit Prakash Shukla return; 2079*b25a66c4SAmit Prakash Shukla 2080*b25a66c4SAmit Prakash Shukla for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) { 2081*b25a66c4SAmit Prakash Shukla struct prod_data *p = &t->prod[port]; 2082*b25a66c4SAmit Prakash Shukla struct rte_event_dma_adapter_op *op; 2083*b25a66c4SAmit Prakash Shukla uint32_t flow_id; 2084*b25a66c4SAmit Prakash Shukla 2085*b25a66c4SAmit Prakash Shukla for (flow_id = 0; flow_id < t->nb_flows; flow_id++) { 2086*b25a66c4SAmit Prakash Shukla op = p->da.dma_op[flow_id]; 2087*b25a66c4SAmit Prakash Shukla 2088*b25a66c4SAmit Prakash Shukla rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->src_seg->addr); 2089*b25a66c4SAmit Prakash Shukla rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->dst_seg->addr); 2090*b25a66c4SAmit Prakash Shukla rte_free(op->src_seg); 2091*b25a66c4SAmit Prakash Shukla rte_free(op->dst_seg); 2092*b25a66c4SAmit Prakash Shukla rte_mempool_put(op->op_mp, op); 2093*b25a66c4SAmit Prakash Shukla } 2094*b25a66c4SAmit Prakash Shukla 2095*b25a66c4SAmit Prakash Shukla rte_event_dma_adapter_vchan_del(TEST_PERF_DA_ID, p->da.dma_dev_id, p->da.vchan_id); 2096*b25a66c4SAmit Prakash Shukla } 2097*b25a66c4SAmit Prakash Shukla 2098*b25a66c4SAmit Prakash Shukla rte_event_dma_adapter_free(TEST_PERF_DA_ID); 2099*b25a66c4SAmit Prakash Shukla 2100*b25a66c4SAmit Prakash Shukla rte_dma_stop(dma_dev_id); 2101*b25a66c4SAmit Prakash Shukla rte_dma_close(dma_dev_id); 2102*b25a66c4SAmit Prakash Shukla 2103*b25a66c4SAmit Prakash Shukla 
rte_mempool_free(t->da_op_pool); 2104*b25a66c4SAmit Prakash Shukla } 2105*b25a66c4SAmit Prakash Shukla 2106*b25a66c4SAmit Prakash Shukla int 210741c219e6SJerin Jacob perf_mempool_setup(struct evt_test *test, struct evt_options *opt) 210841c219e6SJerin Jacob { 210941c219e6SJerin Jacob struct test_perf *t = evt_test_priv(test); 2110211b2e2aSPavan Nikhilesh unsigned int cache_sz; 211141c219e6SJerin Jacob 2112211b2e2aSPavan Nikhilesh cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, (opt->pool_sz / 1.5) / t->nb_workers); 2113d008f20bSPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_SYNT || 2114d008f20bSPavan Nikhilesh opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { 211541c219e6SJerin Jacob t->pool = rte_mempool_create(test->name, /* mempool name */ 211641c219e6SJerin Jacob opt->pool_sz, /* number of elements*/ 211741c219e6SJerin Jacob sizeof(struct perf_elt), /* element size*/ 2118211b2e2aSPavan Nikhilesh cache_sz, /* cache size*/ 211941c219e6SJerin Jacob 0, NULL, NULL, 212041c219e6SJerin Jacob perf_elt_init, /* obj constructor */ 212141c219e6SJerin Jacob NULL, opt->socket_id, 0); /* flags */ 21226776a581SVolodymyr Fialko } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR && 21236776a581SVolodymyr Fialko opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { 21246776a581SVolodymyr Fialko t->pool = rte_mempool_create(test->name, /* mempool name */ 21256776a581SVolodymyr Fialko opt->pool_sz, /* number of elements*/ 21266776a581SVolodymyr Fialko sizeof(struct perf_elt) + modex_test_case.result_len, 21276776a581SVolodymyr Fialko /* element size*/ 2128211b2e2aSPavan Nikhilesh cache_sz, /* cache size*/ 21296776a581SVolodymyr Fialko 0, NULL, NULL, 21306776a581SVolodymyr Fialko NULL, /* obj constructor */ 21316776a581SVolodymyr Fialko NULL, opt->socket_id, 0); /* flags */ 21328577cc1aSPavan Nikhilesh } else { 21338577cc1aSPavan Nikhilesh t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */ 21348577cc1aSPavan Nikhilesh opt->pool_sz, /* number of 
elements*/ 2135211b2e2aSPavan Nikhilesh cache_sz, /* cache size*/ 21368577cc1aSPavan Nikhilesh 0, 21378577cc1aSPavan Nikhilesh RTE_MBUF_DEFAULT_BUF_SIZE, 21388577cc1aSPavan Nikhilesh opt->socket_id); /* flags */ 21398577cc1aSPavan Nikhilesh } 21408577cc1aSPavan Nikhilesh 214141c219e6SJerin Jacob if (t->pool == NULL) { 214241c219e6SJerin Jacob evt_err("failed to create mempool"); 214341c219e6SJerin Jacob return -ENOMEM; 214441c219e6SJerin Jacob } 214541c219e6SJerin Jacob 214641c219e6SJerin Jacob return 0; 214741c219e6SJerin Jacob } 214841c219e6SJerin Jacob 214941c219e6SJerin Jacob void 215041c219e6SJerin Jacob perf_mempool_destroy(struct evt_test *test, struct evt_options *opt) 215141c219e6SJerin Jacob { 215241c219e6SJerin Jacob RTE_SET_USED(opt); 215341c219e6SJerin Jacob struct test_perf *t = evt_test_priv(test); 215441c219e6SJerin Jacob 215541c219e6SJerin Jacob rte_mempool_free(t->pool); 215641c219e6SJerin Jacob } 2157ffbae86fSJerin Jacob 2158ffbae86fSJerin Jacob int 2159ffbae86fSJerin Jacob perf_test_setup(struct evt_test *test, struct evt_options *opt) 2160ffbae86fSJerin Jacob { 2161ffbae86fSJerin Jacob void *test_perf; 2162ffbae86fSJerin Jacob 2163ffbae86fSJerin Jacob test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf), 2164ffbae86fSJerin Jacob RTE_CACHE_LINE_SIZE, opt->socket_id); 2165ffbae86fSJerin Jacob if (test_perf == NULL) { 2166ffbae86fSJerin Jacob evt_err("failed to allocate test_perf memory"); 2167ffbae86fSJerin Jacob goto nomem; 2168ffbae86fSJerin Jacob } 2169ffbae86fSJerin Jacob test->test_priv = test_perf; 2170ffbae86fSJerin Jacob 2171ffbae86fSJerin Jacob struct test_perf *t = evt_test_priv(test); 2172ffbae86fSJerin Jacob 2173d008f20bSPavan Nikhilesh if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) { 2174d008f20bSPavan Nikhilesh t->outstand_pkts = opt->nb_timers * 2175d008f20bSPavan Nikhilesh evt_nr_active_lcores(opt->plcores); 2176d008f20bSPavan Nikhilesh t->nb_pkts = opt->nb_timers; 2177d008f20bSPavan Nikhilesh } else { 
2178d008f20bSPavan Nikhilesh t->outstand_pkts = opt->nb_pkts * 2179d008f20bSPavan Nikhilesh evt_nr_active_lcores(opt->plcores); 2180d008f20bSPavan Nikhilesh t->nb_pkts = opt->nb_pkts; 2181d008f20bSPavan Nikhilesh } 2182d008f20bSPavan Nikhilesh 2183ffbae86fSJerin Jacob t->nb_workers = evt_nr_active_lcores(opt->wlcores); 2184ffbae86fSJerin Jacob t->done = false; 2185ffbae86fSJerin Jacob t->nb_flows = opt->nb_flows; 2186ffbae86fSJerin Jacob t->result = EVT_TEST_FAILED; 2187ffbae86fSJerin Jacob t->opt = opt; 2188ffbae86fSJerin Jacob memcpy(t->sched_type_list, opt->sched_type_list, 2189ffbae86fSJerin Jacob sizeof(opt->sched_type_list)); 2190ffbae86fSJerin Jacob return 0; 2191ffbae86fSJerin Jacob nomem: 2192ffbae86fSJerin Jacob return -ENOMEM; 2193ffbae86fSJerin Jacob } 2194ffbae86fSJerin Jacob 2195ffbae86fSJerin Jacob void 2196ffbae86fSJerin Jacob perf_test_destroy(struct evt_test *test, struct evt_options *opt) 2197ffbae86fSJerin Jacob { 2198ffbae86fSJerin Jacob RTE_SET_USED(opt); 2199ffbae86fSJerin Jacob 2200ffbae86fSJerin Jacob rte_free(test->test_priv); 2201ffbae86fSJerin Jacob } 2202