xref: /dpdk/app/test-eventdev/test_perf_common.c (revision f123568c38333d8571542735697d0941caed42ea)
153a3b7e8SJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
253a3b7e8SJerin Jacob  * Copyright(c) 2017 Cavium, Inc
3ffbae86fSJerin Jacob  */
4ffbae86fSJerin Jacob 
5626b12a8SPavan Nikhilesh #include <math.h>
6626b12a8SPavan Nikhilesh 
7ffbae86fSJerin Jacob #include "test_perf_common.h"
8ffbae86fSJerin Jacob 
9f3a67078SVolodymyr Fialko #define NB_CRYPTODEV_DESCRIPTORS 1024
108f5b5495SAkhil Goyal #define DATA_SIZE		512
11750ab9d5SAakash Sasidharan #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
12750ab9d5SAakash Sasidharan 		   sizeof(struct rte_crypto_sym_op) + \
13750ab9d5SAakash Sasidharan 		   sizeof(union rte_event_crypto_metadata))
14750ab9d5SAakash Sasidharan 
158f5b5495SAkhil Goyal struct modex_test_data {
168f5b5495SAkhil Goyal 	enum rte_crypto_asym_xform_type xform_type;
178f5b5495SAkhil Goyal 	struct {
188f5b5495SAkhil Goyal 		uint8_t data[DATA_SIZE];
198f5b5495SAkhil Goyal 		uint16_t len;
208f5b5495SAkhil Goyal 	} base;
218f5b5495SAkhil Goyal 	struct {
228f5b5495SAkhil Goyal 		uint8_t data[DATA_SIZE];
238f5b5495SAkhil Goyal 		uint16_t len;
248f5b5495SAkhil Goyal 	} exponent;
258f5b5495SAkhil Goyal 	struct {
268f5b5495SAkhil Goyal 		uint8_t data[DATA_SIZE];
278f5b5495SAkhil Goyal 		uint16_t len;
288f5b5495SAkhil Goyal 	} modulus;
298f5b5495SAkhil Goyal 	struct {
308f5b5495SAkhil Goyal 		uint8_t data[DATA_SIZE];
318f5b5495SAkhil Goyal 		uint16_t len;
328f5b5495SAkhil Goyal 	} reminder;
338f5b5495SAkhil Goyal 	uint16_t result_len;
348f5b5495SAkhil Goyal };
358f5b5495SAkhil Goyal 
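/* 1024-bit modular exponentiation test vector (base^exponent mod modulus)
 * used by the crypto adapter producers below to build asymmetric ops.
 */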
368f5b5495SAkhil Goyal static struct
378f5b5495SAkhil Goyal modex_test_data modex_test_case = {
388f5b5495SAkhil Goyal 	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
398f5b5495SAkhil Goyal 	.base = {
408f5b5495SAkhil Goyal 		.data = {
418f5b5495SAkhil Goyal 			0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
428f5b5495SAkhil Goyal 			0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
438f5b5495SAkhil Goyal 			0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
448f5b5495SAkhil Goyal 		},
458f5b5495SAkhil Goyal 		.len = 20,
468f5b5495SAkhil Goyal 	},
478f5b5495SAkhil Goyal 	.exponent = {
488f5b5495SAkhil Goyal 		.data = {
498f5b5495SAkhil Goyal 			0x01, 0x00, 0x01
508f5b5495SAkhil Goyal 		},
518f5b5495SAkhil Goyal 		.len = 3,
528f5b5495SAkhil Goyal 	},
538f5b5495SAkhil Goyal 	.reminder = {
548f5b5495SAkhil Goyal 		.data = {
558f5b5495SAkhil Goyal 			0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
568f5b5495SAkhil Goyal 			0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
578f5b5495SAkhil Goyal 			0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
588f5b5495SAkhil Goyal 			0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
598f5b5495SAkhil Goyal 			0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
608f5b5495SAkhil Goyal 			0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
618f5b5495SAkhil Goyal 			0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
628f5b5495SAkhil Goyal 			0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
638f5b5495SAkhil Goyal 			0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
648f5b5495SAkhil Goyal 			0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
658f5b5495SAkhil Goyal 			0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
668f5b5495SAkhil Goyal 			0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
678f5b5495SAkhil Goyal 			0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
688f5b5495SAkhil Goyal 			0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
698f5b5495SAkhil Goyal 			0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
708f5b5495SAkhil Goyal 			0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
718f5b5495SAkhil Goyal 		},
728f5b5495SAkhil Goyal 		.len = 128,
738f5b5495SAkhil Goyal 	},
748f5b5495SAkhil Goyal 	.modulus = {
758f5b5495SAkhil Goyal 		.data = {
768f5b5495SAkhil Goyal 			0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
778f5b5495SAkhil Goyal 			0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
788f5b5495SAkhil Goyal 			0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
798f5b5495SAkhil Goyal 			0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
808f5b5495SAkhil Goyal 			0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
818f5b5495SAkhil Goyal 			0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
828f5b5495SAkhil Goyal 			0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
838f5b5495SAkhil Goyal 			0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
848f5b5495SAkhil Goyal 			0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
858f5b5495SAkhil Goyal 			0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
868f5b5495SAkhil Goyal 			0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
878f5b5495SAkhil Goyal 			0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
888f5b5495SAkhil Goyal 			0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
898f5b5495SAkhil Goyal 			0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
908f5b5495SAkhil Goyal 			0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
918f5b5495SAkhil Goyal 			0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
928f5b5495SAkhil Goyal 		},
938f5b5495SAkhil Goyal 		.len = 128,
948f5b5495SAkhil Goyal 	},
958f5b5495SAkhil Goyal 	.result_len = 128,
968f5b5495SAkhil Goyal };
97de2bc16eSShijith Thotton 
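/* Print the per-worker packet distribution and return the test verdict. */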
9841c219e6SJerin Jacob int
9941c219e6SJerin Jacob perf_test_result(struct evt_test *test, struct evt_options *opt)
10041c219e6SJerin Jacob {
10141c219e6SJerin Jacob 	RTE_SET_USED(opt);
1026b1a14a8SPavan Nikhilesh 	int i;
1036b1a14a8SPavan Nikhilesh 	uint64_t total = 0;
10441c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
10541c219e6SJerin Jacob 
1066b1a14a8SPavan Nikhilesh 	printf("Packet distribution across worker cores:\n");
1076b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
1086b1a14a8SPavan Nikhilesh 		total += t->worker[i].processed_pkts;
1096b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
1106b1a14a8SPavan Nikhilesh 				printf("Worker %d packets: "CLGRN"%"PRIu64" "CLNRM"percentage:"
111c0900d33SHarry van Haaren 				CLGRN" %3.2f"CLNRM"\n", i,
1126b1a14a8SPavan Nikhilesh 				t->worker[i].processed_pkts,
1136b1a14a8SPavan Nikhilesh 				(((double)t->worker[i].processed_pkts)/total)
1146b1a14a8SPavan Nikhilesh 				* 100);
1156b1a14a8SPavan Nikhilesh 
11641c219e6SJerin Jacob 	return t->result;
11741c219e6SJerin Jacob }
11841c219e6SJerin Jacob 
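/* Synthetic producer: fetch perf_elt objects from the mempool and enqueue
 * them one event at a time, timestamping each event when forward latency
 * measurement is enabled.
 */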
1199d3aeb18SJerin Jacob static inline int
1209d3aeb18SJerin Jacob perf_producer(void *arg)
1219d3aeb18SJerin Jacob {
1229a618803SPavan Nikhilesh 	int i;
1239d3aeb18SJerin Jacob 	struct prod_data *p  = arg;
1249d3aeb18SJerin Jacob 	struct test_perf *t = p->t;
1259d3aeb18SJerin Jacob 	struct evt_options *opt = t->opt;
1269d3aeb18SJerin Jacob 	const uint8_t dev_id = p->dev_id;
1279d3aeb18SJerin Jacob 	const uint8_t port = p->port_id;
1289d3aeb18SJerin Jacob 	struct rte_mempool *pool = t->pool;
1299d3aeb18SJerin Jacob 	const uint64_t nb_pkts = t->nb_pkts;
1309d3aeb18SJerin Jacob 	const uint32_t nb_flows = t->nb_flows;
1319d3aeb18SJerin Jacob 	uint32_t flow_counter = 0;
1329d3aeb18SJerin Jacob 	uint64_t count = 0;
1339a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
134*f123568cSPavan Nikhilesh 	uint8_t enable_fwd_latency;
1359d3aeb18SJerin Jacob 	struct rte_event ev;
1369d3aeb18SJerin Jacob 
137*f123568cSPavan Nikhilesh 	enable_fwd_latency = opt->fwd_latency;
1389d3aeb18SJerin Jacob 	if (opt->verbose_level > 1)
1399d3aeb18SJerin Jacob 		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
1409d3aeb18SJerin Jacob 				rte_lcore_id(), dev_id, port, p->queue_id);
1419d3aeb18SJerin Jacob 
1429d3aeb18SJerin Jacob 	ev.event = 0;
1439d3aeb18SJerin Jacob 	ev.op = RTE_EVENT_OP_NEW;
1449d3aeb18SJerin Jacob 	ev.queue_id = p->queue_id;
1459d3aeb18SJerin Jacob 	ev.sched_type = t->opt->sched_type_list[0];
1469d3aeb18SJerin Jacob 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1479d3aeb18SJerin Jacob 	ev.event_type =  RTE_EVENT_TYPE_CPU;
1489d3aeb18SJerin Jacob 	ev.sub_event_type = 0; /* stage 0 */
1499d3aeb18SJerin Jacob 
1509d3aeb18SJerin Jacob 	while (count < nb_pkts && t->done == false) {
1519a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
1529d3aeb18SJerin Jacob 			continue;
1539a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
1549d3aeb18SJerin Jacob 			ev.flow_id = flow_counter++ % nb_flows;
1559a618803SPavan Nikhilesh 			ev.event_ptr = m[i];
156*f123568cSPavan Nikhilesh 			if (enable_fwd_latency)
1579a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
158*f123568cSPavan Nikhilesh 			while (rte_event_enqueue_new_burst(dev_id, port, &ev,
159*f123568cSPavan Nikhilesh 							   1) != 1) {
1609d3aeb18SJerin Jacob 				if (t->done)
1619d3aeb18SJerin Jacob 					break;
1629d3aeb18SJerin Jacob 				rte_pause();
163*f123568cSPavan Nikhilesh 				if (enable_fwd_latency)
164*f123568cSPavan Nikhilesh 					m[i]->timestamp =
165*f123568cSPavan Nikhilesh 						rte_get_timer_cycles();
1669d3aeb18SJerin Jacob 			}
1679a618803SPavan Nikhilesh 		}
1689a618803SPavan Nikhilesh 		count += BURST_SIZE;
1699d3aeb18SJerin Jacob 	}
1709d3aeb18SJerin Jacob 
1719d3aeb18SJerin Jacob 	return 0;
1729d3aeb18SJerin Jacob }
1739d3aeb18SJerin Jacob 
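/* Synthetic producer that enqueues events in bursts of
 * opt->prod_enq_burst_sz, refreshing the timestamps while a burst is only
 * partially accepted.
 */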
174d008f20bSPavan Nikhilesh static inline int
17520841a25SRashmi Shetty perf_producer_burst(void *arg)
17620841a25SRashmi Shetty {
17720841a25SRashmi Shetty 	uint32_t i;
17820841a25SRashmi Shetty 	uint64_t timestamp;
17920841a25SRashmi Shetty 	struct prod_data *p  = arg;
18020841a25SRashmi Shetty 	struct test_perf *t = p->t;
18120841a25SRashmi Shetty 	struct evt_options *opt = t->opt;
18220841a25SRashmi Shetty 	const uint8_t dev_id = p->dev_id;
18320841a25SRashmi Shetty 	const uint8_t port = p->port_id;
18420841a25SRashmi Shetty 	struct rte_mempool *pool = t->pool;
18520841a25SRashmi Shetty 	const uint64_t nb_pkts = t->nb_pkts;
18620841a25SRashmi Shetty 	const uint32_t nb_flows = t->nb_flows;
18720841a25SRashmi Shetty 	uint32_t flow_counter = 0;
18820841a25SRashmi Shetty 	uint16_t enq = 0;
18920841a25SRashmi Shetty 	uint64_t count = 0;
190*f123568cSPavan Nikhilesh 	struct perf_elt *m[opt->prod_enq_burst_sz + 1];
191*f123568cSPavan Nikhilesh 	struct rte_event ev[opt->prod_enq_burst_sz + 1];
19220841a25SRashmi Shetty 	uint32_t burst_size = opt->prod_enq_burst_sz;
193*f123568cSPavan Nikhilesh 	uint8_t enable_fwd_latency;
19420841a25SRashmi Shetty 
195*f123568cSPavan Nikhilesh 	enable_fwd_latency = opt->fwd_latency;
196*f123568cSPavan Nikhilesh 	memset(m, 0, sizeof(*m) * (opt->prod_enq_burst_sz + 1));
19720841a25SRashmi Shetty 	if (opt->verbose_level > 1)
19820841a25SRashmi Shetty 		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
19920841a25SRashmi Shetty 				rte_lcore_id(), dev_id, port, p->queue_id);
20020841a25SRashmi Shetty 
20120841a25SRashmi Shetty 	for (i = 0; i < burst_size; i++) {
20220841a25SRashmi Shetty 		ev[i].op = RTE_EVENT_OP_NEW;
20320841a25SRashmi Shetty 		ev[i].queue_id = p->queue_id;
20420841a25SRashmi Shetty 		ev[i].sched_type = t->opt->sched_type_list[0];
20520841a25SRashmi Shetty 		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
20620841a25SRashmi Shetty 		ev[i].event_type =  RTE_EVENT_TYPE_CPU;
20720841a25SRashmi Shetty 		ev[i].sub_event_type = 0; /* stage 0 */
20820841a25SRashmi Shetty 	}
20920841a25SRashmi Shetty 
21020841a25SRashmi Shetty 	while (count < nb_pkts && t->done == false) {
21120841a25SRashmi Shetty 		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
21220841a25SRashmi Shetty 			continue;
21320841a25SRashmi Shetty 		timestamp = rte_get_timer_cycles();
21420841a25SRashmi Shetty 		for (i = 0; i < burst_size; i++) {
21520841a25SRashmi Shetty 			ev[i].flow_id = flow_counter++ % nb_flows;
21620841a25SRashmi Shetty 			ev[i].event_ptr = m[i];
217*f123568cSPavan Nikhilesh 			if (enable_fwd_latency)
21820841a25SRashmi Shetty 				m[i]->timestamp = timestamp;
21920841a25SRashmi Shetty 		}
220*f123568cSPavan Nikhilesh 		enq = rte_event_enqueue_new_burst(dev_id, port, ev, burst_size);
22120841a25SRashmi Shetty 		while (enq < burst_size) {
222*f123568cSPavan Nikhilesh 			enq += rte_event_enqueue_new_burst(
223*f123568cSPavan Nikhilesh 				dev_id, port, ev + enq, burst_size - enq);
22420841a25SRashmi Shetty 			if (t->done)
22520841a25SRashmi Shetty 				break;
22620841a25SRashmi Shetty 			rte_pause();
227*f123568cSPavan Nikhilesh 			if (enable_fwd_latency) {
22820841a25SRashmi Shetty 				timestamp = rte_get_timer_cycles();
22920841a25SRashmi Shetty 				for (i = enq; i < burst_size; i++)
23020841a25SRashmi Shetty 					m[i]->timestamp = timestamp;
23120841a25SRashmi Shetty 			}
232*f123568cSPavan Nikhilesh 		}
23320841a25SRashmi Shetty 		count += burst_size;
23420841a25SRashmi Shetty 	}
23520841a25SRashmi Shetty 	return 0;
23620841a25SRashmi Shetty }
23720841a25SRashmi Shetty 
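/* Timer producer: arm one event timer per perf_elt and report the average
 * arm latency in microseconds when done.
 */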
23820841a25SRashmi Shetty static inline int
239d008f20bSPavan Nikhilesh perf_event_timer_producer(void *arg)
240d008f20bSPavan Nikhilesh {
2419a618803SPavan Nikhilesh 	int i;
242d008f20bSPavan Nikhilesh 	struct prod_data *p  = arg;
243d008f20bSPavan Nikhilesh 	struct test_perf *t = p->t;
244d008f20bSPavan Nikhilesh 	struct evt_options *opt = t->opt;
245d008f20bSPavan Nikhilesh 	uint32_t flow_counter = 0;
246d008f20bSPavan Nikhilesh 	uint64_t count = 0;
247d008f20bSPavan Nikhilesh 	uint64_t arm_latency = 0;
248d008f20bSPavan Nikhilesh 	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
249d008f20bSPavan Nikhilesh 	const uint32_t nb_flows = t->nb_flows;
250d008f20bSPavan Nikhilesh 	const uint64_t nb_timers = opt->nb_timers;
251d008f20bSPavan Nikhilesh 	struct rte_mempool *pool = t->pool;
2529a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
253d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter **adptr = t->timer_adptr;
25452553263SPavan Nikhilesh 	struct rte_event_timer tim;
255d008f20bSPavan Nikhilesh 	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
256d008f20bSPavan Nikhilesh 
25752553263SPavan Nikhilesh 	memset(&tim, 0, sizeof(struct rte_event_timer));
258626b12a8SPavan Nikhilesh 	timeout_ticks =
259626b12a8SPavan Nikhilesh 		opt->optm_timer_tick_nsec
260626b12a8SPavan Nikhilesh 			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
261626b12a8SPavan Nikhilesh 			       opt->optm_timer_tick_nsec)
262626b12a8SPavan Nikhilesh 			: timeout_ticks;
263d008f20bSPavan Nikhilesh 	timeout_ticks += timeout_ticks ? 0 : 1;
26452553263SPavan Nikhilesh 	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
26552553263SPavan Nikhilesh 	tim.ev.op = RTE_EVENT_OP_NEW;
26652553263SPavan Nikhilesh 	tim.ev.sched_type = t->opt->sched_type_list[0];
26752553263SPavan Nikhilesh 	tim.ev.queue_id = p->queue_id;
26852553263SPavan Nikhilesh 	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
26952553263SPavan Nikhilesh 	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
27052553263SPavan Nikhilesh 	tim.timeout_ticks = timeout_ticks;
271d008f20bSPavan Nikhilesh 
272d008f20bSPavan Nikhilesh 	if (opt->verbose_level > 1)
273d008f20bSPavan Nikhilesh 		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
274d008f20bSPavan Nikhilesh 
275d008f20bSPavan Nikhilesh 	while (count < nb_timers && t->done == false) {
2769a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
277d008f20bSPavan Nikhilesh 			continue;
2789a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
2799a618803SPavan Nikhilesh 			rte_prefetch0(m[i + 1]);
2809a618803SPavan Nikhilesh 			m[i]->tim = tim;
2819a618803SPavan Nikhilesh 			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
2829a618803SPavan Nikhilesh 			m[i]->tim.ev.event_ptr = m[i];
2839a618803SPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
284d008f20bSPavan Nikhilesh 			while (rte_event_timer_arm_burst(
285d008f20bSPavan Nikhilesh 			       adptr[flow_counter % nb_timer_adptrs],
2869a618803SPavan Nikhilesh 			       (struct rte_event_timer **)&m[i], 1) != 1) {
287d008f20bSPavan Nikhilesh 				if (t->done)
288d008f20bSPavan Nikhilesh 					break;
2899a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
290d008f20bSPavan Nikhilesh 			}
2919a618803SPavan Nikhilesh 			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
2929a618803SPavan Nikhilesh 		}
2939a618803SPavan Nikhilesh 		count += BURST_SIZE;
294d008f20bSPavan Nikhilesh 	}
295d008f20bSPavan Nikhilesh 	fflush(stdout);
296d008f20bSPavan Nikhilesh 	rte_delay_ms(1000);
297d008f20bSPavan Nikhilesh 	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
29893b7794bSPavan Nikhilesh 			__func__, rte_lcore_id(),
29993b7794bSPavan Nikhilesh 			count ? (double)arm_latency / count /
30093b7794bSPavan Nikhilesh 			(rte_get_timer_hz() / 1000000) : 0);
301d008f20bSPavan Nikhilesh 	return 0;
302d008f20bSPavan Nikhilesh }
303d008f20bSPavan Nikhilesh 
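/* Timer producer that arms BURST_SIZE timers at a time with
 * rte_event_timer_arm_tmo_tick_burst().
 */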
30417b22d0bSPavan Nikhilesh static inline int
30517b22d0bSPavan Nikhilesh perf_event_timer_producer_burst(void *arg)
30617b22d0bSPavan Nikhilesh {
30717b22d0bSPavan Nikhilesh 	int i;
30817b22d0bSPavan Nikhilesh 	struct prod_data *p  = arg;
30917b22d0bSPavan Nikhilesh 	struct test_perf *t = p->t;
31017b22d0bSPavan Nikhilesh 	struct evt_options *opt = t->opt;
31117b22d0bSPavan Nikhilesh 	uint32_t flow_counter = 0;
31217b22d0bSPavan Nikhilesh 	uint64_t count = 0;
31317b22d0bSPavan Nikhilesh 	uint64_t arm_latency = 0;
31417b22d0bSPavan Nikhilesh 	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
31517b22d0bSPavan Nikhilesh 	const uint32_t nb_flows = t->nb_flows;
31617b22d0bSPavan Nikhilesh 	const uint64_t nb_timers = opt->nb_timers;
31717b22d0bSPavan Nikhilesh 	struct rte_mempool *pool = t->pool;
31817b22d0bSPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
31917b22d0bSPavan Nikhilesh 	struct rte_event_timer_adapter **adptr = t->timer_adptr;
32052553263SPavan Nikhilesh 	struct rte_event_timer tim;
32117b22d0bSPavan Nikhilesh 	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
32217b22d0bSPavan Nikhilesh 
32352553263SPavan Nikhilesh 	memset(&tim, 0, sizeof(struct rte_event_timer));
324626b12a8SPavan Nikhilesh 	timeout_ticks =
325626b12a8SPavan Nikhilesh 		opt->optm_timer_tick_nsec
326626b12a8SPavan Nikhilesh 			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
327626b12a8SPavan Nikhilesh 			       opt->optm_timer_tick_nsec)
328626b12a8SPavan Nikhilesh 			: timeout_ticks;
32917b22d0bSPavan Nikhilesh 	timeout_ticks += timeout_ticks ? 0 : 1;
33052553263SPavan Nikhilesh 	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
33152553263SPavan Nikhilesh 	tim.ev.op = RTE_EVENT_OP_NEW;
33252553263SPavan Nikhilesh 	tim.ev.sched_type = t->opt->sched_type_list[0];
33352553263SPavan Nikhilesh 	tim.ev.queue_id = p->queue_id;
33452553263SPavan Nikhilesh 	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
33552553263SPavan Nikhilesh 	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
33652553263SPavan Nikhilesh 	tim.timeout_ticks = timeout_ticks;
33717b22d0bSPavan Nikhilesh 
33817b22d0bSPavan Nikhilesh 	if (opt->verbose_level > 1)
33917b22d0bSPavan Nikhilesh 		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
34017b22d0bSPavan Nikhilesh 
34117b22d0bSPavan Nikhilesh 	while (count < nb_timers && t->done == false) {
34217b22d0bSPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
34317b22d0bSPavan Nikhilesh 			continue;
34417b22d0bSPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
34517b22d0bSPavan Nikhilesh 			rte_prefetch0(m[i + 1]);
34617b22d0bSPavan Nikhilesh 			m[i]->tim = tim;
34717b22d0bSPavan Nikhilesh 			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
34817b22d0bSPavan Nikhilesh 			m[i]->tim.ev.event_ptr = m[i];
34917b22d0bSPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
35017b22d0bSPavan Nikhilesh 		}
35117b22d0bSPavan Nikhilesh 		rte_event_timer_arm_tmo_tick_burst(
35217b22d0bSPavan Nikhilesh 				adptr[flow_counter % nb_timer_adptrs],
35317b22d0bSPavan Nikhilesh 				(struct rte_event_timer **)m,
35417b22d0bSPavan Nikhilesh 				tim.timeout_ticks,
35517b22d0bSPavan Nikhilesh 				BURST_SIZE);
35617b22d0bSPavan Nikhilesh 		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
35717b22d0bSPavan Nikhilesh 		count += BURST_SIZE;
35817b22d0bSPavan Nikhilesh 	}
35917b22d0bSPavan Nikhilesh 	fflush(stdout);
36017b22d0bSPavan Nikhilesh 	rte_delay_ms(1000);
36117b22d0bSPavan Nikhilesh 	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
36293b7794bSPavan Nikhilesh 			__func__, rte_lcore_id(),
36393b7794bSPavan Nikhilesh 			count ? (double)arm_latency / count /
36493b7794bSPavan Nikhilesh 			(rte_get_timer_hz() / 1000000) : 0);
36517b22d0bSPavan Nikhilesh 	return 0;
36617b22d0bSPavan Nikhilesh }
36717b22d0bSPavan Nikhilesh 
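/* OP_NEW mode: enqueue crypto ops directly to the cryptodev queue pair;
 * the crypto adapter dequeues the completed ops and injects them as events.
 */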
368de2bc16eSShijith Thotton static inline void
369de2bc16eSShijith Thotton crypto_adapter_enq_op_new(struct prod_data *p)
370de2bc16eSShijith Thotton {
371de2bc16eSShijith Thotton 	struct test_perf *t = p->t;
372de2bc16eSShijith Thotton 	const uint32_t nb_flows = t->nb_flows;
373de2bc16eSShijith Thotton 	const uint64_t nb_pkts = t->nb_pkts;
374de2bc16eSShijith Thotton 	struct rte_mempool *pool = t->pool;
375750ab9d5SAakash Sasidharan 	uint16_t data_length, data_offset;
376de2bc16eSShijith Thotton 	struct evt_options *opt = t->opt;
377de2bc16eSShijith Thotton 	uint16_t qp_id = p->ca.cdev_qp_id;
378de2bc16eSShijith Thotton 	uint8_t cdev_id = p->ca.cdev_id;
3793158ec9fSVolodymyr Fialko 	uint64_t alloc_failures = 0;
380de2bc16eSShijith Thotton 	uint32_t flow_counter = 0;
381de2bc16eSShijith Thotton 	struct rte_crypto_op *op;
3826776a581SVolodymyr Fialko 	uint16_t len, offset;
383de2bc16eSShijith Thotton 	struct rte_mbuf *m;
384de2bc16eSShijith Thotton 	uint64_t count = 0;
385de2bc16eSShijith Thotton 
386de2bc16eSShijith Thotton 	if (opt->verbose_level > 1)
387de2bc16eSShijith Thotton 		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
388de2bc16eSShijith Thotton 		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
389de2bc16eSShijith Thotton 		       p->ca.cdev_qp_id);
390de2bc16eSShijith Thotton 
3916776a581SVolodymyr Fialko 	offset = sizeof(struct perf_elt);
3926776a581SVolodymyr Fialko 	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
393de2bc16eSShijith Thotton 
394750ab9d5SAakash Sasidharan 	if (opt->crypto_cipher_bit_mode) {
395750ab9d5SAakash Sasidharan 		data_offset = offset << 3;
396750ab9d5SAakash Sasidharan 		data_length = (len - offset) << 3;
397750ab9d5SAakash Sasidharan 	} else {
398750ab9d5SAakash Sasidharan 		data_offset = offset;
399750ab9d5SAakash Sasidharan 		data_length = len - offset;
400750ab9d5SAakash Sasidharan 	}
401750ab9d5SAakash Sasidharan 
402de2bc16eSShijith Thotton 	while (count < nb_pkts && t->done == false) {
4038f5b5495SAkhil Goyal 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
4048f5b5495SAkhil Goyal 			struct rte_crypto_sym_op *sym_op;
4058f5b5495SAkhil Goyal 
4068f5b5495SAkhil Goyal 			op = rte_crypto_op_alloc(t->ca_op_pool,
4078f5b5495SAkhil Goyal 					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
4083158ec9fSVolodymyr Fialko 			if (unlikely(op == NULL)) {
4093158ec9fSVolodymyr Fialko 				alloc_failures++;
410de2bc16eSShijith Thotton 				continue;
4113158ec9fSVolodymyr Fialko 			}
4123158ec9fSVolodymyr Fialko 
4133158ec9fSVolodymyr Fialko 			m = rte_pktmbuf_alloc(pool);
4143158ec9fSVolodymyr Fialko 			if (unlikely(m == NULL)) {
4153158ec9fSVolodymyr Fialko 				alloc_failures++;
4163158ec9fSVolodymyr Fialko 				rte_crypto_op_free(op);
4173158ec9fSVolodymyr Fialko 				continue;
4183158ec9fSVolodymyr Fialko 			}
419de2bc16eSShijith Thotton 
420de2bc16eSShijith Thotton 			rte_pktmbuf_append(m, len);
421de2bc16eSShijith Thotton 			sym_op = op->sym;
422de2bc16eSShijith Thotton 			sym_op->m_src = m;
423750ab9d5SAakash Sasidharan 
424750ab9d5SAakash Sasidharan 			sym_op->cipher.data.offset = data_offset;
425750ab9d5SAakash Sasidharan 			sym_op->cipher.data.length = data_length;
426750ab9d5SAakash Sasidharan 
427de2bc16eSShijith Thotton 			rte_crypto_op_attach_sym_session(
4288f5b5495SAkhil Goyal 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
4298f5b5495SAkhil Goyal 		} else {
4308f5b5495SAkhil Goyal 			struct rte_crypto_asym_op *asym_op;
4316776a581SVolodymyr Fialko 			uint8_t *result;
4326776a581SVolodymyr Fialko 
4336776a581SVolodymyr Fialko 			if (rte_mempool_get(pool, (void **)&result)) {
4346776a581SVolodymyr Fialko 				alloc_failures++;
4356776a581SVolodymyr Fialko 				continue;
4366776a581SVolodymyr Fialko 			}
437de2bc16eSShijith Thotton 
4388f5b5495SAkhil Goyal 			op = rte_crypto_op_alloc(t->ca_op_pool,
4398f5b5495SAkhil Goyal 					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
4403158ec9fSVolodymyr Fialko 			if (unlikely(op == NULL)) {
4413158ec9fSVolodymyr Fialko 				alloc_failures++;
4426776a581SVolodymyr Fialko 				rte_mempool_put(pool, result);
4433158ec9fSVolodymyr Fialko 				continue;
4443158ec9fSVolodymyr Fialko 			}
4453158ec9fSVolodymyr Fialko 
4468f5b5495SAkhil Goyal 			asym_op = op->asym;
4478f5b5495SAkhil Goyal 			asym_op->modex.base.data = modex_test_case.base.data;
4488f5b5495SAkhil Goyal 			asym_op->modex.base.length = modex_test_case.base.len;
4498f5b5495SAkhil Goyal 			asym_op->modex.result.data = result;
4508f5b5495SAkhil Goyal 			asym_op->modex.result.length = modex_test_case.result_len;
4518f5b5495SAkhil Goyal 			rte_crypto_op_attach_asym_session(
4528f5b5495SAkhil Goyal 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
4538f5b5495SAkhil Goyal 		}
454de2bc16eSShijith Thotton 		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
455de2bc16eSShijith Thotton 				t->done == false)
456de2bc16eSShijith Thotton 			rte_pause();
457de2bc16eSShijith Thotton 
458de2bc16eSShijith Thotton 		count++;
459de2bc16eSShijith Thotton 	}
4603158ec9fSVolodymyr Fialko 
4613158ec9fSVolodymyr Fialko 	if (opt->verbose_level > 1 && alloc_failures)
4623158ec9fSVolodymyr Fialko 		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
4633158ec9fSVolodymyr Fialko 		       __func__, rte_lcore_id(), alloc_failures);
464de2bc16eSShijith Thotton }
465de2bc16eSShijith Thotton 
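/* OP_FORWARD mode: wrap crypto ops in events and enqueue them to the event
 * device, which forwards them to the cryptodev through the crypto adapter.
 */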
466de2bc16eSShijith Thotton static inline void
467de2bc16eSShijith Thotton crypto_adapter_enq_op_fwd(struct prod_data *p)
468de2bc16eSShijith Thotton {
469de2bc16eSShijith Thotton 	const uint8_t dev_id = p->dev_id;
470de2bc16eSShijith Thotton 	const uint8_t port = p->port_id;
471de2bc16eSShijith Thotton 	struct test_perf *t = p->t;
472de2bc16eSShijith Thotton 	const uint32_t nb_flows = t->nb_flows;
473de2bc16eSShijith Thotton 	const uint64_t nb_pkts = t->nb_pkts;
474de2bc16eSShijith Thotton 	struct rte_mempool *pool = t->pool;
475de2bc16eSShijith Thotton 	struct evt_options *opt = t->opt;
4763158ec9fSVolodymyr Fialko 	uint64_t alloc_failures = 0;
477de2bc16eSShijith Thotton 	uint32_t flow_counter = 0;
478de2bc16eSShijith Thotton 	struct rte_crypto_op *op;
4796776a581SVolodymyr Fialko 	uint16_t len, offset;
480de2bc16eSShijith Thotton 	struct rte_event ev;
481de2bc16eSShijith Thotton 	struct rte_mbuf *m;
482de2bc16eSShijith Thotton 	uint64_t count = 0;
483de2bc16eSShijith Thotton 
484de2bc16eSShijith Thotton 	if (opt->verbose_level > 1)
485de2bc16eSShijith Thotton 		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
486de2bc16eSShijith Thotton 		       __func__, rte_lcore_id(), port, p->queue_id,
487de2bc16eSShijith Thotton 		       p->ca.cdev_id, p->ca.cdev_qp_id);
488de2bc16eSShijith Thotton 
489de2bc16eSShijith Thotton 	ev.event = 0;
490de2bc16eSShijith Thotton 	ev.op = RTE_EVENT_OP_NEW;
491de2bc16eSShijith Thotton 	ev.queue_id = p->queue_id;
492de2bc16eSShijith Thotton 	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
493de2bc16eSShijith Thotton 	ev.event_type = RTE_EVENT_TYPE_CPU;
4946776a581SVolodymyr Fialko 
4956776a581SVolodymyr Fialko 	offset = sizeof(struct perf_elt);
4966776a581SVolodymyr Fialko 	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
497de2bc16eSShijith Thotton 
498de2bc16eSShijith Thotton 	while (count < nb_pkts && t->done == false) {
4998f5b5495SAkhil Goyal 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
5008f5b5495SAkhil Goyal 			struct rte_crypto_sym_op *sym_op;
5018f5b5495SAkhil Goyal 
5028f5b5495SAkhil Goyal 			op = rte_crypto_op_alloc(t->ca_op_pool,
5038f5b5495SAkhil Goyal 					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
5043158ec9fSVolodymyr Fialko 			if (unlikely(op == NULL)) {
5053158ec9fSVolodymyr Fialko 				alloc_failures++;
506de2bc16eSShijith Thotton 				continue;
5073158ec9fSVolodymyr Fialko 			}
5083158ec9fSVolodymyr Fialko 
5093158ec9fSVolodymyr Fialko 			m = rte_pktmbuf_alloc(pool);
5103158ec9fSVolodymyr Fialko 			if (unlikely(m == NULL)) {
5113158ec9fSVolodymyr Fialko 				alloc_failures++;
5123158ec9fSVolodymyr Fialko 				rte_crypto_op_free(op);
5133158ec9fSVolodymyr Fialko 				continue;
5143158ec9fSVolodymyr Fialko 			}
515de2bc16eSShijith Thotton 
516de2bc16eSShijith Thotton 			rte_pktmbuf_append(m, len);
517de2bc16eSShijith Thotton 			sym_op = op->sym;
518de2bc16eSShijith Thotton 			sym_op->m_src = m;
5196776a581SVolodymyr Fialko 			sym_op->cipher.data.offset = offset;
5206776a581SVolodymyr Fialko 			sym_op->cipher.data.length = len - offset;
521de2bc16eSShijith Thotton 			rte_crypto_op_attach_sym_session(
5228f5b5495SAkhil Goyal 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
5238f5b5495SAkhil Goyal 		} else {
5248f5b5495SAkhil Goyal 			struct rte_crypto_asym_op *asym_op;
5256776a581SVolodymyr Fialko 			uint8_t *result;
5266776a581SVolodymyr Fialko 
5276776a581SVolodymyr Fialko 			if (rte_mempool_get(pool, (void **)&result)) {
5286776a581SVolodymyr Fialko 				alloc_failures++;
5296776a581SVolodymyr Fialko 				continue;
5306776a581SVolodymyr Fialko 			}
5318f5b5495SAkhil Goyal 
5328f5b5495SAkhil Goyal 			op = rte_crypto_op_alloc(t->ca_op_pool,
5338f5b5495SAkhil Goyal 					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
5343158ec9fSVolodymyr Fialko 			if (unlikely(op == NULL)) {
5353158ec9fSVolodymyr Fialko 				alloc_failures++;
5366776a581SVolodymyr Fialko 				rte_mempool_put(pool, result);
5373158ec9fSVolodymyr Fialko 				continue;
5383158ec9fSVolodymyr Fialko 			}
5393158ec9fSVolodymyr Fialko 
5408f5b5495SAkhil Goyal 			asym_op = op->asym;
5418f5b5495SAkhil Goyal 			asym_op->modex.base.data = modex_test_case.base.data;
5428f5b5495SAkhil Goyal 			asym_op->modex.base.length = modex_test_case.base.len;
5438f5b5495SAkhil Goyal 			asym_op->modex.result.data = result;
5448f5b5495SAkhil Goyal 			asym_op->modex.result.length = modex_test_case.result_len;
5458f5b5495SAkhil Goyal 			rte_crypto_op_attach_asym_session(
5468f5b5495SAkhil Goyal 				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
5478f5b5495SAkhil Goyal 		}
548de2bc16eSShijith Thotton 		ev.event_ptr = op;
549de2bc16eSShijith Thotton 
550de2bc16eSShijith Thotton 		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
551de2bc16eSShijith Thotton 		       t->done == false)
552de2bc16eSShijith Thotton 			rte_pause();
553de2bc16eSShijith Thotton 
554de2bc16eSShijith Thotton 		count++;
555de2bc16eSShijith Thotton 	}
5563158ec9fSVolodymyr Fialko 
5573158ec9fSVolodymyr Fialko 	if (opt->verbose_level > 1 && alloc_failures)
5583158ec9fSVolodymyr Fialko 		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
5593158ec9fSVolodymyr Fialko 		       __func__, rte_lcore_id(), alloc_failures);
560de2bc16eSShijith Thotton }
561de2bc16eSShijith Thotton 
562de2bc16eSShijith Thotton static inline int
563de2bc16eSShijith Thotton perf_event_crypto_producer(void *arg)
564de2bc16eSShijith Thotton {
565de2bc16eSShijith Thotton 	struct prod_data *p = arg;
566de2bc16eSShijith Thotton 	struct evt_options *opt = p->t->opt;
567de2bc16eSShijith Thotton 
568de2bc16eSShijith Thotton 	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
569de2bc16eSShijith Thotton 		crypto_adapter_enq_op_new(p);
570de2bc16eSShijith Thotton 	else
571de2bc16eSShijith Thotton 		crypto_adapter_enq_op_fwd(p);
572de2bc16eSShijith Thotton 
573de2bc16eSShijith Thotton 	return 0;
574de2bc16eSShijith Thotton }
575de2bc16eSShijith Thotton 
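/* Burst variant of the OP_NEW crypto producer. */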
5769c3096d4SVolodymyr Fialko static void
5779c3096d4SVolodymyr Fialko crypto_adapter_enq_op_new_burst(struct prod_data *p)
5789c3096d4SVolodymyr Fialko {
5799c3096d4SVolodymyr Fialko 	const struct test_perf *t = p->t;
5809c3096d4SVolodymyr Fialko 	const struct evt_options *opt = t->opt;
5819c3096d4SVolodymyr Fialko 
5829c3096d4SVolodymyr Fialko 	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
5839c3096d4SVolodymyr Fialko 	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
5849c3096d4SVolodymyr Fialko 	const uint32_t burst_size = opt->prod_enq_burst_sz;
5859c3096d4SVolodymyr Fialko 	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
5869c3096d4SVolodymyr Fialko 	const uint32_t nb_flows = t->nb_flows;
5879c3096d4SVolodymyr Fialko 	const uint64_t nb_pkts = t->nb_pkts;
5889c3096d4SVolodymyr Fialko 	uint16_t len, enq, nb_alloc, offset;
5899c3096d4SVolodymyr Fialko 	struct rte_mempool *pool = t->pool;
5909c3096d4SVolodymyr Fialko 	uint16_t qp_id = p->ca.cdev_qp_id;
5919c3096d4SVolodymyr Fialko 	uint8_t cdev_id = p->ca.cdev_id;
5929c3096d4SVolodymyr Fialko 	uint64_t alloc_failures = 0;
5939c3096d4SVolodymyr Fialko 	uint32_t flow_counter = 0;
5949c3096d4SVolodymyr Fialko 	uint64_t count = 0;
5959c3096d4SVolodymyr Fialko 	uint32_t  i;
5969c3096d4SVolodymyr Fialko 
5979c3096d4SVolodymyr Fialko 	if (opt->verbose_level > 1)
5989c3096d4SVolodymyr Fialko 		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
5999c3096d4SVolodymyr Fialko 		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
6009c3096d4SVolodymyr Fialko 		       p->ca.cdev_qp_id);
6019c3096d4SVolodymyr Fialko 
6029c3096d4SVolodymyr Fialko 	offset = sizeof(struct perf_elt);
6039c3096d4SVolodymyr Fialko 	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
6049c3096d4SVolodymyr Fialko 
6059c3096d4SVolodymyr Fialko 	while (count < nb_pkts && t->done == false) {
6069c3096d4SVolodymyr Fialko 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
6079c3096d4SVolodymyr Fialko 			struct rte_crypto_sym_op *sym_op;
6089c3096d4SVolodymyr Fialko 			int ret;
6099c3096d4SVolodymyr Fialko 
6109c3096d4SVolodymyr Fialko 			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
6119c3096d4SVolodymyr Fialko 					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
6129c3096d4SVolodymyr Fialko 			if (unlikely(nb_alloc != burst_size)) {
6139c3096d4SVolodymyr Fialko 				alloc_failures++;
6149c3096d4SVolodymyr Fialko 				continue;
6159c3096d4SVolodymyr Fialko 			}
6169c3096d4SVolodymyr Fialko 
6179c3096d4SVolodymyr Fialko 			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
6189c3096d4SVolodymyr Fialko 			if (unlikely(ret != 0)) {
6199c3096d4SVolodymyr Fialko 				alloc_failures++;
6209c3096d4SVolodymyr Fialko 				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
6219c3096d4SVolodymyr Fialko 				continue;
6229c3096d4SVolodymyr Fialko 			}
6239c3096d4SVolodymyr Fialko 
6249c3096d4SVolodymyr Fialko 			for (i = 0; i < burst_size; i++) {
6259c3096d4SVolodymyr Fialko 				m = pkts_burst[i];
6269c3096d4SVolodymyr Fialko 				rte_pktmbuf_append(m, len);
6279c3096d4SVolodymyr Fialko 				sym_op = ops_burst[i]->sym;
6289c3096d4SVolodymyr Fialko 				sym_op->m_src = m;
6299c3096d4SVolodymyr Fialko 				sym_op->cipher.data.offset = offset;
6309c3096d4SVolodymyr Fialko 				sym_op->cipher.data.length = len - offset;
6319c3096d4SVolodymyr Fialko 				rte_crypto_op_attach_sym_session(ops_burst[i],
6329c3096d4SVolodymyr Fialko 						p->ca.crypto_sess[flow_counter++ % nb_flows]);
6339c3096d4SVolodymyr Fialko 			}
6349c3096d4SVolodymyr Fialko 		} else {
6359c3096d4SVolodymyr Fialko 			struct rte_crypto_asym_op *asym_op;
6369c3096d4SVolodymyr Fialko 
6379c3096d4SVolodymyr Fialko 			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
6389c3096d4SVolodymyr Fialko 					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
6399c3096d4SVolodymyr Fialko 			if (unlikely(nb_alloc != burst_size)) {
6409c3096d4SVolodymyr Fialko 				alloc_failures++;
6419c3096d4SVolodymyr Fialko 				continue;
6429c3096d4SVolodymyr Fialko 			}
6439c3096d4SVolodymyr Fialko 
6449c3096d4SVolodymyr Fialko 			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
6459c3096d4SVolodymyr Fialko 				alloc_failures++;
6469c3096d4SVolodymyr Fialko 				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
6479c3096d4SVolodymyr Fialko 				continue;
6489c3096d4SVolodymyr Fialko 			}
6499c3096d4SVolodymyr Fialko 
6509c3096d4SVolodymyr Fialko 			for (i = 0; i < burst_size; i++) {
6519c3096d4SVolodymyr Fialko 				asym_op = ops_burst[i]->asym;
6529c3096d4SVolodymyr Fialko 				asym_op->modex.base.data = modex_test_case.base.data;
6539c3096d4SVolodymyr Fialko 				asym_op->modex.base.length = modex_test_case.base.len;
6549c3096d4SVolodymyr Fialko 				asym_op->modex.result.data = result[i];
6559c3096d4SVolodymyr Fialko 				asym_op->modex.result.length = modex_test_case.result_len;
6569c3096d4SVolodymyr Fialko 				rte_crypto_op_attach_asym_session(ops_burst[i],
6579c3096d4SVolodymyr Fialko 						p->ca.crypto_sess[flow_counter++ % nb_flows]);
6589c3096d4SVolodymyr Fialko 			}
6599c3096d4SVolodymyr Fialko 		}
6609c3096d4SVolodymyr Fialko 
6619c3096d4SVolodymyr Fialko 		enq = 0;
6629c3096d4SVolodymyr Fialko 		while (!t->done) {
6639c3096d4SVolodymyr Fialko 			enq += rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops_burst + enq,
6649c3096d4SVolodymyr Fialko 					burst_size - enq);
6659c3096d4SVolodymyr Fialko 			if (enq == burst_size)
6669c3096d4SVolodymyr Fialko 				break;
6679c3096d4SVolodymyr Fialko 		}
6689c3096d4SVolodymyr Fialko 
6699c3096d4SVolodymyr Fialko 		count += burst_size;
6709c3096d4SVolodymyr Fialko 	}
6719c3096d4SVolodymyr Fialko 
6729c3096d4SVolodymyr Fialko 	if (opt->verbose_level > 1 && alloc_failures)
6739c3096d4SVolodymyr Fialko 		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
6749c3096d4SVolodymyr Fialko 		       __func__, rte_lcore_id(), alloc_failures);
6759c3096d4SVolodymyr Fialko }
6769c3096d4SVolodymyr Fialko 
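/* Burst variant of the OP_FORWARD crypto producer. */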
6779c3096d4SVolodymyr Fialko static void
6789c3096d4SVolodymyr Fialko crypto_adapter_enq_op_fwd_burst(struct prod_data *p)
6799c3096d4SVolodymyr Fialko {
6809c3096d4SVolodymyr Fialko 	const struct test_perf *t = p->t;
6819c3096d4SVolodymyr Fialko 	const struct evt_options *opt = t->opt;
6829c3096d4SVolodymyr Fialko 
6839c3096d4SVolodymyr Fialko 	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
6849c3096d4SVolodymyr Fialko 	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
6859c3096d4SVolodymyr Fialko 	const uint32_t burst_size = opt->prod_enq_burst_sz;
6869c3096d4SVolodymyr Fialko 	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE];
6879c3096d4SVolodymyr Fialko 	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
6889c3096d4SVolodymyr Fialko 	const uint32_t nb_flows = t->nb_flows;
6899c3096d4SVolodymyr Fialko 	const uint64_t nb_pkts = t->nb_pkts;
6909c3096d4SVolodymyr Fialko 	uint16_t len, enq, nb_alloc, offset;
6919c3096d4SVolodymyr Fialko 	struct rte_mempool *pool = t->pool;
6929c3096d4SVolodymyr Fialko 	const uint8_t dev_id = p->dev_id;
6939c3096d4SVolodymyr Fialko 	const uint8_t port = p->port_id;
6949c3096d4SVolodymyr Fialko 	uint64_t alloc_failures = 0;
6959c3096d4SVolodymyr Fialko 	uint32_t flow_counter = 0;
6969c3096d4SVolodymyr Fialko 	uint64_t count = 0;
6979c3096d4SVolodymyr Fialko 	uint32_t  i;
6989c3096d4SVolodymyr Fialko 
6999c3096d4SVolodymyr Fialko 	if (opt->verbose_level > 1)
7009c3096d4SVolodymyr Fialko 		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
7019c3096d4SVolodymyr Fialko 		       __func__, rte_lcore_id(), port, p->queue_id,
7029c3096d4SVolodymyr Fialko 		       p->ca.cdev_id, p->ca.cdev_qp_id);
7039c3096d4SVolodymyr Fialko 
7049c3096d4SVolodymyr Fialko 	offset = sizeof(struct perf_elt);
7059c3096d4SVolodymyr Fialko 	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);
7069c3096d4SVolodymyr Fialko 
7079c3096d4SVolodymyr Fialko 	for (i = 0; i < burst_size; i++) {
7089c3096d4SVolodymyr Fialko 		ev[i].event = 0;
7099c3096d4SVolodymyr Fialko 		ev[i].op = RTE_EVENT_OP_NEW;
7109c3096d4SVolodymyr Fialko 		ev[i].queue_id = p->queue_id;
7119c3096d4SVolodymyr Fialko 		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
7129c3096d4SVolodymyr Fialko 		ev[i].event_type = RTE_EVENT_TYPE_CPU;
7139c3096d4SVolodymyr Fialko 	}
7149c3096d4SVolodymyr Fialko 
7159c3096d4SVolodymyr Fialko 	while (count < nb_pkts && t->done == false) {
7169c3096d4SVolodymyr Fialko 		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
7179c3096d4SVolodymyr Fialko 			struct rte_crypto_sym_op *sym_op;
7189c3096d4SVolodymyr Fialko 			int ret;
7199c3096d4SVolodymyr Fialko 
7209c3096d4SVolodymyr Fialko 			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
7219c3096d4SVolodymyr Fialko 					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
7229c3096d4SVolodymyr Fialko 			if (unlikely(nb_alloc != burst_size)) {
7239c3096d4SVolodymyr Fialko 				alloc_failures++;
7249c3096d4SVolodymyr Fialko 				continue;
7259c3096d4SVolodymyr Fialko 			}
7269c3096d4SVolodymyr Fialko 
7279c3096d4SVolodymyr Fialko 			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
7289c3096d4SVolodymyr Fialko 			if (unlikely(ret != 0)) {
7299c3096d4SVolodymyr Fialko 				alloc_failures++;
7309c3096d4SVolodymyr Fialko 				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
7319c3096d4SVolodymyr Fialko 				continue;
7329c3096d4SVolodymyr Fialko 			}
7339c3096d4SVolodymyr Fialko 
7349c3096d4SVolodymyr Fialko 			for (i = 0; i < burst_size; i++) {
7359c3096d4SVolodymyr Fialko 				m = pkts_burst[i];
7369c3096d4SVolodymyr Fialko 				rte_pktmbuf_append(m, len);
7379c3096d4SVolodymyr Fialko 				sym_op = ops_burst[i]->sym;
7389c3096d4SVolodymyr Fialko 				sym_op->m_src = m;
7399c3096d4SVolodymyr Fialko 				sym_op->cipher.data.offset = offset;
7409c3096d4SVolodymyr Fialko 				sym_op->cipher.data.length = len - offset;
7419c3096d4SVolodymyr Fialko 				rte_crypto_op_attach_sym_session(ops_burst[i],
7429c3096d4SVolodymyr Fialko 						p->ca.crypto_sess[flow_counter++ % nb_flows]);
7439c3096d4SVolodymyr Fialko 				ev[i].event_ptr = ops_burst[i];
7449c3096d4SVolodymyr Fialko 			}
7459c3096d4SVolodymyr Fialko 		} else {
7469c3096d4SVolodymyr Fialko 			struct rte_crypto_asym_op *asym_op;
7479c3096d4SVolodymyr Fialko 
7489c3096d4SVolodymyr Fialko 			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
7499c3096d4SVolodymyr Fialko 					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
7509c3096d4SVolodymyr Fialko 			if (unlikely(nb_alloc != burst_size)) {
7519c3096d4SVolodymyr Fialko 				alloc_failures++;
7529c3096d4SVolodymyr Fialko 				continue;
7539c3096d4SVolodymyr Fialko 			}
7549c3096d4SVolodymyr Fialko 
7559c3096d4SVolodymyr Fialko 			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
7569c3096d4SVolodymyr Fialko 				alloc_failures++;
7579c3096d4SVolodymyr Fialko 				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
7589c3096d4SVolodymyr Fialko 				continue;
7599c3096d4SVolodymyr Fialko 			}
7609c3096d4SVolodymyr Fialko 
7619c3096d4SVolodymyr Fialko 			for (i = 0; i < burst_size; i++) {
7629c3096d4SVolodymyr Fialko 				asym_op = ops_burst[i]->asym;
7639c3096d4SVolodymyr Fialko 				asym_op->modex.base.data = modex_test_case.base.data;
7649c3096d4SVolodymyr Fialko 				asym_op->modex.base.length = modex_test_case.base.len;
7659c3096d4SVolodymyr Fialko 				asym_op->modex.result.data = result[i];
7669c3096d4SVolodymyr Fialko 				asym_op->modex.result.length = modex_test_case.result_len;
7679c3096d4SVolodymyr Fialko 				rte_crypto_op_attach_asym_session(ops_burst[i],
7689c3096d4SVolodymyr Fialko 						p->ca.crypto_sess[flow_counter++ % nb_flows]);
7699c3096d4SVolodymyr Fialko 				ev[i].event_ptr = ops_burst[i];
7709c3096d4SVolodymyr Fialko 			}
7719c3096d4SVolodymyr Fialko 		}
7729c3096d4SVolodymyr Fialko 
7739c3096d4SVolodymyr Fialko 		enq = 0;
7749c3096d4SVolodymyr Fialko 		while (!t->done) {
7759c3096d4SVolodymyr Fialko 			enq += rte_event_crypto_adapter_enqueue(dev_id, port, ev + enq,
7769c3096d4SVolodymyr Fialko 					burst_size - enq);
7779c3096d4SVolodymyr Fialko 			if (enq == burst_size)
7789c3096d4SVolodymyr Fialko 				break;
7799c3096d4SVolodymyr Fialko 		}
7809c3096d4SVolodymyr Fialko 
7819c3096d4SVolodymyr Fialko 		count += burst_size;
7829c3096d4SVolodymyr Fialko 	}
7839c3096d4SVolodymyr Fialko 
7849c3096d4SVolodymyr Fialko 	if (opt->verbose_level > 1 && alloc_failures)
7859c3096d4SVolodymyr Fialko 		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
7869c3096d4SVolodymyr Fialko 		       __func__, rte_lcore_id(), alloc_failures);
7879c3096d4SVolodymyr Fialko }
7889c3096d4SVolodymyr Fialko 
7899c3096d4SVolodymyr Fialko static inline int
7909c3096d4SVolodymyr Fialko perf_event_crypto_producer_burst(void *arg)
7919c3096d4SVolodymyr Fialko {
7929c3096d4SVolodymyr Fialko 	struct prod_data *p = arg;
7939c3096d4SVolodymyr Fialko 	struct evt_options *opt = p->t->opt;
7949c3096d4SVolodymyr Fialko 
7959c3096d4SVolodymyr Fialko 	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
7969c3096d4SVolodymyr Fialko 		crypto_adapter_enq_op_new_burst(p);
7979c3096d4SVolodymyr Fialko 	else
7989c3096d4SVolodymyr Fialko 		crypto_adapter_enq_op_fwd_burst(p);
7999c3096d4SVolodymyr Fialko 
8009c3096d4SVolodymyr Fialko 	return 0;
8019c3096d4SVolodymyr Fialko }
8029c3096d4SVolodymyr Fialko 
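/* Pick the producer routine matching the producer type and enqueue burst
 * size; if no burst size was given, default it to MAX_PROD_ENQ_BURST_SIZE
 * clamped to the device's maximum enqueue depth.
 */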
80359f697e3SPavan Nikhilesh static int
80459f697e3SPavan Nikhilesh perf_producer_wrapper(void *arg)
80559f697e3SPavan Nikhilesh {
806*f123568cSPavan Nikhilesh 	struct rte_event_dev_info dev_info;
80759f697e3SPavan Nikhilesh 	struct prod_data *p  = arg;
80859f697e3SPavan Nikhilesh 	struct test_perf *t = p->t;
809*f123568cSPavan Nikhilesh 
810*f123568cSPavan Nikhilesh 	rte_event_dev_info_get(p->dev_id, &dev_info);
811*f123568cSPavan Nikhilesh 	if (!t->opt->prod_enq_burst_sz) {
812*f123568cSPavan Nikhilesh 		t->opt->prod_enq_burst_sz = MAX_PROD_ENQ_BURST_SIZE;
813*f123568cSPavan Nikhilesh 		if (dev_info.max_event_port_enqueue_depth > 0 &&
814*f123568cSPavan Nikhilesh 		    (uint32_t)dev_info.max_event_port_enqueue_depth <
815*f123568cSPavan Nikhilesh 			    t->opt->prod_enq_burst_sz)
816*f123568cSPavan Nikhilesh 			t->opt->prod_enq_burst_sz =
817*f123568cSPavan Nikhilesh 				dev_info.max_event_port_enqueue_depth;
818*f123568cSPavan Nikhilesh 	}
81920841a25SRashmi Shetty 
82020841a25SRashmi Shetty 	/* In case of synthetic producer, launch perf_producer or
82120841a25SRashmi Shetty 	 * perf_producer_burst depending on producer enqueue burst size
82220841a25SRashmi Shetty 	 */
82320841a25SRashmi Shetty 	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
82420841a25SRashmi Shetty 			t->opt->prod_enq_burst_sz == 1)
82559f697e3SPavan Nikhilesh 		return perf_producer(arg);
82620841a25SRashmi Shetty 	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
82720841a25SRashmi Shetty 			t->opt->prod_enq_burst_sz > 1) {
828*f123568cSPavan Nikhilesh 		if (dev_info.max_event_port_enqueue_depth == 1)
82920841a25SRashmi Shetty 			evt_err("This event device does not support burst mode");
83020841a25SRashmi Shetty 		else
83120841a25SRashmi Shetty 			return perf_producer_burst(arg);
83220841a25SRashmi Shetty 	}
83317b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
83417b22d0bSPavan Nikhilesh 			!t->opt->timdev_use_burst)
835d008f20bSPavan Nikhilesh 		return perf_event_timer_producer(arg);
83617b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
83717b22d0bSPavan Nikhilesh 			t->opt->timdev_use_burst)
83817b22d0bSPavan Nikhilesh 		return perf_event_timer_producer_burst(arg);
8399c3096d4SVolodymyr Fialko 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
8409c3096d4SVolodymyr Fialko 		if (t->opt->prod_enq_burst_sz > 1)
8419c3096d4SVolodymyr Fialko 			return perf_event_crypto_producer_burst(arg);
8429c3096d4SVolodymyr Fialko 		else
843de2bc16eSShijith Thotton 			return perf_event_crypto_producer(arg);
8449c3096d4SVolodymyr Fialko 	}
84559f697e3SPavan Nikhilesh 	return 0;
84659f697e3SPavan Nikhilesh }
84759f697e3SPavan Nikhilesh 
8489d3aeb18SJerin Jacob static inline uint64_t
8499d3aeb18SJerin Jacob processed_pkts(struct test_perf *t)
8509d3aeb18SJerin Jacob {
8519d3aeb18SJerin Jacob 	uint8_t i;
8529d3aeb18SJerin Jacob 	uint64_t total = 0;
8539d3aeb18SJerin Jacob 
8549d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
8559d3aeb18SJerin Jacob 		total += t->worker[i].processed_pkts;
8569d3aeb18SJerin Jacob 
8579d3aeb18SJerin Jacob 	return total;
8589d3aeb18SJerin Jacob }
8599d3aeb18SJerin Jacob 
8609d3aeb18SJerin Jacob static inline uint64_t
8619d3aeb18SJerin Jacob total_latency(struct test_perf *t)
8629d3aeb18SJerin Jacob {
8639d3aeb18SJerin Jacob 	uint8_t i;
8649d3aeb18SJerin Jacob 	uint64_t total = 0;
8659d3aeb18SJerin Jacob 
8669d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
8679d3aeb18SJerin Jacob 		total += t->worker[i].latency;
8689d3aeb18SJerin Jacob 
8699d3aeb18SJerin Jacob 	return total;
8709d3aeb18SJerin Jacob }
8719d3aeb18SJerin Jacob 
8729d3aeb18SJerin Jacob 
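/* Launch worker and producer lcores, then poll until all outstanding
 * packets are processed, printing throughput (and forward latency) once a
 * second and declaring a deadlock if no progress is made for 5 seconds.
 */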
8739d3aeb18SJerin Jacob int
8749d3aeb18SJerin Jacob perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
8759d3aeb18SJerin Jacob 		int (*worker)(void *))
8769d3aeb18SJerin Jacob {
8779d3aeb18SJerin Jacob 	int ret, lcore_id;
8789d3aeb18SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
8799d3aeb18SJerin Jacob 
8809d3aeb18SJerin Jacob 	int port_idx = 0;
8819d3aeb18SJerin Jacob 	/* launch workers */
882cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
8839d3aeb18SJerin Jacob 		if (!(opt->wlcores[lcore_id]))
8849d3aeb18SJerin Jacob 			continue;
8859d3aeb18SJerin Jacob 
8869d3aeb18SJerin Jacob 		ret = rte_eal_remote_launch(worker,
8879d3aeb18SJerin Jacob 				 &t->worker[port_idx], lcore_id);
8889d3aeb18SJerin Jacob 		if (ret) {
8899d3aeb18SJerin Jacob 			evt_err("failed to launch worker %d", lcore_id);
8909d3aeb18SJerin Jacob 			return ret;
8919d3aeb18SJerin Jacob 		}
8929d3aeb18SJerin Jacob 		port_idx++;
8939d3aeb18SJerin Jacob 	}
8949d3aeb18SJerin Jacob 
8959d3aeb18SJerin Jacob 	/* launch producers */
896cb056611SStephen Hemminger 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
8979d3aeb18SJerin Jacob 		if (!(opt->plcores[lcore_id]))
8989d3aeb18SJerin Jacob 			continue;
8999d3aeb18SJerin Jacob 
90059f697e3SPavan Nikhilesh 		ret = rte_eal_remote_launch(perf_producer_wrapper,
90159f697e3SPavan Nikhilesh 				&t->prod[port_idx], lcore_id);
9029d3aeb18SJerin Jacob 		if (ret) {
9039d3aeb18SJerin Jacob 			evt_err("failed to launch perf_producer %d", lcore_id);
9049d3aeb18SJerin Jacob 			return ret;
9059d3aeb18SJerin Jacob 		}
9069d3aeb18SJerin Jacob 		port_idx++;
9079d3aeb18SJerin Jacob 	}
9089d3aeb18SJerin Jacob 
909d008f20bSPavan Nikhilesh 	const uint64_t total_pkts = t->outstand_pkts;
9109d3aeb18SJerin Jacob 
9119d3aeb18SJerin Jacob 	uint64_t dead_lock_cycles = rte_get_timer_cycles();
9129d3aeb18SJerin Jacob 	int64_t dead_lock_remaining  =  total_pkts;
9139d3aeb18SJerin Jacob 	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
9149d3aeb18SJerin Jacob 
9159d3aeb18SJerin Jacob 	uint64_t perf_cycles = rte_get_timer_cycles();
9169d3aeb18SJerin Jacob 	int64_t perf_remaining  = total_pkts;
9179d3aeb18SJerin Jacob 	const uint64_t perf_sample = rte_get_timer_hz();
9189d3aeb18SJerin Jacob 
9199d3aeb18SJerin Jacob 	static float total_mpps;
9209d3aeb18SJerin Jacob 	static uint64_t samples;
9219d3aeb18SJerin Jacob 
9229d3aeb18SJerin Jacob 	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
9239d3aeb18SJerin Jacob 	int64_t remaining = t->outstand_pkts - processed_pkts(t);
9249d3aeb18SJerin Jacob 
9259d3aeb18SJerin Jacob 	while (t->done == false) {
9269d3aeb18SJerin Jacob 		const uint64_t new_cycles = rte_get_timer_cycles();
9279d3aeb18SJerin Jacob 
9289d3aeb18SJerin Jacob 		if ((new_cycles - perf_cycles) > perf_sample) {
9299d3aeb18SJerin Jacob 			const uint64_t latency = total_latency(t);
9309d3aeb18SJerin Jacob 			const uint64_t pkts = processed_pkts(t);
9319d3aeb18SJerin Jacob 
9329d3aeb18SJerin Jacob 			remaining = t->outstand_pkts - pkts;
9339d3aeb18SJerin Jacob 			float mpps = (float)(perf_remaining-remaining)/1000000;
9349d3aeb18SJerin Jacob 
9359d3aeb18SJerin Jacob 			perf_remaining = remaining;
9369d3aeb18SJerin Jacob 			perf_cycles = new_cycles;
9379d3aeb18SJerin Jacob 			total_mpps += mpps;
9389d3aeb18SJerin Jacob 			++samples;
93904716352SJerin Jacob 			if (opt->fwd_latency && pkts > 0) {
9409d3aeb18SJerin Jacob 				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
9419d3aeb18SJerin Jacob 					mpps, total_mpps/samples,
9429d3aeb18SJerin Jacob 					(double)latency / pkts / freq_mhz);
9439d3aeb18SJerin Jacob 			} else {
9449d3aeb18SJerin Jacob 				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
9459d3aeb18SJerin Jacob 					mpps, total_mpps/samples);
9469d3aeb18SJerin Jacob 			}
9479d3aeb18SJerin Jacob 			fflush(stdout);
9489d3aeb18SJerin Jacob 
9499d3aeb18SJerin Jacob 			if (remaining <= 0) {
9509d3aeb18SJerin Jacob 				t->result = EVT_TEST_SUCCESS;
951d008f20bSPavan Nikhilesh 				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
952d008f20bSPavan Nikhilesh 				    opt->prod_type ==
953de2bc16eSShijith Thotton 					    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
954de2bc16eSShijith Thotton 				    opt->prod_type ==
955de2bc16eSShijith Thotton 					    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
95659f697e3SPavan Nikhilesh 					t->done = true;
9579d3aeb18SJerin Jacob 					break;
9589d3aeb18SJerin Jacob 				}
9599d3aeb18SJerin Jacob 			}
96059f697e3SPavan Nikhilesh 		}
9619d3aeb18SJerin Jacob 
96259f697e3SPavan Nikhilesh 		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
96347303784SErik Gabriel Carrillo 		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
964de2bc16eSShijith Thotton 		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
965de2bc16eSShijith Thotton 		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
9669d3aeb18SJerin Jacob 			remaining = t->outstand_pkts - processed_pkts(t);
9679d3aeb18SJerin Jacob 			if (dead_lock_remaining == remaining) {
9689d3aeb18SJerin Jacob 				rte_event_dev_dump(opt->dev_id, stdout);
9699d3aeb18SJerin Jacob 				evt_err("No schedules for 5 seconds, deadlock suspected");
9709d3aeb18SJerin Jacob 				t->done = true;
9719d3aeb18SJerin Jacob 				break;
9729d3aeb18SJerin Jacob 			}
9739d3aeb18SJerin Jacob 			dead_lock_remaining = remaining;
9749d3aeb18SJerin Jacob 			dead_lock_cycles = new_cycles;
9759d3aeb18SJerin Jacob 		}
9769d3aeb18SJerin Jacob 	}
9779d3aeb18SJerin Jacob 	printf("\n");
9789d3aeb18SJerin Jacob 	return 0;
9799d3aeb18SJerin Jacob }
9809d3aeb18SJerin Jacob 
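/* Create one Rx adapter per ethdev, add all of its Rx queues and, when the
 * adapter lacks an internal port, attach it to a service core.
 */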
9813617aae5SPavan Nikhilesh static int
9823617aae5SPavan Nikhilesh perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
9833617aae5SPavan Nikhilesh 		struct rte_event_port_conf prod_conf)
9843617aae5SPavan Nikhilesh {
9853617aae5SPavan Nikhilesh 	int ret = 0;
9863617aae5SPavan Nikhilesh 	uint16_t prod;
9873617aae5SPavan Nikhilesh 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
9883617aae5SPavan Nikhilesh 
9893617aae5SPavan Nikhilesh 	memset(&queue_conf, 0,
9903617aae5SPavan Nikhilesh 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
9913617aae5SPavan Nikhilesh 	queue_conf.ev.sched_type = opt->sched_type_list[0];
9928728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(prod) {
9933617aae5SPavan Nikhilesh 		uint32_t cap;
9943617aae5SPavan Nikhilesh 
9953617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
9963617aae5SPavan Nikhilesh 				prod, &cap);
9973617aae5SPavan Nikhilesh 		if (ret) {
9983617aae5SPavan Nikhilesh 			evt_err("failed to get event rx adapter[%d]"
9993617aae5SPavan Nikhilesh 					" capabilities",
10003617aae5SPavan Nikhilesh 					opt->dev_id);
10013617aae5SPavan Nikhilesh 			return ret;
10023617aae5SPavan Nikhilesh 		}
10033617aae5SPavan Nikhilesh 		queue_conf.ev.queue_id = prod * stride;
10043617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
10053617aae5SPavan Nikhilesh 				&prod_conf);
10063617aae5SPavan Nikhilesh 		if (ret) {
10073617aae5SPavan Nikhilesh 			evt_err("failed to create rx adapter[%d]", prod);
10083617aae5SPavan Nikhilesh 			return ret;
10093617aae5SPavan Nikhilesh 		}
10103617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
10113617aae5SPavan Nikhilesh 				&queue_conf);
10123617aae5SPavan Nikhilesh 		if (ret) {
10133617aae5SPavan Nikhilesh 			evt_err("failed to add rx queues to adapter[%d]", prod);
10143617aae5SPavan Nikhilesh 			return ret;
10153617aae5SPavan Nikhilesh 		}
10163617aae5SPavan Nikhilesh 
1017b0333c55SPavan Nikhilesh 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
1018b0333c55SPavan Nikhilesh 			uint32_t service_id;
1019b0333c55SPavan Nikhilesh 
1020b0333c55SPavan Nikhilesh 			rte_event_eth_rx_adapter_service_id_get(prod,
1021b0333c55SPavan Nikhilesh 					&service_id);
1022b0333c55SPavan Nikhilesh 			ret = evt_service_setup(service_id);
1023b0333c55SPavan Nikhilesh 			if (ret) {
1024b0333c55SPavan Nikhilesh 				evt_err("Failed to setup service core"
1025b0333c55SPavan Nikhilesh 						" for Rx adapter\n");
1026b0333c55SPavan Nikhilesh 				return ret;
1027b0333c55SPavan Nikhilesh 			}
1028b0333c55SPavan Nikhilesh 		}
10293617aae5SPavan Nikhilesh 	}
10303617aae5SPavan Nikhilesh 
10313617aae5SPavan Nikhilesh 	return ret;
10323617aae5SPavan Nikhilesh }
10333617aae5SPavan Nikhilesh 
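/*
 * Create the requested number of event timer adapters, each configured with
 * the test's tick, max timeout and pool size. A single producer lcore allows
 * the single-producer put optimization (SP_PUT). The adapter's achievable
 * resolution is recorded in optm_timer_tick_nsec, and adapters without an
 * internal event port are driven by a dedicated service core.
 */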
1034d008f20bSPavan Nikhilesh static int
1035d008f20bSPavan Nikhilesh perf_event_timer_adapter_setup(struct test_perf *t)
1036d008f20bSPavan Nikhilesh {
1037d008f20bSPavan Nikhilesh 	int i;
1038d008f20bSPavan Nikhilesh 	int ret;
1039d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter_info adapter_info;
1040d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter *wl;
1041d008f20bSPavan Nikhilesh 	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
1042d008f20bSPavan Nikhilesh 	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
1043d008f20bSPavan Nikhilesh 
1044d008f20bSPavan Nikhilesh 	if (nb_producers == 1)
1045d008f20bSPavan Nikhilesh 		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
1046d008f20bSPavan Nikhilesh 
1047d008f20bSPavan Nikhilesh 	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
1048d008f20bSPavan Nikhilesh 		struct rte_event_timer_adapter_conf config = {
1049d008f20bSPavan Nikhilesh 			.event_dev_id = t->opt->dev_id,
1050d008f20bSPavan Nikhilesh 			.timer_adapter_id = i,
1051d008f20bSPavan Nikhilesh 			.timer_tick_ns = t->opt->timer_tick_nsec,
1052d008f20bSPavan Nikhilesh 			.max_tmo_ns = t->opt->max_tmo_nsec,
1053c13b1ad7SPavan Nikhilesh 			.nb_timers = t->opt->pool_sz,
1054d008f20bSPavan Nikhilesh 			.flags = flags,
1055d008f20bSPavan Nikhilesh 		};
1056d008f20bSPavan Nikhilesh 
1057d008f20bSPavan Nikhilesh 		wl = rte_event_timer_adapter_create(&config);
1058d008f20bSPavan Nikhilesh 		if (wl == NULL) {
1059d008f20bSPavan Nikhilesh 			evt_err("failed to create event timer adapter %d", i);
1060d008f20bSPavan Nikhilesh 			return rte_errno;
1061d008f20bSPavan Nikhilesh 		}
1062d008f20bSPavan Nikhilesh 
1063d008f20bSPavan Nikhilesh 		memset(&adapter_info, 0,
1064d008f20bSPavan Nikhilesh 				sizeof(struct rte_event_timer_adapter_info));
1065d008f20bSPavan Nikhilesh 		rte_event_timer_adapter_get_info(wl, &adapter_info);
1066d008f20bSPavan Nikhilesh 		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
1067d008f20bSPavan Nikhilesh 
1068d008f20bSPavan Nikhilesh 		if (!(adapter_info.caps &
1069d008f20bSPavan Nikhilesh 				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
107099c25664SAndrzej Ostruszka 			uint32_t service_id = -1U;
1071d008f20bSPavan Nikhilesh 
1072d008f20bSPavan Nikhilesh 			rte_event_timer_adapter_service_id_get(wl,
1073d008f20bSPavan Nikhilesh 					&service_id);
1074d008f20bSPavan Nikhilesh 			ret = evt_service_setup(service_id);
1075d008f20bSPavan Nikhilesh 			if (ret) {
1076d008f20bSPavan Nikhilesh 				evt_err("Failed to setup service core"
1077d008f20bSPavan Nikhilesh 						" for timer adapter\n");
1078d008f20bSPavan Nikhilesh 				return ret;
1079d008f20bSPavan Nikhilesh 			}
1080d008f20bSPavan Nikhilesh 			rte_service_runstate_set(service_id, 1);
1081d008f20bSPavan Nikhilesh 		}
1082d008f20bSPavan Nikhilesh 		t->timer_adptr[i] = wl;
1083d008f20bSPavan Nikhilesh 	}
1084d008f20bSPavan Nikhilesh 	return 0;
1085d008f20bSPavan Nikhilesh }
1086d008f20bSPavan Nikhilesh 
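/*
 * Bind a producer's cryptodev queue pair to the crypto adapter. The adapter
 * must support the requested OP_NEW/OP_FORWARD mode and session private
 * data; when event vectorization is enabled, the vector size and timeout are
 * validated against the adapter limits. The response event is filled in only
 * when the adapter can bind events per queue pair (INTERNAL_PORT_QP_EV_BIND).
 */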
1087de2bc16eSShijith Thotton static int
1088de2bc16eSShijith Thotton perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
1089de2bc16eSShijith Thotton {
109069e807dfSVolodymyr Fialko 	struct rte_event_crypto_adapter_queue_conf conf;
1091de2bc16eSShijith Thotton 	struct evt_options *opt = t->opt;
1092de2bc16eSShijith Thotton 	uint32_t cap;
1093de2bc16eSShijith Thotton 	int ret;
1094de2bc16eSShijith Thotton 
109569e807dfSVolodymyr Fialko 	memset(&conf, 0, sizeof(conf));
109669e807dfSVolodymyr Fialko 
1097de2bc16eSShijith Thotton 	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
1098de2bc16eSShijith Thotton 	if (ret) {
1099de2bc16eSShijith Thotton 		evt_err("Failed to get crypto adapter capabilities");
1100de2bc16eSShijith Thotton 		return ret;
1101de2bc16eSShijith Thotton 	}
1102de2bc16eSShijith Thotton 
1103de2bc16eSShijith Thotton 	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
1104de2bc16eSShijith Thotton 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
1105de2bc16eSShijith Thotton 	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
1106de2bc16eSShijith Thotton 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
1107de2bc16eSShijith Thotton 		evt_err("crypto adapter %s mode unsupported\n",
1108de2bc16eSShijith Thotton 			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
1109de2bc16eSShijith Thotton 		return -ENOTSUP;
1110de2bc16eSShijith Thotton 	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
1111de2bc16eSShijith Thotton 		evt_err("Storing crypto session not supported");
1112de2bc16eSShijith Thotton 		return -ENOTSUP;
1113de2bc16eSShijith Thotton 	}
1114de2bc16eSShijith Thotton 
111569e807dfSVolodymyr Fialko 	if (opt->ena_vector) {
111669e807dfSVolodymyr Fialko 		struct rte_event_crypto_adapter_vector_limits limits;
1117de2bc16eSShijith Thotton 
111869e807dfSVolodymyr Fialko 		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
111969e807dfSVolodymyr Fialko 			evt_err("Crypto adapter doesn't support event vector");
112069e807dfSVolodymyr Fialko 			return -EINVAL;
112169e807dfSVolodymyr Fialko 		}
112269e807dfSVolodymyr Fialko 
112369e807dfSVolodymyr Fialko 		ret = rte_event_crypto_adapter_vector_limits_get(p->dev_id, p->ca.cdev_id, &limits);
112469e807dfSVolodymyr Fialko 		if (ret) {
112569e807dfSVolodymyr Fialko 			evt_err("Failed to get crypto adapter's vector limits");
112669e807dfSVolodymyr Fialko 			return ret;
112769e807dfSVolodymyr Fialko 		}
112869e807dfSVolodymyr Fialko 
112969e807dfSVolodymyr Fialko 		if (opt->vector_size < limits.min_sz || opt->vector_size > limits.max_sz) {
113069e807dfSVolodymyr Fialko 			evt_err("Vector size [%d] not within limits max[%d] min[%d]",
113169e807dfSVolodymyr Fialko 				opt->vector_size, limits.max_sz, limits.min_sz);
113269e807dfSVolodymyr Fialko 			return -EINVAL;
113369e807dfSVolodymyr Fialko 		}
113469e807dfSVolodymyr Fialko 
113569e807dfSVolodymyr Fialko 		if (limits.log2_sz && !rte_is_power_of_2(opt->vector_size)) {
113669e807dfSVolodymyr Fialko 			evt_err("Vector size [%d] not power of 2", opt->vector_size);
113769e807dfSVolodymyr Fialko 			return -EINVAL;
113869e807dfSVolodymyr Fialko 		}
113969e807dfSVolodymyr Fialko 
114069e807dfSVolodymyr Fialko 		if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
114169e807dfSVolodymyr Fialko 			opt->vector_tmo_nsec < limits.min_timeout_ns) {
114269e807dfSVolodymyr Fialko 			evt_err("Vector timeout [%" PRIu64 "] not within limits "
114369e807dfSVolodymyr Fialko 				"max[%" PRIu64 "] min[%" PRIu64 "]",
114469e807dfSVolodymyr Fialko 				opt->vector_tmo_nsec, limits.max_timeout_ns, limits.min_timeout_ns);
114569e807dfSVolodymyr Fialko 			return -EINVAL;
114669e807dfSVolodymyr Fialko 		}
114769e807dfSVolodymyr Fialko 
114869e807dfSVolodymyr Fialko 		conf.vector_mp = t->ca_vector_pool;
114969e807dfSVolodymyr Fialko 		conf.vector_sz = opt->vector_size;
115069e807dfSVolodymyr Fialko 		conf.vector_timeout_ns = opt->vector_tmo_nsec;
115169e807dfSVolodymyr Fialko 		conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
115269e807dfSVolodymyr Fialko 	}
115369e807dfSVolodymyr Fialko 
115469e807dfSVolodymyr Fialko 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
1155c1749bc5SVolodymyr Fialko 		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1156c1749bc5SVolodymyr Fialko 		conf.ev.queue_id = p->queue_id;
115769e807dfSVolodymyr Fialko 	}
115869e807dfSVolodymyr Fialko 
1159de2bc16eSShijith Thotton 	ret = rte_event_crypto_adapter_queue_pair_add(
1160c1749bc5SVolodymyr Fialko 		TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);
1161de2bc16eSShijith Thotton 
1162de2bc16eSShijith Thotton 	return ret;
1163de2bc16eSShijith Thotton }
1164de2bc16eSShijith Thotton 
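/*
 * Create a symmetric cipher (encrypt) session for a producer: verify that
 * the cryptodev supports the configured cipher algorithm, key size and IV
 * size before creating the session from the shared session pool.
 */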
11652a440d6aSAkhil Goyal static void *
1166de2bc16eSShijith Thotton cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
1167de2bc16eSShijith Thotton {
1168750ab9d5SAakash Sasidharan 	const struct rte_cryptodev_symmetric_capability *cap;
1169750ab9d5SAakash Sasidharan 	struct rte_cryptodev_sym_capability_idx cap_idx;
1170750ab9d5SAakash Sasidharan 	enum rte_crypto_cipher_algorithm cipher_algo;
1171de2bc16eSShijith Thotton 	struct rte_crypto_sym_xform cipher_xform;
1172750ab9d5SAakash Sasidharan 	struct evt_options *opt = t->opt;
1173750ab9d5SAakash Sasidharan 	uint16_t key_size;
1174750ab9d5SAakash Sasidharan 	uint16_t iv_size;
11752a440d6aSAkhil Goyal 	void *sess;
1176de2bc16eSShijith Thotton 
1177750ab9d5SAakash Sasidharan 	cipher_algo = opt->crypto_cipher_alg;
1178750ab9d5SAakash Sasidharan 	key_size = opt->crypto_cipher_key_sz;
1179750ab9d5SAakash Sasidharan 	iv_size = opt->crypto_cipher_iv_sz;
1180750ab9d5SAakash Sasidharan 
1181750ab9d5SAakash Sasidharan 	/* Check if device supports the algorithm */
1182750ab9d5SAakash Sasidharan 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1183750ab9d5SAakash Sasidharan 	cap_idx.algo.cipher = cipher_algo;
1184750ab9d5SAakash Sasidharan 
1185750ab9d5SAakash Sasidharan 	cap = rte_cryptodev_sym_capability_get(p->ca.cdev_id, &cap_idx);
1186750ab9d5SAakash Sasidharan 	if (cap == NULL) {
1187750ab9d5SAakash Sasidharan 		evt_err("Device doesn't support cipher algorithm [%s]. Test Skipped\n",
1188750ab9d5SAakash Sasidharan 			rte_cryptodev_get_cipher_algo_string(cipher_algo));
1189750ab9d5SAakash Sasidharan 		return NULL;
1190750ab9d5SAakash Sasidharan 	}
1191750ab9d5SAakash Sasidharan 
1192750ab9d5SAakash Sasidharan 	/* Check if device supports key size and IV size */
1193750ab9d5SAakash Sasidharan 	if (rte_cryptodev_sym_capability_check_cipher(cap, key_size,
1194750ab9d5SAakash Sasidharan 			iv_size) < 0) {
1195750ab9d5SAakash Sasidharan 		evt_err("Device doesn't support cipher configuration:\n"
1196750ab9d5SAakash Sasidharan 			"cipher algo [%s], key sz [%d], iv sz [%d]. Test Skipped\n",
1197750ab9d5SAakash Sasidharan 			rte_cryptodev_get_cipher_algo_string(cipher_algo), key_size, iv_size);
1198750ab9d5SAakash Sasidharan 		return NULL;
1199750ab9d5SAakash Sasidharan 	}
1200750ab9d5SAakash Sasidharan 
1201de2bc16eSShijith Thotton 	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1202750ab9d5SAakash Sasidharan 	cipher_xform.cipher.algo = cipher_algo;
1203750ab9d5SAakash Sasidharan 	cipher_xform.cipher.key.data = opt->crypto_cipher_key;
1204750ab9d5SAakash Sasidharan 	cipher_xform.cipher.key.length = key_size;
1205750ab9d5SAakash Sasidharan 	cipher_xform.cipher.iv.length = iv_size;
1206750ab9d5SAakash Sasidharan 	cipher_xform.cipher.iv.offset = IV_OFFSET;
1207de2bc16eSShijith Thotton 	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1208de2bc16eSShijith Thotton 	cipher_xform.next = NULL;
1209de2bc16eSShijith Thotton 
1210bdce2564SAkhil Goyal 	sess = rte_cryptodev_sym_session_create(p->ca.cdev_id, &cipher_xform,
1211bdce2564SAkhil Goyal 			t->ca_sess_pool);
1212de2bc16eSShijith Thotton 	if (sess == NULL) {
1213de2bc16eSShijith Thotton 		evt_err("Failed to create sym session");
1214de2bc16eSShijith Thotton 		return NULL;
1215de2bc16eSShijith Thotton 	}
1216de2bc16eSShijith Thotton 
1217de2bc16eSShijith Thotton 	return sess;
1218de2bc16eSShijith Thotton }
1219de2bc16eSShijith Thotton 
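/*
 * Create an asymmetric (MODEX) session for a producer from the static
 * modex_test_case modulus/exponent, after checking that the cryptodev
 * advertises MODEX support.
 */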
12208f5b5495SAkhil Goyal static void *
12218f5b5495SAkhil Goyal cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
12228f5b5495SAkhil Goyal {
12238f5b5495SAkhil Goyal 	const struct rte_cryptodev_asymmetric_xform_capability *capability;
12248f5b5495SAkhil Goyal 	struct rte_cryptodev_asym_capability_idx cap_idx;
12258f5b5495SAkhil Goyal 	struct rte_crypto_asym_xform xform;
12268f5b5495SAkhil Goyal 	void *sess;
12278f5b5495SAkhil Goyal 
12288f5b5495SAkhil Goyal 	xform.next = NULL;
12298f5b5495SAkhil Goyal 	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
12308f5b5495SAkhil Goyal 	cap_idx.type = xform.xform_type;
12318f5b5495SAkhil Goyal 	capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
12328f5b5495SAkhil Goyal 	if (capability == NULL) {
12338f5b5495SAkhil Goyal 		evt_err("Device doesn't support MODEX. Test Skipped\n");
12348f5b5495SAkhil Goyal 		return NULL;
12358f5b5495SAkhil Goyal 	}
12368f5b5495SAkhil Goyal 
12378f5b5495SAkhil Goyal 	xform.modex.modulus.data = modex_test_case.modulus.data;
12388f5b5495SAkhil Goyal 	xform.modex.modulus.length = modex_test_case.modulus.len;
12398f5b5495SAkhil Goyal 	xform.modex.exponent.data = modex_test_case.exponent.data;
12408f5b5495SAkhil Goyal 	xform.modex.exponent.length = modex_test_case.exponent.len;
12418f5b5495SAkhil Goyal 
12428f5b5495SAkhil Goyal 	if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
12438f5b5495SAkhil Goyal 			t->ca_asym_sess_pool, &sess)) {
12448f5b5495SAkhil Goyal 		evt_err("Failed to create asym session");
12458f5b5495SAkhil Goyal 		return NULL;
12468f5b5495SAkhil Goyal 	}
12478f5b5495SAkhil Goyal 
12488f5b5495SAkhil Goyal 	return sess;
12498f5b5495SAkhil Goyal }
12508f5b5495SAkhil Goyal 
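/*
 * Set up event ports: one port per worker lcore linked to all queues, then
 * the producer ports. Depending on the producer type this creates Rx
 * adapters, timer adapters or a crypto adapter (with per-flow sessions and
 * event metadata); synthetic producers only need a plain unlinked port with
 * producer/consumer hints.
 */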
1251272de067SJerin Jacob int
125284a7513dSJerin Jacob perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
1253535c630cSPavan Nikhilesh 				uint8_t stride, uint8_t nb_queues,
1254535c630cSPavan Nikhilesh 				const struct rte_event_port_conf *port_conf)
125584a7513dSJerin Jacob {
125684a7513dSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
12573617aae5SPavan Nikhilesh 	uint16_t port, prod;
125884a7513dSJerin Jacob 	int ret = -1;
125984a7513dSJerin Jacob 
126084a7513dSJerin Jacob 	/* setup one port per worker, linking to all queues */
126184a7513dSJerin Jacob 	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
126284a7513dSJerin Jacob 				port++) {
126384a7513dSJerin Jacob 		struct worker_data *w = &t->worker[port];
126484a7513dSJerin Jacob 
126584a7513dSJerin Jacob 		w->dev_id = opt->dev_id;
126684a7513dSJerin Jacob 		w->port_id = port;
126784a7513dSJerin Jacob 		w->t = t;
126884a7513dSJerin Jacob 		w->processed_pkts = 0;
126984a7513dSJerin Jacob 		w->latency = 0;
127084a7513dSJerin Jacob 
12715f94d108SHarry van Haaren 		struct rte_event_port_conf conf = *port_conf;
12725f94d108SHarry van Haaren 		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
12735f94d108SHarry van Haaren 
12745f94d108SHarry van Haaren 		ret = rte_event_port_setup(opt->dev_id, port, &conf);
127584a7513dSJerin Jacob 		if (ret) {
127684a7513dSJerin Jacob 			evt_err("failed to setup port %d", port);
127784a7513dSJerin Jacob 			return ret;
127884a7513dSJerin Jacob 		}
127984a7513dSJerin Jacob 
128084a7513dSJerin Jacob 		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
128184a7513dSJerin Jacob 		if (ret != nb_queues) {
128284a7513dSJerin Jacob 			evt_err("failed to link all queues to port %d", port);
128384a7513dSJerin Jacob 			return -EINVAL;
128484a7513dSJerin Jacob 		}
128584a7513dSJerin Jacob 	}
128684a7513dSJerin Jacob 
128784a7513dSJerin Jacob 	/* port for producers, no links */
12883617aae5SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
12893617aae5SPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
12903617aae5SPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
12913617aae5SPavan Nikhilesh 			p->t = t;
12923617aae5SPavan Nikhilesh 		}
12933617aae5SPavan Nikhilesh 
12945f94d108SHarry van Haaren 		struct rte_event_port_conf conf = *port_conf;
12955f94d108SHarry van Haaren 		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
12965f94d108SHarry van Haaren 
12975f94d108SHarry van Haaren 		ret = perf_event_rx_adapter_setup(opt, stride, conf);
12983617aae5SPavan Nikhilesh 		if (ret)
12993617aae5SPavan Nikhilesh 			return ret;
1300d008f20bSPavan Nikhilesh 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1301d008f20bSPavan Nikhilesh 		prod = 0;
1302d008f20bSPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
1303d008f20bSPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
1304d008f20bSPavan Nikhilesh 			p->queue_id = prod * stride;
1305d008f20bSPavan Nikhilesh 			p->t = t;
1306d008f20bSPavan Nikhilesh 			prod++;
1307d008f20bSPavan Nikhilesh 		}
1308d008f20bSPavan Nikhilesh 
1309d008f20bSPavan Nikhilesh 		ret = perf_event_timer_adapter_setup(t);
1310d008f20bSPavan Nikhilesh 		if (ret)
1311d008f20bSPavan Nikhilesh 			return ret;
1312de2bc16eSShijith Thotton 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
1313de2bc16eSShijith Thotton 		struct rte_event_port_conf conf = *port_conf;
1314de2bc16eSShijith Thotton 		uint8_t cdev_id = 0;
1315de2bc16eSShijith Thotton 		uint16_t qp_id = 0;
1316de2bc16eSShijith Thotton 
1317de2bc16eSShijith Thotton 		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
1318de2bc16eSShijith Thotton 						      opt->dev_id, &conf, 0);
1319de2bc16eSShijith Thotton 		if (ret) {
1320de2bc16eSShijith Thotton 			evt_err("Failed to create crypto adapter");
1321de2bc16eSShijith Thotton 			return ret;
1322de2bc16eSShijith Thotton 		}
1323de2bc16eSShijith Thotton 
1324de2bc16eSShijith Thotton 		prod = 0;
1325de2bc16eSShijith Thotton 		for (; port < perf_nb_event_ports(opt); port++) {
1326de2bc16eSShijith Thotton 			union rte_event_crypto_metadata m_data;
1327de2bc16eSShijith Thotton 			struct prod_data *p = &t->prod[port];
1328de2bc16eSShijith Thotton 			uint32_t flow_id;
1329de2bc16eSShijith Thotton 
1330de2bc16eSShijith Thotton 			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
1331de2bc16eSShijith Thotton 				cdev_id++;
1332de2bc16eSShijith Thotton 				qp_id = 0;
1333de2bc16eSShijith Thotton 			}
1334de2bc16eSShijith Thotton 
1335de2bc16eSShijith Thotton 			p->dev_id = opt->dev_id;
1336de2bc16eSShijith Thotton 			p->port_id = port;
1337de2bc16eSShijith Thotton 			p->queue_id = prod * stride;
1338de2bc16eSShijith Thotton 			p->ca.cdev_id = cdev_id;
1339de2bc16eSShijith Thotton 			p->ca.cdev_qp_id = qp_id;
1340de2bc16eSShijith Thotton 			p->ca.crypto_sess = rte_zmalloc_socket(
13418f5b5495SAkhil Goyal 				NULL, sizeof(void *) * t->nb_flows,
1342de2bc16eSShijith Thotton 				RTE_CACHE_LINE_SIZE, opt->socket_id);
1343de2bc16eSShijith Thotton 			p->t = t;
1344de2bc16eSShijith Thotton 
1345eff29c45SVolodymyr Fialko 			ret = perf_event_crypto_adapter_setup(t, p);
1346eff29c45SVolodymyr Fialko 			if (ret)
1347eff29c45SVolodymyr Fialko 				return ret;
1348eff29c45SVolodymyr Fialko 
1349de2bc16eSShijith Thotton 			m_data.request_info.cdev_id = p->ca.cdev_id;
1350de2bc16eSShijith Thotton 			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
1351de2bc16eSShijith Thotton 			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
1352de2bc16eSShijith Thotton 			m_data.response_info.queue_id = p->queue_id;
1353de2bc16eSShijith Thotton 
1354de2bc16eSShijith Thotton 			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
13558f5b5495SAkhil Goyal 				m_data.response_info.flow_id = flow_id;
13568f5b5495SAkhil Goyal 				if (opt->crypto_op_type ==
13578f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
13582a440d6aSAkhil Goyal 					void *sess;
13598f5b5495SAkhil Goyal 
13608f5b5495SAkhil Goyal 					sess = cryptodev_sym_sess_create(p, t);
13618f5b5495SAkhil Goyal 					if (sess == NULL)
1362de2bc16eSShijith Thotton 						return -ENOMEM;
1363de2bc16eSShijith Thotton 
1364eff29c45SVolodymyr Fialko 					ret = rte_cryptodev_session_event_mdata_set(
13658f5b5495SAkhil Goyal 						cdev_id,
13668f5b5495SAkhil Goyal 						sess,
13674c43055cSAkhil Goyal 						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
13684c43055cSAkhil Goyal 						RTE_CRYPTO_OP_WITH_SESSION,
13694c43055cSAkhil Goyal 						&m_data, sizeof(m_data));
1370eff29c45SVolodymyr Fialko 					if (ret)
1371eff29c45SVolodymyr Fialko 						return ret;
13728f5b5495SAkhil Goyal 					p->ca.crypto_sess[flow_id] = sess;
13738f5b5495SAkhil Goyal 				} else {
13748f5b5495SAkhil Goyal 					void *sess;
13754c43055cSAkhil Goyal 
13768f5b5495SAkhil Goyal 					sess = cryptodev_asym_sess_create(p, t);
13778f5b5495SAkhil Goyal 					if (sess == NULL)
13788f5b5495SAkhil Goyal 						return -ENOMEM;
1379eff29c45SVolodymyr Fialko 					ret = rte_cryptodev_session_event_mdata_set(
13808f5b5495SAkhil Goyal 						cdev_id,
13818f5b5495SAkhil Goyal 						sess,
13828f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
13838f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_WITH_SESSION,
13848f5b5495SAkhil Goyal 						&m_data, sizeof(m_data));
1385eff29c45SVolodymyr Fialko 					if (ret)
1386eff29c45SVolodymyr Fialko 						return ret;
13878f5b5495SAkhil Goyal 					p->ca.crypto_sess[flow_id] = sess;
13888f5b5495SAkhil Goyal 				}
1389de2bc16eSShijith Thotton 			}
1390de2bc16eSShijith Thotton 
1391de2bc16eSShijith Thotton 			conf.event_port_cfg |=
1392de2bc16eSShijith Thotton 				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
1393de2bc16eSShijith Thotton 				RTE_EVENT_PORT_CFG_HINT_CONSUMER;
1394de2bc16eSShijith Thotton 
1395de2bc16eSShijith Thotton 			ret = rte_event_port_setup(opt->dev_id, port, &conf);
1396de2bc16eSShijith Thotton 			if (ret) {
1397de2bc16eSShijith Thotton 				evt_err("failed to setup port %d", port);
1398de2bc16eSShijith Thotton 				return ret;
1399de2bc16eSShijith Thotton 			}
1400de2bc16eSShijith Thotton 
1401de2bc16eSShijith Thotton 			qp_id++;
1402de2bc16eSShijith Thotton 			prod++;
1403de2bc16eSShijith Thotton 		}
14043617aae5SPavan Nikhilesh 	} else {
140584a7513dSJerin Jacob 		prod = 0;
140684a7513dSJerin Jacob 		for ( ; port < perf_nb_event_ports(opt); port++) {
140784a7513dSJerin Jacob 			struct prod_data *p = &t->prod[port];
140884a7513dSJerin Jacob 
140984a7513dSJerin Jacob 			p->dev_id = opt->dev_id;
141084a7513dSJerin Jacob 			p->port_id = port;
141184a7513dSJerin Jacob 			p->queue_id = prod * stride;
141284a7513dSJerin Jacob 			p->t = t;
141384a7513dSJerin Jacob 
14145f94d108SHarry van Haaren 			struct rte_event_port_conf conf = *port_conf;
14155f94d108SHarry van Haaren 			conf.event_port_cfg |=
14165f94d108SHarry van Haaren 				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
14175f94d108SHarry van Haaren 				RTE_EVENT_PORT_CFG_HINT_CONSUMER;
14185f94d108SHarry van Haaren 
14195f94d108SHarry van Haaren 			ret = rte_event_port_setup(opt->dev_id, port, &conf);
142084a7513dSJerin Jacob 			if (ret) {
142184a7513dSJerin Jacob 				evt_err("failed to setup port %d", port);
142284a7513dSJerin Jacob 				return ret;
142384a7513dSJerin Jacob 			}
142484a7513dSJerin Jacob 			prod++;
142584a7513dSJerin Jacob 		}
14263617aae5SPavan Nikhilesh 	}
142784a7513dSJerin Jacob 
142884a7513dSJerin Jacob 	return ret;
142984a7513dSJerin Jacob }
143084a7513dSJerin Jacob 
143184a7513dSJerin Jacob int
1432272de067SJerin Jacob perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
1433272de067SJerin Jacob {
1434272de067SJerin Jacob 	unsigned int lcores;
1435272de067SJerin Jacob 
1436cb056611SStephen Hemminger 	/* N producer + N worker + main when producer cores are used
1437cb056611SStephen Hemminger 	 * Else N worker + main when Rx adapter is used
1438b01974daSPavan Nikhilesh 	 */
1439b01974daSPavan Nikhilesh 	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
1440272de067SJerin Jacob 
1441272de067SJerin Jacob 	if (rte_lcore_count() < lcores) {
1442272de067SJerin Jacob 		evt_err("test need minimum %d lcores", lcores);
1443272de067SJerin Jacob 		evt_err("test needs minimum %d lcores", lcores);
1444272de067SJerin Jacob 	}
1445272de067SJerin Jacob 
1446272de067SJerin Jacob 	/* Validate worker lcores */
1447cb056611SStephen Hemminger 	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
1448cb056611SStephen Hemminger 		evt_err("worker lcores overlaps with main lcore");
1449272de067SJerin Jacob 		return -1;
1450272de067SJerin Jacob 	}
1451272de067SJerin Jacob 	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
1452272de067SJerin Jacob 		evt_err("worker lcores overlaps producer lcores");
1453272de067SJerin Jacob 		return -1;
1454272de067SJerin Jacob 	}
1455272de067SJerin Jacob 	if (evt_has_disabled_lcore(opt->wlcores)) {
1456272de067SJerin Jacob 		evt_err("one or more worker lcores are not enabled");
1457272de067SJerin Jacob 		return -1;
1458272de067SJerin Jacob 	}
1459272de067SJerin Jacob 	if (!evt_has_active_lcore(opt->wlcores)) {
1460272de067SJerin Jacob 		evt_err("minimum one worker is required");
1461272de067SJerin Jacob 		return -1;
1462272de067SJerin Jacob 	}
1463272de067SJerin Jacob 
1464902387eaSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
1465de2bc16eSShijith Thotton 	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
1466de2bc16eSShijith Thotton 	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
1467272de067SJerin Jacob 		/* Validate producer lcores */
1468b01974daSPavan Nikhilesh 		if (evt_lcores_has_overlap(opt->plcores,
1469cb056611SStephen Hemminger 					rte_get_main_lcore())) {
1470cb056611SStephen Hemminger 			evt_err("producer lcores overlaps with main lcore");
1471272de067SJerin Jacob 			return -1;
1472272de067SJerin Jacob 		}
1473272de067SJerin Jacob 		if (evt_has_disabled_lcore(opt->plcores)) {
1474272de067SJerin Jacob 			evt_err("one or more producer lcores are not enabled");
1475272de067SJerin Jacob 			return -1;
1476272de067SJerin Jacob 		}
1477272de067SJerin Jacob 		if (!evt_has_active_lcore(opt->plcores)) {
1478272de067SJerin Jacob 			evt_err("minimum one producer is required");
1479272de067SJerin Jacob 			return -1;
1480272de067SJerin Jacob 		}
1481b01974daSPavan Nikhilesh 	}
1482272de067SJerin Jacob 
1483272de067SJerin Jacob 	if (evt_has_invalid_stage(opt))
1484272de067SJerin Jacob 		return -1;
1485272de067SJerin Jacob 
1486272de067SJerin Jacob 	if (evt_has_invalid_sched_type(opt))
1487272de067SJerin Jacob 		return -1;
1488272de067SJerin Jacob 
1489272de067SJerin Jacob 	if (nb_queues > EVT_MAX_QUEUES) {
1490272de067SJerin Jacob 		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
1491272de067SJerin Jacob 		return -1;
1492272de067SJerin Jacob 	}
1493272de067SJerin Jacob 	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
1494272de067SJerin Jacob 		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
1495272de067SJerin Jacob 		return -1;
1496272de067SJerin Jacob 	}
1497272de067SJerin Jacob 
1498272de067SJerin Jacob 	/* Fixups */
1499d008f20bSPavan Nikhilesh 	if ((opt->nb_stages == 1 &&
1500d008f20bSPavan Nikhilesh 			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
1501d008f20bSPavan Nikhilesh 			opt->fwd_latency) {
1502272de067SJerin Jacob 		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
1503272de067SJerin Jacob 		opt->fwd_latency = 0;
1504272de067SJerin Jacob 	}
1505d008f20bSPavan Nikhilesh 
1506272de067SJerin Jacob 	if (opt->fwd_latency && !opt->q_priority) {
1507272de067SJerin Jacob 		evt_info("enabled queue priority for latency measurement");
1508272de067SJerin Jacob 		opt->q_priority = 1;
1509272de067SJerin Jacob 	}
15109d3aeb18SJerin Jacob 	if (opt->nb_pkts == 0)
15119d3aeb18SJerin Jacob 		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
1512272de067SJerin Jacob 
1513272de067SJerin Jacob 	return 0;
1514272de067SJerin Jacob }
1515272de067SJerin Jacob 
1516272de067SJerin Jacob void
1517272de067SJerin Jacob perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
1518272de067SJerin Jacob {
1519272de067SJerin Jacob 	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
1520272de067SJerin Jacob 	evt_dump_producer_lcores(opt);
1521272de067SJerin Jacob 	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
1522272de067SJerin Jacob 	evt_dump_worker_lcores(opt);
1523272de067SJerin Jacob 	evt_dump_nb_stages(opt);
1524272de067SJerin Jacob 	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
1525272de067SJerin Jacob 	evt_dump("nb_evdev_queues", "%d", nb_queues);
1526272de067SJerin Jacob 	evt_dump_queue_priority(opt);
1527272de067SJerin Jacob 	evt_dump_sched_type_list(opt);
1528b01974daSPavan Nikhilesh 	evt_dump_producer_type(opt);
152920841a25SRashmi Shetty 	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
1530272de067SJerin Jacob }
1531272de067SJerin Jacob 
15327da008dfSPavan Nikhilesh static void
15337da008dfSPavan Nikhilesh perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
15347da008dfSPavan Nikhilesh 		      void *args)
15357da008dfSPavan Nikhilesh {
15367da008dfSPavan Nikhilesh 	rte_mempool_put(args, ev.event_ptr);
15377da008dfSPavan Nikhilesh }
15387da008dfSPavan Nikhilesh 
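/*
 * Drop events a worker still holds on exit: events dequeued but not
 * re-enqueued are returned to the mempool, every dequeued slot is released,
 * and the port is quiesced so that any buffered events are flushed back to
 * the pool as well.
 */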
153941c219e6SJerin Jacob void
1540f0b68c0bSPavan Nikhilesh perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
1541f0b68c0bSPavan Nikhilesh 		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
1542f0b68c0bSPavan Nikhilesh 		    uint16_t nb_deq)
1543f0b68c0bSPavan Nikhilesh {
1544f0b68c0bSPavan Nikhilesh 	int i;
1545f0b68c0bSPavan Nikhilesh 
1546f0b68c0bSPavan Nikhilesh 	if (nb_deq) {
1547f0b68c0bSPavan Nikhilesh 		for (i = nb_enq; i < nb_deq; i++)
1548f0b68c0bSPavan Nikhilesh 			rte_mempool_put(pool, events[i].event_ptr);
1549f0b68c0bSPavan Nikhilesh 
1550f0b68c0bSPavan Nikhilesh 		for (i = 0; i < nb_deq; i++)
1551f0b68c0bSPavan Nikhilesh 			events[i].op = RTE_EVENT_OP_RELEASE;
1552f0b68c0bSPavan Nikhilesh 		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
1553f0b68c0bSPavan Nikhilesh 	}
15547da008dfSPavan Nikhilesh 	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
1555f0b68c0bSPavan Nikhilesh }
1556f0b68c0bSPavan Nikhilesh 
1557f0b68c0bSPavan Nikhilesh void
155841c219e6SJerin Jacob perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
155941c219e6SJerin Jacob {
1560d008f20bSPavan Nikhilesh 	int i;
1561d008f20bSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
156241c219e6SJerin Jacob 
1563d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1564d008f20bSPavan Nikhilesh 		for (i = 0; i < opt->nb_timer_adptrs; i++)
1565d008f20bSPavan Nikhilesh 			rte_event_timer_adapter_stop(t->timer_adptr[i]);
1566d008f20bSPavan Nikhilesh 	}
156741c219e6SJerin Jacob 	rte_event_dev_stop(opt->dev_id);
156841c219e6SJerin Jacob 	rte_event_dev_close(opt->dev_id);
156941c219e6SJerin Jacob }
157041c219e6SJerin Jacob 
157141c219e6SJerin Jacob static inline void
157241c219e6SJerin Jacob perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
157341c219e6SJerin Jacob 	    void *obj, unsigned i __rte_unused)
157441c219e6SJerin Jacob {
157541c219e6SJerin Jacob 	memset(obj, 0, mp->elt_size);
157641c219e6SJerin Jacob }
157741c219e6SJerin Jacob 
15783fc8de4fSPavan Nikhilesh #define NB_RX_DESC			128
15793fc8de4fSPavan Nikhilesh #define NB_TX_DESC			512
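/*
 * Configure every available ethernet device with a single Rx/Tx queue pair
 * and RSS on IP, masking the requested RSS hash functions by what the
 * hardware supports, and enable promiscuous mode. Only used with the
 * ethernet Rx adapter producer type.
 */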
15803fc8de4fSPavan Nikhilesh int
15813fc8de4fSPavan Nikhilesh perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
15823fc8de4fSPavan Nikhilesh {
15838728ccf3SThomas Monjalon 	uint16_t i;
158477339255SIvan Ilchenko 	int ret;
15853fc8de4fSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
15863fc8de4fSPavan Nikhilesh 	struct rte_eth_conf port_conf = {
15873fc8de4fSPavan Nikhilesh 		.rxmode = {
1588295968d1SFerruh Yigit 			.mq_mode = RTE_ETH_MQ_RX_RSS,
15893fc8de4fSPavan Nikhilesh 		},
15903fc8de4fSPavan Nikhilesh 		.rx_adv_conf = {
15913fc8de4fSPavan Nikhilesh 			.rss_conf = {
15923fc8de4fSPavan Nikhilesh 				.rss_key = NULL,
1593295968d1SFerruh Yigit 				.rss_hf = RTE_ETH_RSS_IP,
15943fc8de4fSPavan Nikhilesh 			},
15953fc8de4fSPavan Nikhilesh 		},
15963fc8de4fSPavan Nikhilesh 	};
15973fc8de4fSPavan Nikhilesh 
1598de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
15993fc8de4fSPavan Nikhilesh 		return 0;
16003fc8de4fSPavan Nikhilesh 
1601d9a42a69SThomas Monjalon 	if (!rte_eth_dev_count_avail()) {
16023fc8de4fSPavan Nikhilesh 		evt_err("No ethernet ports found.");
16033fc8de4fSPavan Nikhilesh 		return -ENODEV;
16043fc8de4fSPavan Nikhilesh 	}
16053fc8de4fSPavan Nikhilesh 
16068728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(i) {
16074f5701f2SFerruh Yigit 		struct rte_eth_dev_info dev_info;
16084f5701f2SFerruh Yigit 		struct rte_eth_conf local_port_conf = port_conf;
16093fc8de4fSPavan Nikhilesh 
161077339255SIvan Ilchenko 		ret = rte_eth_dev_info_get(i, &dev_info);
161177339255SIvan Ilchenko 		if (ret != 0) {
161277339255SIvan Ilchenko 			evt_err("Error getting device (port %u) info: %s\n",
161377339255SIvan Ilchenko 					i, strerror(-ret));
161477339255SIvan Ilchenko 			return ret;
161577339255SIvan Ilchenko 		}
16164f5701f2SFerruh Yigit 
16174f5701f2SFerruh Yigit 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
16184f5701f2SFerruh Yigit 			dev_info.flow_type_rss_offloads;
16194f5701f2SFerruh Yigit 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
16204f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
16214f5701f2SFerruh Yigit 			evt_info("Port %u modified RSS hash function based on hardware support, "
16224f5701f2SFerruh Yigit 				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
16234f5701f2SFerruh Yigit 				i,
16244f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf,
16254f5701f2SFerruh Yigit 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
16264f5701f2SFerruh Yigit 		}
16274f5701f2SFerruh Yigit 
16284f5701f2SFerruh Yigit 		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
16293fc8de4fSPavan Nikhilesh 			evt_err("Failed to configure eth port [%d]", i);
16303fc8de4fSPavan Nikhilesh 			return -EINVAL;
16313fc8de4fSPavan Nikhilesh 		}
16323fc8de4fSPavan Nikhilesh 
16333fc8de4fSPavan Nikhilesh 		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
16343fc8de4fSPavan Nikhilesh 				rte_socket_id(), NULL, t->pool) < 0) {
16353fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
16363fc8de4fSPavan Nikhilesh 					i, 0);
16373fc8de4fSPavan Nikhilesh 			return -EINVAL;
16383fc8de4fSPavan Nikhilesh 		}
16393fc8de4fSPavan Nikhilesh 
16403fc8de4fSPavan Nikhilesh 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
16413fc8de4fSPavan Nikhilesh 					rte_socket_id(), NULL) < 0) {
16423fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
16433fc8de4fSPavan Nikhilesh 					i, 0);
16443fc8de4fSPavan Nikhilesh 			return -EINVAL;
16453fc8de4fSPavan Nikhilesh 		}
16463fc8de4fSPavan Nikhilesh 
164770e51a0eSIvan Ilchenko 		ret = rte_eth_promiscuous_enable(i);
164870e51a0eSIvan Ilchenko 		if (ret != 0) {
164970e51a0eSIvan Ilchenko 			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
165070e51a0eSIvan Ilchenko 				i, rte_strerror(-ret));
165170e51a0eSIvan Ilchenko 			return ret;
165270e51a0eSIvan Ilchenko 		}
16533fc8de4fSPavan Nikhilesh 	}
16543fc8de4fSPavan Nikhilesh 
16553fc8de4fSPavan Nikhilesh 	return 0;
16563fc8de4fSPavan Nikhilesh }
16573fc8de4fSPavan Nikhilesh 
1658a734e738SPavan Nikhilesh void
1659a734e738SPavan Nikhilesh perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
16607f3daf34SPavan Nikhilesh {
16618728ccf3SThomas Monjalon 	uint16_t i;
16627f3daf34SPavan Nikhilesh 	RTE_SET_USED(test);
16637f3daf34SPavan Nikhilesh 
16647f3daf34SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
16658728ccf3SThomas Monjalon 		RTE_ETH_FOREACH_DEV(i) {
16663617aae5SPavan Nikhilesh 			rte_event_eth_rx_adapter_stop(i);
1667a734e738SPavan Nikhilesh 			rte_event_eth_rx_adapter_queue_del(i, i, -1);
1668a734e738SPavan Nikhilesh 			rte_eth_dev_rx_queue_stop(i, 0);
1669a734e738SPavan Nikhilesh 		}
1670a734e738SPavan Nikhilesh 	}
1671a734e738SPavan Nikhilesh }
1672a734e738SPavan Nikhilesh 
1673a734e738SPavan Nikhilesh void
1674a734e738SPavan Nikhilesh perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
1675a734e738SPavan Nikhilesh {
1676a734e738SPavan Nikhilesh 	uint16_t i;
1677a734e738SPavan Nikhilesh 	RTE_SET_USED(test);
1678a734e738SPavan Nikhilesh 
1679a734e738SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
1680a734e738SPavan Nikhilesh 		RTE_ETH_FOREACH_DEV(i) {
1681a734e738SPavan Nikhilesh 			rte_event_eth_tx_adapter_stop(i);
1682a734e738SPavan Nikhilesh 			rte_event_eth_tx_adapter_queue_del(i, i, -1);
1683a734e738SPavan Nikhilesh 			rte_eth_dev_tx_queue_stop(i, 0);
16847f3daf34SPavan Nikhilesh 			rte_eth_dev_stop(i);
16857f3daf34SPavan Nikhilesh 		}
16867f3daf34SPavan Nikhilesh 	}
16877f3daf34SPavan Nikhilesh }
16887f3daf34SPavan Nikhilesh 
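/*
 * Global cryptodev setup for the crypto-adapter producer: create the crypto
 * op pool and the symmetric/asymmetric session pools (sized for all producer
 * flows), optionally an event vector pool, and configure every cryptodev
 * with enough queue pairs to serve all producer lcores.
 */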
168941c219e6SJerin Jacob int
1690de2bc16eSShijith Thotton perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
1691de2bc16eSShijith Thotton {
1692de2bc16eSShijith Thotton 	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
1693de2bc16eSShijith Thotton 	struct test_perf *t = evt_test_priv(test);
1694de2bc16eSShijith Thotton 	unsigned int max_session_size;
1695de2bc16eSShijith Thotton 	uint32_t nb_sessions;
1696de2bc16eSShijith Thotton 	int ret;
1697de2bc16eSShijith Thotton 
1698de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
1699de2bc16eSShijith Thotton 		return 0;
1700de2bc16eSShijith Thotton 
1701de2bc16eSShijith Thotton 	cdev_count = rte_cryptodev_count();
1702de2bc16eSShijith Thotton 	if (cdev_count == 0) {
1703de2bc16eSShijith Thotton 		evt_err("No crypto devices available\n");
1704de2bc16eSShijith Thotton 		return -ENODEV;
1705de2bc16eSShijith Thotton 	}
1706de2bc16eSShijith Thotton 
1707de2bc16eSShijith Thotton 	t->ca_op_pool = rte_crypto_op_pool_create(
17088f5b5495SAkhil Goyal 		"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
1709750ab9d5SAakash Sasidharan 		128, sizeof(union rte_event_crypto_metadata) + EVT_CRYPTO_MAX_IV_SIZE,
17108f5b5495SAkhil Goyal 		rte_socket_id());
1711de2bc16eSShijith Thotton 	if (t->ca_op_pool == NULL) {
1712de2bc16eSShijith Thotton 		evt_err("Failed to create crypto op pool");
1713de2bc16eSShijith Thotton 		return -ENOMEM;
1714de2bc16eSShijith Thotton 	}
1715de2bc16eSShijith Thotton 
1716de2bc16eSShijith Thotton 	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
17178f5b5495SAkhil Goyal 	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
17188f5b5495SAkhil Goyal 		"ca_asym_sess_pool", nb_sessions, 0,
17198f5b5495SAkhil Goyal 		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
17208f5b5495SAkhil Goyal 	if (t->ca_asym_sess_pool == NULL) {
17218f5b5495SAkhil Goyal 		evt_err("Failed to create asym session pool");
17228f5b5495SAkhil Goyal 		ret = -ENOMEM;
17238f5b5495SAkhil Goyal 		goto err;
17248f5b5495SAkhil Goyal 	}
17258f5b5495SAkhil Goyal 
1726de2bc16eSShijith Thotton 	max_session_size = 0;
1727de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1728de2bc16eSShijith Thotton 		unsigned int session_size;
1729de2bc16eSShijith Thotton 
1730de2bc16eSShijith Thotton 		session_size =
1731de2bc16eSShijith Thotton 			rte_cryptodev_sym_get_private_session_size(cdev_id);
1732de2bc16eSShijith Thotton 		if (session_size > max_session_size)
1733de2bc16eSShijith Thotton 			max_session_size = session_size;
1734de2bc16eSShijith Thotton 	}
1735de2bc16eSShijith Thotton 
1736bdce2564SAkhil Goyal 	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
1737bdce2564SAkhil Goyal 		"ca_sess_pool", nb_sessions, max_session_size, 0,
1738bdce2564SAkhil Goyal 		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
1739bdce2564SAkhil Goyal 	if (t->ca_sess_pool == NULL) {
1740bdce2564SAkhil Goyal 		evt_err("Failed to create sym session pool");
1741de2bc16eSShijith Thotton 		ret = -ENOMEM;
1742de2bc16eSShijith Thotton 		goto err;
1743de2bc16eSShijith Thotton 	}
1744de2bc16eSShijith Thotton 
174569e807dfSVolodymyr Fialko 	if (opt->ena_vector) {
174669e807dfSVolodymyr Fialko 		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) * 2;
174769e807dfSVolodymyr Fialko 		nb_elem = RTE_MAX(512U, nb_elem);
174869e807dfSVolodymyr Fialko 		nb_elem += evt_nr_active_lcores(opt->wlcores) * 32;
174969e807dfSVolodymyr Fialko 		t->ca_vector_pool = rte_event_vector_pool_create("vector_pool", nb_elem, 32,
175069e807dfSVolodymyr Fialko 				opt->vector_size, opt->socket_id);
175169e807dfSVolodymyr Fialko 		if (t->ca_vector_pool == NULL) {
175269e807dfSVolodymyr Fialko 			evt_err("Failed to create event vector pool");
175369e807dfSVolodymyr Fialko 			ret = -ENOMEM;
175469e807dfSVolodymyr Fialko 			goto err;
175569e807dfSVolodymyr Fialko 		}
175669e807dfSVolodymyr Fialko 	}
175769e807dfSVolodymyr Fialko 
1758de2bc16eSShijith Thotton 	/*
1759de2bc16eSShijith Thotton 	 * Calculate the number of queue pairs needed, based on the number of
1760de2bc16eSShijith Thotton 	 * available producer logical cores and crypto devices. For instance,
1761de2bc16eSShijith Thotton 	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
1762de2bc16eSShijith Thotton 	 * up per device.
1763de2bc16eSShijith Thotton 	 */
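	/*
	 * e.g. 5 producer lcores and 2 cryptodevs: 5 % 2 != 0, so
	 * nb_qps = 5 / 2 + 1 = 3 queue pairs per device (ceiling division).
	 */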
1764de2bc16eSShijith Thotton 	nb_plcores = evt_nr_active_lcores(opt->plcores);
1765de2bc16eSShijith Thotton 	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
1766de2bc16eSShijith Thotton 					     nb_plcores / cdev_count;
1767de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1768de2bc16eSShijith Thotton 		struct rte_cryptodev_qp_conf qp_conf;
1769de2bc16eSShijith Thotton 		struct rte_cryptodev_config conf;
1770de2bc16eSShijith Thotton 		struct rte_cryptodev_info info;
1771de2bc16eSShijith Thotton 		int qp_id;
1772de2bc16eSShijith Thotton 
1773de2bc16eSShijith Thotton 		rte_cryptodev_info_get(cdev_id, &info);
1774de2bc16eSShijith Thotton 		if (nb_qps > info.max_nb_queue_pairs) {
1775de2bc16eSShijith Thotton 			evt_err("Not enough queue pairs per cryptodev (%u needed)",
1776de2bc16eSShijith Thotton 				nb_qps);
1777de2bc16eSShijith Thotton 			ret = -EINVAL;
1778de2bc16eSShijith Thotton 			goto err;
1779de2bc16eSShijith Thotton 		}
1780de2bc16eSShijith Thotton 
1781de2bc16eSShijith Thotton 		conf.nb_queue_pairs = nb_qps;
1782de2bc16eSShijith Thotton 		conf.socket_id = SOCKET_ID_ANY;
1783de2bc16eSShijith Thotton 		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
1784de2bc16eSShijith Thotton 
1785de2bc16eSShijith Thotton 		ret = rte_cryptodev_configure(cdev_id, &conf);
1786de2bc16eSShijith Thotton 		if (ret) {
1787de2bc16eSShijith Thotton 			evt_err("Failed to configure cryptodev (%u)", cdev_id);
1788de2bc16eSShijith Thotton 			goto err;
1789de2bc16eSShijith Thotton 		}
1790de2bc16eSShijith Thotton 
1791de2bc16eSShijith Thotton 		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
1792de2bc16eSShijith Thotton 		qp_conf.mp_session = t->ca_sess_pool;
1793de2bc16eSShijith Thotton 
1794de2bc16eSShijith Thotton 		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
1795de2bc16eSShijith Thotton 			ret = rte_cryptodev_queue_pair_setup(
1796de2bc16eSShijith Thotton 				cdev_id, qp_id, &qp_conf,
1797de2bc16eSShijith Thotton 				rte_cryptodev_socket_id(cdev_id));
1798de2bc16eSShijith Thotton 			if (ret) {
1799de2bc16eSShijith Thotton 				evt_err("Failed to setup queue pairs on cryptodev %u\n",
1800de2bc16eSShijith Thotton 					cdev_id);
1801de2bc16eSShijith Thotton 				goto err;
1802de2bc16eSShijith Thotton 			}
1803de2bc16eSShijith Thotton 		}
1804de2bc16eSShijith Thotton 	}
1805de2bc16eSShijith Thotton 
1806de2bc16eSShijith Thotton 	return 0;
1807de2bc16eSShijith Thotton err:
1808de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
1809de2bc16eSShijith Thotton 		rte_cryptodev_close(cdev_id);
1810de2bc16eSShijith Thotton 
1811de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_op_pool);
1812de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_sess_pool);
18138f5b5495SAkhil Goyal 	rte_mempool_free(t->ca_asym_sess_pool);
181469e807dfSVolodymyr Fialko 	rte_mempool_free(t->ca_vector_pool);
1815de2bc16eSShijith Thotton 
1816de2bc16eSShijith Thotton 	return ret;
1817de2bc16eSShijith Thotton }
1818de2bc16eSShijith Thotton 
1819de2bc16eSShijith Thotton void
1820de2bc16eSShijith Thotton perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
1821de2bc16eSShijith Thotton {
1822de2bc16eSShijith Thotton 	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
1823de2bc16eSShijith Thotton 	struct test_perf *t = evt_test_priv(test);
1824de2bc16eSShijith Thotton 	uint16_t port;
1825de2bc16eSShijith Thotton 
1826de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
1827de2bc16eSShijith Thotton 		return;
1828de2bc16eSShijith Thotton 
1829de2bc16eSShijith Thotton 	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
18302a440d6aSAkhil Goyal 		void *sess;
1831de2bc16eSShijith Thotton 		struct prod_data *p = &t->prod[port];
1832de2bc16eSShijith Thotton 		uint32_t flow_id;
1833de2bc16eSShijith Thotton 		uint8_t cdev_id;
1834de2bc16eSShijith Thotton 
1835de2bc16eSShijith Thotton 		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
1836de2bc16eSShijith Thotton 			sess = p->ca.crypto_sess[flow_id];
1837de2bc16eSShijith Thotton 			cdev_id = p->ca.cdev_id;
1838bdce2564SAkhil Goyal 			rte_cryptodev_sym_session_free(cdev_id, sess);
1839de2bc16eSShijith Thotton 		}
1840de2bc16eSShijith Thotton 
1841de2bc16eSShijith Thotton 		rte_event_crypto_adapter_queue_pair_del(
1842de2bc16eSShijith Thotton 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
1843de2bc16eSShijith Thotton 	}
1844de2bc16eSShijith Thotton 
1845de2bc16eSShijith Thotton 	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);
1846de2bc16eSShijith Thotton 
1847de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1848de2bc16eSShijith Thotton 		rte_cryptodev_stop(cdev_id);
1849de2bc16eSShijith Thotton 		rte_cryptodev_close(cdev_id);
1850de2bc16eSShijith Thotton 	}
1851de2bc16eSShijith Thotton 
1852de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_op_pool);
1853de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_sess_pool);
18548f5b5495SAkhil Goyal 	rte_mempool_free(t->ca_asym_sess_pool);
185569e807dfSVolodymyr Fialko 	rte_mempool_free(t->ca_vector_pool);
1856de2bc16eSShijith Thotton }
1857de2bc16eSShijith Thotton 
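/*
 * Mempool element size depends on the producer type: synthetic and timer
 * producers carry a struct perf_elt, asymmetric crypto producers also need
 * room for the MODEX result, and all other producers use regular mbufs.
 */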
1858de2bc16eSShijith Thotton int
185941c219e6SJerin Jacob perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
186041c219e6SJerin Jacob {
186141c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
186241c219e6SJerin Jacob 
1863d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
1864d008f20bSPavan Nikhilesh 			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
186541c219e6SJerin Jacob 		t->pool = rte_mempool_create(test->name, /* mempool name */
186641c219e6SJerin Jacob 				opt->pool_sz, /* number of elements*/
186741c219e6SJerin Jacob 				sizeof(struct perf_elt), /* element size*/
186841c219e6SJerin Jacob 				512, /* cache size*/
186941c219e6SJerin Jacob 				0, NULL, NULL,
187041c219e6SJerin Jacob 				perf_elt_init, /* obj constructor */
187141c219e6SJerin Jacob 				NULL, opt->socket_id, 0); /* flags */
18726776a581SVolodymyr Fialko 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&
18736776a581SVolodymyr Fialko 			opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)  {
18746776a581SVolodymyr Fialko 		t->pool = rte_mempool_create(test->name, /* mempool name */
18756776a581SVolodymyr Fialko 				opt->pool_sz, /* number of elements*/
18766776a581SVolodymyr Fialko 				sizeof(struct perf_elt) + modex_test_case.result_len,
18776776a581SVolodymyr Fialko 				/* element size*/
18786776a581SVolodymyr Fialko 				512, /* cache size*/
18796776a581SVolodymyr Fialko 				0, NULL, NULL,
18806776a581SVolodymyr Fialko 				NULL, /* obj constructor */
18816776a581SVolodymyr Fialko 				NULL, opt->socket_id, 0); /* flags */
18828577cc1aSPavan Nikhilesh 	} else {
18838577cc1aSPavan Nikhilesh 		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
18848577cc1aSPavan Nikhilesh 				opt->pool_sz, /* number of elements*/
18858577cc1aSPavan Nikhilesh 				512, /* cache size*/
18868577cc1aSPavan Nikhilesh 				0,
18878577cc1aSPavan Nikhilesh 				RTE_MBUF_DEFAULT_BUF_SIZE,
18888577cc1aSPavan Nikhilesh 				opt->socket_id); /* flags */
18898577cc1aSPavan Nikhilesh 
18908577cc1aSPavan Nikhilesh 	}
18918577cc1aSPavan Nikhilesh 
189241c219e6SJerin Jacob 	if (t->pool == NULL) {
189341c219e6SJerin Jacob 		evt_err("failed to create mempool");
189441c219e6SJerin Jacob 		return -ENOMEM;
189541c219e6SJerin Jacob 	}
189641c219e6SJerin Jacob 
189741c219e6SJerin Jacob 	return 0;
189841c219e6SJerin Jacob }
189941c219e6SJerin Jacob 
190041c219e6SJerin Jacob void
190141c219e6SJerin Jacob perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
190241c219e6SJerin Jacob {
190341c219e6SJerin Jacob 	RTE_SET_USED(opt);
190441c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
190541c219e6SJerin Jacob 
190641c219e6SJerin Jacob 	rte_mempool_free(t->pool);
190741c219e6SJerin Jacob }
1908ffbae86fSJerin Jacob 
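/*
 * Allocate the per-test context and derive the total number of outstanding
 * packets: nb_timers (timer producer) or nb_pkts (other producers)
 * multiplied by the number of active producer lcores.
 */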
1909ffbae86fSJerin Jacob int
1910ffbae86fSJerin Jacob perf_test_setup(struct evt_test *test, struct evt_options *opt)
1911ffbae86fSJerin Jacob {
1912ffbae86fSJerin Jacob 	void *test_perf;
1913ffbae86fSJerin Jacob 
1914ffbae86fSJerin Jacob 	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
1915ffbae86fSJerin Jacob 				RTE_CACHE_LINE_SIZE, opt->socket_id);
1916ffbae86fSJerin Jacob 	if (test_perf == NULL) {
1917ffbae86fSJerin Jacob 		evt_err("failed to allocate test_perf memory");
1918ffbae86fSJerin Jacob 		goto nomem;
1919ffbae86fSJerin Jacob 	}
1920ffbae86fSJerin Jacob 	test->test_priv = test_perf;
1921ffbae86fSJerin Jacob 
1922ffbae86fSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
1923ffbae86fSJerin Jacob 
1924d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1925d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_timers *
1926d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
1927d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_timers;
1928d008f20bSPavan Nikhilesh 	} else {
1929d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_pkts *
1930d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
1931d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_pkts;
1932d008f20bSPavan Nikhilesh 	}
1933d008f20bSPavan Nikhilesh 
1934ffbae86fSJerin Jacob 	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
1935ffbae86fSJerin Jacob 	t->done = false;
1936ffbae86fSJerin Jacob 	t->nb_flows = opt->nb_flows;
1937ffbae86fSJerin Jacob 	t->result = EVT_TEST_FAILED;
1938ffbae86fSJerin Jacob 	t->opt = opt;
1939ffbae86fSJerin Jacob 	memcpy(t->sched_type_list, opt->sched_type_list,
1940ffbae86fSJerin Jacob 			sizeof(opt->sched_type_list));
1941ffbae86fSJerin Jacob 	return 0;
1942ffbae86fSJerin Jacob nomem:
1943ffbae86fSJerin Jacob 	return -ENOMEM;
1944ffbae86fSJerin Jacob }
1945ffbae86fSJerin Jacob 
1946ffbae86fSJerin Jacob void
1947ffbae86fSJerin Jacob perf_test_destroy(struct evt_test *test, struct evt_options *opt)
1948ffbae86fSJerin Jacob {
1949ffbae86fSJerin Jacob 	RTE_SET_USED(opt);
1950ffbae86fSJerin Jacob 
1951ffbae86fSJerin Jacob 	rte_free(test->test_priv);
1952ffbae86fSJerin Jacob }
1953