xref: /dpdk/app/test-eventdev/test_perf_common.c (revision bca734c27e345af500d0d951421584e2567cd107)
153a3b7e8SJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause
253a3b7e8SJerin Jacob  * Copyright(c) 2017 Cavium, Inc
3ffbae86fSJerin Jacob  */
4ffbae86fSJerin Jacob 
5626b12a8SPavan Nikhilesh #include <math.h>
6626b12a8SPavan Nikhilesh 
7ffbae86fSJerin Jacob #include "test_perf_common.h"
8ffbae86fSJerin Jacob 
98ffe7ee5SAakash Sasidharan #define NB_CRYPTODEV_DESCRIPTORS 4096
108f5b5495SAkhil Goyal #define DATA_SIZE		512
11750ab9d5SAakash Sasidharan #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
12750ab9d5SAakash Sasidharan 		   sizeof(struct rte_crypto_sym_op) + \
13750ab9d5SAakash Sasidharan 		   sizeof(union rte_event_crypto_metadata))
14750ab9d5SAakash Sasidharan 
/* Test vector for a modular-exponentiation (MODEX) asymmetric crypto
 * operation: reminder = (base ^ exponent) mod modulus.
 * Each operand is a raw byte buffer plus its valid length.
 */
struct modex_test_data {
	enum rte_crypto_asym_xform_type xform_type; /* asym transform type */
	struct {
		uint8_t data[DATA_SIZE]; /* operand bytes -- presumably big-endian, per cryptodev convention; confirm */
		uint16_t len;            /* valid bytes in data[] */
	} base;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} exponent;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} modulus;
	struct {
		uint8_t data[DATA_SIZE]; /* expected result of the operation */
		uint16_t len;
	} reminder;
	uint16_t result_len; /* size of the result buffer to supply */
};
358f5b5495SAkhil Goyal 
/* Single MODEX test vector used by the crypto-adapter producers below.
 * base is a 20-byte message, exponent is the common RSA public exponent
 * 65537 (0x010001), and modulus is a 1024-bit value; reminder holds the
 * expected 128-byte result.
 */
static struct
modex_test_data modex_test_case = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	.base = {
		.data = {
			0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
			0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
			0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
		},
		.len = 20,
	},
	.exponent = {
		.data = {
			0x01, 0x00, 0x01
		},
		.len = 3,
	},
	.reminder = {
		.data = {
			0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
			0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
			0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
			0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
			0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
			0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
			0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
			0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
			0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
			0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
			0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
			0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
			0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
			0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
			0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
			0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
		},
		.len = 128,
	},
	.modulus = {
		.data = {
			0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
			0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
			0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
			0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
			0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
			0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
			0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
			0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
			0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
			0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
			0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
			0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
			0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
			0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
			0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
			0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
		},
		.len = 128,
	},
	.result_len = 128,
};
97de2bc16eSShijith Thotton 
9841c219e6SJerin Jacob int
9941c219e6SJerin Jacob perf_test_result(struct evt_test *test, struct evt_options *opt)
10041c219e6SJerin Jacob {
10141c219e6SJerin Jacob 	RTE_SET_USED(opt);
1026b1a14a8SPavan Nikhilesh 	int i;
1036b1a14a8SPavan Nikhilesh 	uint64_t total = 0;
10441c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
10541c219e6SJerin Jacob 
1066b1a14a8SPavan Nikhilesh 	printf("Packet distribution across worker cores :\n");
1076b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
1086b1a14a8SPavan Nikhilesh 		total += t->worker[i].processed_pkts;
1096b1a14a8SPavan Nikhilesh 	for (i = 0; i < t->nb_workers; i++)
1106b1a14a8SPavan Nikhilesh 		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
111c0900d33SHarry van Haaren 				CLGRN" %3.2f"CLNRM"\n", i,
1126b1a14a8SPavan Nikhilesh 				t->worker[i].processed_pkts,
1136b1a14a8SPavan Nikhilesh 				(((double)t->worker[i].processed_pkts)/total)
1146b1a14a8SPavan Nikhilesh 				* 100);
1156b1a14a8SPavan Nikhilesh 
11641c219e6SJerin Jacob 	return t->result;
11741c219e6SJerin Jacob }
11841c219e6SJerin Jacob 
1199d3aeb18SJerin Jacob static inline int
1209d3aeb18SJerin Jacob perf_producer(void *arg)
1219d3aeb18SJerin Jacob {
1229a618803SPavan Nikhilesh 	int i;
1239d3aeb18SJerin Jacob 	struct prod_data *p  = arg;
1249d3aeb18SJerin Jacob 	struct test_perf *t = p->t;
1259d3aeb18SJerin Jacob 	struct evt_options *opt = t->opt;
1269d3aeb18SJerin Jacob 	const uint8_t dev_id = p->dev_id;
1279d3aeb18SJerin Jacob 	const uint8_t port = p->port_id;
1289d3aeb18SJerin Jacob 	struct rte_mempool *pool = t->pool;
1299d3aeb18SJerin Jacob 	const uint64_t nb_pkts = t->nb_pkts;
1309d3aeb18SJerin Jacob 	const uint32_t nb_flows = t->nb_flows;
1319d3aeb18SJerin Jacob 	uint32_t flow_counter = 0;
1329d3aeb18SJerin Jacob 	uint64_t count = 0;
1339a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
134f123568cSPavan Nikhilesh 	uint8_t enable_fwd_latency;
1359d3aeb18SJerin Jacob 	struct rte_event ev;
1369d3aeb18SJerin Jacob 
137f123568cSPavan Nikhilesh 	enable_fwd_latency = opt->fwd_latency;
1389d3aeb18SJerin Jacob 	if (opt->verbose_level > 1)
1399d3aeb18SJerin Jacob 		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
1409d3aeb18SJerin Jacob 				rte_lcore_id(), dev_id, port, p->queue_id);
1419d3aeb18SJerin Jacob 
1429d3aeb18SJerin Jacob 	ev.event = 0;
1439d3aeb18SJerin Jacob 	ev.op = RTE_EVENT_OP_NEW;
1449d3aeb18SJerin Jacob 	ev.queue_id = p->queue_id;
1459d3aeb18SJerin Jacob 	ev.sched_type = t->opt->sched_type_list[0];
1469d3aeb18SJerin Jacob 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1479d3aeb18SJerin Jacob 	ev.event_type =  RTE_EVENT_TYPE_CPU;
1489d3aeb18SJerin Jacob 	ev.sub_event_type = 0; /* stage 0 */
1499d3aeb18SJerin Jacob 
1509d3aeb18SJerin Jacob 	while (count < nb_pkts && t->done == false) {
1519a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
1529d3aeb18SJerin Jacob 			continue;
1539a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
1549d3aeb18SJerin Jacob 			ev.flow_id = flow_counter++ % nb_flows;
1559a618803SPavan Nikhilesh 			ev.event_ptr = m[i];
156f123568cSPavan Nikhilesh 			if (enable_fwd_latency)
1579a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
158f123568cSPavan Nikhilesh 			while (rte_event_enqueue_new_burst(dev_id, port, &ev,
159f123568cSPavan Nikhilesh 							   1) != 1) {
1609d3aeb18SJerin Jacob 				if (t->done)
1619d3aeb18SJerin Jacob 					break;
1629d3aeb18SJerin Jacob 				rte_pause();
163f123568cSPavan Nikhilesh 				if (enable_fwd_latency)
164f123568cSPavan Nikhilesh 					m[i]->timestamp =
165f123568cSPavan Nikhilesh 						rte_get_timer_cycles();
1669d3aeb18SJerin Jacob 			}
1679a618803SPavan Nikhilesh 		}
1689a618803SPavan Nikhilesh 		count += BURST_SIZE;
1699d3aeb18SJerin Jacob 	}
1709d3aeb18SJerin Jacob 
1719d3aeb18SJerin Jacob 	return 0;
1729d3aeb18SJerin Jacob }
1739d3aeb18SJerin Jacob 
174d008f20bSPavan Nikhilesh static inline int
17520841a25SRashmi Shetty perf_producer_burst(void *arg)
17620841a25SRashmi Shetty {
17720841a25SRashmi Shetty 	uint32_t i;
17820841a25SRashmi Shetty 	uint64_t timestamp;
17920841a25SRashmi Shetty 	struct prod_data *p  = arg;
18020841a25SRashmi Shetty 	struct test_perf *t = p->t;
18120841a25SRashmi Shetty 	struct evt_options *opt = t->opt;
18220841a25SRashmi Shetty 	const uint8_t dev_id = p->dev_id;
18320841a25SRashmi Shetty 	const uint8_t port = p->port_id;
18420841a25SRashmi Shetty 	struct rte_mempool *pool = t->pool;
18520841a25SRashmi Shetty 	const uint64_t nb_pkts = t->nb_pkts;
18620841a25SRashmi Shetty 	const uint32_t nb_flows = t->nb_flows;
18720841a25SRashmi Shetty 	uint32_t flow_counter = 0;
18820841a25SRashmi Shetty 	uint16_t enq = 0;
18920841a25SRashmi Shetty 	uint64_t count = 0;
190f123568cSPavan Nikhilesh 	struct perf_elt *m[opt->prod_enq_burst_sz + 1];
191f123568cSPavan Nikhilesh 	struct rte_event ev[opt->prod_enq_burst_sz + 1];
19220841a25SRashmi Shetty 	uint32_t burst_size = opt->prod_enq_burst_sz;
193f123568cSPavan Nikhilesh 	uint8_t enable_fwd_latency;
19420841a25SRashmi Shetty 
195f123568cSPavan Nikhilesh 	enable_fwd_latency = opt->fwd_latency;
196f123568cSPavan Nikhilesh 	memset(m, 0, sizeof(*m) * (opt->prod_enq_burst_sz + 1));
19720841a25SRashmi Shetty 	if (opt->verbose_level > 1)
19820841a25SRashmi Shetty 		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
19920841a25SRashmi Shetty 				rte_lcore_id(), dev_id, port, p->queue_id);
20020841a25SRashmi Shetty 
20120841a25SRashmi Shetty 	for (i = 0; i < burst_size; i++) {
20220841a25SRashmi Shetty 		ev[i].op = RTE_EVENT_OP_NEW;
20320841a25SRashmi Shetty 		ev[i].queue_id = p->queue_id;
20420841a25SRashmi Shetty 		ev[i].sched_type = t->opt->sched_type_list[0];
20520841a25SRashmi Shetty 		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
20620841a25SRashmi Shetty 		ev[i].event_type =  RTE_EVENT_TYPE_CPU;
20720841a25SRashmi Shetty 		ev[i].sub_event_type = 0; /* stage 0 */
20820841a25SRashmi Shetty 	}
20920841a25SRashmi Shetty 
21020841a25SRashmi Shetty 	while (count < nb_pkts && t->done == false) {
21120841a25SRashmi Shetty 		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
21220841a25SRashmi Shetty 			continue;
21320841a25SRashmi Shetty 		timestamp = rte_get_timer_cycles();
21420841a25SRashmi Shetty 		for (i = 0; i < burst_size; i++) {
21520841a25SRashmi Shetty 			ev[i].flow_id = flow_counter++ % nb_flows;
21620841a25SRashmi Shetty 			ev[i].event_ptr = m[i];
217f123568cSPavan Nikhilesh 			if (enable_fwd_latency)
21820841a25SRashmi Shetty 				m[i]->timestamp = timestamp;
21920841a25SRashmi Shetty 		}
220f123568cSPavan Nikhilesh 		enq = rte_event_enqueue_new_burst(dev_id, port, ev, burst_size);
22120841a25SRashmi Shetty 		while (enq < burst_size) {
222f123568cSPavan Nikhilesh 			enq += rte_event_enqueue_new_burst(
223f123568cSPavan Nikhilesh 				dev_id, port, ev + enq, burst_size - enq);
22420841a25SRashmi Shetty 			if (t->done)
22520841a25SRashmi Shetty 				break;
22620841a25SRashmi Shetty 			rte_pause();
227f123568cSPavan Nikhilesh 			if (enable_fwd_latency) {
22820841a25SRashmi Shetty 				timestamp = rte_get_timer_cycles();
22920841a25SRashmi Shetty 				for (i = enq; i < burst_size; i++)
23020841a25SRashmi Shetty 					m[i]->timestamp = timestamp;
23120841a25SRashmi Shetty 			}
232f123568cSPavan Nikhilesh 		}
23320841a25SRashmi Shetty 		count += burst_size;
23420841a25SRashmi Shetty 	}
23520841a25SRashmi Shetty 	return 0;
23620841a25SRashmi Shetty }
23720841a25SRashmi Shetty 
23820841a25SRashmi Shetty static inline int
239d008f20bSPavan Nikhilesh perf_event_timer_producer(void *arg)
240d008f20bSPavan Nikhilesh {
2419a618803SPavan Nikhilesh 	int i;
242d008f20bSPavan Nikhilesh 	struct prod_data *p  = arg;
243d008f20bSPavan Nikhilesh 	struct test_perf *t = p->t;
244d008f20bSPavan Nikhilesh 	struct evt_options *opt = t->opt;
245d008f20bSPavan Nikhilesh 	uint32_t flow_counter = 0;
246d008f20bSPavan Nikhilesh 	uint64_t count = 0;
247d008f20bSPavan Nikhilesh 	uint64_t arm_latency = 0;
248d008f20bSPavan Nikhilesh 	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
249d008f20bSPavan Nikhilesh 	const uint32_t nb_flows = t->nb_flows;
250d008f20bSPavan Nikhilesh 	const uint64_t nb_timers = opt->nb_timers;
251d008f20bSPavan Nikhilesh 	struct rte_mempool *pool = t->pool;
2529a618803SPavan Nikhilesh 	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
253d008f20bSPavan Nikhilesh 	struct rte_event_timer_adapter **adptr = t->timer_adptr;
25452553263SPavan Nikhilesh 	struct rte_event_timer tim;
255d008f20bSPavan Nikhilesh 	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
256d008f20bSPavan Nikhilesh 
25752553263SPavan Nikhilesh 	memset(&tim, 0, sizeof(struct rte_event_timer));
258626b12a8SPavan Nikhilesh 	timeout_ticks =
259626b12a8SPavan Nikhilesh 		opt->optm_timer_tick_nsec
260626b12a8SPavan Nikhilesh 			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
261626b12a8SPavan Nikhilesh 			       opt->optm_timer_tick_nsec)
262626b12a8SPavan Nikhilesh 			: timeout_ticks;
263d008f20bSPavan Nikhilesh 	timeout_ticks += timeout_ticks ? 0 : 1;
26452553263SPavan Nikhilesh 	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
26552553263SPavan Nikhilesh 	tim.ev.op = RTE_EVENT_OP_NEW;
26652553263SPavan Nikhilesh 	tim.ev.sched_type = t->opt->sched_type_list[0];
26752553263SPavan Nikhilesh 	tim.ev.queue_id = p->queue_id;
26852553263SPavan Nikhilesh 	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
26952553263SPavan Nikhilesh 	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
27052553263SPavan Nikhilesh 	tim.timeout_ticks = timeout_ticks;
271d008f20bSPavan Nikhilesh 
272d008f20bSPavan Nikhilesh 	if (opt->verbose_level > 1)
273d008f20bSPavan Nikhilesh 		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
274d008f20bSPavan Nikhilesh 
275d008f20bSPavan Nikhilesh 	while (count < nb_timers && t->done == false) {
2769a618803SPavan Nikhilesh 		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
277d008f20bSPavan Nikhilesh 			continue;
2789a618803SPavan Nikhilesh 		for (i = 0; i < BURST_SIZE; i++) {
2799a618803SPavan Nikhilesh 			rte_prefetch0(m[i + 1]);
2809a618803SPavan Nikhilesh 			m[i]->tim = tim;
2819a618803SPavan Nikhilesh 			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
2829a618803SPavan Nikhilesh 			m[i]->tim.ev.event_ptr = m[i];
2839a618803SPavan Nikhilesh 			m[i]->timestamp = rte_get_timer_cycles();
284d008f20bSPavan Nikhilesh 			while (rte_event_timer_arm_burst(
285d008f20bSPavan Nikhilesh 			       adptr[flow_counter % nb_timer_adptrs],
2869a618803SPavan Nikhilesh 			       (struct rte_event_timer **)&m[i], 1) != 1) {
287d008f20bSPavan Nikhilesh 				if (t->done)
288d008f20bSPavan Nikhilesh 					break;
2899a618803SPavan Nikhilesh 				m[i]->timestamp = rte_get_timer_cycles();
290d008f20bSPavan Nikhilesh 			}
2919a618803SPavan Nikhilesh 			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
2929a618803SPavan Nikhilesh 		}
2939a618803SPavan Nikhilesh 		count += BURST_SIZE;
294d008f20bSPavan Nikhilesh 	}
295d008f20bSPavan Nikhilesh 	fflush(stdout);
296d008f20bSPavan Nikhilesh 	rte_delay_ms(1000);
297d008f20bSPavan Nikhilesh 	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
29893b7794bSPavan Nikhilesh 			__func__, rte_lcore_id(),
29993b7794bSPavan Nikhilesh 			count ? (float)(arm_latency / count) /
30093b7794bSPavan Nikhilesh 			(rte_get_timer_hz() / 1000000) : 0);
301d008f20bSPavan Nikhilesh 	return 0;
302d008f20bSPavan Nikhilesh }
303d008f20bSPavan Nikhilesh 
/* Burst variant of the event timer producer: arms BURST_SIZE timers at
 * once with a common timeout via rte_event_timer_arm_tmo_tick_burst().
 */
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p  = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	/* Extra NULL slot keeps the rte_prefetch0(m[i + 1]) read valid. */
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	/* Rescale ticks if the adapter granted an optimized resolution. */
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1; /* arm for at least one tick */
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue; /* pool temporarily empty; retry */
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		/* NOTE(review): the return value (number of timers
		 * actually armed) is ignored here, unlike the per-timer
		 * retry loop in perf_event_timer_producer(); timers that
		 * fail to arm are silently dropped yet still counted --
		 * verify this is intentional.
		 */
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		/* Latency sampled from the last element of the burst. */
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000); /* let in-flight timer events drain */
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)(arm_latency / count) /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}
36717b22d0bSPavan Nikhilesh 
/* Producer for the crypto adapter in OP_NEW mode: builds crypto ops
 * (symmetric cipher on an mbuf, or asymmetric MODEX) and enqueues them
 * directly to the cryptodev queue pair; the adapter then forwards the
 * completions as events. Runs until nb_pkts ops are enqueued or the
 * test is flagged done.
 */
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	uint16_t data_length, data_offset;
	struct evt_options *opt = t->opt;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_mbuf *m;
	struct rte_event ev;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	/* The first sizeof(struct perf_elt) bytes of the mbuf data are
	 * reserved for the perf element; cipher only the payload after it.
	 */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	/* Bit-mode ciphers express offset/length in bits, not bytes. */
	if (opt->crypto_cipher_bit_mode) {
		data_offset = offset << 3;
		data_length = (len - offset) << 3;
	} else {
		data_offset = offset;
		data_length = len - offset;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;

			sym_op->cipher.data.offset = data_offset;
			sym_op->cipher.data.length = data_length;

			/* Sessions are pre-created per flow. */
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			/* A pool element is reused as the MODEX result
			 * buffer; assumes the element size is at least
			 * modex_test_case.result_len -- TODO confirm.
			 */
			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			/* Exponent/modulus presumably come from the
			 * session's xform; only base and result are set
			 * per op.
			 */
			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		/* Spin until the cryptodev queue pair accepts the op. */
		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
				t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}
465de2bc16eSShijith Thotton 
/* Producer for the crypto adapter in OP_FORWARD mode: builds crypto ops
 * like crypto_adapter_enq_op_new() but wraps each op in an event and
 * enqueues it through rte_event_crypto_adapter_enqueue(), letting the
 * event device forward it to the cryptodev.
 *
 * NOTE(review): unlike the OP_NEW variant, this path does not apply the
 * crypto_cipher_bit_mode offset/length scaling -- verify whether bit
 * mode is unsupported in forward mode or simply missing here.
 */
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_event ev;
	struct rte_mbuf *m;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	/* Event template; event_ptr is set per op below. */
	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;

	/* First sizeof(struct perf_elt) bytes of the mbuf are reserved
	 * for the perf element; cipher only the payload after it.
	 */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = offset;
			sym_op->cipher.data.length = len - offset;
			/* Sessions are pre-created per flow. */
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			/* Reuse a pool element as the MODEX result buffer;
			 * assumes element size >= result_len -- TODO confirm.
			 */
			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		ev.event_ptr = op;

		/* Spin until the adapter accepts the event. */
		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
		       t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}
561de2bc16eSShijith Thotton 
/*
 * Forward-mode producer loop for the DMA adapter.
 *
 * Builds bursts of rte_event_dma_adapter_op descriptors (one cache line
 * copied per flow from a zeroed src buffer to a zeroed dst buffer) and
 * forwards them to the event device port as NEW events; the DMA adapter
 * dequeues them and submits the copies to the DMA device.
 * Runs until nb_pkts ops have been produced or the test is stopped.
 */
static inline void
dma_adapter_enq_op_fwd(struct prod_data *p)
{
	struct rte_event_dma_adapter_op *ops[BURST_SIZE] = {NULL};
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_event_dma_adapter_op op;
	struct rte_event evts[BURST_SIZE];
	const uint8_t dev_id = p->dev_id;
	struct evt_options *opt = t->opt;
	const uint8_t port = p->port_id;
	uint32_t flow_counter = 0;
	struct rte_mempool *pool;
	struct rte_event ev;
	uint8_t *src, *dst;
	uint64_t count = 0;
	uint32_t flow;
	int i;

	pool = t->pool;
	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d dma_dev_id %u dma_dev_vchan_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->da.dma_dev_id, p->da.vchan_id);

	/* One cache line of source and destination space per flow.
	 * NOTE(review): these buffers are never freed on the normal exit
	 * path below, so they leak until process exit; freeing them here
	 * would race with DMA transfers still in flight, so a proper fix
	 * needs completion draining first.
	 */
	src = rte_zmalloc(NULL, nb_flows * RTE_CACHE_LINE_SIZE, RTE_CACHE_LINE_SIZE);
	dst = rte_zmalloc(NULL, nb_flows * RTE_CACHE_LINE_SIZE, RTE_CACHE_LINE_SIZE);
	if (!src || !dst) {
		/* rte_free(NULL) is a no-op, so this is safe if only one
		 * allocation succeeded.
		 */
		rte_free(src);
		rte_free(dst);
		evt_err("Failed to alloc memory for src/dst");
		return;
	}

	/* Template event; per-burst events only differ in flow_id/event_ptr. */
	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;

	/* Template DMA op; single source/destination segment, submitted
	 * immediately by the adapter (RTE_DMA_OP_FLAG_SUBMIT).
	 */
	op.dma_dev_id = p->da.dma_dev_id;
	op.vchan = p->da.vchan_id;
	op.op_mp = pool;
	op.flags = RTE_DMA_OP_FLAG_SUBMIT;
	op.nb_src = 1;
	op.nb_dst = 1;

	while (count < nb_pkts && t->done == false) {
		/* Retry op allocation until a full burst is available. */
		if (rte_mempool_get_bulk(pool, (void **)ops, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			flow = flow_counter++ % nb_flows;
			*ops[i] = op;
			/* NOTE(review): casting a virtual address to
			 * rte_iova_t assumes IOVA-as-VA mode — confirm for
			 * setups running with IOVA-as-PA.
			 */
			ops[i]->src_dst_seg[0].addr = (rte_iova_t)&src[flow * RTE_CACHE_LINE_SIZE];
			ops[i]->src_dst_seg[1].addr = (rte_iova_t)&dst[flow * RTE_CACHE_LINE_SIZE];
			ops[i]->src_dst_seg[0].length = RTE_CACHE_LINE_SIZE;
			ops[i]->src_dst_seg[1].length = RTE_CACHE_LINE_SIZE;

			evts[i].event = ev.event;
			evts[i].flow_id = flow;
			evts[i].event_ptr = ops[i];
		}

		/* Keep enqueuing the remainder of the burst until it is all
		 * accepted, unless the test is stopped first.
		 */
		i = rte_event_dma_adapter_enqueue(dev_id, port, evts, BURST_SIZE);
		while (i < BURST_SIZE) {
			i += rte_event_dma_adapter_enqueue(dev_id, port, evts + i, BURST_SIZE - i);
			if (t->done)
				break;
			rte_pause();
		}

		/* Counts the whole burst even if the enqueue loop was cut
		 * short by t->done; harmless since the test is ending.
		 */
		count += BURST_SIZE;
	}
}
637b25a66c4SAmit Prakash Shukla 
/*
 * Producer entry point for the DMA adapter. Forward mode is the only
 * operation mode supported, so dispatch straight to it.
 */
static inline int
perf_event_dma_producer(void *arg)
{
	/* Only fwd mode is supported. */
	dma_adapter_enq_op_fwd((struct prod_data *)arg);

	return 0;
}
648b25a66c4SAmit Prakash Shukla 
649de2bc16eSShijith Thotton static inline int
650de2bc16eSShijith Thotton perf_event_crypto_producer(void *arg)
651de2bc16eSShijith Thotton {
652de2bc16eSShijith Thotton 	struct prod_data *p = arg;
653de2bc16eSShijith Thotton 	struct evt_options *opt = p->t->opt;
654de2bc16eSShijith Thotton 
655de2bc16eSShijith Thotton 	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
656de2bc16eSShijith Thotton 		crypto_adapter_enq_op_new(p);
657de2bc16eSShijith Thotton 	else
658de2bc16eSShijith Thotton 		crypto_adapter_enq_op_fwd(p);
659de2bc16eSShijith Thotton 
660de2bc16eSShijith Thotton 	return 0;
661de2bc16eSShijith Thotton }
662de2bc16eSShijith Thotton 
/*
 * NEW-mode burst producer for the crypto adapter.
 *
 * Allocates crypto ops in bursts of opt->prod_enq_burst_sz and enqueues
 * them directly to the cryptodev queue pair (in NEW mode the adapter picks
 * up the completions and injects them into the event device).
 * Symmetric ops carry a freshly allocated mbuf; asymmetric ops run the
 * modex_test_case vectors with a per-op result buffer from t->pool.
 */
static void
crypto_adapter_enq_op_new_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t  i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	/* Cipher data starts after the perf_elt header used for latency
	 * measurement; mbuf payload must hold at least a minimal frame.
	 */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			/* All-or-nothing allocation: on partial failure,
			 * count it and retry the whole burst.
			 */
			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				/* Roll back the crypto ops before retrying. */
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			/* Attach one mbuf and one per-flow session per op. */
			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			/* Result buffers for modex come from the main pool. */
			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		}

		/* Spin until the full burst is accepted by the cryptodev,
		 * or the test is stopped.
		 */
		enq = 0;
		while (!t->done) {
			enq += rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops_burst + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}
7639c3096d4SVolodymyr Fialko 
/*
 * FORWARD-mode burst producer for the crypto adapter.
 *
 * Same op construction as crypto_adapter_enq_op_new_burst(), but instead of
 * enqueuing to the cryptodev directly, each crypto op is wrapped in an event
 * and enqueued to the event device port via
 * rte_event_crypto_adapter_enqueue(); the adapter then forwards the ops to
 * the cryptodev.
 */
static void
crypto_adapter_enq_op_fwd_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE];
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t  i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	/* Cipher data starts after the perf_elt header used for latency
	 * measurement; mbuf payload must hold at least a minimal frame.
	 */
	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	/* Events are constant across iterations except for event_ptr,
	 * so initialize them once up front.
	 */
	for (i = 0; i < burst_size; i++) {
		ev[i].event = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			/* All-or-nothing allocation: on partial failure,
			 * count it and retry the whole burst.
			 */
			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				/* Roll back the crypto ops before retrying. */
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			/* Attach one mbuf and one per-flow session per op. */
			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			/* Result buffers for modex come from the main pool. */
			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		}

		/* Spin until the full burst of events is accepted by the
		 * adapter, or the test is stopped.
		 */
		enq = 0;
		while (!t->done) {
			enq += rte_event_crypto_adapter_enqueue(dev_id, port, ev + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}
8759c3096d4SVolodymyr Fialko 
8769c3096d4SVolodymyr Fialko static inline int
8779c3096d4SVolodymyr Fialko perf_event_crypto_producer_burst(void *arg)
8789c3096d4SVolodymyr Fialko {
8799c3096d4SVolodymyr Fialko 	struct prod_data *p = arg;
8809c3096d4SVolodymyr Fialko 	struct evt_options *opt = p->t->opt;
8819c3096d4SVolodymyr Fialko 
8829c3096d4SVolodymyr Fialko 	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
8839c3096d4SVolodymyr Fialko 		crypto_adapter_enq_op_new_burst(p);
8849c3096d4SVolodymyr Fialko 	else
8859c3096d4SVolodymyr Fialko 		crypto_adapter_enq_op_fwd_burst(p);
8869c3096d4SVolodymyr Fialko 
8879c3096d4SVolodymyr Fialko 	return 0;
8889c3096d4SVolodymyr Fialko }
8899c3096d4SVolodymyr Fialko 
89059f697e3SPavan Nikhilesh static int
89159f697e3SPavan Nikhilesh perf_producer_wrapper(void *arg)
89259f697e3SPavan Nikhilesh {
893f123568cSPavan Nikhilesh 	struct rte_event_dev_info dev_info;
89459f697e3SPavan Nikhilesh 	struct prod_data *p  = arg;
89559f697e3SPavan Nikhilesh 	struct test_perf *t = p->t;
896f123568cSPavan Nikhilesh 
897f123568cSPavan Nikhilesh 	rte_event_dev_info_get(p->dev_id, &dev_info);
898f123568cSPavan Nikhilesh 	if (!t->opt->prod_enq_burst_sz) {
899f123568cSPavan Nikhilesh 		t->opt->prod_enq_burst_sz = MAX_PROD_ENQ_BURST_SIZE;
900f123568cSPavan Nikhilesh 		if (dev_info.max_event_port_enqueue_depth > 0 &&
901f123568cSPavan Nikhilesh 		    (uint32_t)dev_info.max_event_port_enqueue_depth <
902f123568cSPavan Nikhilesh 			    t->opt->prod_enq_burst_sz)
903f123568cSPavan Nikhilesh 			t->opt->prod_enq_burst_sz =
904f123568cSPavan Nikhilesh 				dev_info.max_event_port_enqueue_depth;
905f123568cSPavan Nikhilesh 	}
90620841a25SRashmi Shetty 
90720841a25SRashmi Shetty 	/* In case of synthetic producer, launch perf_producer or
90820841a25SRashmi Shetty 	 * perf_producer_burst depending on producer enqueue burst size
90920841a25SRashmi Shetty 	 */
91020841a25SRashmi Shetty 	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
91120841a25SRashmi Shetty 			t->opt->prod_enq_burst_sz == 1)
91259f697e3SPavan Nikhilesh 		return perf_producer(arg);
91320841a25SRashmi Shetty 	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
91420841a25SRashmi Shetty 			t->opt->prod_enq_burst_sz > 1) {
915f123568cSPavan Nikhilesh 		if (dev_info.max_event_port_enqueue_depth == 1)
91620841a25SRashmi Shetty 			evt_err("This event device does not support burst mode");
91720841a25SRashmi Shetty 		else
91820841a25SRashmi Shetty 			return perf_producer_burst(arg);
91920841a25SRashmi Shetty 	}
92017b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
92117b22d0bSPavan Nikhilesh 			!t->opt->timdev_use_burst)
922d008f20bSPavan Nikhilesh 		return perf_event_timer_producer(arg);
92317b22d0bSPavan Nikhilesh 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
92417b22d0bSPavan Nikhilesh 			t->opt->timdev_use_burst)
92517b22d0bSPavan Nikhilesh 		return perf_event_timer_producer_burst(arg);
9269c3096d4SVolodymyr Fialko 	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
9279c3096d4SVolodymyr Fialko 		if (t->opt->prod_enq_burst_sz > 1)
9289c3096d4SVolodymyr Fialko 			return perf_event_crypto_producer_burst(arg);
9299c3096d4SVolodymyr Fialko 		else
930de2bc16eSShijith Thotton 			return perf_event_crypto_producer(arg);
931b25a66c4SAmit Prakash Shukla 	} else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)
932b25a66c4SAmit Prakash Shukla 		return perf_event_dma_producer(arg);
933b25a66c4SAmit Prakash Shukla 
93459f697e3SPavan Nikhilesh 	return 0;
93559f697e3SPavan Nikhilesh }
93659f697e3SPavan Nikhilesh 
9379d3aeb18SJerin Jacob static inline uint64_t
9389d3aeb18SJerin Jacob processed_pkts(struct test_perf *t)
9399d3aeb18SJerin Jacob {
9409d3aeb18SJerin Jacob 	uint8_t i;
9419d3aeb18SJerin Jacob 	uint64_t total = 0;
9429d3aeb18SJerin Jacob 
9439d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
9449d3aeb18SJerin Jacob 		total += t->worker[i].processed_pkts;
9459d3aeb18SJerin Jacob 
9469d3aeb18SJerin Jacob 	return total;
9479d3aeb18SJerin Jacob }
9489d3aeb18SJerin Jacob 
9499d3aeb18SJerin Jacob static inline uint64_t
9509d3aeb18SJerin Jacob total_latency(struct test_perf *t)
9519d3aeb18SJerin Jacob {
9529d3aeb18SJerin Jacob 	uint8_t i;
9539d3aeb18SJerin Jacob 	uint64_t total = 0;
9549d3aeb18SJerin Jacob 
9559d3aeb18SJerin Jacob 	for (i = 0; i < t->nb_workers; i++)
9569d3aeb18SJerin Jacob 		total += t->worker[i].latency;
9579d3aeb18SJerin Jacob 
9589d3aeb18SJerin Jacob 	return total;
9599d3aeb18SJerin Jacob }
9609d3aeb18SJerin Jacob 
9619d3aeb18SJerin Jacob 
/*
 * Launch worker and producer lcores, then monitor the run from the main
 * lcore: print a throughput/latency sample roughly once per second and
 * detect a stalled scheduler (no progress for ~5 seconds) as a deadlock.
 *
 * @param test   Test instance holding the private test_perf state.
 * @param opt    Parsed command-line options (lcore masks, producer type, ...).
 * @param worker Worker loop to launch on each worker lcore.
 * @return 0 on completion (t->result carries the pass/fail verdict),
 *         or a negative launch error.
 */
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	/* Ports are assigned in lcore-mask order: workers first, then
	 * producers continue from the same running index.
	 */
	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	/* Deadlock detection: sampled every ~5 seconds. */
	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining  =  total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	/* Throughput reporting: sampled every ~1 second. */
	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining  = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	/* static: running average persists across invocations. */
	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			/* Mpps over the elapsed sample interval. */
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				/* Average forward latency: cycles per packet
				 * (integer division) converted to microseconds.
				 */
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				/* Producer types that generate a finite packet
				 * count stop the test here; ethdev Rx keeps
				 * running until signalled externally.
				 */
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)) {
			/* No progress since the last deadlock sample: dump the
			 * event device state and abort the test.
			 */
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}
10729d3aeb18SJerin Jacob 
10733617aae5SPavan Nikhilesh static int
10743617aae5SPavan Nikhilesh perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
10753617aae5SPavan Nikhilesh 		struct rte_event_port_conf prod_conf)
10763617aae5SPavan Nikhilesh {
10773617aae5SPavan Nikhilesh 	int ret = 0;
10783617aae5SPavan Nikhilesh 	uint16_t prod;
10793617aae5SPavan Nikhilesh 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
10803617aae5SPavan Nikhilesh 
10813617aae5SPavan Nikhilesh 	memset(&queue_conf, 0,
10823617aae5SPavan Nikhilesh 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
10833617aae5SPavan Nikhilesh 	queue_conf.ev.sched_type = opt->sched_type_list[0];
10848728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(prod) {
10853617aae5SPavan Nikhilesh 		uint32_t cap;
10863617aae5SPavan Nikhilesh 
10873617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
10883617aae5SPavan Nikhilesh 				prod, &cap);
10893617aae5SPavan Nikhilesh 		if (ret) {
10903617aae5SPavan Nikhilesh 			evt_err("failed to get event rx adapter[%d]"
10913617aae5SPavan Nikhilesh 					" capabilities",
10923617aae5SPavan Nikhilesh 					opt->dev_id);
10933617aae5SPavan Nikhilesh 			return ret;
10943617aae5SPavan Nikhilesh 		}
10953617aae5SPavan Nikhilesh 		queue_conf.ev.queue_id = prod * stride;
10963617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
10973617aae5SPavan Nikhilesh 				&prod_conf);
10983617aae5SPavan Nikhilesh 		if (ret) {
10993617aae5SPavan Nikhilesh 			evt_err("failed to create rx adapter[%d]", prod);
11003617aae5SPavan Nikhilesh 			return ret;
11013617aae5SPavan Nikhilesh 		}
11023617aae5SPavan Nikhilesh 		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
11033617aae5SPavan Nikhilesh 				&queue_conf);
11043617aae5SPavan Nikhilesh 		if (ret) {
11053617aae5SPavan Nikhilesh 			evt_err("failed to add rx queues to adapter[%d]", prod);
11063617aae5SPavan Nikhilesh 			return ret;
11073617aae5SPavan Nikhilesh 		}
11083617aae5SPavan Nikhilesh 
1109b0333c55SPavan Nikhilesh 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
1110b0333c55SPavan Nikhilesh 			uint32_t service_id;
1111b0333c55SPavan Nikhilesh 
1112b0333c55SPavan Nikhilesh 			rte_event_eth_rx_adapter_service_id_get(prod,
1113b0333c55SPavan Nikhilesh 					&service_id);
1114b0333c55SPavan Nikhilesh 			ret = evt_service_setup(service_id);
1115b0333c55SPavan Nikhilesh 			if (ret) {
1116b0333c55SPavan Nikhilesh 				evt_err("Failed to setup service core"
1117b0333c55SPavan Nikhilesh 						" for Rx adapter\n");
1118b0333c55SPavan Nikhilesh 				return ret;
1119b0333c55SPavan Nikhilesh 			}
1120b0333c55SPavan Nikhilesh 		}
11213617aae5SPavan Nikhilesh 	}
11223617aae5SPavan Nikhilesh 
11233617aae5SPavan Nikhilesh 	return ret;
11243617aae5SPavan Nikhilesh }
11253617aae5SPavan Nikhilesh 
/*
 * Create and configure opt->nb_timer_adptrs event timer adapters and store
 * their handles in t->timer_adptr[]. Adapters without an internal event
 * port get a service core registered and their service set to running.
 * Also records the achieved timer resolution in opt->optm_timer_tick_nsec.
 *
 * @return 0 on success, rte_errno on adapter creation failure, or the
 *         service setup error code.
 */
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	/* Single producer lcore: allow the single-producer put optimization. */
	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		/* The adapter may adjust the requested tick (ADJUST_RES);
		 * remember the resolution it actually provides.
		 */
		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}
1178d008f20bSPavan Nikhilesh 
1179de2bc16eSShijith Thotton static int
1180de2bc16eSShijith Thotton perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
1181de2bc16eSShijith Thotton {
118269e807dfSVolodymyr Fialko 	struct rte_event_crypto_adapter_queue_conf conf;
1183de2bc16eSShijith Thotton 	struct evt_options *opt = t->opt;
1184de2bc16eSShijith Thotton 	uint32_t cap;
1185de2bc16eSShijith Thotton 	int ret;
1186de2bc16eSShijith Thotton 
118769e807dfSVolodymyr Fialko 	memset(&conf, 0, sizeof(conf));
118869e807dfSVolodymyr Fialko 
1189de2bc16eSShijith Thotton 	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
1190de2bc16eSShijith Thotton 	if (ret) {
1191de2bc16eSShijith Thotton 		evt_err("Failed to get crypto adapter capabilities");
1192de2bc16eSShijith Thotton 		return ret;
1193de2bc16eSShijith Thotton 	}
1194de2bc16eSShijith Thotton 
1195de2bc16eSShijith Thotton 	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
1196de2bc16eSShijith Thotton 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
1197de2bc16eSShijith Thotton 	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
1198de2bc16eSShijith Thotton 	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
1199de2bc16eSShijith Thotton 		evt_err("crypto adapter %s mode unsupported\n",
1200de2bc16eSShijith Thotton 			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
1201de2bc16eSShijith Thotton 		return -ENOTSUP;
1202de2bc16eSShijith Thotton 	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
1203de2bc16eSShijith Thotton 		evt_err("Storing crypto session not supported");
1204de2bc16eSShijith Thotton 		return -ENOTSUP;
1205de2bc16eSShijith Thotton 	}
1206de2bc16eSShijith Thotton 
120769e807dfSVolodymyr Fialko 	if (opt->ena_vector) {
120869e807dfSVolodymyr Fialko 		struct rte_event_crypto_adapter_vector_limits limits;
1209de2bc16eSShijith Thotton 
121069e807dfSVolodymyr Fialko 		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
121169e807dfSVolodymyr Fialko 			evt_err("Crypto adapter doesn't support event vector");
121269e807dfSVolodymyr Fialko 			return -EINVAL;
121369e807dfSVolodymyr Fialko 		}
121469e807dfSVolodymyr Fialko 
121569e807dfSVolodymyr Fialko 		ret = rte_event_crypto_adapter_vector_limits_get(p->dev_id, p->ca.cdev_id, &limits);
121669e807dfSVolodymyr Fialko 		if (ret) {
121769e807dfSVolodymyr Fialko 			evt_err("Failed to get crypto adapter's vector limits");
121869e807dfSVolodymyr Fialko 			return ret;
121969e807dfSVolodymyr Fialko 		}
122069e807dfSVolodymyr Fialko 
122169e807dfSVolodymyr Fialko 		if (opt->vector_size < limits.min_sz || opt->vector_size > limits.max_sz) {
122269e807dfSVolodymyr Fialko 			evt_err("Vector size [%d] not within limits max[%d] min[%d]",
122369e807dfSVolodymyr Fialko 				opt->vector_size, limits.max_sz, limits.min_sz);
122469e807dfSVolodymyr Fialko 			return -EINVAL;
122569e807dfSVolodymyr Fialko 		}
122669e807dfSVolodymyr Fialko 
122769e807dfSVolodymyr Fialko 		if (limits.log2_sz && !rte_is_power_of_2(opt->vector_size)) {
122869e807dfSVolodymyr Fialko 			evt_err("Vector size [%d] not power of 2", opt->vector_size);
122969e807dfSVolodymyr Fialko 			return -EINVAL;
123069e807dfSVolodymyr Fialko 		}
123169e807dfSVolodymyr Fialko 
123269e807dfSVolodymyr Fialko 		if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
123369e807dfSVolodymyr Fialko 			opt->vector_tmo_nsec < limits.min_timeout_ns) {
123469e807dfSVolodymyr Fialko 			evt_err("Vector timeout [%" PRIu64 "] not within limits "
123569e807dfSVolodymyr Fialko 				"max[%" PRIu64 "] min[%" PRIu64 "]",
123669e807dfSVolodymyr Fialko 				opt->vector_tmo_nsec, limits.max_timeout_ns, limits.min_timeout_ns);
123769e807dfSVolodymyr Fialko 			return -EINVAL;
123869e807dfSVolodymyr Fialko 		}
123969e807dfSVolodymyr Fialko 
124069e807dfSVolodymyr Fialko 		conf.vector_mp = t->ca_vector_pool;
124169e807dfSVolodymyr Fialko 		conf.vector_sz = opt->vector_size;
124269e807dfSVolodymyr Fialko 		conf.vector_timeout_ns = opt->vector_tmo_nsec;
124369e807dfSVolodymyr Fialko 		conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
124469e807dfSVolodymyr Fialko 	}
124569e807dfSVolodymyr Fialko 
124669e807dfSVolodymyr Fialko 	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
1247c1749bc5SVolodymyr Fialko 		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
1248c1749bc5SVolodymyr Fialko 		conf.ev.queue_id = p->queue_id;
124969e807dfSVolodymyr Fialko 	}
125069e807dfSVolodymyr Fialko 
1251de2bc16eSShijith Thotton 	ret = rte_event_crypto_adapter_queue_pair_add(
1252c1749bc5SVolodymyr Fialko 		TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);
1253de2bc16eSShijith Thotton 
1254de2bc16eSShijith Thotton 	return ret;
1255de2bc16eSShijith Thotton }
1256de2bc16eSShijith Thotton 
1257b25a66c4SAmit Prakash Shukla static int
1258b25a66c4SAmit Prakash Shukla perf_event_dma_adapter_setup(struct test_perf *t, struct prod_data *p)
1259b25a66c4SAmit Prakash Shukla {
1260b25a66c4SAmit Prakash Shukla 	struct evt_options *opt = t->opt;
1261b25a66c4SAmit Prakash Shukla 	struct rte_event event;
1262b25a66c4SAmit Prakash Shukla 	uint32_t cap;
1263b25a66c4SAmit Prakash Shukla 	int ret;
1264b25a66c4SAmit Prakash Shukla 
1265b25a66c4SAmit Prakash Shukla 	ret = rte_event_dma_adapter_caps_get(p->dev_id, p->da.dma_dev_id, &cap);
1266b25a66c4SAmit Prakash Shukla 	if (ret) {
1267b25a66c4SAmit Prakash Shukla 		evt_err("Failed to get dma adapter capabilities");
1268b25a66c4SAmit Prakash Shukla 		return ret;
1269b25a66c4SAmit Prakash Shukla 	}
1270b25a66c4SAmit Prakash Shukla 
1271b25a66c4SAmit Prakash Shukla 	if (((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) &&
1272b25a66c4SAmit Prakash Shukla 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
1273b25a66c4SAmit Prakash Shukla 	    ((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) &&
1274b25a66c4SAmit Prakash Shukla 	     !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
1275b25a66c4SAmit Prakash Shukla 		evt_err("dma adapter %s mode unsupported\n",
1276b25a66c4SAmit Prakash Shukla 			opt->dma_adptr_mode ? "OP_FORWARD" : "OP_NEW");
1277b25a66c4SAmit Prakash Shukla 		return -ENOTSUP;
1278b25a66c4SAmit Prakash Shukla 	}
1279b25a66c4SAmit Prakash Shukla 
1280b25a66c4SAmit Prakash Shukla 	if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
1281b25a66c4SAmit Prakash Shukla 		ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
1282b25a66c4SAmit Prakash Shukla 						      p->da.vchan_id, &event);
1283b25a66c4SAmit Prakash Shukla 	else
1284b25a66c4SAmit Prakash Shukla 		ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
1285b25a66c4SAmit Prakash Shukla 						      p->da.vchan_id, NULL);
1286b25a66c4SAmit Prakash Shukla 
1287b25a66c4SAmit Prakash Shukla 	return ret;
1288b25a66c4SAmit Prakash Shukla }
1289b25a66c4SAmit Prakash Shukla 
12902a440d6aSAkhil Goyal static void *
1291de2bc16eSShijith Thotton cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
1292de2bc16eSShijith Thotton {
1293750ab9d5SAakash Sasidharan 	const struct rte_cryptodev_symmetric_capability *cap;
1294750ab9d5SAakash Sasidharan 	struct rte_cryptodev_sym_capability_idx cap_idx;
1295750ab9d5SAakash Sasidharan 	enum rte_crypto_cipher_algorithm cipher_algo;
1296de2bc16eSShijith Thotton 	struct rte_crypto_sym_xform cipher_xform;
1297750ab9d5SAakash Sasidharan 	struct evt_options *opt = t->opt;
1298750ab9d5SAakash Sasidharan 	uint16_t key_size;
1299750ab9d5SAakash Sasidharan 	uint16_t iv_size;
13002a440d6aSAkhil Goyal 	void *sess;
1301de2bc16eSShijith Thotton 
1302750ab9d5SAakash Sasidharan 	cipher_algo = opt->crypto_cipher_alg;
1303750ab9d5SAakash Sasidharan 	key_size = opt->crypto_cipher_key_sz;
1304750ab9d5SAakash Sasidharan 	iv_size = opt->crypto_cipher_iv_sz;
1305750ab9d5SAakash Sasidharan 
1306750ab9d5SAakash Sasidharan 	/* Check if device supports the algorithm */
1307750ab9d5SAakash Sasidharan 	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1308750ab9d5SAakash Sasidharan 	cap_idx.algo.cipher = cipher_algo;
1309750ab9d5SAakash Sasidharan 
1310750ab9d5SAakash Sasidharan 	cap = rte_cryptodev_sym_capability_get(p->ca.cdev_id, &cap_idx);
1311750ab9d5SAakash Sasidharan 	if (cap == NULL) {
1312750ab9d5SAakash Sasidharan 		evt_err("Device doesn't support cipher algorithm [%s]. Test Skipped\n",
1313750ab9d5SAakash Sasidharan 			rte_cryptodev_get_cipher_algo_string(cipher_algo));
1314750ab9d5SAakash Sasidharan 		return NULL;
1315750ab9d5SAakash Sasidharan 	}
1316750ab9d5SAakash Sasidharan 
1317750ab9d5SAakash Sasidharan 	/* Check if device supports key size and IV size */
1318750ab9d5SAakash Sasidharan 	if (rte_cryptodev_sym_capability_check_cipher(cap, key_size,
1319750ab9d5SAakash Sasidharan 			iv_size) < 0) {
1320750ab9d5SAakash Sasidharan 		evt_err("Device doesn't support cipher configuration:\n"
1321750ab9d5SAakash Sasidharan 			"cipher algo [%s], key sz [%d], iv sz [%d]. Test Skipped\n",
1322750ab9d5SAakash Sasidharan 			rte_cryptodev_get_cipher_algo_string(cipher_algo), key_size, iv_size);
1323750ab9d5SAakash Sasidharan 		return NULL;
1324750ab9d5SAakash Sasidharan 	}
1325750ab9d5SAakash Sasidharan 
1326de2bc16eSShijith Thotton 	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
1327750ab9d5SAakash Sasidharan 	cipher_xform.cipher.algo = cipher_algo;
1328750ab9d5SAakash Sasidharan 	cipher_xform.cipher.key.data = opt->crypto_cipher_key;
1329750ab9d5SAakash Sasidharan 	cipher_xform.cipher.key.length = key_size;
1330750ab9d5SAakash Sasidharan 	cipher_xform.cipher.iv.length = iv_size;
1331750ab9d5SAakash Sasidharan 	cipher_xform.cipher.iv.offset = IV_OFFSET;
1332de2bc16eSShijith Thotton 	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
1333de2bc16eSShijith Thotton 	cipher_xform.next = NULL;
1334de2bc16eSShijith Thotton 
1335bdce2564SAkhil Goyal 	sess = rte_cryptodev_sym_session_create(p->ca.cdev_id, &cipher_xform,
1336bdce2564SAkhil Goyal 			t->ca_sess_pool);
1337de2bc16eSShijith Thotton 	if (sess == NULL) {
1338de2bc16eSShijith Thotton 		evt_err("Failed to create sym session");
1339de2bc16eSShijith Thotton 		return NULL;
1340de2bc16eSShijith Thotton 	}
1341de2bc16eSShijith Thotton 
1342de2bc16eSShijith Thotton 	return sess;
1343de2bc16eSShijith Thotton }
1344de2bc16eSShijith Thotton 
13458f5b5495SAkhil Goyal static void *
13468f5b5495SAkhil Goyal cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
13478f5b5495SAkhil Goyal {
13488f5b5495SAkhil Goyal 	const struct rte_cryptodev_asymmetric_xform_capability *capability;
13498f5b5495SAkhil Goyal 	struct rte_cryptodev_asym_capability_idx cap_idx;
13508f5b5495SAkhil Goyal 	struct rte_crypto_asym_xform xform;
13518f5b5495SAkhil Goyal 	void *sess;
13528f5b5495SAkhil Goyal 
13538f5b5495SAkhil Goyal 	xform.next = NULL;
13548f5b5495SAkhil Goyal 	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
13558f5b5495SAkhil Goyal 	cap_idx.type = xform.xform_type;
13568f5b5495SAkhil Goyal 	capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
13578f5b5495SAkhil Goyal 	if (capability == NULL) {
13588f5b5495SAkhil Goyal 		evt_err("Device doesn't support MODEX. Test Skipped\n");
13598f5b5495SAkhil Goyal 		return NULL;
13608f5b5495SAkhil Goyal 	}
13618f5b5495SAkhil Goyal 
13628f5b5495SAkhil Goyal 	xform.modex.modulus.data = modex_test_case.modulus.data;
13638f5b5495SAkhil Goyal 	xform.modex.modulus.length = modex_test_case.modulus.len;
13648f5b5495SAkhil Goyal 	xform.modex.exponent.data = modex_test_case.exponent.data;
13658f5b5495SAkhil Goyal 	xform.modex.exponent.length = modex_test_case.exponent.len;
13668f5b5495SAkhil Goyal 
13678f5b5495SAkhil Goyal 	if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
13688f5b5495SAkhil Goyal 			t->ca_asym_sess_pool, &sess)) {
13698f5b5495SAkhil Goyal 		evt_err("Failed to create asym session");
13708f5b5495SAkhil Goyal 		return NULL;
13718f5b5495SAkhil Goyal 	}
13728f5b5495SAkhil Goyal 
13738f5b5495SAkhil Goyal 	return sess;
13748f5b5495SAkhil Goyal }
13758f5b5495SAkhil Goyal 
1376272de067SJerin Jacob int
137784a7513dSJerin Jacob perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
1378535c630cSPavan Nikhilesh 				uint8_t stride, uint8_t nb_queues,
1379535c630cSPavan Nikhilesh 				const struct rte_event_port_conf *port_conf)
138084a7513dSJerin Jacob {
138184a7513dSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
13823617aae5SPavan Nikhilesh 	uint16_t port, prod;
138384a7513dSJerin Jacob 	int ret = -1;
138484a7513dSJerin Jacob 
138584a7513dSJerin Jacob 	/* setup one port per worker, linking to all queues */
138684a7513dSJerin Jacob 	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
138784a7513dSJerin Jacob 				port++) {
138884a7513dSJerin Jacob 		struct worker_data *w = &t->worker[port];
138984a7513dSJerin Jacob 
139084a7513dSJerin Jacob 		w->dev_id = opt->dev_id;
139184a7513dSJerin Jacob 		w->port_id = port;
139284a7513dSJerin Jacob 		w->t = t;
139384a7513dSJerin Jacob 		w->processed_pkts = 0;
139484a7513dSJerin Jacob 		w->latency = 0;
139584a7513dSJerin Jacob 
13965f94d108SHarry van Haaren 		struct rte_event_port_conf conf = *port_conf;
13975f94d108SHarry van Haaren 		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;
13985f94d108SHarry van Haaren 
13995f94d108SHarry van Haaren 		ret = rte_event_port_setup(opt->dev_id, port, &conf);
140084a7513dSJerin Jacob 		if (ret) {
140184a7513dSJerin Jacob 			evt_err("failed to setup port %d", port);
140284a7513dSJerin Jacob 			return ret;
140384a7513dSJerin Jacob 		}
140484a7513dSJerin Jacob 
140584a7513dSJerin Jacob 		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
140684a7513dSJerin Jacob 		if (ret != nb_queues) {
140784a7513dSJerin Jacob 			evt_err("failed to link all queues to port %d", port);
140884a7513dSJerin Jacob 			return -EINVAL;
140984a7513dSJerin Jacob 		}
141084a7513dSJerin Jacob 	}
141184a7513dSJerin Jacob 
141284a7513dSJerin Jacob 	/* port for producers, no links */
14133617aae5SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
14143617aae5SPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
14153617aae5SPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
14163617aae5SPavan Nikhilesh 			p->t = t;
14173617aae5SPavan Nikhilesh 		}
14183617aae5SPavan Nikhilesh 
14195f94d108SHarry van Haaren 		struct rte_event_port_conf conf = *port_conf;
14205f94d108SHarry van Haaren 		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;
14215f94d108SHarry van Haaren 
14225f94d108SHarry van Haaren 		ret = perf_event_rx_adapter_setup(opt, stride, conf);
14233617aae5SPavan Nikhilesh 		if (ret)
14243617aae5SPavan Nikhilesh 			return ret;
1425d008f20bSPavan Nikhilesh 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1426d008f20bSPavan Nikhilesh 		prod = 0;
1427d008f20bSPavan Nikhilesh 		for ( ; port < perf_nb_event_ports(opt); port++) {
1428d008f20bSPavan Nikhilesh 			struct prod_data *p = &t->prod[port];
1429d008f20bSPavan Nikhilesh 			p->queue_id = prod * stride;
1430d008f20bSPavan Nikhilesh 			p->t = t;
1431d008f20bSPavan Nikhilesh 			prod++;
1432d008f20bSPavan Nikhilesh 		}
1433d008f20bSPavan Nikhilesh 
1434d008f20bSPavan Nikhilesh 		ret = perf_event_timer_adapter_setup(t);
1435d008f20bSPavan Nikhilesh 		if (ret)
1436d008f20bSPavan Nikhilesh 			return ret;
1437de2bc16eSShijith Thotton 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
1438de2bc16eSShijith Thotton 		struct rte_event_port_conf conf = *port_conf;
1439de2bc16eSShijith Thotton 		uint8_t cdev_id = 0;
1440de2bc16eSShijith Thotton 		uint16_t qp_id = 0;
1441de2bc16eSShijith Thotton 
1442de2bc16eSShijith Thotton 		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
1443de2bc16eSShijith Thotton 						      opt->dev_id, &conf, 0);
1444de2bc16eSShijith Thotton 		if (ret) {
1445de2bc16eSShijith Thotton 			evt_err("Failed to create crypto adapter");
1446de2bc16eSShijith Thotton 			return ret;
1447de2bc16eSShijith Thotton 		}
1448de2bc16eSShijith Thotton 
1449de2bc16eSShijith Thotton 		prod = 0;
1450de2bc16eSShijith Thotton 		for (; port < perf_nb_event_ports(opt); port++) {
1451de2bc16eSShijith Thotton 			union rte_event_crypto_metadata m_data;
1452de2bc16eSShijith Thotton 			struct prod_data *p = &t->prod[port];
1453de2bc16eSShijith Thotton 			uint32_t flow_id;
1454de2bc16eSShijith Thotton 
1455de2bc16eSShijith Thotton 			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
1456de2bc16eSShijith Thotton 				cdev_id++;
1457de2bc16eSShijith Thotton 				qp_id = 0;
1458de2bc16eSShijith Thotton 			}
1459de2bc16eSShijith Thotton 
1460de2bc16eSShijith Thotton 			p->dev_id = opt->dev_id;
1461de2bc16eSShijith Thotton 			p->port_id = port;
1462de2bc16eSShijith Thotton 			p->queue_id = prod * stride;
1463de2bc16eSShijith Thotton 			p->ca.cdev_id = cdev_id;
1464de2bc16eSShijith Thotton 			p->ca.cdev_qp_id = qp_id;
1465de2bc16eSShijith Thotton 			p->ca.crypto_sess = rte_zmalloc_socket(
14668f5b5495SAkhil Goyal 				NULL, sizeof(void *) * t->nb_flows,
1467de2bc16eSShijith Thotton 				RTE_CACHE_LINE_SIZE, opt->socket_id);
1468de2bc16eSShijith Thotton 			p->t = t;
1469de2bc16eSShijith Thotton 
1470eff29c45SVolodymyr Fialko 			ret = perf_event_crypto_adapter_setup(t, p);
1471eff29c45SVolodymyr Fialko 			if (ret)
1472eff29c45SVolodymyr Fialko 				return ret;
1473eff29c45SVolodymyr Fialko 
1474de2bc16eSShijith Thotton 			m_data.request_info.cdev_id = p->ca.cdev_id;
1475de2bc16eSShijith Thotton 			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
1476de2bc16eSShijith Thotton 			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
1477de2bc16eSShijith Thotton 			m_data.response_info.queue_id = p->queue_id;
1478de2bc16eSShijith Thotton 
1479de2bc16eSShijith Thotton 			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
14808f5b5495SAkhil Goyal 				m_data.response_info.flow_id = flow_id;
14818f5b5495SAkhil Goyal 				if (opt->crypto_op_type ==
14828f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
14832a440d6aSAkhil Goyal 					void *sess;
14848f5b5495SAkhil Goyal 
14858f5b5495SAkhil Goyal 					sess = cryptodev_sym_sess_create(p, t);
14868f5b5495SAkhil Goyal 					if (sess == NULL)
1487de2bc16eSShijith Thotton 						return -ENOMEM;
1488de2bc16eSShijith Thotton 
1489eff29c45SVolodymyr Fialko 					ret = rte_cryptodev_session_event_mdata_set(
14908f5b5495SAkhil Goyal 						cdev_id,
14918f5b5495SAkhil Goyal 						sess,
14924c43055cSAkhil Goyal 						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
14934c43055cSAkhil Goyal 						RTE_CRYPTO_OP_WITH_SESSION,
14944c43055cSAkhil Goyal 						&m_data, sizeof(m_data));
1495eff29c45SVolodymyr Fialko 					if (ret)
1496eff29c45SVolodymyr Fialko 						return ret;
14978f5b5495SAkhil Goyal 					p->ca.crypto_sess[flow_id] = sess;
14988f5b5495SAkhil Goyal 				} else {
14998f5b5495SAkhil Goyal 					void *sess;
15004c43055cSAkhil Goyal 
15018f5b5495SAkhil Goyal 					sess = cryptodev_asym_sess_create(p, t);
15028f5b5495SAkhil Goyal 					if (sess == NULL)
15038f5b5495SAkhil Goyal 						return -ENOMEM;
1504eff29c45SVolodymyr Fialko 					ret = rte_cryptodev_session_event_mdata_set(
15058f5b5495SAkhil Goyal 						cdev_id,
15068f5b5495SAkhil Goyal 						sess,
15078f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
15088f5b5495SAkhil Goyal 						RTE_CRYPTO_OP_WITH_SESSION,
15098f5b5495SAkhil Goyal 						&m_data, sizeof(m_data));
1510eff29c45SVolodymyr Fialko 					if (ret)
1511eff29c45SVolodymyr Fialko 						return ret;
15128f5b5495SAkhil Goyal 					p->ca.crypto_sess[flow_id] = sess;
15138f5b5495SAkhil Goyal 				}
1514de2bc16eSShijith Thotton 			}
1515de2bc16eSShijith Thotton 
1516de2bc16eSShijith Thotton 			conf.event_port_cfg |=
1517de2bc16eSShijith Thotton 				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
1518de2bc16eSShijith Thotton 				RTE_EVENT_PORT_CFG_HINT_CONSUMER;
1519de2bc16eSShijith Thotton 
1520de2bc16eSShijith Thotton 			ret = rte_event_port_setup(opt->dev_id, port, &conf);
1521de2bc16eSShijith Thotton 			if (ret) {
1522de2bc16eSShijith Thotton 				evt_err("failed to setup port %d", port);
1523de2bc16eSShijith Thotton 				return ret;
1524de2bc16eSShijith Thotton 			}
1525de2bc16eSShijith Thotton 
1526de2bc16eSShijith Thotton 			qp_id++;
1527de2bc16eSShijith Thotton 			prod++;
1528de2bc16eSShijith Thotton 		}
1529b25a66c4SAmit Prakash Shukla 	}  else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
1530b25a66c4SAmit Prakash Shukla 		struct rte_event_port_conf conf = *port_conf;
1531b25a66c4SAmit Prakash Shukla 		uint8_t dma_dev_id = 0;
1532b25a66c4SAmit Prakash Shukla 		uint16_t vchan_id = 0;
1533b25a66c4SAmit Prakash Shukla 
1534b25a66c4SAmit Prakash Shukla 		ret = rte_event_dma_adapter_create(TEST_PERF_DA_ID, opt->dev_id, &conf, 0);
1535b25a66c4SAmit Prakash Shukla 		if (ret) {
1536b25a66c4SAmit Prakash Shukla 			evt_err("Failed to create dma adapter");
1537b25a66c4SAmit Prakash Shukla 			return ret;
1538b25a66c4SAmit Prakash Shukla 		}
1539b25a66c4SAmit Prakash Shukla 
1540b25a66c4SAmit Prakash Shukla 		prod = 0;
1541b25a66c4SAmit Prakash Shukla 		for (; port < perf_nb_event_ports(opt); port++) {
1542b25a66c4SAmit Prakash Shukla 			struct prod_data *p = &t->prod[port];
1543b25a66c4SAmit Prakash Shukla 
1544b25a66c4SAmit Prakash Shukla 			p->dev_id = opt->dev_id;
1545b25a66c4SAmit Prakash Shukla 			p->port_id = port;
1546b25a66c4SAmit Prakash Shukla 			p->queue_id = prod * stride;
1547b25a66c4SAmit Prakash Shukla 			p->da.dma_dev_id = dma_dev_id;
1548b25a66c4SAmit Prakash Shukla 			p->da.vchan_id = vchan_id;
1549b25a66c4SAmit Prakash Shukla 			p->t = t;
1550b25a66c4SAmit Prakash Shukla 
1551b25a66c4SAmit Prakash Shukla 			ret = perf_event_dma_adapter_setup(t, p);
1552b25a66c4SAmit Prakash Shukla 			if (ret)
1553b25a66c4SAmit Prakash Shukla 				return ret;
1554b25a66c4SAmit Prakash Shukla 
1555b25a66c4SAmit Prakash Shukla 			conf.event_port_cfg |=
1556b25a66c4SAmit Prakash Shukla 				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
1557b25a66c4SAmit Prakash Shukla 				RTE_EVENT_PORT_CFG_HINT_CONSUMER;
1558b25a66c4SAmit Prakash Shukla 
1559b25a66c4SAmit Prakash Shukla 			ret = rte_event_port_setup(opt->dev_id, port, &conf);
1560b25a66c4SAmit Prakash Shukla 			if (ret) {
1561b25a66c4SAmit Prakash Shukla 				evt_err("failed to setup port %d", port);
1562b25a66c4SAmit Prakash Shukla 				return ret;
1563b25a66c4SAmit Prakash Shukla 			}
1564b25a66c4SAmit Prakash Shukla 
1565b25a66c4SAmit Prakash Shukla 			prod++;
1566b25a66c4SAmit Prakash Shukla 		}
15673617aae5SPavan Nikhilesh 	} else {
156884a7513dSJerin Jacob 		prod = 0;
156984a7513dSJerin Jacob 		for ( ; port < perf_nb_event_ports(opt); port++) {
157084a7513dSJerin Jacob 			struct prod_data *p = &t->prod[port];
157184a7513dSJerin Jacob 
157284a7513dSJerin Jacob 			p->dev_id = opt->dev_id;
157384a7513dSJerin Jacob 			p->port_id = port;
157484a7513dSJerin Jacob 			p->queue_id = prod * stride;
157584a7513dSJerin Jacob 			p->t = t;
157684a7513dSJerin Jacob 
15775f94d108SHarry van Haaren 			struct rte_event_port_conf conf = *port_conf;
15785f94d108SHarry van Haaren 			conf.event_port_cfg |=
15795f94d108SHarry van Haaren 				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
15805f94d108SHarry van Haaren 				RTE_EVENT_PORT_CFG_HINT_CONSUMER;
15815f94d108SHarry van Haaren 
15825f94d108SHarry van Haaren 			ret = rte_event_port_setup(opt->dev_id, port, &conf);
158384a7513dSJerin Jacob 			if (ret) {
158484a7513dSJerin Jacob 				evt_err("failed to setup port %d", port);
158584a7513dSJerin Jacob 				return ret;
158684a7513dSJerin Jacob 			}
158784a7513dSJerin Jacob 			prod++;
158884a7513dSJerin Jacob 		}
15893617aae5SPavan Nikhilesh 	}
159084a7513dSJerin Jacob 
159184a7513dSJerin Jacob 	return ret;
159284a7513dSJerin Jacob }
159384a7513dSJerin Jacob 
159484a7513dSJerin Jacob int
1595272de067SJerin Jacob perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
1596272de067SJerin Jacob {
1597272de067SJerin Jacob 	unsigned int lcores;
1598272de067SJerin Jacob 
1599cb056611SStephen Hemminger 	/* N producer + N worker + main when producer cores are used
1600cb056611SStephen Hemminger 	 * Else N worker + main when Rx adapter is used
1601b01974daSPavan Nikhilesh 	 */
1602b01974daSPavan Nikhilesh 	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
1603272de067SJerin Jacob 
1604272de067SJerin Jacob 	if (rte_lcore_count() < lcores) {
1605272de067SJerin Jacob 		evt_err("test need minimum %d lcores", lcores);
1606272de067SJerin Jacob 		return -1;
1607272de067SJerin Jacob 	}
1608272de067SJerin Jacob 
1609272de067SJerin Jacob 	/* Validate worker lcores */
1610cb056611SStephen Hemminger 	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
1611cb056611SStephen Hemminger 		evt_err("worker lcores overlaps with main lcore");
1612272de067SJerin Jacob 		return -1;
1613272de067SJerin Jacob 	}
1614272de067SJerin Jacob 	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
1615272de067SJerin Jacob 		evt_err("worker lcores overlaps producer lcores");
1616272de067SJerin Jacob 		return -1;
1617272de067SJerin Jacob 	}
1618272de067SJerin Jacob 	if (evt_has_disabled_lcore(opt->wlcores)) {
1619272de067SJerin Jacob 		evt_err("one or more workers lcores are not enabled");
1620272de067SJerin Jacob 		return -1;
1621272de067SJerin Jacob 	}
1622272de067SJerin Jacob 	if (!evt_has_active_lcore(opt->wlcores)) {
1623272de067SJerin Jacob 		evt_err("minimum one worker is required");
1624272de067SJerin Jacob 		return -1;
1625272de067SJerin Jacob 	}
1626272de067SJerin Jacob 
1627902387eaSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
1628de2bc16eSShijith Thotton 	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
1629b25a66c4SAmit Prakash Shukla 	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
1630b25a66c4SAmit Prakash Shukla 	    opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
1631272de067SJerin Jacob 		/* Validate producer lcores */
1632b01974daSPavan Nikhilesh 		if (evt_lcores_has_overlap(opt->plcores,
1633cb056611SStephen Hemminger 					rte_get_main_lcore())) {
1634cb056611SStephen Hemminger 			evt_err("producer lcores overlaps with main lcore");
1635272de067SJerin Jacob 			return -1;
1636272de067SJerin Jacob 		}
1637272de067SJerin Jacob 		if (evt_has_disabled_lcore(opt->plcores)) {
1638272de067SJerin Jacob 			evt_err("one or more producer lcores are not enabled");
1639272de067SJerin Jacob 			return -1;
1640272de067SJerin Jacob 		}
1641272de067SJerin Jacob 		if (!evt_has_active_lcore(opt->plcores)) {
1642272de067SJerin Jacob 			evt_err("minimum one producer is required");
1643272de067SJerin Jacob 			return -1;
1644272de067SJerin Jacob 		}
1645b01974daSPavan Nikhilesh 	}
1646272de067SJerin Jacob 
1647272de067SJerin Jacob 	if (evt_has_invalid_stage(opt))
1648272de067SJerin Jacob 		return -1;
1649272de067SJerin Jacob 
1650272de067SJerin Jacob 	if (evt_has_invalid_sched_type(opt))
1651272de067SJerin Jacob 		return -1;
1652272de067SJerin Jacob 
1653272de067SJerin Jacob 	if (nb_queues > EVT_MAX_QUEUES) {
1654272de067SJerin Jacob 		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
1655272de067SJerin Jacob 		return -1;
1656272de067SJerin Jacob 	}
1657272de067SJerin Jacob 	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
1658272de067SJerin Jacob 		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
1659272de067SJerin Jacob 		return -1;
1660272de067SJerin Jacob 	}
1661272de067SJerin Jacob 
1662272de067SJerin Jacob 	/* Fixups */
1663d008f20bSPavan Nikhilesh 	if ((opt->nb_stages == 1 &&
1664d008f20bSPavan Nikhilesh 			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
1665d008f20bSPavan Nikhilesh 			opt->fwd_latency) {
1666272de067SJerin Jacob 		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
1667272de067SJerin Jacob 		opt->fwd_latency = 0;
1668272de067SJerin Jacob 	}
1669d008f20bSPavan Nikhilesh 
1670272de067SJerin Jacob 	if (opt->fwd_latency && !opt->q_priority) {
1671272de067SJerin Jacob 		evt_info("enabled queue priority for latency measurement");
1672272de067SJerin Jacob 		opt->q_priority = 1;
1673272de067SJerin Jacob 	}
16749d3aeb18SJerin Jacob 	if (opt->nb_pkts == 0)
16759d3aeb18SJerin Jacob 		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
1676272de067SJerin Jacob 
1677272de067SJerin Jacob 	return 0;
1678272de067SJerin Jacob }
1679272de067SJerin Jacob 
1680272de067SJerin Jacob void
1681272de067SJerin Jacob perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
1682272de067SJerin Jacob {
1683272de067SJerin Jacob 	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
1684272de067SJerin Jacob 	evt_dump_producer_lcores(opt);
1685272de067SJerin Jacob 	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
1686272de067SJerin Jacob 	evt_dump_worker_lcores(opt);
1687272de067SJerin Jacob 	evt_dump_nb_stages(opt);
1688272de067SJerin Jacob 	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
1689272de067SJerin Jacob 	evt_dump("nb_evdev_queues", "%d", nb_queues);
1690272de067SJerin Jacob 	evt_dump_queue_priority(opt);
1691272de067SJerin Jacob 	evt_dump_sched_type_list(opt);
1692b01974daSPavan Nikhilesh 	evt_dump_producer_type(opt);
169320841a25SRashmi Shetty 	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
1694272de067SJerin Jacob }
1695272de067SJerin Jacob 
16967da008dfSPavan Nikhilesh static void
16977da008dfSPavan Nikhilesh perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
16987da008dfSPavan Nikhilesh 		      void *args)
16997da008dfSPavan Nikhilesh {
17007da008dfSPavan Nikhilesh 	rte_mempool_put(args, ev.event_ptr);
17017da008dfSPavan Nikhilesh }
17027da008dfSPavan Nikhilesh 
170341c219e6SJerin Jacob void
1704f0b68c0bSPavan Nikhilesh perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
1705f0b68c0bSPavan Nikhilesh 		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
1706f0b68c0bSPavan Nikhilesh 		    uint16_t nb_deq)
1707f0b68c0bSPavan Nikhilesh {
1708f0b68c0bSPavan Nikhilesh 	int i;
1709f0b68c0bSPavan Nikhilesh 
1710f0b68c0bSPavan Nikhilesh 	if (nb_deq) {
1711f0b68c0bSPavan Nikhilesh 		for (i = nb_enq; i < nb_deq; i++)
1712f0b68c0bSPavan Nikhilesh 			rte_mempool_put(pool, events[i].event_ptr);
1713f0b68c0bSPavan Nikhilesh 
1714f0b68c0bSPavan Nikhilesh 		for (i = 0; i < nb_deq; i++)
1715f0b68c0bSPavan Nikhilesh 			events[i].op = RTE_EVENT_OP_RELEASE;
1716f0b68c0bSPavan Nikhilesh 		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
1717f0b68c0bSPavan Nikhilesh 	}
17187da008dfSPavan Nikhilesh 	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
1719f0b68c0bSPavan Nikhilesh }
1720f0b68c0bSPavan Nikhilesh 
1721f0b68c0bSPavan Nikhilesh void
172241c219e6SJerin Jacob perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
172341c219e6SJerin Jacob {
1724d008f20bSPavan Nikhilesh 	int i;
1725d008f20bSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
172641c219e6SJerin Jacob 
1727d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1728d008f20bSPavan Nikhilesh 		for (i = 0; i < opt->nb_timer_adptrs; i++)
1729d008f20bSPavan Nikhilesh 			rte_event_timer_adapter_stop(t->timer_adptr[i]);
1730d008f20bSPavan Nikhilesh 	}
173141c219e6SJerin Jacob 	rte_event_dev_stop(opt->dev_id);
173241c219e6SJerin Jacob 	rte_event_dev_close(opt->dev_id);
173341c219e6SJerin Jacob }
173441c219e6SJerin Jacob 
173541c219e6SJerin Jacob static inline void
173641c219e6SJerin Jacob perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
173741c219e6SJerin Jacob 	    void *obj, unsigned i __rte_unused)
173841c219e6SJerin Jacob {
173941c219e6SJerin Jacob 	memset(obj, 0, mp->elt_size);
174041c219e6SJerin Jacob }
174141c219e6SJerin Jacob 
17423fc8de4fSPavan Nikhilesh #define NB_RX_DESC			128
17433fc8de4fSPavan Nikhilesh #define NB_TX_DESC			512
17443fc8de4fSPavan Nikhilesh int
17453fc8de4fSPavan Nikhilesh perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
17463fc8de4fSPavan Nikhilesh {
17478728ccf3SThomas Monjalon 	uint16_t i;
174877339255SIvan Ilchenko 	int ret;
17493fc8de4fSPavan Nikhilesh 	struct test_perf *t = evt_test_priv(test);
17503fc8de4fSPavan Nikhilesh 	struct rte_eth_conf port_conf = {
17513fc8de4fSPavan Nikhilesh 		.rxmode = {
1752295968d1SFerruh Yigit 			.mq_mode = RTE_ETH_MQ_RX_RSS,
17533fc8de4fSPavan Nikhilesh 		},
17543fc8de4fSPavan Nikhilesh 		.rx_adv_conf = {
17553fc8de4fSPavan Nikhilesh 			.rss_conf = {
17563fc8de4fSPavan Nikhilesh 				.rss_key = NULL,
1757295968d1SFerruh Yigit 				.rss_hf = RTE_ETH_RSS_IP,
17583fc8de4fSPavan Nikhilesh 			},
17593fc8de4fSPavan Nikhilesh 		},
17603fc8de4fSPavan Nikhilesh 	};
17613fc8de4fSPavan Nikhilesh 
1762de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
17633fc8de4fSPavan Nikhilesh 		return 0;
17643fc8de4fSPavan Nikhilesh 
1765d9a42a69SThomas Monjalon 	if (!rte_eth_dev_count_avail()) {
17663fc8de4fSPavan Nikhilesh 		evt_err("No ethernet ports found.");
17673fc8de4fSPavan Nikhilesh 		return -ENODEV;
17683fc8de4fSPavan Nikhilesh 	}
17693fc8de4fSPavan Nikhilesh 
17708728ccf3SThomas Monjalon 	RTE_ETH_FOREACH_DEV(i) {
17714f5701f2SFerruh Yigit 		struct rte_eth_dev_info dev_info;
17724f5701f2SFerruh Yigit 		struct rte_eth_conf local_port_conf = port_conf;
17733fc8de4fSPavan Nikhilesh 
177477339255SIvan Ilchenko 		ret = rte_eth_dev_info_get(i, &dev_info);
177577339255SIvan Ilchenko 		if (ret != 0) {
177677339255SIvan Ilchenko 			evt_err("Error during getting device (port %u) info: %s\n",
177777339255SIvan Ilchenko 					i, strerror(-ret));
177877339255SIvan Ilchenko 			return ret;
177977339255SIvan Ilchenko 		}
17804f5701f2SFerruh Yigit 
17814f5701f2SFerruh Yigit 		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
17824f5701f2SFerruh Yigit 			dev_info.flow_type_rss_offloads;
17834f5701f2SFerruh Yigit 		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
17844f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf) {
17854f5701f2SFerruh Yigit 			evt_info("Port %u modified RSS hash function based on hardware support,"
17864f5701f2SFerruh Yigit 				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
17874f5701f2SFerruh Yigit 				i,
17884f5701f2SFerruh Yigit 				port_conf.rx_adv_conf.rss_conf.rss_hf,
17894f5701f2SFerruh Yigit 				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
17904f5701f2SFerruh Yigit 		}
17914f5701f2SFerruh Yigit 
17924f5701f2SFerruh Yigit 		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
17933fc8de4fSPavan Nikhilesh 			evt_err("Failed to configure eth port [%d]", i);
17943fc8de4fSPavan Nikhilesh 			return -EINVAL;
17953fc8de4fSPavan Nikhilesh 		}
17963fc8de4fSPavan Nikhilesh 
17973fc8de4fSPavan Nikhilesh 		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
17983fc8de4fSPavan Nikhilesh 				rte_socket_id(), NULL, t->pool) < 0) {
17993fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
18003fc8de4fSPavan Nikhilesh 					i, 0);
18013fc8de4fSPavan Nikhilesh 			return -EINVAL;
18023fc8de4fSPavan Nikhilesh 		}
18033fc8de4fSPavan Nikhilesh 
18043fc8de4fSPavan Nikhilesh 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
18053fc8de4fSPavan Nikhilesh 					rte_socket_id(), NULL) < 0) {
18063fc8de4fSPavan Nikhilesh 			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
18073fc8de4fSPavan Nikhilesh 					i, 0);
18083fc8de4fSPavan Nikhilesh 			return -EINVAL;
18093fc8de4fSPavan Nikhilesh 		}
18103fc8de4fSPavan Nikhilesh 
181170e51a0eSIvan Ilchenko 		ret = rte_eth_promiscuous_enable(i);
181270e51a0eSIvan Ilchenko 		if (ret != 0) {
181370e51a0eSIvan Ilchenko 			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
181470e51a0eSIvan Ilchenko 				i, rte_strerror(-ret));
181570e51a0eSIvan Ilchenko 			return ret;
181670e51a0eSIvan Ilchenko 		}
18173fc8de4fSPavan Nikhilesh 	}
18183fc8de4fSPavan Nikhilesh 
18193fc8de4fSPavan Nikhilesh 	return 0;
18203fc8de4fSPavan Nikhilesh }
18213fc8de4fSPavan Nikhilesh 
1822a734e738SPavan Nikhilesh void
1823a734e738SPavan Nikhilesh perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
18247f3daf34SPavan Nikhilesh {
18258728ccf3SThomas Monjalon 	uint16_t i;
18267f3daf34SPavan Nikhilesh 	RTE_SET_USED(test);
18277f3daf34SPavan Nikhilesh 
18287f3daf34SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
18298728ccf3SThomas Monjalon 		RTE_ETH_FOREACH_DEV(i) {
18303617aae5SPavan Nikhilesh 			rte_event_eth_rx_adapter_stop(i);
1831a734e738SPavan Nikhilesh 			rte_event_eth_rx_adapter_queue_del(i, i, -1);
1832a734e738SPavan Nikhilesh 			rte_eth_dev_rx_queue_stop(i, 0);
1833a734e738SPavan Nikhilesh 		}
1834a734e738SPavan Nikhilesh 	}
1835a734e738SPavan Nikhilesh }
1836a734e738SPavan Nikhilesh 
1837a734e738SPavan Nikhilesh void
1838a734e738SPavan Nikhilesh perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
1839a734e738SPavan Nikhilesh {
1840a734e738SPavan Nikhilesh 	uint16_t i;
1841a734e738SPavan Nikhilesh 	RTE_SET_USED(test);
1842a734e738SPavan Nikhilesh 
1843a734e738SPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
1844a734e738SPavan Nikhilesh 		RTE_ETH_FOREACH_DEV(i) {
1845a734e738SPavan Nikhilesh 			rte_event_eth_tx_adapter_stop(i);
1846a734e738SPavan Nikhilesh 			rte_event_eth_tx_adapter_queue_del(i, i, -1);
1847a734e738SPavan Nikhilesh 			rte_eth_dev_tx_queue_stop(i, 0);
18487f3daf34SPavan Nikhilesh 			rte_eth_dev_stop(i);
18497f3daf34SPavan Nikhilesh 		}
18507f3daf34SPavan Nikhilesh 	}
18517f3daf34SPavan Nikhilesh }
18527f3daf34SPavan Nikhilesh 
185341c219e6SJerin Jacob int
1854de2bc16eSShijith Thotton perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
1855de2bc16eSShijith Thotton {
1856de2bc16eSShijith Thotton 	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
1857de2bc16eSShijith Thotton 	struct test_perf *t = evt_test_priv(test);
1858de2bc16eSShijith Thotton 	unsigned int max_session_size;
1859de2bc16eSShijith Thotton 	uint32_t nb_sessions;
1860de2bc16eSShijith Thotton 	int ret;
1861de2bc16eSShijith Thotton 
1862de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
1863de2bc16eSShijith Thotton 		return 0;
1864de2bc16eSShijith Thotton 
1865de2bc16eSShijith Thotton 	cdev_count = rte_cryptodev_count();
1866de2bc16eSShijith Thotton 	if (cdev_count == 0) {
1867de2bc16eSShijith Thotton 		evt_err("No crypto devices available\n");
1868de2bc16eSShijith Thotton 		return -ENODEV;
1869de2bc16eSShijith Thotton 	}
1870de2bc16eSShijith Thotton 
1871de2bc16eSShijith Thotton 	t->ca_op_pool = rte_crypto_op_pool_create(
18728f5b5495SAkhil Goyal 		"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
1873750ab9d5SAakash Sasidharan 		128, sizeof(union rte_event_crypto_metadata) + EVT_CRYPTO_MAX_IV_SIZE,
18748f5b5495SAkhil Goyal 		rte_socket_id());
1875de2bc16eSShijith Thotton 	if (t->ca_op_pool == NULL) {
1876de2bc16eSShijith Thotton 		evt_err("Failed to create crypto op pool");
1877de2bc16eSShijith Thotton 		return -ENOMEM;
1878de2bc16eSShijith Thotton 	}
1879de2bc16eSShijith Thotton 
1880de2bc16eSShijith Thotton 	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
18818f5b5495SAkhil Goyal 	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
18828f5b5495SAkhil Goyal 		"ca_asym_sess_pool", nb_sessions, 0,
18838f5b5495SAkhil Goyal 		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
18848f5b5495SAkhil Goyal 	if (t->ca_asym_sess_pool == NULL) {
18858f5b5495SAkhil Goyal 		evt_err("Failed to create sym session pool");
18868f5b5495SAkhil Goyal 		ret = -ENOMEM;
18878f5b5495SAkhil Goyal 		goto err;
18888f5b5495SAkhil Goyal 	}
18898f5b5495SAkhil Goyal 
1890de2bc16eSShijith Thotton 	max_session_size = 0;
1891de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1892de2bc16eSShijith Thotton 		unsigned int session_size;
1893de2bc16eSShijith Thotton 
1894de2bc16eSShijith Thotton 		session_size =
1895de2bc16eSShijith Thotton 			rte_cryptodev_sym_get_private_session_size(cdev_id);
1896de2bc16eSShijith Thotton 		if (session_size > max_session_size)
1897de2bc16eSShijith Thotton 			max_session_size = session_size;
1898de2bc16eSShijith Thotton 	}
1899de2bc16eSShijith Thotton 
1900bdce2564SAkhil Goyal 	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
1901bdce2564SAkhil Goyal 		"ca_sess_pool", nb_sessions, max_session_size, 0,
1902bdce2564SAkhil Goyal 		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
1903bdce2564SAkhil Goyal 	if (t->ca_sess_pool == NULL) {
1904bdce2564SAkhil Goyal 		evt_err("Failed to create sym session pool");
1905de2bc16eSShijith Thotton 		ret = -ENOMEM;
1906de2bc16eSShijith Thotton 		goto err;
1907de2bc16eSShijith Thotton 	}
1908de2bc16eSShijith Thotton 
190969e807dfSVolodymyr Fialko 	if (opt->ena_vector) {
191069e807dfSVolodymyr Fialko 		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) * 2;
191169e807dfSVolodymyr Fialko 		nb_elem = RTE_MAX(512U, nb_elem);
191269e807dfSVolodymyr Fialko 		nb_elem += evt_nr_active_lcores(opt->wlcores) * 32;
191369e807dfSVolodymyr Fialko 		t->ca_vector_pool = rte_event_vector_pool_create("vector_pool", nb_elem, 32,
191469e807dfSVolodymyr Fialko 				opt->vector_size, opt->socket_id);
191569e807dfSVolodymyr Fialko 		if (t->ca_vector_pool == NULL) {
191669e807dfSVolodymyr Fialko 			evt_err("Failed to create event vector pool");
191769e807dfSVolodymyr Fialko 			ret = -ENOMEM;
191869e807dfSVolodymyr Fialko 			goto err;
191969e807dfSVolodymyr Fialko 		}
192069e807dfSVolodymyr Fialko 	}
192169e807dfSVolodymyr Fialko 
1922de2bc16eSShijith Thotton 	/*
1923de2bc16eSShijith Thotton 	 * Calculate number of needed queue pairs, based on the amount of
1924de2bc16eSShijith Thotton 	 * available number of logical cores and crypto devices. For instance,
1925de2bc16eSShijith Thotton 	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
1926de2bc16eSShijith Thotton 	 * up per device.
1927de2bc16eSShijith Thotton 	 */
1928de2bc16eSShijith Thotton 	nb_plcores = evt_nr_active_lcores(opt->plcores);
1929de2bc16eSShijith Thotton 	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
1930de2bc16eSShijith Thotton 					     nb_plcores / cdev_count;
1931de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
1932de2bc16eSShijith Thotton 		struct rte_cryptodev_qp_conf qp_conf;
1933de2bc16eSShijith Thotton 		struct rte_cryptodev_config conf;
1934de2bc16eSShijith Thotton 		struct rte_cryptodev_info info;
1935de2bc16eSShijith Thotton 		int qp_id;
1936de2bc16eSShijith Thotton 
1937de2bc16eSShijith Thotton 		rte_cryptodev_info_get(cdev_id, &info);
1938de2bc16eSShijith Thotton 		if (nb_qps > info.max_nb_queue_pairs) {
1939de2bc16eSShijith Thotton 			evt_err("Not enough queue pairs per cryptodev (%u)",
1940de2bc16eSShijith Thotton 				nb_qps);
1941de2bc16eSShijith Thotton 			ret = -EINVAL;
1942de2bc16eSShijith Thotton 			goto err;
1943de2bc16eSShijith Thotton 		}
1944de2bc16eSShijith Thotton 
1945de2bc16eSShijith Thotton 		conf.nb_queue_pairs = nb_qps;
1946de2bc16eSShijith Thotton 		conf.socket_id = SOCKET_ID_ANY;
1947de2bc16eSShijith Thotton 		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;
1948de2bc16eSShijith Thotton 
1949de2bc16eSShijith Thotton 		ret = rte_cryptodev_configure(cdev_id, &conf);
1950de2bc16eSShijith Thotton 		if (ret) {
1951de2bc16eSShijith Thotton 			evt_err("Failed to configure cryptodev (%u)", cdev_id);
1952de2bc16eSShijith Thotton 			goto err;
1953de2bc16eSShijith Thotton 		}
1954de2bc16eSShijith Thotton 
1955de2bc16eSShijith Thotton 		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
1956de2bc16eSShijith Thotton 		qp_conf.mp_session = t->ca_sess_pool;
1957de2bc16eSShijith Thotton 
1958de2bc16eSShijith Thotton 		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
1959de2bc16eSShijith Thotton 			ret = rte_cryptodev_queue_pair_setup(
1960de2bc16eSShijith Thotton 				cdev_id, qp_id, &qp_conf,
1961de2bc16eSShijith Thotton 				rte_cryptodev_socket_id(cdev_id));
1962de2bc16eSShijith Thotton 			if (ret) {
1963de2bc16eSShijith Thotton 				evt_err("Failed to setup queue pairs on cryptodev %u\n",
1964de2bc16eSShijith Thotton 					cdev_id);
1965de2bc16eSShijith Thotton 				goto err;
1966de2bc16eSShijith Thotton 			}
1967de2bc16eSShijith Thotton 		}
1968de2bc16eSShijith Thotton 	}
1969de2bc16eSShijith Thotton 
1970de2bc16eSShijith Thotton 	return 0;
1971de2bc16eSShijith Thotton err:
1972de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
1973de2bc16eSShijith Thotton 		rte_cryptodev_close(cdev_id);
1974de2bc16eSShijith Thotton 
1975de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_op_pool);
1976de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_sess_pool);
19778f5b5495SAkhil Goyal 	rte_mempool_free(t->ca_asym_sess_pool);
197869e807dfSVolodymyr Fialko 	rte_mempool_free(t->ca_vector_pool);
1979de2bc16eSShijith Thotton 
1980de2bc16eSShijith Thotton 	return ret;
1981de2bc16eSShijith Thotton }
1982de2bc16eSShijith Thotton 
1983de2bc16eSShijith Thotton void
1984de2bc16eSShijith Thotton perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
1985de2bc16eSShijith Thotton {
1986de2bc16eSShijith Thotton 	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
1987de2bc16eSShijith Thotton 	struct test_perf *t = evt_test_priv(test);
1988de2bc16eSShijith Thotton 	uint16_t port;
1989de2bc16eSShijith Thotton 
1990de2bc16eSShijith Thotton 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
1991de2bc16eSShijith Thotton 		return;
1992de2bc16eSShijith Thotton 
1993de2bc16eSShijith Thotton 	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
19942a440d6aSAkhil Goyal 		void *sess;
1995de2bc16eSShijith Thotton 		struct prod_data *p = &t->prod[port];
1996de2bc16eSShijith Thotton 		uint32_t flow_id;
1997de2bc16eSShijith Thotton 		uint8_t cdev_id;
1998de2bc16eSShijith Thotton 
1999de2bc16eSShijith Thotton 		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
2000de2bc16eSShijith Thotton 			sess = p->ca.crypto_sess[flow_id];
2001de2bc16eSShijith Thotton 			cdev_id = p->ca.cdev_id;
2002bdce2564SAkhil Goyal 			rte_cryptodev_sym_session_free(cdev_id, sess);
2003de2bc16eSShijith Thotton 		}
2004de2bc16eSShijith Thotton 
2005de2bc16eSShijith Thotton 		rte_event_crypto_adapter_queue_pair_del(
2006de2bc16eSShijith Thotton 			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
2007de2bc16eSShijith Thotton 	}
2008de2bc16eSShijith Thotton 
2009de2bc16eSShijith Thotton 	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);
2010de2bc16eSShijith Thotton 
2011de2bc16eSShijith Thotton 	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
2012de2bc16eSShijith Thotton 		rte_cryptodev_stop(cdev_id);
2013de2bc16eSShijith Thotton 		rte_cryptodev_close(cdev_id);
2014de2bc16eSShijith Thotton 	}
2015de2bc16eSShijith Thotton 
2016de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_op_pool);
2017de2bc16eSShijith Thotton 	rte_mempool_free(t->ca_sess_pool);
20188f5b5495SAkhil Goyal 	rte_mempool_free(t->ca_asym_sess_pool);
201969e807dfSVolodymyr Fialko 	rte_mempool_free(t->ca_vector_pool);
2020de2bc16eSShijith Thotton }
2021de2bc16eSShijith Thotton 
2022de2bc16eSShijith Thotton int
2023b25a66c4SAmit Prakash Shukla perf_dmadev_setup(struct evt_test *test, struct evt_options *opt)
2024b25a66c4SAmit Prakash Shukla {
2025b25a66c4SAmit Prakash Shukla 	const struct rte_dma_conf conf = { .nb_vchans = 1};
2026b25a66c4SAmit Prakash Shukla 	const struct rte_dma_vchan_conf qconf = {
2027b25a66c4SAmit Prakash Shukla 			.direction = RTE_DMA_DIR_MEM_TO_MEM,
2028b25a66c4SAmit Prakash Shukla 			.nb_desc = 1024,
2029b25a66c4SAmit Prakash Shukla 	};
2030b25a66c4SAmit Prakash Shukla 	uint8_t dma_dev_count, dma_dev_id = 0;
2031b25a66c4SAmit Prakash Shukla 	int vchan_id;
2032b25a66c4SAmit Prakash Shukla 	int ret;
2033b25a66c4SAmit Prakash Shukla 
2034*bca734c2SPavan Nikhilesh 	RTE_SET_USED(test);
2035b25a66c4SAmit Prakash Shukla 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR)
2036b25a66c4SAmit Prakash Shukla 		return 0;
2037b25a66c4SAmit Prakash Shukla 
2038b25a66c4SAmit Prakash Shukla 	dma_dev_count = rte_dma_count_avail();
2039b25a66c4SAmit Prakash Shukla 	if (dma_dev_count == 0) {
2040b25a66c4SAmit Prakash Shukla 		evt_err("No dma devices available\n");
2041b25a66c4SAmit Prakash Shukla 		return -ENODEV;
2042b25a66c4SAmit Prakash Shukla 	}
2043b25a66c4SAmit Prakash Shukla 
2044b25a66c4SAmit Prakash Shukla 	ret = rte_dma_configure(dma_dev_id, &conf);
2045b25a66c4SAmit Prakash Shukla 	if (ret) {
2046b25a66c4SAmit Prakash Shukla 		evt_err("Failed to configure dma dev (%u)", dma_dev_id);
2047b25a66c4SAmit Prakash Shukla 		goto err;
2048b25a66c4SAmit Prakash Shukla 	}
2049b25a66c4SAmit Prakash Shukla 
2050b25a66c4SAmit Prakash Shukla 	for (vchan_id = 0; vchan_id < conf.nb_vchans; vchan_id++) {
2051b25a66c4SAmit Prakash Shukla 		ret = rte_dma_vchan_setup(dma_dev_id, vchan_id, &qconf);
2052b25a66c4SAmit Prakash Shukla 		if (ret) {
2053b25a66c4SAmit Prakash Shukla 			evt_err("Failed to setup vchan on dma dev %u\n",
2054b25a66c4SAmit Prakash Shukla 				dma_dev_id);
2055b25a66c4SAmit Prakash Shukla 			goto err;
2056b25a66c4SAmit Prakash Shukla 		}
2057b25a66c4SAmit Prakash Shukla 	}
2058b25a66c4SAmit Prakash Shukla 
2059b25a66c4SAmit Prakash Shukla 	return 0;
2060b25a66c4SAmit Prakash Shukla err:
2061b25a66c4SAmit Prakash Shukla 	rte_dma_close(dma_dev_id);
2062b25a66c4SAmit Prakash Shukla 
2063b25a66c4SAmit Prakash Shukla 	return ret;
2064b25a66c4SAmit Prakash Shukla }
2065b25a66c4SAmit Prakash Shukla 
2066b25a66c4SAmit Prakash Shukla void
2067b25a66c4SAmit Prakash Shukla perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt)
2068b25a66c4SAmit Prakash Shukla {
2069b25a66c4SAmit Prakash Shukla 	uint8_t dma_dev_id = 0;
2070b25a66c4SAmit Prakash Shukla 	struct test_perf *t = evt_test_priv(test);
2071b25a66c4SAmit Prakash Shukla 	uint16_t port;
2072b25a66c4SAmit Prakash Shukla 
2073b25a66c4SAmit Prakash Shukla 	if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR)
2074b25a66c4SAmit Prakash Shukla 		return;
2075b25a66c4SAmit Prakash Shukla 
2076b25a66c4SAmit Prakash Shukla 	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
2077b25a66c4SAmit Prakash Shukla 		struct prod_data *p = &t->prod[port];
2078b25a66c4SAmit Prakash Shukla 
2079b25a66c4SAmit Prakash Shukla 		rte_event_dma_adapter_vchan_del(TEST_PERF_DA_ID, p->da.dma_dev_id, p->da.vchan_id);
2080b25a66c4SAmit Prakash Shukla 	}
2081b25a66c4SAmit Prakash Shukla 
2082b25a66c4SAmit Prakash Shukla 	rte_event_dma_adapter_free(TEST_PERF_DA_ID);
2083b25a66c4SAmit Prakash Shukla 
2084b25a66c4SAmit Prakash Shukla 	rte_dma_stop(dma_dev_id);
2085b25a66c4SAmit Prakash Shukla 	rte_dma_close(dma_dev_id);
2086b25a66c4SAmit Prakash Shukla }
2087b25a66c4SAmit Prakash Shukla 
2088b25a66c4SAmit Prakash Shukla int
208941c219e6SJerin Jacob perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
209041c219e6SJerin Jacob {
209141c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
2092211b2e2aSPavan Nikhilesh 	unsigned int cache_sz;
209341c219e6SJerin Jacob 
2094211b2e2aSPavan Nikhilesh 	cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, (opt->pool_sz / 1.5) / t->nb_workers);
2095d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
2096d008f20bSPavan Nikhilesh 			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
209741c219e6SJerin Jacob 		t->pool = rte_mempool_create(test->name, /* mempool name */
209841c219e6SJerin Jacob 				opt->pool_sz, /* number of elements*/
209941c219e6SJerin Jacob 				sizeof(struct perf_elt), /* element size*/
2100211b2e2aSPavan Nikhilesh 				cache_sz, /* cache size*/
210141c219e6SJerin Jacob 				0, NULL, NULL,
210241c219e6SJerin Jacob 				perf_elt_init, /* obj constructor */
210341c219e6SJerin Jacob 				NULL, opt->socket_id, 0); /* flags */
21046776a581SVolodymyr Fialko 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&
21056776a581SVolodymyr Fialko 		   opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
21066776a581SVolodymyr Fialko 		t->pool = rte_mempool_create(test->name, /* mempool name */
21076776a581SVolodymyr Fialko 				opt->pool_sz, /* number of elements*/
21086776a581SVolodymyr Fialko 				sizeof(struct perf_elt) + modex_test_case.result_len,
21096776a581SVolodymyr Fialko 				/* element size*/
2110211b2e2aSPavan Nikhilesh 				cache_sz, /* cache size*/
21116776a581SVolodymyr Fialko 				0, NULL, NULL,
21126776a581SVolodymyr Fialko 				NULL, /* obj constructor */
21136776a581SVolodymyr Fialko 				NULL, opt->socket_id, 0); /* flags */
2114*bca734c2SPavan Nikhilesh 	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
2115*bca734c2SPavan Nikhilesh 		t->pool = rte_mempool_create(test->name,   /* mempool name */
2116*bca734c2SPavan Nikhilesh 					     opt->pool_sz, /* number of elements*/
2117*bca734c2SPavan Nikhilesh 					     sizeof(struct rte_event_dma_adapter_op) +
2118*bca734c2SPavan Nikhilesh 						     (sizeof(struct rte_dma_sge) * 2),
2119*bca734c2SPavan Nikhilesh 					     cache_sz,		       /* cache size*/
2120*bca734c2SPavan Nikhilesh 					     0, NULL, NULL, NULL,      /* obj constructor */
2121*bca734c2SPavan Nikhilesh 					     NULL, opt->socket_id, 0); /* flags */
21228577cc1aSPavan Nikhilesh 	} else {
21238577cc1aSPavan Nikhilesh 		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
21248577cc1aSPavan Nikhilesh 				opt->pool_sz, /* number of elements*/
2125211b2e2aSPavan Nikhilesh 				cache_sz, /* cache size*/
21268577cc1aSPavan Nikhilesh 				0,
21278577cc1aSPavan Nikhilesh 				RTE_MBUF_DEFAULT_BUF_SIZE,
21288577cc1aSPavan Nikhilesh 				opt->socket_id); /* flags */
21298577cc1aSPavan Nikhilesh 	}
21308577cc1aSPavan Nikhilesh 
213141c219e6SJerin Jacob 	if (t->pool == NULL) {
213241c219e6SJerin Jacob 		evt_err("failed to create mempool");
213341c219e6SJerin Jacob 		return -ENOMEM;
213441c219e6SJerin Jacob 	}
213541c219e6SJerin Jacob 
213641c219e6SJerin Jacob 	return 0;
213741c219e6SJerin Jacob }
213841c219e6SJerin Jacob 
213941c219e6SJerin Jacob void
214041c219e6SJerin Jacob perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
214141c219e6SJerin Jacob {
214241c219e6SJerin Jacob 	RTE_SET_USED(opt);
214341c219e6SJerin Jacob 	struct test_perf *t = evt_test_priv(test);
214441c219e6SJerin Jacob 
214541c219e6SJerin Jacob 	rte_mempool_free(t->pool);
214641c219e6SJerin Jacob }
2147ffbae86fSJerin Jacob 
2148ffbae86fSJerin Jacob int
2149ffbae86fSJerin Jacob perf_test_setup(struct evt_test *test, struct evt_options *opt)
2150ffbae86fSJerin Jacob {
2151ffbae86fSJerin Jacob 	void *test_perf;
2152ffbae86fSJerin Jacob 
2153ffbae86fSJerin Jacob 	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
2154ffbae86fSJerin Jacob 				RTE_CACHE_LINE_SIZE, opt->socket_id);
2155ffbae86fSJerin Jacob 	if (test_perf  == NULL) {
2156ffbae86fSJerin Jacob 		evt_err("failed to allocate test_perf memory");
2157ffbae86fSJerin Jacob 		goto nomem;
2158ffbae86fSJerin Jacob 	}
2159ffbae86fSJerin Jacob 	test->test_priv = test_perf;
2160ffbae86fSJerin Jacob 
2161ffbae86fSJerin Jacob 	struct test_perf *t = evt_test_priv(test);
2162ffbae86fSJerin Jacob 
2163d008f20bSPavan Nikhilesh 	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
2164d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_timers *
2165d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
2166d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_timers;
2167d008f20bSPavan Nikhilesh 	} else {
2168d008f20bSPavan Nikhilesh 		t->outstand_pkts = opt->nb_pkts *
2169d008f20bSPavan Nikhilesh 			evt_nr_active_lcores(opt->plcores);
2170d008f20bSPavan Nikhilesh 		t->nb_pkts = opt->nb_pkts;
2171d008f20bSPavan Nikhilesh 	}
2172d008f20bSPavan Nikhilesh 
2173ffbae86fSJerin Jacob 	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
2174ffbae86fSJerin Jacob 	t->done = false;
2175ffbae86fSJerin Jacob 	t->nb_flows = opt->nb_flows;
2176ffbae86fSJerin Jacob 	t->result = EVT_TEST_FAILED;
2177ffbae86fSJerin Jacob 	t->opt = opt;
2178ffbae86fSJerin Jacob 	memcpy(t->sched_type_list, opt->sched_type_list,
2179ffbae86fSJerin Jacob 			sizeof(opt->sched_type_list));
2180ffbae86fSJerin Jacob 	return 0;
2181ffbae86fSJerin Jacob nomem:
2182ffbae86fSJerin Jacob 	return -ENOMEM;
2183ffbae86fSJerin Jacob }
2184ffbae86fSJerin Jacob 
2185ffbae86fSJerin Jacob void
2186ffbae86fSJerin Jacob perf_test_destroy(struct evt_test *test, struct evt_options *opt)
2187ffbae86fSJerin Jacob {
2188ffbae86fSJerin Jacob 	RTE_SET_USED(opt);
2189ffbae86fSJerin Jacob 
2190ffbae86fSJerin Jacob 	rte_free(test->test_priv);
2191ffbae86fSJerin Jacob }
2192