/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"

#define NB_CRYPTODEV_DESCRIPTORS 1024
#define DATA_SIZE		512
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		   sizeof(struct rte_crypto_sym_op) + \
		   sizeof(union rte_event_crypto_metadata))

struct modex_test_data {
	enum rte_crypto_asym_xform_type xform_type;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} base;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} exponent;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} modulus;
	struct {
		uint8_t data[DATA_SIZE];
		uint16_t len;
	} remainder;
	uint16_t result_len;
};

static struct
modex_test_data modex_test_case = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
	.base = {
		.data = {
			0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
			0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
			0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
		},
		.len = 20,
	},
	.exponent = {
		.data = {
			0x01, 0x00, 0x01
		},
		.len = 3,
	},
	.remainder = {
		.data = {
			0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
			0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
			0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
			0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
			0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
			0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
			0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
			0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
			0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
			0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
			0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
			0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
			0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
			0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
			0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
			0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
		},
		.len = 128,
	},
	.modulus = {
		.data = {
			0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
			0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
			0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
			0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
			0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
			0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
			0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
			0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
			0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
			0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
			0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
			0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
			0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
			0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
			0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
			0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
		},
		.len = 128,
	},
	.result_len = 128,
};

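/*
 * Print the packet distribution across the worker cores and return the
 * overall verdict recorded in the test private data.
 */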
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_perf *t = evt_test_priv(test);

	printf("Packet distribution across worker cores :\n");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		printf("Worker %d packets: "CLGRN"%"PRIu64" "CLNRM"percentage:"
				CLGRN" %3.2f"CLNRM"\n", i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);

	return t->result;
}

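/*
 * Synthetic producer: pulls perf_elt objects from the mempool and
 * enqueues them one event at a time, cycling the flow id through
 * nb_flows and timestamping each element for latency measurement.
 */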
static inline int
perf_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			ev.flow_id = flow_counter++ % nb_flows;
			ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_enqueue_burst(dev_id,
						       port, &ev, 1) != 1) {
				if (t->done)
					break;
				rte_pause();
				m[i]->timestamp = rte_get_timer_cycles();
			}
		}
		count += BURST_SIZE;
	}

	return 0;
}

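/*
 * Burst variant of the synthetic producer. The enqueue burst size is
 * taken from opt->prod_enq_burst_sz and clamped to the maximum enqueue
 * depth reported by the event device.
 */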
static inline int
perf_producer_burst(void *arg)
{
	uint32_t i;
	uint64_t timestamp;
	struct rte_event_dev_info dev_info;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint16_t enq = 0;
	uint64_t count = 0;
	struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
	uint32_t burst_size = opt->prod_enq_burst_sz;

	memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
	rte_event_dev_info_get(dev_id, &dev_info);
	if (dev_info.max_event_port_enqueue_depth < burst_size)
		burst_size = dev_info.max_event_port_enqueue_depth;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	for (i = 0; i < burst_size; i++) {
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = t->opt->sched_type_list[0];
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0; /* stage 0 */
	}

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
			continue;
		timestamp = rte_get_timer_cycles();
		for (i = 0; i < burst_size; i++) {
			ev[i].flow_id = flow_counter++ % nb_flows;
			ev[i].event_ptr = m[i];
			m[i]->timestamp = timestamp;
		}
		enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
		while (enq < burst_size) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq,
							burst_size - enq);
			if (t->done)
				break;
			rte_pause();
			timestamp = rte_get_timer_cycles();
			for (i = enq; i < burst_size; i++)
				m[i]->timestamp = timestamp;
		}
		count += burst_size;
	}
	return 0;
}

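/*
 * Event timer producer: arms one event timer at a time through the
 * timer adapters and accumulates the arm latency, reported on exit as
 * an average in microseconds.
 */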
static inline int
perf_event_timer_producer(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
			while (rte_event_timer_arm_burst(
			       adptr[flow_counter % nb_timer_adptrs],
			       (struct rte_event_timer **)&m[i], 1) != 1) {
				if (t->done)
					break;
				m[i]->timestamp = rte_get_timer_cycles();
			}
			arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
		}
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)arm_latency / count /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

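/*
 * Burst variant of the timer producer: arms BURST_SIZE timers sharing a
 * common timeout via rte_event_timer_arm_tmo_tick_burst().
 */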
static inline int
perf_event_timer_producer_burst(void *arg)
{
	int i;
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint64_t arm_latency = 0;
	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_timers = opt->nb_timers;
	struct rte_mempool *pool = t->pool;
	struct perf_elt *m[BURST_SIZE + 1] = {NULL};
	struct rte_event_timer_adapter **adptr = t->timer_adptr;
	struct rte_event_timer tim;
	uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

	memset(&tim, 0, sizeof(struct rte_event_timer));
	timeout_ticks =
		opt->optm_timer_tick_nsec
			? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
			       opt->optm_timer_tick_nsec)
			: timeout_ticks;
	timeout_ticks += timeout_ticks ? 0 : 1;
	tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
	tim.ev.op = RTE_EVENT_OP_NEW;
	tim.ev.sched_type = t->opt->sched_type_list[0];
	tim.ev.queue_id = p->queue_id;
	tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	tim.state = RTE_EVENT_TIMER_NOT_ARMED;
	tim.timeout_ticks = timeout_ticks;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d\n", __func__, rte_lcore_id());

	while (count < nb_timers && t->done == false) {
		if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
			continue;
		for (i = 0; i < BURST_SIZE; i++) {
			rte_prefetch0(m[i + 1]);
			m[i]->tim = tim;
			m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
			m[i]->tim.ev.event_ptr = m[i];
			m[i]->timestamp = rte_get_timer_cycles();
		}
		rte_event_timer_arm_tmo_tick_burst(
				adptr[flow_counter % nb_timer_adptrs],
				(struct rte_event_timer **)m,
				tim.timeout_ticks,
				BURST_SIZE);
		arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
		count += BURST_SIZE;
	}
	fflush(stdout);
	rte_delay_ms(1000);
	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
			__func__, rte_lcore_id(),
			count ? (float)arm_latency / count /
			(rte_get_timer_hz() / 1000000) : 0);
	return 0;
}

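/*
 * OP_NEW mode: crypto ops are enqueued directly to the cryptodev queue
 * pair; the crypto adapter dequeues the completed ops and injects them
 * into the event device as new events.
 */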
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	uint16_t data_length, data_offset;
	struct evt_options *opt = t->opt;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_mbuf *m;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	if (opt->crypto_cipher_bit_mode) {
		data_offset = offset << 3;
		data_length = (len - offset) << 3;
	} else {
		data_offset = offset;
		data_length = len - offset;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;

			sym_op->cipher.data.offset = data_offset;
			sym_op->cipher.data.length = data_length;

			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
				t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

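/*
 * OP_FORWARD mode: each crypto op is wrapped in an event and submitted
 * with rte_event_crypto_adapter_enqueue(), which forwards it to the
 * cryptodev on behalf of the event port.
 */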
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct test_perf *t = p->t;
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	struct rte_mempool *pool = t->pool;
	struct evt_options *opt = t->opt;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	struct rte_crypto_op *op;
	uint16_t len, offset;
	struct rte_event ev;
	struct rte_mbuf *m;
	uint64_t count = 0;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev.event_type = RTE_EVENT_TYPE_CPU;

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				continue;
			}

			m = rte_pktmbuf_alloc(pool);
			if (unlikely(m == NULL)) {
				alloc_failures++;
				rte_crypto_op_free(op);
				continue;
			}

			rte_pktmbuf_append(m, len);
			sym_op = op->sym;
			sym_op->m_src = m;
			sym_op->cipher.data.offset = offset;
			sym_op->cipher.data.length = len - offset;
			rte_crypto_op_attach_sym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		} else {
			struct rte_crypto_asym_op *asym_op;
			uint8_t *result;

			if (rte_mempool_get(pool, (void **)&result)) {
				alloc_failures++;
				continue;
			}

			op = rte_crypto_op_alloc(t->ca_op_pool,
					 RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
			if (unlikely(op == NULL)) {
				alloc_failures++;
				rte_mempool_put(pool, result);
				continue;
			}

			asym_op = op->asym;
			asym_op->modex.base.data = modex_test_case.base.data;
			asym_op->modex.base.length = modex_test_case.base.len;
			asym_op->modex.result.data = result;
			asym_op->modex.result.length = modex_test_case.result_len;
			rte_crypto_op_attach_asym_session(
				op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
		}
		ev.event_ptr = op;

		while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
		       t->done == false)
			rte_pause();

		count++;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

static inline int
perf_event_crypto_producer(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new(p);
	else
		crypto_adapter_enq_op_fwd(p);

	return 0;
}

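/* Burst variant of the OP_NEW mode crypto producer. */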
static void
crypto_adapter_enq_op_new_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	uint16_t qp_id = p->ca.cdev_qp_id;
	uint8_t cdev_id = p->ca.cdev_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
		       p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
			}
		}

		enq = 0;
		while (!t->done) {
			enq += rte_cryptodev_enqueue_burst(cdev_id, qp_id, ops_burst + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

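/* Burst variant of the OP_FORWARD mode crypto producer. */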
static void
crypto_adapter_enq_op_fwd_burst(struct prod_data *p)
{
	const struct test_perf *t = p->t;
	const struct evt_options *opt = t->opt;

	struct rte_mbuf *m, *pkts_burst[MAX_PROD_ENQ_BURST_SIZE];
	struct rte_crypto_op *ops_burst[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t burst_size = opt->prod_enq_burst_sz;
	struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE];
	uint8_t *result[MAX_PROD_ENQ_BURST_SIZE];
	const uint32_t nb_flows = t->nb_flows;
	const uint64_t nb_pkts = t->nb_pkts;
	uint16_t len, enq, nb_alloc, offset;
	struct rte_mempool *pool = t->pool;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	uint64_t alloc_failures = 0;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	uint32_t i;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
		       __func__, rte_lcore_id(), port, p->queue_id,
		       p->ca.cdev_id, p->ca.cdev_qp_id);

	offset = sizeof(struct perf_elt);
	len = RTE_MAX(RTE_ETHER_MIN_LEN + offset, opt->mbuf_sz);

	for (i = 0; i < burst_size; i++) {
		ev[i].event = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].queue_id = p->queue_id;
		ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
	}

	while (count < nb_pkts && t->done == false) {
		if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			struct rte_crypto_sym_op *sym_op;
			int ret;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			ret = rte_pktmbuf_alloc_bulk(pool, pkts_burst, burst_size);
			if (unlikely(ret != 0)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				m = pkts_burst[i];
				rte_pktmbuf_append(m, len);
				sym_op = ops_burst[i]->sym;
				sym_op->m_src = m;
				sym_op->cipher.data.offset = offset;
				sym_op->cipher.data.length = len - offset;
				rte_crypto_op_attach_sym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		} else {
			struct rte_crypto_asym_op *asym_op;

			nb_alloc = rte_crypto_op_bulk_alloc(t->ca_op_pool,
					RTE_CRYPTO_OP_TYPE_ASYMMETRIC, ops_burst, burst_size);
			if (unlikely(nb_alloc != burst_size)) {
				alloc_failures++;
				continue;
			}

			if (rte_mempool_get_bulk(pool, (void **)result, burst_size)) {
				alloc_failures++;
				rte_mempool_put_bulk(t->ca_op_pool, (void **)ops_burst, burst_size);
				continue;
			}

			for (i = 0; i < burst_size; i++) {
				asym_op = ops_burst[i]->asym;
				asym_op->modex.base.data = modex_test_case.base.data;
				asym_op->modex.base.length = modex_test_case.base.len;
				asym_op->modex.result.data = result[i];
				asym_op->modex.result.length = modex_test_case.result_len;
				rte_crypto_op_attach_asym_session(ops_burst[i],
						p->ca.crypto_sess[flow_counter++ % nb_flows]);
				ev[i].event_ptr = ops_burst[i];
			}
		}

		enq = 0;
		while (!t->done) {
			enq += rte_event_crypto_adapter_enqueue(dev_id, port, ev + enq,
					burst_size - enq);
			if (enq == burst_size)
				break;
		}

		count += burst_size;
	}

	if (opt->verbose_level > 1 && alloc_failures)
		printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
		       __func__, rte_lcore_id(), alloc_failures);
}

static inline int
perf_event_crypto_producer_burst(void *arg)
{
	struct prod_data *p = arg;
	struct evt_options *opt = p->t->opt;

	if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		crypto_adapter_enq_op_new_burst(p);
	else
		crypto_adapter_enq_op_fwd_burst(p);

	return 0;
}

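/*
 * Entry point for producer lcores: dispatch to the producer routine
 * matching the configured producer type and enqueue burst size.
 */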
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	bool burst = evt_has_burst_mode(p->dev_id);

	/* In case of synthetic producer, launch perf_producer or
	 * perf_producer_burst depending on producer enqueue burst size
	 */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz == 1)
		return perf_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
			t->opt->prod_enq_burst_sz > 1) {
		if (!burst)
			evt_err("This event device does not support burst mode");
		else
			return perf_producer_burst(arg);
	}
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			!t->opt->timdev_use_burst)
		return perf_event_timer_producer(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
			t->opt->timdev_use_burst)
		return perf_event_timer_producer_burst(arg);
	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		if (t->opt->prod_enq_burst_sz > 1)
			return perf_event_crypto_producer_burst(arg);
		else
			return perf_event_crypto_producer(arg);
	}
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}


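/*
 * Launch the worker and producer lcores, then poll once per second to
 * print the throughput in mpps (plus the average forward latency when
 * enabled). If no progress is observed for five seconds, the event
 * device state is dumped and the test is aborted as deadlocked.
 */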
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = t->outstand_pkts;

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining - remaining) / 1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)latency / pkts / freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
				    opt->prod_type ==
					    EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
					t->done = true;
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
		    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
		     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No scheduling progress for 5 seconds, possible deadlock");
				t->done = true;
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

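/*
 * Create one Rx adapter per ethdev and add all of its queues. When the
 * adapter lacks the internal port capability, a service core is set up
 * to run the adapter's service function.
 */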
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					opt->dev_id);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter\n");
				return ret;
			}
		}
	}

	return ret;
}

static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
	int i;
	int ret;
	struct rte_event_timer_adapter_info adapter_info;
	struct rte_event_timer_adapter *wl;
	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

	if (nb_producers == 1)
		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
		struct rte_event_timer_adapter_conf config = {
			.event_dev_id = t->opt->dev_id,
			.timer_adapter_id = i,
			.timer_tick_ns = t->opt->timer_tick_nsec,
			.max_tmo_ns = t->opt->max_tmo_nsec,
			.nb_timers = t->opt->pool_sz,
			.flags = flags,
		};

		wl = rte_event_timer_adapter_create(&config);
		if (wl == NULL) {
			evt_err("failed to create event timer ring %d", i);
			return rte_errno;
		}

		memset(&adapter_info, 0,
				sizeof(struct rte_event_timer_adapter_info));
		rte_event_timer_adapter_get_info(wl, &adapter_info);
		t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

		if (!(adapter_info.caps &
				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_timer_adapter_service_id_get(wl,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for timer adapter\n");
				return ret;
			}
			rte_service_runstate_set(service_id, 1);
		}
		t->timer_adptr[i] = wl;
	}
	return 0;
}

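/*
 * Validate the crypto adapter capabilities against the requested
 * adapter mode and event vector settings, then add the producer's
 * cryptodev queue pair to the adapter.
 */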
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
	struct rte_event_crypto_adapter_queue_conf conf;
	struct evt_options *opt = t->opt;
	uint32_t cap;
	int ret;

	memset(&conf, 0, sizeof(conf));

	ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
	if (ret) {
		evt_err("Failed to get crypto adapter capabilities");
		return ret;
	}

	if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
	    ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
		evt_err("crypto adapter %s mode unsupported\n",
			opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
		return -ENOTSUP;
	} else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
		evt_err("Storing crypto session not supported");
		return -ENOTSUP;
	}

	if (opt->ena_vector) {
		struct rte_event_crypto_adapter_vector_limits limits;

		if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR)) {
			evt_err("Crypto adapter doesn't support event vector");
			return -EINVAL;
		}

		ret = rte_event_crypto_adapter_vector_limits_get(p->dev_id, p->ca.cdev_id, &limits);
		if (ret) {
			evt_err("Failed to get crypto adapter's vector limits");
			return ret;
		}

		if (opt->vector_size < limits.min_sz || opt->vector_size > limits.max_sz) {
			evt_err("Vector size [%d] not within limits max[%d] min[%d]",
				opt->vector_size, limits.max_sz, limits.min_sz);
			return -EINVAL;
		}

		if (limits.log2_sz && !rte_is_power_of_2(opt->vector_size)) {
			evt_err("Vector size [%d] not power of 2", opt->vector_size);
			return -EINVAL;
		}

		if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
			opt->vector_tmo_nsec < limits.min_timeout_ns) {
			evt_err("Vector timeout [%" PRIu64 "] not within limits "
				"max[%" PRIu64 "] min[%" PRIu64 "]",
				opt->vector_tmo_nsec, limits.max_timeout_ns, limits.min_timeout_ns);
			return -EINVAL;
		}

		conf.vector_mp = t->ca_vector_pool;
		conf.vector_sz = opt->vector_size;
		conf.vector_timeout_ns = opt->vector_tmo_nsec;
		conf.flags |= RTE_EVENT_CRYPTO_ADAPTER_EVENT_VECTOR;
	}

	if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
		conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
		conf.ev.queue_id = p->queue_id;
	}

	ret = rte_event_crypto_adapter_queue_pair_add(
		TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, &conf);

	return ret;
}

static void *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx cap_idx;
	enum rte_crypto_cipher_algorithm cipher_algo;
	struct rte_crypto_sym_xform cipher_xform;
	struct evt_options *opt = t->opt;
	uint16_t key_size;
	uint16_t iv_size;
	void *sess;

	cipher_algo = opt->crypto_cipher_alg;
	key_size = opt->crypto_cipher_key_sz;
	iv_size = opt->crypto_cipher_iv_sz;

	/* Check if device supports the algorithm */
	cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cap_idx.algo.cipher = cipher_algo;

	cap = rte_cryptodev_sym_capability_get(p->ca.cdev_id, &cap_idx);
	if (cap == NULL) {
		evt_err("Device doesn't support cipher algorithm [%s]. Test Skipped\n",
			rte_cryptodev_get_cipher_algo_string(cipher_algo));
		return NULL;
	}

	/* Check if device supports key size and IV size */
	if (rte_cryptodev_sym_capability_check_cipher(cap, key_size,
			iv_size) < 0) {
		evt_err("Device doesn't support cipher configuration:\n"
			"cipher algo [%s], key sz [%d], iv sz [%d]. Test Skipped\n",
			rte_cryptodev_get_cipher_algo_string(cipher_algo), key_size, iv_size);
		return NULL;
	}

	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xform.cipher.algo = cipher_algo;
	cipher_xform.cipher.key.data = opt->crypto_cipher_key;
	cipher_xform.cipher.key.length = key_size;
	cipher_xform.cipher.iv.length = iv_size;
	cipher_xform.cipher.iv.offset = IV_OFFSET;
	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher_xform.next = NULL;

	sess = rte_cryptodev_sym_session_create(p->ca.cdev_id, &cipher_xform,
			t->ca_sess_pool);
	if (sess == NULL) {
		evt_err("Failed to create sym session");
		return NULL;
	}

	return sess;
}

static void *
cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
{
	const struct rte_cryptodev_asymmetric_xform_capability *capability;
	struct rte_cryptodev_asym_capability_idx cap_idx;
	struct rte_crypto_asym_xform xform;
	void *sess;

	xform.next = NULL;
	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
	cap_idx.type = xform.xform_type;
	capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
	if (capability == NULL) {
		evt_err("Device doesn't support MODEX. Test Skipped\n");
		return NULL;
	}

	xform.modex.modulus.data = modex_test_case.modulus.data;
	xform.modex.modulus.length = modex_test_case.modulus.len;
	xform.modex.exponent.data = modex_test_case.exponent.data;
	xform.modex.exponent.length = modex_test_case.exponent.len;

	if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
			t->ca_asym_sess_pool, &sess)) {
		evt_err("Failed to create asym session");
		return NULL;
	}

	return sess;
}

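/*
 * Set up one event port per worker, linked to all queues, followed by
 * the producer-side resources, which depend on the producer type
 * (Rx adapter, timer adapter, crypto adapter or synthetic ports).
 */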
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues,
				const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

		ret = rte_event_port_setup(opt->dev_id, port, &conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->t = t;
		}

		struct rte_event_port_conf conf = *port_conf;
		conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

		ret = perf_event_rx_adapter_setup(opt, stride, conf);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			p->queue_id = prod * stride;
			p->t = t;
			prod++;
		}

		ret = perf_event_timer_adapter_setup(t);
		if (ret)
			return ret;
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		struct rte_event_port_conf conf = *port_conf;
		uint8_t cdev_id = 0;
		uint16_t qp_id = 0;

		ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
						      opt->dev_id, &conf, 0);
		if (ret) {
			evt_err("Failed to create crypto adapter");
			return ret;
		}

		prod = 0;
		for (; port < perf_nb_event_ports(opt); port++) {
			union rte_event_crypto_metadata m_data;
			struct prod_data *p = &t->prod[port];
			uint32_t flow_id;

			if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
				cdev_id++;
				qp_id = 0;
			}

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->ca.cdev_id = cdev_id;
			p->ca.cdev_qp_id = qp_id;
			p->ca.crypto_sess = rte_zmalloc_socket(
				NULL, sizeof(void *) * t->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
			p->t = t;

			ret = perf_event_crypto_adapter_setup(t, p);
			if (ret)
				return ret;

			m_data.request_info.cdev_id = p->ca.cdev_id;
			m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
			m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
			m_data.response_info.queue_id = p->queue_id;

			for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
				m_data.response_info.flow_id = flow_id;
				if (opt->crypto_op_type ==
						RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
					void *sess;

					sess = cryptodev_sym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;

					ret = rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					if (ret)
						return ret;
					p->ca.crypto_sess[flow_id] = sess;
				} else {
					void *sess;

					sess = cryptodev_asym_sess_create(p, t);
					if (sess == NULL)
						return -ENOMEM;
					ret = rte_cryptodev_session_event_mdata_set(
						cdev_id,
						sess,
						RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
						RTE_CRYPTO_OP_WITH_SESSION,
						&m_data, sizeof(m_data));
					if (ret)
						return ret;
					p->ca.crypto_sess[flow_id] = sess;
				}
			}

			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}

			qp_id++;
			prod++;
		}
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			p->queue_id = prod * stride;
			p->t = t;

			struct rte_event_port_conf conf = *port_conf;
			conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_HINT_PRODUCER |
				RTE_EVENT_PORT_CFG_HINT_CONSUMER;

			ret = rte_event_port_setup(opt->dev_id, port, &conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}

int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + main when producer cores are used
	 * Else N worker + main when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test needs a minimum of %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlap with producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
	    opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_main_lcore())) {
			evt_err("producer lcores overlap with the main lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("at least one producer lcore is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if ((opt->nb_stages == 1 &&
			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
			opt->fwd_latency) {
		evt_info("fwd_latency is valid only when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}

	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}

static void
perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
		      void *args)
{
	rte_mempool_put(args, ev.event_ptr);
}

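/*
 * Drain a worker port on exit: return events that were dequeued but not
 * re-enqueued to the mempool, release the remaining scheduler contexts
 * and quiesce the port.
 */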
void
perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
		    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
		    uint16_t nb_deq)
{
	int i;

	if (nb_deq) {
		for (i = nb_enq; i < nb_deq; i++)
			rte_mempool_put(pool, events[i].event_ptr);

		for (i = 0; i < nb_deq; i++)
			events[i].op = RTE_EVENT_OP_RELEASE;
		rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
	}
	rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (i = 0; i < opt->nb_timer_adptrs; i++)
			rte_event_timer_adapter_stop(t->timer_adptr[i]);
	}
	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
	    void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

void
perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			rte_eth_dev_rx_queue_stop(i, 0);
		}
	}
}

void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_tx_adapter_stop(i);
			rte_event_eth_tx_adapter_queue_del(i, i, -1);
			rte_eth_dev_tx_queue_stop(i, 0);
			rte_eth_dev_stop(i);
		}
	}
}

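/*
 * Create the crypto op and session pools, then configure every
 * cryptodev with enough queue pairs to service all producer lcores.
 */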
int
perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
	struct test_perf *t = evt_test_priv(test);
	unsigned int max_session_size;
	uint32_t nb_sessions;
	int ret;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return 0;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		evt_err("No crypto devices available\n");
		return -ENODEV;
	}

	t->ca_op_pool = rte_crypto_op_pool_create(
		"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
		128, sizeof(union rte_event_crypto_metadata) + EVT_CRYPTO_MAX_IV_SIZE,
		rte_socket_id());
	if (t->ca_op_pool == NULL) {
		evt_err("Failed to create crypto op pool");
		return -ENOMEM;
	}

	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
		"ca_asym_sess_pool", nb_sessions, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_asym_sess_pool == NULL) {
		evt_err("Failed to create asym session pool");
		ret = -ENOMEM;
		goto err;
	}

	max_session_size = 0;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		unsigned int session_size;

		session_size =
			rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (session_size > max_session_size)
			max_session_size = session_size;
	}

	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
		"ca_sess_pool", nb_sessions, max_session_size, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_sess_pool == NULL) {
		evt_err("Failed to create sym session pool");
		ret = -ENOMEM;
		goto err;
	}

	if (opt->ena_vector) {
		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) * 2;
		nb_elem = RTE_MAX(512U, nb_elem);
		nb_elem += evt_nr_active_lcores(opt->wlcores) * 32;
		t->ca_vector_pool = rte_event_vector_pool_create("vector_pool", nb_elem, 32,
				opt->vector_size, opt->socket_id);
		if (t->ca_vector_pool == NULL) {
			evt_err("Failed to create event vector pool");
			ret = -ENOMEM;
			goto err;
		}
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount of
	 * available number of logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be set
	 * up per device.
	 */
	nb_plcores = evt_nr_active_lcores(opt->plcores);
	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
					     nb_plcores / cdev_count;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_config conf;
		struct rte_cryptodev_info info;
		int qp_id;

		rte_cryptodev_info_get(cdev_id, &info);
		if (nb_qps > info.max_nb_queue_pairs) {
			evt_err("Not enough queue pairs per cryptodev (%u)",
				nb_qps);
			ret = -EINVAL;
			goto err;
		}

		conf.nb_queue_pairs = nb_qps;
		conf.socket_id = SOCKET_ID_ANY;
		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret) {
			evt_err("Failed to configure cryptodev (%u)", cdev_id);
			goto err;
		}

		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
		qp_conf.mp_session = t->ca_sess_pool;

		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
			ret = rte_cryptodev_queue_pair_setup(
				cdev_id, qp_id, &qp_conf,
				rte_cryptodev_socket_id(cdev_id));
			if (ret) {
				evt_err("Failed to setup queue pairs on cryptodev %u\n",
					cdev_id);
				goto err;
			}
		}
	}

	return 0;
err:
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
		rte_cryptodev_close(cdev_id);

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
	rte_mempool_free(t->ca_vector_pool);

	return ret;
}

void
perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
	struct test_perf *t = evt_test_priv(test);
	uint16_t port;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return;

	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
		void *sess;
		struct prod_data *p = &t->prod[port];
		uint32_t flow_id;
		uint8_t cdev_id;

		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
			sess = p->ca.crypto_sess[flow_id];
			cdev_id = p->ca.cdev_id;
			rte_cryptodev_sym_session_free(cdev_id, sess);
		}

		rte_event_crypto_adapter_queue_pair_del(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
	}

	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);

	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
	}

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
	rte_mempool_free(t->ca_vector_pool);
}

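/*
 * The element pool depends on the producer type: plain perf_elt objects
 * for synthetic and timer producers, perf_elt plus modex result space
 * for asymmetric crypto, and a pktmbuf pool otherwise.
 */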
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR &&
			opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt) + modex_test_case.result_len,
				/* element size */
				512, /* cache size */
				0, NULL, NULL,
				NULL, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0,
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */

	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}
1939